Diffstat (limited to 'sql')
234 files changed, 91542 insertions, 26340 deletions
diff --git a/sql/Makefile.am b/sql/Makefile.am index 0fc81a48c63..218cf5dbca5 100644 --- a/sql/Makefile.am +++ b/sql/Makefile.am @@ -16,91 +16,115 @@ #called from the top level Makefile - MYSQLDATAdir = $(localstatedir) MYSQLSHAREdir = $(pkgdatadir) MYSQLBASEdir= $(prefix) -INCLUDES = @MT_INCLUDES@ \ - @bdb_includes@ @innodb_includes@ \ - -I$(srcdir)/../include \ - -I$(srcdir)/../regex \ - -I$(srcdir) -I../include -I. $(openssl_includes) +INCLUDES = @MT_INCLUDES@ @ZLIB_INCLUDES@ \ + @bdb_includes@ @innodb_includes@ @ndbcluster_includes@ \ + -I$(top_builddir)/include -I$(top_srcdir)/include \ + -I$(top_srcdir)/regex -I$(srcdir) $(openssl_includes) WRAPLIBS= @WRAPLIBS@ SUBDIRS = share libexec_PROGRAMS = mysqld noinst_PROGRAMS = gen_lex_hash +bin_PROGRAMS = mysql_tzinfo_to_sql gen_lex_hash_LDFLAGS = @NOINST_LDFLAGS@ LDADD = @isam_libs@ \ - ../myisam/libmyisam.a \ - ../myisammrg/libmyisammrg.a \ - ../heap/libheap.a \ - ../vio/libvio.a \ - ../mysys/libmysys.a \ - ../dbug/libdbug.a \ - ../regex/libregex.a \ - ../strings/libmystrings.a + $(top_builddir)/myisam/libmyisam.a \ + $(top_builddir)/myisammrg/libmyisammrg.a \ + $(top_builddir)/heap/libheap.a \ + $(top_builddir)/vio/libvio.a \ + $(top_builddir)/mysys/libmysys.a \ + $(top_builddir)/dbug/libdbug.a \ + $(top_builddir)/regex/libregex.a \ + $(top_builddir)/strings/libmystrings.a @ZLIB_LIBS@ @NDB_SCI_LIBS@ mysqld_LDADD = @MYSQLD_EXTRA_LDFLAGS@ \ @bdb_libs@ @innodb_libs@ @pstack_libs@ \ @innodb_system_libs@ \ + @ndbcluster_libs@ @ndbcluster_system_libs@ \ $(LDADD) $(CXXLDFLAGS) $(WRAPLIBS) @LIBDL@ @openssl_libs@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \ item_strfunc.h item_timefunc.h item_uniq.h \ - item_create.h mysql_priv.h \ + item_create.h item_subselect.h item_row.h \ + mysql_priv.h item_geofunc.h sql_bitmap.h \ procedure.h sql_class.h sql_lex.h sql_list.h \ sql_manager.h sql_map.h sql_string.h unireg.h \ field.h handler.h mysqld_suffix.h \ ha_isammrg.h ha_isam.h ha_myisammrg.h\ ha_heap.h ha_myisam.h ha_berkeley.h ha_innodb.h \ - opt_range.h opt_ft.h \ + ha_ndbcluster.h opt_range.h protocol.h \ sql_select.h structs.h table.h sql_udf.h hash_filo.h\ lex.h lex_symbol.h sql_acl.h sql_crypt.h \ - log_event.h mini_client.h sql_repl.h slave.h \ - stacktrace.h sql_sort.h sql_cache.h set_var.h + log_event.h sql_repl.h slave.h \ + stacktrace.h sql_sort.h sql_cache.h set_var.h \ + spatial.h gstream.h client_settings.h tzfile.h \ + tztime.h examples/ha_example.h examples/ha_archive.h \ + examples/ha_tina.h ha_blackhole.h mysqld_SOURCES = sql_lex.cc sql_handler.cc \ item.cc item_sum.cc item_buff.cc item_func.cc \ item_cmpfunc.cc item_strfunc.cc item_timefunc.cc \ - thr_malloc.cc item_create.cc \ - field.cc key.cc sql_class.cc sql_list.cc \ - net_serv.cc net_pkg.cc lock.cc my_lock.c \ + thr_malloc.cc item_create.cc item_subselect.cc \ + item_row.cc item_geofunc.cc \ + field.cc strfunc.cc key.cc sql_class.cc sql_list.cc \ + net_serv.cc protocol.cc sql_state.c \ + lock.cc my_lock.c \ sql_string.cc sql_manager.cc sql_map.cc \ mysqld.cc password.c hash_filo.cc hostname.cc \ - convert.cc set_var.cc sql_parse.cc sql_yacc.yy \ + set_var.cc sql_parse.cc sql_yacc.yy \ sql_base.cc table.cc sql_select.cc sql_insert.cc \ + sql_prepare.cc sql_error.cc \ sql_update.cc sql_delete.cc uniques.cc sql_do.cc \ procedure.cc item_uniq.cc sql_test.cc \ log.cc log_event.cc init.cc derror.cc sql_acl.cc \ unireg.cc des_key_file.cc \ - time.cc opt_range.cc opt_sum.cc opt_ft.cc \ + discover.cc time.cc opt_range.cc opt_sum.cc \ records.cc filesort.cc handler.cc 
\ ha_heap.cc ha_myisam.cc ha_myisammrg.cc \ ha_berkeley.cc ha_innodb.cc \ - ha_isam.cc ha_isammrg.cc \ + ha_isam.cc ha_isammrg.cc ha_ndbcluster.cc \ sql_db.cc sql_table.cc sql_rename.cc sql_crypt.cc \ sql_load.cc mf_iocache.cc field_conv.cc sql_show.cc \ sql_udf.cc sql_analyse.cc sql_analyse.h sql_cache.cc \ - slave.cc sql_repl.cc sql_union.cc \ - mini_client.cc mini_client_errors.c \ - stacktrace.c repl_failsafe.h repl_failsafe.cc + slave.cc sql_repl.cc sql_union.cc sql_derived.cc \ + client.c sql_client.cc mini_client_errors.c pack.c\ + stacktrace.c repl_failsafe.h repl_failsafe.cc \ + gstream.cc spatial.cc sql_help.cc protocol_cursor.cc \ + tztime.cc my_time.c \ + examples/ha_example.cc examples/ha_archive.cc \ + examples/ha_tina.cc ha_blackhole.cc + gen_lex_hash_SOURCES = gen_lex_hash.cc gen_lex_hash_LDADD = $(LDADD) $(CXXLDFLAGS) +mysql_tzinfo_to_sql_SOURCES = mysql_tzinfo_to_sql.cc +mysql_tzinfo_to_sql_LDADD = @MYSQLD_EXTRA_LDFLAGS@ $(LDADD) $(CXXLDFLAGS) DEFS = -DMYSQL_SERVER \ -DDEFAULT_MYSQL_HOME="\"$(MYSQLBASEdir)\"" \ -DDATADIR="\"$(MYSQLDATAdir)\"" \ -DSHAREDIR="\"$(MYSQLSHAREdir)\"" \ @DEFS@ -# Don't put lex_hash.h in BUILT_SOURCES as this will give infinite recursion -BUILT_SOURCES = sql_yacc.cc sql_yacc.h + +BUILT_SOURCES = sql_yacc.cc sql_yacc.h lex_hash.h EXTRA_DIST = udf_example.cc $(BUILT_SOURCES) AM_YFLAGS = -d -link_sources: +mysql_tzinfo_to_sql.cc: + rm -f mysql_tzinfo_to_sql.cc + @LN_CP_F@ $(srcdir)/tztime.cc mysql_tzinfo_to_sql.cc + +link_sources: mysql_tzinfo_to_sql.cc rm -f mini_client_errors.c - @LN_CP_F@ ../libmysql/errmsg.c mini_client_errors.c + @LN_CP_F@ $(top_srcdir)/libmysql/errmsg.c mini_client_errors.c + rm -f pack.c + @LN_CP_F@ $(top_srcdir)/sql-common/pack.c pack.c + rm -f client.c + @LN_CP_F@ $(top_srcdir)/sql-common/client.c client.c + rm -f my_time.c + @LN_CP_F@ $(top_srcdir)/sql-common/my_time.c my_time.c -gen_lex_hash.o: gen_lex_hash.cc lex.h - $(CXXCOMPILE) -c $(INCLUDES) $< +mysql_tzinfo_to_sql.o: $(mysql_tzinfo_to_sql_SOURCES) + $(CXXCOMPILE) -c $(INCLUDES) -DTZINFO2SQL $< # Try to get better dependencies for the grammar. Othervise really bad # things like different grammars for different pars of MySQL can @@ -108,24 +132,20 @@ gen_lex_hash.o: gen_lex_hash.cc lex.h sql_yacc.cc: sql_yacc.yy sql_yacc.h: sql_yacc.yy -sql_yacc.o: sql_yacc.cc sql_yacc.h $(noinst_HEADERS) +sql_yacc.o: sql_yacc.cc sql_yacc.h $(HEADERS) @echo "Note: The following compile may take a long time." @echo "If it fails, re-run configure with --with-low-memory" $(CXXCOMPILE) $(LM_CFLAGS) -c $< -lex_hash.h: lex.h gen_lex_hash.cc sql_yacc.h - $(MAKE) gen_lex_hash$(EXEEXT) +lex_hash.h: gen_lex_hash$(EXEEXT) ./gen_lex_hash$(EXEEXT) > $@ -# Hack to ensure that lex_hash.h is built early -sql_lex.o: lex_hash.h - # For testing of udf_example.so; Works on platforms with gcc # (This is not part of our build process but only provided as an example) udf_example.so: udf_example.cc $(CXXCOMPILE) -shared -o $@ $< -distclean: +distclean-local: rm -f lex_hash.h # Don't update the files from bitkeeper diff --git a/sql/cache_manager.cc b/sql/cache_manager.cc deleted file mode 100644 index 307fe331e5c..00000000000 --- a/sql/cache_manager.cc +++ /dev/null @@ -1,150 +0,0 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -#ifdef __GNUC__ -#pragma implementation /* gcc: Class implementation */ -#endif - -#include <my_global.h> -#include <my_sys.h> -#include "cache_manager.h" - -/* - cache_manager.cc - ----------------------------------------------------------------------- - The cache_manager class manages a number of blocks (which are allocatable - units of memory). - ----------------------------------------------------------------------- -*/ - -#define HEADER_LENGTH ALIGN_SIZE(8) -#define SUFFIX_LENGTH 4 - -#define ALLOC_MASK 0x3FFFFFFFL -#define FREE_BIT (1L << 31) -#define LOCK_BIT (1L << 30) -#define SMALLEST_BLOCK 32 - - - -/* -** Internal Methods -** -------------------- -*/ - - -/* list manipulation methods */ -void *cache_manager::init_list(void) -{ - -return; -} - - -void *cache_manager::link_into_abs(void *ptr) -{ - for (int i(0); (*abs_list)[i] != NULL; i++); - - (*abs_list)[i] = ptr; - - return (abs_list)[i]; // ??? -} - - - -bool *cache_manager::unlink_from_abs(void *ptr) -{ - (*ptr) = NULL; - -return; -} - - - -/* memory allocation methods */ -void *cache_manager::find_in_llist(uint) -{ - -return; -} - - -void cache_manager::defrag(void) -{ - printf("Defragging: .........."); - - return; -} - - - -/* -** Public Methods -** ------------------ -*/ - -cache_manager::cache_manager(uint size) -{ - base_ptr = my_malloc(size, MY_WME); /* MY_WME = write mesg on err */ - - return; -} - - -cache_manager::~cache_manager(void) -{ - free(base_ptr); - delete base_ptr; - - return; -} - - -void *cache_manager::alloc(uint size) -{ - void *llist; - void *abs_ptr; - - size=ALIGN_SIZE(size+HEADER_LENGTH+SUFFIX_LENGTH); - if (!(llist = find_in_llist(size))) - { - //defrag(); - if (!(llist = find_in_llist(size))) - return 0; /* return null pointer, buffer exhausted! */ - } - size_of_found_block=int4korr((char*) llist) & ALLOC_MASK; - // if (size_of_found_block < SMALLEST_BLOCK) - - abs_ptr = link_into_abs(llist); - return abs_ptr; -} - - -void cache_manager::dealloc(void) -{ - printf("Deallocating: ..........\n"); - - return; -} - - - -void cache_manager::clear(void) -{ - // reset the internal linked list, forgetting all pointers to mem blks - - return; -} diff --git a/sql/cache_manager.h b/sql/cache_manager.h deleted file mode 100644 index d422a86ea8e..00000000000 --- a/sql/cache_manager.h +++ /dev/null @@ -1,61 +0,0 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -#ifdef __GNUC__ -#pragma interface /* gcc class implementation */ -#endif - -#ifndef _CACHE_MANAGER_H_ -#define _CACHE_MANAGER_H_ -#endif - -/* - cache_manager.h - ----------------------------------------------------------------------- - The cache_manager class manages a number of blocks (which are allocatable - units of memory). - ----------------------------------------------------------------------- -*/ - - - -class cache_manager { - void **abs_list; /* List holding block abstraction ptrs */ - - typedef struct free_blks { - struct free_blks *next, **prev; - uint Size; - } FREE_BLKS; - FREE_BLKS *base_ptr; /* Pointer to newly allocated sys mem */ - - - /* list manipulation methods */ - void *link_into_abs(void *); /* Return an abstract pointer to blk */ - bool *unlink_from_abs(void *); /* Used to dealloc a blk */ - void *find_in_fblist(uint); /* */ - - /* memory allocation methods */ - void defrag(void); /* Defragment the cache */ - bool *init_blk(void *); /* Return a pointer to new list */ - - public: - cache_manager(uint); /* Get allocation of size from system */ - ~cache_manager(void); /* Destructor; return the cache */ - - void *alloc(uint); /* Alloc size bytes from the cache */ - bool *dealloc(void *); /* Deallocate blocks (with *ptr_arg) */ - void clear(void); /* Clear the cache */ -}; diff --git a/sql/opt_ft.h b/sql/client_settings.h index 69b6b72f3fc..a8cd36af102 100644 --- a/sql/opt_ft.h +++ b/sql/client_settings.h @@ -1,41 +1,36 @@ /* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB - + This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. - + This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
- + You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -/* classes to use when handling where clause */ - -#ifndef _opt_ft_h -#define _opt_ft_h - -#ifdef __GNUC__ -#pragma interface /* gcc class implementation */ -#endif - -class FT_SELECT: public QUICK_SELECT { -public: - TABLE_REF *ref; +#include <thr_alarm.h> - FT_SELECT(THD *thd, TABLE *table, TABLE_REF *tref) : - QUICK_SELECT (thd, table, tref->key, 1), ref(tref) { init(); } +#define CLIENT_CAPABILITIES (CLIENT_LONG_PASSWORD | CLIENT_LONG_FLAG | \ + CLIENT_SECURE_CONNECTION | CLIENT_TRANSACTIONS | \ + CLIENT_PROTOCOL_41 | CLIENT_SECURE_CONNECTION) - int init() { return error=file->ft_init(); } - int get_next() { return error=file->ft_read(record); } -}; +#define init_sigpipe_variables +#define set_sigpipe(mysql) +#define reset_sigpipe(mysql) +#define read_user_name(A) {} +#define mysql_rpl_query_type(A,B) MYSQL_RPL_ADMIN +#define mysql_master_send_query(A, B, C) 1 +#define mysql_slave_send_query(A, B, C) 1 +#define mysql_rpl_probe(mysql) 0 +#undef HAVE_SMEM +#undef _CUSTOMCONFIG_ -QUICK_SELECT *get_ft_or_quick_select_for_ref(THD *thd, TABLE *table, - JOIN_TAB *tab); +#define mysql_server_init(a,b,c) 0 -#endif diff --git a/sql/convert.cc b/sql/convert.cc deleted file mode 100644 index f84c80a6121..00000000000 --- a/sql/convert.cc +++ /dev/null @@ -1,468 +0,0 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - - -/* -** Convert tables between different character sets -** Some of the tables are hidden behind IFDEF to reduce some space. -** One can enable them by removing the // characters from the next comment -** One must also give a name to each mapping that one wants to use... -** -** All tables are activated if --with-extra-charsets=all or -** --with-extra-charsets=complex was given to configure. -*/ - -/* #define DEFINE_ALL_CHARACTER_SETS */ - -#include "mysql_priv.h" - -/**************************************************************************** - Convert tables -****************************************************************************/ - -/* Windows cp1251->koi8 and reverse conversion by Timur I. Bakeyev <translate@bat.ru> */ -/* based on Russian-Apache Team tables by Dmitry M. 
Klimoff <dmk@kosnet.ru> */ - -static unsigned char cp1251_koi8[256] = { - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, - 96, 97, 98, 99,100,101,102,103,104,105,106,107,108,109,110,111, -112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, -128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143, -144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159, -160,161,162,163,164,189,166,167,179,169,180,171,172,173,174,183, -176,177,182,166,173,181,182,183,163,185,164,187,188,189,190,167, -225,226,247,231,228,229,246,250,233,234,235,236,237,238,239,240, -242,243,244,245,230,232,227,254,251,253,255,249,248,252,224,241, -193,194,215,199,196,197,214,218,201,202,203,204,205,206,207,208, -210,211,212,213,198,200,195,222,219,221,223,217,216,220,192,209 -}; - -static unsigned char koi8_cp1251[256] = { - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, - 96, 97, 98, 99,100,101,102,103,104,105,106,107,108,109,110,111, -112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, -128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143, -144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159, -160,161,162,184,186,165,179,191,168,169,170,171,172,180,174,175, -176,177,178,168,170,181,178,175,184,185,186,187,188,165,190,191, -254,224,225,246,228,229,244,227,245,232,233,234,235,236,237,238, -239,255,240,241,242,243,230,226,252,251,231,248,253,249,247,250, -222,192,193,214,196,197,212,195,213,200,201,202,203,204,205,206, -207,223,208,209,210,211,198,194,220,219,199,216,221,217,215,218 -}; - - -#ifdef DEFINE_ALL_CHARACTER_SETS - -/* These tables was generated from package 'cstools' (author Jan "Yenya" Kasprzak <kas@muni.cz>) */ - -/* Windows pc1250 to iso 8859-2 */ - -static unsigned char t1250_til2[256] = { - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, - 96, 97, 98, 99,100,101,102,103,104,105,106,107,108,109,110,111, -112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, -128,129, 0,131, 0, 0, 0, 0,136, 0,169, 0,166,171,174,172, -144, 0, 0, 0, 0, 0, 0, 0,152, 0,185, 0,182,187,190,188, -160,183,162,163,164,161, 0,167,168, 0,170, 0, 0,173, 0,175, -176, 0,178,179,180, 0, 0, 0,184,177,186, 0,165,189,181,191, -192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, -208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223, -224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239, -240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255 -}; - -static unsigned char til2_t1250[256] = { - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 
14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, - 96, 97, 98, 99,100,101,102,103,104,105,106,107,108,109,110,111, -112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, -128,129, 0,131, 0, 0, 0, 0,136, 0, 0, 0, 0, 0, 0, 0, -144, 0, 0, 0, 0, 0, 0, 0,152, 0, 0, 0, 0, 0, 0, 0, -160,165,162,163,164,188,140,167,168,138,170,141,143,173,142,175, -176,185,178,179,180,190,156,161,184,154,186,157,159,189,158,191, -192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, -208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223, -224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239, -240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255 -}; - -/* Windows pc1252 to iso 8859-2 */ - -static unsigned char t1252_til2[256] = { - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, - 96, 97, 98, 99,100,101,102,103,104,105,106,107,108,109,110,111, -112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, -128,129, 0, 0, 0, 0, 0, 0,136, 0,169, 0, 0,141,142,143, -144, 0, 0, 0, 0, 0, 0, 0,189, 0,185, 0, 0,157,158, 0, -160, 0, 0, 0,164, 0, 0,167,168, 0, 0, 0, 0,173, 0, 0, -176, 0, 0, 0,180, 0, 0, 0,184, 0, 0, 0, 0, 0, 0, 0, - 0,193,194, 0,196, 0, 0,199, 0,201, 0,203, 0,205,206, 0, -208, 0, 0,211,212, 0,214,215, 0, 0,218, 0,220,221, 0,223, - 0,225,226, 0,228, 0, 0,231, 0,233, 0,235, 0,237,238, 0, -240, 0, 0,243,244, 0,246,247, 0, 0,250, 0,252,253, 0, 0 -}; -static unsigned char til2_t1252[256] = { - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, - 96, 97, 98, 99,100,101,102,103,104,105,106,107,108,109,110,111, -112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, -128,129, 0, 0, 0, 0, 0, 0,136, 0, 0, 0, 0,141,142,143, -144, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,157,158, 0, -160, 0, 0, 0,164, 0, 0,167,168,138, 0, 0, 0,173, 0, 0, -176, 0, 0, 0,180, 0, 0, 0,184,154, 0, 0, 0,152, 0, 0, - 0,193,194, 0,196, 0, 0,199, 0,201, 0,203, 0,205,206, 0, -208, 0, 0,211,212, 0,214,215, 0, 0,218, 0,220,221, 0,223, - 0,225,226, 0,228, 0, 0,231, 0,233, 0,235, 0,237,238, 0, -240, 0, 0,243,244, 0,246,247, 0, 0,250, 0,252,253, 0, 0 -}; - -/* MSDOS Kamenicky encoding (for Czech/Slovak) to iso 8859-2 */ - -static unsigned char tkam_til2[256] = { - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, - 96, 97, 
98, 99,100,101,102,103,104,105,106,107,108,109,110,111, -112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, -200,252,233,239,228,207,171,232,236,204,197,205,181,229,196,193, -201,190,174,244,246,211,249,218,253,214,220,169,165,221,216,187, -225,237,243,250,242,210,217,212,185,248,224,192, 0,167, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0,247, 0,176, 0, 0, 0, 0, 0, 0, 0 -}; -static unsigned char til2_tkam[256] = { - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, - 96, 97, 98, 99,100,101,102,103,104,105,106,107,108,109,110,111, -112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0,156, 0,173, 0,155, 0,134, 0, 0,146, 0, -248, 0, 0, 0, 0,140, 0, 0, 0,168, 0,159, 0, 0,145, 0, -171,143, 0, 0,142,138, 0, 0,128,144, 0, 0,137,139, 0,133, - 0, 0,165,149,167, 0,153, 0,158,166,151, 0,154,157, 0, 0, -170,160, 0, 0,132,141, 0, 0,135,130, 0, 0,136,161, 0,131, - 0, 0,164,162,147, 0,148,246,169,150,163, 0,129,152, 0, 0 -}; - -/* Macintosh Roman encoding to iso 8859-2 */ - -static unsigned char tmac_til2[256] = { - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, - 96, 97, 98, 99,100,101,102,103,104,105,106,107,108,109,110,111, -112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, -196, 0,199,201, 0,214,220,225, 0,226,228, 0, 0,231,233, 0, - 0,235,237, 0,238, 0, 0,243, 0,244,246, 0,250, 0, 0,252, - 0,176, 0, 0,167, 0, 0,223, 0, 0, 0,180,168, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,160, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0,247, 0, 0, 0, 0,164, 0, 0, 0, 0, - 0, 0, 0, 0, 0,194, 0,193,203, 0,205,206, 0, 0,211,212, - 0, 0,218, 0, 0, 0, 0, 0, 0,162,255, 0,184,189,178,183 -}; -static unsigned char til2_tmac[256] = { - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, - 96, 97, 98, 99,100,101,102,103,104,105,106,107,108,109,110,111, -112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -202, 0,249, 0,219, 0, 0,164,172, 0, 0, 0, 0, 0, 0, 0, -161, 0,254, 0,171, 0, 0,255,252, 0, 0, 0, 0,253, 0, 0, - 0,231,229, 0,128, 0, 0,130, 0,131, 0,232, 0,234,235, 0, - 0, 0, 0,238,239, 0,133, 0, 0, 0,242, 0,134, 0, 0,167, - 0,135,137, 0,138, 0, 0,141, 0,142, 
0,145, 0,146,148, 0, - 0, 0, 0,151,153, 0,154,214, 0, 0,156, 0,159, 0, 0,250 -}; - -/* Macintosh Central European encodingto iso 8859-2 */ - -static unsigned char tmacce_til2[256] = { - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, - 96, 97, 98, 99,100,101,102,103,104,105,106,107,108,109,110,111, -112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, -196, 0, 0,201,161,214,220,225,177,200,228,232,198,230,233,172, -188,207,237,239, 0, 0, 0,243, 0,244,246, 0,250,204,236,252, - 0,176,202, 0,167, 0, 0,223, 0, 0, 0,234,168, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0,179, 0, 0,165,181,197,229, 0, - 0,209, 0, 0,241,210, 0, 0, 0, 0,160,242,213, 0,245, 0, - 0, 0, 0, 0, 0, 0,247, 0, 0,192,224,216, 0, 0,248, 0, - 0,169, 0, 0,185,166,182,193,171,187,205,174,190, 0,211,212, - 0,217,218,249,219,251, 0, 0,221,253, 0,175,163,191, 0,183 -}; -static unsigned char til2_tmacce[256] = { - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, - 96, 97, 98, 99,100,101,102,103,104,105,106,107,108,109,110,111, -112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -202,132, 0,252, 0,187,229,164,172,225, 0,232,143, 0,235,251, -161,136, 0,184, 0,188,230,255, 0,228, 0,233,144, 0,236,253, -217,231, 0, 0,128,189,140, 0,137,131,162, 0,157,234, 0,145, - 0,193,197,238,239,204,133, 0,219,241,242,244,134,248, 0,167, -218,135, 0, 0,138,190,141, 0,139,142,171, 0,158,146, 0,147, - 0,196,203,151,153,206,154,214,222,243,156,245,159,249, 0, 0 -}; - -/* PC-Latin2 encoding, supported by M$-DOS to iso 8859-2 */ - -static unsigned char tpc2_til2[256] = { - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, - 96, 97, 98, 99,100,101,102,103,104,105,106,107,108,109,110,111, -112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, -199,252,233,226,228,249,230,231,179,235,213,245,238,172,196,198, -201,197,229,244,246,165,181,166,182,214,220,171,187,163,215,232, -225,237,243,250,161,177,174,190,202,234, 0,188,200,186, 0, 0, - 0, 0, 0, 0, 0,193,194,204,170, 0, 0, 0, 0,175,191, 0, - 0, 0, 0, 0, 0, 0,195,227, 0, 0, 0, 0, 0, 0, 0,164, -240,208,207,203,239,210,205,206,236, 0, 0, 0, 0,222,217, 0, -211,223,212,209,241,242,169,185,192,218,224,219,253,221,254,180, - 0,189,178,183,162,167,247,184, 0,168,255,251,216,248, 0, 0 -}; -static unsigned char til2_tpc2[256] = { - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 
46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, - 96, 97, 98, 99,100,101,102,103,104,105,106,107,108,109,110,111, -112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0,164,244,157,207,149,151,245,249,230,184,155,141, 0,166,189, - 0,165,242,136,239,150,152,243,247,231,173,156,171,241,167,190, -232,181,182,198,142,145,143,128,172,144,168,211,183,214,215,210, -209,227,213,224,226,138,153,158,252,222,233,235,154,237,221,225, -234,160,131,199,132,146,134,135,159,130,169,137,216,161,140,212, -208,228,229,162,147,139,148,246,253,133,163,251,129,236,238,250 -}; - -/* Encoding used by standard IBM PC vga cards to iso8859-2 */ -static unsigned char tvga_til2[256] = { - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, - 96, 97, 98, 99,100,101,102,103,104,105,106,107,108,109,110,111, -112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, -199,252,233,226,228, 0, 0,231, 0,235, 0, 0,238, 0,196, 0, -201, 0, 0,244,246, 0, 0, 0, 0,214,220, 0, 0, 0, 0, 0, -225,237,243,250, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0,247, 0,176, 0, 0, 0, 0, 0, 0, 0 -}; -static unsigned char til2_tvga[256] = { - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, - 96, 97, 98, 99,100,101,102,103,104,105,106,107,108,109,110,111, -112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -248, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0,142, 0, 0,128, 0,144, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0,153, 0, 0, 0, 0, 0,154, 0, 0, 0, - 0,160,131, 0,132, 0, 0,135, 0,130, 0,137, 0,161,140, 0, - 0, 0, 0,162,147, 0,148,246, 0, 0,163, 0,129, 0, 0, 0 -}; - -//Ukrainian koi8 and win1251 converting tables by Max Veremayenko -//(verem@tg.kiev.ua - -static unsigned char koi8_ukr_win1251ukr[256] = { - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, - 96, 97, 98, 99,100,101,102,103,104,105,106,107,108,109,110,111, -112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 
-128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143, -144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159, -160,161,162,184,186,165,179,191,168,169,170,171,172,180,174,175, -176,177,178,168,170,181,178,175,184,185,186,187,188,165,190,191, -254,224,225,246,228,229,244,227,245,232,233,234,235,236,237,238, -239,255,240,241,242,243,230,226,252,251,231,248,253,249,247,250, -222,192,193,214,196,197,212,195,213,200,201,202,203,204,205,206, -207,223,208,209,210,211,198,194,220,219,199,216,221,217,215,218 -}; - -static unsigned char win1251ukr_koi8_ukr[256] = { - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, - 96, 97, 98, 99,100,101,102,103,104,105,106,107,108,109,110,111, -112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, -128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143, -144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159, -160,161,162,163,164,189,166,167,179,169,180,171,172,173,174,183, -176,177,182,166,173,181,182,183,163,185,164,187,188,189,190,167, -225,226,247,231,228,229,246,250,233,234,235,236,237,238,239,240, -242,243,244,245,230,232,227,254,251,253,255,249,248,252,224,241, -193,194,215,199,196,197,214,218,201,202,203,204,205,206,207,208, -210,211,212,213,198,200,195,222,219,221,223,217,216,220,192,209 -}; - -#endif /* DEFINE_ALL_CHARACTER_SETS */ - -/**************************************************************************** -** Declare mapping variables -****************************************************************************/ - - -CONVERT conv_cp1251_koi8("cp1251_koi8", cp1251_koi8, koi8_cp1251, 1); -#ifdef DEFINE_ALL_CHARACTER_SETS -CONVERT conv_cp1250_latin2("cp1250_latin2", t1250_til2, til2_t1250, 2); -CONVERT conv_kam_latin2("kam_latin2", tkam_til2, til2_tkam, 3); -CONVERT conv_mac_latin2("mac_latin2", tmac_til2, til2_tmac, 4); -CONVERT conv_macce_latin2("macce_latin2", tmacce_til2, til2_tmacce, 5); -CONVERT conv_pc2_latin2("pc2_latin2", tpc2_til2, til2_tpc2, 6); -CONVERT conv_vga_latin2("vga_latin2", tvga_til2, til2_tvga, 7); -CONVERT conv_koi8_cp1251("koi8_cp1251", koi8_cp1251, cp1251_koi8, 8); -CONVERT conv_win1251ukr_koi8_ukr("win1251ukr_koi8_ukr", win1251ukr_koi8_ukr, - koi8_ukr_win1251ukr, 9); -CONVERT conv_koi8_ukr_win1251ukr("koi8_ukr_win1251ukr", koi8_ukr_win1251ukr, - win1251ukr_koi8_ukr, 10); -#endif /* DEFINE_ALL_CHARACTER_SETS */ - -CONVERT *convert_tables[]= { - &conv_cp1251_koi8, -#ifdef DEFINE_ALL_CHARACTER_SETS - &conv_cp1250_latin2, - &conv_kam_latin2, - &conv_mac_latin2, - &conv_macce_latin2, - &conv_pc2_latin2, - &conv_vga_latin2, - &conv_koi8_cp1251, - &conv_win1251ukr_koi8_ukr, - &conv_koi8_ukr_win1251ukr, -#endif /* DEFINE_ALL_CHARACTER_SETS */ - NULL -}; - - -CONVERT *get_convert_set(const char *name) -{ - for (CONVERT **ptr=convert_tables ; *ptr ; ptr++) - { - if (!my_strcasecmp((*ptr)->name,name)) - return (*ptr); - } - return 0; -} - - -void CONVERT::convert_array(const uchar *map, uchar * buf, uint len) -{ - for (uchar *end=buf+len ; buf != end ; buf++) - *buf= map[*buf]; -} - - -/* This is identical as net_store_data, but with a conversion */ - -bool CONVERT::store(String *packet,const char *from,uint length) -{ - uint packet_length=packet->length(); 
- if (packet_length+5+length > packet->alloced_length() && - packet->realloc(packet_length+5+length)) - return 1; - char *to=(char*) net_store_length((char*) packet->ptr()+packet_length, - (ulonglong)length); - - for (const char *end=from+length ; from != end ; from++) - *to++= to_map[(uchar) *from]; - packet->length((uint) (to-packet->ptr())); - return 0; -} diff --git a/sql/derror.cc b/sql/derror.cc index 7ebe6e4b3c5..09f43d20044 100644 --- a/sql/derror.cc +++ b/sql/derror.cc @@ -20,27 +20,28 @@ #include "mysql_priv.h" #include "mysys_err.h" -static void read_texts(const char *file_name,const char ***point, +static bool read_texts(const char *file_name,const char ***point, uint error_messages); static void init_myfunc_errs(void); /* Read messages from errorfile */ -void init_errmessage(void) +bool init_errmessage(void) { DBUG_ENTER("init_errmessage"); - read_texts(ERRMSG_FILE,&my_errmsg[ERRMAPP],ER_ERROR_MESSAGES); + if (read_texts(ERRMSG_FILE,&my_errmsg[ERRMAPP],ER_ERROR_MESSAGES)) + DBUG_RETURN(TRUE); errmesg=my_errmsg[ERRMAPP]; /* Init global variabel */ init_myfunc_errs(); /* Init myfunc messages */ - DBUG_VOID_RETURN; + DBUG_RETURN(FALSE); } /* Read text from packed textfile in language-directory */ /* If we can't read messagefile then it's panic- we can't continue */ -static void read_texts(const char *file_name,const char ***point, +static bool read_texts(const char *file_name,const char ***point, uint error_messages) { register uint i; @@ -49,6 +50,7 @@ static void read_texts(const char *file_name,const char ***point, char name[FN_REFLEN]; const char *buff; uchar head[32],*pos; + CHARSET_INFO *cset; // For future DBUG_ENTER("read_texts"); *point=0; // If something goes wrong @@ -65,6 +67,22 @@ static void read_texts(const char *file_name,const char ***point, head[2] != 2 || head[3] != 1) goto err; /* purecov: inspected */ textcount=head[4]; + + if (!head[30]) + { + sql_print_error("Character set information not found in '%s'. 
\ +Please install the latest version of this file.",name); + goto err1; + } + + /* TODO: Convert the character set to server system character set */ + if (!(cset= get_charset(head[30],MYF(MY_WME)))) + { + sql_print_error("Character set #%d is not supported for messagefile '%s'", + (int)head[30],name); + goto err1; + } + length=uint2korr(head+6); count=uint2korr(head+8); if (count < error_messages) @@ -100,7 +118,7 @@ Check that the above file is the right version for this program!", point[i]= *point +uint2korr(head+10+i+i); } VOID(my_close(file,MYF(0))); - DBUG_VOID_RETURN; + DBUG_RETURN(0); err: switch (funktpos) { @@ -114,10 +132,12 @@ err: buff="Can't find messagefile '%s'"; break; } + sql_print_error(buff,name); +err1: if (file != FERR) VOID(my_close(file,MYF(MY_WME))); - sql_print_error(buff,name); unireg_abort(1); + DBUG_RETURN(1); // keep compiler happy } /* read_texts */ diff --git a/sql/des_key_file.cc b/sql/des_key_file.cc index 619691d183e..77cb0c8de0f 100644 --- a/sql/des_key_file.cc +++ b/sql/des_key_file.cc @@ -14,15 +14,13 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#include <mysql_priv.h> +#include "mysql_priv.h" #include <m_ctype.h> #ifdef HAVE_OPENSSL struct st_des_keyschedule des_keyschedule[10]; uint des_default_key; -pthread_mutex_t LOCK_des_key_file; -static int initialized; /* Function which loads DES keys from plaintext file into memory on MySQL @@ -34,6 +32,8 @@ static int initialized; 1 Error */ +#define des_cs &my_charset_latin1 + bool load_des_key_file(const char *file_name) { @@ -43,12 +43,6 @@ load_des_key_file(const char *file_name) DBUG_ENTER("load_des_key_file"); DBUG_PRINT("enter",("name: %s",file_name)); - if (!initialized) - { - initialized=1; - pthread_mutex_init(&LOCK_des_key_file,MY_MUTEX_INIT_FAST); - } - VOID(pthread_mutex_lock(&LOCK_des_key_file)); if ((file=my_open(file_name,O_RDONLY | O_BINARY ,MYF(MY_WME))) < 0 || init_io_cache(&io, file, IO_SIZE*2, READ_CACHE, 0, 0, MYF(MY_WME))) @@ -70,9 +64,10 @@ load_des_key_file(const char *file_name) { offset=(char) (offset - '0'); // Remove newline and possible other control characters - for (start=buf+1 ; isspace(*start) ; start++) ; + for (start=buf+1 ; my_isspace(des_cs, *start) ; start++) ; end=buf+length; - for (end=strend(buf) ; end > start && !isgraph(end[-1]) ; end--) ; + for (end=strend(buf) ; + end > start && !my_isgraph(des_cs, end[-1]) ; end--) ; if (start != end) { @@ -104,15 +99,4 @@ error: VOID(pthread_mutex_unlock(&LOCK_des_key_file)); DBUG_RETURN(result); } - - -void free_des_key_file() -{ - if (initialized) - { - initialized= 01; - pthread_mutex_destroy(&LOCK_des_key_file); - } -} - #endif /* HAVE_OPENSSL */ diff --git a/sql/discover.cc b/sql/discover.cc new file mode 100644 index 00000000000..1251055c70e --- /dev/null +++ b/sql/discover.cc @@ -0,0 +1,128 @@ +/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + + +/* Functions for discover of frm file from handler */ + +#include "mysql_priv.h" +#include <my_dir.h> + +/* + Read the contents of a .frm file + + SYNOPSIS + readfrm() + + name path to table-file "db/name" + frmdata frm data + len length of the read frmdata + + RETURN VALUES + 0 ok + 1 Could not open file + 2 Could not stat file + 3 Could not allocate data for read + Could not read file + + frmdata and len are set to 0 on error +*/ + +int readfrm(const char *name, + const void **frmdata, uint *len) +{ + int error; + char index_file[FN_REFLEN]; + File file; + ulong read_len; + char *read_data; + MY_STAT state; + DBUG_ENTER("readfrm"); + DBUG_PRINT("enter",("name: '%s'",name)); + + *frmdata= NULL; // In case of errors + *len= 0; + error= 1; + if ((file=my_open(fn_format(index_file,name,"",reg_ext,4), + O_RDONLY | O_SHARE, + MYF(0))) < 0) + goto err_end; + + // Get length of file + error= 2; + if (my_fstat(file, &state, MYF(0))) + goto err; + read_len= state.st_size; + + // Read whole frm file + error= 3; + read_data= 0; + if (read_string(file, &read_data, read_len)) + goto err; + + // Setup return data + *frmdata= (void*) read_data; + *len= read_len; + error= 0; + + err: + if (file > 0) + VOID(my_close(file,MYF(MY_WME))); + + err_end: /* Here when no file */ + DBUG_RETURN (error); +} /* readfrm */ + + +/* + Write the content of a frm data pointer + to a frm file + + SYNOPSIS + writefrm() + + name path to table-file "db/name" + frmdata frm data + len length of the frmdata + + RETURN VALUES + 0 ok + 2 Could not write file +*/ + +int writefrm(const char *name, const void *frmdata, uint len) +{ + File file; + char index_file[FN_REFLEN]; + int error; + DBUG_ENTER("writefrm"); + DBUG_PRINT("enter",("name: '%s' len: %d ",name,len)); + //DBUG_DUMP("frmdata", (char*)frmdata, len); + + error= 0; + if ((file=my_create(fn_format(index_file,name,"",reg_ext,4), + CREATE_MODE,O_RDWR | O_TRUNC,MYF(MY_WME))) >= 0) + { + if (my_write(file,(byte*)frmdata,len,MYF(MY_WME | MY_NABP))) + error= 2; + } + VOID(my_close(file,MYF(0))); + DBUG_RETURN(error); +} /* writefrm */ + + + + + diff --git a/sql/examples/ha_archive.cc b/sql/examples/ha_archive.cc new file mode 100644 index 00000000000..ee4cad25460 --- /dev/null +++ b/sql/examples/ha_archive.cc @@ -0,0 +1,1015 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation // gcc: Class implementation +#endif + +#include "../mysql_priv.h" + +#ifdef HAVE_ARCHIVE_DB +#include "ha_archive.h" + +/* + First, if you want to understand storage engines you should look at + ha_example.cc and ha_example.h. 
+ This example was written as a test case for a customer who needed + a storage engine without indexes that could compress data very well. + So, welcome to a completely compressed storage engine. This storage + engine only does inserts. No replace, deletes, or updates. All reads are + complete table scans. Compression is done through gzip (bzip compresses + better, but only marginally, if someone asks I could add support for + it too, but beaware that it costs a lot more in CPU time then gzip). + + We keep a file pointer open for each instance of ha_archive for each read + but for writes we keep one open file handle just for that. We flush it + only if we have a read occur. gzip handles compressing lots of records + at once much better then doing lots of little records between writes. + It is possible to not lock on writes but this would then mean we couldn't + handle bulk inserts as well (that is if someone was trying to read at + the same time since we would want to flush). + + A "meta" file is kept alongside the data file. This file serves two purpose. + The first purpose is to track the number of rows in the table. The second + purpose is to determine if the table was closed properly or not. When the + meta file is first opened it is marked as dirty. It is opened when the table + itself is opened for writing. When the table is closed the new count for rows + is written to the meta file and the file is marked as clean. If the meta file + is opened and it is marked as dirty, it is assumed that a crash occured. At + this point an error occurs and the user is told to rebuild the file. + A rebuild scans the rows and rewrites the meta file. If corruption is found + in the data file then the meta file is not repaired. + + At some point a recovery method for such a drastic case needs to be divised. + + Locks are row level, and you will get a consistant read. + + For performance as far as table scans go it is quite fast. I don't have + good numbers but locally it has out performed both Innodb and MyISAM. For + Innodb the question will be if the table can be fit into the buffer + pool. For MyISAM its a question of how much the file system caches the + MyISAM file. With enough free memory MyISAM is faster. Its only when the OS + doesn't have enough memory to cache entire table that archive turns out + to be any faster. For writes it is always a bit slower then MyISAM. It has no + internal limits though for row length. + + Examples between MyISAM (packed) and Archive. + + Table with 76695844 identical rows: + 29680807 a_archive.ARZ + 920350317 a.MYD + + + Table with 8991478 rows (all of Slashdot's comments): + 1922964506 comment_archive.ARZ + 2944970297 comment_text.MYD + + + TODO: + Add bzip optional support. + Allow users to set compression level. + Add truncate table command. + Implement versioning, should be easy. + Allow for errors, find a way to mark bad rows. + Talk to the gzip guys, come up with a writable format so that updates are doable + without switching to a block method. + Add optional feature so that rows can be flushed at interval (which will cause less + compression but may speed up ordered searches). + Checkpoint the meta file to allow for faster rebuilds. + Dirty open (right now the meta file is repaired if a crash occured). + Option to allow for dirty reads, this would lower the sync calls, which would make + inserts a lot faster, but would mean highly arbitrary reads. + + -Brian +*/ +/* + Notes on file formats. 
+ The Meta file is layed out as: + check - Just an int of 254 to make sure that the the file we are opening was + never corrupted. + version - The current version of the file format. + rows - This is an unsigned long long which is the number of rows in the data + file. + check point - Reserved for future use + dirty - Status of the file, whether or not its values are the latest. This + flag is what causes a repair to occur + + The data file: + check - Just an int of 254 to make sure that the the file we are opening was + never corrupted. + version - The current version of the file format. + data - The data is stored in a "row +blobs" format. +*/ + +/* If the archive storage engine has been inited */ +static bool archive_inited= 0; +/* Variables for archive share methods */ +pthread_mutex_t archive_mutex; +static HASH archive_open_tables; + +/* The file extension */ +#define ARZ ".ARZ" // The data file +#define ARN ".ARN" // Files used during an optimize call +#define ARM ".ARM" // Meta file +/* + uchar + uchar + ulonglong + ulonglong + uchar +*/ +#define META_BUFFER_SIZE 19 // Size of the data used in the meta file +/* + uchar + uchar +*/ +#define DATA_BUFFER_SIZE 2 // Size of the data used in the data file +#define ARCHIVE_CHECK_HEADER 254 // The number we use to determine corruption + +/* + Used for hash table that tracks open tables. +*/ +static byte* archive_get_key(ARCHIVE_SHARE *share,uint *length, + my_bool not_used __attribute__((unused))) +{ + *length=share->table_name_length; + return (byte*) share->table_name; +} + + +/* + Initialize the archive handler. + + SYNOPSIS + archive_db_init() + void + + RETURN + FALSE OK + TRUE Error +*/ + +bool archive_db_init() +{ + archive_inited= 1; + VOID(pthread_mutex_init(&archive_mutex, MY_MUTEX_INIT_FAST)); + return (hash_init(&archive_open_tables, system_charset_info, 32, 0, 0, + (hash_get_key) archive_get_key, 0, 0)); +} + + +/* + Release the archive handler. + + SYNOPSIS + archive_db_end() + void + + RETURN + FALSE OK +*/ + +bool archive_db_end() +{ + if (archive_inited) + { + hash_free(&archive_open_tables); + VOID(pthread_mutex_destroy(&archive_mutex)); + } + archive_inited= 0; + return FALSE; +} + + +/* + This method reads the header of a datafile and returns whether or not it was successful. +*/ +int ha_archive::read_data_header(gzFile file_to_read) +{ + uchar data_buffer[DATA_BUFFER_SIZE]; + DBUG_ENTER("ha_archive::read_data_header"); + + if (gzrewind(file_to_read) == -1) + DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + + if (gzread(file_to_read, data_buffer, DATA_BUFFER_SIZE) != DATA_BUFFER_SIZE) + DBUG_RETURN(errno ? errno : -1); + + DBUG_PRINT("ha_archive::read_data_header", ("Check %u", data_buffer[0])); + DBUG_PRINT("ha_archive::read_data_header", ("Version %u", data_buffer[1])); + + if ((data_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) && + (data_buffer[1] != (uchar)ARCHIVE_VERSION)) + DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + + DBUG_RETURN(0); +} + +/* + This method writes out the header of a datafile and returns whether or not it was successful. 
+*/ +int ha_archive::write_data_header(gzFile file_to_write) +{ + uchar data_buffer[DATA_BUFFER_SIZE]; + DBUG_ENTER("ha_archive::write_data_header"); + + data_buffer[0]= (uchar)ARCHIVE_CHECK_HEADER; + data_buffer[1]= (uchar)ARCHIVE_VERSION; + + if (gzwrite(file_to_write, &data_buffer, DATA_BUFFER_SIZE) != + DATA_BUFFER_SIZE) + goto error; + DBUG_PRINT("ha_archive::write_data_header", ("Check %u", (uint)data_buffer[0])); + DBUG_PRINT("ha_archive::write_data_header", ("Version %u", (uint)data_buffer[1])); + + DBUG_RETURN(0); +error: + DBUG_RETURN(errno); +} + +/* + This method reads the header of a meta file and returns whether or not it was successful. + *rows will contain the current number of rows in the data file upon success. +*/ +int ha_archive::read_meta_file(File meta_file, ulonglong *rows) +{ + uchar meta_buffer[META_BUFFER_SIZE]; + ulonglong check_point; + + DBUG_ENTER("ha_archive::read_meta_file"); + + VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0))); + if (my_read(meta_file, (byte*)meta_buffer, META_BUFFER_SIZE, 0) != META_BUFFER_SIZE) + DBUG_RETURN(-1); + + /* + Parse out the meta data, we ignore version at the moment + */ + *rows= uint8korr(meta_buffer + 2); + check_point= uint8korr(meta_buffer + 10); + + DBUG_PRINT("ha_archive::read_meta_file", ("Check %d", (uint)meta_buffer[0])); + DBUG_PRINT("ha_archive::read_meta_file", ("Version %d", (uint)meta_buffer[1])); + DBUG_PRINT("ha_archive::read_meta_file", ("Rows %lld", *rows)); + DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %lld", check_point)); + DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)meta_buffer[18])); + + if ((meta_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) || + ((bool)meta_buffer[18] == TRUE)) + DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + + my_sync(meta_file, MYF(MY_WME)); + + DBUG_RETURN(0); +} + +/* + This method writes out the header of a meta file and returns whether or not it was successful. + By setting dirty you say whether or not the file represents the actual state of the data file. + Upon ::open() we set to dirty, and upon ::close() we set to clean. If we determine during + a read that the file was dirty we will force a rebuild of this file. +*/ +int ha_archive::write_meta_file(File meta_file, ulonglong rows, bool dirty) +{ + uchar meta_buffer[META_BUFFER_SIZE]; + ulonglong check_point= 0; //Reserved for the future + + DBUG_ENTER("ha_archive::write_meta_file"); + + meta_buffer[0]= (uchar)ARCHIVE_CHECK_HEADER; + meta_buffer[1]= (uchar)ARCHIVE_VERSION; + int8store(meta_buffer + 2, rows); + int8store(meta_buffer + 10, check_point); + *(meta_buffer + 18)= (uchar)dirty; + DBUG_PRINT("ha_archive::write_meta_file", ("Check %d", (uint)ARCHIVE_CHECK_HEADER)); + DBUG_PRINT("ha_archive::write_meta_file", ("Version %d", (uint)ARCHIVE_VERSION)); + DBUG_PRINT("ha_archive::write_meta_file", ("Rows %llu", rows)); + DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %llu", check_point)); + DBUG_PRINT("ha_archive::write_meta_file", ("Dirty %d", (uint)dirty)); + + VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0))); + if (my_write(meta_file, (byte *)meta_buffer, META_BUFFER_SIZE, 0) != META_BUFFER_SIZE) + DBUG_RETURN(-1); + + my_sync(meta_file, MYF(MY_WME)); + + DBUG_RETURN(0); +} + + +/* + We create the shared memory space that we will use for the open table. + See ha_example.cc for a longer description. 
+*/ +ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table) +{ + ARCHIVE_SHARE *share; + char meta_file_name[FN_REFLEN]; + uint length; + char *tmp_name; + + pthread_mutex_lock(&archive_mutex); + length=(uint) strlen(table_name); + + if (!(share=(ARCHIVE_SHARE*) hash_search(&archive_open_tables, + (byte*) table_name, + length))) + { + if (!my_multi_malloc(MYF(MY_WME | MY_ZEROFILL), + &share, sizeof(*share), + &tmp_name, length+1, + NullS)) + { + pthread_mutex_unlock(&archive_mutex); + return NULL; + } + + share->use_count= 0; + share->table_name_length= length; + share->table_name= tmp_name; + fn_format(share->data_file_name,table_name,"",ARZ,MY_REPLACE_EXT|MY_UNPACK_FILENAME); + fn_format(meta_file_name,table_name,"",ARM,MY_REPLACE_EXT|MY_UNPACK_FILENAME); + strmov(share->table_name,table_name); + /* + We will use this lock for rows. + */ + VOID(pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST)); + if ((share->meta_file= my_open(meta_file_name, O_RDWR, MYF(0))) == -1) + goto error; + + if (read_meta_file(share->meta_file, &share->rows_recorded)) + { + /* + The problem here is that for some reason, probably a crash, the meta + file has been corrupted. So what do we do? Well we try to rebuild it + ourself. Once that happens, we reread it, but if that fails we just + call it quits and return an error. + */ + if (rebuild_meta_file(share->table_name, share->meta_file)) + goto error; + if (read_meta_file(share->meta_file, &share->rows_recorded)) + goto error; + } + /* + After we read, we set the file to dirty. When we close, we will do the + opposite. + */ + (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE); + + /* + It is expensive to open and close the data files and since you can't have + a gzip file that can be both read and written we keep a writer open + that is shared amoung all open tables. + */ + if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL) + goto error2; + if (my_hash_insert(&archive_open_tables, (byte*) share)) + goto error3; + thr_lock_init(&share->lock); + } + share->use_count++; + pthread_mutex_unlock(&archive_mutex); + + return share; + +error3: + /* We close, but ignore errors since we already have errors */ + (void)gzclose(share->archive_write); +error2: + my_close(share->meta_file,MYF(0)); +error: + pthread_mutex_unlock(&archive_mutex); + VOID(pthread_mutex_destroy(&share->mutex)); + my_free((gptr) share, MYF(0)); + + return NULL; +} + + +/* + Free the share. + See ha_example.cc for a description. +*/ +int ha_archive::free_share(ARCHIVE_SHARE *share) +{ + int rc= 0; + pthread_mutex_lock(&archive_mutex); + if (!--share->use_count) + { + hash_delete(&archive_open_tables, (byte*) share); + thr_lock_delete(&share->lock); + VOID(pthread_mutex_destroy(&share->mutex)); + (void)write_meta_file(share->meta_file, share->rows_recorded, FALSE); + if (gzclose(share->archive_write) == Z_ERRNO) + rc= 1; + if (my_close(share->meta_file, MYF(0))) + rc= 1; + my_free((gptr) share, MYF(0)); + } + pthread_mutex_unlock(&archive_mutex); + + return rc; +} + + +/* + We just implement one additional file extension. +*/ +const char **ha_archive::bas_ext() const +{ static const char *ext[]= { ARZ, ARN, ARM, NullS }; return ext; } + + +/* + When opening a file we: + Create/get our shared structure. + Init out lock. + We open the file we will read from. 
+*/ +int ha_archive::open(const char *name, int mode, uint test_if_locked) +{ + DBUG_ENTER("ha_archive::open"); + + if (!(share= get_share(name, table))) + DBUG_RETURN(1); + thr_lock_data_init(&share->lock,&lock,NULL); + + if ((archive= gzopen(share->data_file_name, "rb")) == NULL) + { + (void)free_share(share); //We void since we already have an error + DBUG_RETURN(errno ? errno : -1); + } + + DBUG_RETURN(0); +} + + +/* + Closes the file. + + SYNOPSIS + close(); + + IMPLEMENTATION: + + We first close this storage engines file handle to the archive and + then remove our reference count to the table (and possibly free it + as well). + + RETURN + 0 ok + 1 Error +*/ + +int ha_archive::close(void) +{ + int rc= 0; + DBUG_ENTER("ha_archive::close"); + + /* First close stream */ + if (gzclose(archive) == Z_ERRNO) + rc= 1; + /* then also close share */ + rc|= free_share(share); + + DBUG_RETURN(rc); +} + + +/* + We create our data file here. The format is pretty simple. + You can read about the format of the data file above. + Unlike other storage engines we do not "pack" our data. Since we + are about to do a general compression, packing would just be a waste of + CPU time. If the table has blobs they are written after the row in the order + of creation. +*/ + +int ha_archive::create(const char *name, TABLE *table_arg, + HA_CREATE_INFO *create_info) +{ + File create_file; // We use to create the datafile and the metafile + char name_buff[FN_REFLEN]; + int error; + DBUG_ENTER("ha_archive::create"); + + if ((create_file= my_create(fn_format(name_buff,name,"",ARM, + MY_REPLACE_EXT|MY_UNPACK_FILENAME),0, + O_RDWR | O_TRUNC,MYF(MY_WME))) < 0) + { + error= my_errno; + goto error; + } + write_meta_file(create_file, 0, FALSE); + my_close(create_file,MYF(0)); + + /* + We reuse name_buff since it is available. + */ + if ((create_file= my_create(fn_format(name_buff,name,"",ARZ, + MY_REPLACE_EXT|MY_UNPACK_FILENAME),0, + O_RDWR | O_TRUNC,MYF(MY_WME))) < 0) + { + error= my_errno; + goto error; + } + if ((archive= gzdopen(create_file, "wb")) == NULL) + { + error= errno; + goto error2; + } + if (write_data_header(archive)) + { + error= errno; + goto error3; + } + + if (gzclose(archive)) + { + error= errno; + goto error2; + } + + my_close(create_file, MYF(0)); + + DBUG_RETURN(0); + +error3: + /* We already have an error, so ignore results of gzclose. */ + (void)gzclose(archive); +error2: + my_close(create_file, MYF(0)); + delete_table(name); +error: + /* Return error number, if we got one */ + DBUG_RETURN(error ? error : -1); +} + + +/* + Look at ha_archive::open() for an explanation of the row format. + Here we just write out the row. + + Wondering about start_bulk_insert()? We don't implement it for + archive since it optimizes for lots of writes. The only save + for implementing start_bulk_insert() is that we could skip + setting dirty to true each time. +*/ +int ha_archive::write_row(byte * buf) +{ + z_off_t written; + Field_blob **field; + DBUG_ENTER("ha_archive::write_row"); + + statistic_increment(ha_write_count,&LOCK_status); + if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) + table->timestamp_field->set_time(); + pthread_mutex_lock(&share->mutex); + written= gzwrite(share->archive_write, buf, table->reclength); + DBUG_PRINT("ha_archive::get_row", ("Wrote %d bytes expected %d", written, table->reclength)); + share->dirty= TRUE; + if (written != (z_off_t)table->reclength) + goto error; + /* + We should probably mark the table as damagaged if the record is written + but the blob fails. 
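[Editorial aside] write_row() streams the fixed-length record image through gzwrite() and then appends each blob's bytes right after it, in creation order; the reader later consumes them in the same order. A standalone zlib sketch of that framing; the file name and record layout are made up, and the explicit length prefix is only for the demo (the engine recovers blob lengths from the record image instead of storing them separately).

  #include <zlib.h>
  #include <cstdio>
  #include <cstring>

  int main()
  {
    const char fixed_part[16]= "fixed-width-row";    // stand-in for table->reclength bytes
    const char blob[]= "blob payload of variable length";
    const unsigned blob_len= (unsigned) strlen(blob);

    // Append the record: fixed image first, blob data right after it.
    gzFile out= gzopen("demo.arz", "ab");
    if (!out)
      return 1;
    gzwrite(out, fixed_part, sizeof(fixed_part));
    gzwrite(out, &blob_len, sizeof(blob_len));       // demo-only length prefix
    gzwrite(out, blob, blob_len);
    gzclose(out);

    // Read it back in the same order.
    char fixed_in[16];
    unsigned len_in= 0;
    char blob_in[128];
    gzFile in= gzopen("demo.arz", "rb");
    if (!in)
      return 1;
    if (gzread(in, fixed_in, sizeof(fixed_in)) != (int) sizeof(fixed_in))
      return 1;                                      // short read: empty or damaged file
    gzread(in, &len_in, sizeof(len_in));
    gzread(in, blob_in, len_in);
    blob_in[len_in]= '\0';
    printf("row=%.15s blob=%s\n", fixed_in, blob_in);
    gzclose(in);
    return 0;
  }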
+ */ + for (field= table->blob_field ; *field ; field++) + { + char *ptr; + uint32 size= (*field)->get_length(); + + if (size) + { + (*field)->get_ptr(&ptr); + written= gzwrite(share->archive_write, ptr, (unsigned)size); + if (written != (z_off_t)size) + goto error; + } + } + share->rows_recorded++; + pthread_mutex_unlock(&share->mutex); + + DBUG_RETURN(0); +error: + pthread_mutex_unlock(&share->mutex); + DBUG_RETURN(errno ? errno : -1); +} + + +/* + All calls that need to scan the table start with this method. If we are told + that it is a table scan we rewind the file to the beginning, otherwise + we assume the position will be set. +*/ + +int ha_archive::rnd_init(bool scan) +{ + DBUG_ENTER("ha_archive::rnd_init"); + + /* We rewind the file so that we can read from the beginning if scan */ + if (scan) + { + scan_rows= share->rows_recorded; + records= 0; + + /* + If dirty, we lock, and then reset/flush the data. + I found that just calling gzflush() doesn't always work. + */ + if (share->dirty == TRUE) + { + pthread_mutex_lock(&share->mutex); + if (share->dirty == TRUE) + { + gzflush(share->archive_write, Z_SYNC_FLUSH); + share->dirty= FALSE; + } + pthread_mutex_unlock(&share->mutex); + } + + if (read_data_header(archive)) + DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + } + + DBUG_RETURN(0); +} + + +/* + This is the method that is used to read a row. It assumes that the row is + positioned where you want it. +*/ +int ha_archive::get_row(gzFile file_to_read, byte *buf) +{ + int read; // Bytes read, gzread() returns int + char *last; + size_t total_blob_length= 0; + Field_blob **field; + DBUG_ENTER("ha_archive::get_row"); + + read= gzread(file_to_read, buf, table->reclength); + DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %d", read, table->reclength)); + + if (read == Z_STREAM_ERROR) + DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + + /* If we read nothing we are at the end of the file */ + if (read == 0) + DBUG_RETURN(HA_ERR_END_OF_FILE); + + /* If the record is the wrong size, the file is probably damaged */ + if ((ulong) read != table->reclength) + DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + + /* Calculate blob length, we use this for our buffer */ + for (field=table->blob_field; *field ; field++) + total_blob_length += (*field)->get_length(); + + /* Adjust our row buffer if we need be */ + buffer.alloc(total_blob_length); + last= (char *)buffer.ptr(); + + /* Loop through our blobs and read them */ + for (field=table->blob_field; *field ; field++) + { + size_t size= (*field)->get_length(); + if (size) + { + read= gzread(file_to_read, last, size); + if ((size_t) read != size) + DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + (*field)->set_ptr(size, last); + last += size; + } + } + DBUG_RETURN(0); +} + + +/* + Called during ORDER BY. Its position is either from being called sequentially + or by having had ha_archive::rnd_pos() called before it is called. +*/ + +int ha_archive::rnd_next(byte *buf) +{ + int rc; + DBUG_ENTER("ha_archive::rnd_next"); + + if (!scan_rows) + DBUG_RETURN(HA_ERR_END_OF_FILE); + scan_rows--; + + statistic_increment(ha_read_rnd_next_count,&LOCK_status); + current_position= gztell(archive); + rc= get_row(archive, buf); + + + if (rc != HA_ERR_END_OF_FILE) + records++; + + DBUG_RETURN(rc); +} + + +/* + Thanks to the table flag HA_REC_NOT_IN_SEQ this will be called after + each call to ha_archive::rnd_next() if an ordering of the rows is + needed. 
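[Editorial aside] The position that gets saved here is simply the gztell() offset taken in rnd_next() before the row was read, and rnd_pos() later gzseek()s back to it. A small standalone sketch of that bookmark idea on a throwaway gzip file; the file name and the fixed record size are illustrative.

  #include <zlib.h>
  #include <cstdio>

  int main()
  {
    // Build a tiny file of three fixed-size "rows".
    gzFile out= gzopen("positions.gz", "wb");
    if (!out)
      return 1;
    gzwrite(out, "row-one....", 11);
    gzwrite(out, "row-two....", 11);
    gzwrite(out, "row-three..", 11);
    gzclose(out);

    gzFile in= gzopen("positions.gz", "rb");
    if (!in)
      return 1;

    char buf[12];
    z_off_t saved= 0;
    for (int i= 0; i < 3; i++)
    {
      if (i == 1)
        saved= gztell(in);          // like current_position captured before get_row()
      gzread(in, buf, 11);
    }

    // "rnd_pos": seek back to the saved offset and re-read that row.
    gzseek(in, saved, SEEK_SET);
    gzread(in, buf, 11);
    buf[11]= '\0';
    printf("re-read: %s\n", buf);   // prints row-two....
    gzclose(in);
    return 0;
  }

The handler stores exactly this kind of offset into ref via ha_store_ptr(), which is why ref_length is sizeof(z_off_t).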
+*/ + +void ha_archive::position(const byte *record) +{ + DBUG_ENTER("ha_archive::position"); + ha_store_ptr(ref, ref_length, current_position); + DBUG_VOID_RETURN; +} + + +/* + This is called after a table scan for each row if the results of the + scan need to be ordered. It will take *pos and use it to move the + cursor in the file so that the next row that is called is the + correctly ordered row. +*/ + +int ha_archive::rnd_pos(byte * buf, byte *pos) +{ + DBUG_ENTER("ha_archive::rnd_pos"); + statistic_increment(ha_read_rnd_count,&LOCK_status); + current_position= ha_get_ptr(pos, ref_length); + (void)gzseek(archive, current_position, SEEK_SET); + + DBUG_RETURN(get_row(archive, buf)); +} + +/* + This method rebuilds the meta file. It does this by walking the datafile and + rewriting the meta file. +*/ +int ha_archive::rebuild_meta_file(char *table_name, File meta_file) +{ + int rc; + byte *buf; + ulonglong rows_recorded= 0; + gzFile rebuild_file; /* Archive file we are working with */ + char data_file_name[FN_REFLEN]; + DBUG_ENTER("ha_archive::rebuild_meta_file"); + + /* + Open up the meta file to recreate it. + */ + fn_format(data_file_name, table_name, "", ARZ, + MY_REPLACE_EXT|MY_UNPACK_FILENAME); + if ((rebuild_file= gzopen(data_file_name, "rb")) == NULL) + DBUG_RETURN(errno ? errno : -1); + + if ((rc= read_data_header(rebuild_file))) + goto error; + + /* + We malloc up the buffer we will use for counting the rows. + I know, this malloc'ing memory but this should be a very + rare event. + */ + if (!(buf= (byte*) my_malloc(table->rec_buff_length > sizeof(ulonglong) +1 ? + table->rec_buff_length : sizeof(ulonglong) +1 , + MYF(MY_WME)))) + { + rc= HA_ERR_CRASHED_ON_USAGE; + goto error; + } + + while (!(rc= get_row(rebuild_file, buf))) + rows_recorded++; + + /* + Only if we reach the end of the file do we assume we can rewrite. + At this point we reset rc to a non-message state. + */ + if (rc == HA_ERR_END_OF_FILE) + { + (void)write_meta_file(meta_file, rows_recorded, FALSE); + rc= 0; + } + + my_free((gptr) buf, MYF(0)); +error: + gzclose(rebuild_file); + + DBUG_RETURN(rc); +} + +/* + The table can become fragmented if data was inserted, read, and then + inserted again. What we do is open up the file and recompress it completely. +*/ +int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) +{ + DBUG_ENTER("ha_archive::optimize"); + int read; // Bytes read, gzread() returns int + gzFile reader, writer; + char block[IO_SIZE]; + char writer_filename[FN_REFLEN]; + + /* Lets create a file to contain the new data */ + fn_format(writer_filename, share->table_name, "", ARN, + MY_REPLACE_EXT|MY_UNPACK_FILENAME); + + /* Closing will cause all data waiting to be flushed, to be flushed */ + gzclose(share->archive_write); + + if ((reader= gzopen(share->data_file_name, "rb")) == NULL) + DBUG_RETURN(-1); + + if ((writer= gzopen(writer_filename, "wb")) == NULL) + { + gzclose(reader); + DBUG_RETURN(-1); + } + + while ((read= gzread(reader, block, IO_SIZE))) + gzwrite(writer, block, read); + + gzclose(reader); + gzclose(writer); + + my_rename(writer_filename,share->data_file_name,MYF(0)); + + /* + We reopen the file in case some IO is waiting to go through. + In theory the table is closed right after this operation, + but it is possible for IO to still happen. + I may be being a bit too paranoid right here. + */ + if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL) + DBUG_RETURN(errno ? 
errno : -1); + share->dirty= FALSE; + + DBUG_RETURN(0); +} + + +/* + No transactions yet, so this is pretty dull. +*/ +int ha_archive::external_lock(THD *thd, int lock_type) +{ + DBUG_ENTER("ha_archive::external_lock"); + DBUG_RETURN(0); +} + +/* + Below is an example of how to setup row level locking. +*/ +THR_LOCK_DATA **ha_archive::store_lock(THD *thd, + THR_LOCK_DATA **to, + enum thr_lock_type lock_type) +{ + if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) + { + /* + Here is where we get into the guts of a row level lock. + If TL_UNLOCK is set + If we are not doing a LOCK TABLE or DISCARD/IMPORT + TABLESPACE, then allow multiple writers + */ + + if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && + lock_type <= TL_WRITE) && !thd->in_lock_tables + && !thd->tablespace_op) + lock_type = TL_WRITE_ALLOW_WRITE; + + /* + In queries of type INSERT INTO t1 SELECT ... FROM t2 ... + MySQL would use the lock TL_READ_NO_INSERT on t2, and that + would conflict with TL_WRITE_ALLOW_WRITE, blocking all inserts + to t2. Convert the lock to a normal read lock to allow + concurrent inserts to t2. + */ + + if (lock_type == TL_READ_NO_INSERT && !thd->in_lock_tables) + lock_type = TL_READ; + + lock.type=lock_type; + } + + *to++= &lock; + + return to; +} + + +/****************************************************************************** + + Everything below here is default, please look at ha_example.cc for + descriptions. + + ******************************************************************************/ + +int ha_archive::update_row(const byte * old_data, byte * new_data) +{ + + DBUG_ENTER("ha_archive::update_row"); + DBUG_RETURN(HA_ERR_WRONG_COMMAND); +} + +int ha_archive::delete_row(const byte * buf) +{ + DBUG_ENTER("ha_archive::delete_row"); + DBUG_RETURN(HA_ERR_WRONG_COMMAND); +} + +int ha_archive::index_read(byte * buf, const byte * key, + uint key_len __attribute__((unused)), + enum ha_rkey_function find_flag + __attribute__((unused))) +{ + DBUG_ENTER("ha_archive::index_read"); + DBUG_RETURN(HA_ERR_WRONG_COMMAND); +} + +int ha_archive::index_read_idx(byte * buf, uint index, const byte * key, + uint key_len __attribute__((unused)), + enum ha_rkey_function find_flag + __attribute__((unused))) +{ + DBUG_ENTER("ha_archive::index_read_idx"); + DBUG_RETURN(HA_ERR_WRONG_COMMAND); +} + + +int ha_archive::index_next(byte * buf) +{ + DBUG_ENTER("ha_archive::index_next"); + DBUG_RETURN(HA_ERR_WRONG_COMMAND); +} + +int ha_archive::index_prev(byte * buf) +{ + DBUG_ENTER("ha_archive::index_prev"); + DBUG_RETURN(HA_ERR_WRONG_COMMAND); +} + +int ha_archive::index_first(byte * buf) +{ + DBUG_ENTER("ha_archive::index_first"); + DBUG_RETURN(HA_ERR_WRONG_COMMAND); +} + +int ha_archive::index_last(byte * buf) +{ + DBUG_ENTER("ha_archive::index_last"); + DBUG_RETURN(HA_ERR_WRONG_COMMAND); +} + + +void ha_archive::info(uint flag) +{ + DBUG_ENTER("ha_archive::info"); + + /* This is a lie, but you don't want the optimizer to see zero or 1 */ + records= share->rows_recorded; + deleted= 0; + + DBUG_VOID_RETURN; +} + +int ha_archive::extra(enum ha_extra_function operation) +{ + DBUG_ENTER("ha_archive::extra"); + DBUG_RETURN(0); +} + +int ha_archive::reset(void) +{ + DBUG_ENTER("ha_archive::reset"); + DBUG_RETURN(0); +} + +ha_rows ha_archive::records_in_range(uint inx, key_range *min_key, + key_range *max_key) +{ + DBUG_ENTER("ha_archive::records_in_range "); + DBUG_RETURN(records); // HA_ERR_WRONG_COMMAND +} + +/* + We cancel a truncate command. The only way to delete an archive table is to drop it. 
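[Editorial aside] optimize() above defragments by streaming every row through a gzread()/gzwrite() copy loop into a fresh file and renaming it over the data file. The same loop in isolation; the file names and block size stand in for share->data_file_name, the ARN extension and IO_SIZE, and error handling is trimmed to bail-outs.

  #include <zlib.h>
  #include <cstdio>

  // Recompress src into dst by streaming decompressed blocks, then rename over src.
  static int recompress(const char *src, const char *dst)
  {
    char block[4096];                    // stands in for IO_SIZE
    gzFile reader= gzopen(src, "rb");
    if (!reader)
      return -1;
    gzFile writer= gzopen(dst, "wb");
    if (!writer)
    {
      gzclose(reader);
      return -1;
    }

    int read;
    while ((read= gzread(reader, block, sizeof(block))) > 0)
      gzwrite(writer, block, (unsigned) read);

    gzclose(reader);
    gzclose(writer);
    return rename(dst, src);             // the engine uses my_rename() here
  }

  int main()
  {
    return recompress("demo.arz", "demo.arn") ? 1 : 0;
  }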
+ This is done for security reasons. In a later version we will enable this by + allowing the user to select a different row format. +*/ +int ha_archive::delete_all_rows() +{ + DBUG_ENTER("ha_archive::delete_all_rows"); + DBUG_RETURN(0); +} +#endif /* HAVE_ARCHIVE_DB */ diff --git a/sql/examples/ha_archive.h b/sql/examples/ha_archive.h new file mode 100644 index 00000000000..6ceb660e951 --- /dev/null +++ b/sql/examples/ha_archive.h @@ -0,0 +1,132 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifdef USE_PRAGMA_INTERFACE +#pragma interface /* gcc class implementation */ +#endif + +#include <zlib.h> + +/* + Please read ha_archive.cc first. If you are looking for more general + answers on how storage engines work, look at ha_example.cc and + ha_example.h. +*/ + +typedef struct st_archive_share { + char *table_name; + char data_file_name[FN_REFLEN]; + uint table_name_length,use_count; + pthread_mutex_t mutex; + THR_LOCK lock; + File meta_file; /* Meta file we use */ + gzFile archive_write; /* Archive file we are working with */ + bool dirty; /* Flag for if a flush should occur */ + ulonglong rows_recorded; /* Number of rows in tables */ +} ARCHIVE_SHARE; + +/* + Version for file format. + 1 - Initial Version +*/ +#define ARCHIVE_VERSION 1 + +class ha_archive: public handler +{ + THR_LOCK_DATA lock; /* MySQL lock */ + ARCHIVE_SHARE *share; /* Shared lock info */ + gzFile archive; /* Archive file we are working with */ + z_off_t current_position; /* The position of the row we just read */ + byte byte_buffer[IO_SIZE]; /* Initial buffer for our string */ + String buffer; /* Buffer used for blob storage */ + ulonglong scan_rows; /* Number of rows left in scan */ + +public: + ha_archive(TABLE *table): handler(table) + { + /* Set our original buffer from pre-allocated memory */ + buffer.set((char*)byte_buffer, IO_SIZE, system_charset_info); + + /* The size of the offset value we will use for position() */ + ref_length = sizeof(z_off_t); + } + ~ha_archive() + { + } + const char *table_type() const { return "ARCHIVE"; } + const char *index_type(uint inx) { return "NONE"; } + const char **bas_ext() const; + ulong table_flags() const + { + return (HA_REC_NOT_IN_SEQ | HA_NOT_EXACT_COUNT | HA_NO_AUTO_INCREMENT | + HA_FILE_BASED); + } + ulong index_flags(uint idx, uint part, bool all_parts) const + { + return 0; + } + /* + Have to put something here, there is no real limit as far as + archive is concerned. + */ + uint max_supported_record_length() const { return UINT_MAX; } + /* + Called in test_quick_select to determine if indexes should be used. 
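[Editorial aside] The constructor above points the blob String at the preallocated byte_buffer so that small rows never touch the allocator, and get_row() only grows it when the blobs demand more. A sketch of that small-buffer idea without the server's String class; the class name and sizes are made up.

  #include <cstddef>
  #include <cstdio>
  #include <cstdlib>

  // A buffer that starts on a caller-provided block and only heap-allocates on demand,
  // mirroring byte_buffer[IO_SIZE] / buffer.set() / buffer.alloc() above.
  class RowBuffer
  {
    char  *ptr_;
    size_t capacity_;
    bool   owned_;
  public:
    RowBuffer(char *initial, size_t size)
      : ptr_(initial), capacity_(size), owned_(false) {}
    ~RowBuffer() { if (owned_) free(ptr_); }

    char *alloc(size_t needed)            // like buffer.alloc(total_blob_length)
    {
      if (needed <= capacity_)
        return ptr_;                      // small rows never hit the heap
      char *bigger= (char *) malloc(needed);
      if (!bigger)
        return NULL;
      if (owned_)
        free(ptr_);
      ptr_= bigger;
      capacity_= needed;
      owned_= true;
      return ptr_;
    }
  };

  int main()
  {
    char stack_space[64];                 // stands in for byte_buffer[IO_SIZE]
    RowBuffer buffer(stack_space, sizeof(stack_space));
    char *p= buffer.alloc(32);            // reuses stack_space
    char *q= buffer.alloc(4096);          // forced onto the heap
    printf("%s\n", (p == stack_space && q != stack_space) ? "ok" : "unexpected");
    return 0;
  }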
+ */ + virtual double scan_time() { return (double) (records) / 20.0+10; } + /* The next method will never be called */ + virtual double read_time(uint index, uint ranges, ha_rows rows) + { return (double) rows / 20.0+1; } + int open(const char *name, int mode, uint test_if_locked); + int close(void); + int write_row(byte * buf); + int update_row(const byte * old_data, byte * new_data); + int delete_row(const byte * buf); + int delete_all_rows(); + int index_read(byte * buf, const byte * key, + uint key_len, enum ha_rkey_function find_flag); + int index_read_idx(byte * buf, uint idx, const byte * key, + uint key_len, enum ha_rkey_function find_flag); + int index_next(byte * buf); + int index_prev(byte * buf); + int index_first(byte * buf); + int index_last(byte * buf); + int rnd_init(bool scan=1); + int rnd_next(byte *buf); + int rnd_pos(byte * buf, byte *pos); + int get_row(gzFile file_to_read, byte *buf); + int read_meta_file(File meta_file, ulonglong *rows); + int write_meta_file(File meta_file, ulonglong rows, bool dirty); + ARCHIVE_SHARE *get_share(const char *table_name, TABLE *table); + int free_share(ARCHIVE_SHARE *share); + int rebuild_meta_file(char *table_name, File meta_file); + int read_data_header(gzFile file_to_read); + int write_data_header(gzFile file_to_write); + void position(const byte *record); + void info(uint); + int extra(enum ha_extra_function operation); + int reset(void); + int external_lock(THD *thd, int lock_type); + ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key); + int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info); + int optimize(THD* thd, HA_CHECK_OPT* check_opt); + THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, + enum thr_lock_type lock_type); +}; + +bool archive_db_init(void); +bool archive_db_end(void); + diff --git a/sql/examples/ha_example.cc b/sql/examples/ha_example.cc new file mode 100644 index 00000000000..b3edce5ba4a --- /dev/null +++ b/sql/examples/ha_example.cc @@ -0,0 +1,664 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* + ha_example is a stubbed storage engine. It does nothing at this point. It + will let you create/open/delete tables but that is all. You can enable it + in your buld by doing the following during your build process: + ./configure --with-example-storage-engine + + Once this is done mysql will let you create tables with: + CREATE TABLE A (...) ENGINE=EXAMPLE; + + The example is setup to use table locks. It implements an example "SHARE" + that is inserted into a hash by table name. You can use this to store + information of state that any example handler object will be able to see + if it is using the same table. + + Please read the object definition in ha_example.h before reading the rest + if this file. 
+ + To get an idea of what occurs here is an example select that would do a + scan of an entire table: + ha_example::store_lock + ha_example::external_lock + ha_example::info + ha_example::rnd_init + ha_example::extra + ENUM HA_EXTRA_CACHE Cash record in HA_rrnd() + ha_example::rnd_next + ha_example::rnd_next + ha_example::rnd_next + ha_example::rnd_next + ha_example::rnd_next + ha_example::rnd_next + ha_example::rnd_next + ha_example::rnd_next + ha_example::rnd_next + ha_example::extra + ENUM HA_EXTRA_NO_CACHE End cacheing of records (def) + ha_example::external_lock + ha_example::extra + ENUM HA_EXTRA_RESET Reset database to after open + + In the above example has 9 row called before rnd_next signalled that it was + at the end of its data. In the above example the table was already opened + (or you would have seen a call to ha_example::open(). Calls to + ha_example::extra() are hints as to what will be occuring to the request. + + Happy coding! + -Brian +*/ + +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation // gcc: Class implementation +#endif + +#include "../mysql_priv.h" + +#ifdef HAVE_EXAMPLE_DB +#include "ha_example.h" + +/* Variables for example share methods */ +static HASH example_open_tables; // Hash used to track open tables +pthread_mutex_t example_mutex; // This is the mutex we use to init the hash +static int example_init= 0; // Variable for checking the init state of hash + + +/* + Function we use in the creation of our hash to get key. +*/ +static byte* example_get_key(EXAMPLE_SHARE *share,uint *length, + my_bool not_used __attribute__((unused))) +{ + *length=share->table_name_length; + return (byte*) share->table_name; +} + + +/* + Example of simple lock controls. The "share" it creates is structure we will + pass to each example handler. Do you have to have one of these? Well, you have + pieces that are used for locking, and they are needed to function. +*/ +static EXAMPLE_SHARE *get_share(const char *table_name, TABLE *table) +{ + EXAMPLE_SHARE *share; + uint length; + char *tmp_name; + + /* + So why does this exist? There is no way currently to init a storage engine. + Innodb and BDB both have modifications to the server to allow them to + do this. Since you will not want to do this, this is probably the next + best method. 
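[Editorial aside] The comment above is describing lazy engine init: the hash and mutex are set up on first use, guarded by LOCK_mysql_create_db, because there is no dedicated init hook for a storage engine yet. A sketch of the same run-once requirement expressed with std::call_once instead of the double-checked lock; the function names are illustrative.

  #include <mutex>
  #include <cstdio>

  static std::once_flag engine_once;

  // Stand-in for the hash_init() + mutex init that the block below performs lazily.
  static void engine_init()
  {
    printf("hash_init + mutex init run exactly once\n");
  }

  // Every code path that might be first simply funnels through this.
  static void ensure_engine_init()
  {
    std::call_once(engine_once, engine_init);  // replaces the double-checked
                                               // LOCK_mysql_create_db block
  }

  int main()
  {
    ensure_engine_init();
    ensure_engine_init();                      // second call is a no-op
    return 0;
  }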
+ */ + if (!example_init) + { + /* Hijack a mutex for init'ing the storage engine */ + pthread_mutex_lock(&LOCK_mysql_create_db); + if (!example_init) + { + example_init++; + VOID(pthread_mutex_init(&example_mutex,MY_MUTEX_INIT_FAST)); + (void) hash_init(&example_open_tables,system_charset_info,32,0,0, + (hash_get_key) example_get_key,0,0); + } + pthread_mutex_unlock(&LOCK_mysql_create_db); + } + pthread_mutex_lock(&example_mutex); + length=(uint) strlen(table_name); + + if (!(share=(EXAMPLE_SHARE*) hash_search(&example_open_tables, + (byte*) table_name, + length))) + { + if (!(share=(EXAMPLE_SHARE *) + my_multi_malloc(MYF(MY_WME | MY_ZEROFILL), + &share, sizeof(*share), + &tmp_name, length+1, + NullS))) + { + pthread_mutex_unlock(&example_mutex); + return NULL; + } + + share->use_count=0; + share->table_name_length=length; + share->table_name=tmp_name; + strmov(share->table_name,table_name); + if (my_hash_insert(&example_open_tables, (byte*) share)) + goto error; + thr_lock_init(&share->lock); + pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST); + } + share->use_count++; + pthread_mutex_unlock(&example_mutex); + + return share; + +error: + pthread_mutex_destroy(&share->mutex); + pthread_mutex_unlock(&example_mutex); + my_free((gptr) share, MYF(0)); + + return NULL; +} + + +/* + Free lock controls. We call this whenever we close a table. If the table had + the last reference to the share then we free memory associated with it. +*/ +static int free_share(EXAMPLE_SHARE *share) +{ + pthread_mutex_lock(&example_mutex); + if (!--share->use_count) + { + hash_delete(&example_open_tables, (byte*) share); + thr_lock_delete(&share->lock); + pthread_mutex_destroy(&share->mutex); + my_free((gptr) share, MYF(0)); + } + pthread_mutex_unlock(&example_mutex); + + return 0; +} + + +/* + If frm_error() is called then we will use this to to find out what file extentions + exist for the storage engine. This is also used by the default rename_table and + delete_table method in handler.cc. +*/ +const char **ha_example::bas_ext() const +{ static const char *ext[]= { NullS }; return ext; } + + +/* + Used for opening tables. The name will be the name of the file. + A table is opened when it needs to be opened. For instance + when a request comes in for a select on the table (tables are not + open and closed for each request, they are cached). + + Called from handler.cc by handler::ha_open(). The server opens all tables by + calling ha_open() which then calls the handler specific open(). +*/ +int ha_example::open(const char *name, int mode, uint test_if_locked) +{ + DBUG_ENTER("ha_example::open"); + + if (!(share = get_share(name, table))) + DBUG_RETURN(1); + thr_lock_data_init(&share->lock,&lock,NULL); + + DBUG_RETURN(0); +} + + +/* + Closes a table. We call the free_share() function to free any resources + that we have allocated in the "shared" structure. + + Called from sql_base.cc, sql_select.cc, and table.cc. + In sql_select.cc it is only used to close up temporary tables or during + the process where a temporary table is converted over to being a + myisam table. + For sql_base.cc look at close_data_tables(). +*/ +int ha_example::close(void) +{ + DBUG_ENTER("ha_example::close"); + DBUG_RETURN(free_share(share)); +} + + +/* + write_row() inserts a row. No extra() hint is given currently if a bulk load + is happeneding. buf() is a byte array of data. You can use the field + information to extract the data from the native byte array type. 
+ Example of this would be: + for (Field **field=table->field ; *field ; field++) + { + ... + } + + See ha_tina.cc for an example of extracting all of the data as strings. + ha_berekly.cc has an example of how to store it intact by "packing" it + for ha_berkeley's own native storage type. + + See the note for update_row() on auto_increments and timestamps. This + case also applied to write_row(). + + Called from item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc, + sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc, and sql_update.cc. +*/ +int ha_example::write_row(byte * buf) +{ + DBUG_ENTER("ha_example::write_row"); + DBUG_RETURN(HA_ERR_WRONG_COMMAND); +} + + +/* + Yes, update_row() does what you expect, it updates a row. old_data will have + the previous row record in it, while new_data will have the newest data in + it. + Keep in mind that the server can do updates based on ordering if an ORDER BY + clause was used. Consecutive ordering is not guarenteed. + Currently new_data will not have an updated auto_increament record, or + and updated timestamp field. You can do these for example by doing these: + if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) + table->timestamp_field->set_time(); + if (table->next_number_field && record == table->record[0]) + update_auto_increment(); + + Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc. +*/ +int ha_example::update_row(const byte * old_data, byte * new_data) +{ + + DBUG_ENTER("ha_example::update_row"); + DBUG_RETURN(HA_ERR_WRONG_COMMAND); +} + + +/* + This will delete a row. buf will contain a copy of the row to be deleted. + The server will call this right after the current row has been called (from + either a previous rnd_nexT() or index call). + If you keep a pointer to the last row or can access a primary key it will + make doing the deletion quite a bit easier. + Keep in mind that the server does no guarentee consecutive deletions. ORDER BY + clauses can be used. + + Called in sql_acl.cc and sql_udf.cc to manage internal table information. + Called in sql_delete.cc, sql_insert.cc, and sql_select.cc. In sql_select it is + used for removing duplicates while in insert it is used for REPLACE calls. +*/ +int ha_example::delete_row(const byte * buf) +{ + DBUG_ENTER("ha_example::delete_row"); + DBUG_RETURN(HA_ERR_WRONG_COMMAND); +} + + +/* + Positions an index cursor to the index specified in the handle. Fetches the + row if available. If the key value is null, begin at the first key of the + index. +*/ +int ha_example::index_read(byte * buf, const byte * key, + uint key_len __attribute__((unused)), + enum ha_rkey_function find_flag + __attribute__((unused))) +{ + DBUG_ENTER("ha_example::index_read"); + DBUG_RETURN(HA_ERR_WRONG_COMMAND); +} + + +/* + Positions an index cursor to the index specified in key. Fetches the + row if any. This is only used to read whole keys. +*/ +int ha_example::index_read_idx(byte * buf, uint index, const byte * key, + uint key_len __attribute__((unused)), + enum ha_rkey_function find_flag + __attribute__((unused))) +{ + DBUG_ENTER("ha_example::index_read_idx"); + DBUG_RETURN(HA_ERR_WRONG_COMMAND); +} + + +/* + Used to read forward through the index. +*/ +int ha_example::index_next(byte * buf) +{ + DBUG_ENTER("ha_example::index_next"); + DBUG_RETURN(HA_ERR_WRONG_COMMAND); +} + + +/* + Used to read backwards through the index. 
+*/ +int ha_example::index_prev(byte * buf) +{ + DBUG_ENTER("ha_example::index_prev"); + DBUG_RETURN(HA_ERR_WRONG_COMMAND); +} + + +/* + index_first() asks for the first key in the index. + + Called from opt_range.cc, opt_sum.cc, sql_handler.cc, + and sql_select.cc. +*/ +int ha_example::index_first(byte * buf) +{ + DBUG_ENTER("ha_example::index_first"); + DBUG_RETURN(HA_ERR_WRONG_COMMAND); +} + + +/* + index_last() asks for the last key in the index. + + Called from opt_range.cc, opt_sum.cc, sql_handler.cc, + and sql_select.cc. +*/ +int ha_example::index_last(byte * buf) +{ + DBUG_ENTER("ha_example::index_last"); + DBUG_RETURN(HA_ERR_WRONG_COMMAND); +} + + +/* + rnd_init() is called when the system wants the storage engine to do a table + scan. + See the example in the introduction at the top of this file to see when + rnd_init() is called. + + Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc, sql_table.cc, + and sql_update.cc. +*/ +int ha_example::rnd_init(bool scan) +{ + DBUG_ENTER("ha_example::rnd_init"); + DBUG_RETURN(HA_ERR_WRONG_COMMAND); +} + +int ha_example::rnd_end() +{ + DBUG_ENTER("ha_example::rnd_end"); + DBUG_RETURN(0); +} + +/* + This is called for each row of the table scan. When you run out of records + you should return HA_ERR_END_OF_FILE. Fill buff up with the row information. + The Field structure for the table is the key to getting data into buf + in a manner that will allow the server to understand it. + + Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc, sql_table.cc, + and sql_update.cc. +*/ +int ha_example::rnd_next(byte *buf) +{ + DBUG_ENTER("ha_example::rnd_next"); + DBUG_RETURN(HA_ERR_END_OF_FILE); +} + + +/* + position() is called after each call to rnd_next() if the data needs + to be ordered. You can do something like the following to store + the position: + ha_store_ptr(ref, ref_length, current_position); + + The server uses ref to store data. ref_length in the above case is + the size needed to store current_position. ref is just a byte array + that the server will maintain. If you are using offsets to mark rows, then + current_position should be the offset. If it is a primary key like in + BDB, then it needs to be a primary key. + + Called from filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc. +*/ +void ha_example::position(const byte *record) +{ + DBUG_ENTER("ha_example::position"); + DBUG_VOID_RETURN; +} + + +/* + This is like rnd_next, but you are given a position to use + to determine the row. The position will be of the type that you stored in + ref. You can use ha_get_ptr(pos,ref_length) to retrieve whatever key + or position you saved when position() was called. + Called from filesort.cc records.cc sql_insert.cc sql_select.cc sql_update.cc. +*/ +int ha_example::rnd_pos(byte * buf, byte *pos) +{ + DBUG_ENTER("ha_example::rnd_pos"); + DBUG_RETURN(HA_ERR_WRONG_COMMAND); +} + + +/* + ::info() is used to return information to the optimizer. + Currently this table handler doesn't implement most of the fields + really needed. SHOW also makes use of this data + Another note, you will probably want to have the following in your + code: + if (records < 2) + records = 2; + The reason is that the server will optimize for cases of only a single + record. If in a table scan you don't know the number of records + it will probably be better to set records to two so you can return + as many records as you need. 
+ Along with records a few more variables you may wish to set are: + records + deleted + data_file_length + index_file_length + delete_length + check_time + Take a look at the public variables in handler.h for more information. + + Called in: + filesort.cc + ha_heap.cc + item_sum.cc + opt_sum.cc + sql_delete.cc + sql_delete.cc + sql_derived.cc + sql_select.cc + sql_select.cc + sql_select.cc + sql_select.cc + sql_select.cc + sql_show.cc + sql_show.cc + sql_show.cc + sql_show.cc + sql_table.cc + sql_union.cc + sql_update.cc + +*/ +void ha_example::info(uint flag) +{ + DBUG_ENTER("ha_example::info"); + DBUG_VOID_RETURN; +} + + +/* + extra() is called whenever the server wishes to send a hint to + the storage engine. The myisam engine implements the most hints. + ha_innodb.cc has the most exhaustive list of these hints. +*/ +int ha_example::extra(enum ha_extra_function operation) +{ + DBUG_ENTER("ha_example::extra"); + DBUG_RETURN(0); +} + + +/* + Deprecated and likely to be removed in the future. Storage engines normally + just make a call like: + ha_example::extra(HA_EXTRA_RESET); + to handle it. +*/ +int ha_example::reset(void) +{ + DBUG_ENTER("ha_example::reset"); + DBUG_RETURN(0); +} + + +/* + Used to delete all rows in a table. Both for cases of truncate and + for cases where the optimizer realizes that all rows will be + removed as a result of a SQL statement. + + Called from item_sum.cc by Item_func_group_concat::clear(), + Item_sum_count_distinct::clear(), and Item_func_group_concat::clear(). + Called from sql_delete.cc by mysql_delete(). + Called from sql_select.cc by JOIN::reinit(). + Called from sql_union.cc by st_select_lex_unit::exec(). +*/ +int ha_example::delete_all_rows() +{ + DBUG_ENTER("ha_example::delete_all_rows"); + DBUG_RETURN(HA_ERR_WRONG_COMMAND); +} + + +/* + First you should go read the section "locking functions for mysql" in + lock.cc to understand this. + This create a lock on the table. If you are implementing a storage engine + that can handle transacations look at ha_berkely.cc to see how you will + want to goo about doing this. Otherwise you should consider calling flock() + here. + + Called from lock.cc by lock_external() and unlock_external(). Also called + from sql_table.cc by copy_data_between_tables(). +*/ +int ha_example::external_lock(THD *thd, int lock_type) +{ + DBUG_ENTER("ha_example::external_lock"); + DBUG_RETURN(0); +} + + +/* + The idea with handler::store_lock() is the following: + + The statement decided which locks we should need for the table + for updates/deletes/inserts we get WRITE locks, for SELECT... we get + read locks. + + Before adding the lock into the table lock handler (see thr_lock.c) + mysqld calls store lock with the requested locks. Store lock can now + modify a write lock to a read lock (or some other lock), ignore the + lock (if we don't want to use MySQL table locks at all) or add locks + for many tables (like we do when we are using a MERGE handler). + + Berkeley DB for example changes all WRITE locks to TL_WRITE_ALLOW_WRITE + (which signals that we are doing WRITES, but we are still allowing other + reader's and writer's. + + When releasing locks, store_lock() are also called. In this case one + usually doesn't have to do anything. + + In some exceptional cases MySQL may send a request for a TL_IGNORE; + This means that we are requesting the same lock as last time and this + should also be ignored. 
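[Editorial aside] ha_archive::store_lock() earlier in this patch shows the two conversions this text describes: write locks become TL_WRITE_ALLOW_WRITE outside LOCK TABLES, and TL_READ_NO_INSERT becomes TL_READ so INSERT ... SELECT does not block writers. A standalone sketch of just that decision, with a simplified stand-in for thr_lock_type (the real enum in thr_lock.h has more members, and the handler also checks thd->tablespace_op).

  #include <cstdio>

  // Simplified stand-in for thr_lock_type; the ordering mirrors the real enum
  // closely enough for the range comparison used in store_lock().
  enum LockType
  {
    TL_IGNORE= -1, TL_UNLOCK, TL_READ, TL_READ_NO_INSERT,
    TL_WRITE_ALLOW_WRITE, TL_WRITE_CONCURRENT_INSERT, TL_WRITE
  };

  // Decide which lock to actually register, given the lock MySQL asked for.
  LockType downgrade(LockType requested, bool in_lock_tables)
  {
    if (requested >= TL_WRITE_CONCURRENT_INSERT && requested <= TL_WRITE &&
        !in_lock_tables)
      return TL_WRITE_ALLOW_WRITE;     // let several writers append concurrently
    if (requested == TL_READ_NO_INSERT && !in_lock_tables)
      return TL_READ;                  // keep INSERT ... SELECT from blocking writers
    return requested;
  }

  int main()
  {
    printf("%d %d\n",
           downgrade(TL_WRITE, false),           // becomes TL_WRITE_ALLOW_WRITE
           downgrade(TL_READ_NO_INSERT, false)); // becomes TL_READ
    return 0;
  }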
(This may happen when someone does a flush + table when we have opened a part of the tables, in which case mysqld + closes and reopens the tables and tries to get the same locks at last + time). In the future we will probably try to remove this. + + Called from lock.cc by get_lock_data(). +*/ +THR_LOCK_DATA **ha_example::store_lock(THD *thd, + THR_LOCK_DATA **to, + enum thr_lock_type lock_type) +{ + if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) + lock.type=lock_type; + *to++= &lock; + return to; +} + +/* + Used to delete a table. By the time delete_table() has been called all + opened references to this table will have been closed (and your globally + shared references released. The variable name will just be the name of + the table. You will need to remove any files you have created at this point. + + If you do not implement this, the default delete_table() is called from + handler.cc and it will delete all files with the file extentions returned + by bas_ext(). + + Called from handler.cc by delete_table and ha_create_table(). Only used + during create if the table_flag HA_DROP_BEFORE_CREATE was specified for + the storage engine. +*/ +int ha_example::delete_table(const char *name) +{ + DBUG_ENTER("ha_example::delete_table"); + /* This is not implemented but we want someone to be able that it works. */ + DBUG_RETURN(0); +} + +/* + Renames a table from one name to another from alter table call. + + If you do not implement this, the default rename_table() is called from + handler.cc and it will delete all files with the file extentions returned + by bas_ext(). + + Called from sql_table.cc by mysql_rename_table(). +*/ +int ha_example::rename_table(const char * from, const char * to) +{ + DBUG_ENTER("ha_example::rename_table "); + DBUG_RETURN(HA_ERR_WRONG_COMMAND); +} + +/* + Given a starting key, and an ending key estimate the number of rows that + will exist between the two. end_key may be empty which in case determine + if start_key matches any rows. + + Called from opt_range.cc by check_quick_keys(). +*/ +ha_rows ha_example::records_in_range(uint inx, key_range *min_key, + key_range *max_key) +{ + DBUG_ENTER("ha_example::records_in_range"); + DBUG_RETURN(10); // low number to force index usage +} + + +/* + create() is called to create a database. The variable name will have the name + of the table. When create() is called you do not need to worry about opening + the table. Also, the FRM file will have already been created so adjusting + create_info will not do you any good. You can overwrite the frm file at this + point if you wish to change the table definition, but there are no methods + currently provided for doing that. + + Called from handle.cc by ha_create_table(). +*/ +int ha_example::create(const char *name, TABLE *table_arg, + HA_CREATE_INFO *create_info) +{ + DBUG_ENTER("ha_example::create"); + /* This is not implemented but we want someone to be able that it works. */ + DBUG_RETURN(0); +} +#endif /* HAVE_EXAMPLE_DB */ diff --git a/sql/examples/ha_example.h b/sql/examples/ha_example.h new file mode 100644 index 00000000000..ae72e5bb275 --- /dev/null +++ b/sql/examples/ha_example.h @@ -0,0 +1,156 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* + Please read ha_exmple.cc before reading this file. + Please keep in mind that the example storage engine implements all methods + that are required to be implemented. handler.h has a full list of methods + that you can implement. +*/ + +#ifdef USE_PRAGMA_INTERFACE +#pragma interface /* gcc class implementation */ +#endif + +/* + EXAMPLE_SHARE is a structure that will be shared amoung all open handlers + The example implements the minimum of what you will probably need. +*/ +typedef struct st_example_share { + char *table_name; + uint table_name_length,use_count; + pthread_mutex_t mutex; + THR_LOCK lock; +} EXAMPLE_SHARE; + +/* + Class definition for the storage engine +*/ +class ha_example: public handler +{ + THR_LOCK_DATA lock; /* MySQL lock */ + EXAMPLE_SHARE *share; /* Shared lock info */ + +public: + ha_example(TABLE *table): handler(table) + { + } + ~ha_example() + { + } + /* The name that will be used for display purposes */ + const char *table_type() const { return "EXAMPLE"; } + /* + The name of the index type that will be used for display + don't implement this method unless you really have indexes + */ + const char *index_type(uint inx) { return "HASH"; } + const char **bas_ext() const; + /* + This is a list of flags that says what the storage engine + implements. The current table flags are documented in + handler.h + */ + ulong table_flags() const + { + return 0; + } + /* + This is a bitmap of flags that says how the storage engine + implements indexes. The current index flags are documented in + handler.h. If you do not implement indexes, just return zero + here. + + part is the key part to check. First key part is 0 + If all_parts it's set, MySQL want to know the flags for the combined + index up to and including 'part'. + */ + ulong index_flags(uint inx, uint part, bool all_parts) const + { + return 0; + } + /* + unireg.cc will call the following to make sure that the storage engine can + handle the data it is about to send. + + Return *real* limits of your storage engine here. MySQL will do + min(your_limits, MySQL_limits) automatically + + There is no need to implement ..._key_... methods if you don't suport + indexes. + */ + uint max_supported_record_length() const { return HA_MAX_REC_LENGTH; } + uint max_supported_keys() const { return 0; } + uint max_supported_key_parts() const { return 0; } + uint max_supported_key_length() const { return 0; } + /* + Called in test_quick_select to determine if indexes should be used. + */ + virtual double scan_time() { return (double) (records+deleted) / 20.0+10; } + /* + The next method will never be called if you do not implement indexes. + */ + virtual double read_time(ha_rows rows) { return (double) rows / 20.0+1; } + + /* + Everything below are methods that we implment in ha_example.cc. 
+ + Most of these methods are not obligatory, skip them and + MySQL will treat them as not implemented + */ + int open(const char *name, int mode, uint test_if_locked); // required + int close(void); // required + + int write_row(byte * buf); + int update_row(const byte * old_data, byte * new_data); + int delete_row(const byte * buf); + int index_read(byte * buf, const byte * key, + uint key_len, enum ha_rkey_function find_flag); + int index_read_idx(byte * buf, uint idx, const byte * key, + uint key_len, enum ha_rkey_function find_flag); + int index_next(byte * buf); + int index_prev(byte * buf); + int index_first(byte * buf); + int index_last(byte * buf); + /* + unlike index_init(), rnd_init() can be called two times + without rnd_end() in between (it only makes sense if scan=1). + then the second call should prepare for the new table scan + (e.g if rnd_init allocates the cursor, second call should + position it to the start of the table, no need to deallocate + and allocate it again + */ + int rnd_init(bool scan); //required + int rnd_end(); + int rnd_next(byte *buf); //required + int rnd_pos(byte * buf, byte *pos); //required + void position(const byte *record); //required + void info(uint); //required + + int extra(enum ha_extra_function operation); + int reset(void); + int external_lock(THD *thd, int lock_type); //required + int delete_all_rows(void); + ha_rows records_in_range(uint inx, key_range *min_key, + key_range *max_key); + int delete_table(const char *from); + int rename_table(const char * from, const char * to); + int create(const char *name, TABLE *form, + HA_CREATE_INFO *create_info); //required + + THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, + enum thr_lock_type lock_type); //required +}; diff --git a/sql/examples/ha_tina.cc b/sql/examples/ha_tina.cc new file mode 100644 index 00000000000..91e42bfea31 --- /dev/null +++ b/sql/examples/ha_tina.cc @@ -0,0 +1,841 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* + Make sure to look at ha_tina.h for more details. + + First off, this is a play thing for me, there are a number of things wrong with it: + *) It was designed for csv and therefor its performance is highly questionable. + *) Indexes have not been implemented. This is because the files can be traded in + and out of the table directory without having to worry about rebuilding anything. + *) NULLs and "" are treated equally (like a spreadsheet). + *) There was in the beginning no point to anyone seeing this other then me, so there + is a good chance that I haven't quite documented it well. + *) Less design, more "make it work" + + Now there are a few cool things with it: + *) Errors can result in corrupted data files. + *) Data files can be read by spreadsheets directly. 
+ +TODO: + *) Move to a block system for larger files + *) Error recovery, its all there, just need to finish it + *) Document how the chains work. + + -Brian +*/ + +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation // gcc: Class implementation +#endif + +#include "mysql_priv.h" + +#ifdef HAVE_CSV_DB + +#include "ha_tina.h" +#include <sys/mman.h> + +/* Stuff for shares */ +pthread_mutex_t tina_mutex; +static HASH tina_open_tables; +static int tina_init= 0; + +/***************************************************************************** + ** TINA tables + *****************************************************************************/ + +/* + Used for sorting chains with qsort(). +*/ +int sort_set (tina_set *a, tina_set *b) +{ + /* + We assume that intervals do not intersect. So, it is enought to compare + any two points. Here we take start of intervals for comparison. + */ + return ( a->begin > b->begin ? -1 : ( a->begin < b->begin ? 1 : 0 ) ); +} + +static byte* tina_get_key(TINA_SHARE *share,uint *length, + my_bool not_used __attribute__((unused))) +{ + *length=share->table_name_length; + return (byte*) share->table_name; +} + +/* + Reloads the mmap file. +*/ +int get_mmap(TINA_SHARE *share, int write) +{ + DBUG_ENTER("ha_tina::get_mmap"); + if (share->mapped_file && munmap(share->mapped_file, share->file_stat.st_size)) + DBUG_RETURN(1); + + if (my_fstat(share->data_file, &share->file_stat, MYF(MY_WME)) == -1) + DBUG_RETURN(1); + + if (share->file_stat.st_size) + { + if (write) + share->mapped_file= (byte *)mmap(NULL, share->file_stat.st_size, + PROT_READ|PROT_WRITE, MAP_SHARED, + share->data_file, 0); + else + share->mapped_file= (byte *)mmap(NULL, share->file_stat.st_size, + PROT_READ, MAP_PRIVATE, + share->data_file, 0); + if ((share->mapped_file ==(caddr_t)-1)) + { + /* + Bad idea you think? See the problem is that nothing actually checks + the return value of ::rnd_init(), so tossing an error is about + it for us. + Never going to happen right? :) + */ + my_message(errno, "Woops, blew up opening a mapped file", 0); + DBUG_ASSERT(0); + DBUG_RETURN(1); + } + } + else + share->mapped_file= NULL; + + DBUG_RETURN(0); +} + +/* + Simple lock controls. 
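[Editorial aside] get_mmap() above maps the entire data file and has to re-map it whenever the size changes. A minimal sketch of mapping a file read-only and walking it for newlines, which is the per-row job find_eoln() does further down; the file name is illustrative and errors just bail out.

  #include <sys/mman.h>
  #include <sys/stat.h>
  #include <fcntl.h>
  #include <unistd.h>
  #include <cstdio>

  int main()
  {
    int fd= open("demo.CSV", O_RDONLY);
    if (fd < 0)
      return 1;

    struct stat st;
    if (fstat(fd, &st) == -1 || st.st_size == 0)   // like the my_fstat() call above
    {
      close(fd);
      return 1;
    }

    // Map the whole file read-only, the way get_mmap() does for scans.
    char *data= (char *) mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (data == MAP_FAILED)
    {
      close(fd);
      return 1;
    }

    // Walk the mapping and count line ends, the job find_eoln() does per row.
    long rows= 0;
    for (off_t i= 0; i < st.st_size; i++)
      if (data[i] == '\n')
        rows++;
    printf("%ld rows\n", rows);

    munmap(data, st.st_size);
    close(fd);
    return 0;
  }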
+*/ +static TINA_SHARE *get_share(const char *table_name, TABLE *table) +{ + TINA_SHARE *share; + char *tmp_name; + uint length; + + if (!tina_init) + { + /* Hijack a mutex for init'ing the storage engine */ + pthread_mutex_lock(&LOCK_mysql_create_db); + if (!tina_init) + { + tina_init++; + VOID(pthread_mutex_init(&tina_mutex,MY_MUTEX_INIT_FAST)); + (void) hash_init(&tina_open_tables,system_charset_info,32,0,0, + (hash_get_key) tina_get_key,0,0); + } + pthread_mutex_unlock(&LOCK_mysql_create_db); + } + pthread_mutex_lock(&tina_mutex); + length=(uint) strlen(table_name); + if (!(share=(TINA_SHARE*) hash_search(&tina_open_tables, + (byte*) table_name, + length))) + { + char data_file_name[FN_REFLEN]; + if (!my_multi_malloc(MYF(MY_WME | MY_ZEROFILL), + &share, sizeof(*share), + &tmp_name, length+1, + NullS)) + { + pthread_mutex_unlock(&tina_mutex); + return NULL; + } + + share->use_count=0; + share->table_name_length=length; + share->table_name=tmp_name; + strmov(share->table_name,table_name); + fn_format(data_file_name, table_name, "", ".CSV",MY_REPLACE_EXT|MY_UNPACK_FILENAME); + if (my_hash_insert(&tina_open_tables, (byte*) share)) + goto error; + thr_lock_init(&share->lock); + pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST); + + if ((share->data_file= my_open(data_file_name, O_RDWR|O_APPEND, + MYF(0))) == -1) + goto error2; + + /* We only use share->data_file for writing, so we scan to the end to append */ + if (my_seek(share->data_file, 0, SEEK_END, MYF(0)) == MY_FILEPOS_ERROR) + goto error2; + + share->mapped_file= NULL; // We don't know the state since we just allocated it + if (get_mmap(share, 0) > 0) + goto error3; + } + share->use_count++; + pthread_mutex_unlock(&tina_mutex); + + return share; + +error3: + my_close(share->data_file,MYF(0)); +error2: + thr_lock_delete(&share->lock); + pthread_mutex_destroy(&share->mutex); +error: + pthread_mutex_unlock(&tina_mutex); + my_free((gptr) share, MYF(0)); + + return NULL; +} + + +/* + Free lock controls. +*/ +static int free_share(TINA_SHARE *share) +{ + DBUG_ENTER("ha_tina::free_share"); + pthread_mutex_lock(&tina_mutex); + int result_code= 0; + if (!--share->use_count){ + /* Drop the mapped file */ + if (share->mapped_file) + munmap(share->mapped_file, share->file_stat.st_size); + result_code= my_close(share->data_file,MYF(0)); + hash_delete(&tina_open_tables, (byte*) share); + thr_lock_delete(&share->lock); + pthread_mutex_destroy(&share->mutex); + my_free((gptr) share, MYF(0)); + } + pthread_mutex_unlock(&tina_mutex); + + DBUG_RETURN(result_code); +} + + +/* + Finds the end of a line. + Currently only supports files written on a UNIX OS. +*/ +byte * find_eoln(byte *data, off_t begin, off_t end) +{ + for (off_t x= begin; x < end; x++) + if (data[x] == '\n') + return data + x; + + return 0; +} + +/* + Encode a buffer into the quoted format. 
+*/ +int ha_tina::encode_quote(byte *buf) +{ + char attribute_buffer[1024]; + String attribute(attribute_buffer, sizeof(attribute_buffer), &my_charset_bin); + + buffer.length(0); + for (Field **field=table->field ; *field ; field++) + { + const char *ptr; + const char *end_ptr; + + (*field)->val_str(&attribute,&attribute); + ptr= attribute.ptr(); + end_ptr= attribute.length() + ptr; + + buffer.append('"'); + + while (ptr < end_ptr) + { + if (*ptr == '"') + { + buffer.append('\\'); + buffer.append('"'); + *ptr++; + } + else if (*ptr == '\r') + { + buffer.append('\\'); + buffer.append('r'); + *ptr++; + } + else if (*ptr == '\\') + { + buffer.append('\\'); + buffer.append('\\'); + *ptr++; + } + else if (*ptr == '\n') + { + buffer.append('\\'); + buffer.append('n'); + *ptr++; + } + else + buffer.append(*ptr++); + } + buffer.append('"'); + buffer.append(','); + } + // Remove the comma, add a line feed + buffer.length(buffer.length() - 1); + buffer.append('\n'); + //buffer.replace(buffer.length(), 0, "\n", 1); + + return (buffer.length()); +} + +/* + chain_append() adds delete positions to the chain that we use to keep track of space. +*/ +int ha_tina::chain_append() +{ + if ( chain_ptr != chain && (chain_ptr -1)->end == current_position) + (chain_ptr -1)->end= next_position; + else + { + /* We set up for the next position */ + if ((off_t)(chain_ptr - chain) == (chain_size -1)) + { + off_t location= chain_ptr - chain; + chain_size += DEFAULT_CHAIN_LENGTH; + if (chain_alloced) + { + /* Must cast since my_malloc unlike malloc doesn't have a void ptr */ + if ((chain= (tina_set *)my_realloc((gptr)chain,chain_size,MYF(MY_WME))) == NULL) + return -1; + } + else + { + tina_set *ptr= (tina_set *)my_malloc(chain_size * sizeof(tina_set),MYF(MY_WME)); + memcpy(ptr, chain, DEFAULT_CHAIN_LENGTH * sizeof(tina_set)); + chain= ptr; + chain_alloced++; + } + chain_ptr= chain + location; + } + chain_ptr->begin= current_position; + chain_ptr->end= next_position; + chain_ptr++; + } + + return 0; +} + + +/* + Scans for a row. +*/ +int ha_tina::find_current_row(byte *buf) +{ + byte *mapped_ptr= (byte *)share->mapped_file + current_position; + byte *end_ptr; + DBUG_ENTER("ha_tina::find_current_row"); + + /* EOF should be counted as new line */ + if ((end_ptr= find_eoln(share->mapped_file, current_position, share->file_stat.st_size)) == 0) + DBUG_RETURN(HA_ERR_END_OF_FILE); + + for (Field **field=table->field ; *field ; field++) + { + buffer.length(0); + mapped_ptr++; // Increment past the first quote + for(;mapped_ptr != end_ptr; mapped_ptr++) + { + //Need to convert line feeds! + if (*mapped_ptr == '"' && + (((mapped_ptr[1] == ',') && (mapped_ptr[2] == '"')) || (mapped_ptr == end_ptr -1 ))) + { + mapped_ptr += 2; // Move past the , and the " + break; + } + if (*mapped_ptr == '\\' && mapped_ptr != (end_ptr - 1)) + { + mapped_ptr++; + if (*mapped_ptr == 'r') + buffer.append('\r'); + else if (*mapped_ptr == 'n' ) + buffer.append('\n'); + else if ((*mapped_ptr == '\\') || (*mapped_ptr == '"')) + buffer.append(*mapped_ptr); + else /* This could only happed with an externally created file */ + { + buffer.append('\\'); + buffer.append(*mapped_ptr); + } + } + else + buffer.append(*mapped_ptr); + } + (*field)->store(buffer.ptr(), buffer.length(), system_charset_info); + } + next_position= (end_ptr - share->mapped_file)+1; + /* Maybe use \N for null? */ + memset(buf, 0, table->null_bytes); /* We do not implement nulls! 
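[Editorial aside] encode_quote() and find_current_row() above are mirror images: one wraps each attribute in double quotes and backslash-escapes quote, backslash, CR and LF, the other strips that back off while splitting on the quote-comma-quote boundaries. A self-contained round trip of just the escaping for a single, already isolated field; these helpers are a sketch, not the engine's code.

  #include <string>
  #include <cstdio>

  // Quote one attribute the way encode_quote() does.
  static std::string encode_field(const std::string &value)
  {
    std::string out= "\"";
    for (size_t i= 0; i < value.size(); i++)
    {
      char c= value[i];
      if (c == '"')       out+= "\\\"";
      else if (c == '\r') out+= "\\r";
      else if (c == '\\') out+= "\\\\";
      else if (c == '\n') out+= "\\n";
      else                out+= c;
    }
    out+= '"';
    return out;
  }

  // Undo it the way find_current_row() does, for one quoted field.
  static std::string decode_field(const std::string &quoted)
  {
    std::string out;
    for (size_t i= 1; i + 1 < quoted.size(); i++)   // skip the surrounding quotes
    {
      char c= quoted[i];
      if (c == '\\' && i + 2 < quoted.size())
      {
        char next= quoted[++i];
        if (next == 'r')      out+= '\r';
        else if (next == 'n') out+= '\n';
        else                  out+= next;           // covers \" and escaped backslash
      }
      else
        out+= c;
    }
    return out;
  }

  int main()
  {
    std::string original= "line one\nsays \"hi\"";
    std::string encoded= encode_field(original);
    printf("round trip %s\n",
           decode_field(encoded) == original ? "ok" : "broken");
    return 0;
  }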
*/ + + DBUG_RETURN(0); +} + +/* + If frm_error() is called in table.cc this is called to find out what file + extensions exist for this handler. +*/ +const char **ha_tina::bas_ext() const +{ static const char *ext[]= { ".CSV", NullS }; return ext; } + + +/* + Open a database file. Keep in mind that tables are caches, so + this will not be called for every request. Any sort of positions + that need to be reset should be kept in the ::extra() call. +*/ +int ha_tina::open(const char *name, int mode, uint test_if_locked) +{ + DBUG_ENTER("ha_tina::open"); + + if (!(share= get_share(name, table))) + DBUG_RETURN(1); + thr_lock_data_init(&share->lock,&lock,NULL); + ref_length=sizeof(off_t); + + DBUG_RETURN(0); +} + + +/* + Close a database file. We remove ourselves from the shared strucutre. + If it is empty we destroy it and free the mapped file. +*/ +int ha_tina::close(void) +{ + DBUG_ENTER("ha_tina::close"); + DBUG_RETURN(free_share(share)); +} + +/* + This is an INSERT. At the moment this handler just seeks to the end + of the file and appends the data. In an error case it really should + just truncate to the original position (this is not done yet). +*/ +int ha_tina::write_row(byte * buf) +{ + int size; + DBUG_ENTER("ha_tina::write_row"); + + statistic_increment(ha_write_count,&LOCK_status); + + if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) + table->timestamp_field->set_time(); + + size= encode_quote(buf); + + if (my_write(share->data_file, buffer.ptr(), size, MYF(MY_WME | MY_NABP))) + DBUG_RETURN(-1); + + /* + Ok, this is means that we will be doing potentially bad things + during a bulk insert on some OS'es. What we need is a cleanup + call for ::write_row that would let us fix up everything after the bulk + insert. The archive handler does this with an extra mutx call, which + might be a solution for this. + */ + if (get_mmap(share, 0) > 0) + DBUG_RETURN(-1); + DBUG_RETURN(0); +} + + +/* + This is called for an update. + Make sure you put in code to increment the auto increment, also + update any timestamp data. Currently auto increment is not being + fixed since autoincrements have yet to be added to this table handler. + This will be called in a table scan right before the previous ::rnd_next() + call. +*/ +int ha_tina::update_row(const byte * old_data, byte * new_data) +{ + int size; + DBUG_ENTER("ha_tina::update_row"); + + statistic_increment(ha_update_count,&LOCK_status); + + if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) + table->timestamp_field->set_time(); + + size= encode_quote(new_data); + + if (chain_append()) + DBUG_RETURN(-1); + + if (my_write(share->data_file, buffer.ptr(), size, MYF(MY_WME | MY_NABP))) + DBUG_RETURN(-1); + DBUG_RETURN(0); +} + + +/* + Deletes a row. First the database will find the row, and then call this method. + In the case of a table scan, the previous call to this will be the ::rnd_next() + that found this row. + The exception to this is an ORDER BY. This will cause the table handler to walk + the table noting the positions of all rows that match a query. The table will + then be deleted/positioned based on the ORDER (so RANDOM, DESC, ASC). +*/ +int ha_tina::delete_row(const byte * buf) +{ + DBUG_ENTER("ha_tina::delete_row"); + statistic_increment(ha_delete_count,&LOCK_status); + + if (chain_append()) + DBUG_RETURN(-1); + + --records; + + DBUG_RETURN(0); +} + +/* + Fill buf with value from key. Simply this is used for a single index read + with a key. 
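[Editorial aside] ha_tina::write_row() above appends the encoded row to the O_APPEND data file and then calls get_mmap() again, because the existing mapping does not cover the newly appended bytes. A standalone sketch of that append-then-remap step; the file name and row text are made up.

  #include <sys/mman.h>
  #include <sys/stat.h>
  #include <fcntl.h>
  #include <unistd.h>
  #include <cstdio>
  #include <cstring>

  int main()
  {
    // Writer side: the data file is opened O_APPEND, so every write lands at the end.
    int fd= open("append_demo.CSV", O_RDWR | O_APPEND | O_CREAT, 0644);
    if (fd < 0)
      return 1;
    const char row[]= "\"new\",\"row\"\n";
    if (write(fd, row, strlen(row)) != (ssize_t) strlen(row))
      return 1;

    // Reader side: any earlier mapping is now too short, so map again at the new size,
    // which is what the get_mmap() call at the end of write_row() is for.
    struct stat st;
    if (fstat(fd, &st) == -1)
      return 1;
    char *data= (char *) mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (data == MAP_FAILED)
      return 1;
    fwrite(data, 1, st.st_size, stdout);   // the freshly appended row is visible

    munmap(data, st.st_size);
    close(fd);
    return 0;
  }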
+/*
+  Fill buf with the value from key. Put simply, this is used for a single
+  index read with a key.
+*/
+int ha_tina::index_read(byte * buf, const byte * key,
+                        uint key_len __attribute__((unused)),
+                        enum ha_rkey_function find_flag
+                        __attribute__((unused)))
+{
+  DBUG_ENTER("ha_tina::index_read");
+  DBUG_ASSERT(0);
+  DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
+}
+
+/*
+  Fill buf with the value from key. Put simply, this is used for a single
+  index read with a key.
+  Whatever the current key is we will use it. This is what will be in
+  "index".
+*/
+int ha_tina::index_read_idx(byte * buf, uint index, const byte * key,
+                            uint key_len __attribute__((unused)),
+                            enum ha_rkey_function find_flag
+                            __attribute__((unused)))
+{
+  DBUG_ENTER("ha_tina::index_read_idx");
+  DBUG_ASSERT(0);
+  DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
+}
+
+
+/*
+  Read the next position in the index.
+*/
+int ha_tina::index_next(byte * buf)
+{
+  DBUG_ENTER("ha_tina::index_next");
+  DBUG_ASSERT(0);
+  DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
+}
+
+/*
+  Read the previous position in the index.
+*/
+int ha_tina::index_prev(byte * buf)
+{
+  DBUG_ENTER("ha_tina::index_prev");
+  DBUG_ASSERT(0);
+  DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
+}
+
+/*
+  Read the first position in the index.
+*/
+int ha_tina::index_first(byte * buf)
+{
+  DBUG_ENTER("ha_tina::index_first");
+  DBUG_ASSERT(0);
+  DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
+}
+
+/*
+  Read the last position in the index.
+  With this we don't need to do a filesort() with the index.
+  We just read the last row and call previous.
+*/
+int ha_tina::index_last(byte * buf)
+{
+  DBUG_ENTER("ha_tina::index_last");
+  DBUG_ASSERT(0);
+  DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
+}
+
+/*
+  All table scans call this first.
+  The order of a table scan is:
+
+  ha_tina::store_lock
+  ha_tina::external_lock
+  ha_tina::info
+  ha_tina::rnd_init
+  ha_tina::extra
+  ENUM HA_EXTRA_CACHE   Cache record in HA_rrnd()
+  ha_tina::rnd_next
+  ha_tina::rnd_next
+  ha_tina::rnd_next
+  ha_tina::rnd_next
+  ha_tina::rnd_next
+  ha_tina::rnd_next
+  ha_tina::rnd_next
+  ha_tina::rnd_next
+  ha_tina::rnd_next
+  ha_tina::extra
+  ENUM HA_EXTRA_NO_CACHE   End caching of records (def)
+  ha_tina::external_lock
+  ha_tina::extra
+  ENUM HA_EXTRA_RESET   Reset database to after open
+
+  Each call to ::rnd_next() represents a row returned in the scan. When no
+  more rows can be returned, rnd_next() returns a value of
+  HA_ERR_END_OF_FILE.
+  The ::info() call is just for the optimizer.
+
+*/
+
+int ha_tina::rnd_init(bool scan)
+{
+  DBUG_ENTER("ha_tina::rnd_init");
+
+  current_position= next_position= 0;
+  records= 0;
+  chain_ptr= chain;
+#ifdef HAVE_MADVISE
+  if (scan)
+    (void)madvise(share->mapped_file,share->file_stat.st_size,MADV_SEQUENTIAL);
+#endif
+
+  DBUG_RETURN(0);
+}
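The sequence listed above boils down to a simple driving loop on the server side: initialize the scan, call rnd_next() until it reports end of file, then tear down. A compressed sketch of that loop follows; it uses a small stand-in cursor type rather than the real handler class so it stays self-contained, and the lock/info/extra calls are elided.

#include <cstddef>
#include <cstdio>
#include <vector>

static const int END_OF_FILE= 1;   // stand-in for the real HA_ERR_END_OF_FILE

// Minimal stand-in for the parts of the handler interface a scan uses.
struct CursorLike
{
  std::vector<int> rows{1, 2, 3};
  std::size_t pos= 0;
  int rnd_init(bool /*scan*/) { pos= 0; return 0; }
  int rnd_next(int *buf)
  {
    if (pos == rows.size())
      return END_OF_FILE;
    *buf= rows[pos++];
    return 0;
  }
  int rnd_end() { return 0; }
};

int main()
{
  CursorLike cursor;
  int row;
  cursor.rnd_init(true);                        // start of the table scan
  while (cursor.rnd_next(&row) != END_OF_FILE)  // one call per row
    std::printf("row %d\n", row);
  cursor.rnd_end();                             // cleanup / compaction hook
  return 0;
}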
+/*
+  ::rnd_next() does all the heavy lifting for a table scan. You will need to
+  populate *buf with the correct field data. You can walk the fields to
+  determine at what position you should store the data (take a look at how
+  ::find_current_row() works). The structure is something like:
+  0Foo  Dog  Friend
+  The first offset is for the first attribute. All space before that is
+  reserved for the null count. Basically this works as a mask for which
+  fields are nulled (compared to just empty).
+  This table handler doesn't do nulls and does not know the difference
+  between NULL and "". This is ok since this table handler is for
+  spreadsheets and they don't know about them either :)
+*/
+int ha_tina::rnd_next(byte *buf)
+{
+  DBUG_ENTER("ha_tina::rnd_next");
+
+  statistic_increment(ha_read_rnd_next_count,&LOCK_status);
+
+  current_position= next_position;
+  if (!share->mapped_file)
+    DBUG_RETURN(HA_ERR_END_OF_FILE);
+  if (HA_ERR_END_OF_FILE == find_current_row(buf) )
+    DBUG_RETURN(HA_ERR_END_OF_FILE);
+
+  records++;
+  DBUG_RETURN(0);
+}
+
+/*
+  In the case of an ORDER BY, rows will need to be sorted.
+  ::position() is called after each call to ::rnd_next();
+  the data it stores is written to a byte array. You can store this
+  data via ha_store_ptr(). ref_length is a variable defined in the
+  class that is the sizeof() of the position being stored. In our case
+  it's just a position. Look at the bdb code if you want to see a case
+  where something other than a number is stored.
+*/
+void ha_tina::position(const byte *record)
+{
+  DBUG_ENTER("ha_tina::position");
+  ha_store_ptr(ref, ref_length, current_position);
+  DBUG_VOID_RETURN;
+}
+
+
+/*
+  Used to fetch a row from a position stored with ::position().
+  ha_get_ptr() retrieves the data for you.
+*/
+
+int ha_tina::rnd_pos(byte * buf, byte *pos)
+{
+  DBUG_ENTER("ha_tina::rnd_pos");
+  statistic_increment(ha_read_rnd_count,&LOCK_status);
+  current_position= ha_get_ptr(pos,ref_length);
+  DBUG_RETURN(find_current_row(buf));
+}
+
+/*
+  ::info() is used to return information to the optimizer.
+  Currently this table handler doesn't implement most of the fields
+  really needed. SHOW also makes use of this data.
+*/
+void ha_tina::info(uint flag)
+{
+  DBUG_ENTER("ha_tina::info");
+  /* This is a lie, but you don't want the optimizer to see zero or 1 */
+  if (records < 2)
+    records= 2;
+  DBUG_VOID_RETURN;
+}
+
+/*
+  Grab bag of flags that are sent to the table handler every so often.
+  HA_EXTRA_RESET and HA_EXTRA_RESET_STATE are the most frequently called.
+  You are not required to implement any of these.
+*/
+int ha_tina::extra(enum ha_extra_function operation)
+{
+  DBUG_ENTER("ha_tina::extra");
+  DBUG_RETURN(0);
+}
+
+/*
+  This is no longer used.
+*/
+int ha_tina::reset(void)
+{
+  DBUG_ENTER("ha_tina::reset");
+  ha_tina::extra(HA_EXTRA_RESET);
+  DBUG_RETURN(0);
+}
+
+
+/*
+  Called after deletes, inserts, and updates. This is where we clean up all
+  of the dead space we have collected while writing the file.
+*/
+int ha_tina::rnd_end()
+{
+  DBUG_ENTER("ha_tina::rnd_end");
+
+  /* First position will be truncate position, second will be increment */
+  if ((chain_ptr - chain) > 0)
+  {
+    tina_set *ptr;
+    off_t length;
+
+    /*
+      Set up a writable map; after the get_mmap call this will contain all
+      of the data we have added to the file.
+    */
+    if (get_mmap(share, 1) > 0)
+      DBUG_RETURN(-1);
+    length= share->file_stat.st_size;
+
+    /*
+      The sort handles updates/deletes with random orders.
+      It also sorts so that we move the final blocks to the
+      beginning so that we move the smallest amount of data possible.
+ */ + qsort(chain, (size_t)(chain_ptr - chain), sizeof(tina_set), (qsort_cmp)sort_set); + for (ptr= chain; ptr < chain_ptr; ptr++) + { + memmove(share->mapped_file + ptr->begin, share->mapped_file + ptr->end, + length - (size_t)ptr->end); + length= length - (size_t)(ptr->end - ptr->begin); + } + + /* Truncate the file to the new size */ + if (my_chsize(share->data_file, length, 0, MYF(MY_WME))) + DBUG_RETURN(-1); + + if (munmap(share->mapped_file, length)) + DBUG_RETURN(-1); + + /* We set it to null so that get_mmap() won't try to unmap it */ + share->mapped_file= NULL; + if (get_mmap(share, 0) > 0) + DBUG_RETURN(-1); + } + + DBUG_RETURN(0); +} + +/* + Truncate table and others of its ilk call this. +*/ +int ha_tina::delete_all_rows() +{ + DBUG_ENTER("ha_tina::delete_all_rows"); + + int rc= my_chsize(share->data_file, 0, 0, MYF(MY_WME)); + + if (get_mmap(share, 0) > 0) + DBUG_RETURN(-1); + + DBUG_RETURN(rc); +} + +/* + Always called by the start of a transaction (or by "lock tables"); +*/ +int ha_tina::external_lock(THD *thd, int lock_type) +{ + DBUG_ENTER("ha_tina::external_lock"); + DBUG_RETURN(0); // No external locking +} + +/* + Called by the database to lock the table. Keep in mind that this + is an internal lock. +*/ +THR_LOCK_DATA **ha_tina::store_lock(THD *thd, + THR_LOCK_DATA **to, + enum thr_lock_type lock_type) +{ + if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) + lock.type=lock_type; + *to++= &lock; + return to; +} + +/* + Range optimizer calls this. + I need to update the information on this. +*/ +ha_rows ha_tina::records_in_range(int inx, + const byte *start_key,uint start_key_len, + enum ha_rkey_function start_search_flag, + const byte *end_key,uint end_key_len, + enum ha_rkey_function end_search_flag) +{ + DBUG_ENTER("ha_tina::records_in_range "); + DBUG_RETURN(records); // Good guess +} + + +/* + Create a table. You do not want to leave the table open after a call to + this (the database will call ::open() if it needs to). +*/ + +int ha_tina::create(const char *name, TABLE *table_arg, HA_CREATE_INFO *create_info) +{ + char name_buff[FN_REFLEN]; + File create_file; + DBUG_ENTER("ha_tina::create"); + + if ((create_file= my_create(fn_format(name_buff,name,"",".CSV",MY_REPLACE_EXT|MY_UNPACK_FILENAME),0, + O_RDWR | O_TRUNC,MYF(MY_WME))) < 0) + DBUG_RETURN(-1); + + my_close(create_file,MYF(0)); + + DBUG_RETURN(0); +} + +#endif /* enable CSV */ diff --git a/sql/examples/ha_tina.h b/sql/examples/ha_tina.h new file mode 100644 index 00000000000..22193c01013 --- /dev/null +++ b/sql/examples/ha_tina.h @@ -0,0 +1,138 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include <sys/types.h> +#include <sys/stat.h> +#include <my_dir.h> + +#define DEFAULT_CHAIN_LENGTH 512 + +typedef struct st_tina_share { + char *table_name; + byte *mapped_file; /* mapped region of file */ + uint table_name_length,use_count; + MY_STAT file_stat; /* Stat information for the data file */ + File data_file; /* Current open data file */ + pthread_mutex_t mutex; + THR_LOCK lock; +} TINA_SHARE; + +typedef struct tina_set { + off_t begin; + off_t end; +}; + +class ha_tina: public handler +{ + THR_LOCK_DATA lock; /* MySQL lock */ + TINA_SHARE *share; /* Shared lock info */ + off_t current_position; /* Current position in the file during a file scan */ + off_t next_position; /* Next position in the file scan */ + byte byte_buffer[IO_SIZE]; + String buffer; + tina_set chain_buffer[DEFAULT_CHAIN_LENGTH]; + tina_set *chain; + tina_set *chain_ptr; + byte chain_alloced; + uint32 chain_size; + + public: + ha_tina(TABLE *table): handler(table), + /* + These definitions are found in hanler.h + Theses are not probably completely right. + */ + current_position(0), next_position(0), chain_alloced(0), chain_size(DEFAULT_CHAIN_LENGTH) + { + /* Set our original buffers from pre-allocated memory */ + buffer.set(byte_buffer, IO_SIZE, system_charset_info); + chain = chain_buffer; + } + ~ha_tina() + { + if (chain_alloced) + my_free((gptr)chain,0); + } + const char *table_type() const { return "CSV"; } + const char *index_type(uint inx) { return "NONE"; } + const char **bas_ext() const; + ulong table_flags() const + { + return (HA_REC_NOT_IN_SEQ | HA_NOT_EXACT_COUNT | + HA_NO_AUTO_INCREMENT ); + } + ulong index_flags(uint idx, uint part, bool all_parts) const + { + /* We will never have indexes so this will never be called(AKA we return zero) */ + return 0; + } + uint max_record_length() const { return HA_MAX_REC_LENGTH; } + uint max_keys() const { return 0; } + uint max_key_parts() const { return 0; } + uint max_key_length() const { return 0; } + /* + Called in test_quick_select to determine if indexes should be used. + */ + virtual double scan_time() { return (double) (records+deleted) / 20.0+10; } + /* The next method will never be called */ + virtual double read_time(ha_rows rows) { DBUG_ASSERT(0); return((double) rows / 20.0+1); } + virtual bool fast_key_read() { return 1;} + /* + TODO: return actual upper bound of number of records in the table. + (e.g. 
save number of records seen on full table scan and/or use file size + as upper bound) + */ + ha_rows estimate_rows_upper_bound() { return HA_POS_ERROR; } + + int open(const char *name, int mode, uint test_if_locked); + int close(void); + int write_row(byte * buf); + int update_row(const byte * old_data, byte * new_data); + int delete_row(const byte * buf); + int index_read(byte * buf, const byte * key, + uint key_len, enum ha_rkey_function find_flag); + int index_read_idx(byte * buf, uint idx, const byte * key, + uint key_len, enum ha_rkey_function find_flag); + int index_next(byte * buf); + int index_prev(byte * buf); + int index_first(byte * buf); + int index_last(byte * buf); + int rnd_init(bool scan=1); + int rnd_next(byte *buf); + int rnd_pos(byte * buf, byte *pos); + int rnd_end(); + void position(const byte *record); + void info(uint); + int extra(enum ha_extra_function operation); + int reset(void); + int external_lock(THD *thd, int lock_type); + int delete_all_rows(void); + ha_rows records_in_range(int inx, const byte *start_key,uint start_key_len, + enum ha_rkey_function start_search_flag, + const byte *end_key,uint end_key_len, + enum ha_rkey_function end_search_flag); +// int delete_table(const char *from); +// int rename_table(const char * from, const char * to); + int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info); + + THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, + enum thr_lock_type lock_type); + + /* The following methods were added just for TINA */ + int encode_quote(byte *buf); + int find_current_row(byte *buf); + int chain_append(); +}; diff --git a/sql/field.cc b/sql/field.cc index 59a71a93d68..a64eaad7308 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -15,21 +15,11 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -/* - NOTES: - Some of the number class uses the system functions strtol(), strtoll()... - To avoid patching the end \0 or copying the buffer unnecessary, all calls - to system functions are wrapped to a String object that adds the end null - if it only if it isn't there. - This adds some overhead when assigning numbers from strings but makes - everything simpler. - */ - /***************************************************************************** ** This file implements classes defined in field.h *****************************************************************************/ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_IMPLEMENTATION #pragma implementation // gcc: Class implementation #endif @@ -37,7 +27,6 @@ #include "sql_select.h" #include <m_ctype.h> #include <errno.h> -#include <assert.h> #ifdef HAVE_FCONVERT #include <floatingpoint.h> #endif @@ -45,8 +34,6 @@ // Maximum allowed exponent value for converting string to decimal #define MAX_EXPONENT 1024 - - /***************************************************************************** Instansiate templates and static variables *****************************************************************************/ @@ -60,6 +47,795 @@ uchar Field_null::null[1]={1}; const char field_separator=','; #define DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE 320 +#define BLOB_PACK_LENGTH_TO_MAX_LENGH(arg) \ +((ulong) ((LL(1) << min(arg, 4) * 8) - LL(1))) + +/* + Rules for merging different types of fields in UNION + + NOTE: to avoid 256*256 table, gap in table types numeration is skiped + following #defines describe that gap and how to canculate number of fields + and index of field in thia array. 
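  As an illustration (assuming the usual 4.1 protocol type codes, where
  MYSQL_TYPE_NEWDATE is 14, MYSQL_TYPE_ENUM is 247 and MYSQL_TYPE_GEOMETRY
  is 255): FIELDTYPE_TEAR_FROM becomes 15, FIELDTYPE_TEAR_TO becomes 246
  and FIELDTYPE_NUM becomes 24, so field_type2index(MYSQL_TYPE_LONG) == 3
  (types below the gap map to themselves), while
  field_type2index(MYSQL_TYPE_ENUM) == 15 + (247 - 246) - 1 == 15; the
  unused codes between NEWDATE and ENUM are simply collapsed.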
+*/ +#define FIELDTYPE_TEAR_FROM (MYSQL_TYPE_NEWDATE+1) +#define FIELDTYPE_TEAR_TO (MYSQL_TYPE_ENUM-1) +#define FIELDTYPE_NUM (FIELDTYPE_TEAR_FROM + (255-FIELDTYPE_TEAR_TO)) +inline int field_type2index (enum_field_types field_type) +{ + return (field_type < FIELDTYPE_TEAR_FROM ? + field_type : + ((int)FIELDTYPE_TEAR_FROM) + (field_type - FIELDTYPE_TEAR_TO) - 1); +} + +static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]= +{ + /* MYSQL_TYPE_DECIMAL -> */ + { + //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY + MYSQL_TYPE_DECIMAL, MYSQL_TYPE_DECIMAL, + //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG + MYSQL_TYPE_DECIMAL, MYSQL_TYPE_DECIMAL, + //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE + MYSQL_TYPE_DOUBLE, MYSQL_TYPE_DOUBLE, + //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP + MYSQL_TYPE_DECIMAL, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 + MYSQL_TYPE_DECIMAL, MYSQL_TYPE_DECIMAL, + //MYSQL_TYPE_DATE MYSQL_TYPE_TIME + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_NEWDATE <14> + MYSQL_TYPE_VAR_STRING, + //<246> MYSQL_TYPE_ENUM + MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY + MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + }, + /* MYSQL_TYPE_TINY -> */ + { + //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY + MYSQL_TYPE_DECIMAL, MYSQL_TYPE_TINY, + //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG + MYSQL_TYPE_SHORT, MYSQL_TYPE_LONG, + //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE + MYSQL_TYPE_FLOAT, MYSQL_TYPE_DOUBLE, + //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP + MYSQL_TYPE_TINY, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 + MYSQL_TYPE_LONGLONG, MYSQL_TYPE_INT24, + //MYSQL_TYPE_DATE MYSQL_TYPE_TIME + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY, + //MYSQL_TYPE_NEWDATE <14> + MYSQL_TYPE_VAR_STRING, + //<246> MYSQL_TYPE_ENUM + MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY + MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + }, + /* MYSQL_TYPE_SHORT -> */ + { + //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY + MYSQL_TYPE_DECIMAL, MYSQL_TYPE_SHORT, + //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG + MYSQL_TYPE_SHORT, MYSQL_TYPE_LONG, + //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE + MYSQL_TYPE_FLOAT, MYSQL_TYPE_DOUBLE, + //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP + MYSQL_TYPE_SHORT, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 + MYSQL_TYPE_LONGLONG, MYSQL_TYPE_INT24, + //MYSQL_TYPE_DATE MYSQL_TYPE_TIME + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_SHORT, + //MYSQL_TYPE_NEWDATE <14> + MYSQL_TYPE_VAR_STRING, + //<246> MYSQL_TYPE_ENUM + MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + 
//MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY + MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + }, + /* MYSQL_TYPE_LONG -> */ + { + //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY + MYSQL_TYPE_DECIMAL, MYSQL_TYPE_LONG, + //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG + MYSQL_TYPE_LONG, MYSQL_TYPE_LONG, + //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE + MYSQL_TYPE_DOUBLE, MYSQL_TYPE_DOUBLE, + //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP + MYSQL_TYPE_LONG, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 + MYSQL_TYPE_LONGLONG, MYSQL_TYPE_LONG, + //MYSQL_TYPE_DATE MYSQL_TYPE_TIME + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_LONG, + //MYSQL_TYPE_NEWDATE <14> + MYSQL_TYPE_VAR_STRING, + //<246> MYSQL_TYPE_ENUM + MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY + MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + }, + /* MYSQL_TYPE_FLOAT -> */ + { + //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY + MYSQL_TYPE_DOUBLE, MYSQL_TYPE_FLOAT, + //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG + MYSQL_TYPE_FLOAT, MYSQL_TYPE_DOUBLE, + //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE + MYSQL_TYPE_FLOAT, MYSQL_TYPE_DOUBLE, + //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP + MYSQL_TYPE_FLOAT, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 + MYSQL_TYPE_DOUBLE, MYSQL_TYPE_FLOAT, + //MYSQL_TYPE_DATE MYSQL_TYPE_TIME + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_FLOAT, + //MYSQL_TYPE_NEWDATE <14> + MYSQL_TYPE_VAR_STRING, + //<246> MYSQL_TYPE_ENUM + MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY + MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + }, + /* MYSQL_TYPE_DOUBLE -> */ + { + //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY + MYSQL_TYPE_DOUBLE, MYSQL_TYPE_DOUBLE, + //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG + MYSQL_TYPE_DOUBLE, MYSQL_TYPE_DOUBLE, + //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE + MYSQL_TYPE_DOUBLE, MYSQL_TYPE_DOUBLE, + //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP + MYSQL_TYPE_DOUBLE, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 + MYSQL_TYPE_DOUBLE, MYSQL_TYPE_DOUBLE, + //MYSQL_TYPE_DATE MYSQL_TYPE_TIME + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_DOUBLE, + //MYSQL_TYPE_NEWDATE <14> + MYSQL_TYPE_VAR_STRING, + //<246> MYSQL_TYPE_ENUM + MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY + MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + }, + /* MYSQL_TYPE_NULL -> */ + { + //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY + MYSQL_TYPE_DECIMAL, MYSQL_TYPE_TINY, + //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG + MYSQL_TYPE_SHORT, MYSQL_TYPE_LONG, + //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE + MYSQL_TYPE_FLOAT, MYSQL_TYPE_DOUBLE, + //MYSQL_TYPE_NULL 
MYSQL_TYPE_TIMESTAMP + MYSQL_TYPE_NULL, MYSQL_TYPE_TIMESTAMP, + //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 + MYSQL_TYPE_LONGLONG, MYSQL_TYPE_INT24, + //MYSQL_TYPE_DATE MYSQL_TYPE_TIME + MYSQL_TYPE_NEWDATE, MYSQL_TYPE_TIME, + //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR + MYSQL_TYPE_DATETIME, MYSQL_TYPE_YEAR, + //MYSQL_TYPE_NEWDATE <14> + MYSQL_TYPE_NEWDATE, + //<246> MYSQL_TYPE_ENUM + MYSQL_TYPE_ENUM, + //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB + MYSQL_TYPE_SET, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY + MYSQL_TYPE_STRING, MYSQL_TYPE_GEOMETRY + }, + /* MYSQL_TYPE_TIMESTAMP -> */ + { + //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP + MYSQL_TYPE_TIMESTAMP, MYSQL_TYPE_TIMESTAMP, + //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_DATE MYSQL_TYPE_TIME + MYSQL_TYPE_DATETIME, MYSQL_TYPE_DATETIME, + //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR + MYSQL_TYPE_DATETIME, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_NEWDATE <14> + MYSQL_TYPE_DATETIME, + //<246> MYSQL_TYPE_ENUM + MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY + MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + }, + /* MYSQL_TYPE_LONGLONG -> */ + { + //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY + MYSQL_TYPE_DECIMAL, MYSQL_TYPE_LONGLONG, + //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG + MYSQL_TYPE_LONGLONG, MYSQL_TYPE_LONGLONG, + //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE + MYSQL_TYPE_DOUBLE, MYSQL_TYPE_DOUBLE, + //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP + MYSQL_TYPE_LONGLONG, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 + MYSQL_TYPE_LONGLONG, MYSQL_TYPE_LONG, + //MYSQL_TYPE_DATE MYSQL_TYPE_TIME + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_LONGLONG, + //MYSQL_TYPE_NEWDATE <14> + MYSQL_TYPE_VAR_STRING, + //<246> MYSQL_TYPE_ENUM + MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY + MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + }, + /* MYSQL_TYPE_INT24 -> */ + { + //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY + MYSQL_TYPE_DECIMAL, MYSQL_TYPE_INT24, + //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG + MYSQL_TYPE_INT24, MYSQL_TYPE_LONG, + //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE + MYSQL_TYPE_FLOAT, MYSQL_TYPE_DOUBLE, + //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP + MYSQL_TYPE_INT24, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 + MYSQL_TYPE_LONGLONG, MYSQL_TYPE_INT24, + //MYSQL_TYPE_DATE MYSQL_TYPE_TIME + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_INT24, + //MYSQL_TYPE_NEWDATE <14> + 
MYSQL_TYPE_VAR_STRING, + //<246> MYSQL_TYPE_ENUM + MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY + MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + }, + /* MYSQL_TYPE_DATE -> */ + { + //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP + MYSQL_TYPE_NEWDATE, MYSQL_TYPE_DATETIME, + //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_DATE MYSQL_TYPE_TIME + MYSQL_TYPE_NEWDATE, MYSQL_TYPE_DATETIME, + //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR + MYSQL_TYPE_DATETIME, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_NEWDATE <14> + MYSQL_TYPE_NEWDATE, + //<246> MYSQL_TYPE_ENUM + MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY + MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + }, + /* MYSQL_TYPE_TIME -> */ + { + //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP + MYSQL_TYPE_TIME, MYSQL_TYPE_DATETIME, + //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_DATE MYSQL_TYPE_TIME + MYSQL_TYPE_DATETIME, MYSQL_TYPE_TIME, + //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR + MYSQL_TYPE_DATETIME, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_NEWDATE <14> + MYSQL_TYPE_DATETIME, + //<246> MYSQL_TYPE_ENUM + MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY + MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + }, + /* MYSQL_TYPE_DATETIME -> */ + { + //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP + MYSQL_TYPE_DATETIME, MYSQL_TYPE_DATETIME, + //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_DATE MYSQL_TYPE_TIME + MYSQL_TYPE_DATETIME, MYSQL_TYPE_DATETIME, + //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR + MYSQL_TYPE_DATETIME, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_NEWDATE <14> + MYSQL_TYPE_DATETIME, + //<246> MYSQL_TYPE_ENUM + MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_BLOB 
MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY + MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + }, + /* MYSQL_TYPE_YEAR -> */ + { + //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY + MYSQL_TYPE_DECIMAL, MYSQL_TYPE_TINY, + //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG + MYSQL_TYPE_SHORT, MYSQL_TYPE_LONG, + //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE + MYSQL_TYPE_FLOAT, MYSQL_TYPE_DOUBLE, + //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP + MYSQL_TYPE_YEAR, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 + MYSQL_TYPE_LONGLONG, MYSQL_TYPE_INT24, + //MYSQL_TYPE_DATE MYSQL_TYPE_TIME + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_YEAR, + //MYSQL_TYPE_NEWDATE <14> + MYSQL_TYPE_VAR_STRING, + //<246> MYSQL_TYPE_ENUM + MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY + MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + }, + /* MYSQL_TYPE_NEWDATE -> */ + { + //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP + MYSQL_TYPE_NEWDATE, MYSQL_TYPE_DATETIME, + //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_DATE MYSQL_TYPE_TIME + MYSQL_TYPE_NEWDATE, MYSQL_TYPE_DATETIME, + //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR + MYSQL_TYPE_DATETIME, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_NEWDATE <14> + MYSQL_TYPE_NEWDATE, + //<246> MYSQL_TYPE_ENUM + MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY + MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + }, + /* MYSQL_TYPE_ENUM -> */ + { + //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP + MYSQL_TYPE_ENUM, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_DATE MYSQL_TYPE_TIME + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_NEWDATE <14> + MYSQL_TYPE_VAR_STRING, + //<246> MYSQL_TYPE_ENUM + MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY + MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + }, + /* MYSQL_TYPE_SET -> */ + { + //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG + 
MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP + MYSQL_TYPE_SET, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_DATE MYSQL_TYPE_TIME + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_NEWDATE <14> + MYSQL_TYPE_VAR_STRING, + //<246> MYSQL_TYPE_ENUM + MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY + MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + }, + /* MYSQL_TYPE_TINY_BLOB -> */ + { + //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY + MYSQL_TYPE_TINY_BLOB, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG + MYSQL_TYPE_TINY_BLOB, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE + MYSQL_TYPE_TINY_BLOB, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP + MYSQL_TYPE_TINY_BLOB, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 + MYSQL_TYPE_TINY_BLOB, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_DATE MYSQL_TYPE_TIME + MYSQL_TYPE_TINY_BLOB, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR + MYSQL_TYPE_TINY_BLOB, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_NEWDATE <14> + MYSQL_TYPE_TINY_BLOB, + //<246> MYSQL_TYPE_ENUM + MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB + MYSQL_TYPE_TINY_BLOB, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_BLOB, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY + MYSQL_TYPE_TINY_BLOB, MYSQL_TYPE_TINY_BLOB + }, + /* MYSQL_TYPE_MEDIUM_BLOB -> */ + { + //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_MEDIUM_BLOB, + //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_MEDIUM_BLOB, + //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_MEDIUM_BLOB, + //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_MEDIUM_BLOB, + //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_MEDIUM_BLOB, + //MYSQL_TYPE_DATE MYSQL_TYPE_TIME + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_MEDIUM_BLOB, + //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_MEDIUM_BLOB, + //MYSQL_TYPE_NEWDATE <14> + MYSQL_TYPE_MEDIUM_BLOB, + //<246> MYSQL_TYPE_ENUM + MYSQL_TYPE_MEDIUM_BLOB, + //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_MEDIUM_BLOB, + //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_MEDIUM_BLOB, + //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_MEDIUM_BLOB + }, + /* MYSQL_TYPE_LONG_BLOB -> */ + { + //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY + MYSQL_TYPE_LONG_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG + MYSQL_TYPE_LONG_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE + MYSQL_TYPE_LONG_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP + MYSQL_TYPE_LONG_BLOB, MYSQL_TYPE_LONG_BLOB, + 
//MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 + MYSQL_TYPE_LONG_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_DATE MYSQL_TYPE_TIME + MYSQL_TYPE_LONG_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR + MYSQL_TYPE_LONG_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_NEWDATE <14> + MYSQL_TYPE_LONG_BLOB, + //<246> MYSQL_TYPE_ENUM + MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB + MYSQL_TYPE_LONG_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB + MYSQL_TYPE_LONG_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_LONG_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY + MYSQL_TYPE_LONG_BLOB, MYSQL_TYPE_LONG_BLOB + }, + /* MYSQL_TYPE_BLOB -> */ + { + //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY + MYSQL_TYPE_BLOB, MYSQL_TYPE_BLOB, + //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG + MYSQL_TYPE_BLOB, MYSQL_TYPE_BLOB, + //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE + MYSQL_TYPE_BLOB, MYSQL_TYPE_BLOB, + //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP + MYSQL_TYPE_BLOB, MYSQL_TYPE_BLOB, + //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 + MYSQL_TYPE_BLOB, MYSQL_TYPE_BLOB, + //MYSQL_TYPE_DATE MYSQL_TYPE_TIME + MYSQL_TYPE_BLOB, MYSQL_TYPE_BLOB, + //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR + MYSQL_TYPE_BLOB, MYSQL_TYPE_BLOB, + //MYSQL_TYPE_NEWDATE <14> + MYSQL_TYPE_BLOB, + //<246> MYSQL_TYPE_ENUM + MYSQL_TYPE_BLOB, + //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB + MYSQL_TYPE_BLOB, MYSQL_TYPE_BLOB, + //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_BLOB, MYSQL_TYPE_BLOB, + //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY + MYSQL_TYPE_BLOB, MYSQL_TYPE_BLOB + }, + /* MYSQL_TYPE_VAR_STRING -> */ + { + //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_DATE MYSQL_TYPE_TIME + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_NEWDATE <14> + MYSQL_TYPE_VAR_STRING, + //<246> MYSQL_TYPE_ENUM + MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING + }, + /* MYSQL_TYPE_STRING -> */ + { + //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY + MYSQL_TYPE_STRING, MYSQL_TYPE_STRING, + //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG + MYSQL_TYPE_STRING, MYSQL_TYPE_STRING, + //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE + MYSQL_TYPE_STRING, MYSQL_TYPE_STRING, + //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP + MYSQL_TYPE_STRING, MYSQL_TYPE_STRING, + //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 + MYSQL_TYPE_STRING, MYSQL_TYPE_STRING, + //MYSQL_TYPE_DATE MYSQL_TYPE_TIME + MYSQL_TYPE_STRING, MYSQL_TYPE_STRING, + //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR + MYSQL_TYPE_STRING, MYSQL_TYPE_STRING, + //MYSQL_TYPE_NEWDATE <14> + MYSQL_TYPE_STRING, + //<246> MYSQL_TYPE_ENUM + MYSQL_TYPE_STRING, + //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB + MYSQL_TYPE_STRING, 
MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY + MYSQL_TYPE_STRING, MYSQL_TYPE_STRING + }, + /* MYSQL_TYPE_GEOMETRY -> */ + { + //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP + MYSQL_TYPE_GEOMETRY, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_DATE MYSQL_TYPE_TIME + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_NEWDATE <14> + MYSQL_TYPE_VAR_STRING, + //<246> MYSQL_TYPE_ENUM + MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB + MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY + MYSQL_TYPE_STRING, MYSQL_TYPE_GEOMETRY + } +}; + +/* + Return type of which can carry value of both given types in UNION result + + SYNOPSIS + Field::field_type_merge() + a, b types for merging + + RETURN + type of field +*/ + +enum_field_types Field::field_type_merge(enum_field_types a, + enum_field_types b) +{ + DBUG_ASSERT(a < FIELDTYPE_TEAR_FROM || a > FIELDTYPE_TEAR_TO); + DBUG_ASSERT(b < FIELDTYPE_TEAR_FROM || b > FIELDTYPE_TEAR_TO); + return field_types_merge_rules[field_type2index(a)] + [field_type2index(b)]; +} + + +static Item_result field_types_result_type [FIELDTYPE_NUM]= +{ + //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY + REAL_RESULT, INT_RESULT, + //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG + INT_RESULT, INT_RESULT, + //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE + REAL_RESULT, REAL_RESULT, + //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP + STRING_RESULT, STRING_RESULT, + //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 + INT_RESULT, INT_RESULT, + //MYSQL_TYPE_DATE MYSQL_TYPE_TIME + STRING_RESULT, STRING_RESULT, + //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR + STRING_RESULT, INT_RESULT, + //MYSQL_TYPE_NEWDATE <14> + STRING_RESULT, + //<246> MYSQL_TYPE_ENUM + STRING_RESULT, + //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB + STRING_RESULT, STRING_RESULT, + //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB + STRING_RESULT, STRING_RESULT, + //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING + STRING_RESULT, STRING_RESULT, + //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY + STRING_RESULT, STRING_RESULT +}; + + +/* + Detect Item_result by given field type of UNION merge result + + SYNOPSIS + Field::result_merge_type() + field_type given field type + + RETURN + Item_result (type of internal MySQL expression result) +*/ + +Item_result Field::result_merge_type(enum_field_types field_type) +{ + DBUG_ASSERT(field_type < FIELDTYPE_TEAR_FROM || field_type + > FIELDTYPE_TEAR_TO); + return field_types_result_type[field_type2index(field_type)]; +} /***************************************************************************** Static help functions @@ -80,44 +856,54 @@ void Field_num::prepend_zeros(String *value) /* Test if given number is a int (or a fixed format float with .000) - This is only used to give warnings in ALTER TABLE or LOAD DATA... 
+ + SYNOPSIS + test_if_int() + str String to test + end Pointer to char after last used digit + cs Character set + + NOTES + This is called after one has called my_strntol() or similar function. + This is only used to give warnings in ALTER TABLE or LOAD DATA... + + TODO + Make this multi-byte-character safe + + RETURN + 0 ok + 1 error */ -bool test_if_int(const char *str,int length) +bool test_if_int(const char *str, int length, const char *int_end, + CHARSET_INFO *cs) { + if (str == int_end) + return 0; // Empty string const char *end=str+length; + if ((str= int_end) == end) + return 1; // All digits was used - while (str != end && isspace(*str)) // Allow start space - str++; /* purecov: inspected */ - if (str != end && (*str == '-' || *str == '+')) - str++; - if (str == end) - return 0; // Error: Empty string - for (; str != end ; str++) + /* Allow end .0000 */ + if (*str == '.') { - if (!isdigit(*str)) - { - if (*str == '.') - { // Allow '.0000' - for (str++ ; str != end && *str == '0'; str++) ; - if (str == end) - return 1; - } - if (!isspace(*str)) - return 0; - for (str++ ; str != end ; str++) - if (!isspace(*str)) - return 0; - return 1; - } + for (str++ ; str != end && *str == '0'; str++) ; + } + /* Allow end space */ + for (str++ ; str != end ; str++) + { + if (!my_isspace(cs,*str)) + return 0; } return 1; } - -static bool test_if_real(const char *str,int length) +#ifdef NOT_USED +static bool test_if_real(const char *str,int length, CHARSET_INFO *cs) { - while (length && isspace(*str)) + cs= system_charset_info; // QQ move test_if_real into CHARSET_INFO struct + + while (length && my_isspace(cs,*str)) { // Allow start space length--; str++; } @@ -126,10 +912,10 @@ static bool test_if_real(const char *str,int length) if (*str == '+' || *str == '-') { length--; str++; - if (!length || !(isdigit(*str) || *str == '.')) + if (!length || !(my_isdigit(cs,*str) || *str == '.')) return 0; } - while (length && isdigit(*str)) + while (length && my_isdigit(cs,*str)) { length--; str++; } @@ -138,7 +924,7 @@ static bool test_if_real(const char *str,int length) if (*str == '.') { length--; str++; - while (length && isdigit(*str)) + while (length && my_isdigit(cs,*str)) { length--; str++; } @@ -147,32 +933,26 @@ static bool test_if_real(const char *str,int length) return 1; if (*str == 'E' || *str == 'e') { - if (length < 3 || (str[1] != '+' && str[1] != '-') || !isdigit(str[2])) + if (length < 3 || (str[1] != '+' && str[1] != '-') || + !my_isdigit(cs,str[2])) return 0; length-=3; str+=3; - while (length && isdigit(*str)) + while (length && my_isdigit(cs,*str)) { length--; str++; } } for (; length ; length--, str++) { // Allow end space - if (!isspace(*str)) + if (!my_isspace(cs,*str)) return 0; } return 1; } +#endif -static inline uint field_length_without_space(const char *ptr, uint length) -{ - const char *end= ptr+length; - while (end > ptr && end[-1] == ' ') - end--; - return (uint) (end-ptr); -} - /**************************************************************************** ** Functions for the base classes ** This is an unpacked number. @@ -183,13 +963,16 @@ Field::Field(char *ptr_arg,uint32 length_arg,uchar *null_ptr_arg, utype unireg_check_arg, const char *field_name_arg, struct st_table *table_arg) :ptr(ptr_arg),null_ptr(null_ptr_arg), - table(table_arg),table_name(table_arg ? table_arg->table_name : 0), + table(table_arg),orig_table(table_arg), + table_name(table_arg ? 
table_arg->table_name : 0), field_name(field_name_arg), - query_id(0),key_start(0),part_of_key(0),part_of_sortkey(0), + query_id(0), key_start(0), part_of_key(0), part_of_sortkey(0), unireg_check(unireg_check_arg), field_length(length_arg),null_bit(null_bit_arg) { flags=null_ptr ? 0: NOT_NULL_FLAG; + comment.str= (char*) ""; + comment.length=0; } uint Field::offset() @@ -209,23 +992,17 @@ void Field::copy_from_tmp(int row_offset) } -bool Field::send(THD *thd, String *packet) +bool Field::send_binary(Protocol *protocol) { - if (is_null()) - return net_store_null(packet); char buff[MAX_FIELD_WIDTH]; - String tmp(buff,sizeof(buff)); - val_str(&tmp,&tmp); - CONVERT *convert; - if ((convert=thd->variables.convert_set)) - return convert->store(packet,tmp.ptr(),tmp.length()); - return net_store_data(packet,tmp.ptr(),tmp.length()); + String tmp(buff,sizeof(buff),charset()); + val_str(&tmp); + return protocol->store(tmp.ptr(), tmp.length(), tmp.charset()); } void Field_num::add_zerofill_and_unsigned(String &res) const { - res.length((uint) strlen(res.ptr())); // Fix length if (unsigned_flag) res.append(" unsigned"); if (zerofill) @@ -234,8 +1011,19 @@ void Field_num::add_zerofill_and_unsigned(String &res) const void Field_num::make_field(Send_field *field) { - field->table_name=table_name; - field->col_name=field_name; + /* table_cache_key is not set for temp tables */ + if (orig_table->table_cache_key) + { + field->db_name= orig_table->table_cache_key; + field->org_table_name= orig_table->real_name; + } + else + { + field->db_name= field->org_table_name= ""; + } + field->table_name= orig_table->table_name; + field->col_name=field->org_col_name=field_name; + field->charsetnr= charset()->number; field->length=field_length; field->type=type(); field->flags=table->maybe_null ? (flags & ~NOT_NULL_FLAG) : flags; @@ -245,8 +1033,19 @@ void Field_num::make_field(Send_field *field) void Field_str::make_field(Send_field *field) { - field->table_name=table_name; - field->col_name=field_name; + /* table_cache_key is not set for temp tables */ + if (orig_table->table_cache_key) + { + field->db_name= orig_table->table_cache_key; + field->org_table_name= orig_table->real_name; + } + else + { + field->db_name= field->org_table_name= ""; + } + field->table_name= orig_table->table_name; + field->col_name=field->org_col_name=field_name; + field->charsetnr= charset()->number; field->length=field_length; field->type=type(); field->flags=table->maybe_null ? 
(flags & ~NOT_NULL_FLAG) : flags; @@ -256,6 +1055,7 @@ void Field_str::make_field(Send_field *field) uint Field::fill_cache_field(CACHE_FIELD *copy) { + uint store_length; copy->str=ptr; copy->length=pack_length(); copy->blob_field=0; @@ -268,18 +1068,26 @@ uint Field::fill_cache_field(CACHE_FIELD *copy) } else if (!zero_pack() && (type() == FIELD_TYPE_STRING && copy->length > 4 || type() == FIELD_TYPE_VAR_STRING)) + { copy->strip=1; /* Remove end space */ + store_length= 2; + } else + { copy->strip=0; - return copy->length+(int) copy->strip; + store_length= 0; + } + return copy->length+ store_length; } -bool Field::get_date(TIME *ltime,bool fuzzydate) + +bool Field::get_date(TIME *ltime,uint fuzzydate) { char buff[40]; - String tmp(buff,sizeof(buff)),tmp2,*res; - if (!(res=val_str(&tmp,&tmp2)) || - str_to_TIME(res->ptr(),res->length(),ltime,fuzzydate) == TIMESTAMP_NONE) + String tmp(buff,sizeof(buff),&my_charset_bin),*res; + if (!(res=val_str(&tmp)) || + str_to_datetime_with_warn(res->ptr(), res->length(), + ltime, fuzzydate) <= MYSQL_TIMESTAMP_ERROR) return 1; return 0; } @@ -287,47 +1095,44 @@ bool Field::get_date(TIME *ltime,bool fuzzydate) bool Field::get_time(TIME *ltime) { char buff[40]; - String tmp(buff,sizeof(buff)),tmp2,*res; - if (!(res=val_str(&tmp,&tmp2)) || - str_to_time(res->ptr(),res->length(),ltime)) + String tmp(buff,sizeof(buff),&my_charset_bin),*res; + if (!(res=val_str(&tmp)) || + str_to_time_with_warn(res->ptr(), res->length(), ltime)) return 1; return 0; } +/* + This is called when storing a date in a string + + NOTES + Needs to be changed if/when we want to support different time formats +*/ -/* This is called when storing a date in a string */ void Field::store_time(TIME *ltime,timestamp_type type) { - char buff[25]; - switch (type) { - case TIMESTAMP_NONE: - store("",0); // Probably an error - break; - case TIMESTAMP_DATE: - sprintf(buff,"%04d-%02d-%02d", ltime->year,ltime->month,ltime->day); - store(buff,10); - break; - case TIMESTAMP_FULL: - sprintf(buff,"%04d-%02d-%02d %02d:%02d:%02d", - ltime->year,ltime->month,ltime->day, - ltime->hour,ltime->minute,ltime->second); - store(buff,19); - break; - case TIMESTAMP_TIME: - sprintf(buff, "%02d:%02d:%02d", - ltime->hour,ltime->minute,ltime->second); - store(buff,(uint) strlen(buff)); - break; - } + char buff[MAX_DATE_STRING_REP_LENGTH]; + uint length= (uint) my_TIME_to_str(ltime, buff); + store(buff, length, &my_charset_bin); } -bool Field::optimize_range(uint idx) +bool Field::optimize_range(uint idx, uint part) { - return test(table->file->index_flags(idx) & HA_READ_NEXT); + return test(table->file->index_flags(idx, part, 1) & HA_READ_RANGE); } /**************************************************************************** + Field_null, a field that always return NULL +****************************************************************************/ + +void Field_null::sql_type(String &res) const +{ + res.set_ascii("null", 4); +} + + +/**************************************************************************** Functions for the Field_decimal class This is an number stored as a pre-space (or pre-zero) string ****************************************************************************/ @@ -335,7 +1140,7 @@ bool Field::optimize_range(uint idx) void Field_decimal::reset(void) { - Field_decimal::store("0",1); + Field_decimal::store("0",1,&my_charset_bin); } void Field_decimal::overflow(bool negative) @@ -343,7 +1148,7 @@ void Field_decimal::overflow(bool negative) uint len=field_length; char *to=ptr, filler= '9'; - 
current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); if (negative) { if (!unsigned_flag) @@ -377,8 +1182,20 @@ void Field_decimal::overflow(bool negative) } -void Field_decimal::store(const char *from,uint len) +int Field_decimal::store(const char *from, uint len, CHARSET_INFO *cs) { + char buff[80]; + String tmp(buff,sizeof(buff), &my_charset_bin); + + /* Convert character set if the old one is multi byte */ + if (cs->mbmaxlen > 1) + { + uint dummy_errors; + tmp.copy(from, len, cs, &my_charset_bin, &dummy_errors); + from= tmp.ptr(); + len= tmp.length(); + } + const char *end= from+len; /* The pointer where the field value starts (i.e., "where to write") */ char *to=ptr; @@ -388,13 +1205,13 @@ void Field_decimal::store(const char *from,uint len) specified), '+' or '-' */ char sign_char=0; - /* The pointers where prezeros start and stop */ + /* The pointers where prezeros start and stop */ const char *pre_zeros_from, *pre_zeros_end; - /* The pointers where digits at the left of '.' start and stop */ + /* The pointers where digits at the left of '.' start and stop */ const char *int_digits_from, *int_digits_end; - /* The pointers where digits at the right of '.' start and stop */ + /* The pointers where digits at the right of '.' start and stop */ const char *frac_digits_from, *frac_digits_end; - /* The sign of the exponent : will be 0 (means no exponent), '+' or '-' */ + /* The sign of the exponent : will be 0 (means no exponent), '+' or '-' */ char expo_sign_char=0; uint exponent=0; // value of the exponent /* @@ -404,21 +1221,21 @@ void Field_decimal::store(const char *from,uint len) const char *int_digits_tail_from; /* Number of 0 that need to be added at the left of the '.' (1E3: 3 zeros) */ uint int_digits_added_zeros; - /* - Pointer used when digits move from the right of the '.' to the left - of the '.' - */ + /* + Pointer used when digits move from the right of the '.' to the left + of the '.' + */ const char *frac_digits_head_end; - /* Number of 0 that need to be added at the right of the '.' (for 1E-3) */ + /* Number of 0 that need to be added at the right of the '.' (for 1E-3) */ uint frac_digits_added_zeros; char *pos,*tmp_left_pos,*tmp_right_pos; /* Pointers that are used as limits (begin and end of the field buffer) */ char *left_wall,*right_wall; char tmp_char; - /* - To remember if current_thd->cuted_fields has already been incremented, - to do that only once - */ + /* + To remember if table->in_use->cuted_fields has already been incremented, + to do that only once + */ bool is_cuted_fields_incr=0; LINT_INIT(int_digits_tail_from); @@ -428,19 +1245,21 @@ void Field_decimal::store(const char *from,uint len) /* There are three steps in this function : - - parse the input string - - modify the position of digits around the decimal dot '.' - according to the exponent value (if specified) - - write the formatted number + - parse the input string + - modify the position of digits around the decimal dot '.' + according to the exponent value (if specified) + - write the formatted number */ if ((tmp_dec=dec)) tmp_dec++; - for (; from !=end && isspace(*from); from++) ; // Read spaces + /* skip pre-space */ + while (from != end && my_isspace(&my_charset_bin,*from)) + from++; if (from == end) { - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); is_cuted_fields_incr=1; } else if (*from == '+' || *from == '-') // Found some sign ? 
@@ -456,11 +1275,11 @@ void Field_decimal::store(const char *from,uint len) if (sign_char=='-') { Field_decimal::overflow(1); - return; + return 1; } /* - Defining this will not store "+" for unsigned decimal type even if - it is passed in numeric string. This will make some tests to fail + Defining this will not store "+" for unsigned decimal type even if + it is passed in numeric string. This will make some tests to fail */ #ifdef DONT_ALLOW_UNSIGNED_PLUS else @@ -473,13 +1292,13 @@ void Field_decimal::store(const char *from,uint len) for (; from!=end && *from == '0'; from++) ; // Read prezeros pre_zeros_end=int_digits_from=from; /* Read non zero digits at the left of '.'*/ - for (; from!=end && isdigit(*from);from++) ; + for (; from != end && my_isdigit(&my_charset_bin, *from) ; from++) ; int_digits_end=from; if (from!=end && *from == '.') // Some '.' ? from++; frac_digits_from= from; /* Read digits at the right of '.' */ - for (;from!=end && isdigit(*from); from++) ; + for (;from!=end && my_isdigit(&my_charset_bin, *from); from++) ; frac_digits_end=from; // Some exponentiation symbol ? if (from != end && (*from == 'e' || *from == 'E')) @@ -495,7 +1314,7 @@ void Field_decimal::store(const char *from,uint len) exponents will become small (e.g. 1e4294967296 will become 1e0, and the field will finally contain 1 instead of its max possible value). */ - for (;from!=end && isdigit(*from); from++) + for (;from!=end && my_isdigit(&my_charset_bin, *from); from++) { exponent=10*exponent+(*from-'0'); if (exponent>MAX_EXPONENT) @@ -510,12 +1329,13 @@ void Field_decimal::store(const char *from,uint len) it makes the code easer to read. */ - if (current_thd->count_cuted_fields) + if (table->in_use->count_cuted_fields) { - for (;from != end && isspace(*from); from++) ; // Read end spaces + // Skip end spaces + for (;from != end && my_isspace(&my_charset_bin, *from); from++) ; if (from != end) // If still something left, warn { - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); is_cuted_fields_incr=1; } } @@ -524,21 +1344,21 @@ void Field_decimal::store(const char *from,uint len) Now "move" digits around the decimal dot according to the exponent value, and add necessary zeros. Examples : - - 1E+3 : needs 3 more zeros at the left of '.' (int_digits_added_zeros=3) - - 1E-3 : '1' moves at the right of '.', and 2 more zeros are needed - between '.' and '1' + - 1E+3 : needs 3 more zeros at the left of '.' (int_digits_added_zeros=3) + - 1E-3 : '1' moves at the right of '.', and 2 more zeros are needed + between '.' and '1' - 1234.5E-3 : '234' moves at the right of '.' - These moves are implemented with pointers which point at the begin - and end of each moved segment. Examples : + These moves are implemented with pointers which point at the begin + and end of each moved segment. Examples : - 1234.5E-3 : before the code below is executed, the int_digits part is - from '1' to '4' and the frac_digits part from '5' to '5'. After the code - below, the int_digits part is from '1' to '1', the frac_digits_head - part is from '2' to '4', and the frac_digits part from '5' to '5'. + from '1' to '4' and the frac_digits part from '5' to '5'. After the code + below, the int_digits part is from '1' to '1', the frac_digits_head + part is from '2' to '4', and the frac_digits part from '5' to '5'. - 1234.5E3 : before the code below is executed, the int_digits part is - from '1' to '4' and the frac_digits part from '5' to '5'. 
After the code - below, the int_digits part is from '1' to '4', the int_digits_tail - part is from '5' to '5', the frac_digits part is empty, and - int_digits_added_zeros=2 (to make 1234500). + from '1' to '4' and the frac_digits part from '5' to '5'. After the code + below, the int_digits part is from '1' to '4', the int_digits_tail + part is from '5' to '5', the frac_digits part is empty, and + int_digits_added_zeros=2 (to make 1234500). */ /* @@ -600,7 +1420,7 @@ void Field_decimal::store(const char *from,uint len) { // too big number, change to max or min number Field_decimal::overflow(sign_char == '-'); - return; + return 1; } /* @@ -661,7 +1481,7 @@ void Field_decimal::store(const char *from,uint len) /* Write digits of the frac_% parts ; - Depending on current_thd->count_cutted_fields, we may also want + Depending on table->in_use->count_cutted_fields, we may also want to know if some non-zero tail of these parts will be truncated (for example, 0.002->0.00 will generate a warning, while 0.000->0.00 will not) @@ -679,9 +1499,9 @@ void Field_decimal::store(const char *from,uint len) { if (pos == right_wall) { - if (current_thd->count_cuted_fields && !is_cuted_fields_incr) + if (table->in_use->count_cuted_fields && !is_cuted_fields_incr) break; // Go on below to see if we lose non zero digits - return; + return 0; } *pos++='0'; } @@ -693,8 +1513,9 @@ void Field_decimal::store(const char *from,uint len) if (tmp_char != '0') // Losing a non zero digit ? { if (!is_cuted_fields_incr) - current_thd->cuted_fields++; - return; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_TRUNCATED, 1); + return 0; } continue; } @@ -710,8 +1531,8 @@ void Field_decimal::store(const char *from,uint len) if (tmp_char != '0') // Losing a non zero digit ? { if (!is_cuted_fields_incr) - current_thd->cuted_fields++; - return; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + return 0; } continue; } @@ -720,26 +1541,26 @@ void Field_decimal::store(const char *from,uint len) while (pos != right_wall) *pos++='0'; // Fill with zeros at right of '.' - + return 0; } -void Field_decimal::store(double nr) +int Field_decimal::store(double nr) { if (unsigned_flag && nr < 0) { overflow(1); - return; + return 1; } #ifdef HAVE_FINITE if (!finite(nr)) // Handle infinity as special case { overflow(nr < 0.0); - return; + return 1; } #endif - + reg4 uint i,length; char fyllchar,*to; char buff[DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE]; @@ -748,36 +1569,43 @@ void Field_decimal::store(double nr) #ifdef HAVE_SNPRINTF buff[sizeof(buff)-1]=0; // Safety snprintf(buff,sizeof(buff)-1, "%.*f",(int) dec,nr); + length=(uint) strlen(buff); #else - sprintf(buff,"%.*f",dec,nr); + length=(uint) my_sprintf(buff,(buff,"%.*f",dec,nr)); #endif - length=(uint) strlen(buff); if (length > field_length) + { overflow(nr < 0.0); + return 1; + } else { to=ptr; for (i=field_length-length ; i-- > 0 ;) *to++ = fyllchar; memcpy(to,buff,length); + return 0; } } -void Field_decimal::store(longlong nr) +int Field_decimal::store(longlong nr) { if (unsigned_flag && nr < 0) { overflow(1); - return; + return 1; } char buff[22]; uint length=(uint) (longlong10_to_str(nr,buff,-10)-buff); uint int_part=field_length- (dec ? dec+1 : 0); if (length > int_part) + { overflow(test(nr < 0L)); /* purecov: inspected */ + return 1; + } else { char fyllchar = zerofill ? 
(char) '0' : (char) ' '; @@ -790,40 +1618,42 @@ void Field_decimal::store(longlong nr) to[length]='.'; bfill(to+length+1,dec,'0'); } + return 0; } } double Field_decimal::val_real(void) { - char temp= *(ptr+field_length); *(ptr+field_length) = '\0'; - double nr=atod(ptr); - *(ptr+field_length)=temp; - return(nr); + int not_used; + char *end_not_used; + return my_strntod(&my_charset_bin, ptr, field_length, &end_not_used, + ¬_used); } longlong Field_decimal::val_int(void) { - char temp= *(ptr+field_length); *(ptr+field_length) = '\0'; - longlong nr; + int not_used; if (unsigned_flag) - nr=(longlong) strtoull(ptr,NULL,10); + return my_strntoull(&my_charset_bin, ptr, field_length, 10, NULL, + ¬_used); else - nr=strtoll(ptr,NULL,10); - *(ptr+field_length)=temp; - return(nr); + return my_strntoll(&my_charset_bin, ptr, field_length, 10, NULL, + ¬_used); } + String *Field_decimal::val_str(String *val_buffer __attribute__((unused)), String *val_ptr) { char *str; for (str=ptr ; *str == ' ' ; str++) ; uint tmp_length=(uint) (str-ptr); + val_ptr->set_charset(&my_charset_bin); if (field_length < tmp_length) // Error in data val_ptr->length(0); else - val_ptr->set((const char*) str,field_length-tmp_length); + val_ptr->set_ascii((const char*) str, field_length-tmp_length); return val_ptr; } @@ -840,8 +1670,10 @@ int Field_decimal::cmp(const char *a_ptr,const char *b_ptr) for (end=a_ptr+field_length; a_ptr != end && (*a_ptr == *b_ptr || - ((isspace(*a_ptr) || *a_ptr == '+' || *a_ptr == '0') && - (isspace(*b_ptr) || *b_ptr == '+' || *b_ptr == '0'))); + ((my_isspace(&my_charset_bin,*a_ptr) || *a_ptr == '+' || + *a_ptr == '0') && + (my_isspace(&my_charset_bin,*b_ptr) || *b_ptr == '+' || + *b_ptr == '0'))); a_ptr++,b_ptr++) { if (*a_ptr == '-') // If both numbers are negative @@ -868,8 +1700,8 @@ void Field_decimal::sort_string(char *to,uint length) char *str,*end; for (str=ptr,end=ptr+length; str != end && - ((isspace(*str) || *str == '+' || *str == '0')) ; - + ((my_isspace(&my_charset_bin,*str) || *str == '+' || + *str == '0')) ; str++) *to++=' '; if (str == end) @@ -880,7 +1712,7 @@ void Field_decimal::sort_string(char *to,uint length) *to++=1; // Smaller than any number str++; while (str != end) - if (isdigit(*str)) + if (my_isdigit(&my_charset_bin,*str)) *to++= (char) ('9' - *str++); else *to++= *str++; @@ -888,14 +1720,17 @@ void Field_decimal::sort_string(char *to,uint length) else memcpy(to,str,(uint) (end-str)); } + void Field_decimal::sql_type(String &res) const { + CHARSET_INFO *cs=res.charset(); uint tmp=field_length; if (!unsigned_flag) tmp--; if (dec) tmp--; - sprintf((char*) res.ptr(),"decimal(%d,%d)",tmp,dec); + res.length(cs->cset->snprintf(cs,(char*) res.ptr(),res.alloced_length(), + "decimal(%d,%d)",tmp,dec)); add_zerofill_and_unsigned(res); } @@ -904,59 +1739,75 @@ void Field_decimal::sql_type(String &res) const ** tiny int ****************************************************************************/ -void Field_tiny::store(const char *from,uint len) +int Field_tiny::store(const char *from,uint len,CHARSET_INFO *cs) { - String tmp_str(from,len); - long tmp= strtol(tmp_str.c_ptr(),NULL,10); + int not_used; // We can ignore result from str2int + char *end; + long tmp= my_strntol(cs, from, len, 10, &end, ¬_used); + int error= 0; if (unsigned_flag) { if (tmp < 0) { tmp=0; /* purecov: inspected */ - current_thd->cuted_fields++; /* purecov: inspected */ + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else if (tmp > 255) { tmp= 255; - 
current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; + } + else if (table->in_use->count_cuted_fields && !test_if_int(from,len,end,cs)) + { + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + error= 1; } - else if (current_thd->count_cuted_fields && !test_if_int(from,len)) - current_thd->cuted_fields++; } else { if (tmp < -128) { tmp= -128; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else if (tmp >= 128) { tmp= 127; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; + } + else if (table->in_use->count_cuted_fields && !test_if_int(from,len,end,cs)) + { + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + error= 1; } - else if (current_thd->count_cuted_fields && !test_if_int(from,len)) - current_thd->cuted_fields++; } ptr[0]= (char) tmp; + return error; } -void Field_tiny::store(double nr) +int Field_tiny::store(double nr) { + int error= 0; nr=rint(nr); if (unsigned_flag) { if (nr < 0.0) { *ptr=0; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else if (nr > 255.0) { *ptr=(char) 255; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else *ptr=(char) nr; @@ -966,31 +1817,37 @@ void Field_tiny::store(double nr) if (nr < -128.0) { *ptr= (char) -128; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else if (nr > 127.0) { *ptr=127; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else *ptr=(char) nr; } + return error; } -void Field_tiny::store(longlong nr) +int Field_tiny::store(longlong nr) { + int error= 0; if (unsigned_flag) { if (nr < 0L) { *ptr=0; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else if (nr > 255L) { *ptr= (char) 255; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else *ptr=(char) nr; @@ -1000,16 +1857,19 @@ void Field_tiny::store(longlong nr) if (nr < -128L) { *ptr= (char) -128; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else if (nr > 127L) { *ptr=127; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else *ptr=(char) nr; } + return error; } @@ -1030,19 +1890,29 @@ longlong Field_tiny::val_int(void) String *Field_tiny::val_str(String *val_buffer, String *val_ptr __attribute__((unused))) { + CHARSET_INFO *cs= &my_charset_bin; uint length; - val_buffer->alloc(max(field_length+1,5)); + uint mlength=max(field_length+1,5*cs->mbmaxlen); + val_buffer->alloc(mlength); char *to=(char*) val_buffer->ptr(); + if (unsigned_flag) - length= (uint) (int10_to_str((long) *((uchar*) ptr),to,10)-to); + length= (uint) cs->cset->long10_to_str(cs,to,mlength, 10, + (long) *((uchar*) ptr)); else - length= (uint) (int10_to_str((long) *((signed char*) ptr),to,-10)-to); + length= (uint) cs->cset->long10_to_str(cs,to,mlength,-10, + (long) *((signed char*) ptr)); + val_buffer->length(length); if (zerofill) prepend_zeros(val_buffer); return val_buffer; } +bool Field_tiny::send_binary(Protocol 
*protocol) +{ + return protocol->store_tiny((longlong) (int8) ptr[0]); +} int Field_tiny::cmp(const char *a_ptr, const char *b_ptr) { @@ -1063,51 +1933,62 @@ void Field_tiny::sort_string(char *to,uint length __attribute__((unused))) void Field_tiny::sql_type(String &res) const { - sprintf((char*) res.ptr(),"tinyint(%d)",(int) field_length); + CHARSET_INFO *cs=res.charset(); + res.length(cs->cset->snprintf(cs,(char*) res.ptr(),res.alloced_length(), + "tinyint(%d)",(int) field_length)); add_zerofill_and_unsigned(res); } /**************************************************************************** -** short int + Field type short int (2 byte) ****************************************************************************/ - -// Note: Sometimes this should be fixed to use one strtol() to use -// len and check for garbage after number. - -void Field_short::store(const char *from,uint len) +int Field_short::store(const char *from,uint len,CHARSET_INFO *cs) { - String tmp_str(from,len); - long tmp= strtol(tmp_str.c_ptr(),NULL,10); + int not_used; // We can ignore result from str2int + char *end; + long tmp= my_strntol(cs, from, len, 10, &end, ¬_used); + int error= 0; + if (unsigned_flag) { if (tmp < 0) { tmp=0; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else if (tmp > (uint16) ~0) { tmp=(uint16) ~0; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; + } + else if (table->in_use->count_cuted_fields && !test_if_int(from,len,end,cs)) + { + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + error= 1; } - else if (current_thd->count_cuted_fields && !test_if_int(from,len)) - current_thd->cuted_fields++; } else { if (tmp < INT_MIN16) { tmp= INT_MIN16; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else if (tmp > INT_MAX16) { tmp=INT_MAX16; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; + } + else if (table->in_use->count_cuted_fields && !test_if_int(from,len,end,cs)) + { + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + error= 1; } - else if (current_thd->count_cuted_fields && !test_if_int(from,len)) - current_thd->cuted_fields++; } #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) @@ -1117,11 +1998,13 @@ void Field_short::store(const char *from,uint len) else #endif shortstore(ptr,(short) tmp); + return error; } -void Field_short::store(double nr) +int Field_short::store(double nr) { + int error= 0; int16 res; nr=rint(nr); if (unsigned_flag) @@ -1129,12 +2012,14 @@ void Field_short::store(double nr) if (nr < 0) { res=0; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else if (nr > (double) (uint16) ~0) { res=(int16) (uint16) ~0; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else res=(int16) (uint16) nr; @@ -1144,12 +2029,14 @@ void Field_short::store(double nr) if (nr < (double) INT_MIN16) { res=INT_MIN16; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else if (nr > (double) INT_MAX16) { res=INT_MAX16; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else res=(int16) nr; @@ -1162,22 +2049,26 
@@ void Field_short::store(double nr) else #endif shortstore(ptr,res); + return error; } -void Field_short::store(longlong nr) +int Field_short::store(longlong nr) { + int error= 0; int16 res; if (unsigned_flag) { if (nr < 0L) { res=0; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else if (nr > (longlong) (uint16) ~0) { res=(int16) (uint16) ~0; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else res=(int16) (uint16) nr; @@ -1187,12 +2078,14 @@ void Field_short::store(longlong nr) if (nr < INT_MIN16) { res=INT_MIN16; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else if (nr > INT_MAX16) { res=INT_MAX16; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else res=(int16) nr; @@ -1205,6 +2098,7 @@ void Field_short::store(longlong nr) else #endif shortstore(ptr,res); + return error; } @@ -1232,11 +2126,14 @@ longlong Field_short::val_int(void) return unsigned_flag ? (longlong) (unsigned short) j : (longlong) j; } + String *Field_short::val_str(String *val_buffer, String *val_ptr __attribute__((unused))) { + CHARSET_INFO *cs= &my_charset_bin; uint length; - val_buffer->alloc(max(field_length+1,7)); + uint mlength=max(field_length+1,7*cs->mbmaxlen); + val_buffer->alloc(mlength); char *to=(char*) val_buffer->ptr(); short j; #ifdef WORDS_BIGENDIAN @@ -1247,9 +2144,10 @@ String *Field_short::val_str(String *val_buffer, shortget(j,ptr); if (unsigned_flag) - length=(uint) (int10_to_str((long) (uint16) j,to,10)-to); + length=(uint) cs->cset->long10_to_str(cs, to, mlength, 10, + (long) (uint16) j); else - length=(uint) (int10_to_str((long) j,to,-10)-to); + length=(uint) cs->cset->long10_to_str(cs, to, mlength,-10, (long) j); val_buffer->length(length); if (zerofill) prepend_zeros(val_buffer); @@ -1257,6 +2155,12 @@ String *Field_short::val_str(String *val_buffer, } +bool Field_short::send_binary(Protocol *protocol) +{ + return protocol->store_short(Field_short::val_int()); +} + + int Field_short::cmp(const char *a_ptr, const char *b_ptr) { short a,b; @@ -1303,73 +2207,88 @@ void Field_short::sort_string(char *to,uint length __attribute__((unused))) void Field_short::sql_type(String &res) const { - sprintf((char*) res.ptr(),"smallint(%d)",(int) field_length); + CHARSET_INFO *cs=res.charset(); + res.length(cs->cset->snprintf(cs,(char*) res.ptr(),res.alloced_length(), + "smallint(%d)",(int) field_length)); add_zerofill_and_unsigned(res); } /**************************************************************************** -** medium int + Field type medium int (3 byte) ****************************************************************************/ -// Note: Sometimes this should be fixed to use one strtol() to use -// len and check for garbage after number. 
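Field_tiny and Field_short (and the integer types that follow) repeat the same saturate-and-warn logic inline for every bound. A condensed sketch of that shared pattern, assuming for illustration that Field::set_warning() is callable from a free helper:

  /* Sketch only: clamp v into [lo, hi]; on clipping, raise one
     out-of-range warning and report the error to the caller. */
  static longlong clamp_and_warn(Field *f, longlong v, longlong lo, longlong hi,
                                 int *error)
  {
    if (v < lo || v > hi)
    {
      f->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
      *error= 1;
      return v < lo ? lo : hi;
    }
    return v;
  }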
- -void Field_medium::store(const char *from,uint len) +int Field_medium::store(const char *from,uint len,CHARSET_INFO *cs) { - String tmp_str(from,len); - long tmp= strtol(tmp_str.c_ptr(),NULL,10); + int not_used; // We can ignore result from str2int + char *end; + long tmp= my_strntol(cs, from, len, 10, &end, ¬_used); + int error= 0; if (unsigned_flag) { if (tmp < 0) { tmp=0; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else if (tmp >= (long) (1L << 24)) { tmp=(long) (1L << 24)-1L; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; + } + else if (table->in_use->count_cuted_fields && !test_if_int(from,len,end,cs)) + { + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + error= 1; } - else if (current_thd->count_cuted_fields && !test_if_int(from,len)) - current_thd->cuted_fields++; } else { if (tmp < INT_MIN24) { tmp= INT_MIN24; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else if (tmp > INT_MAX24) { tmp=INT_MAX24; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; + } + else if (table->in_use->count_cuted_fields && !test_if_int(from,len,end,cs)) + { + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + error= 1; } - else if (current_thd->count_cuted_fields && !test_if_int(from,len)) - current_thd->cuted_fields++; } int3store(ptr,tmp); + return error; } -void Field_medium::store(double nr) +int Field_medium::store(double nr) { + int error= 0; nr=rint(nr); if (unsigned_flag) { if (nr < 0) { int3store(ptr,0); - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else if (nr >= (double) (long) (1L << 24)) { uint32 tmp=(uint32) (1L << 24)-1L; int3store(ptr,tmp); - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else int3store(ptr,(uint32) nr); @@ -1380,33 +2299,39 @@ void Field_medium::store(double nr) { long tmp=(long) INT_MIN24; int3store(ptr,tmp); - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else if (nr > (double) INT_MAX24) { long tmp=(long) INT_MAX24; int3store(ptr,tmp); - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else int3store(ptr,(long) nr); } + return error; } -void Field_medium::store(longlong nr) +int Field_medium::store(longlong nr) { + int error= 0; if (unsigned_flag) { if (nr < 0L) { int3store(ptr,0); - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else if (nr >= (longlong) (long) (1L << 24)) { long tmp=(long) (1L << 24)-1L;; int3store(ptr,tmp); - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else int3store(ptr,(uint32) nr); @@ -1417,17 +2342,20 @@ void Field_medium::store(longlong nr) { long tmp=(long) INT_MIN24; int3store(ptr,tmp); - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else if (nr > (longlong) INT_MAX24) { long tmp=(long) INT_MAX24; int3store(ptr,tmp); - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); 
+ error= 1; } else int3store(ptr,(long) nr); } + return error; } @@ -1437,21 +2365,25 @@ double Field_medium::val_real(void) return (double) j; } + longlong Field_medium::val_int(void) { long j= unsigned_flag ? (long) uint3korr(ptr) : sint3korr(ptr); return (longlong) j; } + String *Field_medium::val_str(String *val_buffer, String *val_ptr __attribute__((unused))) { + CHARSET_INFO *cs= &my_charset_bin; uint length; - val_buffer->alloc(max(field_length+1,10)); + uint mlength=max(field_length+1,10*cs->mbmaxlen); + val_buffer->alloc(mlength); char *to=(char*) val_buffer->ptr(); long j= unsigned_flag ? (long) uint3korr(ptr) : sint3korr(ptr); - length=(uint) (int10_to_str(j,to,-10)-to); + length=(uint) cs->cset->long10_to_str(cs,to,mlength,-10,j); val_buffer->length(length); if (zerofill) prepend_zeros(val_buffer); /* purecov: inspected */ @@ -1459,6 +2391,12 @@ String *Field_medium::val_str(String *val_buffer, } +bool Field_medium::send_binary(Protocol *protocol) +{ + return protocol->store_long(Field_medium::val_int()); +} + + int Field_medium::cmp(const char *a_ptr, const char *b_ptr) { long a,b; @@ -1488,7 +2426,9 @@ void Field_medium::sort_string(char *to,uint length __attribute__((unused))) void Field_medium::sql_type(String &res) const { - sprintf((char*) res.ptr(),"mediumint(%d)",(int) field_length); + CHARSET_INFO *cs=res.charset(); + res.length(cs->cset->snprintf(cs,(char*) res.ptr(),res.alloced_length(), + "mediumint(%d)",(int) field_length)); add_zerofill_and_unsigned(res); } @@ -1496,44 +2436,64 @@ void Field_medium::sql_type(String &res) const ** long int ****************************************************************************/ +/* + A helper function to check whether the next character + in the string "s" is MINUS SIGN. +*/ +#ifdef HAVE_CHARSET_ucs2 +static bool test_if_minus(CHARSET_INFO *cs, + const char *s, const char *e) +{ + my_wc_t wc; + return cs->cset->mb_wc(cs, &wc, (uchar*) s, (uchar*) e) > 0 && wc == '-'; +} +#else +/* + If not UCS2 support is compiled then it is easier +*/ +#define test_if_minus(cs, s, e) (*s == '-') +#endif -// Note: Sometimes this should be fixed to use one strtol() to use -// len and check for garbage after number. 
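The test_if_minus() helper introduced above exists because a byte-wise *s == '-' check is wrong for UCS-2, where every character occupies two bytes. A usage sketch (requires a build with HAVE_CHARSET_ucs2; the array below is the UCS-2 encoding of "-1"):

  const char ucs2_minus_one[4]= { 0, '-', 0, '1' };   /* UCS-2 "-1" */
  bool neg= test_if_minus(&my_charset_ucs2_general_ci,
                          ucs2_minus_one, ucs2_minus_one + 4);
  /* neg is true: cs->cset->mb_wc() decodes the leading two bytes into the
     wide character '-', which a single-byte *s == '-' test would miss. */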
-void Field_long::store(const char *from,uint len) +int Field_long::store(const char *from,uint len,CHARSET_INFO *cs) { + long tmp; + int error= 0; char *end; - while (len && isspace(*from)) - { - len--; from++; - } - long tmp, cuted_fields=0; - String tmp_str(from,len); - from= tmp_str.c_ptr(); // Add end null if needed - errno=0; + + tmp= cs->cset->scan(cs, from, from+len, MY_SEQ_SPACES); + len-= tmp; + from+= tmp; + my_errno=0; + if (unsigned_flag) { - if (!len || *from == '-') + if (!len || test_if_minus(cs, from, from + len)) { tmp=0; // Set negative to 0 - errno=ERANGE; + my_errno=ERANGE; + error= 1; } else - tmp=(long) strtoul(from, &end, 10); + tmp=(long) my_strntoul(cs,from,len,10,&end,&error); } else - tmp=strtol(from, &end, 10); - if (errno || - (from+len != end && current_thd->count_cuted_fields && - !test_if_int(from,len))) - cuted_fields=1; + tmp=my_strntol(cs,from,len,10,&end,&error); + if (error || + (from+len != end && table->in_use->count_cuted_fields && + !test_if_int(from,len,end,cs))) + { + if (error != 1) + error= 2; + } #if SIZEOF_LONG > 4 if (unsigned_flag) { - if (tmp > UINT_MAX32) + if ((ulong) tmp > UINT_MAX32) { tmp= UINT_MAX32; - cuted_fields=1; + error= 1; + my_errno=ERANGE; } } else @@ -1541,17 +2501,19 @@ void Field_long::store(const char *from,uint len) if (tmp > INT_MAX32) { tmp= INT_MAX32; - cuted_fields=1; + error= 1; + my_errno=ERANGE; } else if (tmp < INT_MIN32) { tmp= INT_MIN32; - cuted_fields=1; + error= 1; + my_errno=ERANGE; } } #endif - if (cuted_fields) - current_thd->cuted_fields++; + if (error) + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) { @@ -1560,11 +2522,13 @@ void Field_long::store(const char *from,uint len) else #endif longstore(ptr,tmp); + return error; } -void Field_long::store(double nr) +int Field_long::store(double nr) { + int error= 0; int32 res; nr=rint(nr); if (unsigned_flag) @@ -1572,12 +2536,14 @@ void Field_long::store(double nr) if (nr < 0) { res=0; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } - else if (nr > (double) (ulong) ~0L) + else if (nr > (double) UINT_MAX32) { - res=(int32) (uint32) ~0L; - current_thd->cuted_fields++; + res= UINT_MAX32; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else res=(int32) (ulong) nr; @@ -1587,12 +2553,14 @@ void Field_long::store(double nr) if (nr < (double) INT_MIN32) { res=(int32) INT_MIN32; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else if (nr > (double) INT_MAX32) { res=(int32) INT_MAX32; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else res=(int32) nr; @@ -1605,23 +2573,35 @@ void Field_long::store(double nr) else #endif longstore(ptr,res); + return error; } -void Field_long::store(longlong nr) +int Field_long::store(longlong nr) { + int error= 0; int32 res; + + /* + This assert has nothing to do with this method per se, it was put here + only because it is one of the best places for catching places there its + condition is broken. 
+ */ + DBUG_ASSERT(table->in_use == current_thd); + if (unsigned_flag) { if (nr < 0) { res=0; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else if (nr >= (LL(1) << 32)) { res=(int32) (uint32) ~0L; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else res=(int32) (uint32) nr; @@ -1631,12 +2611,14 @@ void Field_long::store(longlong nr) if (nr < (longlong) INT_MIN32) { res=(int32) INT_MIN32; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else if (nr > (longlong) INT_MAX32) { res=(int32) INT_MAX32; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else res=(int32) nr; @@ -1649,6 +2631,7 @@ void Field_long::store(longlong nr) else #endif longstore(ptr,res); + return error; } @@ -1667,6 +2650,8 @@ double Field_long::val_real(void) longlong Field_long::val_int(void) { int32 j; + /* See the comment in Field_long::store(long long) */ + DBUG_ASSERT(table->in_use == current_thd); #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) j=sint4korr(ptr); @@ -1679,8 +2664,10 @@ longlong Field_long::val_int(void) String *Field_long::val_str(String *val_buffer, String *val_ptr __attribute__((unused))) { + CHARSET_INFO *cs= &my_charset_bin; uint length; - val_buffer->alloc(max(field_length+1,12)); + uint mlength=max(field_length+1,12*cs->mbmaxlen); + val_buffer->alloc(mlength); char *to=(char*) val_buffer->ptr(); int32 j; #ifdef WORDS_BIGENDIAN @@ -1690,9 +2677,10 @@ String *Field_long::val_str(String *val_buffer, #endif longget(j,ptr); - length=(uint) (int10_to_str((unsigned_flag ? (long) (uint32) j : (long) j), - to, - unsigned_flag ? 
10 : -10)-to); + if (unsigned_flag) + length=cs->cset->long10_to_str(cs,to,mlength, 10,(long) (uint32)j); + else + length=cs->cset->long10_to_str(cs,to,mlength,-10,(long) j); val_buffer->length(length); if (zerofill) prepend_zeros(val_buffer); @@ -1700,6 +2688,11 @@ String *Field_long::val_str(String *val_buffer, } +bool Field_long::send_binary(Protocol *protocol) +{ + return protocol->store_long(Field_long::val_int()); +} + int Field_long::cmp(const char *a_ptr, const char *b_ptr) { int32 a,b; @@ -1749,41 +2742,49 @@ void Field_long::sort_string(char *to,uint length __attribute__((unused))) void Field_long::sql_type(String &res) const { - sprintf((char*) res.ptr(),"int(%d)",(int) field_length); + CHARSET_INFO *cs=res.charset(); + res.length(cs->cset->snprintf(cs,(char*) res.ptr(),res.alloced_length(), + "int(%d)",(int) field_length)); add_zerofill_and_unsigned(res); } /**************************************************************************** -** longlong int + Field type longlong int (8 bytes) ****************************************************************************/ -void Field_longlong::store(const char *from,uint len) +int Field_longlong::store(const char *from,uint len,CHARSET_INFO *cs) { - char *end; - while (len && isspace(*from)) - { // For easy error check - len--; from++; - } longlong tmp; - String tmp_str(from,len); - from= tmp_str.c_ptr(); // Add end null if needed - errno=0; + int error= 0; + char *end; + + tmp= cs->cset->scan(cs, from, from+len, MY_SEQ_SPACES); + len-= (uint)tmp; + from+= tmp; + my_errno=0; if (unsigned_flag) { - if (!len || *from == '-') + if (!len || test_if_minus(cs, from, from + len)) { tmp=0; // Set negative to 0 - errno=ERANGE; + my_errno= ERANGE; + error= 1; } else - tmp=(longlong) strtoull(from, &end, 10); + tmp=(longlong) my_strntoull(cs,from,len,10,&end,&error); } else - tmp=strtoll(from, &end, 10); - if (errno || - (from+len != end && current_thd->count_cuted_fields && - !test_if_int(from,len))) - current_thd->cuted_fields++; + tmp=my_strntoll(cs,from,len,10,&end,&error); + if (error || + (from+len != end && table->in_use->count_cuted_fields && + !test_if_int(from,len,end,cs))) + { + if (error != 1) + { + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + error= 2; + } + } #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) { @@ -1792,11 +2793,13 @@ void Field_longlong::store(const char *from,uint len) else #endif longlongstore(ptr,tmp); + return error; } -void Field_longlong::store(double nr) +int Field_longlong::store(double nr) { + int error= 0; longlong res; nr=rint(nr); if (unsigned_flag) @@ -1804,12 +2807,14 @@ void Field_longlong::store(double nr) if (nr < 0) { res=0; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else if (nr >= (double) ~ (ulonglong) 0) { res= ~(longlong) 0; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else res=(longlong) (ulonglong) nr; @@ -1819,12 +2824,14 @@ void Field_longlong::store(double nr) if (nr <= (double) LONGLONG_MIN) { res=(longlong) LONGLONG_MIN; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } - else if (nr >= (double) LONGLONG_MAX) + else if (nr >= (double) (ulonglong) LONGLONG_MAX) { res=(longlong) LONGLONG_MAX; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } else res=(longlong) nr; @@ 
-1837,10 +2844,11 @@ void Field_longlong::store(double nr) else #endif longlongstore(ptr,res); + return error; } -void Field_longlong::store(longlong nr) +int Field_longlong::store(longlong nr) { #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) @@ -1850,6 +2858,7 @@ void Field_longlong::store(longlong nr) else #endif longlongstore(ptr,nr); + return 0; } @@ -1890,8 +2899,10 @@ longlong Field_longlong::val_int(void) String *Field_longlong::val_str(String *val_buffer, String *val_ptr __attribute__((unused))) { + CHARSET_INFO *cs= &my_charset_bin; uint length; - val_buffer->alloc(max(field_length+1,22)); + uint mlength=max(field_length+1,22*cs->mbmaxlen); + val_buffer->alloc(mlength); char *to=(char*) val_buffer->ptr(); longlong j; #ifdef WORDS_BIGENDIAN @@ -1901,7 +2912,8 @@ String *Field_longlong::val_str(String *val_buffer, #endif longlongget(j,ptr); - length=(uint) (longlong10_to_str(j,to,unsigned_flag ? 10 : -10)-to); + length=(uint) (cs->cset->longlong10_to_str)(cs,to,mlength, + unsigned_flag ? 10 : -10, j); val_buffer->length(length); if (zerofill) prepend_zeros(val_buffer); @@ -1909,6 +2921,12 @@ String *Field_longlong::val_str(String *val_buffer, } +bool Field_longlong::send_binary(Protocol *protocol) +{ + return protocol->store_longlong(Field_longlong::val_int(), unsigned_flag); +} + + int Field_longlong::cmp(const char *a_ptr, const char *b_ptr) { longlong a,b; @@ -1967,46 +2985,84 @@ void Field_longlong::sort_string(char *to,uint length __attribute__((unused))) void Field_longlong::sql_type(String &res) const { - sprintf((char*) res.ptr(),"bigint(%d)",(int) field_length); + CHARSET_INFO *cs=res.charset(); + res.length(cs->cset->snprintf(cs,(char*) res.ptr(),res.alloced_length(), + "bigint(%d)",(int) field_length)); add_zerofill_and_unsigned(res); } + /**************************************************************************** -** single precision float + single precision float ****************************************************************************/ -void Field_float::store(const char *from,uint len) +int Field_float::store(const char *from,uint len,CHARSET_INFO *cs) { - String tmp_str(from,len); - errno=0; - Field_float::store(atof(tmp_str.c_ptr())); - if (errno || current_thd->count_cuted_fields && !test_if_real(from,len)) - current_thd->cuted_fields++; + int error; + char *end; + double nr= my_strntod(cs,(char*) from,len,&end,&error); + if (error || ((uint) (end-from) != len && table->in_use->count_cuted_fields)) + { + error= 2; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + } + Field_float::store(nr); + return error; } -void Field_float::store(double nr) +int Field_float::store(double nr) { float j; - if (dec < NOT_FIXED_DEC) - nr=floor(nr*log_10[dec]+0.5)/log_10[dec]; // To fixed point - if (unsigned_flag && nr < 0) + int error= 0; + + if (isnan(nr)) { - current_thd->cuted_fields++; - nr=0; + j= 0; + set_null(); + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } - if (nr < -FLT_MAX) + else if (unsigned_flag && nr < 0) { - j= -FLT_MAX; - current_thd->cuted_fields++; + j= 0; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } - else if (nr > FLT_MAX) + else { - j=FLT_MAX; - current_thd->cuted_fields++; + double max_value; + if (dec >= NOT_FIXED_DEC) + { + max_value= FLT_MAX; + } + else + { + uint tmp=min(field_length,array_elements(log_10)-1); + max_value= (log_10[tmp]-1)/log_10[dec]; + /* + The following comparison is needed to not get an overflow if nr + is close to 
FLT_MAX + */ + if (fabs(nr) < FLT_MAX/10.0e+32) + nr= floor(nr*log_10[dec]+0.5)/log_10[dec]; + } + if (nr < -max_value) + { + j= (float)-max_value; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; + } + else if (nr > max_value) + { + j= (float)max_value; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; + } + else + j= (float) nr; } - else - j= (float) nr; + #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) { @@ -2015,25 +3071,13 @@ void Field_float::store(double nr) else #endif memcpy_fixed(ptr,(byte*) &j,sizeof(j)); + return error; } -void Field_float::store(longlong nr) +int Field_float::store(longlong nr) { - float j= (float) nr; - if (unsigned_flag && j < 0) - { - current_thd->cuted_fields++; - j=0; - } -#ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) - { - float4store(ptr,j); - } - else -#endif - memcpy_fixed(ptr,(byte*) &j,sizeof(j)); + return store((double)nr); } @@ -2133,10 +3177,10 @@ String *Field_float::val_str(String *val_buffer, #ifdef HAVE_SNPRINTF to[to_length-1]=0; // Safety snprintf(to,to_length-1,"%.*f",dec,nr); + to=strend(to); #else - sprintf(to,"%.*f",dec,nr); + to+= my_sprintf(to,(to,"%.*f",dec,nr)); #endif - to=strend(to); #endif } #ifdef HAVE_FCONVERT @@ -2212,51 +3256,92 @@ void Field_float::sort_string(char *to,uint length __attribute__((unused))) } +bool Field_float::send_binary(Protocol *protocol) +{ + return protocol->store((float) Field_float::val_real(), dec, (String*) 0); +} + + void Field_float::sql_type(String &res) const { if (dec == NOT_FIXED_DEC) - strmov((char*) res.ptr(),"float"); + { + res.set_ascii("float", 5); + } else - sprintf((char*) res.ptr(),"float(%d,%d)",(int) field_length,dec); + { + CHARSET_INFO *cs= res.charset(); + res.length(cs->cset->snprintf(cs,(char*) res.ptr(),res.alloced_length(), + "float(%d,%d)",(int) field_length,dec)); + } add_zerofill_and_unsigned(res); } + /**************************************************************************** -** double precision floating point numbers + double precision floating point numbers ****************************************************************************/ -void Field_double::store(const char *from,uint len) +int Field_double::store(const char *from,uint len,CHARSET_INFO *cs) { - String tmp_str(from,len); - errno=0; - double j= atof(tmp_str.c_ptr()); - if (errno || current_thd->count_cuted_fields && !test_if_real(from,len)) - current_thd->cuted_fields++; - if (unsigned_flag && j < 0) - { - current_thd->cuted_fields++; - j=0; - } -#ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + int error; + char *end; + double nr= my_strntod(cs,(char*) from, len, &end, &error); + if (error || ((uint) (end-from) != len && table->in_use->count_cuted_fields)) { - float8store(ptr,j); + error= 2; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); } - else -#endif - doublestore(ptr,j); + Field_double::store(nr); + return error; } -void Field_double::store(double nr) +int Field_double::store(double nr) { - if (dec < NOT_FIXED_DEC) - nr=floor(nr*log_10[dec]+0.5)/log_10[dec]; // To fixed point - if (unsigned_flag && nr < 0) + int error= 0; + + if (isnan(nr)) { - current_thd->cuted_fields++; - nr=0; + nr= 0; + set_null(); + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } + else if (unsigned_flag && nr < 0) + { + nr= 0; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; + } + else + { + double max_value; + if (dec >= 
NOT_FIXED_DEC) + { + max_value= DBL_MAX; + } + else + { + uint tmp=min(field_length,array_elements(log_10)-1); + max_value= (log_10[tmp]-1)/log_10[dec]; + if (fabs(nr) < DBL_MAX/10.0e+32) + nr= floor(nr*log_10[dec]+0.5)/log_10[dec]; + } + if (nr < -max_value) + { + nr= -max_value; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; + } + else if (nr > max_value) + { + nr= max_value; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; + } + } + #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) { @@ -2265,25 +3350,13 @@ void Field_double::store(double nr) else #endif doublestore(ptr,nr); + return error; } -void Field_double::store(longlong nr) +int Field_double::store(longlong nr) { - double j= (double) nr; - if (unsigned_flag && j < 0) - { - current_thd->cuted_fields++; - j=0; - } -#ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) - { - float8store(ptr,j); - } - else -#endif - doublestore(ptr,j); + return store((double)nr); } @@ -2312,6 +3385,11 @@ longlong Field_double::val_int(void) else #endif doubleget(j,ptr); + /* Check whether we fit into longlong range */ + if (j <= (double) LONGLONG_MIN) + return (longlong) LONGLONG_MIN; + if (j >= (double) (ulonglong) LONGLONG_MAX) + return (longlong) LONGLONG_MAX; return ((longlong) j); } @@ -2383,10 +3461,10 @@ String *Field_double::val_str(String *val_buffer, #ifdef HAVE_SNPRINTF to[to_length-1]=0; // Safety snprintf(to,to_length-1,"%.*f",dec,nr); + to=strend(to); #else - sprintf(to,"%.*f",dec,nr); + to+= my_sprintf(to,(to,"%.*f",dec,nr)); #endif - to=strend(to); #endif } #ifdef HAVE_FCONVERT @@ -2399,6 +3477,11 @@ String *Field_double::val_str(String *val_buffer, return val_buffer; } +bool Field_double::send_binary(Protocol *protocol) +{ + return protocol->store((double) Field_double::val_real(), dec, (String*) 0); +} + int Field_double::cmp(const char *a_ptr, const char *b_ptr) { @@ -2412,14 +3495,8 @@ int Field_double::cmp(const char *a_ptr, const char *b_ptr) else #endif { -/* could this ALWAYS be 2 calls to doubleget() ?? */ -#if defined(__FLOAT_WORD_ORDER) && (__FLOAT_WORD_ORDER == __BIG_ENDIAN) doubleget(a, a_ptr); doubleget(b, b_ptr); -#else - memcpy_fixed(&a,a_ptr,sizeof(double)); - memcpy_fixed(&b,b_ptr,sizeof(double)); -#endif } return (a < b) ? -1 : (a > b) ? 1 : 0; } @@ -2439,62 +3516,169 @@ void Field_double::sort_string(char *to,uint length __attribute__((unused))) } else #endif -/* could this ALWAYS be 2 calls to doubleget() ?? */ -#if defined(__FLOAT_WORD_ORDER) && (__FLOAT_WORD_ORDER == __BIG_ENDIAN) doubleget(nr,ptr); -#else - memcpy_fixed(&nr,ptr,sizeof(nr)); -#endif change_double_for_sort(nr, (byte*) to); } void Field_double::sql_type(String &res) const { + CHARSET_INFO *cs=res.charset(); if (dec == NOT_FIXED_DEC) - strmov((char*) res.ptr(),"double"); + { + res.set_ascii("double",6); + } else - sprintf((char*) res.ptr(),"double(%d,%d)",(int) field_length,dec); + { + res.length(cs->cset->snprintf(cs,(char*) res.ptr(),res.alloced_length(), + "double(%d,%d)",(int) field_length,dec)); + } add_zerofill_and_unsigned(res); } -/**************************************************************************** -** timestamp -** The first timestamp in the table is automaticly updated -** by handler.cc. The form->timestamp points at the automatic timestamp. -****************************************************************************/ - -enum Item_result Field_timestamp::result_type() const -{ - return ((field_length == 8 || field_length == 14) ? 
INT_RESULT : - STRING_RESULT); -} - +/* + TIMESTAMP type. + Holds datetime values in range from 1970-01-01 00:00:01 UTC to + 2038-01-01 00:00:00 UTC stored as number of seconds since Unix + Epoch in UTC. + + Up to one of timestamps columns in the table can be automatically + set on row update and/or have NOW() as default value. + TABLE::timestamp_field points to Field object for such timestamp with + auto-set-on-update. TABLE::time_stamp holds offset in record + 1 for this + field, and is used by handler code which performs updates required. + + Actually SQL-99 says that we should allow niladic functions (like NOW()) + as defaults for any field. Current limitations (only NOW() and only + for one TIMESTAMP field) are because of restricted binary .frm format + and should go away in the future. + + Also because of this limitation of binary .frm format we use 5 different + unireg_check values with TIMESTAMP field to distinguish various cases of + DEFAULT or ON UPDATE values. These values are: + + TIMESTAMP_OLD_FIELD - old timestamp, if there was not any fields with + auto-set-on-update (or now() as default) in this table before, then this + field has NOW() as default and is updated when row changes, else it is + field which has 0 as default value and is not automaitcally updated. + TIMESTAMP_DN_FIELD - field with NOW() as default but not set on update + automatically (TIMESTAMP DEFAULT NOW()) + TIMESTAMP_UN_FIELD - field which is set on update automatically but has not + NOW() as default (but it may has 0 or some other const timestamp as + default) (TIMESTAMP ON UPDATE NOW()). + TIMESTAMP_DNUN_FIELD - field which has now() as default and is auto-set on + update. (TIMESTAMP DEFAULT NOW() ON UPDATE NOW()) + NONE - field which is not auto-set on update with some other than NOW() + default value (TIMESTAMP DEFAULT 0). + + Note that TIMESTAMP_OLD_FIELD's are never created explicitly now, they are + left only for preserving ability to read old tables. Such fields replaced + with their newer analogs in CREATE TABLE and in SHOW CREATE TABLE. This is + because we want to prefer NONE unireg_check before TIMESTAMP_OLD_FIELD for + "TIMESTAMP DEFAULT 'Const'" field. (Old timestamps allowed such + specification too but ignored default value for first timestamp, which of + course is non-standard.) In most cases user won't notice any change, only + exception is different behavior of old/new timestamps during ALTER TABLE. + */ Field_timestamp::Field_timestamp(char *ptr_arg, uint32 len_arg, + uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, const char *field_name_arg, - struct st_table *table_arg) - :Field_num(ptr_arg, len_arg, (uchar*) 0,0, - unireg_check_arg, field_name_arg, table_arg, - 0, 1, 1) -#if MYSQL_VERSION_ID < 40100 - , orig_field_length(len_arg) -#endif + struct st_table *table_arg, + CHARSET_INFO *cs) + :Field_str(ptr_arg, 19, null_ptr_arg, null_bit_arg, + unireg_check_arg, field_name_arg, table_arg, cs) +{ + /* For 4.0 MYD and 4.0 InnoDB compatibility */ + flags|= ZEROFILL_FLAG | UNSIGNED_FLAG; + if (table && !table->timestamp_field && + unireg_check != NONE) + { + /* This timestamp has auto-update */ + table->timestamp_field= this; + flags|=TIMESTAMP_FLAG; + } +} + + +/* + Get auto-set type for TIMESTAMP field. + + SYNOPSIS + get_auto_set_type() + + DESCRIPTION + Returns value indicating during which operations this TIMESTAMP field + should be auto-set to current timestamp. 
+*/ +timestamp_auto_set_type Field_timestamp::get_auto_set_type() const { - if (table && !table->timestamp_field) + switch (unireg_check) { - table->timestamp_field= this; // Automatic timestamp - table->time_stamp=(ulong) (ptr_arg - (char*) table->record[0])+1; - flags|=TIMESTAMP_FLAG; + case TIMESTAMP_DN_FIELD: + return TIMESTAMP_AUTO_SET_ON_INSERT; + case TIMESTAMP_UN_FIELD: + return TIMESTAMP_AUTO_SET_ON_UPDATE; + case TIMESTAMP_OLD_FIELD: + /* + Altough we can have several such columns in legacy tables this + function should be called only for first of them (i.e. the one + having auto-set property). + */ + DBUG_ASSERT(table->timestamp_field == this); + /* Fall-through */ + case TIMESTAMP_DNUN_FIELD: + return TIMESTAMP_AUTO_SET_ON_BOTH; + default: + /* + Normally this function should not be called for TIMESTAMPs without + auto-set property. + */ + DBUG_ASSERT(0); + return TIMESTAMP_NO_AUTO_SET; } } -void Field_timestamp::store(const char *from,uint len) +int Field_timestamp::store(const char *from,uint len,CHARSET_INFO *cs) { - long tmp=(long) str_to_timestamp(from,len); + TIME l_time; + my_time_t tmp= 0; + int error; + bool have_smth_to_conv; + bool in_dst_time_gap; + THD *thd= table->in_use; + + have_smth_to_conv= (str_to_datetime(from, len, &l_time, 0, &error) > + MYSQL_TIMESTAMP_ERROR); + + if (error) + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, + from, len, MYSQL_TIMESTAMP_DATETIME, 1); + + if (have_smth_to_conv) + { + if (!(tmp= TIME_to_timestamp(thd, &l_time, &in_dst_time_gap))) + { + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_OUT_OF_RANGE, + from, len, MYSQL_TIMESTAMP_DATETIME, !error); + + error= 1; + } + else if (in_dst_time_gap) + { + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_INVALID_TIMESTAMP, + from, len, MYSQL_TIMESTAMP_DATETIME, !error); + error= 1; + } + } + if (error > 1) + error= 2; + #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) { @@ -2503,101 +3687,56 @@ void Field_timestamp::store(const char *from,uint len) else #endif longstore(ptr,tmp); + return error; } - -void Field_timestamp::store(double nr) +int Field_timestamp::store(double nr) { + int error= 0; if (nr < 0 || nr > 99991231235959.0) { - nr=0; // Avoid overflow on buff - current_thd->cuted_fields++; + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_OUT_OF_RANGE, + nr, MYSQL_TIMESTAMP_DATETIME); + nr= 0; // Avoid overflow on buff + error= 1; } - Field_timestamp::store((longlong) rint(nr)); -} - - -/* -** Convert a datetime of formats YYMMDD, YYYYMMDD or YYMMDDHHMSS to -** YYYYMMDDHHMMSS. The high date '99991231235959' is checked before this -** function. 
-*/ - -static longlong fix_datetime(longlong nr, TIME *time_res) -{ - long part1,part2; - - if (nr == LL(0) || nr >= LL(10000101000000)) - goto ok; - if (nr < 101) - goto err; - if (nr <= (YY_PART_YEAR-1)*10000L+1231L) - { - nr= (nr+20000000L)*1000000L; // YYMMDD, year: 2000-2069 - goto ok; - } - if (nr < (YY_PART_YEAR)*10000L+101L) - goto err; - if (nr <= 991231L) - { - nr= (nr+19000000L)*1000000L; // YYMMDD, year: 1970-1999 - goto ok; - } - if (nr < 10000101L) - goto err; - if (nr <= 99991231L) - { - nr= nr*1000000L; - goto ok; - } - if (nr < 101000000L) - goto err; - if (nr <= (YY_PART_YEAR-1)*LL(10000000000)+LL(1231235959)) - { - nr= nr+LL(20000000000000); // YYMMDDHHMMSS, 2000-2069 - goto ok; - } - if (nr < YY_PART_YEAR*LL(10000000000)+ LL(101000000)) - goto err; - if (nr <= LL(991231235959)) - nr= nr+LL(19000000000000); // YYMMDDHHMMSS, 1970-1999 - - ok: - part1=(long) (nr/LL(1000000)); - part2=(long) (nr - (longlong) part1*LL(1000000)); - time_res->year= (int) (part1/10000L); part1%=10000L; - time_res->month= (int) part1 / 100; - time_res->day= (int) part1 % 100; - time_res->hour= (int) (part2/10000L); part2%=10000L; - time_res->minute=(int) part2 / 100; - time_res->second=(int) part2 % 100; - - if (time_res->year <= 9999 && time_res->month <= 12 && - time_res->day <= 31 && time_res->hour <= 23 && - time_res->minute <= 59 && time_res->second <= 59) - return nr; - - err: - current_thd->cuted_fields++; - return LL(0); + error|= Field_timestamp::store((longlong) rint(nr)); + return error; } -void Field_timestamp::store(longlong nr) +int Field_timestamp::store(longlong nr) { TIME l_time; - time_t timestamp= 0; + my_time_t timestamp= 0; + int error; + bool in_dst_time_gap; + THD *thd= table->in_use; - if ((nr= fix_datetime(nr, &l_time))) + if (number_to_TIME(nr, &l_time, 0, &error)) { - long not_used; - - timestamp= my_gmt_sec(&l_time, ¬_used); - - if (!timestamp) - current_thd->cuted_fields++; - } + if (!(timestamp= TIME_to_timestamp(thd, &l_time, &in_dst_time_gap))) + { + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_OUT_OF_RANGE, + nr, MYSQL_TIMESTAMP_DATETIME, 1); + error= 1; + } + if (in_dst_time_gap) + { + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_INVALID_TIMESTAMP, + nr, MYSQL_TIMESTAMP_DATETIME, !error); + error= 1; + } + } + else if (error) + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_TRUNCATED, + nr, MYSQL_TIMESTAMP_DATETIME, 1); + #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) { @@ -2606,6 +3745,8 @@ void Field_timestamp::store(longlong nr) else #endif longstore(ptr,(uint32) timestamp); + + return error; } @@ -2616,13 +3757,9 @@ double Field_timestamp::val_real(void) longlong Field_timestamp::val_int(void) { - uint len, pos, max_int_rep_len; - int part_time; uint32 temp; - time_t time_arg; - struct tm *l_time; - longlong res; - struct tm tm_tmp; + TIME time_tmp; + THD *thd= table->in_use; #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) @@ -2633,48 +3770,26 @@ longlong Field_timestamp::val_int(void) if (temp == 0L) // No time return(0); /* purecov: inspected */ - time_arg=(time_t) temp; - localtime_r(&time_arg,&tm_tmp); - l_time=&tm_tmp; - res=(longlong) 0; - max_int_rep_len= min(field_length, 14); - for (pos= len= 0; len+1 < max_int_rep_len ; len+= 2,pos++) - { - bool year_flag=0; - switch (dayord.pos[pos]) { - case 0: part_time=l_time->tm_year % 100; year_flag=1 ; break; - case 1: part_time=l_time->tm_mon+1; break; - case 2: part_time=l_time->tm_mday; break; - case 3: part_time=l_time->tm_hour; break; - 
case 4: part_time=l_time->tm_min; break; - case 5: part_time=l_time->tm_sec; break; - default: part_time=0; break; /* purecov: deadcode */ - } - if (year_flag && (max_int_rep_len == 8 || max_int_rep_len == 14)) - { - res=res*(longlong) 10000+(part_time+ - ((part_time < YY_PART_YEAR) ? 2000 : 1900)); - len+=2; - } - else - res=res*(longlong) 100+part_time; - } - return (longlong) res; + + thd->variables.time_zone->gmt_sec_to_TIME(&time_tmp, (my_time_t)temp); + thd->time_zone_used= 1; + + return time_tmp.year * LL(10000000000) + time_tmp.month * LL(100000000) + + time_tmp.day * 1000000L + time_tmp.hour * 10000L + + time_tmp.minute * 100 + time_tmp.second; } -String *Field_timestamp::val_str(String *val_buffer, - String *val_ptr __attribute__((unused))) +String *Field_timestamp::val_str(String *val_buffer, String *val_ptr) { - uint pos; - int part_time; - uint32 temp; - time_t time_arg; - struct tm *l_time; - struct tm tm_tmp; - my_bool new_format= field_length == 19, - full_year=(field_length == 8 || field_length == 14 || new_format); - int real_field_length= new_format ? 19 : field_length; + uint32 temp, temp2; + TIME time_tmp; + THD *thd= table->in_use; + char *to; + + val_buffer->alloc(field_length+1); + to= (char*) val_buffer->ptr(); + val_buffer->length(field_length); #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) @@ -2685,60 +3800,62 @@ String *Field_timestamp::val_str(String *val_buffer, if (temp == 0L) { /* Zero time is "000000" */ - if (new_format) - val_buffer->copy("0000-00-00 00:00:00", real_field_length); - else - val_buffer->copy("00000000000000", real_field_length); - return val_buffer; - } - time_arg=(time_t) temp; - localtime_r(&time_arg,&tm_tmp); - l_time=&tm_tmp; - - val_buffer->alloc(real_field_length+1); - char *to=(char*) val_buffer->ptr(),*end=to+real_field_length; - - for (pos=0; to < end ; pos++) - { - bool year_flag=0; - switch (pos) { - case 0: part_time=l_time->tm_year % 100; year_flag=1; break; - case 1: part_time=l_time->tm_mon+1; break; - case 2: part_time=l_time->tm_mday; break; - case 3: part_time=l_time->tm_hour; break; - case 4: part_time=l_time->tm_min; break; - case 5: part_time=l_time->tm_sec; break; - default: part_time=0; break; /* purecov: deadcode */ - } - if (year_flag && full_year) - { - if (part_time < YY_PART_YEAR) - { - *to++='2'; *to++='0'; /* purecov: inspected */ - } - else - { - *to++='1'; *to++='9'; - } - } - *to++=(char) ('0'+((uint) part_time/10)); - *to++=(char) ('0'+((uint) part_time % 10)); - if (new_format) - { - static const char delim[6]="-- ::"; - *to++=delim[pos]; - } + val_ptr->set("0000-00-00 00:00:00", 19, &my_charset_bin); + return val_ptr; } - if (new_format) - to--; - *to=0; // Safeguard - val_buffer->length((uint) (to-val_buffer->ptr())); + val_buffer->set_charset(&my_charset_bin); // Safety + + thd->variables.time_zone->gmt_sec_to_TIME(&time_tmp,(my_time_t)temp); + thd->time_zone_used= 1; + + temp= time_tmp.year % 100; + if (temp < YY_PART_YEAR) + { + *to++= '2'; + *to++= '0'; + } + else + { + *to++= '1'; + *to++= '9'; + } + temp2=temp/10; temp=temp-temp2*10; + *to++= (char) ('0'+(char) (temp2)); + *to++= (char) ('0'+(char) (temp)); + *to++= '-'; + temp=time_tmp.month; + temp2=temp/10; temp=temp-temp2*10; + *to++= (char) ('0'+(char) (temp2)); + *to++= (char) ('0'+(char) (temp)); + *to++= '-'; + temp=time_tmp.day; + temp2=temp/10; temp=temp-temp2*10; + *to++= (char) ('0'+(char) (temp2)); + *to++= (char) ('0'+(char) (temp)); + *to++= ' '; + temp=time_tmp.hour; + temp2=temp/10; temp=temp-temp2*10; + *to++= (char) 
('0'+(char) (temp2)); + *to++= (char) ('0'+(char) (temp)); + *to++= ':'; + temp=time_tmp.minute; + temp2=temp/10; temp=temp-temp2*10; + *to++= (char) ('0'+(char) (temp2)); + *to++= (char) ('0'+(char) (temp)); + *to++= ':'; + temp=time_tmp.second; + temp2=temp/10; temp=temp-temp2*10; + *to++= (char) ('0'+(char) (temp2)); + *to++= (char) ('0'+(char) (temp)); + *to= 0; return val_buffer; } -bool Field_timestamp::get_date(TIME *ltime, bool fuzzydate) + +bool Field_timestamp::get_date(TIME *ltime, uint fuzzydate) { long temp; + THD *thd= table->in_use; #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) temp=uint4korr(ptr); @@ -2753,19 +3870,8 @@ bool Field_timestamp::get_date(TIME *ltime, bool fuzzydate) } else { - struct tm tm_tmp; - time_t time_arg= (time_t) temp; - localtime_r(&time_arg,&tm_tmp); - struct tm *start= &tm_tmp; - ltime->year= start->tm_year+1900; - ltime->month= start->tm_mon+1; - ltime->day= start->tm_mday; - ltime->hour= start->tm_hour; - ltime->minute= start->tm_min; - ltime->second= start->tm_sec; - ltime->second_part= 0; - ltime->neg= 0; - ltime->time_type=TIMESTAMP_FULL; + thd->variables.time_zone->gmt_sec_to_TIME(ltime, (my_time_t)temp); + thd->time_zone_used= 1; } return 0; } @@ -2775,6 +3881,15 @@ bool Field_timestamp::get_time(TIME *ltime) return Field_timestamp::get_date(ltime,0); } + +bool Field_timestamp::send_binary(Protocol *protocol) +{ + TIME tm; + Field_timestamp::get_date(&tm, TIME_FUZZY_DATE); + return protocol->store(&tm); +} + + int Field_timestamp::cmp(const char *a_ptr, const char *b_ptr) { int32 a,b; @@ -2793,6 +3908,7 @@ int Field_timestamp::cmp(const char *a_ptr, const char *b_ptr) return ((uint32) a < (uint32) b) ? -1 : ((uint32) a > (uint32) b) ? 1 : 0; } + void Field_timestamp::sort_string(char *to,uint length __attribute__((unused))) { #ifdef WORDS_BIGENDIAN @@ -2816,14 +3932,14 @@ void Field_timestamp::sort_string(char *to,uint length __attribute__((unused))) void Field_timestamp::sql_type(String &res) const { - sprintf((char*) res.ptr(),"timestamp(%d)",(int) field_length); - res.length((uint) strlen(res.ptr())); + res.set_ascii("timestamp", 9); } void Field_timestamp::set_time() { - long tmp= (long) current_thd->query_start(); + long tmp= (long) table->in_use->query_start(); + set_notnull(); #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) { @@ -2834,7 +3950,6 @@ void Field_timestamp::set_time() longstore(ptr,tmp); } - /**************************************************************************** ** time type ** In string context: HH:MM:SS @@ -2842,41 +3957,65 @@ void Field_timestamp::set_time() ** Stored as a 3 byte unsigned int ****************************************************************************/ -void Field_time::store(const char *from,uint len) +int Field_time::store(const char *from,uint len,CHARSET_INFO *cs) { TIME ltime; long tmp; - if (str_to_time(from,len,<ime)) + int error; + + if (str_to_time(from, len, <ime, &error)) + { tmp=0L; + error= 2; + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, + from, len, MYSQL_TIMESTAMP_TIME, 1); + } else { + if (error) + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_TRUNCATED, + from, len, MYSQL_TIMESTAMP_TIME, 1); + if (ltime.month) ltime.day=0; tmp=(ltime.day*24L+ltime.hour)*10000L+(ltime.minute*100+ltime.second); if (tmp > 8385959) { tmp=8385959; - current_thd->cuted_fields++; + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_OUT_OF_RANGE, + from, len, MYSQL_TIMESTAMP_TIME, !error); + error= 1; } + if (error > 1) + error= 
2; } + if (ltime.neg) tmp= -tmp; - Field_time::store((longlong) tmp); + error |= Field_time::store((longlong) tmp); + return error; } -void Field_time::store(double nr) +int Field_time::store(double nr) { long tmp; + int error= 0; if (nr > 8385959.0) { tmp=8385959L; - current_thd->cuted_fields++; + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_OUT_OF_RANGE, nr, MYSQL_TIMESTAMP_TIME); + error= 1; } else if (nr < -8385959.0) { tmp= -8385959L; - current_thd->cuted_fields++; + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_OUT_OF_RANGE, nr, MYSQL_TIMESTAMP_TIME); + error= 1; } else { @@ -2886,25 +4025,36 @@ void Field_time::store(double nr) if (tmp % 100 > 59 || tmp/100 % 100 > 59) { tmp=0; - current_thd->cuted_fields++; + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_OUT_OF_RANGE, nr, + MYSQL_TIMESTAMP_TIME); + error= 1; } } int3store(ptr,tmp); + return error; } -void Field_time::store(longlong nr) +int Field_time::store(longlong nr) { long tmp; + int error= 0; if (nr > (longlong) 8385959L) { tmp=8385959L; - current_thd->cuted_fields++; + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_OUT_OF_RANGE, nr, + MYSQL_TIMESTAMP_TIME, 1); + error= 1; } else if (nr < (longlong) -8385959L) { tmp= -8385959L; - current_thd->cuted_fields++; + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_OUT_OF_RANGE, nr, + MYSQL_TIMESTAMP_TIME, 1); + error= 1; } else { @@ -2912,10 +4062,14 @@ void Field_time::store(longlong nr) if (tmp % 100 > 59 || tmp/100 % 100 > 59) { tmp=0; - current_thd->cuted_fields++; + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_OUT_OF_RANGE, nr, + MYSQL_TIMESTAMP_TIME, 1); + error= 1; } } int3store(ptr,tmp); + return error; } @@ -2930,28 +4084,51 @@ longlong Field_time::val_int(void) return (longlong) sint3korr(ptr); } + +/* + This function is multi-byte safe as the result string is always of type + my_charset_bin +*/ + String *Field_time::val_str(String *val_buffer, String *val_ptr __attribute__((unused))) { - val_buffer->alloc(16); + TIME ltime; + val_buffer->alloc(19); long tmp=(long) sint3korr(ptr); - const char *sign=""; + ltime.neg= 0; if (tmp < 0) { tmp= -tmp; - sign= "-"; + ltime.neg= 1; } - sprintf((char*) val_buffer->ptr(),"%s%02d:%02d:%02d", - sign,(int) (tmp/10000), (int) (tmp/100 % 100), - (int) (tmp % 100)); - val_buffer->length((uint) strlen(val_buffer->ptr())); + ltime.day= (uint) 0; + ltime.hour= (uint) (tmp/10000); + ltime.minute= (uint) (tmp/100 % 100); + ltime.second= (uint) (tmp % 100); + make_time((DATE_TIME_FORMAT*) 0, <ime, val_buffer); return val_buffer; } -bool Field_time::get_date(TIME *ltime, - bool fuzzydate __attribute__((unused))) + +/* + Normally we would not consider 'time' as a vaild date, but we allow + get_date() here to be able to do things like + DATE_FORMAT(time, "%l.%i %p") +*/ + +bool Field_time::get_date(TIME *ltime, uint fuzzydate) { - long tmp=(long) sint3korr(ptr); + long tmp; + if (!fuzzydate) + { + push_warning_printf(table->in_use, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_OUT_OF_RANGE, + ER(ER_WARN_DATA_OUT_OF_RANGE), field_name, + table->in_use->row_count); + return 1; + } + tmp=(long) sint3korr(ptr); ltime->neg=0; if (tmp < 0) { @@ -2966,6 +4143,7 @@ bool Field_time::get_date(TIME *ltime, return 0; } + bool Field_time::get_time(TIME *ltime) { long tmp=(long) sint3korr(ptr); @@ -2975,14 +4153,27 @@ bool Field_time::get_time(TIME *ltime) ltime->neg= 1; tmp=-tmp; } + ltime->day= 0; ltime->hour= (int) (tmp/10000); 
tmp-=ltime->hour*10000; ltime->minute= (int) tmp/100; ltime->second= (int) tmp % 100; ltime->second_part=0; + ltime->time_type= MYSQL_TIMESTAMP_TIME; return 0; } + +bool Field_time::send_binary(Protocol *protocol) +{ + TIME tm; + Field_time::get_time(&tm); + tm.day= tm.hour/24; // Move hours to days + tm.hour-= tm.day*24; + return protocol->store_time(&tm); +} + + int Field_time::cmp(const char *a_ptr, const char *b_ptr) { int32 a,b; @@ -3000,7 +4191,7 @@ void Field_time::sort_string(char *to,uint length __attribute__((unused))) void Field_time::sql_type(String &res) const { - res.set("time",4); + res.set_ascii("time", 4); } /**************************************************************************** @@ -3009,19 +4200,28 @@ void Field_time::sql_type(String &res) const ** Can handle 2 byte or 4 byte years! ****************************************************************************/ -void Field_year::store(const char *from, uint len) +int Field_year::store(const char *from, uint len,CHARSET_INFO *cs) { - String tmp_str(from,len); - long nr= strtol(tmp_str.c_ptr(),NULL,10); + int err; + char *end; + long nr= my_strntol(cs, from, len, 10, &end, &err); + + if (err) + { + if (table->in_use->count_cuted_fields) + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + *ptr= 0; + return 0; + } if (nr < 0 || nr >= 100 && nr <= 1900 || nr > 2155) { *ptr=0; - current_thd->cuted_fields++; - return; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + return 1; } - if (current_thd->count_cuted_fields && !test_if_int(from,len)) - current_thd->cuted_fields++; + if (table->in_use->count_cuted_fields && !test_if_int(from,len,end,cs)) + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); if (nr != 0 || len != 4) { if (nr < YY_PART_YEAR) @@ -3030,23 +4230,28 @@ void Field_year::store(const char *from, uint len) nr-= 1900; } *ptr= (char) (unsigned char) nr; + return 0; } -void Field_year::store(double nr) + +int Field_year::store(double nr) { if (nr < 0.0 || nr >= 2155.0) - Field_year::store((longlong) -1); + { + (void) Field_year::store((longlong) -1); + return 1; + } else - Field_year::store((longlong) nr); + return Field_year::store((longlong) nr); } -void Field_year::store(longlong nr) +int Field_year::store(longlong nr) { if (nr < 0 || nr >= 100 && nr <= 1900 || nr > 2155) { *ptr=0; - current_thd->cuted_fields++; - return; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + return 1; } if (nr != 0 || field_length != 4) // 0000 -> 0; 00 -> 2000 { @@ -3056,8 +4261,14 @@ void Field_year::store(longlong nr) nr-= 1900; } *ptr= (char) (unsigned char) nr; + return 0; } +bool Field_year::send_binary(Protocol *protocol) +{ + ulonglong tmp= Field_year::val_int(); + return protocol->store_short(tmp); +} double Field_year::val_real(void) { @@ -3086,8 +4297,9 @@ String *Field_year::val_str(String *val_buffer, void Field_year::sql_type(String &res) const { - sprintf((char*) res.ptr(),"year(%d)",(int) field_length); - res.length((uint) strlen(res.ptr())); + CHARSET_INFO *cs=res.charset(); + res.length(cs->cset->snprintf(cs,(char*)res.ptr(),res.alloced_length(), + "year(%d)",(int) field_length)); } @@ -3098,14 +4310,24 @@ void Field_year::sql_type(String &res) const ** Stored as a 4 byte unsigned int ****************************************************************************/ -void Field_date::store(const char *from, uint len) +int Field_date::store(const char *from, uint len,CHARSET_INFO *cs) { TIME l_time; uint32 tmp; - if 
(str_to_TIME(from,len,&l_time,1) == TIMESTAMP_NONE) + int error; + + if (str_to_datetime(from, len, &l_time, 1, &error) <= MYSQL_TIMESTAMP_ERROR) + { tmp=0; + error= 2; + } else tmp=(uint32) l_time.year*10000L + (uint32) (l_time.month*100+l_time.day); + + if (error) + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, + from, len, MYSQL_TIMESTAMP_DATE, 1); + #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) { @@ -3114,18 +4336,23 @@ void Field_date::store(const char *from, uint len) else #endif longstore(ptr,tmp); + return error; } -void Field_date::store(double nr) +int Field_date::store(double nr) { long tmp; + int error= 0; if (nr >= 19000000000000.0 && nr <= 99991231235959.0) nr=floor(nr/1000000.0); // Timestamp to date if (nr < 0.0 || nr > 99991231.0) { tmp=0L; - current_thd->cuted_fields++; + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_OUT_OF_RANGE, + nr, MYSQL_TIMESTAMP_DATE); + error= 1; } else tmp=(long) rint(nr); @@ -3137,18 +4364,23 @@ void Field_date::store(double nr) else #endif longstore(ptr,tmp); + return error; } -void Field_date::store(longlong nr) +int Field_date::store(longlong nr) { long tmp; + int error= 0; if (nr >= LL(19000000000000) && nr < LL(99991231235959)) nr=nr/LL(1000000); // Timestamp to date if (nr < 0 || nr > LL(99991231)) { tmp=0L; - current_thd->cuted_fields++; + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_OUT_OF_RANGE, + nr, MYSQL_TIMESTAMP_DATE, 0); + error= 1; } else tmp=(long) nr; @@ -3160,6 +4392,18 @@ void Field_date::store(longlong nr) else #endif longstore(ptr,tmp); + return error; +} + + +bool Field_date::send_binary(Protocol *protocol) +{ + longlong tmp= Field_date::val_int(); + TIME tm; + tm.year= (uint32) tmp/10000L % 10000; + tm.month= (uint32) tmp/100 % 100; + tm.day= (uint32) tmp % 100; + return protocol->store_date(&tm); } @@ -3190,8 +4434,8 @@ longlong Field_date::val_int(void) String *Field_date::val_str(String *val_buffer, String *val_ptr __attribute__((unused))) { + TIME ltime; val_buffer->alloc(field_length); - val_buffer->length(field_length); int32 tmp; #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) @@ -3199,12 +4443,15 @@ String *Field_date::val_str(String *val_buffer, else #endif longget(tmp,ptr); - sprintf((char*) val_buffer->ptr(),"%04d-%02d-%02d", - (int) ((uint32) tmp/10000L % 10000), (int) ((uint32) tmp/100 % 100), - (int) ((uint32) tmp % 100)); + ltime.neg= 0; + ltime.year= (int) ((uint32) tmp/10000L % 10000); + ltime.month= (int) ((uint32) tmp/100 % 100); + ltime.day= (int) ((uint32) tmp % 100); + make_date((DATE_TIME_FORMAT *) 0, <ime, val_buffer); return val_buffer; } + int Field_date::cmp(const char *a_ptr, const char *b_ptr) { int32 a,b; @@ -3246,7 +4493,7 @@ void Field_date::sort_string(char *to,uint length __attribute__((unused))) void Field_date::sql_type(String &res) const { - res.set("date",4); + res.set_ascii("date", 4); } /**************************************************************************** @@ -3255,35 +4502,54 @@ void Field_date::sql_type(String &res) const ** In number context: YYYYMMDD ****************************************************************************/ -void Field_newdate::store(const char *from,uint len) +int Field_newdate::store(const char *from,uint len,CHARSET_INFO *cs) { TIME l_time; long tmp; - if (str_to_TIME(from,len,&l_time,1) == TIMESTAMP_NONE) + int error; + if (str_to_datetime(from, len, &l_time, 1, &error) <= MYSQL_TIMESTAMP_ERROR) + { tmp=0L; + error= 2; + } else tmp= l_time.day + l_time.month*32 + 
l_time.year*16*32; + + if (error) + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, + from, len, MYSQL_TIMESTAMP_DATE, 1); + int3store(ptr,tmp); + return error; } -void Field_newdate::store(double nr) +int Field_newdate::store(double nr) { if (nr < 0.0 || nr > 99991231235959.0) - Field_newdate::store((longlong) -1); + { + (void) Field_newdate::store((longlong) -1); + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_TRUNCATED, nr, MYSQL_TIMESTAMP_DATE); + return 1; + } else - Field_newdate::store((longlong) rint(nr)); + return Field_newdate::store((longlong) rint(nr)); } -void Field_newdate::store(longlong nr) +int Field_newdate::store(longlong nr) { int32 tmp; + int error= 0; if (nr >= LL(100000000) && nr <= LL(99991231235959)) nr=nr/LL(1000000); // Timestamp to date if (nr < 0L || nr > 99991231L) { tmp=0; - current_thd->cuted_fields++; + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_OUT_OF_RANGE, nr, + MYSQL_TIMESTAMP_DATE, 1); + error= 1; } else { @@ -3300,28 +4566,37 @@ void Field_newdate::store(longlong nr) if (month > 12 || day > 31) { tmp=0L; // Don't allow date to change - current_thd->cuted_fields++; + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_OUT_OF_RANGE, nr, + MYSQL_TIMESTAMP_DATE, 1); + error= 1; } else tmp= day + month*32 + (tmp/10000)*16*32; } int3store(ptr,(int32) tmp); + return error; } void Field_newdate::store_time(TIME *ltime,timestamp_type type) { long tmp; - if (type == TIMESTAMP_DATE || type == TIMESTAMP_FULL) + if (type == MYSQL_TIMESTAMP_DATE || type == MYSQL_TIMESTAMP_DATETIME) tmp=ltime->year*16*32+ltime->month*32+ltime->day; else { tmp=0; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); } int3store(ptr,tmp); } - +bool Field_newdate::send_binary(Protocol *protocol) +{ + TIME tm; + Field_newdate::get_date(&tm,0); + return protocol->store_date(&tm); +} double Field_newdate::val_real(void) { @@ -3362,7 +4637,7 @@ String *Field_newdate::val_str(String *val_buffer, return val_buffer; } -bool Field_newdate::get_date(TIME *ltime,bool fuzzydate) +bool Field_newdate::get_date(TIME *ltime,uint fuzzydate) { if (is_null()) return 1; @@ -3370,8 +4645,8 @@ bool Field_newdate::get_date(TIME *ltime,bool fuzzydate) ltime->day= tmp & 31; ltime->month= (tmp >> 5) & 15; ltime->year= (tmp >> 9); - ltime->time_type=TIMESTAMP_DATE; - ltime->hour= ltime->minute= ltime->second= ltime->second_part= 0; + ltime->time_type= MYSQL_TIMESTAMP_DATE; + ltime->hour= ltime->minute= ltime->second= ltime->second_part= ltime->neg= 0; return (!fuzzydate && (!ltime->month || !ltime->day)) ? 1 : 0; } @@ -3397,7 +4672,7 @@ void Field_newdate::sort_string(char *to,uint length __attribute__((unused))) void Field_newdate::sql_type(String &res) const { - res.set("date",4); + res.set_ascii("date", 4); } @@ -3408,9 +4683,20 @@ void Field_newdate::sql_type(String &res) const ** Stored as a 8 byte unsigned int. Should sometimes be change to a 6 byte int. 
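An aside on the 3-byte NEWDATE packing used by the Field_newdate hunks just above (this sketch is not taken from the patch): the stored value is day + month*32 + year*512, and get_date() unpacks it with the mask/shift trio shown in the diff. A minimal standalone illustration, where pack_newdate()/unpack_newdate() are illustrative names only:

#include <cassert>
#include <cstdint>

// Pack YYYY-MM-DD the way the 3-byte NEWDATE column does:
// bits 0-4 = day, bits 5-8 = month, remaining bits = year.
static uint32_t pack_newdate(unsigned year, unsigned month, unsigned day)
{
  return day + month * 32 + year * 16 * 32;   // same as day | month<<5 | year<<9
}

static void unpack_newdate(uint32_t packed,
                           unsigned *year, unsigned *month, unsigned *day)
{
  *day=   packed & 31;          // low 5 bits
  *month= (packed >> 5) & 15;   // next 4 bits
  *year=  packed >> 9;          // the rest
}

int main()
{
  uint32_t v= pack_newdate(2004, 12, 31);
  unsigned y, m, d;
  unpack_newdate(v, &y, &m, &d);
  assert(y == 2004 && m == 12 && d == 31);
  assert(v < (1UL << 24));      // fits the int3store() slot
  return 0;
}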
****************************************************************************/ -void Field_datetime::store(const char *from,uint len) +int Field_datetime::store(const char *from,uint len,CHARSET_INFO *cs) { - longlong tmp=str_to_datetime(from,len,1); + TIME time_tmp; + int error; + ulonglong tmp= 0; + + if (str_to_datetime(from, len, &time_tmp, 1, &error) > MYSQL_TIMESTAMP_ERROR) + tmp= TIME_to_ulonglong_datetime(&time_tmp); + + if (error) + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_OUT_OF_RANGE, + from, len, MYSQL_TIMESTAMP_DATETIME, 1); + #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) { @@ -3419,26 +4705,39 @@ void Field_datetime::store(const char *from,uint len) else #endif longlongstore(ptr,tmp); + return error; } -void Field_datetime::store(double nr) +int Field_datetime::store(double nr) { + int error= 0; if (nr < 0.0 || nr > 99991231235959.0) { + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_OUT_OF_RANGE, + nr, MYSQL_TIMESTAMP_DATETIME); nr=0.0; - current_thd->cuted_fields++; + error= 1; } - Field_datetime::store((longlong) rint(nr)); + error |= Field_datetime::store((longlong) rint(nr)); + return error; } -void Field_datetime::store(longlong nr) +int Field_datetime::store(longlong nr) { TIME not_used; + int error; + longlong initial_nr= nr; - nr= fix_datetime(nr, ¬_used); - + nr= number_to_TIME(nr, ¬_used, 1, &error); + + if (error) + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_TRUNCATED, initial_nr, + MYSQL_TIMESTAMP_DATETIME, 1); + #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) { @@ -3447,18 +4746,24 @@ void Field_datetime::store(longlong nr) else #endif longlongstore(ptr,nr); + return error; } + void Field_datetime::store_time(TIME *ltime,timestamp_type type) { longlong tmp; - if (type == TIMESTAMP_DATE || type == TIMESTAMP_FULL) + /* + We don't perform range checking here since values stored in TIME + structure always fit into DATETIME range. 
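For reference (again, a standalone sketch rather than code from the patch): the 8-byte DATETIME value that store_time() composes above is the decimal-packed number YYYYMMDDhhmmss, and get_date() later splits it back with a divide/modulo by one million. pack_datetime() below is a hypothetical helper name:

#include <cassert>
#include <cstdint>

// Compose the 8-byte DATETIME value: date digits * 1e6 + time digits.
static uint64_t pack_datetime(unsigned year, unsigned month, unsigned day,
                              unsigned hour, unsigned minute, unsigned second)
{
  uint64_t date_part= year * 10000ULL + month * 100 + day;
  uint64_t time_part= hour * 10000ULL + minute * 100 + second;
  return date_part * 1000000ULL + time_part;
}

int main()
{
  uint64_t v= pack_datetime(2004, 2, 29, 23, 59, 58);
  assert(v == 20040229235958ULL);

  // The same split get_date() performs: part1 = date digits, part2 = time digits.
  uint32_t part1= (uint32_t) (v / 1000000ULL);
  uint32_t part2= (uint32_t) (v - (uint64_t) part1 * 1000000ULL);
  assert(part1 == 20040229 && part2 == 235958);
  assert(part2 / 10000 == 23 && part2 / 100 % 100 == 59 && part2 % 100 == 58);
  return 0;
}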
+ */ + if (type == MYSQL_TIMESTAMP_DATE || type == MYSQL_TIMESTAMP_DATETIME) tmp=((ltime->year*10000L+ltime->month*100+ltime->day)*LL(1000000)+ (ltime->hour*10000L+ltime->minute*100+ltime->second)); else { tmp=0; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); } #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) @@ -3470,6 +4775,13 @@ void Field_datetime::store_time(TIME *ltime,timestamp_type type) longlongstore(ptr,tmp); } +bool Field_datetime::send_binary(Protocol *protocol) +{ + TIME tm; + Field_datetime::get_date(&tm, TIME_FUZZY_DATE); + return protocol->store(&tm); +} + double Field_datetime::val_real(void) { @@ -3537,14 +4849,14 @@ String *Field_datetime::val_str(String *val_buffer, return val_buffer; } -bool Field_datetime::get_date(TIME *ltime,bool fuzzydate) +bool Field_datetime::get_date(TIME *ltime, uint fuzzydate) { longlong tmp=Field_datetime::val_int(); uint32 part1,part2; part1=(uint32) (tmp/LL(1000000)); part2=(uint32) (tmp - (ulonglong) part1*LL(1000000)); - ltime->time_type= TIMESTAMP_FULL; + ltime->time_type= MYSQL_TIMESTAMP_DATETIME; ltime->neg= 0; ltime->second_part= 0; ltime->second= (int) (part2%100); @@ -3611,7 +4923,7 @@ void Field_datetime::sort_string(char *to,uint length __attribute__((unused))) void Field_datetime::sql_type(String &res) const { - res.set("datetime",8); + res.set_ascii("datetime", 8); } /**************************************************************************** @@ -3621,39 +4933,54 @@ void Field_datetime::sql_type(String &res) const /* Copy a string and fill with space */ -void Field_string::store(const char *from,uint length) +int Field_string::store(const char *from,uint length,CHARSET_INFO *cs) { -#ifdef USE_TIS620 - if (!binary_flag) { - ThNormalize((uchar *)ptr, field_length, (uchar *)from, length); - if (length < field_length) { - bfill(ptr + length, field_length - length, ' '); - } - } -#else - if (length <= field_length) - { - memcpy(ptr,from,length); - if (length < field_length) - bfill(ptr+length,field_length-length,' '); + int error= 0, well_formed_error; + uint32 not_used; + char buff[80]; + String tmpstr(buff,sizeof(buff), &my_charset_bin); + uint copy_length; + + /* See the comment for Field_long::store(long long) */ + DBUG_ASSERT(table->in_use == current_thd); + + /* Convert character set if nesessary */ + if (String::needs_conversion(length, cs, field_charset, ¬_used)) + { + uint conv_errors; + tmpstr.copy(from, length, cs, field_charset, &conv_errors); + from= tmpstr.ptr(); + length= tmpstr.length(); + if (conv_errors) + error= 2; } - else - { - memcpy(ptr,from,field_length); - if (current_thd->count_cuted_fields) - { // Check if we loosed some info - const char *end=from+length; - for (from+=field_length ; from != end ; from++) - { - if (!isspace(*from)) - { - current_thd->cuted_fields++; - break; - } - } - } + + /* + Make sure we don't break a multibyte sequence + as well as don't copy a malformed data. 
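The copy path around this point asks the collation library (well_formed_len) for the longest prefix that both fits the column's character limit and ends on a character boundary. A rough standalone illustration of the same idea for UTF-8 only, judging lengths from lead bytes; well_formed_prefix() is a simplified stand-in for that call, not the CHARSET_INFO API, and continuation bytes are not validated:

#include <cassert>
#include <cstddef>
#include <cstring>

// Byte length of a UTF-8 sequence, judged from its lead byte
// (invalid lead bytes are treated as single bytes for simplicity).
static size_t utf8_char_len(unsigned char lead)
{
  if (lead < 0x80) return 1;
  if ((lead & 0xE0) == 0xC0) return 2;
  if ((lead & 0xF0) == 0xE0) return 3;
  if ((lead & 0xF8) == 0xF0) return 4;
  return 1;
}

// Longest prefix of 'str' (len bytes) holding at most max_chars complete
// characters -- the role well_formed_len() plays in Field_string::store().
static size_t well_formed_prefix(const char *str, size_t len, size_t max_chars)
{
  size_t pos= 0, chars= 0;
  while (pos < len && chars < max_chars)
  {
    size_t char_len= utf8_char_len((unsigned char) str[pos]);
    if (pos + char_len > len)   // sequence would be cut in half: stop before it
      break;
    pos+= char_len;
    chars++;
  }
  return pos;
}

int main()
{
  const char *s= "ab\xC3\xA9z";                      // 4 characters, 5 bytes
  assert(well_formed_prefix(s, strlen(s), 3) == 4);  // keeps "ab" + 2-byte char
  assert(well_formed_prefix(s, 3, 10) == 2);         // never splits the 2-byte char
  return 0;
}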
+ */ + copy_length= field_charset->cset->well_formed_len(field_charset, + from,from+length, + field_length/ + field_charset->mbmaxlen, + &well_formed_error); + memcpy(ptr,from,copy_length); + if (copy_length < field_length) // Append spaces if shorter + field_charset->cset->fill(field_charset,ptr+copy_length, + field_length-copy_length,' '); + + if ((copy_length < length) && table->in_use->count_cuted_fields) + { // Check if we loosed some info + const char *end=from+length; + from+= copy_length; + from+= field_charset->cset->scan(field_charset, from, end, + MY_SEQ_SPACES); + if (from != end) + error= 2; } -#endif /* USE_TIS620 */ + if (error) + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + return error; } @@ -3661,172 +4988,168 @@ void Field_string::store(const char *from,uint length) Store double value in Field_string or Field_varstring. SYNOPSIS - store_double_in_string_field() - field field to store value in - field_length number of characters in the field + store(double nr) nr number DESCRIPTION Pretty prints double number into field_length characters buffer. */ -static void store_double_in_string_field(Field_str *field, uint32 field_length, - double nr) +int Field_str::store(double nr) { - bool use_scientific_notation=TRUE; char buff[DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE]; uint length; - if (field_length < 32 && nr > 1) - { - if (field->ceiling == 0) - { - static double e[]= {1e1, 1e2, 1e4, 1e8, 1e16 }; - double p= 1; - for (int i= sizeof(e)/sizeof(e[0]), j= 1<<i ; j; i--, j>>= 1 ) - { - if (field_length & j) - p*= e[i]; - } - field->ceiling= p-1; - } - use_scientific_notation= (field->ceiling < nr); - } - length= (uint)sprintf(buff, "%-.*g", - use_scientific_notation ? max(0,(int)field_length-5) : field_length, - nr); + bool use_scientific_notation= TRUE; + uint char_length= field_length / charset()->mbmaxlen; + /* + Check fabs(nr) against longest value that can be stored in field, + which depends on whether the value is < 1 or not, and negative or not + */ + double anr= fabs(nr); + int neg= (nr < 0.0) ? 1 : 0; + if (char_length > 4 && char_length < 32 && + (anr < 1.0 ? anr > 1/(log_10[max(0,(int) char_length-neg-2)]) /* -2 for "0." */ + : anr < log_10[char_length-neg]-1)) + use_scientific_notation= FALSE; + + length= (uint) my_sprintf(buff, (buff, "%-.*g", + (use_scientific_notation ? + max(0, (int)char_length-neg-5) : + char_length), + nr)); /* +1 below is because "precision" in %g above means the max. number of significant digits, not the output width. 
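The comment around this point concerns how Field_str::store(double) chooses between plain and scientific %g output so the result fits the column width. A hedged sketch of that decision using plain snprintf(), assuming a single-byte charset (char_length == field_length) and omitting the below-1.0 special case of the original; format_double_for_char() is an illustrative name:

#include <algorithm>
#include <cmath>
#include <cstdio>

// Format 'nr' for a CHAR(char_length) column: prefer plain notation when the
// magnitude fits, otherwise reduce %g precision to leave room for "e+NN".
static int format_double_for_char(char *buff, size_t buff_size,
                                  double nr, int char_length)
{
  int neg= (nr < 0.0) ? 1 : 0;
  bool scientific= true;
  if (char_length > 4 && char_length < 32 &&
      std::fabs(nr) < std::pow(10.0, char_length - neg) - 1)
    scientific= false;                              // the digits themselves fit

  int precision= scientific ? std::max(0, char_length - neg - 5)
                            : char_length;
  return std::snprintf(buff, buff_size, "%-.*g", precision, nr);
}

int main()
{
  char buff[64];
  format_double_for_char(buff, sizeof(buff), 10.5, 10);
  std::printf("CHAR(10): '%s'\n", buff);            // plain: 10.5
  format_double_for_char(buff, sizeof(buff), 1.0e30, 10);
  std::printf("CHAR(10): '%s'\n", buff);            // scientific: 1e+30
  return 0;
}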
Thus the width can be larger than number of significant digits by 1 (for decimal point) - the test for field_length < 5 is for extreme cases, + the test for char_length < 5 is for extreme cases, like inserting 500.0 in char(1) */ - DBUG_ASSERT(field_length < 5 || length <= field_length+1); - field->store(buff, min(length, field_length)); + DBUG_ASSERT(char_length < 5 || length <= char_length+1); + return store((const char *) buff, length, charset()); } -void Field_string::store(double nr) -{ - store_double_in_string_field(this, field_length, nr); -} - -void Field_string::store(longlong nr) +int Field_string::store(longlong nr) { - char buff[22]; - char *end=longlong10_to_str(nr,buff,-10); - Field_string::store(buff,(uint) (end-buff)); + char buff[64]; + int l; + CHARSET_INFO *cs=charset(); + l= (cs->cset->longlong10_to_str)(cs,buff,sizeof(buff),-10,nr); + return Field_string::store(buff,(uint)l,cs); } double Field_string::val_real(void) { - double value; - char save=ptr[field_length]; // Ok to patch record - ptr[field_length]=0; - value=atof(ptr); - ptr[field_length]=save; - return value; + int not_used; + char *end_not_used; + CHARSET_INFO *cs=charset(); + return my_strntod(cs, ptr, field_length, &end_not_used, ¬_used); } longlong Field_string::val_int(void) { - longlong value; - char save=ptr[field_length]; // Ok to patch record - ptr[field_length]=0; - value=strtoll(ptr,NULL,10); - ptr[field_length]=save; - return value; + int not_used; + CHARSET_INFO *cs=charset(); + return my_strntoll(cs,ptr,field_length,10,NULL,¬_used); } String *Field_string::val_str(String *val_buffer __attribute__((unused)), String *val_ptr) { - char *end=ptr+field_length; -#ifdef WANT_TRUE_BINARY_STRINGS - if (!binary) -#endif - while (end > ptr && end[-1] == ' ') - end--; - val_ptr->set((const char*) ptr,(uint) (end - ptr)); + uint length= field_charset->cset->lengthsp(field_charset, ptr, field_length); + /* See the comment for Field_long::store(long long) */ + DBUG_ASSERT(table->in_use == current_thd); + val_ptr->set((const char*) ptr, length, field_charset); return val_ptr; } int Field_string::cmp(const char *a_ptr, const char *b_ptr) { - if (binary_flag) - return memcmp(a_ptr,b_ptr,field_length); -#ifdef USE_STRCOLL - if (use_strcoll(default_charset_info)) + uint a_len, b_len; + + if (field_charset->strxfrm_multiply > 1) { /* We have to remove end space to be able to compare multi-byte-characters like in latin_de 'ae' and 0xe4 */ - uint a_length= field_length_without_space(a_ptr, field_length); - uint b_length= field_length_without_space(b_ptr, field_length); - return my_strnncoll(default_charset_info, - (const uchar*) a_ptr, a_length, - (const uchar*) b_ptr, b_length); + return field_charset->coll->strnncollsp(field_charset, + (const uchar*) a_ptr, field_length, + (const uchar*) b_ptr, + field_length); } -#endif - return my_sortcmp(a_ptr,b_ptr,field_length); + if (field_charset->mbmaxlen != 1) + { + uint char_len= field_length/field_charset->mbmaxlen; + a_len= my_charpos(field_charset, a_ptr, a_ptr + field_length, char_len); + b_len= my_charpos(field_charset, b_ptr, b_ptr + field_length, char_len); + } + else + a_len= b_len= field_length; + return my_strnncoll(field_charset,(const uchar*) a_ptr, a_len, + (const uchar*) b_ptr, b_len); } + void Field_string::sort_string(char *to,uint length) { - if (binary_flag) - memcpy((byte*) to,(byte*) ptr,(size_t) length); - else - { -#ifdef USE_STRCOLL - if (use_strcoll(default_charset_info)) { - uint tmp=my_strnxfrm(default_charset_info, - (unsigned char *)to, (unsigned 
char *) ptr, - length, field_length); - if (tmp < length) - bzero(to + tmp, length - tmp); - } - else -#endif - for (char *from=ptr,*end=ptr+length ; from != end ;) - *to++=(char) my_sort_order[(uint) (uchar) *from++]; - } + uint tmp=my_strnxfrm(field_charset, + (unsigned char *) to, length, + (unsigned char *) ptr, field_length); + DBUG_ASSERT(tmp == length); } void Field_string::sql_type(String &res) const { - sprintf((char*) res.ptr(),"%s(%d)", - field_length > 3 && - (table->db_options_in_use & HA_OPTION_PACK_RECORD) ? - "varchar" : "char", - (int) field_length); - res.length((uint) strlen(res.ptr())); - if (binary_flag) + THD *thd= table->in_use; + CHARSET_INFO *cs=res.charset(); + ulong length= cs->cset->snprintf(cs,(char*) res.ptr(), + res.alloced_length(), "%s(%d)", + (field_length > 3 && + (table->db_options_in_use & + HA_OPTION_PACK_RECORD) ? + (has_charset() ? "varchar" : "varbinary") : + (has_charset() ? "char" : "binary")), + (int) field_length / charset()->mbmaxlen); + res.length(length); + if ((thd->variables.sql_mode & (MODE_MYSQL323 | MODE_MYSQL40)) && + has_charset() && (charset()->state & MY_CS_BINSORT)) res.append(" binary"); } - char *Field_string::pack(char *to, const char *from, uint max_length) { - const char *end=from+min(field_length,max_length); - uchar length; - while (end > from && end[-1] == ' ') - end--; - *to= length=(uchar) (end-from); - memcpy(to+1, from, (int) length); - return to+1+length; + uint length= min(field_length,max_length); + uint char_length= max_length/field_charset->mbmaxlen; + if (length > char_length) + char_length= my_charpos(field_charset, from, from+length, char_length); + set_if_smaller(length, char_length); + while (length && from[length-1] == ' ') + length--; + *to++= (char) (uchar) length; + if (field_length > 255) + *to++= (char) (uchar) (length >> 8); + memcpy(to, from, length); + return to+length; } const char *Field_string::unpack(char *to, const char *from) { - uint length= (uint) (uchar) *from++; + uint length; + if (field_length > 255) + { + length= uint2korr(from); + from+= 2; + } + else + length= (uint) (uchar) *from++; memcpy(to, from, (int) length); bfill(to+length, field_length - length, ' '); return from+length; @@ -3835,32 +5158,42 @@ const char *Field_string::unpack(char *to, const char *from) int Field_string::pack_cmp(const char *a, const char *b, uint length) { - uint a_length= (uint) (uchar) *a++; - uint b_length= (uint) (uchar) *b++; - - if (binary_flag) + uint a_length, b_length; + if (field_length > 255) { - int cmp= memcmp(a,b,min(a_length,b_length)); - return cmp ? cmp : (int) (a_length - b_length); + a_length= uint2korr(a); + b_length= uint2korr(b); + a+= 2; + b+= 2; } - return my_sortncmp(a,a_length, b,b_length); + else + { + a_length= (uint) (uchar) *a++; + b_length= (uint) (uchar) *b++; + } + return my_strnncoll(field_charset, + (const uchar*)a,a_length, + (const uchar*)b,b_length); } int Field_string::pack_cmp(const char *b, uint length) { - uint b_length= (uint) (uchar) *b++; + uint b_length; + if (field_length > 255) + { + b_length= uint2korr(b); + b+= 2; + } + else + b_length= (uint) (uchar) *b++; char *end= ptr + field_length; while (end > ptr && end[-1] == ' ') end--; uint a_length = (uint) (end - ptr); - - if (binary_flag) - { - int cmp= memcmp(ptr,b,min(a_length,b_length)); - return cmp ? 
cmp : (int) (a_length - b_length); - } - return my_sortncmp(ptr,a_length, b, b_length); + return my_strnncoll(field_charset, + (const uchar*)ptr,a_length, + (const uchar*)b, b_length); } @@ -3883,64 +5216,63 @@ uint Field_string::max_packed_col_length(uint max_length) ****************************************************************************/ -void Field_varstring::store(const char *from,uint length) +int Field_varstring::store(const char *from,uint length,CHARSET_INFO *cs) { -#ifdef USE_TIS620 - if (!binary_flag) - { - ThNormalize((uchar *) ptr+2, field_length, (uchar *) from, length); - } -#else - if (length <= field_length) - { - memcpy(ptr+2,from,length); + int error= 0; + uint32 not_used; + char buff[80]; + String tmpstr(buff,sizeof(buff), &my_charset_bin); + + /* Convert character set if nesessary */ + if (String::needs_conversion(length, cs, field_charset, ¬_used)) + { + uint conv_errors; + tmpstr.copy(from, length, cs, field_charset, &conv_errors); + from= tmpstr.ptr(); + length= tmpstr.length(); + if (conv_errors) + error= 2; } - else + if (length > field_length) { length=field_length; - memcpy(ptr+2,from,field_length); - current_thd->cuted_fields++; + error= 2; } -#endif /* USE_TIS620 */ - int2store(ptr,length); -} - - -void Field_varstring::store(double nr) -{ - store_double_in_string_field(this, field_length, nr); + if (error) + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + memcpy(ptr+HA_KEY_BLOB_LENGTH,from,length); + int2store(ptr, length); + return error; } -void Field_varstring::store(longlong nr) +int Field_varstring::store(longlong nr) { - char buff[22]; - char *end=longlong10_to_str(nr,buff,-10); - Field_varstring::store(buff,(uint) (end-buff)); + char buff[64]; + int l; + CHARSET_INFO *cs=charset(); + l= (cs->cset->longlong10_to_str)(cs,buff,sizeof(buff),-10,nr); + return Field_varstring::store(buff,(uint)l,cs); } double Field_varstring::val_real(void) { - double value; - uint length=uint2korr(ptr)+2; - char save=ptr[length]; // Ok to patch record - ptr[length]=0; - value=atof(ptr+2); - ptr[length]=save; - return value; + int not_used; + uint length=uint2korr(ptr)+HA_KEY_BLOB_LENGTH; + CHARSET_INFO *cs=charset(); + char *end_not_used; + return my_strntod(cs, ptr+HA_KEY_BLOB_LENGTH, length, &end_not_used, + ¬_used); } longlong Field_varstring::val_int(void) { - longlong value; - uint length=uint2korr(ptr)+2; - char save=ptr[length]; // Ok to patch record - ptr[length]=0; - value=strtoll(ptr+2,NULL,10); - ptr[length]=save; - return value; + int not_used; + uint length=uint2korr(ptr)+HA_KEY_BLOB_LENGTH; + CHARSET_INFO *cs=charset(); + return my_strntoll(cs,ptr+HA_KEY_BLOB_LENGTH,length,10,NULL, ¬_used); } @@ -3948,7 +5280,7 @@ String *Field_varstring::val_str(String *val_buffer __attribute__((unused)), String *val_ptr) { uint length=uint2korr(ptr); - val_ptr->set((const char*) ptr+2,length); + val_ptr->set((const char*) ptr+HA_KEY_BLOB_LENGTH,length,field_charset); return val_ptr; } @@ -3958,48 +5290,32 @@ int Field_varstring::cmp(const char *a_ptr, const char *b_ptr) uint a_length=uint2korr(a_ptr); uint b_length=uint2korr(b_ptr); int diff; - if (binary_flag) - diff=memcmp(a_ptr+2,b_ptr+2,min(a_length,b_length)); - else - diff=my_sortcmp(a_ptr+2,b_ptr+2,min(a_length,b_length)); + diff= my_strnncoll(field_charset, + (const uchar*) a_ptr+HA_KEY_BLOB_LENGTH, + min(a_length,b_length), + (const uchar*) b_ptr+HA_KEY_BLOB_LENGTH, + min(a_length,b_length)); return diff ? 
diff : (int) (a_length - b_length); } void Field_varstring::sort_string(char *to,uint length) { uint tot_length=uint2korr(ptr); - if (binary_flag) - memcpy((byte*) to,(byte*) ptr+2,(size_t) tot_length); - else - { -#ifdef USE_STRCOLL - if (use_strcoll(default_charset_info)) - tot_length=my_strnxfrm(default_charset_info, - (unsigned char *) to, (unsigned char *)ptr+2, - length, tot_length); - else - { -#endif - char *tmp=to; - if (tot_length > length) - tot_length=length; - for (char *from=ptr+2,*end=from+tot_length ; from != end ;) - *tmp++=(char) my_sort_order[(uint) (uchar) *from++]; -#ifdef USE_STRCOLL - } -#endif - } - if (tot_length < length) - bzero(to+tot_length,length-tot_length); + tot_length= my_strnxfrm(field_charset, + (uchar*) to, length, + (uchar*) ptr+HA_KEY_BLOB_LENGTH, + tot_length); + DBUG_ASSERT(tot_length == length); } void Field_varstring::sql_type(String &res) const { - sprintf((char*) res.ptr(),"varchar(%d)",(int) field_length); - res.length((uint) strlen(res.ptr())); - if (binary_flag) - res.append(" binary"); + CHARSET_INFO *cs=res.charset(); + ulong length= cs->cset->snprintf(cs,(char*) res.ptr(), + res.alloced_length(),"varchar(%u)", + field_length / charset()->mbmaxlen); + res.length(length); } char *Field_varstring::pack(char *to, const char *from, uint max_length) @@ -4011,7 +5327,25 @@ char *Field_varstring::pack(char *to, const char *from, uint max_length) if (max_length > 255) *to++= (char) (length >> 8); if (length) - memcpy(to, from+2, length); + memcpy(to, from+HA_KEY_BLOB_LENGTH, length); + return to+length; +} + + +char *Field_varstring::pack_key(char *to, const char *from, uint max_length) +{ + uint length=uint2korr(from); + uint char_length= (field_charset->mbmaxlen > 1) ? + max_length/field_charset->mbmaxlen : max_length; + from+=HA_KEY_BLOB_LENGTH; + if (length > char_length) + char_length= my_charpos(field_charset, from, from+length, char_length); + set_if_smaller(length, char_length); + *to++= (char) (length & 255); + if (max_length > 255) + *to++= (char) (length >> 8); + if (length) + memcpy(to, from, length); return to+length; } @@ -4031,7 +5365,7 @@ const char *Field_varstring::unpack(char *to, const char *from) to[1] = *from++; } if (length) - memcpy(to+2, from, length); + memcpy(to+HA_KEY_BLOB_LENGTH, from, length); return from+length; } @@ -4042,47 +5376,41 @@ int Field_varstring::pack_cmp(const char *a, const char *b, uint key_length) uint b_length; if (key_length > 255) { - a_length=uint2korr(a); a+=2; - b_length=uint2korr(b); b+=2; + a_length=uint2korr(a); a+= 2; + b_length=uint2korr(b); b+= 2; } else { a_length= (uint) (uchar) *a++; b_length= (uint) (uchar) *b++; } - if (binary_flag) - { - int cmp= memcmp(a,b,min(a_length,b_length)); - return cmp ? cmp : (int) (a_length - b_length); - } - return my_sortncmp(a,a_length, b,b_length); + return my_strnncoll(field_charset, + (const uchar*) a, a_length, + (const uchar*) b, b_length); } int Field_varstring::pack_cmp(const char *b, uint key_length) { - char *a=ptr+2; - uint a_length=uint2korr(ptr); + char *a= ptr+HA_KEY_BLOB_LENGTH; + uint a_length= uint2korr(ptr); uint b_length; if (key_length > 255) { - b_length=uint2korr(b); b+=2; + b_length=uint2korr(b); b+= 2; } else { b_length= (uint) (uchar) *b++; } - if (binary_flag) - { - int cmp= memcmp(a,b,min(a_length,b_length)); - return cmp ? 
cmp : (int) (a_length - b_length); - } - return my_sortncmp(a,a_length, b,b_length); + return my_strnncoll(field_charset, + (const uchar*) a, a_length, + (const uchar*) b, b_length); } uint Field_varstring::packed_col_length(const char *data_ptr, uint length) { if (length > 255) - return uint2korr(data_ptr)+2; + return uint2korr(data_ptr)+HA_KEY_BLOB_LENGTH; else return (uint) ((uchar) *data_ptr)+1; } @@ -4092,6 +5420,28 @@ uint Field_varstring::max_packed_col_length(uint max_length) return (max_length > 255 ? 2 : 1)+max_length; } +void Field_varstring::get_key_image(char *buff, uint length, CHARSET_INFO *cs, + imagetype type) +{ + uint f_length=uint2korr(ptr); + if (f_length > length) + f_length= length; + int2store(buff,length); + memcpy(buff+HA_KEY_BLOB_LENGTH, ptr+HA_KEY_BLOB_LENGTH, length); +#ifdef HAVE_purify + if (f_length < length) + bzero(buff+HA_KEY_BLOB_LENGTH+f_length, (length-f_length)); +#endif +} + +void Field_varstring::set_key_image(char *buff,uint length, CHARSET_INFO *cs) +{ + length=uint2korr(buff); // Real length is here + (void) Field_varstring::store(buff+HA_KEY_BLOB_LENGTH, length, cs); +} + + + /**************************************************************************** ** blob type ** A blob is saved as a length and a pointer. The length is stored in the @@ -4101,15 +5451,13 @@ uint Field_varstring::max_packed_col_length(uint max_length) Field_blob::Field_blob(char *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, const char *field_name_arg, struct st_table *table_arg,uint blob_pack_length, - bool binary_arg) - :Field_str(ptr_arg, (1L << min(blob_pack_length,3)*8)-1L, + CHARSET_INFO *cs) + :Field_str(ptr_arg, BLOB_PACK_LENGTH_TO_MAX_LENGH(blob_pack_length), null_ptr_arg, null_bit_arg, unireg_check_arg, field_name_arg, - table_arg), - packlength(blob_pack_length),binary_flag(binary_arg) + table_arg, cs), + packlength(blob_pack_length) { flags|= BLOB_FLAG; - if (binary_arg) - flags|=BINARY_FLAG; if (table) table->blob_fields++; } @@ -4119,19 +5467,9 @@ void Field_blob::store_length(uint32 number) { switch (packlength) { case 1: - if (number > 255) - { - number=255; - current_thd->cuted_fields++; - } ptr[0]= (uchar) number; break; case 2: - if (number > (uint16) ~0) - { - number= (uint16) ~0; - current_thd->cuted_fields++; - } #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) { @@ -4142,11 +5480,6 @@ void Field_blob::store_length(uint32 number) shortstore(ptr,(unsigned short) number); break; case 3: - if (number > (uint32) (1L << 24)) - { - number= (uint32) (1L << 24)-1L; - current_thd->cuted_fields++; - } int3store(ptr,number); break; case 4: @@ -4232,89 +5565,102 @@ void Field_blob::put_length(char *pos, uint32 length) } -void Field_blob::store(const char *from,uint len) +int Field_blob::store(const char *from,uint length,CHARSET_INFO *cs) { - if (!len) + int error= 0, well_formed_error; + if (!length) { bzero(ptr,Field_blob::pack_length()); } else { -#ifdef USE_TIS620 - char *th_ptr=0; -#endif - Field_blob::store_length(len); - if (table->copy_blobs || len <= MAX_FIELD_WIDTH) + bool was_conversion; + char buff[80]; + String tmpstr(buff,sizeof(buff), &my_charset_bin); + uint copy_length; + uint32 not_used; + + /* Convert character set if nesessary */ + if ((was_conversion= String::needs_conversion(length, cs, field_charset, + ¬_used))) + { + uint conv_errors; + tmpstr.copy(from, length, cs, field_charset, &conv_errors); + from= tmpstr.ptr(); + length= tmpstr.length(); + if (conv_errors) + error= 2; + } + + copy_length= 
max_data_length(); + /* + copy_length is ok as last argument to well_formed_len as this is never + used to limit the length of the data. The cut of long data is done with + the 'min()' call below. + */ + copy_length= field_charset->cset->well_formed_len(field_charset, + from,from + + min(length, copy_length), + copy_length, + &well_formed_error); + if (copy_length < length) + error= 2; + Field_blob::store_length(copy_length); + if (was_conversion || table->copy_blobs || copy_length <= MAX_FIELD_WIDTH) { // Must make a copy -#ifdef USE_TIS620 - if (!binary_flag) - { - /* If there isn't enough memory, use original string */ - if ((th_ptr=(char * ) my_malloc(sizeof(char) * len,MYF(0)))) - { - ThNormalize((uchar *) th_ptr, len, (uchar *) from, len); - from= (const char*) th_ptr; - } - } -#endif /* USE_TIS620 */ if (from != value.ptr()) // For valgrind { - value.copy(from, len); - from= value.ptr(); + value.copy(from,copy_length,charset()); + from=value.ptr(); } -#ifdef USE_TIS620 - my_free(th_ptr,MYF(MY_ALLOW_ZERO_PTR)); -#endif } bmove(ptr+packlength,(char*) &from,sizeof(char*)); } + if (error) + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + return 0; } -void Field_blob::store(double nr) +int Field_blob::store(double nr) { - value.set(nr); - Field_blob::store(value.ptr(),(uint) value.length()); + CHARSET_INFO *cs=charset(); + value.set(nr, 2, cs); + return Field_blob::store(value.ptr(),(uint) value.length(), cs); } -void Field_blob::store(longlong nr) +int Field_blob::store(longlong nr) { - value.set(nr); - Field_blob::store(value.ptr(), (uint) value.length()); + CHARSET_INFO *cs=charset(); + value.set(nr, cs); + return Field_blob::store(value.ptr(), (uint) value.length(), cs); } double Field_blob::val_real(void) { + int not_used; char *blob; - + char *end_not_used; memcpy_fixed(&blob,ptr+packlength,sizeof(char*)); if (!blob) return 0.0; uint32 length=get_length(ptr); - - char save=blob[length]; // Ok to patch blob in NISAM - blob[length]=0; - double nr=atof(blob); - blob[length]=save; - return nr; + CHARSET_INFO *cs=charset(); + return my_strntod(cs,blob,length, &end_not_used, ¬_used); } longlong Field_blob::val_int(void) { + int not_used; char *blob; memcpy_fixed(&blob,ptr+packlength,sizeof(char*)); if (!blob) return 0; uint32 length=get_length(ptr); - - char save=blob[length]; // Ok to patch blob in NISAM - blob[length]=0; - longlong nr=strtoll(blob,NULL,10); - blob[length]=save; - return nr; + return my_strntoll(charset(),blob,length,10,NULL,¬_used); } @@ -4324,9 +5670,9 @@ String *Field_blob::val_str(String *val_buffer __attribute__((unused)), char *blob; memcpy_fixed(&blob,ptr+packlength,sizeof(char*)); if (!blob) - val_ptr->set("",0); // A bit safer than ->length(0) + val_ptr->set("",0,charset()); // A bit safer than ->length(0) else - val_ptr->set((const char*) blob,get_length(ptr)); + val_ptr->set((const char*) blob,get_length(ptr),charset()); return val_ptr; } @@ -4334,12 +5680,10 @@ String *Field_blob::val_str(String *val_buffer __attribute__((unused)), int Field_blob::cmp(const char *a,uint32 a_length, const char *b, uint32 b_length) { - int diff; - if (binary_flag) - diff=memcmp(a,b,min(a_length,b_length)); - else - diff=my_sortcmp(a,b,min(a_length,b_length)); - return diff ? 
diff : (int) (a_length - b_length); + return field_charset->coll->strnncoll(field_charset, + (const uchar*)a, a_length, + (const uchar*)b, b_length, + 0); } @@ -4386,45 +5730,83 @@ int Field_blob::cmp_binary(const char *a_ptr, const char *b_ptr, /* The following is used only when comparing a key */ -void Field_blob::get_key_image(char *buff,uint length) +void Field_blob::get_key_image(char *buff,uint length, + CHARSET_INFO *cs, imagetype type) { - length-=HA_KEY_BLOB_LENGTH; - uint32 blob_length=get_length(ptr); + uint32 blob_length= get_length(ptr); char *blob; + +#ifdef HAVE_SPATIAL + if (type == itMBR) + { + const char *dummy; + MBR mbr; + Geometry_buffer buffer; + Geometry *gobj; + + if (blob_length < SRID_SIZE) + { + bzero(buff, SIZEOF_STORED_DOUBLE*4); + return; + } + get_ptr(&blob); + gobj= Geometry::construct(&buffer, blob, blob_length); + if (gobj->get_mbr(&mbr, &dummy)) + bzero(buff, SIZEOF_STORED_DOUBLE*4); + else + { + float8store(buff, mbr.xmin); + float8store(buff+8, mbr.xmax); + float8store(buff+16, mbr.ymin); + float8store(buff+24, mbr.ymax); + } + return; + } +#endif /*HAVE_SPATIAL*/ + + get_ptr(&blob); + uint char_length= length / cs->mbmaxlen; + char_length= my_charpos(cs, blob, blob + blob_length, char_length); + set_if_smaller(blob_length, char_length); + if ((uint32) length > blob_length) { /* Must clear this as we do a memcmp in opt_range.cc to detect identical keys */ - bzero(buff+2+blob_length, (length-blob_length)); + bzero(buff+HA_KEY_BLOB_LENGTH+blob_length, (length-blob_length)); length=(uint) blob_length; } int2store(buff,length); - get_ptr(&blob); - memcpy(buff+2,blob,length); + memcpy(buff+HA_KEY_BLOB_LENGTH, blob, length); } -void Field_blob::set_key_image(char *buff,uint length) +void Field_blob::set_key_image(char *buff,uint length, CHARSET_INFO *cs) { - length=uint2korr(buff); - Field_blob::store(buff+2,length); + length= uint2korr(buff); + (void) Field_blob::store(buff+HA_KEY_BLOB_LENGTH, length, cs); } + int Field_blob::key_cmp(const byte *key_ptr, uint max_key_length) { char *blob1; uint blob_length=get_length(ptr); - max_key_length-=2; memcpy_fixed(&blob1,ptr+packlength,sizeof(char*)); + CHARSET_INFO *cs= charset(); + uint char_length= max_key_length / cs->mbmaxlen; + char_length= my_charpos(cs, blob1, blob1+blob_length, char_length); + set_if_smaller(blob_length, char_length); return Field_blob::cmp(blob1,min(blob_length, max_key_length), - (char*) key_ptr+2,uint2korr(key_ptr)); + (char*) key_ptr+HA_KEY_BLOB_LENGTH, + uint2korr(key_ptr)); } int Field_blob::key_cmp(const byte *a,const byte *b) { - return Field_blob::cmp((char*) a+2,uint2korr(a), - (char*) b+2,uint2korr(b)); + return Field_blob::cmp((char*) a+HA_KEY_BLOB_LENGTH, uint2korr(a), + (char*) b+HA_KEY_BLOB_LENGTH, uint2korr(b)); } @@ -4432,39 +5814,17 @@ void Field_blob::sort_string(char *to,uint length) { char *blob; uint blob_length=get_length(); -#ifdef USE_STRCOLL - uint blob_org_length=blob_length; -#endif + if (!blob_length) bzero(to,length); else { - if (blob_length > length) - blob_length=length; memcpy_fixed(&blob,ptr+packlength,sizeof(char*)); - if (binary_flag) - { - memcpy(to,blob,blob_length); - to+=blob_length; - } - else - { -#ifdef USE_STRCOLL - if (use_strcoll(default_charset_info)) - { - blob_length=my_strnxfrm(default_charset_info, - (unsigned char *)to,(unsigned char *)blob, - length,blob_org_length); - if (blob_length >= length) - return; - to+=blob_length; - } - else -#endif - for (char *end=blob+blob_length ; blob != end ;) - *to++=(char) my_sort_order[(uint) (uchar) 
*blob++]; - } - bzero(to,length-blob_length); + + blob_length=my_strnxfrm(field_charset, + (uchar*) to, length, + (uchar*) blob, blob_length); + DBUG_ASSERT(blob_length == length); } } @@ -4472,14 +5832,20 @@ void Field_blob::sort_string(char *to,uint length) void Field_blob::sql_type(String &res) const { const char *str; + uint length; switch (packlength) { - default: str="tiny"; break; - case 2: str=""; break; - case 3: str="medium"; break; - case 4: str="long"; break; + default: str="tiny"; length=4; break; + case 2: str=""; length=0; break; + case 3: str="medium"; length= 6; break; + case 4: str="long"; length=4; break; + } + res.set_ascii(str,length); + if (charset() == &my_charset_bin) + res.append("blob"); + else + { + res.append("text"); } - res.set(str,(uint) strlen(str)); - res.append(binary_flag ? "blob" : "text"); } @@ -4535,12 +5901,9 @@ int Field_blob::pack_cmp(const char *a, const char *b, uint key_length) a_length= (uint) (uchar) *a++; b_length= (uint) (uchar) *b++; } - if (binary_flag) - { - int cmp= memcmp(a,b,min(a_length,b_length)); - return cmp ? cmp : (int) (a_length - b_length); - } - return my_sortncmp(a,a_length, b,b_length); + return my_strnncoll(field_charset, + (const uchar*) a, a_length, + (const uchar*) b, b_length); } @@ -4561,12 +5924,9 @@ int Field_blob::pack_cmp(const char *b, uint key_length) { b_length= (uint) (uchar) *b++; } - if (binary_flag) - { - int cmp= memcmp(a,b,min(a_length,b_length)); - return cmp ? cmp : (int) (a_length - b_length); - } - return my_sortncmp(a,a_length, b,b_length); + return my_strnncoll(field_charset, + (const uchar*) a, a_length, + (const uchar*) b, b_length); } /* Create a packed key that will be used for storage from a MySQL row */ @@ -4576,16 +5936,17 @@ char *Field_blob::pack_key(char *to, const char *from, uint max_length) char *save=ptr; ptr=(char*) from; uint32 length=get_length(); // Length of from string - if (length > max_length) - length=max_length; + uint char_length= (field_charset->mbmaxlen > 1) ? + max_length/field_charset->mbmaxlen : max_length; + if (length) + get_ptr((char**) &from); + if (length > char_length) + char_length= my_charpos(field_charset, from, from+length, char_length); + set_if_smaller(length, char_length); *to++= (uchar) length; if (max_length > 255) // 2 byte length *to++= (uchar) (length >> 8); - if (length) - { - get_ptr((char**) &from); - memcpy(to, from, length); - } + memcpy(to, from, length); ptr=save; // Restore org row pointer return to+length; } @@ -4646,7 +6007,7 @@ char *Field_blob::pack_key_from_key_image(char *to, const char *from, if (max_length > 255) *to++= (char) (length >> 8); if (length) - memcpy(to, from+2, length); + memcpy(to, from+HA_KEY_BLOB_LENGTH, length); return to+length; } @@ -4663,6 +6024,106 @@ uint Field_blob::max_packed_col_length(uint max_length) return (max_length > 255 ? 
2 : 1)+max_length; } + +#ifdef HAVE_SPATIAL + +void Field_geom::get_key_image(char *buff, uint length, CHARSET_INFO *cs, + imagetype type) +{ + char *blob; + const char *dummy; + MBR mbr; + ulong blob_length= get_length(ptr); + Geometry_buffer buffer; + Geometry *gobj; + + if (blob_length < SRID_SIZE) + { + bzero(buff, SIZEOF_STORED_DOUBLE*4); + return; + } + get_ptr(&blob); + gobj= Geometry::construct(&buffer, blob, blob_length); + if (gobj->get_mbr(&mbr, &dummy)) + bzero(buff, SIZEOF_STORED_DOUBLE*4); + else + { + float8store(buff, mbr.xmin); + float8store(buff + 8, mbr.xmax); + float8store(buff + 16, mbr.ymin); + float8store(buff + 24, mbr.ymax); + } +} + + +void Field_geom::set_key_image(char *buff, uint length, CHARSET_INFO *cs) +{ + Field_blob::set_key_image(buff, length, cs); +} + +void Field_geom::sql_type(String &res) const +{ + CHARSET_INFO *cs= &my_charset_latin1; + switch (geom_type) + { + case GEOM_POINT: + res.set("point", 5, cs); + break; + case GEOM_LINESTRING: + res.set("linestring", 10, cs); + break; + case GEOM_POLYGON: + res.set("polygon", 7, cs); + break; + case GEOM_MULTIPOINT: + res.set("multipoint", 10, cs); + break; + case GEOM_MULTILINESTRING: + res.set("multilinestring", 15, cs); + break; + case GEOM_MULTIPOLYGON: + res.set("multipolygon", 12, cs); + break; + case GEOM_GEOMETRYCOLLECTION: + res.set("geometrycollection", 18, cs); + break; + default: + res.set("geometry", 8, cs); + } +} + + +int Field_geom::store(const char *from, uint length, CHARSET_INFO *cs) +{ + if (!length) + bzero(ptr, Field_blob::pack_length()); + else + { + // Check given WKB + uint32 wkb_type; + if (length < SRID_SIZE + WKB_HEADER_SIZE + SIZEOF_STORED_DOUBLE*2) + goto err; + wkb_type= uint4korr(from + SRID_SIZE + 1); + if (wkb_type < (uint32) Geometry::wkb_point || + wkb_type > (uint32) Geometry::wkb_end) + return -1; + Field_blob::store_length(length); + if (table->copy_blobs || length <= MAX_FIELD_WIDTH) + { // Must make a copy + value.copy(from, length, cs); + from= value.ptr(); + } + bmove(ptr + packlength, (char*) &from, sizeof(char*)); + } + return 0; + +err: + bzero(ptr, Field_blob::pack_length()); + return -1; +} + +#endif /*HAVE_SPATIAL*/ + /**************************************************************************** ** enum type. ** This is a string which only can have a selection of different values. @@ -4718,73 +6179,68 @@ void Field_enum::store_type(ulonglong value) } -uint find_enum(TYPELIB *lib,const char *x, uint length) -{ - const char *end=x+length; - while (end > x && isspace(end[-1])) - end--; - - const char *i; - const char *j; - for (uint pos=0 ; (j=lib->type_names[pos]) ; pos++) - { - for (i=x ; i != end && toupper(*i) == toupper(*j) ; i++, j++) ; - if (i == end && ! *j) - return(pos+1); - } - return(0); -} - - /* ** Note. 
Storing a empty string in a enum field gives a warning ** (if there isn't a empty value in the enum) */ -void Field_enum::store(const char *from,uint length) +int Field_enum::store(const char *from,uint length,CHARSET_INFO *cs) { - uint tmp=find_enum(typelib,from,length); + int err= 0; + uint32 not_used; + char buff[80]; + String tmpstr(buff,sizeof(buff), &my_charset_bin); + + /* Convert character set if nesessary */ + if (String::needs_conversion(length, cs, field_charset, ¬_used)) + { + uint dummy_errors; + tmpstr.copy(from, length, cs, field_charset, &dummy_errors); + from= tmpstr.ptr(); + length= tmpstr.length(); + } + + /* Remove end space */ + length= field_charset->cset->lengthsp(field_charset, from, length); + uint tmp=find_type2(typelib, from, length, field_charset); if (!tmp) { if (length < 6) // Can't be more than 99999 enums { /* This is for reading numbers with LOAD DATA INFILE */ - char buff[7], *end; - const char *conv=from; - if (from[length]) - { - strmake(buff, from, length); - conv=buff; - } - my_errno=0; - tmp=(uint) strtoul(conv,&end,10); - if (my_errno || end != conv+length || tmp > typelib->count) + char *end; + tmp=(uint) my_strntoul(cs,from,length,10,&end,&err); + if (err || end != from+length || tmp > typelib->count) { tmp=0; - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); } } else - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); } store_type((ulonglong) tmp); + return err; } -void Field_enum::store(double nr) +int Field_enum::store(double nr) { - Field_enum::store((longlong) nr); + return Field_enum::store((longlong) nr); } -void Field_enum::store(longlong nr) +int Field_enum::store(longlong nr) { + int error= 0; if ((uint) nr > typelib->count || nr == 0) { - current_thd->cuted_fields++; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); nr=0; + error=1; } store_type((ulonglong) (uint) nr); + return error; } @@ -4844,10 +6300,11 @@ String *Field_enum::val_str(String *val_buffer __attribute__((unused)), { uint tmp=(uint) Field_enum::val_int(); if (!tmp || tmp > typelib->count) - val_ptr->set("",0); + val_ptr->set("", 0, field_charset); else val_ptr->set((const char*) typelib->type_names[tmp-1], - (uint) strlen(typelib->type_names[tmp-1])); + typelib->type_lengths[tmp-1], + field_charset); return val_ptr; } @@ -4876,97 +6333,89 @@ void Field_enum::sort_string(char *to,uint length __attribute__((unused))) void Field_enum::sql_type(String &res) const { + char buffer[255]; + String enum_item(buffer, sizeof(buffer), res.charset()); + res.length(0); res.append("enum("); bool flag=0; - for (const char **pos=typelib->type_names; *pos ; pos++) + uint *len= typelib->type_lengths; + for (const char **pos= typelib->type_names; *pos; pos++, len++) { + uint dummy_errors; if (flag) res.append(','); - res.append('\''); - append_unescaped(&res,*pos); - res.append('\''); - flag=1; + /* convert to res.charset() == utf8, then quote */ + enum_item.copy(*pos, *len, charset(), res.charset(), &dummy_errors); + append_unescaped(&res, enum_item.ptr(), enum_item.length()); + flag= 1; } res.append(')'); } -/**************************************************************************** -** set type. -** This is a string which can have a collection of different values. -** Each string value is separated with a ','. -** For example "One,two,five" -** If one uses this string in a number context one gets the bits as a longlong -** number. 
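As the rewritten comment below spells out, a SET column is a bitmask over its member list: member i contributes bit 1<<i, and the whole value is capped at (1<<count)-1, which is what Field_set::store() enforces. A compact standalone sketch of that mapping, not taken from the patch; find_set_bits()/set_to_string() are illustrative names, and trailing-space trimming plus charset conversion are left out:

#include <cassert>
#include <cstdint>
#include <sstream>
#include <string>
#include <vector>

// Comma-separated SET literal -> bitmask; unknown members are simply skipped
// (the server raises a truncation warning instead).
static uint64_t find_set_bits(const std::vector<std::string> &members,
                              const std::string &value)
{
  uint64_t bits= 0;
  std::stringstream ss(value);
  std::string item;
  while (std::getline(ss, item, ','))
    for (size_t i= 0; i < members.size(); i++)
      if (item == members[i])
        bits|= 1ULL << i;
  return bits;
}

// Bitmask -> the string form val_str() builds, members joined by ','.
static std::string set_to_string(const std::vector<std::string> &members,
                                 uint64_t bits)
{
  std::string out;
  for (size_t i= 0; i < members.size(); i++)
    if (bits & (1ULL << i))
    {
      if (!out.empty()) out+= ',';
      out+= members[i];
    }
  return out;
}

int main()
{
  std::vector<std::string> members;
  members.push_back("One");
  members.push_back("two");
  members.push_back("five");
  uint64_t bits= find_set_bits(members, "five,One");
  assert(bits == 5);                               // bit 0 (One) + bit 2 (five)
  assert(set_to_string(members, bits) == "One,five");
  assert(bits <= (1ULL << members.size()) - 1);    // always within the cap
  return 0;
}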
-****************************************************************************/ +/* + set type. + This is a string which can have a collection of different values. + Each string value is separated with a ','. + For example "One,two,five" + If one uses this string in a number context one gets the bits as a longlong + number. +*/ + -ulonglong find_set(TYPELIB *lib,const char *x,uint length) +int Field_set::store(const char *from,uint length,CHARSET_INFO *cs) { - const char *end=x+length; - while (end > x && isspace(end[-1])) - end--; + bool got_warning= 0; + int err= 0; + char *not_used; + uint not_used2; + uint32 not_used_offset; + char buff[80]; + String tmpstr(buff,sizeof(buff), &my_charset_bin); - ulonglong found=0; - if (x != end) - { - const char *start=x; - bool error=0; - for (;;) - { - const char *pos=start; - for (; pos != end && *pos != field_separator ; pos++) ; - uint find=find_enum(lib,start,(uint) (pos-start)); - if (!find) - error=1; - else - found|= ((longlong) 1 << (find-1)); - if (pos == end) - break; - start=pos+1; - } - if (error) - current_thd->cuted_fields++; + /* Convert character set if nesessary */ + if (String::needs_conversion(length, cs, field_charset, ¬_used_offset)) + { + uint dummy_errors; + tmpstr.copy(from, length, cs, field_charset, &dummy_errors); + from= tmpstr.ptr(); + length= tmpstr.length(); } - return found; -} - - -void Field_set::store(const char *from,uint length) -{ - ulonglong tmp=find_set(typelib,from,length); + ulonglong tmp= find_set(typelib, from, length, field_charset, + ¬_used, ¬_used2, &got_warning); if (!tmp && length && length < 22) { /* This is for reading numbers with LOAD DATA INFILE */ - char buff[22], *end; - const char *conv=from; - if (from[length]) + char *end; + tmp=my_strntoull(cs,from,length,10,&end,&err); + if (err || end != from+length || + tmp > (ulonglong) (((longlong) 1 << typelib->count) - (longlong) 1)) { - strmake(buff, from, length); - conv=buff; + tmp=0; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); } - my_errno=0; - tmp=strtoull(conv,&end,10); - if (my_errno || end != conv+length || - tmp > (ulonglong) (((longlong) 1 << typelib->count) - (longlong) 1)) - tmp=0; - else - current_thd->cuted_fields--; // Remove warning from find_set } + else if (got_warning) + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); store_type(tmp); + return err; } -void Field_set::store(longlong nr) +int Field_set::store(longlong nr) { + int error= 0; if ((ulonglong) nr > (ulonglong) (((longlong) 1 << typelib->count) - (longlong) 1)) { - nr&= (longlong) (((longlong) 1 << typelib->count) - (longlong) 1); - current_thd->cuted_fields++; + nr&= (longlong) (((longlong) 1 << typelib->count) - (longlong) 1); + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + error=1; } store_type((ulonglong) nr); + return error; } @@ -4977,14 +6426,16 @@ String *Field_set::val_str(String *val_buffer, uint bitnr=0; val_buffer->length(0); + val_buffer->set_charset(field_charset); while (tmp && bitnr < (uint) typelib->count) { if (tmp & 1) { if (val_buffer->length()) - val_buffer->append(field_separator); + val_buffer->append(&field_separator, 1, &my_charset_latin1); String str(typelib->type_names[bitnr], - (uint) strlen(typelib->type_names[bitnr])); + typelib->type_lengths[bitnr], + field_charset); val_buffer->append(str); } tmp>>=1; @@ -4996,18 +6447,23 @@ String *Field_set::val_str(String *val_buffer, void Field_set::sql_type(String &res) const { + char buffer[255]; + String set_item(buffer, 
sizeof(buffer), res.charset()); + res.length(0); res.append("set("); bool flag=0; - for (const char **pos=typelib->type_names; *pos ; pos++) + uint *len= typelib->type_lengths; + for (const char **pos= typelib->type_names; *pos; pos++, len++) { + uint dummy_errors; if (flag) res.append(','); - res.append('\''); - append_unescaped(&res,*pos); - res.append('\''); - flag=1; + /* convert to res.charset() == utf8, then quote */ + set_item.copy(*pos, *len, charset(), res.charset(), &dummy_errors); + append_unescaped(&res, set_item.ptr(), set_item.length()); + flag= 1; } res.append(')'); } @@ -5016,7 +6472,7 @@ void Field_set::sql_type(String &res) const bool Field::eq_def(Field *field) { - if (real_type() != field->real_type() || binary() != field->binary() || + if (real_type() != field->real_type() || charset() != field->charset() || pack_length() != field->pack_length()) return 0; return 1; @@ -5031,7 +6487,11 @@ bool Field_enum::eq_def(Field *field) if (typelib->count < from_lib->count) return 0; for (uint i=0 ; i < from_lib->count ; i++) - if (my_strcasecmp(typelib->type_names[i],from_lib->type_names[i])) + if (my_strnncoll(field_charset, + (const uchar*)typelib->type_names[i], + strlen(typelib->type_names[i]), + (const uchar*)from_lib->type_names[i], + strlen(from_lib->type_names[i]))) return 0; return 1; } @@ -5055,6 +6515,39 @@ bool Field_num::eq_def(Field *field) *****************************************************************************/ /* + Convert create_field::length from number of characters to number of bytes + + SYNOPSIS + create_field::create_length_to_internal_length() + + DESCRIPTION + Convert create_field::length from number of characters to number of bytes. +*/ + +void create_field::create_length_to_internal_length(void) +{ + switch (sql_type) { + case MYSQL_TYPE_TINY_BLOB: + case MYSQL_TYPE_MEDIUM_BLOB: + case MYSQL_TYPE_LONG_BLOB: + case MYSQL_TYPE_BLOB: + case MYSQL_TYPE_VAR_STRING: + case MYSQL_TYPE_STRING: + length*= charset->mbmaxlen; + pack_length= calc_pack_length(sql_type == FIELD_TYPE_VAR_STRING ? 
+ FIELD_TYPE_STRING : sql_type, length); + break; + case MYSQL_TYPE_ENUM: + case MYSQL_TYPE_SET: + length*= charset->mbmaxlen; + break; + default: + /* do nothing */ + break; + } +} + +/* Make a field from the .frm file info */ @@ -5063,7 +6556,7 @@ uint32 calc_pack_length(enum_field_types type,uint32 length) switch (type) { case FIELD_TYPE_STRING: case FIELD_TYPE_DECIMAL: return (length); - case FIELD_TYPE_VAR_STRING: return (length+2); + case FIELD_TYPE_VAR_STRING: return (length+HA_KEY_BLOB_LENGTH); case FIELD_TYPE_YEAR: case FIELD_TYPE_TINY : return 1; case FIELD_TYPE_SHORT : return 2; @@ -5082,11 +6575,12 @@ uint32 calc_pack_length(enum_field_types type,uint32 length) case FIELD_TYPE_BLOB: return 2+portable_sizeof_char_ptr; case FIELD_TYPE_MEDIUM_BLOB: return 3+portable_sizeof_char_ptr; case FIELD_TYPE_LONG_BLOB: return 4+portable_sizeof_char_ptr; + case FIELD_TYPE_GEOMETRY: return 4+portable_sizeof_char_ptr; case FIELD_TYPE_SET: case FIELD_TYPE_ENUM: abort(); return 0; // This shouldn't happen default: return 0; } - return 0; // This shouldn't happen + return 0; // Keep compiler happy } @@ -5107,6 +6601,8 @@ Field *make_field(char *ptr, uint32 field_length, uchar *null_pos, uchar null_bit, uint pack_flag, enum_field_types field_type, + CHARSET_INFO *field_charset, + Field::geometry_type geom_type, Field::utype unireg_check, TYPELIB *interval, const char *field_name, @@ -5117,34 +6613,55 @@ Field *make_field(char *ptr, uint32 field_length, null_pos=0; null_bit=0; } + + switch (field_type) + { + case FIELD_TYPE_DATE: + case FIELD_TYPE_NEWDATE: + case FIELD_TYPE_TIME: + case FIELD_TYPE_DATETIME: + case FIELD_TYPE_TIMESTAMP: + field_charset= &my_charset_bin; + default: break; + } + if (f_is_alpha(pack_flag)) { if (!f_is_packed(pack_flag)) - return new Field_string(ptr,field_length,null_pos,null_bit, - unireg_check, field_name, table, - f_is_binary(pack_flag) != 0); + { + if (field_type == FIELD_TYPE_STRING || + field_type == FIELD_TYPE_DECIMAL || // 3.23 or 4.0 string + field_type == FIELD_TYPE_VAR_STRING) + return new Field_string(ptr,field_length,null_pos,null_bit, + unireg_check, field_name, table, + field_charset); + return 0; // Error + } uint pack_length=calc_pack_length((enum_field_types) f_packtype(pack_flag), field_length); +#ifdef HAVE_SPATIAL + if (f_is_geom(pack_flag)) + return new Field_geom(ptr,null_pos,null_bit, + unireg_check, field_name, table, + pack_length, geom_type); +#endif if (f_is_blob(pack_flag)) return new Field_blob(ptr,null_pos,null_bit, unireg_check, field_name, table, - pack_length,f_is_binary(pack_flag) != 0); - if (f_is_geom(pack_flag)) - return 0; - + pack_length, field_charset); if (interval) { if (f_is_enum(pack_flag)) return new Field_enum(ptr,field_length,null_pos,null_bit, unireg_check, field_name, table, - pack_length, interval); + pack_length, interval, field_charset); else return new Field_set(ptr,field_length,null_pos,null_bit, unireg_check, field_name, table, - pack_length, interval); + pack_length, interval, field_charset); } } @@ -5193,29 +6710,30 @@ Field *make_field(char *ptr, uint32 field_length, f_is_zerofill(pack_flag) != 0, f_is_dec(pack_flag) == 0); case FIELD_TYPE_TIMESTAMP: - return new Field_timestamp(ptr,field_length, - unireg_check, field_name, table); + return new Field_timestamp(ptr,field_length, null_pos, null_bit, + unireg_check, field_name, table, + field_charset); case FIELD_TYPE_YEAR: return new Field_year(ptr,field_length,null_pos,null_bit, unireg_check, field_name, table); case FIELD_TYPE_DATE: return new 
Field_date(ptr,null_pos,null_bit, - unireg_check, field_name, table); + unireg_check, field_name, table, field_charset); case FIELD_TYPE_NEWDATE: return new Field_newdate(ptr,null_pos,null_bit, - unireg_check, field_name, table); + unireg_check, field_name, table, field_charset); case FIELD_TYPE_TIME: return new Field_time(ptr,null_pos,null_bit, - unireg_check, field_name, table); + unireg_check, field_name, table, field_charset); case FIELD_TYPE_DATETIME: return new Field_datetime(ptr,null_pos,null_bit, - unireg_check, field_name, table); + unireg_check, field_name, table, field_charset); case FIELD_TYPE_NULL: - return new Field_null(ptr,field_length,unireg_check,field_name,table); + return new Field_null(ptr,field_length,unireg_check,field_name,table, field_charset); default: // Impossible (Wrong version) break; } - return 0; // Impossible + return 0; } @@ -5230,14 +6748,41 @@ create_field::create_field(Field *old_field,Field *orig_field) unireg_check=old_field->unireg_check; pack_length=old_field->pack_length(); sql_type= old_field->real_type(); + charset= old_field->charset(); // May be NULL ptr + comment= old_field->comment; /* Fix if the original table had 4 byte pointer blobs */ if (flags & BLOB_FLAG) pack_length= (pack_length- old_field->table->blob_ptr_size + portable_sizeof_char_ptr); + + switch (sql_type) + { + case FIELD_TYPE_BLOB: + switch (pack_length - portable_sizeof_char_ptr) + { + case 1: sql_type= FIELD_TYPE_TINY_BLOB; break; + case 2: sql_type= FIELD_TYPE_BLOB; break; + case 3: sql_type= FIELD_TYPE_MEDIUM_BLOB; break; + default: sql_type= FIELD_TYPE_LONG_BLOB; break; + } + length=(length+charset->mbmaxlen-1)/charset->mbmaxlen; // QQ: Probably not needed + break; + case MYSQL_TYPE_ENUM: + case MYSQL_TYPE_SET: + case FIELD_TYPE_STRING: + case FIELD_TYPE_VAR_STRING: + length=(length+charset->mbmaxlen-1)/charset->mbmaxlen; + break; + default: + break; + } + + char_length= length; decimals= old_field->decimals(); if (sql_type == FIELD_TYPE_STRING) { + /* Change CHAR -> VARCHAR if dynamic record length */ sql_type=old_field->type(); decimals=0; } @@ -5250,19 +6795,173 @@ create_field::create_field(Field *old_field,Field *orig_field) old_field->ptr && orig_field) { char buff[MAX_FIELD_WIDTH],*pos; - String tmp(buff,sizeof(buff)); + String tmp(buff,sizeof(buff), charset); - /* Get the value from record[2] (the default value row) */ + /* Get the value from default_values */ my_ptrdiff_t diff= (my_ptrdiff_t) (orig_field->table->rec_buff_length*2); - orig_field->move_field(diff); // Points now at record[2] + orig_field->move_field(diff); // Points now at default_values bool is_null=orig_field->is_real_null(); - orig_field->val_str(&tmp,&tmp); + orig_field->val_str(&tmp); orig_field->move_field(-diff); // Back to record[0] if (!is_null) { pos= (char*) sql_memdup(tmp.ptr(),tmp.length()+1); pos[tmp.length()]=0; - def=new Item_string(pos,tmp.length()); + def= new Item_string(pos, tmp.length(), charset); } } +#ifdef HAVE_SPATIAL + if (sql_type == FIELD_TYPE_GEOMETRY) + { + geom_type= ((Field_geom*)old_field)->geom_type; + } +#endif +} + + +/* Warning handling */ + +/* + Produce warning or note about data saved into field + + SYNOPSIS + set_warning() + level - level of message (Note/Warning/Error) + code - error code of message to be produced + cuted_increment - whether we should increase the cut fields count or not + + NOTE + This function won't produce a warning or increase the cut fields counter + if count_cuted_fields == FIELD_CHECK_IGNORE for current thread.
+ + RETURN VALUE + true - if count_cuted_fields == FIELD_CHECK_IGNORE + false - otherwise +*/ +bool +Field::set_warning(const uint level, const uint code, int cuted_increment) +{ + THD *thd= table->in_use; + if (thd->count_cuted_fields) + { + thd->cuted_fields+= cuted_increment; + push_warning_printf(thd, (MYSQL_ERROR::enum_warning_level) level, + code, ER(code), field_name, thd->row_count); + return 0; + } + return 1; +} + + +/* + Produce warning or note about datetime string data saved into field + + SYNOPSIS + set_datetime_warning() + level - level of message (Note/Warning/Error) + code - error code of message to be produced + str - string value which we tried to save + str_len - length of string which we tried to save + ts_type - type of datetime value (datetime/date/time) + cuted_increment - whether we should increase the cut fields count or not + + NOTE + This function will always produce some warning but won't increase cut + fields counter if count_cuted_fields == FIELD_CHECK_IGNORE for current + thread. +*/ +void +Field::set_datetime_warning(const uint level, const uint code, + const char *str, uint str_length, + timestamp_type ts_type, int cuted_increment) +{ + if (set_warning(level, code, cuted_increment)) + make_truncated_value_warning(table->in_use, str, str_length, ts_type); +} + + +/* + Produce warning or note about integer datetime value saved into field + + SYNOPSIS + set_datetime_warning() + level - level of message (Note/Warning/Error) + code - error code of message to be produced + nr - numeric value which we tried to save + ts_type - type of datetime value (datetime/date/time) + cuted_increment - whether we should increase the cut fields count or not + + NOTE + This function will always produce some warning but won't increase cut + fields counter if count_cuted_fields == FIELD_CHECK_IGNORE for current + thread. +*/ +void +Field::set_datetime_warning(const uint level, const uint code, + longlong nr, timestamp_type ts_type, + int cuted_increment) +{ + if (set_warning(level, code, cuted_increment)) + { + char str_nr[22]; + char *str_end= longlong10_to_str(nr, str_nr, -10); + make_truncated_value_warning(table->in_use, str_nr, str_end - str_nr, + ts_type); + } +} + + +/* + Produce warning or note about double datetime data saved into field + + SYNOPSIS + set_datetime_warning() + level - level of message (Note/Warning/Error) + code - error code of message to be produced + nr - double value which we tried to save + ts_type - type of datetime value (datetime/date/time) + + NOTE + This function will always produce some warning but won't increase cut + fields counter if count_cuted_fields == FIELD_CHECK_IGNORE for current + thread.
+*/ +void +Field::set_datetime_warning(const uint level, const uint code, + double nr, timestamp_type ts_type) +{ + if (set_warning(level, code, 1)) + { + /* DBL_DIG is enough to print '-[digits].E+###' */ + char str_nr[DBL_DIG + 8]; + uint str_len= my_sprintf(str_nr, (str_nr, "%g", nr)); + make_truncated_value_warning(table->in_use, str_nr, str_len, ts_type); + } +} + +/* + maximum possible display length for blob + + SYNOPSIS + Field_blob::max_length() + + RETURN + length +*/ +uint32 Field_blob::max_length() +{ + switch (packlength) + { + case 1: + return 255 * field_charset->mbmaxlen; + case 2: + return 65535 * field_charset->mbmaxlen; + case 3: + return 16777215 * field_charset->mbmaxlen; + case 4: + return (uint32) 4294967295U; + default: + DBUG_ASSERT(0); // we should never go here + return 0; + } } diff --git a/sql/field.h b/sql/field.h index 3e258f81dcc..966549516b1 100644 --- a/sql/field.h +++ b/sql/field.h @@ -20,33 +20,74 @@ variables must declare the size_of() member function. */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_INTERFACE #pragma interface /* gcc class implementation */ #endif #define NOT_FIXED_DEC 31 +#define DATETIME_DEC 6 class Send_field; +class Protocol; struct st_cache_field; void field_conv(Field *to,Field *from); -class Field { +inline uint get_enum_pack_length(int elements) +{ + return elements < 256 ? 1 : 2; +} + +inline uint get_set_pack_length(int elements) +{ + uint len= (elements + 7) / 8; + return len > 4 ? 8 : len; +} + +class Field +{ Field(const Item &); /* Prevent use of these */ void operator=(Field &); public: static void *operator new(size_t size) {return (void*) sql_alloc((uint) size); } - static void operator delete(void *ptr_arg, size_t size) {} /*lint -e715 */ + static void operator delete(void *ptr_arg, size_t size) { +#ifdef SAFEMALLOC + bfill(ptr_arg, size, 0x8F); +#endif + } char *ptr; // Position to field in record uchar *null_ptr; // Byte where null_bit is + /* + Note that you can use table->in_use as replacement for current_thd member + only inside of val_*() and store() members (e.g. you can't use it in cons) + */ struct st_table *table; // Pointer for table + struct st_table *orig_table; // Pointer to original table const char *table_name,*field_name; + LEX_STRING comment; ulong query_id; // For quick test of used fields /* Field is part of the following keys */ - key_map key_start,part_of_key,part_of_sortkey; + key_map key_start,part_of_key,part_of_sortkey; + /* + We use three additional unireg types for TIMESTAMP to overcome limitation + of current binary format of .frm file. We'd like to be able to support + NOW() as default and on update value for such fields but unable to hold + this info anywhere except unireg_check field. This issue will be resolved + in more clean way with transition to new text based .frm format. + See also comment for Field_timestamp::Field_timestamp(). 
+ */ enum utype { NONE,DATE,SHIELD,NOEMPTY,CASEUP,PNR,BGNR,PGNR,YES,NO,REL, CHECK,EMPTY,UNKNOWN_FIELD,CASEDN,NEXT_NUMBER,INTERVAL_FIELD, - BIT_FIELD, TIMESTAMP_FIELD,CAPITALIZE,BLOB_FIELD}; + BIT_FIELD, TIMESTAMP_OLD_FIELD, CAPITALIZE, BLOB_FIELD, + TIMESTAMP_DN_FIELD, TIMESTAMP_UN_FIELD, TIMESTAMP_DNUN_FIELD}; + enum geometry_type + { + GEOM_GEOMETRY = 0, GEOM_POINT = 1, GEOM_LINESTRING = 2, GEOM_POLYGON = 3, + GEOM_MULTIPOINT = 4, GEOM_MULTILINESTRING = 5, GEOM_MULTIPOLYGON = 6, + GEOM_GEOMETRYCOLLECTION = 7 + }; + enum imagetype { itRAW, itMBR}; + utype unireg_check; uint32 field_length; // Length of field uint16 flags; @@ -56,15 +97,32 @@ public: utype unireg_check_arg, const char *field_name_arg, struct st_table *table_arg); virtual ~Field() {} - virtual void store(const char *to,uint length)=0; - virtual void store(double nr)=0; - virtual void store(longlong nr)=0; + /* Store functions returns 1 on overflow and -1 on fatal error */ + virtual int store(const char *to,uint length,CHARSET_INFO *cs)=0; + virtual int store(double nr)=0; + virtual int store(longlong nr)=0; virtual void store_time(TIME *ltime,timestamp_type t_type); virtual double val_real(void)=0; virtual longlong val_int(void)=0; + inline String *val_str(String *str) { return val_str(str, str); } + /* + val_str(buf1, buf2) gets two buffers and should use them as follows: + if it needs a temp buffer to convert result to string - use buf1 + example Field_tiny::val_str() + if the value exists as a string already - use buf2 + example Field_string::val_str() + consequently, buf2 may be created as 'String buf;' - no memory + will be allocated for it. buf1 will be allocated to hold a + value if it's too small. Using allocated buffer for buf2 may result in + an unnecessary free (and later, may be an alloc). + This trickery is used to decrease a number of malloc calls. + */ virtual String *val_str(String*,String *)=0; virtual Item_result result_type () const=0; virtual Item_result cmp_type () const { return result_type(); } + virtual Item_result cast_to_int_type () const { return result_type(); } + static enum_field_types field_type_merge(enum_field_types, enum_field_types); + static Item_result result_merge_type(enum_field_types); bool eq(Field *field) { return ptr == field->ptr && null_ptr == field->null_ptr; } virtual bool eq_def(Field *field); virtual uint32 pack_length() const { return (uint32) field_length; } @@ -72,7 +130,7 @@ public: virtual void reset_fields() {} virtual void set_default() { - my_ptrdiff_t offset = (my_ptrdiff_t) (table->record[2] - + my_ptrdiff_t offset = (my_ptrdiff_t) (table->default_values - table->record[0]); memcpy(ptr, ptr + offset, pack_length()); if (null_ptr) @@ -109,6 +167,13 @@ public: { return null_ptr ? (null_ptr[row_offset] & null_bit ? 1 : 0) : table->null_row; } inline bool is_real_null(uint row_offset=0) { return null_ptr ? (null_ptr[row_offset] & null_bit ? 
1 : 0) : 0; } + inline bool is_null_in_record(const uchar *record) + { + if (!null_ptr) + return 0; + return test(record[(uint) (null_ptr - (uchar*) table->record[0])] & + null_bit); + } inline void set_null(int row_offset=0) { if (null_ptr) null_ptr[row_offset]|= null_bit; } inline void set_notnull(int row_offset=0) @@ -117,8 +182,16 @@ public: inline bool real_maybe_null(void) { return null_ptr != 0; } virtual void make_field(Send_field *)=0; virtual void sort_string(char *buff,uint length)=0; - virtual bool optimize_range(uint idx); - virtual bool store_for_compare() { return 0; } + virtual bool optimize_range(uint idx, uint part); + /* + This should be true for fields which, when compared with constant + items, can be casted to longlong. In this case we will at 'fix_fields' + stage cast the constant items to longlongs and at the execution stage + use field->val_int() for comparison. Used to optimize clauses like + 'a_column BETWEEN date_const, date_const'. + */ + virtual bool can_be_compared_as_longlong() const { return FALSE; } + virtual void free() {} Field *new_field(MEM_ROOT *root, struct st_table *new_table) { Field *tmp= (Field*) memdup_root(root,(char*) this,size_of()); @@ -126,10 +199,16 @@ public: { if (tmp->table->maybe_null) tmp->flags&= ~NOT_NULL_FLAG; - tmp->table=new_table; - tmp->key_start=tmp->part_of_key=tmp->part_of_sortkey=0; + tmp->table= new_table; + tmp->key_start.init(0); + tmp->part_of_key.init(0); + tmp->part_of_sortkey.init(0); tmp->unireg_check=Field::NONE; - tmp->flags&= (NOT_NULL_FLAG | BLOB_FLAG | UNSIGNED_FLAG | ZEROFILL_FLAG | BINARY_FLAG | ENUM_FLAG | SET_FLAG); + tmp->flags&= (NOT_NULL_FLAG | BLOB_FLAG | UNSIGNED_FLAG | + ZEROFILL_FLAG | BINARY_FLAG | ENUM_FLAG | SET_FLAG); +#ifdef PROBABLY_WRONG + tmp->table_name= new_table->table_name; +#endif tmp->reset_fields(); } return tmp; @@ -145,21 +224,15 @@ public: if (null_ptr) null_ptr=ADD_TO_PTR(null_ptr,ptr_diff,uchar*); } - inline void get_image(char *buff,uint length) + inline void get_image(char *buff,uint length, CHARSET_INFO *cs) { memcpy(buff,ptr,length); } - inline void set_image(char *buff,uint length) + inline void set_image(char *buff,uint length, CHARSET_INFO *cs) { memcpy(ptr,buff,length); } - virtual void get_key_image(char *buff,uint length) - { get_image(buff,length); } - virtual void set_key_image(char *buff,uint length) - { set_image(buff,length); } - inline int cmp_image(char *buff,uint length) - { - if (binary()) - return memcmp(ptr,buff,length); - else - return my_casecmp(ptr,buff,length); - } + virtual void get_key_image(char *buff,uint length, CHARSET_INFO *cs, + imagetype type) + { get_image(buff,length,cs); } + virtual void set_key_image(char *buff,uint length, CHARSET_INFO *cs) + { set_image(buff,length,cs); } inline longlong val_int_offset(uint row_offset) { ptr+=row_offset; @@ -167,7 +240,7 @@ public: ptr-=row_offset; return tmp; } - bool send(THD *thd, String *packet); + virtual bool send_binary(Protocol *protocol); virtual char *pack(char* to, const char *from, uint max_length=~(uint) 0) { uint32 length=pack_length(); @@ -205,8 +278,24 @@ public: uint offset(); // Should be inline ... 
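/*
  [Editor's note, not part of the original patch] The val_str(buf1, buf2)
  contract documented a few lines up is easiest to see from the caller's
  side. A minimal sketch, assuming a Field *field positioned on the current
  row and the String class from sql_string.h:

    char buff[MAX_FIELD_WIDTH];
    String tmp(buff, sizeof(buff), field->charset());
    String *res= field->val_str(&tmp);   // one-arg wrapper passes tmp twice

  res either points at tmp (numeric fields render their value into buf1) or
  the String is made to point straight at the field's own record data
  (Field_string uses buf2), so nothing is heap-allocated unless the value
  does not fit in buff.
*/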
void copy_from_tmp(int offset); uint fill_cache_field(struct st_cache_field *copy); - virtual bool get_date(TIME *ltime,bool fuzzydate); + virtual bool get_date(TIME *ltime,uint fuzzydate); virtual bool get_time(TIME *ltime); + virtual CHARSET_INFO *charset(void) const { return &my_charset_bin; } + virtual CHARSET_INFO *sort_charset(void) const { return charset(); } + virtual bool has_charset(void) const { return FALSE; } + virtual void set_charset(CHARSET_INFO *charset) { } + bool set_warning(const unsigned int level, const unsigned int code, + int cuted_increment); + void set_datetime_warning(const uint level, const uint code, + const char *str, uint str_len, + timestamp_type ts_type, int cuted_increment); + void set_datetime_warning(const uint level, const uint code, + longlong nr, timestamp_type ts_type, + int cuted_increment); + void set_datetime_warning(const uint level, const uint code, + double nr, timestamp_type ts_type); + /* maximum possible display length */ + virtual uint32 max_length()= 0; friend bool reopen_table(THD *,struct st_table *,bool); friend int cre_myisam(my_string name, register TABLE *form, uint options, ulonglong auto_increment_value); @@ -221,6 +310,7 @@ public: friend class Item_sum_std; friend class Item_sum_min; friend class Item_sum_max; + friend class Item_func_group_concat; }; @@ -254,20 +344,32 @@ public: class Field_str :public Field { +protected: + CHARSET_INFO *field_charset; public: - double ceiling; // for ::store(double nr) Field_str(char *ptr_arg,uint32 len_arg, uchar *null_ptr_arg, uchar null_bit_arg, utype unireg_check_arg, const char *field_name_arg, - struct st_table *table_arg) + struct st_table *table_arg,CHARSET_INFO *charset) :Field(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, - unireg_check_arg, field_name_arg, table_arg), ceiling(0.0) - {} + unireg_check_arg, field_name_arg, table_arg) + { + field_charset=charset; + if (charset->state & MY_CS_BINSORT) + flags|=BINARY_FLAG; + } Item_result result_type () const { return STRING_RESULT; } uint decimals() const { return NOT_FIXED_DEC; } - friend class create_field; + int store(double nr); + int store(longlong nr)=0; + int store(const char *to,uint length,CHARSET_INFO *cs)=0; void make_field(Send_field *); uint size_of() const { return sizeof(*this); } + CHARSET_INFO *charset(void) const { return field_charset; } + void set_charset(CHARSET_INFO *charset) { field_charset=charset; } + bool binary() const { return field_charset == &my_charset_bin; } + uint32 max_length() { return field_length; } + friend class create_field; }; @@ -284,11 +386,11 @@ public: {} enum_field_types type() const { return FIELD_TYPE_DECIMAL;} enum ha_base_keytype key_type() const - { return zerofill ? HA_KEYTYPE_BINARY : HA_KEYTYPE_NUM; } + { return zerofill ? HA_KEYTYPE_BINARY : HA_KEYTYPE_NUM; } void reset(void); - void store(const char *to,uint length); - void store(double nr); - void store(longlong nr); + int store(const char *to,uint length,CHARSET_INFO *charset); + int store(double nr); + int store(longlong nr); double val_real(void); longlong val_int(void); String *val_str(String*,String *); @@ -297,6 +399,7 @@ public: void overflow(bool negative); bool zero_pack() const { return 0; } void sql_type(String &str) const; + uint32 max_length() { return field_length; } }; @@ -315,17 +418,19 @@ public: enum_field_types type() const { return FIELD_TYPE_TINY;} enum ha_base_keytype key_type() const { return unsigned_flag ? 
HA_KEYTYPE_BINARY : HA_KEYTYPE_INT8; } - void store(const char *to,uint length); - void store(double nr); - void store(longlong nr); + int store(const char *to,uint length,CHARSET_INFO *charset); + int store(double nr); + int store(longlong nr); void reset(void) { ptr[0]=0; } double val_real(void); longlong val_int(void); String *val_str(String*,String *); + bool send_binary(Protocol *protocol); int cmp(const char *,const char*); void sort_string(char *buff,uint length); uint32 pack_length() const { return 1; } void sql_type(String &str) const; + uint32 max_length() { return 4; } }; @@ -340,21 +445,28 @@ public: unireg_check_arg, field_name_arg, table_arg, 0, zero_arg,unsigned_arg) {} + Field_short(uint32 len_arg,bool maybe_null_arg, const char *field_name_arg, + struct st_table *table_arg,bool unsigned_arg) + :Field_num((char*) 0, len_arg, maybe_null_arg ? (uchar*) "": 0,0, + NONE, field_name_arg, table_arg,0,0,unsigned_arg) + {} enum Item_result result_type () const { return INT_RESULT; } enum_field_types type() const { return FIELD_TYPE_SHORT;} enum ha_base_keytype key_type() const { return unsigned_flag ? HA_KEYTYPE_USHORT_INT : HA_KEYTYPE_SHORT_INT;} - void store(const char *to,uint length); - void store(double nr); - void store(longlong nr); + int store(const char *to,uint length,CHARSET_INFO *charset); + int store(double nr); + int store(longlong nr); void reset(void) { ptr[0]=ptr[1]=0; } double val_real(void); longlong val_int(void); String *val_str(String*,String *); + bool send_binary(Protocol *protocol); int cmp(const char *,const char*); void sort_string(char *buff,uint length); uint32 pack_length() const { return 2; } void sql_type(String &str) const; + uint32 max_length() { return 6; } }; @@ -373,17 +485,19 @@ public: enum_field_types type() const { return FIELD_TYPE_INT24;} enum ha_base_keytype key_type() const { return unsigned_flag ? HA_KEYTYPE_UINT24 : HA_KEYTYPE_INT24; } - void store(const char *to,uint length); - void store(double nr); - void store(longlong nr); + int store(const char *to,uint length,CHARSET_INFO *charset); + int store(double nr); + int store(longlong nr); void reset(void) { ptr[0]=ptr[1]=ptr[2]=0; } double val_real(void); longlong val_int(void); String *val_str(String*,String *); + bool send_binary(Protocol *protocol); int cmp(const char *,const char*); void sort_string(char *buff,uint length); uint32 pack_length() const { return 3; } void sql_type(String &str) const; + uint32 max_length() { return 8; } }; @@ -407,17 +521,19 @@ public: enum_field_types type() const { return FIELD_TYPE_LONG;} enum ha_base_keytype key_type() const { return unsigned_flag ? HA_KEYTYPE_ULONG_INT : HA_KEYTYPE_LONG_INT; } - void store(const char *to,uint length); - void store(double nr); - void store(longlong nr); + int store(const char *to,uint length,CHARSET_INFO *charset); + int store(double nr); + int store(longlong nr); void reset(void) { ptr[0]=ptr[1]=ptr[2]=ptr[3]=0; } double val_real(void); longlong val_int(void); + bool send_binary(Protocol *protocol); String *val_str(String*,String *); int cmp(const char *,const char*); void sort_string(char *buff,uint length); uint32 pack_length() const { return 4; } void sql_type(String &str) const; + uint32 max_length() { return 11; } }; @@ -443,18 +559,20 @@ public: enum_field_types type() const { return FIELD_TYPE_LONGLONG;} enum ha_base_keytype key_type() const { return unsigned_flag ? 
HA_KEYTYPE_ULONGLONG : HA_KEYTYPE_LONGLONG; } - void store(const char *to,uint length); - void store(double nr); - void store(longlong nr); + int store(const char *to,uint length,CHARSET_INFO *charset); + int store(double nr); + int store(longlong nr); void reset(void) { ptr[0]=ptr[1]=ptr[2]=ptr[3]=ptr[4]=ptr[5]=ptr[6]=ptr[7]=0; } double val_real(void); longlong val_int(void); String *val_str(String*,String *); + bool send_binary(Protocol *protocol); int cmp(const char *,const char*); void sort_string(char *buff,uint length); uint32 pack_length() const { return 8; } void sql_type(String &str) const; - bool store_for_compare() { return 1; } + bool can_be_compared_as_longlong() const { return TRUE; } + uint32 max_length() { return 20; } }; #endif @@ -469,19 +587,26 @@ public: unireg_check_arg, field_name_arg, table_arg, dec_arg, zero_arg,unsigned_arg) {} + Field_float(uint32 len_arg, bool maybe_null_arg, const char *field_name_arg, + struct st_table *table_arg, uint8 dec_arg) + :Field_num((char*) 0, len_arg, maybe_null_arg ? (uchar*) "": 0, (uint) 0, + NONE, field_name_arg, table_arg,dec_arg,0,0) + {} enum_field_types type() const { return FIELD_TYPE_FLOAT;} enum ha_base_keytype key_type() const { return HA_KEYTYPE_FLOAT; } - void store(const char *to,uint length); - void store(double nr); - void store(longlong nr); + int store(const char *to,uint length,CHARSET_INFO *charset); + int store(double nr); + int store(longlong nr); void reset(void) { bzero(ptr,sizeof(float)); } double val_real(void); longlong val_int(void); String *val_str(String*,String *); + bool send_binary(Protocol *protocol); int cmp(const char *,const char*); void sort_string(char *buff,uint length); uint32 pack_length() const { return sizeof(float); } void sql_type(String &str) const; + uint32 max_length() { return 24; } }; @@ -503,17 +628,19 @@ public: {} enum_field_types type() const { return FIELD_TYPE_DOUBLE;} enum ha_base_keytype key_type() const { return HA_KEYTYPE_DOUBLE; } - void store(const char *to,uint length); - void store(double nr); - void store(longlong nr); + int store(const char *to,uint length,CHARSET_INFO *charset); + int store(double nr); + int store(longlong nr); void reset(void) { bzero(ptr,sizeof(double)); } double val_real(void); longlong val_int(void); String *val_str(String*,String *); + bool send_binary(Protocol *protocol); int cmp(const char *,const char*); void sort_string(char *buff,uint length); uint32 pack_length() const { return sizeof(double); } void sql_type(String &str) const; + uint32 max_length() { return 53; } }; @@ -524,14 +651,15 @@ class Field_null :public Field_str { public: Field_null(char *ptr_arg, uint32 len_arg, enum utype unireg_check_arg, const char *field_name_arg, - struct st_table *table_arg) + struct st_table *table_arg, CHARSET_INFO *cs) :Field_str(ptr_arg, len_arg, null, 1, - unireg_check_arg, field_name_arg, table_arg) + unireg_check_arg, field_name_arg, table_arg, cs) {} enum_field_types type() const { return FIELD_TYPE_NULL;} - void store(const char *to, uint length) { null[0]=1; } - void store(double nr) { null[0]=1; } - void store(longlong nr) { null[0]=1; } + int store(const char *to, uint length, CHARSET_INFO *cs) + { null[0]=1; return 0; } + int store(double nr) { null[0]=1; return 0; } + int store(longlong nr) { null[0]=1; return 0; } void reset(void) {} double val_real(void) { return 0.0;} longlong val_int(void) { return 0;} @@ -540,49 +668,50 @@ public: int cmp(const char *a, const char *b) { return 0;} void sort_string(char *buff, uint length) {} uint32 
pack_length() const { return 0; } - void sql_type(String &str) const { str.set("null",4); } + void sql_type(String &str) const; uint size_of() const { return sizeof(*this); } + uint32 max_length() { return 4; } }; -class Field_timestamp :public Field_num { -#if MYSQL_VERSION_ID < 40100 - /* - We save the original field length here because field_length is - changed to a mock value in case when the 'new_mode' is in effect. - */ - uint32 orig_field_length; -#endif +class Field_timestamp :public Field_str { public: Field_timestamp(char *ptr_arg, uint32 len_arg, + uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, const char *field_name_arg, - struct st_table *table_arg); - enum Item_result result_type () const; + struct st_table *table_arg, + CHARSET_INFO *cs); enum_field_types type() const { return FIELD_TYPE_TIMESTAMP;} enum ha_base_keytype key_type() const { return HA_KEYTYPE_ULONG_INT; } - void store(const char *to,uint length); - void store(double nr); - void store(longlong nr); + enum Item_result cmp_type () const { return INT_RESULT; } + int store(const char *to,uint length,CHARSET_INFO *charset); + int store(double nr); + int store(longlong nr); void reset(void) { ptr[0]=ptr[1]=ptr[2]=ptr[3]=0; } double val_real(void); longlong val_int(void); String *val_str(String*,String *); + bool send_binary(Protocol *protocol); int cmp(const char *,const char*); void sort_string(char *buff,uint length); uint32 pack_length() const { return 4; } void sql_type(String &str) const; - bool store_for_compare() { return 1; } + bool can_be_compared_as_longlong() const { return TRUE; } bool zero_pack() const { return 0; } void set_time(); virtual void set_default() { - if (table->timestamp_field == this) + if (table->timestamp_field == this && + unireg_check != TIMESTAMP_UN_FIELD) set_time(); else Field::set_default(); } - inline long get_timestamp() + /* Get TIMESTAMP field value as seconds since begging of Unix Epoch */ + inline long get_timestamp(my_bool *null_value) { + if ((*null_value= is_null())) + return 0; #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) return sint4korr(ptr); @@ -591,13 +720,9 @@ public: longget(tmp,ptr); return tmp; } - bool get_date(TIME *ltime,bool fuzzydate); + bool get_date(TIME *ltime,uint fuzzydate); bool get_time(TIME *ltime); - -#if MYSQL_VERSION_ID < 40100 - friend TABLE *open_table(THD *thd,const char *db,const char *table_name, - const char *alias,bool *refresh); -#endif + timestamp_auto_set_type get_auto_set_type() const; }; @@ -611,14 +736,15 @@ public: unireg_check_arg, field_name_arg, table_arg, 1, 1) {} enum_field_types type() const { return FIELD_TYPE_YEAR;} - void store(const char *to,uint length); - void store(double nr); - void store(longlong nr); + int store(const char *to,uint length,CHARSET_INFO *charset); + int store(double nr); + int store(longlong nr); double val_real(void); longlong val_int(void); String *val_str(String*,String *); + bool send_binary(Protocol *protocol); void sql_type(String &str) const; - bool store_for_compare() { return 1; } + bool can_be_compared_as_longlong() const { return TRUE; } }; @@ -626,29 +752,30 @@ class Field_date :public Field_str { public: Field_date(char *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, const char *field_name_arg, - struct st_table *table_arg) + struct st_table *table_arg, CHARSET_INFO *cs) :Field_str(ptr_arg, 10, null_ptr_arg, null_bit_arg, - unireg_check_arg, field_name_arg, table_arg) + unireg_check_arg, field_name_arg, table_arg, cs) {} 
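/*
  [Editor's note, not part of the original patch] A minimal sketch of how the
  new Field_timestamp::get_timestamp() signature above is meant to be read,
  assuming 'field' is already known to point at a Field_timestamp:

    my_bool ts_is_null;
    long ts= ((Field_timestamp*) field)->get_timestamp(&ts_is_null);
    if (!ts_is_null)
    {
      // ts is seconds since the Unix Epoch, decoded from the 4-byte value
    }

  The my_bool out-parameter folds the is_null() check into the read, so a
  caller no longer has to test the field for NULL separately.
*/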
Field_date(bool maybe_null_arg, const char *field_name_arg, - struct st_table *table_arg) + struct st_table *table_arg, CHARSET_INFO *cs) :Field_str((char*) 0,10, maybe_null_arg ? (uchar*) "": 0,0, - NONE, field_name_arg, table_arg) {} + NONE, field_name_arg, table_arg, cs) {} enum_field_types type() const { return FIELD_TYPE_DATE;} enum ha_base_keytype key_type() const { return HA_KEYTYPE_ULONG_INT; } enum Item_result cmp_type () const { return INT_RESULT; } - void store(const char *to,uint length); - void store(double nr); - void store(longlong nr); + int store(const char *to,uint length,CHARSET_INFO *charset); + int store(double nr); + int store(longlong nr); void reset(void) { ptr[0]=ptr[1]=ptr[2]=ptr[3]=0; } double val_real(void); longlong val_int(void); String *val_str(String*,String *); + bool send_binary(Protocol *protocol); int cmp(const char *,const char*); void sort_string(char *buff,uint length); uint32 pack_length() const { return 4; } void sql_type(String &str) const; - bool store_for_compare() { return 1; } + bool can_be_compared_as_longlong() const { return TRUE; } bool zero_pack() const { return 1; } }; @@ -656,29 +783,30 @@ class Field_newdate :public Field_str { public: Field_newdate(char *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, const char *field_name_arg, - struct st_table *table_arg) + struct st_table *table_arg, CHARSET_INFO *cs) :Field_str(ptr_arg, 10, null_ptr_arg, null_bit_arg, - unireg_check_arg, field_name_arg, table_arg) + unireg_check_arg, field_name_arg, table_arg, cs) {} enum_field_types type() const { return FIELD_TYPE_DATE;} enum_field_types real_type() const { return FIELD_TYPE_NEWDATE; } enum ha_base_keytype key_type() const { return HA_KEYTYPE_UINT24; } enum Item_result cmp_type () const { return INT_RESULT; } - void store(const char *to,uint length); - void store(double nr); - void store(longlong nr); + int store(const char *to,uint length,CHARSET_INFO *charset); + int store(double nr); + int store(longlong nr); void store_time(TIME *ltime,timestamp_type type); void reset(void) { ptr[0]=ptr[1]=ptr[2]=0; } double val_real(void); longlong val_int(void); String *val_str(String*,String *); + bool send_binary(Protocol *protocol); int cmp(const char *,const char*); void sort_string(char *buff,uint length); uint32 pack_length() const { return 3; } void sql_type(String &str) const; - bool store_for_compare() { return 1; } + bool can_be_compared_as_longlong() const { return TRUE; } bool zero_pack() const { return 1; } - bool get_date(TIME *ltime,bool fuzzydate); + bool get_date(TIME *ltime,uint fuzzydate); bool get_time(TIME *ltime); }; @@ -687,31 +815,32 @@ class Field_time :public Field_str { public: Field_time(char *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, const char *field_name_arg, - struct st_table *table_arg) + struct st_table *table_arg, CHARSET_INFO *cs) :Field_str(ptr_arg, 8, null_ptr_arg, null_bit_arg, - unireg_check_arg, field_name_arg, table_arg) + unireg_check_arg, field_name_arg, table_arg, cs) {} Field_time(bool maybe_null_arg, const char *field_name_arg, - struct st_table *table_arg) + struct st_table *table_arg, CHARSET_INFO *cs) :Field_str((char*) 0,8, maybe_null_arg ? 
(uchar*) "": 0,0, - NONE, field_name_arg, table_arg) {} + NONE, field_name_arg, table_arg, cs) {} enum_field_types type() const { return FIELD_TYPE_TIME;} enum ha_base_keytype key_type() const { return HA_KEYTYPE_INT24; } enum Item_result cmp_type () const { return INT_RESULT; } - void store(const char *to,uint length); - void store(double nr); - void store(longlong nr); + int store(const char *to,uint length,CHARSET_INFO *charset); + int store(double nr); + int store(longlong nr); void reset(void) { ptr[0]=ptr[1]=ptr[2]=0; } double val_real(void); longlong val_int(void); String *val_str(String*,String *); - bool get_date(TIME *ltime,bool fuzzydate); + bool get_date(TIME *ltime, uint fuzzydate); + bool send_binary(Protocol *protocol); bool get_time(TIME *ltime); int cmp(const char *,const char*); void sort_string(char *buff,uint length); uint32 pack_length() const { return 3; } void sql_type(String &str) const; - bool store_for_compare() { return 1; } + bool can_be_compared_as_longlong() const { return TRUE; } bool zero_pack() const { return 1; } }; @@ -720,61 +849,52 @@ class Field_datetime :public Field_str { public: Field_datetime(char *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, const char *field_name_arg, - struct st_table *table_arg) + struct st_table *table_arg, CHARSET_INFO *cs) :Field_str(ptr_arg, 19, null_ptr_arg, null_bit_arg, - unireg_check_arg, field_name_arg, table_arg) + unireg_check_arg, field_name_arg, table_arg, cs) {} Field_datetime(bool maybe_null_arg, const char *field_name_arg, - struct st_table *table_arg) + struct st_table *table_arg, CHARSET_INFO *cs) :Field_str((char*) 0,19, maybe_null_arg ? (uchar*) "": 0,0, - NONE, field_name_arg, table_arg) {} + NONE, field_name_arg, table_arg, cs) {} enum_field_types type() const { return FIELD_TYPE_DATETIME;} #ifdef HAVE_LONG_LONG enum ha_base_keytype key_type() const { return HA_KEYTYPE_ULONGLONG; } #endif enum Item_result cmp_type () const { return INT_RESULT; } - void store(const char *to,uint length); - void store(double nr); - void store(longlong nr); + uint decimals() const { return DATETIME_DEC; } + int store(const char *to,uint length,CHARSET_INFO *charset); + int store(double nr); + int store(longlong nr); void store_time(TIME *ltime,timestamp_type type); void reset(void) { ptr[0]=ptr[1]=ptr[2]=ptr[3]=ptr[4]=ptr[5]=ptr[6]=ptr[7]=0; } double val_real(void); longlong val_int(void); String *val_str(String*,String *); + bool send_binary(Protocol *protocol); int cmp(const char *,const char*); void sort_string(char *buff,uint length); uint32 pack_length() const { return 8; } void sql_type(String &str) const; - bool store_for_compare() { return 1; } + bool can_be_compared_as_longlong() const { return TRUE; } bool zero_pack() const { return 1; } - bool get_date(TIME *ltime,bool fuzzydate); + bool get_date(TIME *ltime,uint fuzzydate); bool get_time(TIME *ltime); }; class Field_string :public Field_str { - bool binary_flag; public: Field_string(char *ptr_arg, uint32 len_arg,uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, const char *field_name_arg, - struct st_table *table_arg,bool binary_arg) + struct st_table *table_arg, CHARSET_INFO *cs) :Field_str(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, - unireg_check_arg, field_name_arg, table_arg), - binary_flag(binary_arg) - { - if (binary_arg) - flags|=BINARY_FLAG; - } + unireg_check_arg, field_name_arg, table_arg,cs) {}; Field_string(uint32 len_arg,bool maybe_null_arg, const char *field_name_arg, - struct st_table 
*table_arg, bool binary_arg) + struct st_table *table_arg, CHARSET_INFO *cs) :Field_str((char*) 0,len_arg, maybe_null_arg ? (uchar*) "": 0,0, - NONE, field_name_arg, table_arg), - binary_flag(binary_arg) - { - if (binary_arg) - flags|=BINARY_FLAG; - } + NONE, field_name_arg, table_arg, cs) {}; enum_field_types type() const { @@ -783,13 +903,12 @@ public: FIELD_TYPE_VAR_STRING : FIELD_TYPE_STRING); } enum ha_base_keytype key_type() const - { return binary_flag ? HA_KEYTYPE_BINARY : HA_KEYTYPE_TEXT; } + { return binary() ? HA_KEYTYPE_BINARY : HA_KEYTYPE_TEXT; } bool zero_pack() const { return 0; } - bool binary() const { return binary_flag; } - void reset(void) { bfill(ptr,field_length,' '); } - void store(const char *to,uint length); - void store(double nr); - void store(longlong nr); + void reset(void) { charset()->cset->fill(charset(),ptr,field_length,' '); } + int store(const char *to,uint length,CHARSET_INFO *charset); + int store(longlong nr); + int store(double nr) { return Field_str::store(nr); } /* QQ: To be deleted */ double val_real(void); longlong val_int(void); String *val_str(String*,String *); @@ -804,52 +923,47 @@ public: uint max_packed_col_length(uint max_length); uint size_of() const { return sizeof(*this); } enum_field_types real_type() const { return FIELD_TYPE_STRING; } + bool has_charset(void) const + { return charset() == &my_charset_bin ? FALSE : TRUE; } }; class Field_varstring :public Field_str { - bool binary_flag; public: Field_varstring(char *ptr_arg, uint32 len_arg,uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, const char *field_name_arg, - struct st_table *table_arg,bool binary_arg) + struct st_table *table_arg, CHARSET_INFO *cs) :Field_str(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, - unireg_check_arg, field_name_arg, table_arg), - binary_flag(binary_arg) - { - if (binary_arg) - flags|= BINARY_FLAG; - } + unireg_check_arg, field_name_arg, table_arg, cs) + {} Field_varstring(uint32 len_arg,bool maybe_null_arg, const char *field_name_arg, - struct st_table *table_arg, bool binary_arg) + struct st_table *table_arg, CHARSET_INFO *cs) :Field_str((char*) 0,len_arg, maybe_null_arg ? (uchar*) "": 0,0, - NONE, field_name_arg, table_arg), - binary_flag(binary_arg) - { - if (binary_arg) - flags|=BINARY_FLAG; - } + NONE, field_name_arg, table_arg, cs) + {} enum_field_types type() const { return FIELD_TYPE_VAR_STRING; } enum ha_base_keytype key_type() const - { return binary_flag ? HA_KEYTYPE_VARBINARY : HA_KEYTYPE_VARTEXT; } + { return binary() ? 
HA_KEYTYPE_VARBINARY : HA_KEYTYPE_VARTEXT; } bool zero_pack() const { return 0; } - bool binary() const { return binary_flag; } void reset(void) { bzero(ptr,field_length+2); } uint32 pack_length() const { return (uint32) field_length+2; } uint32 key_length() const { return (uint32) field_length; } - void store(const char *to,uint length); - void store(double nr); - void store(longlong nr); + int store(const char *to,uint length,CHARSET_INFO *charset); + int store(longlong nr); + int store(double nr) { return Field_str::store(nr); } /* QQ: To be deleted */ double val_real(void); longlong val_int(void); String *val_str(String*,String *); int cmp(const char *,const char*); void sort_string(char *buff,uint length); + void get_key_image(char *buff,uint length, CHARSET_INFO *cs, imagetype type); + void set_key_image(char *buff,uint length, CHARSET_INFO *cs); void sql_type(String &str) const; char *pack(char *to, const char *from, uint max_length=~(uint) 0); + char *pack_key(char *to, const char *from, uint max_length); const char *unpack(char* to, const char *from); int pack_cmp(const char *a, const char *b, uint key_length); int pack_cmp(const char *b, uint key_length); @@ -857,34 +971,34 @@ public: uint max_packed_col_length(uint max_length); uint size_of() const { return sizeof(*this); } enum_field_types real_type() const { return FIELD_TYPE_VAR_STRING; } + bool has_charset(void) const + { return charset() == &my_charset_bin ? FALSE : TRUE; } }; class Field_blob :public Field_str { +protected: uint packlength; String value; // For temporaries - bool binary_flag; public: Field_blob(char *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, const char *field_name_arg, struct st_table *table_arg,uint blob_pack_length, - bool binary_arg); + CHARSET_INFO *cs); Field_blob(uint32 len_arg,bool maybe_null_arg, const char *field_name_arg, - struct st_table *table_arg, bool binary_arg) + struct st_table *table_arg, CHARSET_INFO *cs) :Field_str((char*) 0,len_arg, maybe_null_arg ? (uchar*) "": 0,0, - NONE, field_name_arg, table_arg), - packlength(3),binary_flag(binary_arg) - { - flags|= BLOB_FLAG; - if (binary_arg) - flags|= BINARY_FLAG; - } + NONE, field_name_arg, table_arg, cs), + packlength(4) + { + flags|= BLOB_FLAG; + } enum_field_types type() const { return FIELD_TYPE_BLOB;} enum ha_base_keytype key_type() const - { return binary_flag ? HA_KEYTYPE_VARBINARY : HA_KEYTYPE_VARTEXT; } - void store(const char *to,uint length); - void store(double nr); - void store(longlong nr); + { return binary() ? 
HA_KEYTYPE_VARBINARY : HA_KEYTYPE_VARTEXT; } + int store(const char *to,uint length,CHARSET_INFO *charset); + int store(double nr); + int store(longlong nr); double val_real(void); longlong val_int(void); String *val_str(String*,String *); @@ -899,6 +1013,10 @@ public: void sort_string(char *buff,uint length); uint32 pack_length() const { return (uint32) (packlength+table->blob_ptr_size); } + inline uint32 max_data_length() const + { + return (uint32) (((ulonglong) 1 << (packlength*8)) -1); + } void reset(void) { bzero(ptr, packlength+sizeof(char*)); } void reset_fields() { bzero((char*) &value,sizeof(value)); } void store_length(uint32 number); @@ -906,7 +1024,6 @@ public: { return get_length(ptr+row_offset); } uint32 get_length(const char *ptr); void put_length(char *pos, uint32 length); - bool binary() const { return binary_flag; } inline void get_ptr(char **str) { memcpy_fixed(str,ptr+packlength,sizeof(char*)); @@ -921,13 +1038,13 @@ public: store_length(length); memcpy_fixed(ptr+packlength,&data,sizeof(char*)); } - void get_key_image(char *buff,uint length); - void set_key_image(char *buff,uint length); + void get_key_image(char *buff,uint length, CHARSET_INFO *cs, imagetype type); + void set_key_image(char *buff,uint length, CHARSET_INFO *cs); void sql_type(String &str) const; inline bool copy() { char *tmp; get_ptr(&tmp); - if (value.copy(tmp,get_length())) + if (value.copy(tmp,get_length(),charset())) { Field_blob::reset(); return 1; @@ -944,12 +1061,43 @@ public: int pack_cmp(const char *b, uint key_length); uint packed_col_length(const char *col_ptr, uint length); uint max_packed_col_length(uint max_length); - inline void free() { value.free(); } + void free() { value.free(); } inline void clear_temporary() { bzero((char*) &value,sizeof(value)); } friend void field_conv(Field *to,Field *from); uint size_of() const { return sizeof(*this); } + bool has_charset(void) const + { return charset() == &my_charset_bin ? 
FALSE : TRUE; } + uint32 max_length(); }; +#ifdef HAVE_SPATIAL +class Field_geom :public Field_blob { +public: + enum geometry_type geom_type; + + Field_geom(char *ptr_arg, uchar *null_ptr_arg, uint null_bit_arg, + enum utype unireg_check_arg, const char *field_name_arg, + struct st_table *table_arg,uint blob_pack_length, + enum geometry_type geom_type_arg) + :Field_blob(ptr_arg, null_ptr_arg, null_bit_arg, unireg_check_arg, + field_name_arg, table_arg, blob_pack_length,&my_charset_bin) + { geom_type= geom_type_arg; } + Field_geom(uint32 len_arg,bool maybe_null_arg, const char *field_name_arg, + struct st_table *table_arg, enum geometry_type geom_type_arg) + :Field_blob(len_arg, maybe_null_arg, field_name_arg, + table_arg, &my_charset_bin) + { geom_type= geom_type_arg; } + enum ha_base_keytype key_type() const { return HA_KEYTYPE_VARBINARY; } + enum_field_types type() const { return FIELD_TYPE_GEOMETRY; } + void sql_type(String &str) const; + int store(const char *to, uint length, CHARSET_INFO *charset); + int store(double nr) { return 1; } + int store(longlong nr) { return 1; } + + void get_key_image(char *buff,uint length, CHARSET_INFO *cs,imagetype type); + void set_key_image(char *buff,uint length, CHARSET_INFO *cs); +}; +#endif /*HAVE_SPATIAL*/ class Field_enum :public Field_str { protected: @@ -960,19 +1108,21 @@ public: uchar null_bit_arg, enum utype unireg_check_arg, const char *field_name_arg, struct st_table *table_arg,uint packlength_arg, - TYPELIB *typelib_arg) + TYPELIB *typelib_arg, + CHARSET_INFO *charset_arg) :Field_str(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, - unireg_check_arg, field_name_arg, table_arg), + unireg_check_arg, field_name_arg, table_arg, charset_arg), packlength(packlength_arg),typelib(typelib_arg) { flags|=ENUM_FLAG; } enum_field_types type() const { return FIELD_TYPE_STRING; } enum Item_result cmp_type () const { return INT_RESULT; } + enum Item_result cast_to_int_type () const { return INT_RESULT; } enum ha_base_keytype key_type() const; - void store(const char *to,uint length); - void store(double nr); - void store(longlong nr); + int store(const char *to,uint length,CHARSET_INFO *charset); + int store(double nr); + int store(longlong nr); void reset() { bzero(ptr,packlength); } double val_real(void); longlong val_int(void); @@ -985,9 +1135,11 @@ public: uint size_of() const { return sizeof(*this); } enum_field_types real_type() const { return FIELD_TYPE_ENUM; } virtual bool zero_pack() const { return 0; } - bool optimize_range(uint idx) { return 0; } - bool binary() const { return 0; } + bool optimize_range(uint idx, uint part) { return 0; } bool eq_def(Field *field); + bool has_charset(void) const { return TRUE; } + /* enum and set are sorted as integers */ + CHARSET_INFO *sort_charset(void) const { return &my_charset_bin; } }; @@ -997,21 +1149,22 @@ public: uchar null_bit_arg, enum utype unireg_check_arg, const char *field_name_arg, struct st_table *table_arg,uint32 packlength_arg, - TYPELIB *typelib_arg) + TYPELIB *typelib_arg, CHARSET_INFO *charset_arg) :Field_enum(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, unireg_check_arg, field_name_arg, table_arg, packlength_arg, - typelib_arg) + typelib_arg,charset_arg) { flags=(flags & ~ENUM_FLAG) | SET_FLAG; } - void store(const char *to,uint length); - void store(double nr) { Field_set::store((longlong) nr); } - void store(longlong nr); + int store(const char *to,uint length,CHARSET_INFO *charset); + int store(double nr) { return Field_set::store((longlong) nr); } + int store(longlong nr); virtual 
bool zero_pack() const { return 1; } String *val_str(String*,String *); void sql_type(String &str) const; enum_field_types real_type() const { return FIELD_TYPE_SET; } + bool has_charset(void) const { return TRUE; } }; @@ -1024,18 +1177,31 @@ public: const char *field_name; const char *change; // If done with alter table const char *after; // Put column after this one + LEX_STRING comment; // Comment for field Item *def; // Default value enum enum_field_types sql_type; + /* + At various stages in execution this can be length of field in bytes or + max number of characters. + */ uint32 length; + /* + The value of 'length' before a call to create_length_to_internal_length + */ + uint32 char_length; uint decimals,flags,pack_length; Field::utype unireg_check; TYPELIB *interval; // Which interval to use + List<String> interval_list; + CHARSET_INFO *charset; + Field::geometry_type geom_type; Field *field; // For alter table uint8 row,col,sc_length,interval_id; // For rea_create_table uint offset,pack_flag; create_field() :after(0) {} create_field(Field *field, Field *orig_field); + void create_length_to_internal_length(void); }; @@ -1045,8 +1211,11 @@ public: class Send_field { public: - const char *table_name,*col_name; - uint length,flags,decimals; + const char *db_name; + const char *table_name,*org_table_name; + const char *col_name,*org_col_name; + ulong length; + uint charsetnr, flags, decimals; enum_field_types type; Send_field() {} }; @@ -1078,25 +1247,25 @@ public: Field *make_field(char *ptr, uint32 field_length, uchar *null_pos, uchar null_bit, - uint pack_flag, - enum_field_types field_type, + uint pack_flag, enum_field_types field_type, + CHARSET_INFO *cs, + Field::geometry_type geom_type, Field::utype unireg_check, TYPELIB *interval, const char *field_name, struct st_table *table); uint pack_length_to_packflag(uint type); uint32 calc_pack_length(enum_field_types type,uint32 length); -bool set_field_to_null(Field *field); -bool set_field_to_null_with_conversions(Field *field, bool no_conversions); -uint find_enum(TYPELIB *typelib,const char *x, uint length); -ulonglong find_set(TYPELIB *typelib,const char *x, uint length); -bool test_if_int(const char *str,int length); +int set_field_to_null(Field *field); +int set_field_to_null_with_conversions(Field *field, bool no_conversions); +bool test_if_int(const char *str, int length, const char *int_end, + CHARSET_INFO *cs); /* The following are for the interface with the .frm file */ #define FIELDFLAG_DECIMAL 1 -#define FIELDFLAG_BINARY 1 // Shares same flag +#define FIELDFLAG_BINARY 1 // Shares same flag #define FIELDFLAG_NUMBER 2 #define FIELDFLAG_ZEROFILL 4 #define FIELDFLAG_PACK 120 // Bits used for packing @@ -1104,6 +1273,7 @@ bool test_if_int(const char *str,int length); #define FIELDFLAG_BITFIELD 512 // mangled with decimals! #define FIELDFLAG_BLOB 1024 // mangled with decimals! #define FIELDFLAG_GEOM 2048 // mangled with decimals! 
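/*
  [Editor's note, not part of the original patch] A worked example of the
  length/char_length split in create_field above, assuming a CHAR(10) column
  declared in utf8 (mbmaxlen == 3). The member values shown are illustrative
  only; the real code paths fill in many more fields:

    create_field fld;                             // normally built by the parser
    fld.sql_type= MYSQL_TYPE_STRING;
    fld.length= 10;                               // characters, from CHAR(10)
    fld.charset= &my_charset_utf8_general_ci;     // assumed charset object, mbmaxlen == 3
    fld.create_length_to_internal_length();
    // fld.length is now 30 (bytes) and fld.pack_length ==
    // calc_pack_length(FIELD_TYPE_STRING, 30) == 30; per the comment on
    // char_length, that member keeps the pre-conversion value of 10.
*/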
+ #define FIELDFLAG_LEFT_FULLSCREEN 8192 #define FIELDFLAG_RIGHT_FULLSCREEN 16384 #define FIELDFLAG_FORMAT_NUMBER 16384 // predit: ###,,## in output @@ -1126,7 +1296,7 @@ bool test_if_int(const char *str,int length); #define f_packtype(x) (((x) >> FIELDFLAG_PACK_SHIFT) & 15) #define f_decimals(x) ((uint8) (((x) >> FIELDFLAG_DEC_SHIFT) & FIELDFLAG_MAX_DEC)) #define f_is_alpha(x) (!f_is_num(x)) -#define f_is_binary(x) ((x) & FIELDFLAG_BINARY) +#define f_is_binary(x) ((x) & FIELDFLAG_BINARY) // 4.0- compatibility #define f_is_enum(x) (((x) & (FIELDFLAG_INTERVAL | FIELDFLAG_NUMBER)) == FIELDFLAG_INTERVAL) #define f_is_bitfield(x) (((x) & (FIELDFLAG_BITFIELD | FIELDFLAG_NUMBER)) == FIELDFLAG_BITFIELD) #define f_is_blob(x) (((x) & (FIELDFLAG_BLOB | FIELDFLAG_NUMBER)) == FIELDFLAG_BLOB) diff --git a/sql/field_conv.cc b/sql/field_conv.cc index 7aaabde4f55..d61b3735c91 100644 --- a/sql/field_conv.cc +++ b/sql/field_conv.cc @@ -17,7 +17,7 @@ /* Functions to copy data to or from fields - This could be done with a single short function but opencooding this + This could be done with a single short function but opencoding this gives much more speed. */ @@ -109,7 +109,7 @@ static void do_outer_field_to_null_str(Copy_field *copy) } -bool +int set_field_to_null(Field *field) { if (field->real_maybe_null()) @@ -119,15 +119,16 @@ set_field_to_null(Field *field) return 0; } field->reset(); - if (current_thd->count_cuted_fields) + if (current_thd->count_cuted_fields == CHECK_FIELD_WARN) { - current_thd->cuted_fields++; // Increment error counter + field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_TRUNCATED, 1); return 0; } if (!current_thd->no_errors) my_printf_error(ER_BAD_NULL_ERROR,ER(ER_BAD_NULL_ERROR),MYF(0), field->field_name); - return 1; + return -1; } @@ -145,11 +146,11 @@ set_field_to_null(Field *field) RETURN VALUES 0 Field could take 0 or an automatic conversion was used - 1 Field could not take NULL and no conversion was used. + -1 Field could not take NULL and no conversion was used. If no_conversion was not set, an error message is printed */ -bool +int set_field_to_null_with_conversions(Field *field, bool no_conversions) { if (field->real_maybe_null()) @@ -159,11 +160,12 @@ set_field_to_null_with_conversions(Field *field, bool no_conversions) return 0; } if (no_conversions) - return 1; + return -1; /* Check if this is a special type, which will get a special walue - when set to NULL + when set to NULL (TIMESTAMP fields which allow setting to NULL + are handled by first check). 
*/ if (field->type() == FIELD_TYPE_TIMESTAMP) { @@ -172,16 +174,20 @@ set_field_to_null_with_conversions(Field *field, bool no_conversions) } field->reset(); if (field == field->table->next_number_field) + { + field->table->auto_increment_field_not_null= FALSE; return 0; // field is set in handler.cc - if (current_thd->count_cuted_fields) + } + if (current_thd->count_cuted_fields == CHECK_FIELD_WARN) { - current_thd->cuted_fields++; // Increment error counter + field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_NULL_TO_NOTNULL, 1); return 0; } if (!current_thd->no_errors) my_printf_error(ER_BAD_NULL_ERROR,ER(ER_BAD_NULL_ERROR),MYF(0), field->field_name); - return 1; + return -1; } @@ -225,7 +231,8 @@ static void do_copy_not_null(Copy_field *copy) { if (*copy->from_null_ptr & copy->from_bit) { - current_thd->cuted_fields++; + copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_TRUNCATED, 1); copy->to_field->reset(); } else @@ -245,7 +252,8 @@ static void do_copy_timestamp(Copy_field *copy) { if (*copy->from_null_ptr & copy->from_bit) { - ((Field_timestamp*) copy->to_field)->set_time();// Same as set_field_to_null + /* Same as in set_field_to_null_with_conversions() */ + ((Field_timestamp*) copy->to_field)->set_time(); } else (copy->do_copy2)(copy); @@ -255,7 +263,11 @@ static void do_copy_timestamp(Copy_field *copy) static void do_copy_next_number(Copy_field *copy) { if (*copy->from_null_ptr & copy->from_bit) - copy->to_field->reset(); // Same as set_field_to_null + { + /* Same as in set_field_to_null_with_conversions() */ + copy->to_field->table->auto_increment_field_not_null= FALSE; + copy->to_field->reset(); + } else (copy->do_copy2)(copy); } @@ -270,9 +282,10 @@ static void do_copy_blob(Copy_field *copy) static void do_conv_blob(Copy_field *copy) { - copy->from_field->val_str(©->tmp,©->tmp); + copy->from_field->val_str(©->tmp); ((Field_blob *) copy->to_field)->store(copy->tmp.ptr(), - copy->tmp.length()); + copy->tmp.length(), + copy->tmp.charset()); } /* Save blob in copy->tmp for GROUP BY */ @@ -280,20 +293,21 @@ static void do_conv_blob(Copy_field *copy) static void do_save_blob(Copy_field *copy) { char buff[MAX_FIELD_WIDTH]; - String res(buff,sizeof(buff)); - copy->from_field->val_str(&res,&res); + String res(buff,sizeof(buff),copy->tmp.charset()); + copy->from_field->val_str(&res); copy->tmp.copy(res); ((Field_blob *) copy->to_field)->store(copy->tmp.ptr(), - copy->tmp.length()); + copy->tmp.length(), + copy->tmp.charset()); } static void do_field_string(Copy_field *copy) { char buff[MAX_FIELD_WIDTH]; - copy->tmp.set_quick(buff,sizeof(buff)); - copy->from_field->val_str(©->tmp,©->tmp); - copy->to_field->store(copy->tmp.c_ptr_quick(),copy->tmp.length()); + copy->tmp.set_quick(buff,sizeof(buff),copy->tmp.charset()); + copy->from_field->val_str(©->tmp); + copy->to_field->store(copy->tmp.c_ptr_quick(),copy->tmp.length(),copy->tmp.charset()); } @@ -310,29 +324,68 @@ static void do_field_real(Copy_field *copy) } +/* + string copy for single byte characters set when to string is shorter than + from string +*/ + static void do_cut_string(Copy_field *copy) -{ // Shorter string field +{ + CHARSET_INFO *cs= copy->from_field->charset(); memcpy(copy->to_ptr,copy->from_ptr,copy->to_length); /* Check if we loosed any important characters */ - char *ptr,*end; - for (ptr=copy->from_ptr+copy->to_length,end=copy->from_ptr+copy->from_length ; - ptr != end ; - ptr++) + if (cs->cset->scan(cs, + copy->from_ptr + copy->to_length, + copy->from_ptr + copy->from_length, + 
MY_SEQ_SPACES) < copy->from_length - copy->to_length) { - if (!isspace(*ptr)) - { - current_thd->cuted_fields++; // Give a warning - break; - } + copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_TRUNCATED, 1); } } +/* + string copy for multi byte characters set when to string is shorter than + from string +*/ + +static void do_cut_string_complex(Copy_field *copy) +{ // Shorter string field + int well_formed_error; + CHARSET_INFO *cs= copy->from_field->charset(); + const char *from_end= copy->from_ptr + copy->from_length; + uint copy_length= cs->cset->well_formed_len(cs, copy->from_ptr, from_end, + copy->to_length / cs->mbmaxlen, + &well_formed_error); + if (copy->to_length < copy_length) + copy_length= copy->to_length; + memcpy(copy->to_ptr, copy->from_ptr, copy_length); + + /* Check if we lost any important characters */ + if (well_formed_error || + cs->cset->scan(cs, copy->from_ptr + copy_length, from_end, + MY_SEQ_SPACES) < (copy->from_length - copy_length)) + { + copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_TRUNCATED, 1); + } + + if (copy_length < copy->to_length) + cs->cset->fill(cs, copy->to_ptr + copy_length, + copy->to_length - copy_length, ' '); +} + + + + static void do_expand_string(Copy_field *copy) { + CHARSET_INFO *cs= copy->from_field->charset(); memcpy(copy->to_ptr,copy->from_ptr,copy->from_length); - bfill(copy->to_ptr+copy->from_length,copy->to_length-copy->from_length,' '); + cs->cset->fill(cs, copy->to_ptr+copy->from_length, + copy->to_length-copy->from_length, ' '); } static void do_varstring(Copy_field *copy) @@ -342,7 +395,8 @@ static void do_varstring(Copy_field *copy) { length=copy->to_length-2; if (current_thd->count_cuted_fields) - current_thd->cuted_fields++; // Increment error counter + copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_TRUNCATED, 1); } int2store(copy->to_ptr,length); memcpy(copy->to_ptr+2, copy->from_ptr,length); @@ -424,18 +478,20 @@ void Copy_field::set(Field *to,Field *from,bool save) } } else - do_copy=do_copy_not_null; + { + if (to_field->type() == FIELD_TYPE_TIMESTAMP) + do_copy= do_copy_timestamp; // Automatic timestamp + else if (to_field == to_field->table->next_number_field) + do_copy= do_copy_next_number; + else + do_copy= do_copy_not_null; + } } else if (to_field->real_maybe_null()) { to_null_ptr= to->null_ptr; to_bit= to->null_bit; - if (to_field->type() == FIELD_TYPE_TIMESTAMP) - do_copy=do_copy_timestamp; // Automatic timestamp - else if (to_field == to_field->table->next_number_field) - do_copy=do_copy_next_number; - else - do_copy=do_copy_maybe_null; + do_copy= do_copy_maybe_null; } else do_copy=0; @@ -453,7 +509,7 @@ void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*) { if (to->flags & BLOB_FLAG) { - if (!(from->flags & BLOB_FLAG)) + if (!(from->flags & BLOB_FLAG) || from->charset() != to->charset()) return do_conv_blob; if (from_length != to_length || to->table->db_low_byte_first != from->table->db_low_byte_first) @@ -484,11 +540,14 @@ void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*) if (!to->eq_def(from)) return do_field_string; } + else if (to->charset() != from->charset()) + return do_field_string; else if (to->real_type() == FIELD_TYPE_VAR_STRING && to_length != from_length) return do_varstring; else if (to_length < from_length) - return do_cut_string; + return (from->charset()->mbmaxlen == 1 ? 
+ do_cut_string : do_cut_string_complex); else if (to_length > from_length) return do_expand_string; } @@ -540,6 +599,10 @@ void field_conv(Field *to,Field *from) !(to->flags & UNSIGNED_FLAG && !(from->flags & UNSIGNED_FLAG)) && to->real_type() != FIELD_TYPE_ENUM && to->real_type() != FIELD_TYPE_SET && + (to->real_type() != FIELD_TYPE_DECIMAL || + (to->field_length == from->field_length && + (((Field_num*) to)->dec == ((Field_num*) from)->dec))) && + from->charset() == to->charset() && to->table->db_low_byte_first == from->table->db_low_byte_first) { // Identical fields memcpy(to->ptr,from->ptr,to->pack_length()); @@ -549,11 +612,11 @@ void field_conv(Field *to,Field *from) if (to->type() == FIELD_TYPE_BLOB) { // Be sure the value is stored Field_blob *blob=(Field_blob*) to; - from->val_str(&blob->value,&blob->value); + from->val_str(&blob->value); if (!blob->value.is_alloced() && from->real_type() != FIELD_TYPE_STRING) blob->value.copy(); - blob->store(blob->value.ptr(),blob->value.length()); + blob->store(blob->value.ptr(),blob->value.length(),from->charset()); return; } if ((from->result_type() == STRING_RESULT && @@ -563,9 +626,9 @@ void field_conv(Field *to,Field *from) to->type() == FIELD_TYPE_DECIMAL) { char buff[MAX_FIELD_WIDTH]; - String result(buff,sizeof(buff)); - from->val_str(&result,&result); - to->store(result.c_ptr_quick(),result.length()); + String result(buff,sizeof(buff),from->charset()); + from->val_str(&result); + to->store(result.c_ptr_quick(),result.length(),from->charset()); } else if (from->result_type() == REAL_RESULT) to->store(from->val_real()); diff --git a/sql/filesort.cc b/sql/filesort.cc index a53067ccd73..63a8515020b 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -1,4 +1,4 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -49,7 +49,12 @@ static int merge_index(SORTPARAM *param,uchar *sort_buffer, uint maxbuffer,IO_CACHE *tempfile, IO_CACHE *outfile); static bool save_index(SORTPARAM *param,uchar **sort_keys, uint count); -static uint sortlength(SORT_FIELD *sortorder,uint length); +static uint sortlength(SORT_FIELD *sortorder, uint s_length, + bool *multi_byte_charset); +static SORT_ADDON_FIELD *get_addon_fields(THD *thd, Field **ptabfield, + uint sortlength, uint *plength); +static void unpack_addon_fields(struct st_sort_addon_field *addon_field, + byte *buff); /* Sort a table @@ -84,35 +89,66 @@ static uint sortlength(SORT_FIELD *sortorder,uint length); table->record_pointers */ -ha_rows filesort(TABLE *table, SORT_FIELD *sortorder, uint s_length, - SQL_SELECT *select, ha_rows special, ha_rows max_rows, - ha_rows *examined_rows) +ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, + SQL_SELECT *select, ha_rows max_rows, ha_rows *examined_rows) { int error; ulong memavl, min_sort_memory; uint maxbuffer; BUFFPEK *buffpek; - ha_rows records; + ha_rows records= HA_POS_ERROR; uchar **sort_keys; IO_CACHE tempfile, buffpek_pointers, *selected_records_file, *outfile; SORTPARAM param; - THD *thd= current_thd; - + bool multi_byte_charset; DBUG_ENTER("filesort"); - DBUG_EXECUTE("info",TEST_filesort(sortorder,s_length,special);); + DBUG_EXECUTE("info",TEST_filesort(sortorder,s_length);); #ifdef SKIP_DBUG_IN_FILESORT DBUG_PUSH(""); /* No DBUG here */ #endif - outfile= table->io_cache; + outfile= 
table->sort.io_cache; my_b_clear(&tempfile); my_b_clear(&buffpek_pointers); buffpek=0; sort_keys= (uchar **) NULL; error= 1; bzero((char*) ¶m,sizeof(param)); + param.sort_length= sortlength(sortorder, s_length, &multi_byte_charset); param.ref_length= table->file->ref_length; - param.sort_length=sortlength(sortorder,s_length)+ param.ref_length; + param.addon_field= 0; + param.addon_length= 0; + if (!(table->tmp_table || table->fulltext_searched)) + { + /* + Get the descriptors of all fields whose values are appended + to sorted fields and get its total length in param.spack_length. + */ + param.addon_field= get_addon_fields(thd, table->field, + param.sort_length, + ¶m.addon_length); + } + table->sort.addon_buf= 0; + table->sort.addon_length= param.addon_length; + table->sort.addon_field= param.addon_field; + table->sort.unpack= unpack_addon_fields; + if (param.addon_field) + { + param.res_length= param.addon_length; + if (!(table->sort.addon_buf= (byte *) my_malloc(param.addon_length, + MYF(MY_WME)))) + goto err; + } + else + { + param.res_length= param.ref_length; + /* + The reference to the record is considered + as an additional sorted field + */ + param.sort_length+= param.ref_length; + } + param.rec_length= param.sort_length+param.addon_length; param.max_rows= max_rows; if (select && select->quick) @@ -123,46 +159,38 @@ ha_rows filesort(TABLE *table, SORT_FIELD *sortorder, uint s_length, { statistic_increment(filesort_scan_count, &LOCK_status); } - if (select && my_b_inited(&select->file)) - { - records=special=select->records; /* purecov: deadcode */ - selected_records_file= &select->file; /* purecov: deadcode */ - reinit_io_cache(selected_records_file,READ_CACHE,0L,0,0); /* purecov: deadcode */ - } - else if (special) - { - records=special; /* purecov: deadcode */ - selected_records_file= outfile; /* purecov: deadcode */ - reinit_io_cache(selected_records_file,READ_CACHE,0L,0,0); /* purecov: deadcode */ - } #ifdef CAN_TRUST_RANGE - else if (select && select->quick && select->quick->records > 0L) + if (select && select->quick && select->quick->records > 0L) { records=min((ha_rows) (select->quick->records*2+EXTRA_RECORDS*2), table->file->records)+EXTRA_RECORDS; selected_records_file=0; } -#endif else +#endif { - records=table->file->estimate_number_of_rows(); + records= table->file->estimate_rows_upper_bound(); + /* + If number of records is not known, use as much of sort buffer + as possible. + */ + if (records == HA_POS_ERROR) + records--; // we use 'records+1' below. 
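The block above sets up one of two record layouts for the sort buffer: when get_addon_fields() returns a descriptor array, every sort record is the sort key followed by the packed column values (res_length == addon_length); otherwise the row reference is appended to the key itself (res_length == ref_length) and the rows have to be fetched again after sorting. A rough sketch of that arithmetic; the lengths are invented, only the assignments mirror the code:

  #include <cstdio>

  int main()
  {
    unsigned sort_length = 24;   /* from sortlength()                 */
    unsigned ref_length  = 8;    /* handler row-reference length      */
    unsigned addon_length= 40;   /* total packed "addon" column bytes */

    for (int use_addon= 1; use_addon >= 0; use_addon--)
    {
      unsigned slen= sort_length;
      unsigned alen= use_addon ? addon_length : 0;
      unsigned res_length= use_addon ? addon_length : ref_length;
      if (!use_addon)
        slen+= ref_length;               /* the rowid becomes part of the sorted key */
      unsigned rec_length= slen + alen;  /* bytes per record in the sort buffer      */
      std::printf("addon=%d: rec_length=%u res_length=%u\n",
                  use_addon, rec_length, res_length);
    }
    return 0;
  }

With addon fields the sorted result already contains everything the query needs, which is what lets unpack_addon_fields() further down avoid a second pass over the table.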
selected_records_file= 0; } -#ifdef USE_STRCOLL - if (use_strcoll(default_charset_info) && + if (multi_byte_charset && !(param.tmp_buffer=my_malloc(param.sort_length,MYF(MY_WME)))) goto err; -#endif memavl= thd->variables.sortbuff_size; min_sort_memory= max(MIN_SORT_MEMORY, param.sort_length*MERGEBUFF2); while (memavl >= min_sort_memory) { ulong old_memavl; - ulong keys= memavl/(param.sort_length+sizeof(char*)); + ulong keys= memavl/(param.rec_length+sizeof(char*)); param.keys=(uint) min(records+1, keys); - if ((sort_keys= (uchar **) make_char_array(param.keys, param.sort_length, + if ((sort_keys= (uchar **) make_char_array(param.keys, param.rec_length, MYF(0)))) break; old_memavl=memavl; @@ -179,7 +207,7 @@ ha_rows filesort(TABLE *table, SORT_FIELD *sortorder, uint s_length, DISK_BUFFER_SIZE, MYF(MY_WME))) goto err; - param.keys--; + param.keys--; /* TODO: check why we do this */ param.sort_form= table; param.end=(param.local_sortorder=sortorder)+s_length; if ((records=find_all_keys(¶m,select,sort_keys, &buffpek_pointers, @@ -209,8 +237,8 @@ ha_rows filesort(TABLE *table, SORT_FIELD *sortorder, uint s_length, Use also the space previously used by string pointers in sort_buffer for temporary key storage. */ - param.keys=((param.keys*(param.sort_length+sizeof(char*))) / - param.sort_length-1); + param.keys=((param.keys*(param.rec_length+sizeof(char*))) / + param.rec_length-1); maxbuffer--; // Offset from 0 if (merge_many_buff(¶m,(uchar*) sort_keys,buffpek,&maxbuffer, &tempfile)) @@ -227,10 +255,8 @@ ha_rows filesort(TABLE *table, SORT_FIELD *sortorder, uint s_length, error =0; err: -#ifdef USE_STRCOLL - if (use_strcoll(default_charset_info)) + if (param.tmp_buffer) x_free(param.tmp_buffer); -#endif x_free((gptr) sort_keys); x_free((gptr) buffpek); close_cached_file(&tempfile); @@ -260,6 +286,22 @@ ha_rows filesort(TABLE *table, SORT_FIELD *sortorder, uint s_length, } /* filesort */ +void filesort_free_buffers(TABLE *table) +{ + if (table->sort.record_pointers) + { + my_free((gptr) table->sort.record_pointers,MYF(0)); + table->sort.record_pointers=0; + } + if (table->sort.addon_buf) + { + my_free((char *) table->sort.addon_buf, MYF(0)); + my_free((char *) table->sort.addon_field, MYF(MY_ALLOW_ZERO_PTR)); + table->sort.addon_buf=0; + table->sort.addon_field=0; + } +} + /* Make a array of string pointers */ static char **make_char_array(register uint fields, uint length, myf my_flag) @@ -279,7 +321,7 @@ static char **make_char_array(register uint fields, uint length, myf my_flag) } /* make_char_array */ - /* Read all buffer pointers into memory */ +/* Read 'count' number of buffer pointers into memory */ static BUFFPEK *read_buffpek_from_file(IO_CACHE *buffpek_pointers, uint count) { @@ -294,14 +336,46 @@ static BUFFPEK *read_buffpek_from_file(IO_CACHE *buffpek_pointers, uint count) { my_free((char*) tmp, MYF(0)); tmp=0; - } + } } DBUG_RETURN(tmp); } - - /* Search after sort_keys and place them in a temp. file */ +/* + Search after sort_keys and write them into tempfile. + SYNOPSIS + find_all_keys() + param Sorting parameter + select Use this to get source data + sort_keys Array of pointers to sort key + addon buffers. + buffpek_pointers File to write BUFFPEKs describing sorted segments + in tempfile. + tempfile File to write sorted sequences of sortkeys to. 
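The memavl loop above sizes the in-memory chunk: each slot costs rec_length bytes for the record plus one char* for the pointer array that my_string_ptr_sort() works on, capped at records+1 slots. A back-of-the-envelope version with invented numbers (and assuming an 8-byte pointer):

  #include <algorithm>
  #include <cstdio>

  int main()
  {
    unsigned long sortbuff_size= 262144;   /* thd->variables.sortbuff_size, say 256K */
    unsigned long rec_length   = 64;       /* sort key + addon bytes (example)       */
    unsigned long records      = 100000;   /* estimate_rows_upper_bound() (example)  */

    unsigned long keys= sortbuff_size / (rec_length + sizeof(char*));
    keys= std::min(records + 1, keys);     /* never more slots than rows */
    std::printf("%lu records per in-memory chunk\n", keys);   /* 3640 here */
    return 0;
  }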
+ indexfile If !NULL, use it for source data (contains rowids) + + NOTE + Basic idea: + while (get_next_sortkey()) + { + if (no free space in sort_keys buffers) + { + sort sort_keys buffer; + dump sorted sequence to 'tempfile'; + dump BUFFPEK describing sequence location into 'buffpek_pointers'; + } + put sort key into 'sort_keys'; + } + if (sort_keys has some elements && dumped at least once) + sort-dump-dump as above; + else + don't sort, leave sort_keys array to be sorted by caller. + + All produced sequences are guaranteed to be non-empty. + RETURN + Number of records written on success. + HA_POS_ERROR on error. +*/ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select, uchar **sort_keys, @@ -313,7 +387,7 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select, byte *ref_pos,*next_pos,ref_buff[MAX_REFLENGTH]; my_off_t record; TABLE *sort_form; - volatile bool *killed= ¤t_thd->killed; + volatile my_bool *killed= ¤t_thd->killed; handler *file; DBUG_ENTER("find_all_keys"); DBUG_PRINT("info",("using: %s",(select?select->quick?"ranges":"where":"every row"))); @@ -337,7 +411,7 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select, if (sort_form->key_read) // QQ Can be removed after the reset file->extra(HA_EXTRA_KEYREAD); // QQ is removed next_pos=(byte*) 0; /* Find records in sequence */ - file->rnd_init(); + file->ha_rnd_init(1); file->extra_opt(HA_EXTRA_CACHE, current_thd->variables.read_buff_size); } @@ -369,7 +443,7 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select, ha_store_ptr(ref_pos,ref_length,record); // Position to row record+=sort_form->db_record_offset; } - else + else if (!error) file->position(sort_form->record[0]); } if (error && error != HA_ERR_RECORD_DELETED) @@ -378,13 +452,16 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select, if (*killed) { DBUG_PRINT("info",("Sort killed by user")); - (void) file->extra(HA_EXTRA_NO_CACHE); - file->rnd_end(); + if (!indexfile && !quick_select) + { + (void) file->extra(HA_EXTRA_NO_CACHE); + file->ha_rnd_end(); + } DBUG_RETURN(HA_POS_ERROR); /* purecov: inspected */ } if (error == 0) param->examined_rows++; - if (error == 0 && (!select || select->skipp_record() == 0)) + if (error == 0 && (!select || select->skip_record() == 0)) { if (idx == param->keys) { @@ -399,7 +476,8 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select, file->unlock_row(); } (void) file->extra(HA_EXTRA_NO_CACHE); /* End cacheing of records */ - file->rnd_end(); + if (!next_pos) + file->ha_rnd_end(); DBUG_PRINT("test",("error: %d indexpos: %d",error,indexpos)); if (error != HA_ERR_END_OF_FILE) { @@ -410,38 +488,57 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select, write_keys(param,sort_keys,idx,buffpek_pointers,tempfile)) DBUG_RETURN(HA_POS_ERROR); /* purecov: inspected */ DBUG_RETURN(my_b_inited(tempfile) ? - (ha_rows) (my_b_tell(tempfile)/param->sort_length) : + (ha_rows) (my_b_tell(tempfile)/param->rec_length) : idx); } /* find_all_keys */ - /* Skriver en buffert med nycklar till filen */ +/* + Sort the buffer and write: + 1) the sorted sequence to tempfile + 2) a BUFFPEK describing the sorted sequence position to buffpek_pointers + (was: Skriver en buffert med nycklar till filen) + SYNOPSIS + write_keys() + param Sort parameters + sort_keys Array of pointers to keys to sort + count Number of elements in sort_keys array + buffpek_pointers One 'BUFFPEK' struct will be written into this file. 
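The NOTE above is the producer half of the external sort: rows are turned into keys in the in-memory buffer, and each time the buffer fills it is sorted and flushed to tempfile while a BUFFPEK recording the chunk's file position and row count goes to buffpek_pointers. A schematic stand-alone version of that loop, with std:: containers and a Buffpek stand-in instead of the server's IO_CACHE machinery:

  #include <algorithm>
  #include <cstdio>
  #include <string>
  #include <vector>

  struct Buffpek { long file_pos; size_t count; };    /* stand-in for BUFFPEK */

  int main()
  {
    std::vector<std::string> buffer;                  /* plays sort_keys        */
    std::vector<Buffpek>     chunks;                  /* plays buffpek_pointers */
    std::FILE *tempfile= std::tmpfile();
    if (!tempfile)
      return 1;
    const size_t max_keys= 4;                         /* param->keys, tiny for the demo */

    const char *rows[]= {"delta","alpha","echo","charlie","bravo","foxtrot"};
    for (const char *row : rows)
    {
      if (buffer.size() == max_keys)                  /* buffer full: sort + dump */
      {
        std::sort(buffer.begin(), buffer.end());
        Buffpek bp= { std::ftell(tempfile), buffer.size() };
        for (const std::string &k : buffer)
          std::fwrite(k.data(), 1, k.size() + 1, tempfile);
        chunks.push_back(bp);
        buffer.clear();
      }
      buffer.push_back(row);
    }
    /* leftover keys: if nothing was ever dumped the caller can use them in place
       (save_index()); otherwise they become one more sorted chunk to merge */
    std::sort(buffer.begin(), buffer.end());
    std::printf("%zu chunk(s) on disk, %zu keys still in memory\n",
                chunks.size(), buffer.size());
    return 0;
  }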
+ The BUFFPEK::{file_pos, count} will indicate where + the sorted data was stored. + tempfile The sorted sequence will be written into this file. + + RETURN + 0 OK + 1 Error +*/ static int write_keys(SORTPARAM *param, register uchar **sort_keys, uint count, - IO_CACHE *buffpek_pointers, IO_CACHE *tempfile) + IO_CACHE *buffpek_pointers, IO_CACHE *tempfile) { - uint sort_length; + uint sort_length, rec_length; uchar **end; BUFFPEK buffpek; DBUG_ENTER("write_keys"); - sort_length=param->sort_length; + sort_length= param->sort_length; + rec_length= param->rec_length; #ifdef MC68000 quicksort(sort_keys,count,sort_length); #else - my_string_ptr_sort((gptr) sort_keys,(uint) count,sort_length); + my_string_ptr_sort((gptr) sort_keys, (uint) count, sort_length); #endif if (!my_b_inited(tempfile) && - open_cached_file(tempfile,mysql_tmpdir,TEMP_PREFIX,DISK_BUFFER_SIZE, - MYF(MY_WME))) - goto err; /* purecov: inspected */ - buffpek.file_pos=my_b_tell(tempfile); + open_cached_file(tempfile, mysql_tmpdir, TEMP_PREFIX, DISK_BUFFER_SIZE, + MYF(MY_WME))) + goto err; /* purecov: inspected */ + buffpek.file_pos= my_b_tell(tempfile); if ((ha_rows) count > param->max_rows) - count=(uint) param->max_rows; /* purecov: inspected */ + count=(uint) param->max_rows; /* purecov: inspected */ buffpek.count=(ha_rows) count; for (end=sort_keys+count ; sort_keys != end ; sort_keys++) - if (my_b_write(tempfile,(byte*) *sort_keys,(uint) sort_length)) + if (my_b_write(tempfile, (byte*) *sort_keys, (uint) rec_length)) goto err; if (my_b_write(buffpek_pointers, (byte*) &buffpek, sizeof(buffpek))) goto err; @@ -490,10 +587,13 @@ static void make_sortkey(register SORTPARAM *param, switch (sort_field->result_type) { case STRING_RESULT: { + CHARSET_INFO *cs=item->collation.collation; + char fill_char= ((cs->state & MY_CS_BINSORT) ? (char) 0 : ' '); + if ((maybe_null=item->maybe_null)) *to++=1; /* All item->str() to use some extra byte for end null.. 
*/ - String tmp((char*) to,sort_field->length+4); + String tmp((char*) to,sort_field->length+4,cs); String *res=item->val_str(&tmp); if (!res) { @@ -514,43 +614,24 @@ static void make_sortkey(register SORTPARAM *param, diff=0; /* purecov: inspected */ length=sort_field->length; } -#ifdef USE_STRCOLL - if (use_strcoll(default_charset_info)) + if (sort_field->need_strxnfrm) { - if (item->binary) - { - if (res->ptr() != (char*) to) - memcpy(to,res->ptr(),length); - bzero((char*) to+length,diff); - } - else - { - char *from=(char*) res->ptr(); - if ((unsigned char *)from == to) - { - set_if_smaller(length,sort_field->length); - memcpy(param->tmp_buffer,from,length); - from=param->tmp_buffer; - } - uint tmp_length=my_strnxfrm(default_charset_info, - to,(unsigned char *) from, - sort_field->length, - length); - if (tmp_length < sort_field->length) - bzero((char*) to+tmp_length,sort_field->length-tmp_length); - } + char *from=(char*) res->ptr(); + if ((unsigned char *)from == to) + { + set_if_smaller(length,sort_field->length); + memcpy(param->tmp_buffer,from,length); + from=param->tmp_buffer; + } + uint tmp_length=my_strnxfrm(cs,to,sort_field->length, + (unsigned char *) from, length); + DBUG_ASSERT(tmp_length == sort_field->length); } else { -#endif - if (res->ptr() != (char*) to) - memcpy(to,res->ptr(),length); - bzero((char *)to+length,diff); - if (!item->binary) - case_sort((char*) to,length); -#ifdef USE_STRCOLL + my_strnxfrm(cs,(uchar*)to,length,(const uchar*)res->ptr(),length); + cs->cset->fill(cs, (char *)to+length,diff,fill_char); } -#endif break; } case INT_RESULT: @@ -578,12 +659,18 @@ static void make_sortkey(register SORTPARAM *param, to[3]= (uchar) (value >> 32); to[2]= (uchar) (value >> 40); to[1]= (uchar) (value >> 48); - to[0]= (uchar) (value >> 56) ^ 128; // Fix sign + if (item->unsigned_flag) /* Fix sign */ + to[0]= (uchar) (value >> 56); + else + to[0]= (uchar) (value >> 56) ^ 128; /* Reverse signbit */ #else to[3]= (uchar) value; to[2]= (uchar) (value >> 8); to[1]= (uchar) (value >> 16); - to[0]= (uchar) (value >> 24) ^ 128; // Fix sign + if (item->unsigned_flag) /* Fix sign */ + to[0]= (uchar) (value >> 24); + else + to[0]= (uchar) (value >> 24) ^ 128; /* Reverse signbit */ #endif break; } @@ -601,6 +688,11 @@ static void make_sortkey(register SORTPARAM *param, change_double_for_sort(value,(byte*) to); break; } + case ROW_RESULT: + default: + // This case should never be choosen + DBUG_ASSERT(0); + break; } } if (sort_field->reverse) @@ -617,29 +709,69 @@ static void make_sortkey(register SORTPARAM *param, else to+= sort_field->length; } - memcpy((byte*) to,ref_pos,(size_s) param->ref_length);/* Save filepos last */ + + if (param->addon_field) + { + /* + Save field values appended to sorted fields. + First null bit indicators are appended then field values follow. + In this implementation we use fixed layout for field values - + the same for all records. 
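The fixed layout described above means every sort record carries an identical addon area: a leading block of null-indicator bytes, then each column's packed value at a precomputed offset (SORT_ADDON_FIELD::{offset, null_offset, null_bit, length}). A toy illustration with one NOT NULL int column and one nullable double; the widths are invented and plain memcpy() stands in for Field::pack():

  #include <cstdio>
  #include <cstring>

  int main()
  {
    /* layout: 1 null-indicator byte, column A (4 bytes, NOT NULL) at offset 1,
       column B (8 bytes, nullable) at offset 5 */
    unsigned char rec[1 + 4 + 8];
    std::memset(rec, 0, sizeof(rec));

    int    a= 42;
    double b= 0.0;
    bool   b_is_null= true;                   /* pretend column B is NULL in this row */

    std::memcpy(rec + 1, &a, sizeof(a));      /* column A always lands at its offset */
    if (b_is_null)
      rec[0]|= 1;                             /* null_offset= 0, null_bit= 1 */
    else
      std::memcpy(rec + 5, &b, sizeof(b));

    int a_back;                               /* the unpack direction reads it back */
    std::memcpy(&a_back, rec + 1, sizeof(a_back));
    std::printf("null byte=0x%02x  A=%d  B is %s\n",
                (unsigned) rec[0], a_back, (rec[0] & 1) ? "NULL" : "set");
    return 0;
  }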
+ */ + SORT_ADDON_FIELD *addonf= param->addon_field; + uchar *nulls= to; + DBUG_ASSERT(addonf); + bzero((char *) nulls, addonf->offset); + to+= addonf->offset; + for ( ; (field= addonf->field) ; addonf++) + { + if (addonf->null_bit && field->is_null()) + { + nulls[addonf->null_offset]|= addonf->null_bit; +#ifdef HAVE_purify + bzero(to, addonf->length); +#endif + } + else + { + uchar *end= (uchar*) field->pack((char *) to, field->ptr); +#ifdef HAVE_purify + uint length= (uint) ((to + addonf->length) - end); + DBUG_ASSERT((int) length >= 0); + if (length) + bzero(end, length); +#endif + } + to+= addonf->length; + } + } + else + { + /* Save filepos last */ + memcpy((byte*) to, ref_pos, (size_s) param->ref_length); + } return; } static bool save_index(SORTPARAM *param, uchar **sort_keys, uint count) { - uint offset,ref_length; + uint offset,res_length; byte *to; DBUG_ENTER("save_index"); - my_string_ptr_sort((gptr) sort_keys,(uint) count,param->sort_length); - ref_length=param->ref_length; - offset=param->sort_length-ref_length; + my_string_ptr_sort((gptr) sort_keys, (uint) count, param->sort_length); + res_length= param->res_length; + offset= param->rec_length-res_length; if ((ha_rows) count > param->max_rows) count=(uint) param->max_rows; - if (!(to=param->sort_form->record_pointers= - (byte*) my_malloc(ref_length*count,MYF(MY_WME)))) - DBUG_RETURN(1); /* purecov: inspected */ - for (uchar **end=sort_keys+count ; sort_keys != end ; sort_keys++) + if (!(to= param->sort_form->sort.record_pointers= + (byte*) my_malloc(res_length*count, MYF(MY_WME)))) + DBUG_RETURN(1); /* purecov: inspected */ + for (uchar **end= sort_keys+count ; sort_keys != end ; sort_keys++) { - memcpy(to,*sort_keys+offset,ref_length); - to+=ref_length; + memcpy(to, *sort_keys+offset, res_length); + to+= res_length; } DBUG_RETURN(0); } @@ -699,7 +831,7 @@ int merge_many_buff(SORTPARAM *param, uchar *sort_buffer, /* This returns (uint) -1 if something goes wrong */ uint read_to_buffer(IO_CACHE *fromfile, BUFFPEK *buffpek, - uint sort_length) + uint rec_length) { register uint count; uint length; @@ -707,65 +839,87 @@ uint read_to_buffer(IO_CACHE *fromfile, BUFFPEK *buffpek, if ((count=(uint) min((ha_rows) buffpek->max_keys,buffpek->count))) { if (my_pread(fromfile->file,(byte*) buffpek->base, - (length= sort_length*count),buffpek->file_pos,MYF_RW)) + (length= rec_length*count),buffpek->file_pos,MYF_RW)) return((uint) -1); /* purecov: inspected */ buffpek->key=buffpek->base; buffpek->file_pos+= length; /* New filepos */ buffpek->count-= count; buffpek->mem_count= count; } - return (count*sort_length); + return (count*rec_length); } /* read_to_buffer */ - /* Merge buffers to one buffer */ +/* + Merge buffers to one buffer + SYNOPSIS + merge_buffers() + param Sort parameter + from_file File with source data (BUFFPEKs point to this file) + to_file File to write the sorted result data. + sort_buffer Buffer for data to store up to MERGEBUFF2 sort keys. 
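merge_buffers(), whose parameters are being documented here, is a k-way merge: the head record of every chunk sits in a queue ordered on the first sort_length bytes, the smallest is popped and written to to_file (the whole rec_length record, or only the trailing res_length bytes when flag != 0), and a chunk is refilled via read_to_buffer() when its in-memory slice runs out. A compressed illustration with std::priority_queue over purely in-memory chunks, leaving out refilling and the unique_buff duplicate elimination:

  #include <cstdio>
  #include <functional>
  #include <queue>
  #include <string>
  #include <utility>
  #include <vector>

  int main()
  {
    /* already-sorted chunks, as write_keys() would have produced them */
    std::vector<std::vector<std::string> > chunks= {
      {"alpha", "delta"}, {"bravo", "echo"}, {"charlie", "foxtrot"} };
    std::vector<size_t> pos(chunks.size(), 0);

    typedef std::pair<std::string, size_t> Head;             /* (key, chunk index) */
    std::priority_queue<Head, std::vector<Head>, std::greater<Head> > queue;
    for (size_t i= 0; i < chunks.size(); i++)
      queue.push(Head(chunks[i][0], i));

    while (!queue.empty())
    {
      Head top= queue.top();
      queue.pop();
      std::printf("%s\n", top.first.c_str());                /* "write" smallest key */
      size_t i= top.second;
      if (++pos[i] < chunks[i].size())                       /* advance that chunk,  */
        queue.push(Head(chunks[i][pos[i]], i));              /* cf. read_to_buffer() */
    }
    return 0;
  }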
+ lastbuff OUT Store here BUFFPEK describing data written to to_file + Fb First element in source BUFFPEKs array + Tb Last element in source BUFFPEKs array + flag + + RETURN + 0 - OK + other - error +*/ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file, - IO_CACHE *to_file, uchar *sort_buffer, - BUFFPEK *lastbuff, BUFFPEK *Fb, BUFFPEK *Tb, - int flag) + IO_CACHE *to_file, uchar *sort_buffer, + BUFFPEK *lastbuff, BUFFPEK *Fb, BUFFPEK *Tb, + int flag) { int error; - uint sort_length,offset; + uint rec_length,sort_length,res_length,offset; ulong maxcount; ha_rows max_rows,org_max_rows; my_off_t to_start_filepos; uchar *strpos; BUFFPEK *buffpek,**refpek; QUEUE queue; - qsort2_cmp cmp; - volatile bool *killed= ¤t_thd->killed; - bool not_killable; + qsort2_cmp cmp; + volatile my_bool *killed= ¤t_thd->killed; + my_bool not_killable; DBUG_ENTER("merge_buffers"); statistic_increment(filesort_merge_passes, &LOCK_status); if (param->not_killable) { killed= ¬_killable; - not_killable=0; + not_killable= 0; } error=0; - offset=(sort_length=param->sort_length)-param->ref_length; - maxcount=(ulong) (param->keys/((uint) (Tb-Fb) +1)); - to_start_filepos=my_b_tell(to_file); - strpos=(uchar*) sort_buffer; - org_max_rows=max_rows=param->max_rows; - - if (init_queue(&queue,(uint) (Tb-Fb)+1,offsetof(BUFFPEK,key),0, - (queue_compare) - (cmp=get_ptr_compare(sort_length)),(void*) &sort_length)) - DBUG_RETURN(1); /* purecov: inspected */ + rec_length= param->rec_length; + res_length= param->res_length; + sort_length= param->sort_length; + offset= rec_length-res_length; + maxcount= (ulong) (param->keys/((uint) (Tb-Fb) +1)); + to_start_filepos= my_b_tell(to_file); + strpos= (uchar*) sort_buffer; + org_max_rows=max_rows= param->max_rows; + + /* The following will fire if there is not enough space in sort_buffer */ + DBUG_ASSERT(maxcount!=0); + + if (init_queue(&queue, (uint) (Tb-Fb)+1, offsetof(BUFFPEK,key), 0, + (queue_compare) (cmp= get_ptr_compare(sort_length)), + (void*) &sort_length)) + DBUG_RETURN(1); /* purecov: inspected */ for (buffpek= Fb ; buffpek <= Tb ; buffpek++) { buffpek->base= strpos; - buffpek->max_keys=maxcount; - strpos+= (uint) (error=(int) read_to_buffer(from_file,buffpek, - sort_length)); + buffpek->max_keys= maxcount; + strpos+= (uint) (error= (int) read_to_buffer(from_file, buffpek, + rec_length)); if (error == -1) goto err; /* purecov: inspected */ buffpek->max_keys= buffpek->mem_count; // If less data in buffers than expected - queue_insert(&queue,(byte*) buffpek); + queue_insert(&queue, (byte*) buffpek); } if (param->unique_buff) @@ -778,98 +932,101 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file, This is safe as we know that there is always more than one element in each block to merge (This is guaranteed by the Unique:: algorithm */ - buffpek=(BUFFPEK*) queue_top(&queue); - memcpy(param->unique_buff, buffpek->key, sort_length); - if (my_b_write(to_file,(byte*) buffpek->key, sort_length)) + buffpek= (BUFFPEK*) queue_top(&queue); + memcpy(param->unique_buff, buffpek->key, rec_length); + if (my_b_write(to_file, (byte*) buffpek->key, rec_length)) { - error=1; goto err; /* purecov: inspected */ + error=1; goto err; /* purecov: inspected */ } - buffpek->key+=sort_length; + buffpek->key+= rec_length; buffpek->mem_count--; if (!--max_rows) { - error=0; /* purecov: inspected */ - goto end; /* purecov: inspected */ + error= 0; /* purecov: inspected */ + goto end; /* purecov: inspected */ } - queue_replaced(&queue); // Top element has been used + queue_replaced(&queue); // Top element 
has been used } else - cmp=0; // Not unique + cmp= 0; // Not unique while (queue.elements > 1) { if (*killed) { - error=1; goto err; /* purecov: inspected */ + error= 1; goto err; /* purecov: inspected */ } for (;;) { - buffpek=(BUFFPEK*) queue_top(&queue); - if (cmp) // Remove duplicates + buffpek= (BUFFPEK*) queue_top(&queue); + if (cmp) // Remove duplicates { - if (!(*cmp)(&sort_length, &(param->unique_buff), - (uchar**) &buffpek->key)) - goto skip_duplicate; - memcpy(param->unique_buff, (uchar*) buffpek->key,sort_length); + if (!(*cmp)(&sort_length, &(param->unique_buff), + (uchar**) &buffpek->key)) + goto skip_duplicate; + memcpy(param->unique_buff, (uchar*) buffpek->key, rec_length); } if (flag == 0) { - if (my_b_write(to_file,(byte*) buffpek->key, sort_length)) - { - error=1; goto err; /* purecov: inspected */ - } + if (my_b_write(to_file,(byte*) buffpek->key, rec_length)) + { + error=1; goto err; /* purecov: inspected */ + } } else { - WRITE_REF(to_file,(byte*) buffpek->key+offset); + if (my_b_write(to_file, (byte*) buffpek->key+offset, res_length)) + { + error=1; goto err; /* purecov: inspected */ + } } if (!--max_rows) { - error=0; /* purecov: inspected */ - goto end; /* purecov: inspected */ + error= 0; /* purecov: inspected */ + goto end; /* purecov: inspected */ } skip_duplicate: - buffpek->key+=sort_length; + buffpek->key+= rec_length; if (! --buffpek->mem_count) { - if (!(error=(int) read_to_buffer(from_file,buffpek, - sort_length))) - { - uchar *base=buffpek->base; - ulong max_keys=buffpek->max_keys; - - VOID(queue_remove(&queue,0)); - - /* Put room used by buffer to use in other buffer */ - for (refpek= (BUFFPEK**) &queue_top(&queue); - refpek <= (BUFFPEK**) &queue_end(&queue); - refpek++) - { - buffpek= *refpek; - if (buffpek->base+buffpek->max_keys*sort_length == base) - { - buffpek->max_keys+=max_keys; - break; - } - else if (base+max_keys*sort_length == buffpek->base) - { - buffpek->base=base; - buffpek->max_keys+=max_keys; - break; - } - } - break; /* One buffer have been removed */ - } - else if (error == -1) - goto err; /* purecov: inspected */ + if (!(error= (int) read_to_buffer(from_file,buffpek, + rec_length))) + { + uchar *base= buffpek->base; + ulong max_keys= buffpek->max_keys; + + VOID(queue_remove(&queue,0)); + + /* Put room used by buffer to use in other buffer */ + for (refpek= (BUFFPEK**) &queue_top(&queue); + refpek <= (BUFFPEK**) &queue_end(&queue); + refpek++) + { + buffpek= *refpek; + if (buffpek->base+buffpek->max_keys*rec_length == base) + { + buffpek->max_keys+= max_keys; + break; + } + else if (base+max_keys*rec_length == buffpek->base) + { + buffpek->base= base; + buffpek->max_keys+= max_keys; + break; + } + } + break; /* One buffer have been removed */ + } + else if (error == -1) + goto err; /* purecov: inspected */ } - queue_replaced(&queue); /* Top element has been replaced */ + queue_replaced(&queue); /* Top element has been replaced */ } } - buffpek=(BUFFPEK*) queue_top(&queue); + buffpek= (BUFFPEK*) queue_top(&queue); buffpek->base= sort_buffer; - buffpek->max_keys=param->keys; + buffpek->max_keys= param->keys; /* As we know all entries in the buffer are unique, we only have to @@ -879,7 +1036,7 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file, { if (!(*cmp)(&sort_length, &(param->unique_buff), (uchar**) &buffpek->key)) { - buffpek->key+=sort_length; // Remove duplicate + buffpek->key+= rec_length; // Remove duplicate --buffpek->mem_count; } } @@ -887,37 +1044,40 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file, do { if 
((ha_rows) buffpek->mem_count > max_rows) - { /* Don't write too many records */ - buffpek->mem_count=(uint) max_rows; - buffpek->count=0; /* Don't read more */ + { /* Don't write too many records */ + buffpek->mem_count= (uint) max_rows; + buffpek->count= 0; /* Don't read more */ } - max_rows-=buffpek->mem_count; + max_rows-= buffpek->mem_count; if (flag == 0) { if (my_b_write(to_file,(byte*) buffpek->key, - (sort_length*buffpek->mem_count))) + (rec_length*buffpek->mem_count))) { - error=1; goto err; /* purecov: inspected */ + error= 1; goto err; /* purecov: inspected */ } } else { register uchar *end; strpos= buffpek->key+offset; - for (end=strpos+buffpek->mem_count*sort_length; - strpos != end ; - strpos+=sort_length) - { - WRITE_REF(to_file,strpos); + for (end= strpos+buffpek->mem_count*rec_length ; + strpos != end ; + strpos+= rec_length) + { + if (my_b_write(to_file, (byte *) strpos, res_length)) + { + error=1; goto err; + } } } } - while ((error=(int) read_to_buffer(from_file,buffpek,sort_length)) - != -1 && error != 0); + while ((error=(int) read_to_buffer(from_file,buffpek, rec_length)) + != -1 && error != 0); end: - lastbuff->count=min(org_max_rows-max_rows,param->max_rows); - lastbuff->file_pos=to_start_filepos; + lastbuff->count= min(org_max_rows-max_rows, param->max_rows); + lastbuff->file_pos= to_start_filepos; err: delete_queue(&queue); DBUG_RETURN(error); @@ -938,17 +1098,37 @@ static int merge_index(SORTPARAM *param, uchar *sort_buffer, } /* merge_index */ - /* Calculate length of sort key */ +/* + Calculate length of sort key + + SYNOPSIS + sortlength() + sortorder Order of items to sort + uint s_length Number of items to sort + multi_byte_charset (out) + Set to 1 if we are using multi-byte charset + (In which case we have to use strxnfrm()) + + NOTES + sortorder->length is updated for each sort item + sortorder->need_strxnfrm is set 1 if we have to use strxnfrm + + RETURN + Total length of sort buffer in bytes +*/ static uint -sortlength(SORT_FIELD *sortorder, uint s_length) +sortlength(SORT_FIELD *sortorder, uint s_length, bool *multi_byte_charset) { reg2 uint length; THD *thd= current_thd; + CHARSET_INFO *cs; + *multi_byte_charset= 0; length=0; for (; s_length-- ; sortorder++) { + sortorder->need_strxnfrm= 0; if (sortorder->field) { if (sortorder->field->type() == FIELD_TYPE_BLOB) @@ -956,10 +1136,12 @@ sortlength(SORT_FIELD *sortorder, uint s_length) else { sortorder->length=sortorder->field->pack_length(); -#ifdef USE_STRCOLL - if (use_strcoll(default_charset_info) && !sortorder->field->binary()) - sortorder->length= sortorder->length*MY_STRXFRM_MULTIPLY; -#endif + if (use_strnxfrm((cs=sortorder->field->sort_charset()))) + { + sortorder->need_strxnfrm= 1; + *multi_byte_charset= 1; + sortorder->length= sortorder->length*cs->strxfrm_multiply; + } } if (sortorder->field->maybe_null()) length++; // Place for NULL marker @@ -969,10 +1151,12 @@ sortlength(SORT_FIELD *sortorder, uint s_length) switch ((sortorder->result_type=sortorder->item->result_type())) { case STRING_RESULT: sortorder->length=sortorder->item->max_length; -#ifdef USE_STRCOLL - if (use_strcoll(default_charset_info) && !sortorder->item->binary) - sortorder->length= sortorder->length*MY_STRXFRM_MULTIPLY; -#endif + if (use_strnxfrm((cs=sortorder->item->collation.collation))) + { + sortorder->length= sortorder->length*cs->strxfrm_multiply; + sortorder->need_strxnfrm= 1; + *multi_byte_charset= 1; + } break; case INT_RESULT: #if SIZEOF_LONG_LONG > 4 @@ -984,6 +1168,11 @@ sortlength(SORT_FIELD *sortorder, uint 
s_length) case REAL_RESULT: sortorder->length=sizeof(double); break; + case ROW_RESULT: + default: + // This case should never be choosen + DBUG_ASSERT(0); + break; } if (sortorder->item->maybe_null) length++; // Place for NULL marker @@ -998,6 +1187,148 @@ sortlength(SORT_FIELD *sortorder, uint s_length) /* + Get descriptors of fields appended to sorted fields and + calculate its total length + + SYNOPSIS + get_addon_fields() + thd Current thread + ptabfields Array of references to the table fields + sortlength Total length of sorted fields + plength out: Total length of appended fields + + DESCRIPTION + The function first finds out what fields are used in the result set. + Then it calculates the length of the buffer to store the values of + these fields together with the value of sort values. + If the calculated length is not greater than max_length_for_sort_data + the function allocates memory for an array of descriptors containing + layouts for the values of the non-sorted fields in the buffer and + fills them. + + NOTES + The null bits for the appended values are supposed to be put together + and stored the buffer just ahead of the value of the first field. + + RETURN + Pointer to the layout descriptors for the appended fields, if any + NULL - if we do not store field values with sort data. +*/ + +static SORT_ADDON_FIELD * +get_addon_fields(THD *thd, Field **ptabfield, uint sortlength, uint *plength) +{ + Field **pfield; + Field *field; + SORT_ADDON_FIELD *addonf; + uint length= 0; + uint fields= 0; + uint null_fields= 0; + + /* + If there is a reference to a field in the query add it + to the the set of appended fields. + Note for future refinement: + This this a too strong condition. + Actually we need only the fields referred in the + result set. And for some of them it makes sense to use + the values directly from sorted fields. + */ + *plength= 0; + /* + The following statement is added to avoid sorting in alter_table. + The fact is the filter 'field->query_id != thd->query_id' + doesn't work for alter table + */ + if (thd->lex->sql_command != SQLCOM_SELECT) + return 0; + for (pfield= ptabfield; (field= *pfield) ; pfield++) + { + if (field->query_id != thd->query_id) + continue; + if (field->flags & BLOB_FLAG) + return 0; + length+= field->max_packed_col_length(field->pack_length()); + if (field->maybe_null()) + null_fields++; + fields++; + } + if (!fields) + return 0; + length+= (null_fields+7)/8; + + if (length+sortlength > thd->variables.max_length_for_sort_data || + !(addonf= (SORT_ADDON_FIELD *) my_malloc(sizeof(SORT_ADDON_FIELD)* + (fields+1), MYF(MY_WME)))) + return 0; + + *plength= length; + length= (null_fields+7)/8; + null_fields= 0; + for (pfield= ptabfield; (field= *pfield) ; pfield++) + { + if (field->query_id != thd->query_id) + continue; + addonf->field= field; + addonf->offset= length; + if (field->maybe_null()) + { + addonf->null_offset= null_fields/8; + addonf->null_bit= 1<<(null_fields & 7); + null_fields++; + } + else + { + addonf->null_offset= 0; + addonf->null_bit= 0; + } + addonf->length= field->max_packed_col_length(field->pack_length()); + length+= addonf->length; + addonf++; + } + addonf->field= 0; // Put end marker + + DBUG_PRINT("info",("addon_length: %d",length)); + return (addonf-fields); +} + + +/* + Copy (unpack) values appended to sorted fields from a buffer back to + their regular positions specified by the Field::ptr pointers. 
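Whether this packed-row path is used at all is decided in get_addon_fields() above: if any referenced column is a BLOB, or the packed row plus its null bytes plus the sort key would exceed max_length_for_sort_data, the function returns 0 and filesort falls back to sorting (key, rowid) pairs and re-reading the table afterwards. A quick sketch of that threshold test, all numbers invented:

  #include <cstdio>

  int main()
  {
    unsigned sortlength  = 24;     /* length of the sort key itself    */
    unsigned field_bytes = 120;    /* sum of max packed column lengths */
    unsigned null_fields = 3;      /* nullable columns among them      */
    unsigned max_length_for_sort_data= 1024;   /* the system variable, say */

    unsigned addon_length= field_bytes + (null_fields + 7) / 8;
    bool use_addon= addon_length + sortlength <= max_length_for_sort_data;
    std::printf("addon_length=%u -> %s\n", addon_length,
                use_addon ? "pack row values into the sort records"
                          : "sort rowids and re-read the table");
    return 0;
  }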
+ + SYNOPSIS + unpack_addon_fields() + addon_field Array of descriptors for appended fields + buff Buffer which to unpack the value from + + NOTES + The function is supposed to be used only as a callback function + when getting field values for the sorted result set. + + RETURN + void. +*/ + +static void +unpack_addon_fields(struct st_sort_addon_field *addon_field, byte *buff) +{ + Field *field; + SORT_ADDON_FIELD *addonf= addon_field; + + for ( ; (field= addonf->field) ; addonf++) + { + if (addonf->null_bit && (addonf->null_bit & buff[addonf->null_offset])) + { + field->set_null(); + continue; + } + field->set_notnull(); + field->unpack(field->ptr, (char *) buff+addonf->offset); + } +} + +/* ** functions to change a double or float to a sortable string ** The following should work for IEEE */ diff --git a/sql/gen_lex_hash.cc b/sql/gen_lex_hash.cc index 1e78aa35195..0bbdf84c8d6 100644 --- a/sql/gen_lex_hash.cc +++ b/sql/gen_lex_hash.cc @@ -14,11 +14,68 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +/* + +The idea of presented algorithm see in +"The Art of Computer Programming" by Donald E. Knuth +Volume 3 "Sorting and searching" +(chapter 6.3 "Digital searching" - name and number of chapter + is back translation from Russian edition :)) + +as illustration of data structures, imagine next table: + +static SYMBOL symbols[] = { + { "ADD", SYM(ADD),0,0}, + { "AND", SYM(AND),0,0}, + { "DAY", SYM(DAY_SYM),0,0}, +}; + +for this structure, presented program generate next searching-structure: + ++-----------+-+-+-+ +| len |1|2|3| ++-----------+-+-+-+ +|first_char |0|0|a| +|last_char |0|0|d| +|link |0|0|+| + | + V + +----------+-+-+-+--+ + | 1 char|a|b|c|d | + +----------+-+-+-+--+ + |first_char|b|0|0|0 | + |last_char |n|0|0|-1| + |link |+|0|0|+ | + | | + | V + | symbols[2] ( "DAY" ) + V ++----------+--+-+-+-+-+-+-+-+-+-+--+ +| 2 char|d |e|f|j|h|i|j|k|l|m|n | ++----------+--+-+-+-+-+-+-+-+-+-+--+ +|first_char|0 |0|0|0|0|0|0|0|0|0|0 | +|last_char |-1|0|0|0|0|0|0|0|0|0|-1| +|link |+ |0|0|0|0|0|0|0|0|0|+ | + | | + V V + symbols[0] ( "ADD" ) symbols[1] ( "AND" ) + +for optimization, link is the 16-bit index in 'symbols' or 'sql_functions' +or search-array.. + +So, we can read full search-structure as 32-bit word + +TODO: +1. use instead to_upper_lex, special array + (substitute chars) without skip codes.. +2. try use reverse order of comparing.. + +*/ #define NO_YACC_SYMBOLS -#include <my_global.h> -#include <my_sys.h> -#include <m_string.h> +#include "my_global.h" +#include "my_sys.h" +#include "m_string.h" #ifndef __GNU_LIBRARY__ #define __GNU_LIBRARY__ // Skip warnings in getopt.h #endif @@ -26,337 +83,261 @@ #include "mysql_version.h" #include "lex.h" -my_bool opt_search; -int opt_verbose; -ulong opt_count; - -#define max_allowed_array 8000 // Don't generate bigger arrays than this -#define max_symbol 32767 // Use this for 'not found' -#define how_much_for_plus 8 // 2-8 -#define type_count 1 // 1-5 -#define char_table_count 5 -#define total_symbols (sizeof(symbols)/sizeof(SYMBOL) +\ - sizeof(sql_functions)/sizeof(SYMBOL)) - -#define how_much_and INT_MAX24 - -/* - The following only have to work with characters in the set - used by SQL commands -*/ - -#undef tolower -#define tolower(a) ((a) >= 'A' && (a) <= 'Z') ? 
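The comment above describes the generated structure as a trie walked one character at a time: each cell stores the range [first_char, last_char] of characters that may follow, one link per character in that range, and leaves point into symbols[]/sql_functions[]. A stripped-down, hand-built version of the ADD/AND/DAY example (Node, names[] and lookup() are illustrative; the generator's packed 4-byte cells, length-indexed roots and case folding are left out):

  #include <cstdio>
  #include <cstring>
  #include <vector>

  struct Node                            /* an unpacked hash_lex_struct-like cell    */
  {
    int first_char;                      /* 0 = dead end, -1 = leaf                  */
    int last_char;
    int leaf;                            /* index into names[] when first_char == -1 */
    std::vector<int> links;              /* one child per char in [first, last]      */
  };

  static const char *names[]= {"add", "and", "day"};   /* lower-cased for simplicity */

  static int lookup(const std::vector<Node> &t, const char *word)
  {
    int node= 0;
    for (const char *p= word; ; p++)
    {
      const Node &n= t[node];
      if (n.first_char == -1)            /* leaf: verify the remaining characters */
        return std::strcmp(names[n.leaf] + (p - word), p) == 0 ? n.leaf : -1;
      if (n.first_char == 0 || !*p || *p < n.first_char || *p > n.last_char)
        return -1;
      node= n.links[*p - n.first_char];
    }
  }

  int main()
  {
    std::vector<Node> t(6);
    t[2].first_char= 0;                                 /* shared dead end            */
    t[3].first_char= -1; t[3].leaf= 2;                  /* -> "day"                   */
    t[4].first_char= -1; t[4].leaf= 0;                  /* -> "add"                   */
    t[5].first_char= -1; t[5].leaf= 1;                  /* -> "and"                   */
    t[1].first_char= 'd'; t[1].last_char= 'n';          /* 2nd char of the 'a' branch */
    t[1].links.assign(11, 2); t[1].links[0]= 4; t[1].links[10]= 5;
    t[0].first_char= 'a'; t[0].last_char= 'd';          /* 1st char                   */
    t[0].links= {1, 2, 2, 3};

    std::printf("and->%d  day->%d  abs->%d\n",
                lookup(t, "and"), lookup(t, "day"), lookup(t, "abs"));
    return 0;
  }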
((a)- 'A' + 'a') : (a) - -static uint how_long_symbols,function_plus,function_mod,function_type; -static uint char_table[256]; -static uchar unique_length[256]; -static uchar bits[how_much_and/8+1]; -static uint primes[max_allowed_array+1]; -static ulong hash_results[type_count][how_much_for_plus+1][total_symbols]; -static ulong start_value=0; -static uint best_type; -static ulong best_t1,best_t2, best_start_value; - -static struct my_option my_long_options[] = +struct my_option my_long_options[] = { {"help", '?', "Display help and exit", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"count", 'c', "Try count times to find a optimal hash table", - (gptr*) &opt_count, (gptr*) &opt_count, 0, GET_ULONG, REQUIRED_ARG, - 100000, 0, 0, 0, 0, 0}, - {"search", 'S', "Search after good rnd1 and rnd2 values", - (gptr*) &opt_search, (gptr*) &opt_search, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, - 0, 0}, - {"verbose", 'v', "Write some information while the program executes", - (gptr*) &opt_verbose, (gptr*) &opt_verbose, 0, GET_INT, NO_ARG, 0, 0, 0, - 0, 0, 0}, {"version", 'V', "Output version information and exit", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} + {0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; -struct rand_struct { - unsigned long seed1,seed2,max_value; - double max_value_dbl; +struct hash_lex_struct +{ + int first_char; + char last_char; + union{ + hash_lex_struct *char_tails; + int iresult; + }; + int ithis; }; -void randominit(struct rand_struct *rand_st,ulong seed1, ulong seed2) -{ /* For mysql 3.21.# */ - rand_st->max_value= 0x3FFFFFFFL; - rand_st->max_value_dbl=(double) rand_st->max_value; - rand_st->seed1=seed1%rand_st->max_value ; - rand_st->seed2=seed2%rand_st->max_value; -} - -double rnd(struct rand_struct *rand_st) +hash_lex_struct *get_hash_struct_by_len(hash_lex_struct **root_by_len, + int len, int *max_len) { - rand_st->seed1=(rand_st->seed1*3+rand_st->seed2) % rand_st->max_value; - rand_st->seed2=(rand_st->seed1+rand_st->seed2+33) % rand_st->max_value; - return (((double) rand_st->seed1)/rand_st->max_value_dbl); + if (*max_len<len){ + *root_by_len= (hash_lex_struct *)realloc((char*)*root_by_len, + sizeof(hash_lex_struct)*len); + hash_lex_struct *cur, *end= *root_by_len + len; + for (cur= *root_by_len + *max_len; cur<end; cur++) + cur->first_char= 0; + *max_len= len; + } + return (*root_by_len)+(len-1); } - -static void make_char_table(ulong t1,ulong t2,int type) +void insert_into_hash(hash_lex_struct *root, const char *name, + int len_from_begin, int index, int function) { - uint i; - struct rand_struct rand_st; - randominit(&rand_st,t1,t2); + hash_lex_struct *end, *cur, *tails; - for (i=0 ; i < 256 ; i++) + if (!root->first_char) { - switch (type) { - case 0: char_table[i]= i + (i << 8); break; - case 1: char_table[i]= i + ((i ^255 ) << 8); break; - case 2: char_table[i]= i; break; - case 3: char_table[i]= i + ((uint) (rnd(&rand_st)*255) << 8); break; - case 4: char_table[i]= (uint) (rnd(&rand_st)*255) + (i << 8); break; - } + root->first_char= -1; + root->iresult= index; + return; } - char_table[0]|=1+257; // Avoid problems with 0 - for (i=0 ; i < 256 ; i++) + + if (root->first_char == -1) { - uint tmp=(uint) (rnd(&rand_st)*255); - swap(uint,char_table[i],char_table[tmp]); + int index2= root->iresult; + const char *name2= (index2 < 0 ? 
sql_functions[-index2-1] : + symbols[index2]).name + len_from_begin; + root->first_char= (int) (uchar) name2[0]; + root->last_char= (char) root->first_char; + tails= (hash_lex_struct*)malloc(sizeof(hash_lex_struct)); + root->char_tails= tails; + tails->first_char= -1; + tails->iresult= index2; } - /* lower characters should be mapped to upper */ - for (i= 'a' ; i <= 'z' ; i++) + + size_t real_size= (root->last_char-root->first_char+1); + + if (root->first_char>(*name)) { - /* This loop is coded with extra variables to avoid a bug in gcc 2.96 */ - uchar tmp= (uchar) (i - 'a'); // Assume ascii - tmp+='A'; - char_table[i]=char_table[tmp]; + size_t new_size= root->last_char-(*name)+1; + if (new_size<real_size) printf("error!!!!\n"); + tails= root->char_tails; + tails= (hash_lex_struct*)realloc((char*)tails, + sizeof(hash_lex_struct)*new_size); + root->char_tails= tails; + memmove(tails+(new_size-real_size),tails,real_size*sizeof(hash_lex_struct)); + end= tails + new_size - real_size; + for (cur= tails; cur<end; cur++) + cur->first_char= 0; + root->first_char= (int) (uchar) *name; } + + if (root->last_char<(*name)) + { + size_t new_size= (*name)-root->first_char+1; + if (new_size<real_size) printf("error!!!!\n"); + tails= root->char_tails; + tails= (hash_lex_struct*)realloc((char*)tails, + sizeof(hash_lex_struct)*new_size); + root->char_tails= tails; + end= tails + new_size; + for (cur= tails+real_size; cur<end; cur++) + cur->first_char= 0; + root->last_char= (*name); + } + + insert_into_hash(root->char_tails+(*name)-root->first_char, + name+1,len_from_begin+1,index,function); } -/* Fill array primes with primes between start and 'max_allowed_array' */ -static void make_prime_array(uint start) -{ - uint i,j,*to; - uint max_index=(uint) sqrt((double) max_allowed_array); +hash_lex_struct *root_by_len= 0; +int max_len=0; - bzero((char*) primes,sizeof(primes[0])*max_allowed_array); +hash_lex_struct *root_by_len2= 0; +int max_len2=0; - i=2; - while (i < max_index) - { - for (j=i+i ; j <= max_allowed_array ; j+=i) - primes[j]=1; - while (primes[++i]) ; +void insert_symbols() +{ + size_t i= 0; + SYMBOL *cur; + for (cur= symbols; i<array_elements(symbols); cur++, i++){ + hash_lex_struct *root= + get_hash_struct_by_len(&root_by_len,cur->length,&max_len); + insert_into_hash(root,cur->name,0,i,0); } - - to=primes; - for (i=start ; i <= max_allowed_array ; i++) - if (!primes[i]) - *to++=i; - *to=0; // end marker } -#define USE_char_table - -static ulong tab_index_function(const char *s,uint add, uint type) +void insert_sql_functions() { - register ulong nr=start_value+char_table[(uchar) *s]; // Nice value - ulong pos=3; - uint tmp_length=unique_length[(uchar) *s]-1; - while (*++s && tmp_length-- > 0) - { - switch (type) { - case 0: - nr= (nr ^ (char_table[(uchar) *s] + (nr << add))); - break; - case 1: - nr= (nr + (char_table[(uchar) *s] + (nr << add))); - break; - case 2: - nr= (nr ^ (char_table[(uchar) *s] ^ (nr << add))); - break; - case 3: - nr= (char_table[(uchar) *s] ^ (nr << add)); - break; - case 4: - nr+= nr+nr+((nr & 63)+pos)*((ulong) char_table[(uchar) *s]); - pos+=add; - break; - } + size_t i= 0; + SYMBOL *cur; + for (cur= sql_functions; i<array_elements(sql_functions); cur++, i++){ + hash_lex_struct *root= + get_hash_struct_by_len(&root_by_len,cur->length,&max_len); + insert_into_hash(root,cur->name,0,-i-1,1); } - return nr & INT_MAX24; } -static int search(bool write_warning) +void calc_length() +{ + SYMBOL *cur, *end= symbols + array_elements(symbols); + for (cur= symbols; cur < end; cur++) 
+ cur->length=(uchar) strlen(cur->name); + end= sql_functions + array_elements(sql_functions); + for (cur= sql_functions; cur<end; cur++) + cur->length=(uchar) strlen(cur->name); +} + +void generate_find_structs() { - uint size_symbols = sizeof(symbols)/sizeof(SYMBOL); - uint size_functions = sizeof(sql_functions)/sizeof(SYMBOL); - uint size=size_symbols + size_functions; - uint i=0,found,*prime,type; - int igra[max_allowed_array],test_count=INT_MAX; - uint possible_plus[how_much_for_plus*type_count+type_count]; + root_by_len= 0; + max_len=0; - how_long_symbols = sizeof(symbols)/sizeof(SYMBOL); + insert_symbols(); - bzero((char*) possible_plus,sizeof(possible_plus)); - found=0; + root_by_len2= root_by_len; + max_len2= max_len; + + root_by_len= 0; + max_len= 0; + + insert_symbols(); + insert_sql_functions(); +} - /* Check first which function_plus are possible */ - for (type=0 ; type < type_count ; type ++) +char *hash_map= 0; +int size_hash_map= 0; + +void add_struct_to_map(hash_lex_struct *st) +{ + st->ithis= size_hash_map/4; + size_hash_map+= 4; + hash_map= (char*)realloc((char*)hash_map,size_hash_map); + hash_map[size_hash_map-4]= (char) (st->first_char == -1 ? 0 : + st->first_char); + hash_map[size_hash_map-3]= (char) (st->first_char == -1 || + st->first_char == 0 ? 0 : st->last_char); + if (st->first_char == -1) { - for (function_plus = 1; - function_plus <= how_much_for_plus; - function_plus++) - { - bzero((char*) bits,sizeof(bits)); - for (i=0; i < size; i++) - { - ulong order= tab_index_function ((i < how_long_symbols) ? - symbols[i].name : - sql_functions[i-how_long_symbols].name, - function_plus, type); - hash_results[type][function_plus][i]=order; - uint pos=order/8; - uint bit=order & 7; - if (bits[pos] & (1 << bit)) - break; - bits[pos]|=1 << bit; - } - if (i == size) - { - possible_plus[found++]=function_plus; - } - } - possible_plus[found++]=0; // End marker + hash_map[size_hash_map-2]= ((unsigned int)(int16)st->iresult)&255; + hash_map[size_hash_map-1]= ((unsigned int)(int16)st->iresult)>>8; } - if (found == type_count) + else if (st->first_char == 0) { - if (write_warning) - fprintf(stderr,"\ -The hash function didn't return a unique value for any parameter\n\ -You have to change gen_lex_code.cc, function 'tab_index_function' to\n\ -generate unique values for some parameter. 
When you have succeeded in this,\n\ -you have to change 'main' to print out the new function\n"); - return(1); + hash_map[size_hash_map-2]= ((unsigned int)(int16)array_elements(symbols))&255; + hash_map[size_hash_map-1]= ((unsigned int)(int16)array_elements(symbols))>>8; } +} + - if (opt_verbose > 1) - fprintf (stderr,"Info: Possible add values: %d\n",found-type_count); +void add_structs_to_map(hash_lex_struct *st, int len) +{ + hash_lex_struct *cur, *end= st+len; + for (cur= st; cur<end; cur++) + add_struct_to_map(cur); + for (cur= st; cur<end; cur++) + { + if (cur->first_char && cur->first_char != -1) + add_structs_to_map(cur->char_tails,cur->last_char-cur->first_char+1); + } +} - for (prime=primes; (function_mod=*prime) ; prime++) +void set_links(hash_lex_struct *st, int len) +{ + hash_lex_struct *cur, *end= st+len; + for (cur= st; cur<end; cur++) { - uint *plus_ptr=possible_plus; - for (type=0 ; type < type_count ; type++ ) + if (cur->first_char != 0 && cur->first_char != -1) { - while ((function_plus= *plus_ptr++)) - { - ulong *order_pos= &hash_results[type][function_plus][0]; - if (test_count++ == INT_MAX) - { - test_count=1; - bzero((char*) igra,sizeof(igra)); - } - for (i=0; i<size ;i++) - { - ulong order; - order = *order_pos++ % function_mod; - if (igra[order] == test_count) - break; - igra[order] = test_count; - } - if (i == size) - { - *prime=0; // Mark this used - function_type=type; - return 0; // Found ok value - } - } + int ilink= cur->char_tails->ithis; + hash_map[cur->ithis*4+2]= ilink%256; + hash_map[cur->ithis*4+3]= ilink/256; + set_links(cur->char_tails,cur->last_char-cur->first_char+1); } } - - function_mod=max_allowed_array; - if (write_warning) - fprintf (stderr,"Fatal error when generating hash for symbols\n\ -Didn't find suitable values for perfect hashing:\n\ -You have to edit gen_lex_hash.cc to generate a new hashing function.\n\ -You can try running gen_lex_hash with --search to find a suitable value\n\ -Symbol array size = %d\n",function_mod); - return -1; } -void print_arrays() +void print_hash_map(const char *name) { - uint size_symbols = sizeof(symbols)/sizeof(SYMBOL); - uint size_functions = sizeof(sql_functions)/sizeof(SYMBOL); - uint size=size_symbols + size_functions; - uint i; - - fprintf(stderr,"Symbols: %d Functions: %d; Total: %d\nShifts per char: %d, Array size: %d\n", - size_symbols,size_functions,size_symbols+size_functions, - function_plus,function_mod); + char *cur; + int i; - int *prva= (int*) my_alloca(sizeof(int)*function_mod); - for (i=0 ; i < function_mod; i++) - prva[i]= max_symbol; - - for (i=0;i<size;i++) + printf("uchar %s[%d]= {\n",name,size_hash_map); + for (i=0, cur= hash_map; i<size_hash_map; i++, cur++) { - const char *name= ((i < how_long_symbols) ? 
- symbols[i].name : - sql_functions[i - how_long_symbols].name); - ulong order = tab_index_function(name,function_plus,function_type); - order %= function_mod; - /* This should never be true */ - if (prva[order] != max_symbol) - { - fprintf(stderr,"Error: Got duplicate value for symbol '%s'\n",name); - exit(1); + switch(i%4){ + case 0: case 1: + if (!*cur) + printf("0, "); + else + printf("\'%c\', ",*cur); + break; + case 2: printf("%u, ",(uint)(uchar)*cur); break; + case 3: printf("%u,\n",(uint)(uchar)*cur); break; } - prva [order] = i; } + printf("};\n"); +} -#ifdef USE_char_table - printf("static uint16 char_table[] = {\n"); - for (i=0; i < 255 ;i++) // < 255 is correct - { - printf("%u,",char_table[i]); - if (((i+1) & 15) == 0) - puts(""); - } - printf("%d\n};\n\n\n",char_table[i]); -#endif - printf("static uchar unique_length[] = {\n"); - for (i=0; i < 255 ;i++) // < 255 is correct - { - printf("%u,",unique_length[i]); - if (((i+1) & 15) == 0) - puts(""); - } - printf("%d\n};\n\n\n",unique_length[i]); +void print_find_structs() +{ + add_structs_to_map(root_by_len,max_len); + set_links(root_by_len,max_len); + print_hash_map("sql_functions_map"); - printf("static uint16 my_function_table[] = {\n"); - for (i=0; i < function_mod-1 ;i++) - { - printf("%d,",prva[i]); - if (((i+1) % 12) == 0) - puts(""); - } - printf("%d\n};\n\n\n",prva[i]); - my_afree((gptr) prva); + hash_map= 0; + size_hash_map= 0; + + printf("\n"); + + add_structs_to_map(root_by_len2,max_len2); + set_links(root_by_len2,max_len2); + print_hash_map("symbols_map"); } static void usage(int version) { - printf("%s Ver 3.5 Distrib %s, for %s (%s)\n", + printf("%s Ver 3.6 Distrib %s, for %s (%s)\n", my_progname, MYSQL_SERVER_VERSION, SYSTEM_TYPE, MACHINE_TYPE); if (version) return; - puts("Copyright (C) 2001 MySQL AB, by Sinisa and Monty"); - puts("This software comes with ABSOLUTELY NO WARRANTY. This is free software,\nand you are welcome to modify and redistribute it under the GPL license\n"); + puts("Copyright (C) 2001 MySQL AB, by VVA and Monty"); + puts("This software comes with ABSOLUTELY NO WARRANTY. 
This is free software,\n\ +and you are welcome to modify and redistribute it under the GPL license\n"); puts("This program generates a perfect hashing function for the sql_lex.cc"); printf("Usage: %s [OPTIONS]\n\n", my_progname); my_print_help(my_long_options); - my_print_variables(my_long_options); } @@ -364,10 +345,7 @@ extern "C" my_bool get_one_option(int optid, const struct my_option *opt __attribute__((unused)), char *argument __attribute__((unused))) { - switch (optid) { - case 'v': - opt_verbose++; - break; + switch(optid) { case 'V': usage(1); exit(0); @@ -389,182 +367,154 @@ static int get_options(int argc, char **argv) if (argc >= 1) { - fprintf(stderr,"%s: Too many arguments\n", my_progname); usage(0); - exit(1); + exit(1); } return(0); } -static uint max_prefix(const char *name) + +int check_dup_symbols(SYMBOL *s1, SYMBOL *s2) { - uint i; - uint max_length=1; - for (i=0 ; i < sizeof(symbols)/sizeof(SYMBOL) ; i++) - { - const char *str=symbols[i].name; - if (str != name) - { - const char *str2=name; - uint length; - while (*str && *str == *str2) - { - str++; - str2++; - } - length=(uint) (str2 - name)+1; - if (length > max_length) - max_length=length; - } - } - for (i=0 ; i < sizeof(sql_functions)/sizeof(SYMBOL) ; i++) - { - const char *str=sql_functions[i].name; - if (str != name) - { - const char *str2=name; - uint length; - while (*str && *str == *str2) - { - str++; - str2++; - } - length=(uint) (str2 - name)+1; - if (length > max_length) - max_length=length; - } - } - return max_length; + if (s1->length!=s2->length || strncmp(s1->name,s2->name,s1->length)) + return 0; + + const char *err_tmpl= "\ngen_lex_hash fatal error : \ +Unfortunately gen_lex_hash can not generate a hash,\n since \ +your lex.h has duplicate definition for a symbol \"%s\"\n\n"; + printf (err_tmpl,s1->name); + fprintf (stderr,err_tmpl,s1->name); + + return 1; } -static void make_max_length_table(void) +int check_duplicates() { - uint i; - for (i=0 ; i < sizeof(symbols)/sizeof(SYMBOL) ; i++) + SYMBOL *cur1, *cur2, *s_end, *f_end; + + s_end= symbols + array_elements(symbols); + f_end= sql_functions + array_elements(sql_functions); + + for (cur1= symbols; cur1<s_end; cur1++) { - uint length=max_prefix(symbols[i].name); - if (length > unique_length[(uchar) symbols[i].name[0]]) + for (cur2= cur1+1; cur2<s_end; cur2++) + { + if (check_dup_symbols(cur1,cur2)) + return 1; + } + for (cur2= sql_functions; cur2<f_end; cur2++) { - unique_length[(uchar) symbols[i].name[0]]=length; - unique_length[(uchar) tolower(symbols[i].name[0])]=length; + if (check_dup_symbols(cur1,cur2)) + return 1; } } - for (i=0 ; i < sizeof(sql_functions)/sizeof(SYMBOL) ; i++) + + for (cur1= sql_functions; cur1<f_end; cur1++) { - uint length=max_prefix(sql_functions[i].name); - if (length > unique_length[(uchar) sql_functions[i].name[0]]) + for (cur2= cur1+1; cur2< f_end; cur2++) { - unique_length[(uchar) sql_functions[i].name[0]]=length; - unique_length[(uchar) tolower(sql_functions[i].name[0])]=length; + if (check_dup_symbols(cur1,cur2)) + return 1; } } + return 0; } int main(int argc,char **argv) { - struct rand_struct rand_st; - static uint best_mod,best_add,best_functype; - int error; - MY_INIT(argv[0]); - start_value=2925024L; best_t1=654916L; best_t2=1723390L; best_type=3; /* mode=4943 add=1 type: 0 */ + if (get_options(argc,(char **) argv)) exit(1); - make_max_length_table(); - make_char_table(best_t1,best_t2,best_type); - make_prime_array(sizeof(symbols)/sizeof(SYMBOL) + - sizeof(sql_functions)/sizeof(SYMBOL)); - - if 
((error=search(1)) > 0 || error && !opt_search) - exit(1); // This should work - best_mod=function_mod; best_add=function_plus; best_functype=function_type; + printf("/* Copyright (C) 2001 MySQL AB\n\ + This software comes with ABSOLUTELY NO WARRANTY. This is free software,\n\ + and you are welcome to modify and redistribute it under the GPL license\n\ + \n*/\n\n"); - if (opt_search) - { - time_t start_time=time((time_t*) 0); - randominit(&rand_st,start_time,start_time/2); // Some random values - printf("start_value=%ldL; best_t1=%ldL; best_t2=%ldL; best_type=%d; /* mode=%d add=%d type: %d */\n", - start_value, best_t1,best_t2,best_type,best_mod,best_add, - best_functype); - best_start_value=start_value; - for (uint i=1 ; i <= opt_count ; i++) - { - if (i % 10 == 0) - { - putchar('.'); - fflush(stdout); - } - ulong t1=(ulong) (rnd(&rand_st)*INT_MAX24); - ulong t2=(ulong) (rnd(&rand_st)*INT_MAX24); - uint type=(int) (rnd(&rand_st)*char_table_count); - start_value=(ulong) (rnd(&rand_st)*INT_MAX24); - make_char_table(t1,t2,type); - if (!search(0)) - { - best_mod=function_mod; best_add=function_plus; - best_functype=function_type; - best_t1=t1; best_t2=t2; best_type=type; - best_start_value=start_value; - printf("\nstart_value=%ldL; best_t1=%ldL; best_t2=%ldL; best_type=%d; /* mode=%d add=%d type: %d */\n", - best_start_value,best_t1,best_t2,best_type,best_mod,best_add, - best_functype); - } - if (opt_verbose && (i % 20000) == 0) - printf("\nstart_value=%ldL; best_t1=%ldL; best_t2=%ldL; best_type=%d; /* mode=%d add=%d type: %d */\n", - best_start_value,best_t1,best_t2,best_type,best_mod,best_add, - best_functype); - } - } + printf("/* This code is generated by gen_lex_hash.cc that seeks for\ + a perfect\nhash function */\n\n"); + printf("#include \"lex.h\"\n\n"); - function_mod=best_mod; function_plus=best_add; - make_char_table(best_t1,best_t2,best_type); + calc_length(); - printf("/* Copyright (C) 2001 MySQL AB\n\ - This program is free software; you can redistribute it and/or modify\n\ - it under the terms of the GNU General Public License as published by\n\ - the Free Software Foundation; either version 2 of the License, or\n\ - (at your option) any later version.\n\n\ - This program is distributed in the hope that it will be useful,\n\ - but WITHOUT ANY WARRANTY; without even the implied warranty of\n\ - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n\ - GNU General Public License for more details.\n\n\ - You should have received a copy of the GNU General Public License\n\ - along with this program; if not, write to the Free Software\n\ - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */\n\n"); - -printf("/* This code is generated by gen_lex_hash.cc that seeks for a perfect\nhash function */\n\n"); - printf("#include \"lex.h\"\n\n"); + if (check_duplicates()) + exit(1); - print_arrays(); + generate_find_structs(); + print_find_structs(); - printf("/* start_value=%ldL; best_t1=%ldL; best_t2=%ldL; best_type=%d; */ /* mode=%d add=%d type: %d */\n\n", - best_start_value, best_t1, best_t2, best_type, - best_mod, best_add, best_functype); + printf("\nunsigned int sql_functions_max_len=%d;\n",max_len); + printf("\nunsigned int symbols_max_len=%d;\n\n",max_len2); - printf("inline SYMBOL *get_hash_symbol(const char *s,unsigned int length,bool function)\n\ + printf +( +"inline SYMBOL *get_hash_symbol(const char *s,\n\ + unsigned int len,bool function)\n\ {\n\ - ulong idx = %lu+char_table[(uchar) *s];\n\ - SYMBOL *sim;\n\ - const char *start=s;\n\ - int i=unique_length[(uchar) *s++];\n\ - if (i > (int) length) i=(int) length;\n\ - while (--i > 0)\n\ - idx= (idx ^ (char_table[(uchar) *s++] + (idx << %d)));\n\ - idx=my_function_table[(idx & %d) %% %d];\n\ - if (idx >= %d)\n\ - {\n\ - if (!function || idx >= %d) return (SYMBOL*) 0;\n\ - sim=sql_functions + (idx - %d);\n\ + register uchar *hash_map;\n\ + register const char *cur_str= s;\n\ + if (function){\n\ + if (len>sql_functions_max_len) return 0;\n\ + hash_map= sql_functions_map;\n\ + register uint32 cur_struct= uint4korr(hash_map+((len-1)*4));\n\ +\n\ + for (;;){\n\ + register uchar first_char= (uchar)cur_struct;\n\ +\n\ + if (first_char == 0)\n\ + {\n\ + register int16 ires= (int16)(cur_struct>>16);\n\ + if (ires==array_elements(symbols)) return 0;\n\ + register SYMBOL *res;\n\ + if (ires>=0) \n\ + res= symbols+ires;\n\ + else\n\ + res= sql_functions-ires-1;\n\ + register uint count= cur_str-s;\n\ + return lex_casecmp(cur_str,res->name+count,len-count) ? 0 : res;\n\ + }\n\ +\n\ + register uchar cur_char= (uchar)to_upper_lex[(uchar)*cur_str];\n\ + if (cur_char<first_char) return 0;\n\ + cur_struct>>=8;\n\ + if (cur_char>(uchar)cur_struct) return 0;\n\ +\n\ + cur_struct>>=8;\n\ + cur_struct= uint4korr(hash_map+\n\ + (((uint16)cur_struct + cur_char - first_char)*4));\n\ + cur_str++;\n\ + }\n\ + }else{\n\ + if (len>symbols_max_len) return 0;\n\ + hash_map= symbols_map;\n\ + register uint32 cur_struct= uint4korr(hash_map+((len-1)*4));\n\ +\n\ + for (;;){\n\ + register uchar first_char= (uchar)cur_struct;\n\ +\n\ + if (first_char==0){\n\ + register int16 ires= (int16)(cur_struct>>16);\n\ + if (ires==array_elements(symbols)) return 0;\n\ + register SYMBOL *res= symbols+ires;\n\ + register uint count= cur_str-s;\n\ + return lex_casecmp(cur_str,res->name+count,len-count)!=0 ? 
0 : res;\n\ + }\n\ +\n\ + register uchar cur_char= (uchar)to_upper_lex[(uchar)*cur_str];\n\ + if (cur_char<first_char) return 0;\n\ + cur_struct>>=8;\n\ + if (cur_char>(uchar)cur_struct) return 0;\n\ +\n\ + cur_struct>>=8;\n\ + cur_struct= uint4korr(hash_map+\n\ + (((uint16)cur_struct + cur_char - first_char)*4));\n\ + cur_str++;\n\ + }\n\ }\n\ - else\n\ - sim=symbols + idx;\n\ - if ((length != sim->length) || lex_casecmp(start,sim->name,length))\n\ - return (SYMBOL *)0;\n\ - return sim;\n\ -}\n",(ulong) start_value,(int) function_plus,(int) how_much_and,function_mod,how_long_symbols,max_symbol,how_long_symbols); - exit(0); - return 0; +}\n" +); } + diff --git a/sql/gstream.cc b/sql/gstream.cc new file mode 100644 index 00000000000..f7d11d76b0c --- /dev/null +++ b/sql/gstream.cc @@ -0,0 +1,120 @@ +/* Copyright (C) 2004 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* + Functions to read and parse geometrical data. + NOTE: These functions assumes that the string is end \0 terminated! +*/ + +#include "mysql_priv.h" + +enum Gis_read_stream::enum_tok_types Gis_read_stream::get_next_toc_type() +{ + skip_space(); + if (m_cur >= m_limit) + return eostream; + if (my_isvar_start(&my_charset_bin, *m_cur)) + return word; + if ((*m_cur >= '0' && *m_cur <= '9') || *m_cur == '-' || *m_cur == '+') + return numeric; + if (*m_cur == '(') + return l_bra; + if (*m_cur == ')') + return r_bra; + if (*m_cur == ',') + return comma; + return unknown; +} + + +bool Gis_read_stream::get_next_word(LEX_STRING *res) +{ + skip_space(); + res->str= (char*) m_cur; + /* The following will also test for \0 */ + if (!my_isvar_start(&my_charset_bin, *m_cur)) + return 1; + + /* + We can't combine the following increment with my_isvar() because + my_isvar() is a macro that would cause side effects + */ + m_cur++; + while ((m_cur < m_limit) && my_isvar(&my_charset_bin, *m_cur)) + m_cur++; + + res->length= (uint32) (m_cur - res->str); + return 0; +} + + +/* + Read a floating point number + + NOTE: Number must start with a digit or sign. It can't start with a decimal + point +*/ + +bool Gis_read_stream::get_next_number(double *d) +{ + char *endptr; + int err; + + skip_space(); + + if ((m_cur >= m_limit) || + (*m_cur < '0' || *m_cur > '9') && *m_cur != '-' && *m_cur != '+') + { + set_error_msg("Numeric constant expected"); + return 1; + } + + *d = my_strntod(m_charset, (char *)m_cur, + m_limit-m_cur, &endptr, &err); + if (err) + return 1; + if (endptr) + m_cur = endptr; + return 0; +} + + +bool Gis_read_stream::check_next_symbol(char symbol) +{ + skip_space(); + if ((m_cur >= m_limit) || (*m_cur != symbol)) + { + char buff[32]; + strmov(buff, "'?' expected"); + buff[2]= symbol; + set_error_msg(buff); + return 1; + } + m_cur++; + return 0; +} + + +/* + Remember error message. 
+*/ + +void Gis_read_stream::set_error_msg(const char *msg) +{ + size_t len= strlen(msg); // ok in this context + m_err_msg= (char *) my_realloc(m_err_msg, len + 1, MYF(MY_ALLOW_ZERO_PTR)); + memcpy(m_err_msg, msg, len + 1); +} diff --git a/sql/gstream.h b/sql/gstream.h new file mode 100644 index 00000000000..bfbf28851ce --- /dev/null +++ b/sql/gstream.h @@ -0,0 +1,76 @@ +/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + + +class Gis_read_stream +{ +public: + enum enum_tok_types + { + unknown, + eostream, + word, + numeric, + l_bra, + r_bra, + comma + }; + + Gis_read_stream(CHARSET_INFO *charset, const char *buffer, int size) + :m_cur(buffer), m_limit(buffer + size), m_err_msg(NULL), m_charset(charset) + {} + Gis_read_stream(): m_cur(NullS), m_limit(NullS), m_err_msg(NullS) + {} + ~Gis_read_stream() + { + my_free(m_err_msg, MYF(MY_ALLOW_ZERO_PTR)); + } + + enum enum_tok_types get_next_toc_type(); + bool get_next_word(LEX_STRING *); + bool get_next_number(double *); + bool check_next_symbol(char); + + inline void skip_space() + { + while ((m_cur < m_limit) && my_isspace(&my_charset_latin1, *m_cur)) + m_cur++; + } + /* Skip next character, if match. Return 1 if no match */ + inline bool skip_char(char skip) + { + skip_space(); + if ((m_cur >= m_limit) || *m_cur != skip) + return 1; /* Didn't find char */ + m_cur++; + return 0; + } + void set_error_msg(const char *msg); + + // caller should free this pointer + char *get_error_msg() + { + char *err_msg = m_err_msg; + m_err_msg= NullS; + return err_msg; + } + +protected: + const char *m_cur; + const char *m_limit; + char *m_err_msg; + CHARSET_INFO *m_charset; +}; diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc index 4ee844ac719..33f7b6238b0 100644 --- a/sql/ha_berkeley.cc +++ b/sql/ha_berkeley.cc @@ -25,7 +25,7 @@ We will need an updated Berkeley DB version for this. - Killing threads that has got a 'deadlock' - SHOW TABLE STATUS should give more information about the table. - - Get a more accurate count of the number of rows (estimate_number_of_rows()). + - Get a more accurate count of the number of rows (estimate_rows_upper_bound()). We could store the found number of rows when the table is scanned and then increment the counter for each attempted write. 
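
The estimate idea described in the item above — remember the exact count from a full table scan, then bump it for every attempted write — can be sketched in a few lines. The following standalone model is purely illustrative (the RowEstimate type and its names are hypothetical, not part of this patch or of the BDB handler API):

#include <atomic>
#include <cstdio>

// Hypothetical illustration of the row-estimate idea described above:
// a full scan stores an exact count, each attempted write bumps it.
struct RowEstimate {
  std::atomic<long> rows{0};

  void after_full_scan(long exact_count) { rows.store(exact_count); }
  void after_attempted_write()           { ++rows; }   // may overcount if the write rolls back
  long upper_bound(long slack) const     { return rows.load() + slack; }
};

int main() {
  RowEstimate est;
  est.after_full_scan(10000);     // exact value found by a scan
  est.after_attempted_write();    // each write attempt increments
  std::printf("estimate: %ld\n", est.upper_bound(100));
  return 0;
}

Writes that later roll back only make the estimate drift upward, which is acceptable for an upper bound such as estimate_rows_upper_bound().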
- We will need to extend the manager thread to makes checkpoints at @@ -47,15 +47,15 @@ */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_IMPLEMENTATION #pragma implementation // gcc: Class implementation #endif #include "mysql_priv.h" + #ifdef HAVE_BERKELEY_DB #include <m_ctype.h> #include <myisampack.h> -#include <assert.h> #include <hash.h> #include "ha_berkeley.h" #include "sql_manager.h" @@ -64,7 +64,7 @@ #define HA_BERKELEY_ROWS_IN_TABLE 10000 /* to get optimization right */ #define HA_BERKELEY_RANGE_COUNT 100 #define HA_BERKELEY_MAX_ROWS 10000000 /* Max rows in table */ -/* extra rows for estimate_number_of_rows() */ +/* extra rows for estimate_rows_upper_bound() */ #define HA_BERKELEY_EXTRA_ROWS 100 /* Bits for share->status */ @@ -73,7 +73,7 @@ #define STATUS_BDB_ANALYZE 4 const char *ha_berkeley_ext=".db"; -bool berkeley_skip=0,berkeley_shared_data=0; +bool berkeley_shared_data=0; u_int32_t berkeley_init_flags= DB_PRIVATE | DB_RECOVER, berkeley_env_flags=0, berkeley_lock_type=DB_LOCK_DEFAULT; ulong berkeley_cache_size, berkeley_log_buffer_size, berkeley_log_file_size=0; @@ -91,7 +91,7 @@ const char *berkeley_lock_names[] = u_int32_t berkeley_lock_types[]= { DB_LOCK_DEFAULT, DB_LOCK_OLDEST, DB_LOCK_RANDOM }; TYPELIB berkeley_lock_typelib= {array_elements(berkeley_lock_names)-1,"", - berkeley_lock_names}; + berkeley_lock_names, NULL}; static void berkeley_print_error(const char *db_errpfx, char *buffer); static byte* bdb_get_key(BDB_SHARE *share,uint *length, @@ -166,11 +166,13 @@ bool berkeley_init(void) { db_env->close(db_env,0); /* purecov: inspected */ db_env=0; /* purecov: inspected */ + goto err; } - (void) hash_init(&bdb_open_tables,32,0,0, + (void) hash_init(&bdb_open_tables,system_charset_info,32,0,0, (hash_get_key) bdb_get_key,0,0); pthread_mutex_init(&bdb_mutex,MY_MUTEX_INIT_FAST); +err: DBUG_RETURN(db_env == 0); } @@ -194,12 +196,12 @@ bool berkeley_flush_logs() int error; bool result=0; DBUG_ENTER("berkeley_flush_logs"); - if ((error=log_flush(db_env,0))) + if ((error=db_env->log_flush(db_env,0))) { my_error(ER_ERROR_DURING_FLUSH_LOGS,MYF(0),error); /* purecov: inspected */ result=1; /* purecov: inspected */ } - if ((error=txn_checkpoint(db_env,0,0,0))) + if ((error=db_env->txn_checkpoint(db_env,0,0,0))) { my_error(ER_ERROR_DURING_CHECKPOINT,MYF(0),error); /* purecov: inspected */ result=1; /* purecov: inspected */ @@ -231,22 +233,21 @@ int berkeley_rollback(THD *thd, void *trans) } -int berkeley_show_logs(THD *thd) +int berkeley_show_logs(Protocol *protocol) { char **all_logs, **free_logs, **a, **f; - String *packet= &thd->packet; int error=1; - MEM_ROOT show_logs_root; - MEM_ROOT *old_root=my_pthread_getspecific_ptr(MEM_ROOT*,THR_MALLOC); + MEM_ROOT **root_ptr= my_pthread_getspecific_ptr(MEM_ROOT**,THR_MALLOC); + MEM_ROOT show_logs_root, *old_mem_root= *root_ptr; DBUG_ENTER("berkeley_show_logs"); - init_sql_alloc(&show_logs_root, 1024, 1024); - my_pthread_setspecific_ptr(THR_MALLOC,&show_logs_root); + init_sql_alloc(&show_logs_root, BDB_LOG_ALLOC_BLOCK_SIZE, + BDB_LOG_ALLOC_BLOCK_SIZE); + *root_ptr= &show_logs_root; - if ((error= log_archive(db_env, &all_logs, DB_ARCH_ABS | DB_ARCH_LOG, - (void* (*)(size_t)) sql_alloc)) || - (error= log_archive(db_env, &free_logs, DB_ARCH_ABS, - (void* (*)(size_t)) sql_alloc))) + if ((error= db_env->log_archive(db_env, &all_logs, + DB_ARCH_ABS | DB_ARCH_LOG)) || + (error= db_env->log_archive(db_env, &free_logs, DB_ARCH_ABS))) { DBUG_PRINT("error", ("log_archive failed (error %d)", error)); db_env->err(db_env, error, "log_archive: DB_ARCH_ABS"); 
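
The berkeley_show_logs() hunk above points the thread's THR_MALLOC slot at a local show_logs_root while the log names are collected, then restores the previous root so everything allocated for the listing is released in a single free_root() call. A minimal standalone sketch of that save/swap/restore pattern, assuming a hypothetical Arena type in place of the real MEM_ROOT machinery:

#include <cstdio>
#include <string>
#include <vector>

// Hypothetical stand-in for MEM_ROOT: allocations are remembered and
// released together when the arena goes away.
struct Arena {
  std::vector<std::string*> blocks;
  std::string *alloc(const char *s) {
    blocks.push_back(new std::string(s));
    return blocks.back();
  }
  ~Arena() { for (auto *b : blocks) delete b; }
};

thread_local Arena *current_arena = nullptr;   // plays the role of THR_MALLOC

// Swap in a temporary arena for the duration of one operation,
// as berkeley_show_logs() does with show_logs_root, then restore.
void list_logs() {
  Arena show_logs_root;
  Arena *old_root = current_arena;
  current_arena = &show_logs_root;

  std::string *name = current_arena->alloc("log.0000000001");
  std::printf("%s\n", name->c_str());

  current_arena = old_root;   // restore; show_logs_root frees its blocks here
}

int main() { list_logs(); return 0; }

Keeping the temporary arena on the stack means nothing allocated for the listing can outlive the operation, which is the same property the patch gets from free_root(&show_logs_root, MYF(0)) before restoring *root_ptr.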
@@ -259,18 +260,18 @@ int berkeley_show_logs(THD *thd) { for (a = all_logs, f = free_logs; *a; ++a) { - packet->length(0); - net_store_data(packet,*a); - net_store_data(packet,"BDB"); + protocol->prepare_for_resend(); + protocol->store(*a, system_charset_info); + protocol->store("BDB", 3, system_charset_info); if (f && *f && strcmp(*a, *f) == 0) { - ++f; - net_store_data(packet, SHOW_LOG_STATUS_FREE); + f++; + protocol->store(SHOW_LOG_STATUS_FREE, system_charset_info); } else - net_store_data(packet, SHOW_LOG_STATUS_INUSE); + protocol->store(SHOW_LOG_STATUS_INUSE, system_charset_info); - if (my_net_write(&thd->net,(char*) packet->ptr(),packet->length())) + if (protocol->write()) { error=1; goto err; @@ -279,15 +280,17 @@ int berkeley_show_logs(THD *thd) } err: free_root(&show_logs_root,MYF(0)); - my_pthread_setspecific_ptr(THR_MALLOC,old_root); + *root_ptr= old_mem_root; DBUG_RETURN(error); } + static void berkeley_print_error(const char *db_errpfx, char *buffer) { sql_print_error("%s: %s",db_errpfx,buffer); /* purecov: tested */ } + static void berkeley_noticecall(DB_ENV *db_env, db_notices notice) { switch (notice) @@ -307,12 +310,14 @@ void berkeley_cleanup_log_files(void) char **names; int error; +// by HF. Sometimes it crashes. TODO - find out why +#ifndef EMBEDDED_LIBRARY /* XXX: Probably this should be done somewhere else, and * should be tunable by the user. */ - if ((error = txn_checkpoint(db_env, 0, 0, 0))) + if ((error = db_env->txn_checkpoint(db_env, 0, 0, 0))) my_error(ER_ERROR_DURING_CHECKPOINT, MYF(0), error); /* purecov: inspected */ - - if ((error = log_archive(db_env, &names, DB_ARCH_ABS, NULL)) != 0) +#endif + if ((error = db_env->log_archive(db_env, &names, DB_ARCH_ABS)) != 0) { DBUG_PRINT("error", ("log_archive failed (error %d)", error)); /* purecov: inspected */ db_env->err(db_env, error, "log_archive: DB_ARCH_ABS"); /* purecov: inspected */ @@ -336,9 +341,40 @@ void berkeley_cleanup_log_files(void) ** Berkeley DB tables *****************************************************************************/ -static const char *bdb_bas_exts[]= { ha_berkeley_ext, NullS }; +static const char *ha_bdb_bas_exts[]= { ha_berkeley_ext, NullS }; const char **ha_berkeley::bas_ext() const -{ return bdb_bas_exts; } +{ return ha_bdb_bas_exts; } + + +ulong ha_berkeley::index_flags(uint idx, uint part, bool all_parts) const +{ + ulong flags= (HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | HA_KEYREAD_ONLY + | HA_READ_RANGE); + for (uint i= all_parts ? 0 : part ; i <= part ; i++) + { + if (table->key_info[idx].key_part[i].field->type() == FIELD_TYPE_BLOB) + { + /* We can't use BLOBS to shortcut sorts */ + flags&= ~(HA_READ_ORDER | HA_KEYREAD_ONLY | HA_READ_RANGE); + break; + } + switch (table->key_info[idx].key_part[i].field->key_type()) { + case HA_KEYTYPE_TEXT: + case HA_KEYTYPE_VARTEXT: + /* + As BDB stores only one copy of equal strings, we can't use key read + on these. Binary collations do support key read though. 
+ */ + if (!(table->key_info[idx].key_part[i].field->charset()->state + & MY_CS_BINSORT)) + flags&= ~HA_KEYREAD_ONLY; + break; + default: // Keep compiler happy + break; + } + } + return flags; +} static int @@ -443,7 +479,6 @@ berkeley_key_cmp(TABLE *table, KEY *key_info, const char *key, uint key_length) return 0; // Identical keys } - int ha_berkeley::open(const char *name, int mode, uint test_if_locked) { char name_buff[FN_REFLEN]; @@ -513,9 +548,12 @@ int ha_berkeley::open(const char *name, int mode, uint test_if_locked) berkeley_cmp_packed_key)); if (!hidden_primary_key) file->app_private= (void*) (table->key_info+table->primary_key); - if ((error=(file->open(file, fn_format(name_buff,name,"", ha_berkeley_ext, - 2 | 4), - "main", DB_BTREE, open_mode,0)))) + if ((error= txn_begin(db_env, 0, (DB_TXN**) &transaction, 0)) || + (error= (file->open(file, transaction, + fn_format(name_buff, name, "", ha_berkeley_ext, + 2 | 4), + "main", DB_BTREE, open_mode, 0))) || + (error= transaction->commit(transaction, 0))) { free_share(share,table, hidden_primary_key,1); /* purecov: inspected */ my_free((char*) rec_buff,MYF(0)); /* purecov: inspected */ @@ -549,8 +587,10 @@ int ha_berkeley::open(const char *name, int mode, uint test_if_locked) DBUG_PRINT("bdb",("Setting DB_DUP for key %u", i)); (*ptr)->set_flags(*ptr, DB_DUP); } - if ((error=((*ptr)->open(*ptr, name_buff, part, DB_BTREE, - open_mode, 0)))) + if ((error= txn_begin(db_env, 0, (DB_TXN**) &transaction, 0)) || + (error=((*ptr)->open(*ptr, transaction, name_buff, part, DB_BTREE, + open_mode, 0))) || + (error= transaction->commit(transaction, 0))) { close(); /* purecov: inspected */ my_errno=error; /* purecov: inspected */ @@ -822,8 +862,8 @@ int ha_berkeley::write_row(byte * record) DBUG_ENTER("write_row"); statistic_increment(ha_write_count,&LOCK_status); - if (table->time_stamp) - update_timestamp(record+table->time_stamp-1); + if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) + table->timestamp_field->set_time(); if (table->next_number_field && record == table->record[0]) update_auto_increment(); if ((error=pack_row(&row, record,1))) @@ -839,11 +879,11 @@ int ha_berkeley::write_row(byte * record) else { DB_TXN *sub_trans = transaction; - /* Don't use sub transactions in temporary tables (in_use == 0) */ - ulong thd_options = table->in_use ? table->in_use->options : 0; + /* Don't use sub transactions in temporary tables */ + ulong thd_options = table->tmp_table == NO_TMP_TABLE ? 
table->in_use->options : 0; for (uint retry=0 ; retry < berkeley_trans_retry ; retry++) { - key_map changed_keys = 0; + key_map changed_keys(0); if (using_ignore && (thd_options & OPTION_INTERNAL_SUBTRANSACTIONS)) { if ((error=txn_begin(db_env, transaction, &sub_trans, 0))) /* purecov: deadcode */ @@ -854,7 +894,7 @@ int ha_berkeley::write_row(byte * record) key_buff, record), &row, key_type[primary_key]))) { - changed_keys |= (key_map) 1 << primary_key; + changed_keys.set_bit(primary_key); for (uint keynr=0 ; keynr < table->keys ; keynr++) { if (keynr == primary_key) @@ -867,7 +907,7 @@ int ha_berkeley::write_row(byte * record) last_dup_key=keynr; break; } - changed_keys |= (key_map) 1 << keynr; + changed_keys.set_bit(keynr); } } else @@ -884,12 +924,13 @@ int ha_berkeley::write_row(byte * record) DBUG_PRINT("trans",("aborting subtransaction")); /* purecov: deadcode */ new_error=txn_abort(sub_trans); /* purecov: deadcode */ } - else if (changed_keys) + else if (!changed_keys.is_clear_all()) { new_error = 0; - for (uint keynr=0; changed_keys; keynr++, changed_keys >>= 1) + for (uint keynr=0 ; keynr < table->keys+test(hidden_primary_key) ; + keynr++) { - if (changed_keys & 1) + if (changed_keys.is_set(keynr)) { if ((new_error = remove_key(sub_trans, keynr, record, &prim_key))) @@ -1011,7 +1052,7 @@ int ha_berkeley::update_primary_key(DB_TXN *trans, bool primary_key_changed, Clobbers keybuff2 */ -int ha_berkeley::restore_keys(DB_TXN *trans, key_map changed_keys, +int ha_berkeley::restore_keys(DB_TXN *trans, key_map *changed_keys, uint primary_key, const byte *old_row, DBT *old_key, const byte *new_row, DBT *new_key, @@ -1033,18 +1074,21 @@ int ha_berkeley::restore_keys(DB_TXN *trans, key_map changed_keys, rolled back. The last key set in changed_keys is the one that triggered the duplicate key error (it wasn't inserted), so for that one just put back the old value. */ - for (keynr=0; changed_keys; keynr++, changed_keys >>= 1) + if (!changed_keys->is_clear_all()) { - if (changed_keys & 1) + for (keynr=0 ; keynr < table->keys+test(hidden_primary_key) ; keynr++) { - if (changed_keys != 1 && - (error = remove_key(trans, keynr, new_row, new_key))) - break; /* purecov: inspected */ - if ((error = key_file[keynr]->put(key_file[keynr], trans, - create_key(&tmp_key, keynr, key_buff2, - old_row), - old_key, key_type[keynr]))) - break; /* purecov: inspected */ + if (changed_keys->is_set(keynr)) + { + if (changed_keys->is_prefix(1) && + (error = remove_key(trans, keynr, new_row, new_key))) + break; /* purecov: inspected */ + if ((error = key_file[keynr]->put(key_file[keynr], trans, + create_key(&tmp_key, keynr, key_buff2, + old_row), + old_key, key_type[keynr]))) + break; /* purecov: inspected */ + } } } @@ -1059,14 +1103,14 @@ int ha_berkeley::update_row(const byte * old_row, byte * new_row) DBT prim_key, key, old_prim_key; int error; DB_TXN *sub_trans; - ulong thd_options = table->in_use ? table->in_use->options : 0; + ulong thd_options = table->tmp_table == NO_TMP_TABLE ? 
table->in_use->options : 0; bool primary_key_changed; DBUG_ENTER("update_row"); LINT_INIT(error); statistic_increment(ha_update_count,&LOCK_status); - if (table->time_stamp) - update_timestamp(new_row+table->time_stamp-1); + if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) + table->timestamp_field->set_time(); if (hidden_primary_key) { @@ -1089,7 +1133,7 @@ int ha_berkeley::update_row(const byte * old_row, byte * new_row) sub_trans = transaction; for (uint retry=0 ; retry < berkeley_trans_retry ; retry++) { - key_map changed_keys = 0; + key_map changed_keys(0); if (using_ignore && (thd_options & OPTION_INTERNAL_SUBTRANSACTIONS)) { if ((error=txn_begin(db_env, transaction, &sub_trans, 0))) /* purecov: deadcode */ @@ -1122,7 +1166,7 @@ int ha_berkeley::update_row(const byte * old_row, byte * new_row) } DBUG_RETURN(error); // Fatal error /* purecov: inspected */ } - changed_keys |= (key_map)1 << keynr; + changed_keys.set_bit(keynr); if ((error=key_file[keynr]->put(key_file[keynr], sub_trans, create_key(&key, keynr, key_buff2, new_row), @@ -1146,8 +1190,8 @@ int ha_berkeley::update_row(const byte * old_row, byte * new_row) DBUG_PRINT("trans",("aborting subtransaction")); /* purecov: deadcode */ new_error=txn_abort(sub_trans); /* purecov: deadcode */ } - else if (changed_keys) - new_error=restore_keys(transaction, changed_keys, primary_key, + else if (!changed_keys.is_clear_all()) + new_error=restore_keys(transaction, &changed_keys, primary_key, old_row, &old_prim_key, new_row, &prim_key, thd_options); if (new_error) @@ -1228,12 +1272,12 @@ int ha_berkeley::remove_key(DB_TXN *trans, uint keynr, const byte *record, /* Delete all keys for new_record */ int ha_berkeley::remove_keys(DB_TXN *trans, const byte *record, - DBT *new_record, DBT *prim_key, key_map keys) + DBT *new_record, DBT *prim_key, key_map *keys) { int result = 0; - for (uint keynr=0; keys; keynr++, keys>>=1) + for (uint keynr=0 ; keynr < table->keys+test(hidden_primary_key) ; keynr++) { - if (keys & 1) + if (keys->is_set(keynr)) { int new_error=remove_key(trans, keynr, record, prim_key); if (new_error) @@ -1252,7 +1296,7 @@ int ha_berkeley::delete_row(const byte * record) int error; DBT row, prim_key; key_map keys=table->keys_in_use; - ulong thd_options = table->in_use ? table->in_use->options : 0; + ulong thd_options = table->tmp_table == NO_TMP_TABLE ? table->in_use->options : 0; DBUG_ENTER("delete_row"); statistic_increment(ha_delete_count,&LOCK_status); @@ -1260,7 +1304,7 @@ int ha_berkeley::delete_row(const byte * record) DBUG_RETURN((error)); /* purecov: inspected */ create_key(&prim_key, primary_key, key_buff, record); if (hidden_primary_key) - keys|= (key_map) 1 << primary_key; + keys.set_bit(primary_key); /* Subtransactions may be used in order to retry the delete in case we get a DB_LOCK_DEADLOCK error. 
*/ @@ -1273,7 +1317,7 @@ int ha_berkeley::delete_row(const byte * record) break; /* purecov: deadcode */ DBUG_PRINT("trans",("starting sub transaction")); /* purecov: deadcode */ } - error=remove_keys(sub_trans, record, &row, &prim_key, keys); + error=remove_keys(sub_trans, record, &row, &prim_key, &keys); if (!error && (thd_options & OPTION_INTERNAL_SUBTRANSACTIONS)) { DBUG_PRINT("trans",("ending sub transaction")); /* purecov: deadcode */ @@ -1342,6 +1386,7 @@ int ha_berkeley::index_end() error=cursor->c_close(cursor); cursor=0; } + active_index=MAX_KEY; DBUG_RETURN(error); } @@ -1403,7 +1448,7 @@ int ha_berkeley::index_read_idx(byte * buf, uint keynr, const byte * key, statistic_increment(ha_read_key_count,&LOCK_status); DBUG_ENTER("index_read_idx"); current_row.flags=DB_DBT_REALLOC; - active_index= (uint) -1; + active_index=MAX_KEY; DBUG_RETURN(read_row(key_file[keynr]->get(key_file[keynr], transaction, pack_key(&last_key, keynr, key_buff, key, key_len), @@ -1418,10 +1463,21 @@ int ha_berkeley::index_read(byte * buf, const byte * key, DBT row; int error; KEY *key_info= &table->key_info[active_index]; + int do_prev= 0; DBUG_ENTER("ha_berkeley::index_read"); statistic_increment(ha_read_key_count,&LOCK_status); bzero((char*) &row,sizeof(row)); + if (find_flag == HA_READ_BEFORE_KEY) + { + find_flag= HA_READ_KEY_OR_NEXT; + do_prev= 1; + } + else if (find_flag == HA_READ_PREFIX_LAST_OR_PREV) + { + find_flag= HA_READ_AFTER_KEY; + do_prev= 1; + } if (key_len == key_info->key_length) { if (find_flag == HA_READ_AFTER_KEY) @@ -1458,6 +1514,12 @@ int ha_berkeley::index_read(byte * buf, const byte * key, error=HA_ERR_KEY_NOT_FOUND; } } + if (do_prev) + { + bzero((char*) &row, sizeof(row)); + error= read_row(cursor->c_get(cursor, &last_key, &row, DB_PREV), + (char*) buf, active_index, &row, &last_key, 1); + } DBUG_RETURN(error); } @@ -1517,7 +1579,7 @@ int ha_berkeley::index_next_same(byte * buf, const byte *key, uint keylen) { error=read_row(cursor->c_get(cursor, &last_key, &row, DB_NEXT), (char*) buf, active_index, &row, &last_key, 1); - if (!error && ::key_cmp(table, key, active_index, keylen)) + if (!error && ::key_cmp_if_same(table, key, active_index, keylen)) error=HA_ERR_END_OF_FILE; } DBUG_RETURN(error); @@ -1605,7 +1667,7 @@ int ha_berkeley::rnd_pos(byte * buf, byte *pos) statistic_increment(ha_read_rnd_count,&LOCK_status); DBUG_ENTER("ha_berkeley::rnd_pos"); - active_index= (uint) -1; // Don't delete via cursor + active_index= MAX_KEY; DBUG_RETURN(read_row(file->get(file, transaction, get_pos(&db_pos, pos), ¤t_row, 0), @@ -1715,6 +1777,7 @@ int ha_berkeley::extra(enum ha_extra_function operation) int ha_berkeley::reset(void) { + ha_berkeley::extra(HA_EXTRA_RESET); key_read=0; // Reset to state after open return 0; } @@ -1875,7 +1938,7 @@ static int create_sub_table(const char *table_name, const char *sub_name, if (!(error=db_create(&file, db_env, 0))) { file->set_flags(file, flags); - error=(file->open(file, table_name, sub_name, type, + error=(file->open(file, NULL, table_name, sub_name, type, DB_THREAD | DB_CREATE, my_umask)); if (error) { @@ -1931,7 +1994,7 @@ int ha_berkeley::create(const char *name, register TABLE *form, DB *status_block; if (!(error=(db_create(&status_block, db_env, 0)))) { - if (!(error=(status_block->open(status_block, name_buff, + if (!(error=(status_block->open(status_block, NULL, name_buff, "status", DB_BTREE, DB_CREATE, 0)))) { char rec_buff[4+MAX_KEY*4]; @@ -1960,6 +2023,7 @@ int ha_berkeley::delete_table(const char *name) DBUG_RETURN(error); } + int 
ha_berkeley::rename_table(const char * from, const char * to) { int error; @@ -1991,11 +2055,8 @@ double ha_berkeley::scan_time() return rows2double(records/3); } -ha_rows ha_berkeley::records_in_range(int keynr, - const byte *start_key,uint start_key_len, - enum ha_rkey_function start_search_flag, - const byte *end_key,uint end_key_len, - enum ha_rkey_function end_search_flag) +ha_rows ha_berkeley::records_in_range(uint keynr, key_range *start_key, + key_range *end_key) { DBT key; DB_KEY_RANGE start_range, end_range; @@ -2004,25 +2065,27 @@ ha_rows ha_berkeley::records_in_range(int keynr, DBUG_ENTER("records_in_range"); if ((start_key && kfile->key_range(kfile,transaction, - pack_key(&key, keynr, key_buff, start_key, - start_key_len), - &start_range,0)) || + pack_key(&key, keynr, key_buff, + start_key->key, + start_key->length), + &start_range,0)) || (end_key && kfile->key_range(kfile,transaction, - pack_key(&key, keynr, key_buff, end_key, - end_key_len), + pack_key(&key, keynr, key_buff, + end_key->key, + end_key->length), &end_range,0))) DBUG_RETURN(HA_BERKELEY_RANGE_COUNT); // Better than returning an error /* purecov: inspected */ if (!start_key) - start_pos=0.0; - else if (start_search_flag == HA_READ_KEY_EXACT) + start_pos= 0.0; + else if (start_key->flag == HA_READ_KEY_EXACT) start_pos=start_range.less; else start_pos=start_range.less+start_range.equal; if (!end_key) - end_pos=1.0; - else if (end_search_flag == HA_READ_BEFORE_KEY) + end_pos= 1.0; + else if (end_key->flag == HA_READ_BEFORE_KEY) end_pos=end_range.less; else end_pos=end_range.less+end_range.equal; @@ -2105,8 +2168,7 @@ void ha_berkeley::print_error(int error, myf errflag) static void print_msg(THD *thd, const char *table_name, const char *op_name, const char *msg_type, const char *fmt, ...) { - String* packet = &thd->packet; - packet->length(0); + Protocol *protocol= thd->protocol; char msgbuf[256]; msgbuf[0] = 0; va_list args; @@ -2114,23 +2176,23 @@ static void print_msg(THD *thd, const char *table_name, const char *op_name, my_vsnprintf(msgbuf, sizeof(msgbuf), fmt, args); msgbuf[sizeof(msgbuf) - 1] = 0; // healthy paranoia - DBUG_PRINT(msg_type,("message: %s",msgbuf)); - net_store_data(packet, table_name); - net_store_data(packet, op_name); - net_store_data(packet, msg_type); - net_store_data(packet, msgbuf); - if (my_net_write(&thd->net, (char*)thd->packet.ptr(), - thd->packet.length())) + protocol->set_nfields(4); + protocol->prepare_for_resend(); + protocol->store(table_name); + protocol->store(op_name); + protocol->store(msg_type); + protocol->store(msgbuf); + if (protocol->write()) thd->killed=1; } #endif int ha_berkeley::analyze(THD* thd, HA_CHECK_OPT* check_opt) { - DB_BTREE_STAT *stat=0; uint i; + DB_BTREE_STAT *stat=0; DB_TXN_STAT *txn_stat_ptr= 0; /* @@ -2142,25 +2204,16 @@ int ha_berkeley::analyze(THD* thd, HA_CHECK_OPT* check_opt) the beginning of the transaction.. */ - /* - If it's a merge conflict here (4.0->4.1), please ignore it! 
- - The reason of the conflict is the difference between versions of bdb: - mysql-4.0 uses bdb 3.2.9 - mysql-4.1 uses bdb 4.1.24 - Older one has global functions txn_stat and txn_id but - newer one has DB_ENV->txn_stat and DB_TXN->id - */ - if (!txn_stat(db_env, &txn_stat_ptr, 0) && + if (!db_env->txn_stat(db_env, &txn_stat_ptr, 0) && txn_stat_ptr && txn_stat_ptr->st_nactive>=2) { DB_TXN_ACTIVE *atxn_stmt= 0, *atxn_all= 0; DB_TXN *txn_all= (DB_TXN*) thd->transaction.all.bdb_tid; - u_int32_t all_id= txn_id(txn_all); + u_int32_t all_id= txn_all->id(txn_all); DB_TXN *txn_stmt= (DB_TXN*) thd->transaction.stmt.bdb_tid; - u_int32_t stmt_id= txn_id(txn_stmt); + u_int32_t stmt_id= txn_stmt->id(txn_stmt); DB_TXN_ACTIVE *cur= txn_stat_ptr->st_txnarray; DB_TXN_ACTIVE *end= cur + txn_stat_ptr->st_nactive; @@ -2186,7 +2239,7 @@ int ha_berkeley::analyze(THD* thd, HA_CHECK_OPT* check_opt) free(stat); stat=0; } - if ((key_file[i]->stat)(key_file[i], (void*) &stat, 0, 0)) + if ((key_file[i]->stat)(key_file[i], (void*) &stat, 0)) goto err; /* purecov: inspected */ share->rec_per_key[i]= (stat->bt_ndata / (stat->bt_nkeys ? stat->bt_nkeys : 1)); @@ -2199,7 +2252,7 @@ int ha_berkeley::analyze(THD* thd, HA_CHECK_OPT* check_opt) free(stat); stat=0; } - if ((file->stat)(file, (void*) &stat, 0, 0)) + if ((file->stat)(file, (void*) &stat, 0)) goto err; /* purecov: inspected */ } pthread_mutex_lock(&share->mutex); @@ -2330,7 +2383,7 @@ static BDB_SHARE *get_share(const char *table_name, TABLE *table) strmov(share->table_name,table_name); share->key_file = key_file; share->key_type = key_type; - if (hash_insert(&bdb_open_tables, (byte*) share)) + if (my_hash_insert(&bdb_open_tables, (byte*) share)) { pthread_mutex_unlock(&bdb_mutex); /* purecov: inspected */ my_free((gptr) share,0); /* purecov: inspected */ @@ -2402,7 +2455,7 @@ void ha_berkeley::get_status() fn_format(name_buff, share->table_name,"", ha_berkeley_ext, 2 | 4); if (!db_create(&share->status_block, db_env, 0)) { - if (share->status_block->open(share->status_block, name_buff, + if (share->status_block->open(share->status_block, NULL, name_buff, "status", DB_BTREE, open_mode, 0)) { share->status_block->close(share->status_block, 0); /* purecov: inspected */ @@ -2478,7 +2531,7 @@ static void update_status(BDB_SHARE *share, TABLE *table) if (db_create(&share->status_block, db_env, 0)) /* purecov: inspected */ goto end; /* purecov: inspected */ share->status_block->set_flags(share->status_block,0); /* purecov: inspected */ - if (share->status_block->open(share->status_block, + if (share->status_block->open(share->status_block, NULL, fn_format(name_buff,share->table_name,"", ha_berkeley_ext,2 | 4), "status", DB_BTREE, @@ -2509,7 +2562,7 @@ end: Used when sorting to allocate buffers and by the optimizer. 
*/ -ha_rows ha_berkeley::estimate_number_of_rows() +ha_rows ha_berkeley::estimate_rows_upper_bound() { return share->rows + HA_BERKELEY_EXTRA_ROWS; } diff --git a/sql/ha_berkeley.h b/sql/ha_berkeley.h index 1925d1c410f..1d4823bbdc0 100644 --- a/sql/ha_berkeley.h +++ b/sql/ha_berkeley.h @@ -15,7 +15,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_INTERFACE #pragma interface /* gcc class implementation */ #endif @@ -72,8 +72,8 @@ class ha_berkeley: public handler uint key_length); int remove_key(DB_TXN *trans, uint keynr, const byte *record, DBT *prim_key); int remove_keys(DB_TXN *trans,const byte *record, DBT *new_record, - DBT *prim_key, key_map keys); - int restore_keys(DB_TXN *trans, key_map changed_keys, uint primary_key, + DBT *prim_key, key_map *keys); + int restore_keys(DB_TXN *trans, key_map *changed_keys, uint primary_key, const byte *old_row, DBT *old_key, const byte *new_row, DBT *new_key, ulong thd_options); @@ -87,28 +87,21 @@ class ha_berkeley: public handler public: ha_berkeley(TABLE *table): handler(table), alloc_ptr(0),rec_buff(0), file(0), - int_table_flags(HA_REC_NOT_IN_SEQ | - HA_KEYPOS_TO_RNDPOS | HA_LASTKEY_ORDER | - HA_NULL_KEY | HA_BLOB_KEY | HA_NOT_EXACT_COUNT | - HA_PRIMARY_KEY_IN_READ_INDEX | HA_DROP_BEFORE_CREATE | - HA_AUTO_PART_KEY | HA_TABLE_SCAN_ON_INDEX | - HA_KEY_READ_WRONG_STR | HA_FILE_BASED), - changed_rows(0),last_dup_key((uint) -1),version(0),using_ignore(0) - { - } + int_table_flags(HA_REC_NOT_IN_SEQ | HA_FAST_KEY_READ | + HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS | HA_NOT_EXACT_COUNT | + HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED | + HA_AUTO_PART_KEY | HA_TABLE_SCAN_ON_INDEX), + changed_rows(0),last_dup_key((uint) -1),version(0),using_ignore(0) {} ~ha_berkeley() {} const char *table_type() const { return "BerkeleyDB"; } + ulong index_flags(uint idx, uint part, bool all_parts) const; const char *index_type(uint key_number) { return "BTREE"; } const char **bas_ext() const; ulong table_flags(void) const { return int_table_flags; } - uint max_record_length() const { return HA_MAX_REC_LENGTH; } - uint max_keys() const { return MAX_KEY-1; } - uint max_key_parts() const { return MAX_REF_PARTS; } - uint max_key_length() const { return MAX_KEY_LENGTH; } + uint max_supported_keys() const { return MAX_KEY-1; } uint extra_rec_buf_length() { return BDB_HIDDEN_PRIMARY_KEY_LENGTH; } - ha_rows estimate_number_of_rows(); - bool fast_key_read() { return 1;} - key_map keys_to_use_for_scanning() { return ~(key_map) 0; } + ha_rows estimate_rows_upper_bound(); + const key_map *keys_to_use_for_scanning() { return &key_map_full; } bool has_transactions() { return 1;} int open(const char *name, int mode, uint test_if_locked); @@ -129,7 +122,7 @@ class ha_berkeley: public handler int index_prev(byte * buf); int index_first(byte * buf); int index_last(byte * buf); - int rnd_init(bool scan=1); + int rnd_init(bool scan); int rnd_end(); int rnd_next(byte *buf); int rnd_pos(byte * buf, byte *pos); @@ -144,12 +137,7 @@ class ha_berkeley: public handler int optimize(THD* thd, HA_CHECK_OPT* check_opt); int check(THD* thd, HA_CHECK_OPT* check_opt); - ha_rows records_in_range(int inx, - const byte *start_key,uint start_key_len, - enum ha_rkey_function start_search_flag, - const byte *end_key,uint end_key_len, - enum ha_rkey_function end_search_flag); - + ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key); int create(const char *name, register TABLE *form, HA_CREATE_INFO *create_info); int 
delete_table(const char *name); @@ -167,9 +155,10 @@ class ha_berkeley: public handler } longlong get_auto_increment(); void print_error(int error, myf errflag); + uint8 table_cache_type() { return HA_CACHE_TBL_TRANSACT; } }; -extern bool berkeley_skip, berkeley_shared_data; +extern bool berkeley_shared_data; extern u_int32_t berkeley_init_flags,berkeley_env_flags, berkeley_lock_type, berkeley_lock_types[]; extern ulong berkeley_cache_size, berkeley_max_lock, berkeley_log_buffer_size; @@ -182,4 +171,4 @@ bool berkeley_end(void); bool berkeley_flush_logs(void); int berkeley_commit(THD *thd, void *trans); int berkeley_rollback(THD *thd, void *trans); -int berkeley_show_logs(THD *thd); +int berkeley_show_logs(Protocol *protocol); diff --git a/sql/ha_blackhole.cc b/sql/ha_blackhole.cc new file mode 100644 index 00000000000..9ac4ba2da15 --- /dev/null +++ b/sql/ha_blackhole.cc @@ -0,0 +1,192 @@ +/* Copyright (C) 2005 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + + +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation // gcc: Class implementation +#endif + +#include "mysql_priv.h" +#ifdef HAVE_BLACKHOLE_DB +#include "ha_blackhole.h" + + +const char **ha_blackhole::bas_ext() const +{ + static const char *ext[]= { NullS }; + return ext; +} + +int ha_blackhole::open(const char *name, int mode, uint test_if_locked) +{ + DBUG_ENTER("ha_blackhole::open"); + thr_lock_init(&thr_lock); + thr_lock_data_init(&thr_lock,&lock,NULL); + DBUG_RETURN(0); +} + +int ha_blackhole::close(void) +{ + DBUG_ENTER("ha_blackhole::close"); + thr_lock_delete(&thr_lock); + DBUG_RETURN(0); +} + +int ha_blackhole::create(const char *name, TABLE *table_arg, + HA_CREATE_INFO *create_info) +{ + DBUG_ENTER("ha_blackhole::create"); + DBUG_RETURN(0); +} + +const char *ha_blackhole::index_type(uint key_number) +{ + DBUG_ENTER("ha_blackhole::index_type"); + DBUG_RETURN((table->key_info[key_number].flags & HA_FULLTEXT) ? + "FULLTEXT" : + (table->key_info[key_number].flags & HA_SPATIAL) ? + "SPATIAL" : + (table->key_info[key_number].algorithm == HA_KEY_ALG_RTREE) ? 
+ "RTREE" : + "BTREE"); +} + +int ha_blackhole::write_row(byte * buf) +{ + DBUG_ENTER("ha_blackhole::write_row"); + DBUG_RETURN(0); +} + +int ha_blackhole::rnd_init(bool scan) +{ + DBUG_ENTER("ha_blackhole::rnd_init"); + DBUG_RETURN(0); +} + + +int ha_blackhole::rnd_next(byte *buf) +{ + DBUG_ENTER("ha_blackhole::rnd_next"); + DBUG_RETURN(HA_ERR_END_OF_FILE); +} + + +int ha_blackhole::rnd_pos(byte * buf, byte *pos) +{ + DBUG_ENTER("ha_blackhole::rnd_pos"); + DBUG_ASSERT(0); + DBUG_RETURN(0); +} + + +void ha_blackhole::position(const byte *record) +{ + DBUG_ENTER("ha_blackhole::position"); + DBUG_ASSERT(0); + DBUG_VOID_RETURN; +} + + +void ha_blackhole::info(uint flag) +{ + DBUG_ENTER("ha_blackhole::info"); + + records= 0; + deleted= 0; + errkey= 0; + mean_rec_length= 0; + data_file_length= 0; + index_file_length= 0; + max_data_file_length= 0; + delete_length= 0; + if (flag & HA_STATUS_AUTO) + auto_increment_value= 1; + DBUG_VOID_RETURN; +} + +int ha_blackhole::external_lock(THD *thd, int lock_type) +{ + DBUG_ENTER("ha_blackhole::external_lock"); + DBUG_RETURN(0); +} + + +uint ha_blackhole::lock_count(void) const +{ + DBUG_ENTER("ha_blackhole::lock_count"); + DBUG_RETURN(0); +} + +THR_LOCK_DATA **ha_blackhole::store_lock(THD *thd, + THR_LOCK_DATA **to, + enum thr_lock_type lock_type) +{ + DBUG_ENTER("ha_blackhole::store_lock"); + DBUG_RETURN(to); +} + + +int ha_blackhole::index_read(byte * buf, const byte * key, + uint key_len, enum ha_rkey_function find_flag) +{ + DBUG_ENTER("ha_blackhole::index_read"); + DBUG_RETURN(0); +} + + +int ha_blackhole::index_read_idx(byte * buf, uint idx, const byte * key, + uint key_len, enum ha_rkey_function find_flag) +{ + DBUG_ENTER("ha_blackhole::index_read_idx"); + DBUG_RETURN(HA_ERR_END_OF_FILE); +} + + +int ha_blackhole::index_read_last(byte * buf, const byte * key, uint key_len) +{ + DBUG_ENTER("ha_blackhole::index_read_last"); + DBUG_RETURN(HA_ERR_END_OF_FILE); +} + + +int ha_blackhole::index_next(byte * buf) +{ + DBUG_ENTER("ha_blackhole::index_next"); + DBUG_RETURN(HA_ERR_END_OF_FILE); +} + + +int ha_blackhole::index_prev(byte * buf) +{ + DBUG_ENTER("ha_blackhole::index_prev"); + DBUG_RETURN(HA_ERR_END_OF_FILE); +} + + +int ha_blackhole::index_first(byte * buf) +{ + DBUG_ENTER("ha_blackhole::index_first"); + DBUG_RETURN(HA_ERR_END_OF_FILE); +} + + +int ha_blackhole::index_last(byte * buf) +{ + DBUG_ENTER("ha_blackhole::index_last"); + DBUG_RETURN(HA_ERR_END_OF_FILE); +} + +#endif /* HAVE_BLACKHOLE_DB */ diff --git a/sql/ha_blackhole.h b/sql/ha_blackhole.h new file mode 100644 index 00000000000..88715c62408 --- /dev/null +++ b/sql/ha_blackhole.h @@ -0,0 +1,89 @@ +/* Copyright (C) 2005 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifdef USE_PRAGMA_INTERFACE +#pragma interface /* gcc class implementation */ +#endif + +/* + Class definition for the blackhole storage engine + "Dumbest named feature ever" +*/ +class ha_blackhole: public handler +{ + THR_LOCK_DATA lock; /* MySQL lock */ + THR_LOCK thr_lock; + +public: + ha_blackhole(TABLE *table): handler(table) + { + } + ~ha_blackhole() + { + } + /* The name that will be used for display purposes */ + const char *table_type() const { return "BLACKHOLE"; } + /* + The name of the index type that will be used for display + don't implement this method unless you really have indexes + */ + const char *index_type(uint key_number); + const char **bas_ext() const; + ulong table_flags() const + { + return(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER | + HA_DUPP_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY | + HA_FILE_BASED | HA_CAN_GEOMETRY | HA_READ_RND_SAME | + HA_CAN_INSERT_DELAYED); + } + ulong index_flags(uint inx, uint part, bool all_parts) const + { + return ((table->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ? + 0 : HA_READ_NEXT | HA_READ_PREV | HA_READ_RANGE | + HA_READ_ORDER | HA_KEYREAD_ONLY); + } + /* The following defines can be increased if necessary */ +#define BLACKHOLE_MAX_KEY 64 /* Max allowed keys */ +#define BLACKHOLE_MAX_KEY_SEG 16 /* Max segments for key */ +#define BLACKHOLE_MAX_KEY_LENGTH 1000 + uint max_supported_keys() const { return BLACKHOLE_MAX_KEY; } + uint max_supported_key_length() const { return BLACKHOLE_MAX_KEY_LENGTH; } + uint max_supported_key_part_length() const { return BLACKHOLE_MAX_KEY_LENGTH; } + int open(const char *name, int mode, uint test_if_locked); + int close(void); + int write_row(byte * buf); + int rnd_init(bool scan); + int rnd_next(byte *buf); + int rnd_pos(byte * buf, byte *pos); + int index_read(byte * buf, const byte * key, + uint key_len, enum ha_rkey_function find_flag); + int index_read_idx(byte * buf, uint idx, const byte * key, + uint key_len, enum ha_rkey_function find_flag); + int index_read_last(byte * buf, const byte * key, uint key_len); + int index_next(byte * buf); + int index_prev(byte * buf); + int index_first(byte * buf); + int index_last(byte * buf); + void position(const byte *record); + void info(uint flag); + int external_lock(THD *thd, int lock_type); + uint lock_count(void) const; + int create(const char *name, TABLE *table_arg, + HA_CREATE_INFO *create_info); + THR_LOCK_DATA **store_lock(THD *thd, + THR_LOCK_DATA **to, + enum thr_lock_type lock_type); +}; diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc index 5aa42fa1beb..9c680daaf91 100644 --- a/sql/ha_heap.cc +++ b/sql/ha_heap.cc @@ -1,4 +1,4 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000,2004 MySQL AB & MySQL Finland AB & TCX DataKonsult AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -15,7 +15,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_IMPLEMENTATION #pragma implementation // gcc: Class implementation #endif @@ -30,126 +30,196 @@ const char **ha_heap::bas_ext() const { static const char *ext[1]= { NullS }; return ext; } +/* + Hash index statistics is updated (copied from HP_KEYDEF::hash_buckets to + rec_per_key) 
after 1/HEAP_STATS_UPDATE_THRESHOLD fraction of table records + have been inserted/updated/deleted. delete_all_rows() and table flush cause + immediate update. + + NOTE + hash index statistics must be updated when number of table records changes + from 0 to non-zero value and vice versa. Otherwise records_in_range may + erroneously return 0 and 'range' may miss records. +*/ +#define HEAP_STATS_UPDATE_THRESHOLD 10 int ha_heap::open(const char *name, int mode, uint test_if_locked) { - uint key,parts,mem_per_row=0; - ulong max_rows; - HP_KEYDEF *keydef; - HP_KEYSEG *seg; - THD *thd= current_thd; + if (!(file= heap_open(name, mode)) && my_errno == ENOENT) + { + HA_CREATE_INFO create_info; + bzero(&create_info, sizeof(create_info)); + if (!create(name, table, &create_info)) + { + file= heap_open(name, mode); + implicit_emptied= 1; + } + } + ref_length= sizeof(HEAP_PTR); + if (file) + { + /* Initialize variables for the opened table */ + set_keys_for_scanning(); + /* + We cannot run update_key_stats() here because we do not have a + lock on the table. The 'records' count might just be changed + temporarily at this moment and we might get wrong statistics (Bug + #10178). Instead we request for update. This will be done in + ha_heap::info(), which is always called before key statistics are + used. + */ + key_stat_version= file->s->key_stat_version-1; + } + return (file ? 0 : 1); +} - for (key=parts=0 ; key < table->keys ; key++) - parts+=table->key_info[key].key_parts; +int ha_heap::close(void) +{ + return heap_close(file); +} - if (!(keydef=(HP_KEYDEF*) my_malloc(table->keys*sizeof(HP_KEYDEF)+ - parts*sizeof(HP_KEYSEG),MYF(MY_WME)))) - return my_errno; - seg=my_reinterpret_cast(HP_KEYSEG*) (keydef+table->keys); - for (key=0 ; key < table->keys ; key++) - { - KEY *pos=table->key_info+key; - KEY_PART_INFO *key_part= pos->key_part; - KEY_PART_INFO *key_part_end= key_part+pos->key_parts; - mem_per_row += (pos->key_length + (sizeof(char*) * 2)); +/* + Compute which keys to use for scanning + + SYNOPSIS + set_keys_for_scanning() + no parameter + + DESCRIPTION + Set the bitmap btree_keys, which is used when the upper layers ask + which keys to use for scanning. For each btree index the + corresponding bit is set. - keydef[key].keysegs=(uint) pos->key_parts; - keydef[key].flag = (pos->flags & (HA_NOSAME | HA_NULL_ARE_EQUAL)); - keydef[key].seg=seg; + RETURN + void +*/ + +void ha_heap::set_keys_for_scanning(void) +{ + btree_keys.clear_all(); + for (uint i= 0 ; i < table->keys ; i++) + { + if (table->key_info[i].algorithm == HA_KEY_ALG_BTREE) + btree_keys.set_bit(i); + } +} - for (; key_part != key_part_end ; key_part++, seg++) +void ha_heap::update_key_stats() +{ + for (uint i= 0; i < table->keys; i++) + { + KEY *key=table->key_info+i; + if (!key->rec_per_key) + continue; + if (key->algorithm != HA_KEY_ALG_BTREE) { - uint flag=key_part->key_type; - Field *field=key_part->field; - if (!f_is_packed(flag) && - f_packtype(flag) == (int) FIELD_TYPE_DECIMAL && - !(flag & FIELDFLAG_BINARY)) - seg->type= (int) HA_KEYTYPE_TEXT; - else - seg->type= (int) HA_KEYTYPE_BINARY; - seg->start=(uint) key_part->offset; - seg->length=(uint) key_part->length; - if (field->null_ptr) - { - seg->null_bit=field->null_bit; - seg->null_pos= (uint) (field->null_ptr- - (uchar*) table->record[0]); - } + if (key->flags & HA_NOSAME) + key->rec_per_key[key->key_parts-1]= 1; else { - seg->null_bit=0; - seg->null_pos=0; + ha_rows hash_buckets= file->s->keydef[i].hash_buckets; + uint no_records= hash_buckets ? 
file->s->records/hash_buckets : 2; + if (no_records < 2) + no_records= 2; + key->rec_per_key[key->key_parts-1]= no_records; } } } - mem_per_row += MY_ALIGN(table->reclength+1, sizeof(char*)); - max_rows = (ulong) (thd->variables.max_heap_table_size / mem_per_row); - file=heap_open(name,mode, - table->keys,keydef, - table->reclength, - (ulong) ((table->max_rows < max_rows && table->max_rows) ? - table->max_rows : max_rows), - (ulong) table->min_rows); - my_free((gptr) keydef,MYF(0)); - if (file) - info(HA_STATUS_NO_LOCK | HA_STATUS_CONST | HA_STATUS_VARIABLE); - ref_length=sizeof(HEAP_PTR); - return (!file ? errno : 0); -} - -int ha_heap::close(void) -{ - return heap_close(file); + records_changed= 0; + /* At the end of update_key_stats() we can proudly claim they are OK. */ + key_stat_version= file->s->key_stat_version; } int ha_heap::write_row(byte * buf) { + int res; statistic_increment(ha_write_count,&LOCK_status); - if (table->time_stamp) - update_timestamp(buf+table->time_stamp-1); - return heap_write(file,buf); + if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) + table->timestamp_field->set_time(); + if (table->next_number_field && buf == table->record[0]) + update_auto_increment(); + res= heap_write(file,buf); + if (!res && ++records_changed*HEAP_STATS_UPDATE_THRESHOLD > + file->s->records) + { + /* + We can perform this safely since only one writer at the time is + allowed on the table. + */ + file->s->key_stat_version++; + } + return res; } int ha_heap::update_row(const byte * old_data, byte * new_data) { + int res; statistic_increment(ha_update_count,&LOCK_status); - if (table->time_stamp) - update_timestamp(new_data+table->time_stamp-1); - return heap_update(file,old_data,new_data); + if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) + table->timestamp_field->set_time(); + res= heap_update(file,old_data,new_data); + if (!res && ++records_changed*HEAP_STATS_UPDATE_THRESHOLD > + file->s->records) + { + /* + We can perform this safely since only one writer at the time is + allowed on the table. + */ + file->s->key_stat_version++; + } + return res; } int ha_heap::delete_row(const byte * buf) { + int res; statistic_increment(ha_delete_count,&LOCK_status); - return heap_delete(file,buf); + res= heap_delete(file,buf); + if (!res && table->tmp_table == NO_TMP_TABLE && + ++records_changed*HEAP_STATS_UPDATE_THRESHOLD > file->s->records) + { + /* + We can perform this safely since only one writer at the time is + allowed on the table. + */ + file->s->key_stat_version++; + } + return res; } -int ha_heap::index_read(byte * buf, const byte * key, - uint key_len __attribute__((unused)), - enum ha_rkey_function find_flag - __attribute__((unused))) +int ha_heap::index_read(byte * buf, const byte * key, uint key_len, + enum ha_rkey_function find_flag) { - statistic_increment(ha_read_key_count,&LOCK_status); - int error=heap_rkey(file,buf,active_index, key); - table->status=error ? STATUS_NOT_FOUND: 0; + DBUG_ASSERT(inited==INDEX); + statistic_increment(ha_read_key_count, &LOCK_status); + int error = heap_rkey(file,buf,active_index, key, key_len, find_flag); + table->status = error ? 
STATUS_NOT_FOUND : 0; return error; } -int ha_heap::index_read_idx(byte * buf, uint index, const byte * key, - uint key_len __attribute__((unused)), - enum ha_rkey_function find_flag - __attribute__((unused))) +int ha_heap::index_read_last(byte *buf, const byte *key, uint key_len) { - statistic_increment(ha_read_key_count,&LOCK_status); - int error=heap_rkey(file, buf, index, key); - table->status=error ? STATUS_NOT_FOUND: 0; + DBUG_ASSERT(inited==INDEX); + statistic_increment(ha_read_key_count, &LOCK_status); + int error= heap_rkey(file, buf, active_index, key, key_len, + HA_READ_PREFIX_LAST); + table->status= error ? STATUS_NOT_FOUND : 0; return error; } +int ha_heap::index_read_idx(byte * buf, uint index, const byte * key, + uint key_len, enum ha_rkey_function find_flag) +{ + statistic_increment(ha_read_key_count, &LOCK_status); + int error = heap_rkey(file, buf, index, key, key_len, find_flag); + table->status = error ? STATUS_NOT_FOUND : 0; + return error; +} int ha_heap::index_next(byte * buf) { + DBUG_ASSERT(inited==INDEX); statistic_increment(ha_read_next_count,&LOCK_status); int error=heap_rnext(file,buf); table->status=error ? STATUS_NOT_FOUND: 0; @@ -158,6 +228,7 @@ int ha_heap::index_next(byte * buf) int ha_heap::index_prev(byte * buf) { + DBUG_ASSERT(inited==INDEX); statistic_increment(ha_read_prev_count,&LOCK_status); int error=heap_rprev(file,buf); table->status=error ? STATUS_NOT_FOUND: 0; @@ -166,16 +237,18 @@ int ha_heap::index_prev(byte * buf) int ha_heap::index_first(byte * buf) { + DBUG_ASSERT(inited==INDEX); statistic_increment(ha_read_first_count,&LOCK_status); - int error=heap_rfirst(file, buf); + int error=heap_rfirst(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; } int ha_heap::index_last(byte * buf) { + DBUG_ASSERT(inited==INDEX); statistic_increment(ha_read_last_count,&LOCK_status); - int error=heap_rlast(file, buf); + int error=heap_rlast(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; } @@ -222,7 +295,15 @@ void ha_heap::info(uint flag) index_file_length=info.index_length; max_data_file_length= info.max_records* info.reclength; delete_length= info.deleted * info.reclength; - implicit_emptied= info.implicit_emptied; + if (flag & HA_STATUS_AUTO) + auto_increment_value= info.auto_increment; + /* + If info() is called for the first time after open(), we will still + have to update the key statistics. Hoping that a table lock is now + in place. + */ + if (key_stat_version != file->s->key_stat_version) + update_key_stats(); } int ha_heap::extra(enum ha_extra_function operation) @@ -230,14 +311,17 @@ int ha_heap::extra(enum ha_extra_function operation) return heap_extra(file,operation); } -int ha_heap::reset(void) -{ - return heap_extra(file,HA_EXTRA_RESET); -} - int ha_heap::delete_all_rows() { heap_clear(file); + if (table->tmp_table == NO_TMP_TABLE) + { + /* + We can perform this safely since only one writer at the time is + allowed on the table. + */ + file->s->key_stat_version++; + } return 0; } @@ -246,6 +330,114 @@ int ha_heap::external_lock(THD *thd, int lock_type) return 0; // No external locking } + +/* + Disable indexes. + + SYNOPSIS + disable_indexes() + mode mode of operation: + HA_KEY_SWITCH_NONUNIQ disable all non-unique keys + HA_KEY_SWITCH_ALL disable all keys + HA_KEY_SWITCH_NONUNIQ_SAVE dis. non-uni. and make persistent + HA_KEY_SWITCH_ALL_SAVE dis. all keys and make persistent + + DESCRIPTION + Disable indexes and clear keys to use for scanning. 
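Editor's note: the write_row/update_row/delete_row hunks above implement a lazy statistics refresh for HEAP tables. Every row change bumps a per-handler counter, and once roughly 1/HEAP_STATS_UPDATE_THRESHOLD of the rows have changed, the shared key_stat_version is incremented so that the next info() call rebuilds rec_per_key under a table lock. The sketch below reproduces only that pattern in isolation; the struct and member names are invented for illustration and are not the handler's API.

// Minimal C++ sketch, not MySQL code: writers mark statistics stale cheaply,
// the expensive rebuild is deferred to the next info() call.
#include <cstdint>

struct Shared {
  uint64_t records = 0;           // current row count of the table
  uint64_t key_stat_version = 0;  // bumped whenever stats become stale
};

struct HandlerSketch {
  Shared   *share;
  uint64_t records_changed = 0;   // changes seen through this handler
  uint64_t key_stat_version = 0;  // version the cached stats belong to
  static const uint64_t threshold = 10;  // plays the role of HEAP_STATS_UPDATE_THRESHOLD

  void note_row_changed() {
    // Stale once more than records/threshold rows changed since the last rebuild.
    if (++records_changed * threshold > share->records)
      share->key_stat_version++;
  }

  void info() {
    if (key_stat_version != share->key_stat_version) {
      /* ... recompute rec_per_key estimates here, under a table lock ... */
      records_changed  = 0;
      key_stat_version = share->key_stat_version;
    }
  }
};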
+ + IMPLEMENTATION + HA_KEY_SWITCH_NONUNIQ is not implemented. + HA_KEY_SWITCH_NONUNIQ_SAVE is not implemented with HEAP. + HA_KEY_SWITCH_ALL_SAVE is not implemented with HEAP. + + RETURN + 0 ok + HA_ERR_WRONG_COMMAND mode not implemented. +*/ + +int ha_heap::disable_indexes(uint mode) +{ + int error; + + if (mode == HA_KEY_SWITCH_ALL) + { + if (!(error= heap_disable_indexes(file))) + set_keys_for_scanning(); + } + else + { + /* mode not implemented */ + error= HA_ERR_WRONG_COMMAND; + } + return error; +} + + +/* + Enable indexes. + + SYNOPSIS + enable_indexes() + mode mode of operation: + HA_KEY_SWITCH_NONUNIQ enable all non-unique keys + HA_KEY_SWITCH_ALL enable all keys + HA_KEY_SWITCH_NONUNIQ_SAVE en. non-uni. and make persistent + HA_KEY_SWITCH_ALL_SAVE en. all keys and make persistent + + DESCRIPTION + Enable indexes and set keys to use for scanning. + The indexes might have been disabled by disable_index() before. + The function works only if both data and indexes are empty, + since the heap storage engine cannot repair the indexes. + To be sure, call handler::delete_all_rows() before. + + IMPLEMENTATION + HA_KEY_SWITCH_NONUNIQ is not implemented. + HA_KEY_SWITCH_NONUNIQ_SAVE is not implemented with HEAP. + HA_KEY_SWITCH_ALL_SAVE is not implemented with HEAP. + + RETURN + 0 ok + HA_ERR_CRASHED data or index is non-empty. Delete all rows and retry. + HA_ERR_WRONG_COMMAND mode not implemented. +*/ + +int ha_heap::enable_indexes(uint mode) +{ + int error; + + if (mode == HA_KEY_SWITCH_ALL) + { + if (!(error= heap_enable_indexes(file))) + set_keys_for_scanning(); + } + else + { + /* mode not implemented */ + error= HA_ERR_WRONG_COMMAND; + } + return error; +} + + +/* + Test if indexes are disabled. + + SYNOPSIS + indexes_are_disabled() + no parameters + + RETURN + 0 indexes are not disabled + 1 all indexes are disabled + [2 non-unique indexes are disabled - NOT YET IMPLEMENTED] +*/ + +int ha_heap::indexes_are_disabled(void) +{ + return heap_indexes_are_disabled(file); +} + THR_LOCK_DATA **ha_heap::store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type) @@ -256,7 +448,6 @@ THR_LOCK_DATA **ha_heap::store_lock(THD *thd, return to; } - /* We have to ignore ENOENT entries as the HEAP table is created on open and not when doing a CREATE on the table. @@ -265,7 +456,8 @@ THR_LOCK_DATA **ha_heap::store_lock(THD *thd, int ha_heap::delete_table(const char *name) { char buff[FN_REFLEN]; - int error= heap_delete_table(fn_format(buff,name,"","",4+2)); + int error= heap_delete_table(fn_format(buff,name,"","", + MY_REPLACE_EXT|MY_UNPACK_FILENAME)); return error == ENOENT ? 
0 : error; } @@ -275,25 +467,142 @@ int ha_heap::rename_table(const char * from, const char * to) } -ha_rows ha_heap::records_in_range(int inx, - const byte *start_key,uint start_key_len, - enum ha_rkey_function start_search_flag, - const byte *end_key,uint end_key_len, - enum ha_rkey_function end_search_flag) +ha_rows ha_heap::records_in_range(uint inx, key_range *min_key, + key_range *max_key) { - KEY *pos=table->key_info+inx; - if (start_key_len != end_key_len || - start_key_len != pos->key_length || - start_search_flag != HA_READ_KEY_EXACT || - end_search_flag != HA_READ_AFTER_KEY) - return HA_POS_ERROR; // Can't only use exact keys - return 10; // Good guess + KEY *key=table->key_info+inx; + if (key->algorithm == HA_KEY_ALG_BTREE) + return hp_rb_records_in_range(file, inx, min_key, max_key); + + if (!min_key || !max_key || + min_key->length != max_key->length || + min_key->length != key->key_length || + min_key->flag != HA_READ_KEY_EXACT || + max_key->flag != HA_READ_AFTER_KEY) + return HA_POS_ERROR; // Can only use exact keys + + if (records <= 1) + return records; + + /* Assert that info() did run. We need current statistics here. */ + DBUG_ASSERT(key_stat_version == file->s->key_stat_version); + return key->rec_per_key[key->key_parts-1]; } -int ha_heap::create(const char *name, TABLE *form, HA_CREATE_INFO *create_info) - +int ha_heap::create(const char *name, TABLE *table_arg, + HA_CREATE_INFO *create_info) { + uint key, parts, mem_per_row= 0; + uint auto_key= 0, auto_key_type= 0; + ha_rows max_rows; + HP_KEYDEF *keydef; + HA_KEYSEG *seg; char buff[FN_REFLEN]; - return heap_create(fn_format(buff,name,"","",4+2)); + int error; + + for (key= parts= 0; key < table_arg->keys; key++) + parts+= table_arg->key_info[key].key_parts; + + if (!(keydef= (HP_KEYDEF*) my_malloc(table_arg->keys * sizeof(HP_KEYDEF) + + parts * sizeof(HA_KEYSEG), + MYF(MY_WME)))) + return my_errno; + seg= my_reinterpret_cast(HA_KEYSEG*) (keydef + table_arg->keys); + for (key= 0; key < table_arg->keys; key++) + { + KEY *pos= table_arg->key_info+key; + KEY_PART_INFO *key_part= pos->key_part; + KEY_PART_INFO *key_part_end= key_part + pos->key_parts; + + keydef[key].keysegs= (uint) pos->key_parts; + keydef[key].flag= (pos->flags & (HA_NOSAME | HA_NULL_ARE_EQUAL)); + keydef[key].seg= seg; + + switch (pos->algorithm) { + case HA_KEY_ALG_UNDEF: + case HA_KEY_ALG_HASH: + keydef[key].algorithm= HA_KEY_ALG_HASH; + mem_per_row+= sizeof(char*) * 2; // = sizeof(HASH_INFO) + break; + case HA_KEY_ALG_BTREE: + keydef[key].algorithm= HA_KEY_ALG_BTREE; + mem_per_row+=sizeof(TREE_ELEMENT)+pos->key_length+sizeof(char*); + break; + default: + DBUG_ASSERT(0); // cannot happen + } + keydef[key].algorithm= ((pos->algorithm == HA_KEY_ALG_UNDEF) ? 
+ HA_KEY_ALG_HASH : pos->algorithm); + + for (; key_part != key_part_end; key_part++, seg++) + { + Field *field= key_part->field; + if (pos->algorithm == HA_KEY_ALG_BTREE) + seg->type= field->key_type(); + else + { + if ((seg->type = field->key_type()) != (int) HA_KEYTYPE_TEXT) + seg->type= HA_KEYTYPE_BINARY; + } + seg->start= (uint) key_part->offset; + seg->length= (uint) key_part->length; + seg->flag = 0; + seg->charset= field->charset(); + if (field->null_ptr) + { + seg->null_bit= field->null_bit; + seg->null_pos= (uint) (field->null_ptr - (uchar*) table_arg->record[0]); + } + else + { + seg->null_bit= 0; + seg->null_pos= 0; + } + if (field->flags & AUTO_INCREMENT_FLAG && + table_arg->found_next_number_field && + key == table_arg->next_number_index) + { + /* + Store key number and type for found auto_increment key + We have to store type as seg->type can differ from it + */ + auto_key= key+ 1; + auto_key_type= field->key_type(); + } + } + } + mem_per_row+= MY_ALIGN(table_arg->reclength + 1, sizeof(char*)); + HP_CREATE_INFO hp_create_info; + hp_create_info.auto_key= auto_key; + hp_create_info.auto_key_type= auto_key_type; + hp_create_info.auto_increment= (create_info->auto_increment_value ? + create_info->auto_increment_value - 1 : 0); + hp_create_info.max_table_size=current_thd->variables.max_heap_table_size; + max_rows = (ha_rows) (hp_create_info.max_table_size / mem_per_row); + error= heap_create(fn_format(buff,name,"","", + MY_REPLACE_EXT|MY_UNPACK_FILENAME), + table_arg->keys,keydef, table_arg->reclength, + (ulong) ((table_arg->max_rows < max_rows && + table_arg->max_rows) ? + table_arg->max_rows : max_rows), + (ulong) table_arg->min_rows, &hp_create_info); + my_free((gptr) keydef, MYF(0)); + if (file) + info(HA_STATUS_NO_LOCK | HA_STATUS_CONST | HA_STATUS_VARIABLE); + return (error); +} + + +void ha_heap::update_create_info(HA_CREATE_INFO *create_info) +{ + table->file->info(HA_STATUS_AUTO); + if (!(create_info->used_fields & HA_CREATE_USED_AUTO)) + create_info->auto_increment_value= auto_increment_value; +} + +longlong ha_heap::get_auto_increment() +{ + ha_heap::info(HA_STATUS_AUTO); + return auto_increment_value; } diff --git a/sql/ha_heap.h b/sql/ha_heap.h index 31126111d9d..0a087fde1b0 100644 --- a/sql/ha_heap.h +++ b/sql/ha_heap.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000,2004 MySQL AB & MySQL Finland AB & TCX DataKonsult AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -15,7 +15,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_INTERFACE #pragma interface /* gcc class implementation */ #endif @@ -26,9 +26,13 @@ class ha_heap: public handler { HP_INFO *file; - - public: - ha_heap(TABLE *table): handler(table), file(0) {} + key_map btree_keys; + /* number of records changed since last statistics update */ + uint records_changed; + uint key_stat_version; +public: + ha_heap(TABLE *table): handler(table), file(0), records_changed(0), + key_stat_version(0) {} ~ha_heap() {} const char *table_type() const { return "HEAP"; } const char *index_type(uint inx) @@ -39,57 +43,58 @@ class ha_heap: public handler const char **bas_ext() const; ulong table_flags() const { - return (HA_READ_RND_SAME | HA_NO_INDEX | HA_KEYPOS_TO_RNDPOS | - HA_NO_BLOBS | HA_NULL_KEY | HA_REC_NOT_IN_SEQ | - HA_NO_AUTO_INCREMENT); + return (HA_FAST_KEY_READ | HA_NO_BLOBS | HA_NULL_IN_KEY | 
+ HA_REC_NOT_IN_SEQ | HA_READ_RND_SAME | + HA_CAN_INSERT_DELAYED); } - ulong index_flags(uint inx) const + ulong index_flags(uint inx, uint part, bool all_parts) const { return ((table->key_info[inx].algorithm == HA_KEY_ALG_BTREE) ? - (HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER) : - (HA_ONLY_WHOLE_INDEX | HA_WRONG_ASCII_ORDER | - HA_NOT_READ_PREFIX_LAST)); + HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | HA_READ_RANGE : + HA_ONLY_WHOLE_INDEX); } - uint max_record_length() const { return HA_MAX_REC_LENGTH; } - uint max_keys() const { return MAX_KEY; } - uint max_key_parts() const { return MAX_REF_PARTS; } - uint max_key_length() const { return HA_MAX_REC_LENGTH; } + const key_map *keys_to_use_for_scanning() { return &btree_keys; } + uint max_supported_keys() const { return MAX_KEY; } + uint max_supported_key_part_length() const { return MAX_KEY_LENGTH; } double scan_time() { return (double) (records+deleted) / 20.0+10; } double read_time(uint index, uint ranges, ha_rows rows) { return (double) rows / 20.0+1; } - virtual bool fast_key_read() { return 1;} int open(const char *name, int mode, uint test_if_locked); int close(void); + void set_keys_for_scanning(void); int write_row(byte * buf); int update_row(const byte * old_data, byte * new_data); int delete_row(const byte * buf); + longlong get_auto_increment(); int index_read(byte * buf, const byte * key, uint key_len, enum ha_rkey_function find_flag); int index_read_idx(byte * buf, uint idx, const byte * key, uint key_len, enum ha_rkey_function find_flag); + int index_read_last(byte * buf, const byte * key, uint key_len); int index_next(byte * buf); int index_prev(byte * buf); int index_first(byte * buf); int index_last(byte * buf); - int rnd_init(bool scan=1); + int rnd_init(bool scan); int rnd_next(byte *buf); int rnd_pos(byte * buf, byte *pos); void position(const byte *record); void info(uint); int extra(enum ha_extra_function operation); - int reset(void); int external_lock(THD *thd, int lock_type); int delete_all_rows(void); - ha_rows records_in_range(int inx, const byte *start_key,uint start_key_len, - enum ha_rkey_function start_search_flag, - const byte *end_key,uint end_key_len, - enum ha_rkey_function end_search_flag); + int disable_indexes(uint mode); + int enable_indexes(uint mode); + int indexes_are_disabled(void); + ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key); int delete_table(const char *from); int rename_table(const char * from, const char * to); int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info); + void update_create_info(HA_CREATE_INFO *create_info); THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type); - +private: + void update_key_stats(); }; diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc index d994c76fb3f..8455bbaf4d0 100644 --- a/sql/ha_innodb.cc +++ b/sql/ha_innodb.cc @@ -19,7 +19,16 @@ InnoDB NOTE: You can only use noninlined InnoDB functions in this file, because we have disables the InnoDB inlining in this file. */ -#ifdef __GNUC__ +/* TODO list for the InnoDB handler in 4.1: + - Remove the flag innodb_active_trans from thd and replace it with a + function call innodb_active_trans(thd), which looks at the InnoDB + trx struct state field + - Find out what kind of problems the OS X case-insensitivity causes to + table and database names; should we 'normalize' the names like we do + in Windows? 
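Editor's note: set_keys_for_scanning() and the new keys_to_use_for_scanning() override shown earlier amount to a per-index bitmap: only B-tree keys are marked as usable for ordered scans, hash keys are left out. A rough standalone equivalent, using std::bitset instead of MySQL's key_map and with invented names:

#include <bitset>
#include <cstddef>
#include <vector>

enum class KeyAlg { HASH, BTREE };          // simplified stand-in for HA_KEY_ALG_*

// Returns a bitmap with one bit set per index that supports ordered scans.
std::bitset<64> scannable_keys(const std::vector<KeyAlg> &key_algorithms)
{
  std::bitset<64> keys;                     // 64 is an arbitrary cap for the sketch
  for (std::size_t i = 0; i < key_algorithms.size() && i < keys.size(); ++i)
    if (key_algorithms[i] == KeyAlg::BTREE) // hash indexes cannot deliver ordered ranges
      keys.set(i);
  return keys;
}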
+*/ + +#ifdef USE_PRAGMA_IMPLEMENTATION #pragma implementation // gcc: Class implementation #endif @@ -28,16 +37,17 @@ have disables the InnoDB inlining in this file. */ #ifdef HAVE_INNOBASE_DB #include <m_ctype.h> -#include <assert.h> #include <hash.h> #include <myisampack.h> #include <mysys_err.h> +#include <my_sys.h> #define MAX_ULONG_BIT ((ulong) 1 << (sizeof(ulong)*8-1)) #include "ha_innodb.h" pthread_mutex_t innobase_mutex; +bool innodb_inited= 0; /* Store MySQL definition of 'byte': in Linux it is char while InnoDB uses unsigned char; the header univ.i which we include next defines @@ -57,6 +67,7 @@ extern "C" { #include "../innobase/include/trx0roll.h" #include "../innobase/include/trx0trx.h" #include "../innobase/include/trx0sys.h" +#include "../innobase/include/mtr0mtr.h" #include "../innobase/include/row0ins.h" #include "../innobase/include/row0mysql.h" #include "../innobase/include/row0sel.h" @@ -64,17 +75,16 @@ extern "C" { #include "../innobase/include/log0log.h" #include "../innobase/include/lock0lock.h" #include "../innobase/include/dict0crea.h" -#include "../innobase/include/dict0dict.h" #include "../innobase/include/btr0cur.h" #include "../innobase/include/btr0btr.h" #include "../innobase/include/fsp0fsp.h" #include "../innobase/include/sync0sync.h" +#include "../innobase/include/fil0fil.h" } #define HA_INNOBASE_ROWS_IN_TABLE 10000 /* to get optimization right */ #define HA_INNOBASE_RANGE_COUNT 100 -bool innodb_skip = 0; uint innobase_init_flags = 0; ulong innobase_cache_size = 0; @@ -83,9 +93,11 @@ are declared in mysqld.cc: */ long innobase_mirrored_log_groups, innobase_log_files_in_group, innobase_log_file_size, innobase_log_buffer_size, + innobase_buffer_pool_awe_mem_mb, innobase_buffer_pool_size, innobase_additional_mem_pool_size, innobase_file_io_threads, innobase_lock_wait_timeout, - innobase_thread_concurrency, innobase_force_recovery; + innobase_thread_concurrency, innobase_force_recovery, + innobase_open_files; /* The default values for the following char* start-up parameters are determined in innobase_init below: */ @@ -93,7 +105,7 @@ are determined in innobase_init below: */ char* innobase_data_home_dir = NULL; char* innobase_data_file_path = NULL; char* innobase_log_group_home_dir = NULL; -char* innobase_log_arch_dir = NULL; +char* innobase_log_arch_dir = NULL;/* unused */ /* The following has a misleading name: starting from 4.0.5, this also affects Windows: */ char* innobase_unix_file_flush_method = NULL; @@ -102,9 +114,14 @@ char* innobase_unix_file_flush_method = NULL; values */ uint innobase_flush_log_at_trx_commit = 1; -my_bool innobase_log_archive = FALSE; +my_bool innobase_log_archive = FALSE;/* unused */ my_bool innobase_use_native_aio = FALSE; my_bool innobase_fast_shutdown = TRUE; +my_bool innobase_very_fast_shutdown = FALSE; /* this can be set to + 1 just prior calling + innobase_end() */ +my_bool innobase_file_per_table = FALSE; +my_bool innobase_locks_unsafe_for_binlog = FALSE; my_bool innobase_create_status_file = FALSE; static char *internal_innobase_data_file_path = NULL; @@ -131,7 +148,6 @@ static mysql_byte* innobase_get_key(INNOBASE_SHARE *share,uint *length, my_bool not_used __attribute__((unused))); static INNOBASE_SHARE *get_share(const char *table_name); static void free_share(INNOBASE_SHARE *share); -static void innobase_print_error(const char* db_errpfx, char* buffer); /* General functions */ @@ -262,15 +278,15 @@ convert_error_code_to_mysql( } else if (error == (int) DB_LOCK_WAIT_TIMEOUT) { - /* Since we rolled back the whole 
transaction, we must - tell it also to MySQL so that MySQL knows to empty the - cached binlog for this transaction */ + /* Since we rolled back the whole transaction, we must + tell it also to MySQL so that MySQL knows to empty the + cached binlog for this transaction */ - if (thd) { - ha_rollback(thd); - } + if (thd) { + ha_rollback(thd); + } - return(HA_ERR_LOCK_WAIT_TIMEOUT); + return(HA_ERR_LOCK_WAIT_TIMEOUT); } else if (error == (int) DB_NO_REFERENCED_ROW) { @@ -315,6 +331,16 @@ convert_error_code_to_mysql( } else if (error == (int) DB_NO_SAVEPOINT) { return(HA_ERR_NO_SAVEPOINT); + } else if (error == (int) DB_LOCK_TABLE_FULL) { + /* Since we rolled back the whole transaction, we must + tell it also to MySQL so that MySQL knows to empty the + cached binlog for this transaction */ + + if (thd) { + ha_rollback(thd); + } + + return(HA_ERR_LOCK_TABLE_FULL); } else { return(-1); // Unknown error } @@ -402,7 +428,7 @@ innobase_mysql_print_thd( May 14, 2004 probably no race any more, but better be safe */ } - + /* Use strmake to reduce the timeframe for a race, compared to fwrite() */ i= (uint) (strmake(buf, s, len) - buf); @@ -413,7 +439,36 @@ innobase_mysql_print_thd( putc('\n', f); } -#ifndef __NETWARE__ +/********************************************************************** +Compares NUL-terminated UTF-8 strings case insensitively. + +NOTE that the exact prototype of this function has to be in +/innobase/dict/dict0dict.c! */ +extern "C" +int +innobase_strcasecmp( +/*================*/ + /* out: 0 if a=b, <0 if a<b, >1 if a>b */ + const char* a, /* in: first string to compare */ + const char* b) /* in: second string to compare */ +{ + return(my_strcasecmp(system_charset_info, a, b)); +} + +/********************************************************************** +Makes all characters in a NUL-terminated UTF-8 string lower case. + +NOTE that the exact prototype of this function has to be in +/innobase/dict/dict0dict.c! */ +extern "C" +void +innobase_casedn_str( +/*================*/ + char* a) /* in/out: string to put in lower case */ +{ + my_casedn_str(system_charset_info, a); +} + /************************************************************************* Creates a temporary file. */ extern "C" @@ -457,7 +512,6 @@ innobase_mysql_tmpfile(void) } return(fd2); } -#endif /* !__NETWARE__ */ /************************************************************************* Gets the InnoDB transaction handle for a MySQL handler object, creates @@ -472,7 +526,7 @@ check_trx_exists( { trx_t* trx; - ut_a(thd == current_thd); + ut_ad(thd == current_thd); trx = (trx_t*) thd->transaction.all.innobase_tid; @@ -552,7 +606,7 @@ uncommitted change to TBL. 2) When a change to TBL commits, InnoDB stores the current value of its global trx id counter, let us denote it by INV_TRX_ID, to the table object in the InnoDB data dictionary, and does only allow such transactions whose -id >= INV_TRX_ID to use the query cache. +id <= INV_TRX_ID to use the query cache. 3) When InnoDB does an INSERT/DELETE/UPDATE to a table TBL, or an implicit modification because an ON DELETE CASCADE, we invalidate the MySQL query cache @@ -616,7 +670,6 @@ innobase_query_caching_of_table_permitted( { ibool is_autocommit; trx_t* trx; - char* ptr; char norm_name[1000]; ut_a(full_name_len < 999); @@ -644,18 +697,24 @@ innobase_query_caching_of_table_permitted( } - if (is_autocommit && trx->conc_state == TRX_NOT_STARTED) { - /* We are going to retrieve the query result from the - query cache. 
This cannot be a store operation because then - we would have started the trx already. - - We can imagine we instantaneously serialize - this consistent read trx to the current trx id counter. - If trx2 would have changed the tables of a query - result stored in the cache, and trx2 would have already - committed, making the result obsolete, then trx2 would have - already invalidated the cache. Thus we can trust the result - in the cache is ok for this query. */ + if (is_autocommit && trx->n_mysql_tables_in_use == 0) { + /* We are going to retrieve the query result from the query + cache. This cannot be a store operation to the query cache + because then MySQL would have locks on tables already. + + TODO: if the user has used LOCK TABLES to lock the table, + then we open a transaction in the call of row_.. below. + That trx can stay open until UNLOCK TABLES. The same problem + exists even if we do not use the query cache. MySQL should be + modified so that it ALWAYS calls some cleanup function when + the processing of a query ends! + + We can imagine we instantaneously serialize this consistent + read trx to the current trx id counter. If trx2 would have + changed the tables of a query result stored in the cache, and + trx2 would have already committed, making the result obsolete, + then trx2 would have already invalidated the cache. Thus we + can trust the result in the cache is ok for this query. */ return((my_bool)TRUE); } @@ -668,23 +727,21 @@ innobase_query_caching_of_table_permitted( separator between db and table */ norm_name[full_name_len] = '\0'; #ifdef __WIN__ - /* Put to lower case */ + innobase_casedn_str(norm_name); +#endif + /* The call of row_search_.. will start a new transaction if it is + not yet started */ - ptr = norm_name; + thd->transaction.all.innodb_active_trans = 1; - while (*ptr != '\0') { - *ptr = tolower(*ptr); - ptr++; - } -#endif if (row_search_check_if_query_cache_permitted(trx, norm_name)) { - printf("Query cache for %s permitted\n", norm_name); + /* printf("Query cache for %s permitted\n", norm_name); */ return((my_bool)TRUE); } - printf("Query cache for %s NOT permitted\n", norm_name); + /* printf("Query cache for %s NOT permitted\n", norm_name); */ return((my_bool)FALSE); } @@ -715,15 +772,35 @@ innobase_invalidate_query_cache( } /********************************************************************* -Get the quote character to be used in SQL identifiers. */ +Get the quote character to be used in SQL identifiers. +This definition must match the one in innobase/ut/ut0ut.c! */ extern "C" -char -mysql_get_identifier_quote_char(void) -/*=================================*/ +int +mysql_get_identifier_quote_char( +/*============================*/ /* out: quote character to be - used in SQL identifiers */ + used in SQL identifiers; EOF if none */ + trx_t* trx, /* in: transaction */ + const char* name, /* in: name to print */ + ulint namelen)/* in: length of name */ { - return '`'; + if (!trx || !trx->mysql_thd) { + return(EOF); + } + return(get_quote_char_for_identifier((THD*) trx->mysql_thd, + name, namelen)); +} + +/************************************************************************** +Obtain a pointer to the MySQL THD object, as in current_thd(). This +definition must match the one in sql/ha_innodb.cc! 
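Editor's note: both innobase_query_caching_of_table_permitted() here and normalize_table_name() later in this patch build the same kind of lookup key: database and table name joined by '/', folded to lower case on case-insensitive filesystems via innobase_casedn_str(). A plain C++ approximation, with invented names and std::tolower standing in for the charset-aware folding:

#include <algorithm>
#include <cctype>
#include <string>

// Build the "db/table" key the engine uses for its dictionary lookups.
std::string normalize_name(const std::string &db, const std::string &table,
                           bool case_insensitive_fs)
{
  std::string key = db + "/" + table;          // '/' separates database and table
  if (case_insensitive_fs)                     // e.g. Windows: fold to lower case
    std::transform(key.begin(), key.end(), key.begin(),
                   [](unsigned char c) { return std::tolower(c); });
  return key;
}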
*/ +extern "C" +void* +innobase_current_thd(void) +/*======================*/ + /* out: MySQL THD object */ +{ + return(current_thd); } /********************************************************************* @@ -761,6 +838,10 @@ ha_innobase::init_table_handle_for_HANDLER(void) trx_assign_read_view(prebuilt->trx); + /* Set the MySQL flag to mark that there is an active transaction */ + + current_thd->transaction.all.innodb_active_trans = 1; + /* We did the necessary inits in this function, no need to repeat them in row_search_for_mysql */ @@ -774,7 +855,7 @@ ha_innobase::init_table_handle_for_HANDLER(void) /* Always fetch all columns in the index record */ - prebuilt->hint_no_need_to_fetch_extra_cols = FALSE; + prebuilt->hint_need_to_fetch_extra_cols = ROW_RETRIEVE_ALL_COLS; /* We want always to fetch all columns in the whole row? Or do we???? */ @@ -792,7 +873,7 @@ innobase_init(void) /*===============*/ /* out: TRUE if error */ { - static char current_dir[3]; // Set if using current lib + static char current_dir[3]; /* Set if using current lib */ int err; bool ret; char *default_path; @@ -807,7 +888,7 @@ innobase_init(void) Note that when using the embedded server, the datadirectory is not necessarily the current directory of this program. */ - if (mysql_embedded) { + if (mysqld_embedded) { default_path = mysql_real_data_home; fil_path_to_mysql_datadir = mysql_real_data_home; } else { @@ -826,7 +907,7 @@ innobase_init(void) srv_set_thread_priorities = TRUE; srv_query_thread_priority = QUERY_PRIOR; } - + /* Set InnoDB initialization parameters according to the values read from MySQL .cnf file */ @@ -872,7 +953,8 @@ innobase_init(void) if (!innobase_log_group_home_dir) { innobase_log_group_home_dir = default_path; } - + +#ifdef UNIV_LOG_ARCHIVE /* Since innodb_log_arch_dir has no relevance under MySQL, starting from 4.0.6 we always set it the same as innodb_log_group_home_dir: */ @@ -880,6 +962,7 @@ innobase_init(void) innobase_log_arch_dir = innobase_log_group_home_dir; srv_arch_dir = innobase_log_arch_dir; +#endif /* UNIG_LOG_ARCHIVE */ ret = (bool) srv_parse_log_group_home_dirs(innobase_log_group_home_dir, @@ -901,11 +984,31 @@ innobase_init(void) srv_n_log_files = (ulint) innobase_log_files_in_group; srv_log_file_size = (ulint) innobase_log_file_size; +#ifdef UNIV_LOG_ARCHIVE srv_log_archive_on = (ulint) innobase_log_archive; +#endif /* UNIV_LOG_ARCHIVE */ srv_log_buffer_size = (ulint) innobase_log_buffer_size; srv_flush_log_at_trx_commit = (ulint) innobase_flush_log_at_trx_commit; - srv_pool_size = (ulint) innobase_buffer_pool_size; + /* We set srv_pool_size here in units of 1 kB. InnoDB internally + changes the value so that it becomes the number of database pages. */ + + if (innobase_buffer_pool_awe_mem_mb == 0) { + /* Careful here: we first convert the signed long int to ulint + and only after that divide */ + + srv_pool_size = ((ulint) innobase_buffer_pool_size) / 1024; + } else { + srv_use_awe = TRUE; + srv_pool_size = (ulint) + (1024 * innobase_buffer_pool_awe_mem_mb); + srv_awe_window_size = (ulint) innobase_buffer_pool_size; + + /* Note that what the user specified as + innodb_buffer_pool_size is actually the AWE memory window + size in this case, and the real buffer pool size is + determined by .._awe_mem_mb. 
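Editor's note: the innobase_init() hunk above keeps srv_pool_size in units of 1 kB. Without AWE it is the configured buffer pool size divided by 1024; with AWE it becomes 1024 * innodb_buffer_pool_awe_mem_mb, while the configured "buffer pool size" is reused as the AWE window. The sketch below only mirrors that arithmetic with invented names and makes no claim about the units InnoDB uses for the window internally.

#include <cstdint>

struct PoolSizing {
  uint64_t pool_size_kb = 0;   // counterpart of srv_pool_size (units of 1 kB)
  uint64_t awe_window   = 0;   // counterpart of srv_awe_window_size
  bool     use_awe      = false;
};

PoolSizing compute_pool_sizing(uint64_t buffer_pool_size,   // bytes, as configured
                               uint64_t awe_mem_mb)         // 0 means AWE is off
{
  PoolSizing s;
  if (awe_mem_mb == 0) {
    // Already unsigned here; the hunk first casts to ulint, then divides.
    s.pool_size_kb = buffer_pool_size / 1024;
  } else {
    s.use_awe      = true;
    s.pool_size_kb = 1024 * awe_mem_mb;   // real pool size comes from ..._awe_mem_mb
    s.awe_window   = buffer_pool_size;    // configured size is reused as the window
  }
  return s;
}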
*/ + } srv_mem_pool_size = (ulint) innobase_additional_mem_pool_size; @@ -916,24 +1019,31 @@ innobase_init(void) srv_force_recovery = (ulint) innobase_force_recovery; srv_fast_shutdown = (ibool) innobase_fast_shutdown; + + srv_file_per_table = (ibool) innobase_file_per_table; + srv_locks_unsafe_for_binlog = (ibool) innobase_locks_unsafe_for_binlog; + + srv_max_n_open_files = (ulint) innobase_open_files; srv_innodb_status = (ibool) innobase_create_status_file; - srv_print_verbose_log = mysql_embedded ? 0 : 1; + srv_print_verbose_log = mysqld_embedded ? 0 : 1; - if (my_isspace(default_charset_info, (char)0xA0)) { - dict_char_0xA0_is_space = TRUE; - } + /* Store the default charset-collation number of this MySQL + installation */ - if (strcmp(default_charset_info->name, "latin1") == 0) { + data_mysql_default_charset_coll = (ulint)default_charset_info->number; - /* Store the character ordering table to InnoDB. - For non-latin1 charsets we use the MySQL comparison - functions, and consequently we do not need to know - the ordering internally in InnoDB. */ + data_mysql_latin1_swedish_charset_coll = + (ulint)my_charset_latin1.number; - memcpy(srv_latin1_ordering, - default_charset_info->sort_order, 256); - } + /* Store the latin1_swedish_ci character ordering table to InnoDB. For + non-latin1_swedish_ci charsets we use the MySQL comparison functions, + and consequently we do not need to know the ordering internally in + InnoDB. */ + + ut_a(0 == strcmp((char*)my_charset_latin1.name, + (char*)"latin1_swedish_ci")); + memcpy(srv_latin1_ordering, my_charset_latin1.sort_order, 256); /* Since we in this module access directly the fields of a trx struct, and due to different headers and flags it might happen that @@ -950,9 +1060,10 @@ innobase_init(void) DBUG_RETURN(1); } - (void) hash_init(&innobase_open_tables,32,0,0, - (hash_get_key) innobase_get_key,0,0); - pthread_mutex_init(&innobase_mutex,MY_MUTEX_INIT_FAST); + (void) hash_init(&innobase_open_tables,system_charset_info, 32, 0, 0, + (hash_get_key) innobase_get_key, 0, 0); + pthread_mutex_init(&innobase_mutex, MY_MUTEX_INIT_FAST); + innodb_inited= 1; /* If this is a replication slave and we needed to do a crash recovery, set the master binlog position to what InnoDB internally knew about @@ -980,7 +1091,7 @@ innobase_end(void) /*==============*/ /* out: TRUE if error */ { - int err; + int err= 0; DBUG_ENTER("innobase_end"); @@ -989,22 +1100,31 @@ innobase_end(void) set_panic_flag_for_netware(); } #endif - err = innobase_shutdown_for_mysql(); - hash_free(&innobase_open_tables); - my_free(internal_innobase_data_file_path,MYF(MY_ALLOW_ZERO_PTR)); - pthread_mutex_destroy(&innobase_mutex); - - if (err != DB_SUCCESS) { - - DBUG_RETURN(1); + if (innodb_inited) + { + if (innobase_very_fast_shutdown) { + srv_very_fast_shutdown = TRUE; + fprintf(stderr, +"InnoDB: MySQL has requested a very fast shutdown without flushing\n" +"InnoDB: the InnoDB buffer pool to data files. At the next mysqld startup\n" +"InnoDB: InnoDB will do a crash recovery!\n"); + + } + + innodb_inited= 0; + if (innobase_shutdown_for_mysql() != DB_SUCCESS) + err= 1; + hash_free(&innobase_open_tables); + my_free(internal_innobase_data_file_path,MYF(MY_ALLOW_ZERO_PTR)); + pthread_mutex_destroy(&innobase_mutex); } - DBUG_RETURN(0); + DBUG_RETURN(err); } /******************************************************************** -Flushes InnoDB logs to disk and makes a checkpoint. Really, a commit -flushes logs, and the name of this function should be innobase_checkpoint. 
*/ +Flushes InnoDB logs to disk and makes a checkpoint. Really, a commit flushes +the logs, and the name of this function should be innobase_checkpoint. */ bool innobase_flush_logs(void) @@ -1020,17 +1140,6 @@ innobase_flush_logs(void) DBUG_RETURN(result); } -/************************************************************************* -Gets the free space in an InnoDB database: returned in units of kB. */ - -uint -innobase_get_free_space(void) -/*=========================*/ - /* out: free space in kB */ -{ - return((uint) fsp_get_available_space_in_free_extents(0)); -} - /********************************************************************* Commits a transaction in an InnoDB database. */ @@ -1039,28 +1148,68 @@ innobase_commit_low( /*================*/ trx_t* trx) /* in: transaction handle */ { - if (trx->conc_state == TRX_NOT_STARTED) { + if (trx->conc_state == TRX_NOT_STARTED) { - return; - } + return; + } +#ifdef HAVE_REPLICATION if (current_thd->slave_thread) { /* Update the replication position info inside InnoDB */ trx->mysql_master_log_file_name - = active_mi->rli.master_log_name; - trx->mysql_master_log_pos = (ib_longlong) -#if MYSQL_VERSION_ID < 40100 - (active_mi->rli.future_master_log_pos); -#else - (active_mi->rli.future_group_master_log_pos); -#endif + = active_mi->rli.group_master_log_name; + trx->mysql_master_log_pos= ((ib_longlong) + active_mi->rli.future_group_master_log_pos); } +#endif /* HAVE_REPLICATION */ trx_commit_for_mysql(trx); } /********************************************************************* +Creates an InnoDB transaction struct for the thd if it does not yet have one. +Starts a new InnoDB transaction if a transaction is not yet started. And +assigns a new snapshot for a consistent read if the transaction does not yet +have one. */ + +int +innobase_start_trx_and_assign_read_view( +/*====================================*/ + /* out: 0 */ + THD* thd) /* in: MySQL thread handle of the user for whom + the transaction should be committed */ +{ + trx_t* trx; + + DBUG_ENTER("innobase_start_trx_and_assign_read_view"); + + /* Create a new trx struct for thd, if it does not yet have one */ + + trx = check_trx_exists(thd); + + /* This is just to play safe: release a possible FIFO ticket and + search latch. Since we will reserve the kernel mutex, we have to + release the search system latch first to obey the latching order. */ + + innobase_release_stat_resources(trx); + + /* If the transaction is not started yet, start it */ + + trx_start_if_not_started_noninline(trx); + + /* Assign a read view if the transaction does not have it yet */ + + trx_assign_read_view(trx); + + /* Set the MySQL flag to mark that there is an active transaction */ + + current_thd->transaction.all.innodb_active_trans = 1; + + DBUG_RETURN(0); +} + +/********************************************************************* Commits a transaction in an InnoDB database or marks an SQL statement ended. 
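Editor's note: innobase_commit(), in the next hunk, decides between committing the whole transaction and merely marking the statement as ended. The full commit happens when the caller passed the real trx handle, or when neither OPTION_NOT_AUTOCOMMIT nor OPTION_BEGIN is set, i.e. autocommit is effectively on. Reduced to a predicate with invented parameter names:

// Sketch only: mirrors the condition on trx_handle and thd->options below.
bool commit_whole_transaction(bool real_trx_handle_passed,   // not the dummy statement handle
                              bool option_not_autocommit,    // SET AUTOCOMMIT = 0 in effect
                              bool option_begin)             // explicit BEGIN / START TRANSACTION
{
  const bool autocommit_on = !(option_not_autocommit || option_begin);
  return real_trx_handle_passed || autocommit_on;
}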
*/ @@ -1072,10 +1221,9 @@ innobase_commit( the transaction should be committed */ void* trx_handle)/* in: InnoDB trx handle or &innodb_dummy_stmt_trx_handle: the latter means - that the current SQL statement ended, and we should - mark the start of a new statement with a savepoint */ + that the current SQL statement ended */ { - trx_t* trx; + trx_t* trx; DBUG_ENTER("innobase_commit"); DBUG_PRINT("trans", ("ending transaction")); @@ -1089,11 +1237,18 @@ innobase_commit( innobase_release_stat_resources(trx); /* The flag thd->transaction.all.innodb_active_trans is set to 1 in - ::external_lock, ::start_stmt, and innobase_savepoint, and it is only - set to 0 in a commit or a rollback. If it is 0 we know there cannot be - resources to be freed and we could return immediately. For the time - being we play safe and do the cleanup though there should be nothing - to clean up. */ + + 1. ::external_lock(), + 2. ::start_stmt(), + 3. innobase_query_caching_of_table_permitted(), + 4. innobase_savepoint(), + 5. ::init_table_handle_for_HANDLER(), + 6. innobase_start_trx_and_assign_read_view() + + and it is only set to 0 in a commit or a rollback. If it is 0 we know + there cannot be resources to be freed and we could return immediately. + For the time being, we play safe and do the cleanup though there should + be nothing to clean up. */ if (thd->transaction.all.innodb_active_trans == 0 && trx->conc_state != TRX_NOT_STARTED) { @@ -1106,10 +1261,16 @@ innobase_commit( if (trx_handle != (void*)&innodb_dummy_stmt_trx_handle || (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))) { + /* We were instructed to commit the whole transaction, or + this is an SQL statement end and autocommit is on */ + innobase_commit_low(trx); thd->transaction.all.innodb_active_trans = 0; } else { + /* We just mark the SQL statement ended and do not do a + transaction commit */ + if (trx->auto_inc_lock) { /* If we had reserved the auto-inc lock for some table in this SQL statement we release it now */ @@ -1136,7 +1297,7 @@ This is called when MySQL writes the binlog entry for the current transaction. Writes to the InnoDB tablespace info which tells where the MySQL binlog entry for the current transaction ended. Also commits the transaction inside InnoDB but does NOT flush InnoDB log files to disk. -To flush you have to call innobase_flush_log_to_disk. We have separated +To flush you have to call innobase_commit_complete(). We have separated flushing to eliminate the bottleneck of LOCK_log in log.cc which disabled InnoDB's group commit capability. */ @@ -1272,7 +1433,6 @@ innobase_rollback_to_savepoint( error = trx_rollback_to_savepoint_for_mysql(trx, savepoint_name, &mysql_binlog_cache_pos); - *binlog_cache_pos = (my_off_t)mysql_binlog_cache_pos; DBUG_RETURN(convert_error_code_to_mysql(error, NULL)); @@ -1330,41 +1490,39 @@ Frees a possible InnoDB trx object associated with the current THD. 
*/ int innobase_close_connection( /*======================*/ - /* out: 0 or error number */ - THD* thd) /* in: handle to the MySQL thread of the user - whose transaction should be rolled back */ + /* out: 0 or error number */ + THD* thd) /* in: handle to the MySQL thread of the user + whose transaction should be rolled back */ { - trx_t* trx; + trx_t* trx; - trx = (trx_t*)thd->transaction.all.innobase_tid; + trx = (trx_t*)thd->transaction.all.innobase_tid; - if (NULL != trx) { - innobase_rollback(thd, (void*)trx); + if (NULL != trx) { + innobase_rollback(thd, (void*)trx); - trx_free_for_mysql(trx); + trx_free_for_mysql(trx); - thd->transaction.all.innobase_tid = NULL; - } + thd->transaction.all.innobase_tid = NULL; + } - return(0); + return(0); } + /***************************************************************************** ** InnoDB database tables *****************************************************************************/ /******************************************************************** -This function is not relevant since we store the tables and indexes -into our own tablespace, not as files, whose extension this function would -give. */ +Gives the file extension of an InnoDB single-table tablespace. */ const char** ha_innobase::bas_ext() const /*========================*/ - /* out: file extension strings, currently not - used */ + /* out: file extension string */ { - static const char* ext[] = {".InnoDB", NullS}; + static const char* ext[] = {".ibd", NullS}; return(ext); } @@ -1411,14 +1569,7 @@ normalize_table_name( norm_name[name_ptr - db_ptr - 1] = '/'; #ifdef __WIN__ - /* Put to lower case */ - - ptr = norm_name; - - while (*ptr != '\0') { - *ptr = tolower(*ptr); - ptr++; - } + innobase_casedn_str(norm_name); #endif } @@ -1435,25 +1586,25 @@ ha_innobase::open( uint test_if_locked) /* in: not used */ { dict_table_t* ib_table; - int error = 0; char norm_name[1000]; + THD* thd; DBUG_ENTER("ha_innobase::open"); UT_NOT_USED(mode); UT_NOT_USED(test_if_locked); + thd = current_thd; normalize_table_name(norm_name, name); user_thd = NULL; last_query_id = (ulong)-1; - active_index = 0; - active_index_before_scan = (uint)-1; /* undefined value */ + if (!(share=get_share(name))) { - if (!(share=get_share(name))) - DBUG_RETURN(1); + DBUG_RETURN(1); + } /* Create buffers for packing the fields of a record. Why table->reclength did not work here? 
Obviously, because char @@ -1467,6 +1618,7 @@ ha_innobase::open( &key_val_buff, upd_and_key_val_buff_len, NullS)) { free_share(share); + DBUG_RETURN(1); } @@ -1485,10 +1637,29 @@ ha_innobase::open( "Look from section 15.1 of http://www.innodb.com/ibman.html\n" "how you can resolve the problem.\n", norm_name); + free_share(share); + my_free((char*) upd_buff, MYF(0)); + my_errno = ENOENT; + DBUG_RETURN(1); + } + + if (ib_table->ibd_file_missing && !thd->tablespace_op) { + ut_print_timestamp(stderr); + fprintf(stderr, " InnoDB error:\n" +"MySQL is trying to open a table handle but the .ibd file for\n" +"table %s does not exist.\n" +"Have you deleted the .ibd file from the database directory under\n" +"the MySQL datadir, or have you used DISCARD TABLESPACE?\n" +"Look from section 15.1 of http://www.innodb.com/ibman.html\n" +"how you can resolve the problem.\n", + norm_name); free_share(share); my_free((char*) upd_buff, MYF(0)); my_errno = ENOENT; + + dict_table_decrement_handle_count(ib_table); + DBUG_RETURN(1); } @@ -1558,7 +1729,7 @@ ha_innobase::open( fprintf(stderr, "InnoDB: Warning: table %s key_used_on_scan is %lu even though there is no\n" "InnoDB: primary key inside InnoDB.\n", - name, (ulint)key_used_on_scan); + name, (ulong)key_used_on_scan); } } @@ -1575,15 +1746,6 @@ ha_innobase::open( DBUG_RETURN(0); } -/********************************************************************* -Does nothing. */ - -void -ha_innobase::initialize(void) -/*=========================*/ -{ -} - /********************************************************************** Closes a handle to an InnoDB table. */ @@ -1685,10 +1847,10 @@ reset_null_bits( extern "C" { /***************************************************************** -InnoDB uses this function is to compare two data fields for which the -data type is such that we must use MySQL code to compare them. NOTE that the -prototype of this function is in rem0cmp.c in InnoDB source code! -If you change this function, remember to update the prototype there! */ +InnoDB uses this function to compare two data fields for which the data type +is such that we must use MySQL code to compare them. NOTE that the prototype +of this function is in rem0cmp.c in InnoDB source code! If you change this +function, remember to update the prototype there! */ int innobase_mysql_cmp( @@ -1696,6 +1858,7 @@ innobase_mysql_cmp( /* out: 1, 0, -1, if a is greater, equal, less than b, respectively */ int mysql_type, /* in: MySQL type */ + uint charset_number, /* in: number of the charset */ unsigned char* a, /* in: data field */ unsigned int a_length, /* in: data field length, not UNIV_SQL_NULL */ @@ -1703,6 +1866,7 @@ innobase_mysql_cmp( unsigned int b_length) /* in: data field length, not UNIV_SQL_NULL */ { + CHARSET_INFO* charset; enum_field_types mysql_tp; int ret; @@ -1719,8 +1883,34 @@ innobase_mysql_cmp( case FIELD_TYPE_MEDIUM_BLOB: case FIELD_TYPE_BLOB: case FIELD_TYPE_LONG_BLOB: - ret = my_sortncmp((const char*) a, a_length, - (const char*) b, b_length); + /* Use the charset number to pick the right charset struct for + the comparison. Since the MySQL function get_charset may be + slow before Bar removes the mutex operation there, we first + look at 2 common charsets directly. 
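Editor's note: the comparison hunk above resolves the collation with two cheap checks (the server default and latin1) before falling back to get_charset(), and treats a missing charset as fatal. The shape of that lookup, detached from the MySQL charset API and with invented types:

#include <cstdio>
#include <cstdlib>
#include <map>
#include <string>

struct Collation { unsigned number; /* ... comparison tables would live here ... */ };

const Collation *find_collation(unsigned number,
                                const Collation *default_coll,
                                const Collation *latin1_coll,
                                const std::map<unsigned, Collation> &registry)
{
  if (number == default_coll->number) return default_coll;   // common case 1
  if (number == latin1_coll->number)  return latin1_coll;    // common case 2
  auto it = registry.find(number);                           // slow path
  if (it == registry.end()) {
    std::fprintf(stderr, "needed collation %u is not available\n", number);
    std::abort();                                            // mirrors the ut_a(0) above
  }
  return &it->second;
}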
*/ + + if (charset_number == default_charset_info->number) { + charset = default_charset_info; + } else if (charset_number == my_charset_latin1.number) { + charset = &my_charset_latin1; + } else { + charset = get_charset(charset_number, MYF(MY_WME)); + + if (charset == NULL) { + fprintf(stderr, +"InnoDB: fatal error: InnoDB needs charset %lu for doing a comparison,\n" +"InnoDB: but MySQL cannot find that charset.\n", (ulong)charset_number); + ut_a(0); + } + } + + /* Starting from 4.1.3, we use strnncollsp() in comparisons of + non-latin1_swedish_ci strings. NOTE that the collation order + changes then: 'b\0\0...' is ordered BEFORE 'b ...'. Users + having indexes on such data need to rebuild their tables! */ + + ret = charset->coll->strnncollsp(charset, + a, a_length, + b, b_length); if (ret < 0) { return(-1); } else if (ret > 0) { @@ -1742,12 +1932,15 @@ inline ulint get_innobase_type_from_mysql_type( /*==============================*/ - /* out: DATA_BINARY, DATA_VARCHAR, ... */ - Field* field) /* in: MySQL field */ + /* out: DATA_BINARY, DATA_VARCHAR, ... */ + ulint* unsigned_flag, /* out: DATA_UNSIGNED if an 'unsigned type'; + at least ENUM and SET, and unsigned integer + types are 'unsigned types' */ + Field* field) /* in: MySQL field */ { - /* The following asserts check that the MySQL type code fits in - 8 bits: this is used in ibuf and also when DATA_NOT_NULL is - ORed to the type */ + /* The following asserts try to check that the MySQL type code fits in + 8 bits: this is used in ibuf and also when DATA_NOT_NULL is ORed to + the type */ DBUG_ASSERT((ulint)FIELD_TYPE_STRING < 256); DBUG_ASSERT((ulint)FIELD_TYPE_VAR_STRING < 256); @@ -1755,25 +1948,46 @@ get_innobase_type_from_mysql_type( DBUG_ASSERT((ulint)FIELD_TYPE_FLOAT < 256); DBUG_ASSERT((ulint)FIELD_TYPE_DECIMAL < 256); + if (field->flags & UNSIGNED_FLAG) { + + *unsigned_flag = DATA_UNSIGNED; + } else { + *unsigned_flag = 0; + } + + if (field->real_type() == FIELD_TYPE_ENUM + || field->real_type() == FIELD_TYPE_SET) { + + /* MySQL has field->type() a string type for these, but the + data is actually internally stored as an unsigned integer + code! */ + + *unsigned_flag = DATA_UNSIGNED; /* MySQL has its own unsigned + flag set to zero, even though + internally this is an unsigned + integer type */ + return(DATA_INT); + } + switch (field->type()) { /* NOTE that we only allow string types in DATA_MYSQL and DATA_VARMYSQL */ - case FIELD_TYPE_VAR_STRING: if (field->flags & BINARY_FLAG) { + case FIELD_TYPE_VAR_STRING: if (field->binary()) { return(DATA_BINARY); } else if (strcmp( - default_charset_info->name, - "latin1") == 0) { + field->charset()->name, + "latin1_swedish_ci") == 0) { return(DATA_VARCHAR); } else { return(DATA_VARMYSQL); } - case FIELD_TYPE_STRING: if (field->flags & BINARY_FLAG) { + case FIELD_TYPE_STRING: if (field->binary()) { return(DATA_FIXBINARY); } else if (strcmp( - default_charset_info->name, - "latin1") == 0) { + field->charset()->name, + "latin1_swedish_ci") == 0) { return(DATA_CHAR); } else { return(DATA_MYSQL); @@ -1787,8 +2001,6 @@ get_innobase_type_from_mysql_type( case FIELD_TYPE_DATETIME: case FIELD_TYPE_YEAR: case FIELD_TYPE_NEWDATE: - case FIELD_TYPE_ENUM: - case FIELD_TYPE_SET: case FIELD_TYPE_TIME: case FIELD_TYPE_TIMESTAMP: return(DATA_INT); @@ -1811,6 +2023,22 @@ get_innobase_type_from_mysql_type( } /*********************************************************************** +Writes an unsigned integer value < 64k to 2 bytes, in the little-endian +storage format. 
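Editor's note: innobase_write_to_2_little_endian(), defined right below, stores a length < 64k low byte first; the two bytes later prefix BLOB/TEXT key values. For illustration, a self-contained round trip with a matching read helper (the read side is added here only for the sketch):

#include <cassert>
#include <cstdint>

void write_2_little_endian(unsigned char *buf, uint32_t val)
{
  assert(val < 256 * 256);                           // must fit in two bytes
  buf[0] = static_cast<unsigned char>(val & 0xFF);   // low byte first
  buf[1] = static_cast<unsigned char>(val >> 8);     // then the high byte
}

uint32_t read_2_little_endian(const unsigned char *buf)
{
  return static_cast<uint32_t>(buf[0]) | (static_cast<uint32_t>(buf[1]) << 8);
}

int main()
{
  unsigned char buf[2];
  write_2_little_endian(buf, 1000);                  // e.g. a key prefix length
  assert(read_2_little_endian(buf) == 1000);
  return 0;
}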
*/ +inline +void +innobase_write_to_2_little_endian( +/*==============================*/ + byte* buf, /* in: where to store */ + ulint val) /* in: value to write, must be < 64k */ +{ + ut_a(val < 256 * 256); + + buf[0] = (byte)(val & 0xFF); + buf[1] = (byte)(val / 256); +} + +/*********************************************************************** Stores a key value for a row to a buffer. */ uint @@ -1829,8 +2057,6 @@ ha_innobase::store_key_val_for_row( char* buff_start = buff; enum_field_types mysql_type; Field* field; - ulint blob_len; - byte* blob_data; ibool is_null; DBUG_ENTER("store_key_val_for_row"); @@ -1879,13 +2105,25 @@ ha_innobase::store_key_val_for_row( || mysql_type == FIELD_TYPE_BLOB || mysql_type == FIELD_TYPE_LONG_BLOB) { + CHARSET_INFO* cs; + ulint key_len; + ulint len; + ulint true_len; + int error=0; + ulint blob_len; + byte* blob_data; + ut_a(key_part->key_part_flag & HA_PART_KEY_SEG); + key_len = key_part->length; + if (is_null) { - buff += key_part->length + 2; + buff += key_len + 2; continue; } + + cs = field->charset(); blob_data = row_mysql_read_blob_ref(&blob_len, (byte*) (record @@ -1894,29 +2132,108 @@ ha_innobase::store_key_val_for_row( ut_a(get_field_offset(table, field) == key_part->offset); - if (blob_len > key_part->length) { - blob_len = key_part->length; + + true_len = blob_len; + + /* For multi byte character sets we need to calculate + the true length of the key */ + + if (key_len > 0 && cs->mbmaxlen > 1) { + true_len = (ulint) cs->cset->well_formed_len(cs, + (const char *) blob_data, + (const char *) blob_data + + blob_len, + key_len / cs->mbmaxlen, + &error); + } + + /* All indexes on BLOB and TEXT are column prefix + indexes, and we may need to truncate the data to be + stored in the key value: */ + + if (true_len > key_len) { + true_len = key_len; } /* MySQL reserves 2 bytes for the length and the storage of the number is little-endian */ - ut_a(blob_len < 256); - *((byte*)buff) = (byte)blob_len; + innobase_write_to_2_little_endian( + (byte*)buff, true_len); buff += 2; - memcpy(buff, blob_data, blob_len); + memcpy(buff, blob_data, true_len); + + /* Note that we always reserve the maximum possible + length of the BLOB prefix in the key value. */ - buff += key_part->length; + buff += key_len; } else { + /* Here we handle all other data types except the + true VARCHAR, BLOB and TEXT. Note that the column + value we store may be also in a column prefix + index. */ + + CHARSET_INFO* cs; + ulint true_len; + ulint key_len; + const mysql_byte* src_start; + int error=0; + enum_field_types real_type; + + key_len = key_part->length; + if (is_null) { - buff += key_part->length; + buff += key_len; continue; } - memcpy(buff, record + key_part->offset, - key_part->length); - buff += key_part->length; + + src_start = record + key_part->offset; + real_type = field->real_type(); + true_len = key_len; + + /* Character set for the field is defined only + to fields whose type is string and real field + type is not enum or set. For these fields check + if character set is multi byte. 
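Editor's note: the "true length" logic above asks the charset for the longest well-formed prefix that fits key_len / mbmaxlen characters, so a column prefix key never ends in the middle of a multi-byte character. The sketch below does the same for UTF-8 only, without the MySQL charset API; it assumes well-formed input and the helper name is invented.

#include <cstddef>
#include <string>

// Byte length of the longest prefix of a well-formed UTF-8 string that
// contains at most max_chars characters.
std::size_t utf8_prefix_bytes(const std::string &s, std::size_t max_chars)
{
  std::size_t bytes = 0, chars = 0;
  while (bytes < s.size() && chars < max_chars) {
    const unsigned char lead = static_cast<unsigned char>(s[bytes]);
    const std::size_t seq_len = (lead < 0x80) ? 1    // ASCII
                              : (lead < 0xE0) ? 2    // 2-byte sequence
                              : (lead < 0xF0) ? 3    // 3-byte sequence
                              :                 4;   // 4-byte sequence
    if (bytes + seq_len > s.size())
      break;                      // incomplete trailing sequence: stop before it
    bytes += seq_len;
    ++chars;
  }
  return bytes;                   // never splits a character
}

A prefix key of key_len bytes over a charset with mbmaxlen bytes per character would then store at most min(utf8_prefix_bytes(value, key_len / mbmaxlen), key_len) bytes, padding the rest with spaces as the non-BLOB branch below does.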
*/ + + if (real_type != FIELD_TYPE_ENUM + && real_type != FIELD_TYPE_SET + && ( mysql_type == MYSQL_TYPE_VAR_STRING + || mysql_type == MYSQL_TYPE_STRING)) { + + cs = field->charset(); + + /* For multi byte character sets we need to + calculate the true length of the key */ + + if (key_len > 0 && cs->mbmaxlen > 1) { + + true_len = (ulint) + cs->cset->well_formed_len(cs, + (const char *)src_start, + (const char *)src_start + + key_len, + key_len / cs->mbmaxlen, + &error); + } + } + + memcpy(buff, src_start, true_len); + buff += true_len; + + /* Pad the unused space with spaces. Note that no + padding is ever needed for UCS-2 because in MySQL, + all UCS2 characters are 2 bytes, as MySQL does not + support surrogate pairs, which are needed to represent + characters in the range U+10000 to U+10FFFF. */ + + if (true_len < key_len) { + ulint pad_len = key_len - true_len; + memset(buff, ' ', pad_len); + buff += pad_len; + } } } @@ -1926,7 +2243,8 @@ ha_innobase::store_key_val_for_row( } /****************************************************************** -Builds a template to the prebuilt struct. */ +Builds a 'template' to the prebuilt struct. The template is used in fast +retrieval of just those column values MySQL needs in its processing. */ static void build_template( @@ -1946,13 +2264,23 @@ build_template( ulint n_fields; ulint n_requested_fields = 0; ibool fetch_all_in_key = FALSE; + ibool fetch_primary_key_cols = FALSE; ulint i; - clust_index = dict_table_get_first_index_noninline(prebuilt->table); + if (prebuilt->select_lock_type == LOCK_X) { + /* We always retrieve the whole clustered index record if we + use exclusive row level locks, for example, if the read is + done in an UPDATE statement. */ - if (!prebuilt->hint_no_need_to_fetch_extra_cols) { - /* We have a hint that we should at least fetch all - columns in the key, or all columns in the table */ + templ_type = ROW_MYSQL_WHOLE_ROW; + } + + if (templ_type == ROW_MYSQL_REC_FIELDS) { + if (prebuilt->hint_need_to_fetch_extra_cols + == ROW_RETRIEVE_ALL_COLS) { + + /* We know we must at least fetch all columns in the key, or + all columns in the table */ if (prebuilt->read_just_key) { /* MySQL has instructed us that it is enough to @@ -1964,32 +2292,23 @@ build_template( fetch_all_in_key = TRUE; } else { - /* We are building a temporary table: fetch all - columns; the reason is that MySQL may use the - clustered index key to store rows, but the mechanism - we use below to detect required columns does not - reveal that. Actually, it might be enough to - fetch only all in the key also in this case! */ - templ_type = ROW_MYSQL_WHOLE_ROW; } + } else if (prebuilt->hint_need_to_fetch_extra_cols + == ROW_RETRIEVE_PRIMARY_KEY) { + /* We must at least fetch all primary key cols. Note that if + the clustered index was internally generated by InnoDB on the + row id (no primary key was defined), then + row_search_for_mysql() will always retrieve the row id to a + special buffer in the prebuilt struct. */ + + fetch_primary_key_cols = TRUE; + } } - if (prebuilt->select_lock_type == LOCK_X) { - /* We always retrieve the whole clustered index record if we - use exclusive row level locks, for example, if the read is - done in an UPDATE statement. 
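Editor's note: build_template(), which continues below, records for each column the query touches where the value lives in the engine row and where MySQL expects it, so that row_search_for_mysql() can skip everything else (unless an exclusive lock forces the whole row). A heavily reduced sketch of that idea with invented structures; the real template also tracks types, charsets, NULL bytes and BLOB handling.

#include <cstdint>
#include <vector>

struct ColumnTempl {
  uint32_t col_no;        // column number in the engine's row format
  uint32_t mysql_offset;  // offset of the value in MySQL's record buffer
  uint32_t length;        // value length (fixed-length columns only, for brevity)
};

// Build the list of columns a scan must copy: either just the requested
// ones, or every column when the whole row has to be fetched.
std::vector<ColumnTempl> build_row_template(const std::vector<bool> &requested,
                                            const std::vector<uint32_t> &offsets,
                                            const std::vector<uint32_t> &lengths,
                                            bool fetch_whole_row)
{
  std::vector<ColumnTempl> templ;
  for (uint32_t i = 0; i < requested.size(); ++i)
    if (fetch_whole_row || requested[i])
      templ.push_back({i, offsets[i], lengths[i]});
  return templ;
}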
*/ - - templ_type = ROW_MYSQL_WHOLE_ROW; - } + clust_index = dict_table_get_first_index_noninline(prebuilt->table); if (templ_type == ROW_MYSQL_REC_FIELDS) { - /* In versions < 3.23.50 we always retrieved the clustered - index record if prebuilt->select_lock_type == LOCK_S, - but there is really not need for that, and in some cases - performance could be seriously degraded because the MySQL - optimizer did not know about our convention! */ - index = prebuilt->index; } else { index = clust_index; @@ -2003,7 +2322,7 @@ build_template( the clustered index */ } - n_fields = (ulint)table->fields; + n_fields = (ulint)table->fields; /* number of columns */ if (!prebuilt->mysql_template) { prebuilt->mysql_template = (mysql_row_templ_t*) @@ -2016,17 +2335,18 @@ build_template( prebuilt->templ_contains_blob = FALSE; + /* Note that in InnoDB, i is the column number. MySQL calls columns + 'fields'. */ for (i = 0; i < n_fields; i++) { templ = prebuilt->mysql_template + n_requested_fields; field = table->field[i]; if (templ_type == ROW_MYSQL_REC_FIELDS - && !(fetch_all_in_key - && dict_index_contains_col_or_prefix(index, i)) - && thd->query_id != field->query_id - && thd->query_id != (field->query_id ^ MAX_ULONG_BIT) - && thd->query_id != - (field->query_id ^ (MAX_ULONG_BIT >> 1))) { + && !(fetch_all_in_key + && dict_index_contains_col_or_prefix(index, i)) + && !(fetch_primary_key_cols + && dict_table_col_in_clustered_key(index->table, i)) + && thd->query_id != field->query_id) { /* This field is not needed in the query, skip it */ @@ -2063,8 +2383,11 @@ build_template( get_field_offset(table, field); templ->mysql_col_len = (ulint) field->pack_length(); - templ->type = get_innobase_type_from_mysql_type(field); - templ->is_unsigned = (ulint) (field->flags & UNSIGNED_FLAG); + templ->type = index->table->cols[i].type.mtype; + templ->is_unsigned = index->table->cols[i].type.prtype + & DATA_UNSIGNED; + templ->charset = dtype_get_charset_coll_noninline( + index->table->cols[i].type.prtype); if (templ->type == DATA_BLOB) { prebuilt->templ_contains_blob = TRUE; @@ -2075,7 +2398,7 @@ skip_field: prebuilt->n_template = n_requested_fields; - if (prebuilt->need_to_access_clustered) { + if (index != clust_index && prebuilt->need_to_access_clustered) { /* Change rec_field_no's to correspond to the clustered index record */ for (i = 0; i < n_requested_fields; i++) { @@ -2127,9 +2450,77 @@ ha_innobase::write_row( statistic_increment(ha_write_count, &LOCK_status); - if (table->time_stamp) { - update_timestamp(record + table->time_stamp - 1); - } + if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) + table->timestamp_field->set_time(); + + if ((user_thd->lex->sql_command == SQLCOM_ALTER_TABLE + || user_thd->lex->sql_command == SQLCOM_OPTIMIZE + || user_thd->lex->sql_command == SQLCOM_CREATE_INDEX + || user_thd->lex->sql_command == SQLCOM_DROP_INDEX) + && num_write_row >= 10000) { + /* ALTER TABLE is COMMITted at every 10000 copied rows. + The IX table lock for the original table has to be re-issued. + As this method will be called on a temporary table where the + contents of the original table is being copied to, it is + a bit tricky to determine the source table. The cursor + position in the source table need not be adjusted after the + intermediate COMMIT, since writes by other transactions are + being blocked by a MySQL table lock TL_WRITE_ALLOW_READ. */ + + dict_table_t* src_table; + ibool mode; + + num_write_row = 0; + + /* Commit the transaction. 
This will release the table + locks, so they have to be acquired again. */ + + /* Altering an InnoDB table */ + /* Get the source table. */ + src_table = lock_get_src_table( + prebuilt->trx, prebuilt->table, &mode); + if (!src_table) { + no_commit: + /* Unknown situation: do not commit */ + /* + ut_print_timestamp(stderr); + fprintf(stderr, + " InnoDB error: ALTER TABLE is holding lock" + " on %lu tables!\n", + prebuilt->trx->mysql_n_tables_locked); + */ + ; + } else if (src_table == prebuilt->table) { + /* Source table is not in InnoDB format: + no need to re-acquire locks on it. */ + + /* Altering to InnoDB format */ + innobase_commit(user_thd, prebuilt->trx); + /* Note that this transaction is still active. */ + user_thd->transaction.all.innodb_active_trans = 1; + /* We will need an IX lock on the destination table. */ + prebuilt->sql_stat_start = TRUE; + } else { + /* Ensure that there are no other table locks than + LOCK_IX and LOCK_AUTO_INC on the destination table. */ + if (!lock_is_table_exclusive(prebuilt->table, + prebuilt->trx)) { + goto no_commit; + } + + /* Commit the transaction. This will release the table + locks, so they have to be acquired again. */ + innobase_commit(user_thd, prebuilt->trx); + /* Note that this transaction is still active. */ + user_thd->transaction.all.innodb_active_trans = 1; + /* Re-acquire the table lock on the source table. */ + row_lock_table_for_mysql(prebuilt, src_table, mode); + /* We will need an IX lock on the destination table. */ + prebuilt->sql_stat_start = TRUE; + } + } + + num_write_row++; if (last_query_id != user_thd->query_id) { prebuilt->sql_stat_start = TRUE; @@ -2279,9 +2670,10 @@ ha_innobase::write_row( /* If the insert did not succeed we restore the value of the auto-inc counter we used; note that this behavior was introduced only in version 4.0.4. - NOTE that a REPLACE command handles a duplicate key error + NOTE that a REPLACE command and LOAD DATA INFILE REPLACE + handles a duplicate key error itself, and we must not decrement the autoinc counter - if we are performing a REPLACE statement. + if we are performing those statements. NOTE 2: if there was an error, for example a deadlock, which caused InnoDB to roll back the whole transaction already in the call of row_insert_for_mysql(), we may no @@ -2291,11 +2683,11 @@ ha_innobase::write_row( skip_auto_inc_decr = FALSE; if (error == DB_DUPLICATE_KEY - && (user_thd->lex.sql_command == SQLCOM_REPLACE - || user_thd->lex.sql_command + && (user_thd->lex->sql_command == SQLCOM_REPLACE + || user_thd->lex->sql_command == SQLCOM_REPLACE_SELECT - || (user_thd->lex.sql_command == SQLCOM_LOAD - && user_thd->lex.duplicates == DUP_REPLACE))) { + || (user_thd->lex->sql_command == SQLCOM_LOAD + && user_thd->lex->duplicates == DUP_REPLACE))) { skip_auto_inc_decr= TRUE; } @@ -2334,7 +2726,7 @@ innobase_convert_and_store_changed_col( mysql_byte* data, /* in: column data to store */ ulint len, /* in: data len */ ulint col_type,/* in: data type in InnoDB type numbers */ - ulint is_unsigned)/* in: != 0 if an unsigned integer type */ + ulint prtype) /* InnoDB precise data type and flags */ { uint i; @@ -2342,10 +2734,31 @@ innobase_convert_and_store_changed_col( data = NULL; } else if (col_type == DATA_VARCHAR || col_type == DATA_BINARY || col_type == DATA_VARMYSQL) { - /* Remove trailing spaces */ - while (len > 0 && data[len - 1] == ' ') { - len--; - } + /* Remove trailing spaces. */ + + /* Handle UCS2 strings differently. 
As no new + collations will be introduced in 4.1, we hardcode the + charset-collation codes here. In 5.0, the logic will + be based on mbminlen. */ + ulint cset = dtype_get_charset_coll_noninline(prtype); + if (cset == 35/*ucs2_general_ci*/ + || cset == 90/*ucs2_bin*/ + || (cset >= 128/*ucs2_unicode_ci*/ + && cset <= 144/*ucs2_persian_ci*/)) { + /* space=0x0020 */ + /* Trim "half-chars", just in case. */ + len = len - (len % 2); /* len &= ~1; */ + + while (len && data[len - 2] == 0x00 + && data[len - 1] == 0x20) { + len -= 2; + } + } else { + /* space=0x20 */ + while (len && data[len - 1] == 0x20) { + len--; + } + } } else if (col_type == DATA_INT) { /* Store integer data in InnoDB in a big-endian format, sign bit negated, if signed */ @@ -2354,7 +2767,7 @@ innobase_convert_and_store_changed_col( buf[len - 1 - i] = data[i]; } - if (!is_unsigned) { + if (!(prtype & DATA_UNSIGNED)) { buf[0] = buf[0] ^ 128; } @@ -2397,7 +2810,7 @@ calc_row_difference( byte* buf; upd_field_t* ufield; ulint col_type; - ulint is_unsigned; + ulint prtype; ulint n_changed = 0; uint i; @@ -2421,9 +2834,8 @@ calc_row_difference( o_len = field->pack_length(); n_len = field->pack_length(); - col_type = get_innobase_type_from_mysql_type(field); - is_unsigned = (ulint) (field->flags & UNSIGNED_FLAG); - + col_type = prebuilt->table->cols[i].type.mtype; + prtype = prebuilt->table->cols[i].type.prtype; switch (col_type) { case DATA_BLOB: @@ -2463,10 +2875,9 @@ calc_row_difference( innobase_convert_and_store_changed_col(ufield, (mysql_byte*)buf, (mysql_byte*)n_ptr, n_len, col_type, - is_unsigned); + prtype); ufield->exp = NULL; - ufield->field_no = - (prebuilt->table->cols + i)->clust_pos; + ufield->field_no = prebuilt->table->cols[i].clust_pos; n_changed++; } } @@ -2500,12 +2911,11 @@ ha_innobase::update_row( DBUG_ENTER("ha_innobase::update_row"); - ut_a(prebuilt->trx == + ut_ad(prebuilt->trx == (trx_t*) current_thd->transaction.all.innobase_tid); - if (table->time_stamp) { - update_timestamp(new_row + table->time_stamp - 1); - } + if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) + table->timestamp_field->set_time(); if (last_query_id != user_thd->query_id) { prebuilt->sql_stat_start = TRUE; @@ -2562,7 +2972,7 @@ ha_innobase::delete_row( DBUG_ENTER("ha_innobase::delete_row"); - ut_a(prebuilt->trx == + ut_ad(prebuilt->trx == (trx_t*) current_thd->transaction.all.innobase_tid); if (last_query_id != user_thd->query_id) { @@ -2622,7 +3032,7 @@ ha_innobase::index_end(void) { int error = 0; DBUG_ENTER("index_end"); - + active_index=MAX_KEY; DBUG_RETURN(error); } @@ -2644,6 +3054,7 @@ convert_search_mode_to_innobase( case HA_READ_BEFORE_KEY: return(PAGE_CUR_L); case HA_READ_PREFIX: return(PAGE_CUR_GE); case HA_READ_PREFIX_LAST: return(PAGE_CUR_LE); + case HA_READ_PREFIX_LAST_OR_PREV:return(PAGE_CUR_LE); /* In MySQL-4.0 HA_READ_PREFIX and HA_READ_PREFIX_LAST always pass a complete-field prefix of a key value as the search tuple. 
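/* A self-contained sketch of the trailing-space trimming shown earlier in
   this hunk: in UCS-2 a space is the big-endian byte pair 0x00 0x20 and the
   value length is kept even, while single-byte charsets simply strip 0x20.
   The helper name is hypothetical. */

typedef unsigned char  byte;
typedef unsigned long  ulint;

static ulint
trim_trailing_spaces(
        const byte*     data,           /* in: column value */
        ulint           len,            /* in: value length in bytes */
        bool            is_ucs2)        /* in: true for a UCS-2 collation */
{
        if (is_ucs2) {
                len -= len % 2;         /* drop a dangling half-character */

                while (len >= 2
                       && data[len - 2] == 0x00 && data[len - 1] == 0x20) {
                        len -= 2;
                }
        } else {
                while (len > 0 && data[len - 1] == 0x20) {
                        len--;
                }
        }

        return(len);
}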
I.e., it is not allowed that the last field would @@ -2745,7 +3156,7 @@ ha_innobase::index_read( DBUG_ENTER("index_read"); - ut_a(prebuilt->trx == + ut_ad(prebuilt->trx == (trx_t*) current_thd->transaction.all.innobase_tid); statistic_increment(ha_read_key_count, &LOCK_status); @@ -2776,7 +3187,7 @@ ha_innobase::index_read( (ulint)upd_and_key_val_buff_len, index, (byte*) key_ptr, - (ulint) key_len); + (ulint) key_len, prebuilt->trx); } else { /* We position the cursor to the last or the first entry in the index */ @@ -2857,7 +3268,8 @@ ha_innobase::change_active_index( statistic_increment(ha_read_key_count, &LOCK_status); DBUG_ENTER("change_active_index"); - ut_a(prebuilt->trx == + ut_ad(user_thd == current_thd); + ut_ad(prebuilt->trx == (trx_t*) current_thd->transaction.all.innobase_tid); active_index = keynr; @@ -2887,11 +3299,13 @@ ha_innobase::change_active_index( dict_index_copy_types(prebuilt->search_tuple, prebuilt->index, prebuilt->index->n_fields); - /* Maybe MySQL changes the active index for a handle also - during some queries, we do not know: then it is safest to build - the template such that all columns will be fetched. */ + /* MySQL changes the active index for a handle also during some + queries, for example SELECT MAX(a), SUM(a) first retrieves the MAX() + and then calculates the sum. Previously we played safe and used + the flag ROW_MYSQL_WHOLE_ROW below, but that caused unnecessary + copying. Starting from MySQL-4.1 we use a more efficient flag here. */ - build_template(prebuilt, NULL, table, ROW_MYSQL_WHOLE_ROW); + build_template(prebuilt, user_thd, table, ROW_MYSQL_REC_FIELDS); DBUG_RETURN(0); } @@ -2943,7 +3357,7 @@ ha_innobase::general_fetch( DBUG_ENTER("general_fetch"); - ut_a(prebuilt->trx == + ut_ad(prebuilt->trx == (trx_t*) current_thd->transaction.all.innobase_tid); innodb_srv_conc_enter_innodb(prebuilt->trx); @@ -3089,8 +3503,6 @@ ha_innobase::rnd_init( /* Store the active index value so that we can restore the original value after a scan */ - active_index_before_scan = active_index; - if (prebuilt->clust_index_was_generated) { err = change_active_index(MAX_KEY); } else { @@ -3110,19 +3522,7 @@ ha_innobase::rnd_end(void) /*======================*/ /* out: 0 or error number */ { - /* Restore the old active_index back; MySQL may assume that a table - scan does not change active_index. We only restore the value if - MySQL has called rnd_init before: sometimes MySQL seems to call - rnd_end WITHOUT calling rnd_init. 
*/ - - if (active_index_before_scan != (uint)-1) { - - change_active_index(active_index_before_scan); - - active_index_before_scan = (uint)-1; - } - - return(index_end()); + return(index_end()); } /********************************************************************* @@ -3177,7 +3577,7 @@ ha_innobase::rnd_pos( statistic_increment(ha_read_rnd_count, &LOCK_status); - ut_a(prebuilt->trx == + ut_ad(prebuilt->trx == (trx_t*) current_thd->transaction.all.innobase_tid); if (prebuilt->clust_index_was_generated) { @@ -3226,7 +3626,7 @@ ha_innobase::position( row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; uint len; - ut_a(prebuilt->trx == + ut_ad(prebuilt->trx == (trx_t*) current_thd->transaction.all.innobase_tid); if (prebuilt->clust_index_was_generated) { @@ -3250,7 +3650,7 @@ ha_innobase::position( if (len != ref_length) { fprintf(stderr, "InnoDB: Error: stored ref len is %lu, but table ref len is %lu\n", - (ulint)len, (ulint)ref_length); + (ulong)len, (ulong)ref_length); } } @@ -3263,7 +3663,15 @@ create_table_def( trx_t* trx, /* in: InnoDB transaction handle */ TABLE* form, /* in: information on table columns and indexes */ - const char* table_name) /* in: table name */ + const char* table_name, /* in: table name */ + const char* path_of_temp_table)/* in: if this is a table explicitly + created by the user with the + TEMPORARY keyword, then this + parameter is the dir path where the + table should be placed if we create + an .ibd file for it (no .ibd extension + in the path, though); otherwise this + is NULL */ { Field* field; dict_table_t* table; @@ -3273,7 +3681,7 @@ create_table_def( ulint nulls_allowed; ulint unsigned_type; ulint binary_type; - ulint nonlatin1_type; + ulint charset_no; ulint i; DBUG_ENTER("create_table_def"); @@ -3281,45 +3689,49 @@ create_table_def( n_cols = form->fields; - /* The '0' below specifies that everything is currently - created in tablespace 0 */ + /* We pass 0 as the space id, and determine at a lower level the space + id where to store the table */ table = dict_mem_table_create((char*) table_name, 0, n_cols); + if (path_of_temp_table) { + table->dir_path_of_temp_table = + mem_heap_strdup(table->heap, path_of_temp_table); + } + for (i = 0; i < n_cols; i++) { field = form->field[i]; - col_type = get_innobase_type_from_mysql_type(field); + col_type = get_innobase_type_from_mysql_type(&unsigned_type, + field); if (field->null_ptr) { nulls_allowed = 0; } else { nulls_allowed = DATA_NOT_NULL; } - if (field->flags & UNSIGNED_FLAG) { - unsigned_type = DATA_UNSIGNED; + if (field->binary()) { + binary_type = DATA_BINARY_TYPE; } else { - unsigned_type = 0; + binary_type = 0; } - if (col_type == DATA_BLOB - && strcmp(default_charset_info->name, "latin1") != 0) { - nonlatin1_type = DATA_NONLATIN1; - } else { - nonlatin1_type = 0; - } + charset_no = 0; - if (field->flags & BINARY_FLAG) { - binary_type = DATA_BINARY_TYPE; - nonlatin1_type = 0; - } else { - binary_type = 0; + if (dtype_is_string_type(col_type)) { + + charset_no = (ulint)field->charset()->number; + + ut_a(charset_no < 256); /* in ut0type.h we assume that + the number fits in one byte */ } dict_mem_table_add_col(table, (char*) field->field_name, - col_type, (ulint)field->type() + col_type, dtype_form_prtype( + (ulint)field->type() | nulls_allowed | unsigned_type - | nonlatin1_type | binary_type, + | binary_type, + + charset_no), field->pack_length(), 0); } @@ -3351,6 +3763,7 @@ create_index( ulint ind_type; ulint col_type; ulint prefix_len; + ulint is_unsigned; ulint i; ulint j; @@ -3362,8 
+3775,7 @@ create_index( ind_type = 0; - if (key_num == form->primary_key) - { + if (key_num == form->primary_key) { ind_type = ind_type | DICT_CLUSTERED; } @@ -3371,8 +3783,8 @@ create_index( ind_type = ind_type | DICT_UNIQUE; } - /* The '0' below specifies that everything in InnoDB is currently - created in tablespace 0 */ + /* We pass 0 as the space id, and determine at a lower level the space + id where to store the table */ index = dict_mem_index_create((char*) table_name, key->name, 0, ind_type, n_fields); @@ -3390,9 +3802,9 @@ create_index( field = form->field[j]; - if (0 == ut_cmp_in_lower_case( - (char*)field->field_name, - (char*)key_part->field->field_name)) { + if (0 == innobase_strcasecmp( + field->field_name, + key_part->field->field_name)) { /* Found the corresponding column */ break; @@ -3401,7 +3813,8 @@ create_index( ut_a(j < form->fields); - col_type = get_innobase_type_from_mysql_type(key_part->field); + col_type = get_innobase_type_from_mysql_type( + &is_unsigned, key_part->field); if (DATA_BLOB == col_type || key_part->length < field->pack_length()) { @@ -3423,10 +3836,6 @@ create_index( prefix_len = 0; } - if (prefix_len >= DICT_MAX_COL_PREFIX_LEN) { - DBUG_RETURN(-1); - } - /* We assume all fields should be sorted in ascending order, hence the '0': */ @@ -3455,8 +3864,8 @@ create_clustered_index_when_no_primary( dict_index_t* index; int error; - /* The first '0' below specifies that everything in InnoDB is - currently created in file space 0 */ + /* We pass 0 as the space id, and determine at a lower level the space + id where to store the table */ index = dict_mem_index_create((char*) table_name, (char*) "GEN_CLUST_INDEX", @@ -3491,6 +3900,7 @@ ha_innobase::create( char name2[FN_REFLEN]; char norm_name[FN_REFLEN]; THD *thd= current_thd; + ib_longlong auto_inc_value; DBUG_ENTER("ha_innobase::create"); @@ -3500,7 +3910,7 @@ ha_innobase::create( /* The limit probably should be REC_MAX_N_FIELDS - 3 = 1020, but we play safe here */ - DBUG_RETURN(HA_ERR_TO_BIG_ROW); + DBUG_RETURN(HA_ERR_TO_BIG_ROW); } /* Get the transaction associated with the current thd, or create one @@ -3532,7 +3942,7 @@ ha_innobase::create( srv_lower_case_table_names = FALSE; } - fn_format(name2, name, "", "",2); // Remove the .frm extension + fn_format(name2, name, "", "", 2); // Remove the .frm extension normalize_table_name(norm_name, name2); @@ -3544,8 +3954,13 @@ ha_innobase::create( /* Create the table definition in InnoDB */ - error = create_table_def(trx, form, norm_name); - + if (create_info->options & HA_LEX_CREATE_TMP_TABLE) { + + error = create_table_def(trx, form, norm_name, name2); + } else { + error = create_table_def(trx, form, norm_name, NULL); + } + if (error) { innobase_commit_low(trx); @@ -3620,11 +4035,19 @@ ha_innobase::create( } if (current_thd->query != NULL) { - - error = row_table_add_foreign_constraints(trx, - current_thd->query, norm_name); + LEX_STRING q; + + if (thd->convert_string(&q, system_charset_info, + current_thd->query, + current_thd->query_length, + current_thd->charset())) { + error = HA_ERR_OUT_OF_MEM; + } else { + error = row_table_add_foreign_constraints(trx, + q.str, norm_name); - error = convert_error_code_to_mysql(error, NULL); + error = convert_error_code_to_mysql(error, NULL); + } if (error) { innobase_commit_low(trx); @@ -3651,6 +4074,20 @@ ha_innobase::create( DBUG_ASSERT(innobase_table != 0); + if ((create_info->used_fields & HA_CREATE_USED_AUTO) && + (create_info->auto_increment_value != 0)) { + + /* Query was ALTER TABLE...AUTO_INCREMENT = x; or + 
CREATE TABLE ...AUTO_INCREMENT = x; Find out a table + definition from the dictionary and get the current value + of the auto increment field. Set a new value to the + auto increment field if the value is greater than the + maximum value in the column. */ + + auto_inc_value = create_info->auto_increment_value; + dict_table_autoinc_initialize(innobase_table, auto_inc_value); + } + /* Tell the InnoDB server that there might be work for utility threads: */ @@ -3662,6 +4099,40 @@ ha_innobase::create( } /********************************************************************* +Discards or imports an InnoDB tablespace. */ + +int +ha_innobase::discard_or_import_tablespace( +/*======================================*/ + /* out: 0 == success, -1 == error */ + my_bool discard) /* in: TRUE if discard, else import */ +{ + row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; + dict_table_t* dict_table; + trx_t* trx; + int err; + + DBUG_ENTER("ha_innobase::discard_or_import_tablespace"); + + ut_a(prebuilt->trx && prebuilt->trx->magic_n == TRX_MAGIC_N); + ut_a(prebuilt->trx == + (trx_t*) current_thd->transaction.all.innobase_tid); + + dict_table = prebuilt->table; + trx = prebuilt->trx; + + if (discard) { + err = row_discard_tablespace_for_mysql(dict_table->name, trx); + } else { + err = row_import_tablespace_for_mysql(dict_table->name, trx); + } + + err = convert_error_code_to_mysql(err, NULL); + + DBUG_RETURN(err); +} + +/********************************************************************* Drops a table from an InnoDB database. Before calling this function, MySQL calls innobase_commit to commit the transaction of the current user. Then the current user cannot have locks set on the table. Drop table @@ -3678,10 +4149,10 @@ ha_innobase::delete_table( int error; trx_t* parent_trx; trx_t* trx; - THD *thd= current_thd; + THD *thd= current_thd; char norm_name[1000]; - DBUG_ENTER("ha_innobase::delete_table"); + DBUG_ENTER("ha_innobase::delete_table"); /* Get the transaction associated with the current thd, or create one if not yet created */ @@ -3724,7 +4195,7 @@ ha_innobase::delete_table( /* Drop the table in InnoDB */ error = row_drop_table_for_mysql(norm_name, trx, - thd->lex.sql_command == SQLCOM_DROP_DB); + thd->lex->sql_command == SQLCOM_DROP_DB); /* Flush the log to reduce probability that the .frm files and the InnoDB data dictionary get out-of-sync if the user runs @@ -3788,8 +4259,8 @@ innobase_drop_database( memcpy(namebuf, ptr, len); namebuf[len] = '/'; namebuf[len + 1] = '\0'; -#ifdef __WIN__ - casedn_str(namebuf); +#ifdef __WIN__ + innobase_casedn_str(namebuf); #endif trx = trx_allocate_for_mysql(); trx->mysql_thd = current_thd; @@ -3861,6 +4332,10 @@ ha_innobase::rename_table( trx->mysql_thd = current_thd; trx->mysql_query_str = &((*current_thd).query); + if (current_thd->options & OPTION_NO_FOREIGN_KEY_CHECKS) { + trx->check_foreigns = FALSE; + } + name_len1 = strlen(from); name_len2 = strlen(to); @@ -3901,18 +4376,11 @@ ha_innobase::records_in_range( /*==========================*/ /* out: estimated number of rows */ - int keynr, /* in: index number */ - const mysql_byte* start_key, /* in: start key value of the - range, may also be empty */ - uint start_key_len, /* in: start key val len, may - also be 0 */ - enum ha_rkey_function start_search_flag,/* in: start search condition - e.g., 'greater than' */ - const mysql_byte* end_key, /* in: range end key val, may - also be empty */ - uint end_key_len, /* in: range end key val len, - may also be 0 */ - enum ha_rkey_function 
end_search_flag)/* in: range end search cond */ + uint keynr, /* in: index number */ + key_range *min_key, /* in: start key value of the + range, may also be 0 */ + key_range *max_key) /* in: range end key val, may + also be 0 */ { row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; KEY* key; @@ -3933,12 +4401,6 @@ ha_innobase::records_in_range( DBUG_ENTER("records_in_range"); - /* We do not know if MySQL can call this function before calling - external_lock(). To be safe, update the thd of the current table - handle. */ - - update_thd(current_thd); - prebuilt->trx->op_info = (char*)"estimating records in index range"; /* In case MySQL calls this in the middle of a SELECT query, release @@ -3962,17 +4424,23 @@ ha_innobase::records_in_range( range_start, (byte*) key_val_buff, (ulint)upd_and_key_val_buff_len, index, - (byte*) start_key, - (ulint) start_key_len); + (byte*) (min_key ? min_key->key : + (const mysql_byte*) 0), + (ulint) (min_key ? min_key->length : 0), + prebuilt->trx); row_sel_convert_mysql_key_to_innobase( range_end, (byte*) key_val_buff2, buff2_len, index, - (byte*) end_key, - (ulint) end_key_len); + (byte*) (max_key ? max_key->key : + (const mysql_byte*) 0), + (ulint) (max_key ? max_key->length : 0), + prebuilt->trx); - mode1 = convert_search_mode_to_innobase(start_search_flag); - mode2 = convert_search_mode_to_innobase(end_search_flag); + mode1 = convert_search_mode_to_innobase(min_key ? min_key->flag : + HA_READ_KEY_EXACT); + mode2 = convert_search_mode_to_innobase(max_key ? max_key->flag : + HA_READ_KEY_EXACT); n_rows = btr_estimate_n_rows_in_range(index, range_start, mode1, range_end, mode2); @@ -4001,7 +4469,7 @@ Gives an UPPER BOUND to the number of rows in a table. This is used in filesort.cc. */ ha_rows -ha_innobase::estimate_number_of_rows(void) +ha_innobase::estimate_rows_upper_bound(void) /*======================================*/ /* out: upper bound of rows */ { @@ -4010,7 +4478,7 @@ ha_innobase::estimate_number_of_rows(void) ulonglong estimate; ulonglong local_data_file_length; - DBUG_ENTER("estimate_number_of_rows"); + DBUG_ENTER("estimate_rows_upper_bound"); /* We do not know if MySQL can call this function before calling external_lock(). To be safe, update the thd of the current table @@ -4088,9 +4556,9 @@ ha_innobase::read_time( /* Assume that the read time is proportional to the scan time for all rows + at most one seek per range. */ - time_for_scan= scan_time(); + time_for_scan = scan_time(); - if ((total_rows= estimate_number_of_rows()) < rows) + if ((total_rows = estimate_rows_upper_bound()) < rows) return time_for_scan; return (ranges + (double) rows / (double) total_rows * time_for_scan); @@ -4112,6 +4580,8 @@ ha_innobase::info( ib_longlong n_rows; ulong j; ulong i; + char path[FN_REFLEN]; + os_file_stat_t stat_info; DBUG_ENTER("info"); @@ -4149,6 +4619,26 @@ ha_innobase::info( prebuilt->trx->op_info = (char*) "returning various info to MySQL"; + + if (ib_table->space != 0) { + my_snprintf(path, sizeof(path), "%s/%s%s", + mysql_data_home, ib_table->name, + ".ibd"); + unpack_filename(path,path); + } else { + my_snprintf(path, sizeof(path), "%s/%s%s", + mysql_data_home, ib_table->name, + reg_ext); + + unpack_filename(path,path); + } + + /* Note that we do not know the access time of the table, + nor the CHECK TABLE time, nor the UPDATE or INSERT time. 
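/* A condensed sketch (hypothetical helper) of the cost formula used by
   ha_innobase::read_time() earlier in this hunk: at most one seek per range
   plus the rows/total_rows fraction of a full scan, capped at a full scan
   when the row estimate is smaller than the requested row count. */

static double
estimated_read_time(
        unsigned int    ranges,         /* in: number of ranges to read */
        double          rows,           /* in: rows expected to be read */
        double          total_rows,     /* in: estimated rows in the table */
        double          time_for_scan)  /* in: cost of a full table scan */
{
        if (total_rows < rows) {

                return(time_for_scan);  /* upper bound: a full scan */
        }

        return(ranges + rows / total_rows * time_for_scan);
}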
*/ + + if (os_file_get_status(path,&stat_info)) { + create_time = stat_info.ctime; + } } if (flag & HA_STATUS_VARIABLE) { @@ -4222,7 +4712,8 @@ ha_innobase::info( "InnoDB: .frm files from different installations? See section\n" "InnoDB: 15.1 at http://www.innodb.com/ibman.html\n", index->name, - ib_table->name, index->n_uniq, + ib_table->name, + (unsigned long) index->n_uniq, j + 1); break; } @@ -4269,8 +4760,8 @@ ha_innobase::info( } /************************************************************************** -Updates index cardinalities of the table, based on 10 random dives into -each index tree. This does NOT calculate exact statistics of the table. */ +Updates index cardinalities of the table, based on 8 random dives into +each index tree. This does NOT calculate exact statistics on the table. */ int ha_innobase::analyze( @@ -4285,10 +4776,17 @@ ha_innobase::analyze( return(0); } +/************************************************************************** +This is mapped to "ALTER TABLE tablename TYPE=InnoDB", which rebuilds +the table in MySQL. */ -int ha_innobase::optimize(THD* thd, HA_CHECK_OPT* check_opt) +int +ha_innobase::optimize( +/*==================*/ + THD* thd, /* in: connection thread handle */ + HA_CHECK_OPT* check_opt) /* in: currently ignored */ { - return ha_innobase::analyze(thd,check_opt); + return(HA_ADMIN_TRY_ALTER); } /*********************************************************************** @@ -4343,6 +4841,7 @@ ha_innobase::update_table_comment( uint length = strlen(comment); char* str; row_prebuilt_t* prebuilt = (row_prebuilt_t*)innobase_prebuilt; + long flen; /* We do not know if MySQL can call this function before calling external_lock(). To be safe, update the thd of the current table @@ -4362,40 +4861,43 @@ ha_innobase::update_table_comment( trx_search_latch_release_if_reserved(prebuilt->trx); str = NULL; - if (FILE* file = os_file_create_tmpfile()) { - long flen; - - /* output the data to a temporary file */ - fprintf(file, "InnoDB free: %lu kB", - (ulong) innobase_get_free_space()); - dict_print_info_on_foreign_keys(FALSE, file, prebuilt->table); - flen = ftell(file); - if (flen < 0) { - flen = 0; - } else if (length + flen + 3 > 64000) { - flen = 64000 - 3 - length; - } + /* output the data to a temporary file */ - /* allocate buffer for the full string, and - read the contents of the temporary file */ + mutex_enter_noninline(&srv_dict_tmpfile_mutex); + rewind(srv_dict_tmpfile); - str = my_malloc(length + flen + 3, MYF(0)); + fprintf(srv_dict_tmpfile, "InnoDB free: %lu kB", + (ulong) fsp_get_available_space_in_free_extents( + prebuilt->table->space)); - if (str) { - char* pos = str + length; - if(length) { - memcpy(str, comment, length); - *pos++ = ';'; - *pos++ = ' '; - } - rewind(file); - flen = fread(pos, 1, flen, file); - pos[flen] = 0; - } + dict_print_info_on_foreign_keys(FALSE, srv_dict_tmpfile, + prebuilt->trx, prebuilt->table); + flen = ftell(srv_dict_tmpfile); + if (flen < 0) { + flen = 0; + } else if (length + flen + 3 > 64000) { + flen = 64000 - 3 - length; + } + + /* allocate buffer for the full string, and + read the contents of the temporary file */ - fclose(file); + str = my_malloc(length + flen + 3, MYF(0)); + + if (str) { + char* pos = str + length; + if (length) { + memcpy(str, comment, length); + *pos++ = ';'; + *pos++ = ' '; + } + rewind(srv_dict_tmpfile); + flen = (uint) fread(pos, 1, flen, srv_dict_tmpfile); + pos[flen] = 0; } + mutex_exit_noninline(&srv_dict_tmpfile_mutex); + prebuilt->trx->op_info = (char*)""; return(str ? 
str : (char*) comment); @@ -4413,6 +4915,7 @@ ha_innobase::get_foreign_key_create_info(void) { row_prebuilt_t* prebuilt = (row_prebuilt_t*)innobase_prebuilt; char* str = 0; + long flen; ut_a(prebuilt != NULL); @@ -4422,49 +4925,71 @@ ha_innobase::get_foreign_key_create_info(void) update_thd(current_thd); - if (FILE* file = os_file_create_tmpfile()) { - long flen; - - prebuilt->trx->op_info = (char*)"getting info on foreign keys"; + prebuilt->trx->op_info = (char*)"getting info on foreign keys"; - /* In case MySQL calls this in the middle of a SELECT query, - release possible adaptive hash latch to avoid - deadlocks of threads */ + /* In case MySQL calls this in the middle of a SELECT query, + release possible adaptive hash latch to avoid + deadlocks of threads */ - trx_search_latch_release_if_reserved(prebuilt->trx); + trx_search_latch_release_if_reserved(prebuilt->trx); - /* output the data to a temporary file */ - dict_print_info_on_foreign_keys(TRUE, file, prebuilt->table); - prebuilt->trx->op_info = (char*)""; + mutex_enter_noninline(&srv_dict_tmpfile_mutex); + rewind(srv_dict_tmpfile); - flen = ftell(file); - if (flen < 0) { - flen = 0; - } else if(flen > 64000 - 1) { - flen = 64000 - 1; - } + /* output the data to a temporary file */ + dict_print_info_on_foreign_keys(TRUE, srv_dict_tmpfile, + prebuilt->trx, prebuilt->table); + prebuilt->trx->op_info = (char*)""; - /* allocate buffer for the string, and - read the contents of the temporary file */ + flen = ftell(srv_dict_tmpfile); + if (flen < 0) { + flen = 0; + } else if (flen > 64000 - 1) { + flen = 64000 - 1; + } - str = my_malloc(flen + 1, MYF(0)); + /* allocate buffer for the string, and + read the contents of the temporary file */ - if (str) { - rewind(file); - flen = fread(str, 1, flen, file); - str[flen] = 0; - } + str = my_malloc(flen + 1, MYF(0)); - fclose(file); - } else { - /* unable to create temporary file */ - str = my_strdup( -"/* Error: cannot display foreign key constraints */", MYF(0)); + if (str) { + rewind(srv_dict_tmpfile); + flen = (uint) fread(str, 1, flen, srv_dict_tmpfile); + str[flen] = 0; } + mutex_exit_noninline(&srv_dict_tmpfile_mutex); + return(str); } +/********************************************************************* +Checks if ALTER TABLE may change the storage engine of the table. +Changing storage engines is not allowed for tables for which there +are foreign key constraints (parent or child tables). */ + +bool +ha_innobase::can_switch_engines(void) +/*=================================*/ +{ + row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; + bool can_switch; + + DBUG_ENTER("ha_innobase::can_switch_engines"); + prebuilt->trx->op_info = + "determining if there are foreign key constraints"; + row_mysql_lock_data_dictionary(prebuilt->trx); + + can_switch = !UT_LIST_GET_FIRST(prebuilt->table->referenced_list) + && !UT_LIST_GET_FIRST(prebuilt->table->foreign_list); + + row_mysql_unlock_data_dictionary(prebuilt->trx); + prebuilt->trx->op_info = ""; + + DBUG_RETURN(can_switch); +} + /*********************************************************************** Checks if a table is referenced by a foreign key. The MySQL manual states that a REPLACE is either equivalent to an INSERT, or DELETE(s) + INSERT. 
Only a @@ -4508,7 +5033,8 @@ ha_innobase::extra( /*===============*/ /* out: 0 or error number */ enum ha_extra_function operation) - /* in: HA_EXTRA_DONT_USE_CURSOR_TO_UPDATE */ + /* in: HA_EXTRA_RETRIEVE_ALL_COLS or some + other flag */ { row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; @@ -4517,15 +5043,32 @@ ha_innobase::extra( obsolete! */ switch (operation) { - case HA_EXTRA_RESET: + case HA_EXTRA_FLUSH: + if (prebuilt->blob_heap) { + row_mysql_prebuilt_free_blob_heap(prebuilt); + } + break; + case HA_EXTRA_RESET: + if (prebuilt->blob_heap) { + row_mysql_prebuilt_free_blob_heap(prebuilt); + } + prebuilt->read_just_key = 0; + break; case HA_EXTRA_RESET_STATE: prebuilt->read_just_key = 0; break; case HA_EXTRA_NO_KEYREAD: prebuilt->read_just_key = 0; break; - case HA_EXTRA_DONT_USE_CURSOR_TO_UPDATE: - prebuilt->hint_no_need_to_fetch_extra_cols = FALSE; + case HA_EXTRA_RETRIEVE_ALL_COLS: + prebuilt->hint_need_to_fetch_extra_cols + = ROW_RETRIEVE_ALL_COLS; + break; + case HA_EXTRA_RETRIEVE_PRIMARY_KEY: + if (prebuilt->hint_need_to_fetch_extra_cols == 0) { + prebuilt->hint_need_to_fetch_extra_cols + = ROW_RETRIEVE_PRIMARY_KEY; + } break; case HA_EXTRA_KEYREAD: prebuilt->read_just_key = 1; @@ -4538,16 +5081,6 @@ ha_innobase::extra( } /********************************************************************** -????????????? */ - -int -ha_innobase::reset(void) -/*====================*/ -{ - return(0); -} - -/********************************************************************** MySQL calls this function at the start of each SQL statement inside LOCK TABLES. Inside LOCK TABLES the ::external_lock method does not work to mark SQL statement borders. Note also a special case: if a temporary table @@ -4586,7 +5119,7 @@ ha_innobase::start_stmt( auto_inc_counter_for_this_stat = 0; prebuilt->sql_stat_start = TRUE; - prebuilt->hint_no_need_to_fetch_extra_cols = TRUE; + prebuilt->hint_need_to_fetch_extra_cols = 0; prebuilt->read_just_key = 0; if (!prebuilt->mysql_has_locked) { @@ -4598,8 +5131,8 @@ ha_innobase::start_stmt( prebuilt->select_lock_type = LOCK_X; } else { if (trx->isolation_level != TRX_ISO_SERIALIZABLE - && thd->lex.sql_command == SQLCOM_SELECT - && thd->lex.lock_option == TL_READ) { + && thd->lex->sql_command == SQLCOM_SELECT + && thd->lex->lock_option == TL_READ) { /* For other than temporary tables, we obtain no lock for consistent read (plain SELECT). */ @@ -4682,7 +5215,7 @@ ha_innobase::external_lock( trx = prebuilt->trx; prebuilt->sql_stat_start = TRUE; - prebuilt->hint_no_need_to_fetch_extra_cols = TRUE; + prebuilt->hint_need_to_fetch_extra_cols = 0; prebuilt->read_just_key = 0; @@ -4713,10 +5246,10 @@ ha_innobase::external_lock( if (trx->isolation_level == TRX_ISO_SERIALIZABLE && prebuilt->select_lock_type == LOCK_NONE && (thd->options - & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) { + & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) { - /* To get serializable execution, we let InnoDB - conceptually add 'LOCK IN SHARE MODE' to all SELECTs + /* To get serializable execution, we let InnoDB + conceptually add 'LOCK IN SHARE MODE' to all SELECTs which otherwise would have been consistent reads. An exception is consistent reads in the AUTOCOMMIT=1 mode: we know that they are read-only transactions, and they @@ -4726,12 +5259,21 @@ ha_innobase::external_lock( prebuilt->select_lock_type = LOCK_S; } + /* Starting from 4.1.9, no InnoDB table lock is taken in LOCK + TABLES if AUTOCOMMIT=1. 
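/* A condensed restatement (simplified enum, hypothetical helper) of the lock
   choice made just above in ::external_lock(): under SERIALIZABLE, a plain
   consistent read inside a user transaction is upgraded to the equivalent of
   LOCK IN SHARE MODE, while AUTOCOMMIT=1 read-only SELECTs keep the
   consistent read. */

enum select_lock_sketch { SKETCH_LOCK_NONE, SKETCH_LOCK_S, SKETCH_LOCK_X };

static select_lock_sketch
lock_for_plain_select(
        bool                    serializable,   /* isolation level is
                                                SERIALIZABLE */
        bool                    inside_trx,     /* OPTION_NOT_AUTOCOMMIT or
                                                OPTION_BEGIN is set */
        select_lock_sketch      current)        /* lock type chosen so far */
{
        if (serializable && inside_trx && current == SKETCH_LOCK_NONE) {

                return(SKETCH_LOCK_S);  /* conceptually LOCK IN SHARE MODE */
        }

        return(current);
}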
It does not make much sense to acquire + an InnoDB table lock if it is released immediately at the end + of LOCK TABLES, and InnoDB's table locks in that case cause + VERY easily deadlocks. */ + if (prebuilt->select_lock_type != LOCK_NONE) { + if (thd->in_lock_tables && thd->variables.innodb_table_locks && (thd->options & OPTION_NOT_AUTOCOMMIT)) { + ulint error; - error = row_lock_table_for_mysql(prebuilt); + error = row_lock_table_for_mysql(prebuilt, + NULL, LOCK_TABLE_EXP); if (error != DB_SUCCESS) { error = convert_error_code_to_mysql( @@ -4767,10 +5309,9 @@ ha_innobase::external_lock( may reserve the kernel mutex, we have to release the search system latch first to obey the latching order. */ - innobase_release_stat_resources(trx); + innobase_release_stat_resources(trx); - if (!(thd->options - & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) { + if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) { if (thd->transaction.all.innodb_active_trans != 0) { innobase_commit(thd, trx); } @@ -4790,7 +5331,7 @@ ha_innobase::external_lock( } /**************************************************************************** -Implements the SHOW INNODB STATUS command. Send the output of the InnoDB +Implements the SHOW INNODB STATUS command. Sends the output of the InnoDB Monitor to the client. */ int @@ -4798,15 +5339,19 @@ innodb_show_status( /*===============*/ THD* thd) /* in: the MySQL query thread of the caller */ { - String* packet = &thd->packet; - trx_t* trx; - - DBUG_ENTER("innodb_show_status"); - - if (innodb_skip) { - my_message(ER_NOT_SUPPORTED_YET, - "Cannot call SHOW INNODB STATUS because skip-innodb is defined", - MYF(0)); + Protocol* protocol = thd->protocol; + trx_t* trx; + static const char truncated_msg[] = "... truncated...\n"; + const long MAX_STATUS_SIZE = 64000; + ulint trx_list_start = ULINT_UNDEFINED; + ulint trx_list_end = ULINT_UNDEFINED; + + DBUG_ENTER("innodb_show_status"); + + if (have_innodb != SHOW_OPTION_YES) { + my_message(ER_NOT_SUPPORTED_YET, + "Cannot call SHOW INNODB STATUS because skip-innodb is defined", + MYF(0)); DBUG_RETURN(-1); } @@ -4814,31 +5359,56 @@ innodb_show_status( innobase_release_stat_resources(trx); - /* We let the InnoDB Monitor to output at most 64000 bytes of text. */ + /* We let the InnoDB Monitor to output at most MAX_STATUS_SIZE + bytes of text. */ - long flen; + long flen, usable_len; char* str; mutex_enter_noninline(&srv_monitor_file_mutex); rewind(srv_monitor_file); - srv_printf_innodb_monitor(srv_monitor_file); + srv_printf_innodb_monitor(srv_monitor_file, + &trx_list_start, &trx_list_end); flen = ftell(srv_monitor_file); os_file_set_eof(srv_monitor_file); if (flen < 0) { flen = 0; - } else if (flen > 64000 - 1) { - flen = 64000 - 1; + } + + if (flen > MAX_STATUS_SIZE) { + usable_len = MAX_STATUS_SIZE; + } else { + usable_len = flen; } /* allocate buffer for the string, and read the contents of the temporary file */ - str = my_malloc(flen + 1, MYF(0)); + if (!(str = my_malloc(usable_len + 1, MYF(0)))) + { + mutex_exit_noninline(&srv_monitor_file_mutex); + DBUG_RETURN(-1); + } - if (str) { - rewind(srv_monitor_file); + rewind(srv_monitor_file); + if (flen < MAX_STATUS_SIZE) { + /* Display the entire output. */ flen = fread(str, 1, flen, srv_monitor_file); - str[flen] = 0; + } else if (trx_list_end < (ulint) flen + && trx_list_start < trx_list_end + && trx_list_start + (flen - trx_list_end) + < MAX_STATUS_SIZE - sizeof truncated_msg - 1) { + /* Omit the beginning of the list of active transactions. 
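/* A self-contained sketch (std::string instead of the monitor tmpfile,
   hypothetical helper name) of the truncation scheme used right here in
   innodb_show_status(): keep the head of the output up to the start of the
   transaction list, add a marker, and keep as much of the tail as still
   fits within the size limit. */

#include <string>

static std::string
truncate_monitor_output(
        const std::string&      full,           /* complete monitor text */
        std::string::size_type  max_size,       /* output size limit */
        std::string::size_type  trx_list_start, /* offset of the trx list */
        std::string::size_type  trx_list_end)   /* offset of its end */
{
        static const char       truncated_msg[] = "... truncated...\n";

        if (full.size() < max_size) {

                return(full);                   /* everything fits */
        }

        if (trx_list_end < full.size()
            && trx_list_start < trx_list_end
            && trx_list_start + (full.size() - trx_list_end)
               < max_size - sizeof(truncated_msg) - 1) {

                /* omit the beginning of the list of active transactions */
                std::string     out = full.substr(0, trx_list_start);
                out += truncated_msg;
                out += full.substr(full.size() - (max_size - 1 - out.size()));

                return(out);
        }

        return(full.substr(0, max_size - 1));   /* omit the end instead */
}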
*/ + long len = fread(str, 1, trx_list_start, srv_monitor_file); + memcpy(str + len, truncated_msg, sizeof truncated_msg - 1); + len += sizeof truncated_msg - 1; + usable_len = (MAX_STATUS_SIZE - 1) - len; + fseek(srv_monitor_file, flen - usable_len, SEEK_SET); + len += fread(str + len, 1, usable_len, srv_monitor_file); + flen = len; + } else { + /* Omit the end of the output. */ + flen = fread(str, 1, MAX_STATUS_SIZE - 1, srv_monitor_file); } mutex_exit_noninline(&srv_monitor_file_mutex); @@ -4847,24 +5417,21 @@ innodb_show_status( field_list.push_back(new Item_empty_string("Status", flen)); - if (send_fields(thd, field_list, 1)) { + if (protocol->send_fields(&field_list, 1)) { my_free(str, MYF(0)); DBUG_RETURN(-1); } - packet->length(0); - net_store_data(packet, str); - my_free(str, MYF(0)); + protocol->prepare_for_resend(); + protocol->store(str, flen, system_charset_info); + my_free(str, MYF(0)); - if (my_net_write(&thd->net, (char*)thd->packet.ptr(), - packet->length())) { + if (protocol->write()) + DBUG_RETURN(-1); - DBUG_RETURN(-1); - } - - send_eof(&thd->net); + send_eof(thd); DBUG_RETURN(0); } @@ -4895,7 +5462,7 @@ static INNOBASE_SHARE *get_share(const char *table_name) share->table_name_length=length; share->table_name=(char*) (share+1); strmov(share->table_name,table_name); - if (hash_insert(&innobase_open_tables, (mysql_byte*) share)) + if (my_hash_insert(&innobase_open_tables, (mysql_byte*) share)) { pthread_mutex_unlock(&innobase_mutex); my_free((gptr) share,0); @@ -4952,7 +5519,7 @@ ha_innobase::store_lock( (lock_type == TL_READ_HIGH_PRIORITY && thd->in_lock_tables) || lock_type == TL_READ_WITH_SHARED_LOCKS || lock_type == TL_READ_NO_INSERT || - (thd->lex.sql_command != SQLCOM_SELECT + (thd->lex->sql_command != SQLCOM_SELECT && lock_type != TL_IGNORE)) { /* The OR cases above are in this order: @@ -4967,8 +5534,34 @@ ha_innobase::store_lock( are not simple SELECTs; note that select_lock_type in this case may get strengthened in ::external_lock() to LOCK_X. */ - prebuilt->select_lock_type = LOCK_S; - prebuilt->stored_select_lock_type = LOCK_S; + if (srv_locks_unsafe_for_binlog && + prebuilt->trx->isolation_level != TRX_ISO_SERIALIZABLE && + (lock_type == TL_READ || lock_type == TL_READ_NO_INSERT) && + thd->lex->sql_command != SQLCOM_SELECT && + thd->lex->sql_command != SQLCOM_UPDATE_MULTI && + thd->lex->sql_command != SQLCOM_DELETE_MULTI && + thd->lex->sql_command != SQLCOM_LOCK_TABLES) { + + /* In case we have innobase_locks_unsafe_for_binlog + option set and isolation level of the transaction + is not set to serializable and MySQL is doing + INSERT INTO...SELECT or UPDATE ... = (SELECT ...) + without FOR UPDATE or IN SHARE MODE in select, then + we use consistent read for select. */ + + prebuilt->select_lock_type = LOCK_NONE; + prebuilt->stored_select_lock_type = LOCK_NONE; + } else if (thd->lex->sql_command == SQLCOM_CHECKSUM) { + /* Use consistent read for checksum table and + convert lock type to the TL_READ */ + + prebuilt->select_lock_type = LOCK_NONE; + prebuilt->stored_select_lock_type = LOCK_NONE; + lock.type = TL_READ; + } else { + prebuilt->select_lock_type = LOCK_S; + prebuilt->stored_select_lock_type = LOCK_S; + } } else if (lock_type != TL_IGNORE) { @@ -4985,11 +5578,28 @@ ha_innobase::store_lock( if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) { - /* If we are not doing a LOCK TABLE, then allow multiple - writers */ + if (lock_type == TL_READ && thd->in_lock_tables) { + /* We come here if MySQL is processing LOCK TABLES + ... READ LOCAL. 
MyISAM under that table lock type + reads the table as it was at the time the lock was + granted (new inserts are allowed, but not seen by the + reader). To get a similar effect on an InnoDB table, + we must use LOCK TABLES ... READ. We convert the lock + type here, so that for InnoDB, READ LOCAL is + equivalent to READ. This will change the InnoDB + behavior in mysqldump, so that dumps of InnoDB tables + are consistent with dumps of MyISAM tables. */ + + lock_type = TL_READ_NO_INSERT; + } + + /* If we are not doing a LOCK TABLE or DISCARD/IMPORT + TABLESPACE, then allow multiple writers */ if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && - lock_type <= TL_WRITE) && !thd->in_lock_tables) { + lock_type <= TL_WRITE) && !thd->in_lock_tables + && !thd->tablespace_op + && thd->lex->sql_command != SQLCOM_CREATE_TABLE) { lock_type = TL_WRITE_ALLOW_WRITE; } @@ -5015,7 +5625,7 @@ ha_innobase::store_lock( /*********************************************************************** This function initializes the auto-inc counter if it has not been initialized yet. This function does not change the value of the auto-inc -counter if it already has been initialized. In parameter ret returns +counter if it already has been initialized. In paramete ret returns the value of the auto-inc counter. */ int @@ -5080,7 +5690,7 @@ ha_innobase::innobase_read_and_init_auto_inc( /* Play safe and also give in another way the hint to fetch all columns in the key: */ - prebuilt->hint_no_need_to_fetch_extra_cols = FALSE; + prebuilt->hint_need_to_fetch_extra_cols = ROW_RETRIEVE_ALL_COLS; prebuilt->trx->mysql_n_tables_locked += 1; @@ -5141,4 +5751,163 @@ ha_innobase::get_auto_increment() return(nr); } +/*********************************************************************** +This function stores the binlog offset and flushes logs. */ + +void +innobase_store_binlog_offset_and_flush_log( +/*=======================================*/ + char *binlog_name, /* in: binlog name */ + longlong offset) /* in: binlog offset */ +{ + mtr_t mtr; + + assert(binlog_name != NULL); + + /* Start a mini-transaction */ + mtr_start_noninline(&mtr); + + /* Update the latest MySQL binlog name and offset info + in trx sys header */ + + trx_sys_update_mysql_binlog_offset( + binlog_name, + offset, + TRX_SYS_MYSQL_LOG_INFO, &mtr); + + /* Commits the mini-transaction */ + mtr_commit(&mtr); + + /* Syncronous flush of the log buffer to disk */ + log_buffer_flush_to_disk(); +} + +char* +ha_innobase::get_mysql_bin_log_name() +{ + return(trx_sys_mysql_bin_log_name); +} + +ulonglong +ha_innobase::get_mysql_bin_log_pos() +{ + /* trx... is ib_longlong, which is a typedef for a 64-bit integer + (__int64 or longlong) so it's ok to cast it to ulonglong. */ + + return(trx_sys_mysql_bin_log_pos); +} + +extern "C" { +/********************************************************************** +This function is used to find the storage length in bytes of the first n +characters for prefix indexes using a multibyte character set. The function +finds charset information and returns length of prefix_len characters in the +index field in bytes. + +NOTE: the prototype of this function is copied to data0type.c! If you change +this function, you MUST change also data0type.c! 
*/ + +ulint +innobase_get_at_most_n_mbchars( +/*===========================*/ + /* out: number of bytes occupied by the first + n characters */ + ulint charset_id, /* in: character set id */ + ulint prefix_len, /* in: prefix length in bytes of the index + (this has to be divided by mbmaxlen to get the + number of CHARACTERS n in the prefix) */ + ulint data_len, /* in: length of the string in bytes */ + const char* str) /* in: character string */ +{ + ulint char_length; /* character length in bytes */ + ulint n_chars; /* number of characters in prefix */ + CHARSET_INFO* charset; /* charset used in the field */ + + charset = get_charset(charset_id, MYF(MY_WME)); + + ut_ad(charset); + ut_ad(charset->mbmaxlen); + + /* Calculate how many characters at most the prefix index contains */ + + n_chars = prefix_len / charset->mbmaxlen; + + /* If the charset is multi-byte, then we must find the length of the + first at most n chars in the string. If the string contains less + characters than n, then we return the length to the end of the last + character. */ + + if (charset->mbmaxlen > 1) { + /* my_charpos() returns the byte length of the first n_chars + characters, or a value bigger than the length of str, if + there were not enough full characters in str. + + Why does the code below work: + Suppose that we are looking for n UTF-8 characters. + + 1) If the string is long enough, then the prefix contains at + least n complete UTF-8 characters + maybe some extra + characters + an incomplete UTF-8 character. No problem in + this case. The function returns the pointer to the + end of the nth character. + + 2) If the string is not long enough, then the string contains + the complete value of a column, that is, only complete UTF-8 + characters, and we can store in the column prefix index the + whole string. */ + + char_length = my_charpos(charset, str, + str + data_len, n_chars); + if (char_length > data_len) { + char_length = data_len; + } + } else { + if (data_len < prefix_len) { + char_length = data_len; + } else { + char_length = prefix_len; + } + } + + return(char_length); +} +} + +extern "C" { +/********************************************************************** +This function returns true if + +1) SQL-query in the current thread +is either REPLACE or LOAD DATA INFILE REPLACE. + +2) SQL-query in the current thread +is INSERT ON DUPLICATE KEY UPDATE. + +NOTE that /mysql/innobase/row/row0ins.c must contain the +prototype for this function ! 
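/* A plain-UTF-8 sketch of the computation done by
   innobase_get_at_most_n_mbchars() above, without the MySQL charset
   machinery (my_charpos() etc.); the helper name is hypothetical and
   well-formed UTF-8 input is assumed. */

#include <stddef.h>

static size_t
utf8_prefix_bytes(
        const unsigned char*    str,            /* in: UTF-8 string */
        size_t                  data_len,       /* in: length in bytes */
        size_t                  n_chars)        /* in: prefix length in
                                                characters */
{
        size_t  i = 0;

        while (n_chars > 0 && i < data_len) {
                /* length of the next character from its lead byte */
                size_t  char_len = str[i] < 0x80 ? 1
                                 : str[i] < 0xE0 ? 2
                                 : str[i] < 0xF0 ? 3 : 4;

                if (i + char_len > data_len) {

                        break;  /* incomplete last character: stop here */
                }

                i += char_len;
                n_chars--;
        }

        return(i);      /* bytes occupied by at most n_chars characters */
}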
*/ + +ibool +innobase_query_is_update(void) +/*==========================*/ +{ + THD* thd; + + thd = (THD *)innobase_current_thd(); + + if ( thd->lex->sql_command == SQLCOM_REPLACE || + thd->lex->sql_command == SQLCOM_REPLACE_SELECT || + ( thd->lex->sql_command == SQLCOM_LOAD && + thd->lex->duplicates == DUP_REPLACE )) { + return(1); + } + + if ( thd->lex->sql_command == SQLCOM_INSERT && + thd->lex->duplicates == DUP_UPDATE ) { + return(1); + } + + return(0); +} +} + #endif /* HAVE_INNOBASE_DB */ diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h index 7bf20771680..d336811a1eb 100644 --- a/sql/ha_innodb.h +++ b/sql/ha_innodb.h @@ -21,7 +21,7 @@ Innodb */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_INTERFACE #pragma interface /* gcc class implementation */ #endif @@ -32,6 +32,7 @@ typedef struct st_innobase_share { uint table_name_length,use_count; } INNOBASE_SHARE; + /* The class defining a handle to an Innodb table */ class ha_innobase: public handler { @@ -60,20 +61,12 @@ class ha_innobase: public handler ulong start_of_scan; /* this is set to 1 when we are starting a table scan but have not yet fetched any row, else 0 */ - uint active_index_before_scan; - /* since a table scan in InnoDB is - always done through an index, a table - scan may change active_index; but - MySQL may assume that active_index - after a table scan is the same as - before; we store the value here so - that we can restore the value after - a scan */ uint last_match_mode;/* match mode of the latest search: ROW_SEL_EXACT, ROW_SEL_EXACT_PREFIX, or undefined */ + uint num_write_row; /* number of write_row() calls */ longlong auto_inc_counter_for_this_stat; - ulong max_row_length(const byte *buf); + ulong max_supported_row_length(const byte *buf); uint store_key_val_for_row(uint keynr, char* buff, uint buff_len, const byte* record); @@ -86,18 +79,15 @@ class ha_innobase: public handler public: ha_innobase(TABLE *table): handler(table), int_table_flags(HA_REC_NOT_IN_SEQ | - HA_KEYPOS_TO_RNDPOS | - HA_LASTKEY_ORDER | - HA_NULL_KEY | - HA_BLOB_KEY | + HA_NULL_IN_KEY | HA_FAST_KEY_READ | + HA_CAN_INDEX_BLOBS | HA_CAN_SQL_HANDLER | HA_NOT_EXACT_COUNT | - HA_NO_WRITE_DELAYED | HA_PRIMARY_KEY_IN_READ_INDEX | - HA_DROP_BEFORE_CREATE | HA_TABLE_SCAN_ON_INDEX), last_dup_key((uint) -1), - start_of_scan(0) + start_of_scan(0), + num_write_row(0) { } ~ha_innobase() {} @@ -106,14 +96,12 @@ class ha_innobase: public handler const char *index_type(uint key_number) { return "BTREE"; } const char** bas_ext() const; ulong table_flags() const { return int_table_flags; } - ulong index_flags(uint idx) const + ulong index_flags(uint idx, uint part, bool all_parts) const { - return (HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | - HA_KEY_READ_ONLY); + return (HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | HA_READ_RANGE | + HA_KEYREAD_ONLY); } - uint max_record_length() const { return HA_MAX_REC_LENGTH; } - uint max_keys() const { return MAX_KEY; } - uint max_key_parts() const { return MAX_REF_PARTS; } + uint max_supported_keys() const { return MAX_KEY; } /* An InnoDB page must store >= 2 keys; a secondary key record must also contain the primary key value: @@ -121,14 +109,12 @@ class ha_innobase: public handler less than 1 / 4 of page size which is 16 kB; but currently MySQL does not work with keys whose size is > MAX_KEY_LENGTH */ - uint max_key_length() const { return((MAX_KEY_LENGTH <= 3500) ? 
- MAX_KEY_LENGTH : 3500);} - bool fast_key_read() { return 1;} - key_map keys_to_use_for_scanning() { return ~(key_map) 0; } + uint max_supported_key_length() const { return 3500; } + uint max_supported_key_part_length() const { return 3500; } + const key_map *keys_to_use_for_scanning() { return &key_map_full; } bool has_transactions() { return 1;} int open(const char *name, int mode, uint test_if_locked); - void initialize(void); int close(void); double scan_time(); double read_time(uint index, uint ranges, ha_rows rows); @@ -150,7 +136,7 @@ class ha_innobase: public handler int index_first(byte * buf); int index_last(byte * buf); - int rnd_init(bool scan=1); + int rnd_init(bool scan); int rnd_end(); int rnd_next(byte *buf); int rnd_pos(byte * buf, byte *pos); @@ -159,18 +145,14 @@ class ha_innobase: public handler void info(uint); int analyze(THD* thd,HA_CHECK_OPT* check_opt); int optimize(THD* thd,HA_CHECK_OPT* check_opt); + int discard_or_import_tablespace(my_bool discard); int extra(enum ha_extra_function operation); - int reset(void); int external_lock(THD *thd, int lock_type); int start_stmt(THD *thd); void position(byte *record); - ha_rows records_in_range(int inx, - const byte *start_key,uint start_key_len, - enum ha_rkey_function start_search_flag, - const byte *end_key,uint end_key_len, - enum ha_rkey_function end_search_flag); - ha_rows estimate_number_of_rows(); + ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key); + ha_rows estimate_rows_upper_bound(); int create(const char *name, register TABLE *form, HA_CREATE_INFO *create_info); @@ -179,15 +161,19 @@ class ha_innobase: public handler int check(THD* thd, HA_CHECK_OPT* check_opt); char* update_table_comment(const char* comment); char* get_foreign_key_create_info(); + bool can_switch_engines(); uint referenced_by_foreign_key(); void free_foreign_key_create_info(char* str); THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type); void init_table_handle_for_HANDLER(); longlong get_auto_increment(); + uint8 table_cache_type() { return HA_CACHE_TBL_ASKTRANSACT; } + + static char *get_mysql_bin_log_name(); + static ulonglong get_mysql_bin_log_pos(); }; -extern bool innodb_skip; extern uint innobase_init_flags, innobase_lock_type; extern uint innobase_flush_log_at_trx_commit; extern ulong innobase_cache_size; @@ -196,15 +182,23 @@ extern long innobase_lock_scan_time; extern long innobase_mirrored_log_groups, innobase_log_files_in_group; extern long innobase_log_file_size, innobase_log_buffer_size; extern long innobase_buffer_pool_size, innobase_additional_mem_pool_size; +extern long innobase_buffer_pool_awe_mem_mb; extern long innobase_file_io_threads, innobase_lock_wait_timeout; extern long innobase_force_recovery, innobase_thread_concurrency; +extern long innobase_open_files; extern char *innobase_data_home_dir, *innobase_data_file_path; extern char *innobase_log_group_home_dir, *innobase_log_arch_dir; extern char *innobase_unix_file_flush_method; /* The following variables have to be my_bool for SHOW VARIABLES to work */ extern my_bool innobase_log_archive, innobase_use_native_aio, innobase_fast_shutdown, + innobase_file_per_table, innobase_locks_unsafe_for_binlog, innobase_create_status_file; +extern my_bool innobase_very_fast_shutdown; /* set this to 1 just before + calling innobase_end() if you want + InnoDB to shut down without + flushing the buffer pool: this + is equivalent to a 'crash' */ extern "C" { extern ulong srv_max_buf_pool_modified_pct; extern ulong 
srv_max_purge_lag; @@ -242,3 +236,7 @@ int innodb_show_status(THD* thd); my_bool innobase_query_caching_of_table_permitted(THD* thd, char* full_name, uint full_name_len); void innobase_release_temporary_latches(void* innobase_tid); + +void innobase_store_binlog_offset_and_flush_log(char *binlog_name,longlong offset); + +int innobase_start_trx_and_assign_read_view(THD* thd); diff --git a/sql/ha_isam.cc b/sql/ha_isam.cc index a93bb25eb77..afa7dffa5f4 100644 --- a/sql/ha_isam.cc +++ b/sql/ha_isam.cc @@ -14,9 +14,8 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -#ifdef __GNUC__ -#pragma implementation // gcc: Class implementation +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation // gcc: Class implementation #endif #include "mysql_priv.h" @@ -34,10 +33,10 @@ ** isam tables *****************************************************************************/ + const char **ha_isam::bas_ext() const { static const char *ext[]= { ".ISM",".ISD", NullS }; return ext; } - int ha_isam::open(const char *name, int mode, uint test_if_locked) { char name_buff[FN_REFLEN]; @@ -70,8 +69,8 @@ uint ha_isam::min_record_length(uint options) const int ha_isam::write_row(byte * buf) { statistic_increment(ha_write_count,&LOCK_status); - if (table->time_stamp) - update_timestamp(buf+table->time_stamp-1); + if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) + table->timestamp_field->set_time(); if (table->next_number_field && buf == table->record[0]) update_auto_increment(); return !nisam_write(file,buf) ? 0 : my_errno ? my_errno : -1; @@ -80,8 +79,8 @@ int ha_isam::write_row(byte * buf) int ha_isam::update_row(const byte * old_data, byte * new_data) { statistic_increment(ha_update_count,&LOCK_status); - if (table->time_stamp) - update_timestamp(new_data+table->time_stamp-1); + if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) + table->timestamp_field->set_time(); return !nisam_update(file,old_data,new_data) ? 0 : my_errno ? 
my_errno : -1; } @@ -201,7 +200,7 @@ void ha_isam::info(uint flag) sortkey = info.sortkey; block_size=nisam_block_size; table->keys = min(table->keys,info.keys); - table->keys_in_use= set_bits(key_map,table->keys); + table->keys_in_use.set_prefix(table->keys); table->db_options_in_use= info.options; table->db_record_offset= (table->db_options_in_use & @@ -237,11 +236,6 @@ int ha_isam::extra(enum ha_extra_function operation) return nisam_extra(file,operation); } -int ha_isam::reset(void) -{ - return nisam_extra(file,HA_EXTRA_RESET); -} - int ha_isam::external_lock(THD *thd, int lock_type) { if (!table->tmp_table) @@ -387,18 +381,21 @@ int ha_isam::create(const char *name, register TABLE *form, } +static key_range no_range= { (byte*) 0, 0, HA_READ_KEY_EXACT }; -ha_rows ha_isam::records_in_range(int inx, - const byte *start_key,uint start_key_len, - enum ha_rkey_function start_search_flag, - const byte *end_key,uint end_key_len, - enum ha_rkey_function end_search_flag) +ha_rows ha_isam::records_in_range(uint inx, key_range *min_key, + key_range *max_key) { + /* ISAM checks if 'key' pointer <> 0 to know if there is no range */ + if (!min_key) + min_key= &no_range; + if (!max_key) + max_key= &no_range; return (ha_rows) nisam_records_in_range(file, - inx, - start_key,start_key_len, - start_search_flag, - end_key,end_key_len, - end_search_flag); + (int) inx, + min_key->key, min_key->length, + min_key->flag, + max_key->key, max_key->length, + max_key->flag); } #endif /* HAVE_ISAM */ diff --git a/sql/ha_isam.h b/sql/ha_isam.h index b573d4b295c..1f9b8eb28fe 100644 --- a/sql/ha_isam.h +++ b/sql/ha_isam.h @@ -15,7 +15,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_INTERFACE #pragma interface /* gcc class implementation */ #endif @@ -32,19 +32,20 @@ class ha_isam: public handler public: ha_isam(TABLE *table) :handler(table), file(0), - int_table_flags(HA_READ_RND_SAME | HA_KEYPOS_TO_RNDPOS | HA_LASTKEY_ORDER | - HA_KEY_READ_WRONG_STR | HA_DUPP_POS | - HA_NOT_DELETE_WITH_CACHE | HA_FILE_BASED) + int_table_flags(HA_READ_RND_SAME | + HA_DUPP_POS | HA_NOT_DELETE_WITH_CACHE | HA_FILE_BASED) {} ~ha_isam() {} + ulong index_flags(uint idx, uint part, bool all_parts) const + { return HA_READ_NEXT; } // but no HA_READ_PREV here!!! 
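As a reading aid for the records_in_range() conversion above (the same rework recurs for the MyISAM, MERGE and InnoDB handlers in this changeset): the old six-argument key/length/flag form is replaced by two key_range descriptors. A minimal, hypothetical caller-side sketch, assuming the server headers for key_range, handler, ha_rows and the HA_READ_* flags; estimate_range() is not a server function:

// Sketch only: estimate rows in the closed range [lo_key, hi_key] on index
// 'idx'.  'lo_key'/'hi_key' are packed key images; a NULL key_range pointer
// means "open ended on that side", as the ha_isam adapter above shows.
static ha_rows estimate_range(handler *file, uint idx,
                              const byte *lo_key, uint lo_len,
                              const byte *hi_key, uint hi_len)
{
  key_range min_key, max_key;
  min_key.key=    lo_key;
  min_key.length= lo_len;
  min_key.flag=   HA_READ_KEY_EXACT;   // include the lower bound
  max_key.key=    hi_key;
  max_key.length= hi_len;
  max_key.flag=   HA_READ_AFTER_KEY;   // include rows equal to hi_key
  return file->records_in_range(idx, &min_key, &max_key);
}
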
const char *table_type() const { return "ISAM"; } const char *index_type(uint key_number) { return "BTREE"; } const char **bas_ext() const; ulong table_flags() const { return int_table_flags; } - uint max_record_length() const { return HA_MAX_REC_LENGTH; } - uint max_keys() const { return N_MAXKEY; } - uint max_key_parts() const { return N_MAXKEY_SEG; } - uint max_key_length() const { return N_MAX_KEY_LENGTH; } + uint max_supported_record_length() const { return HA_MAX_REC_LENGTH; } + uint max_supported_keys() const { return N_MAXKEY; } + uint max_supported_key_parts() const { return N_MAXKEY_SEG; } + uint max_supported_key_length() const { return N_MAX_KEY_LENGTH; } uint min_record_length(uint options) const; bool low_byte_first() const { return 0; } @@ -62,22 +63,17 @@ class ha_isam: public handler int index_prev(byte * buf); int index_first(byte * buf); int index_last(byte * buf); - int rnd_init(bool scan=1); + int rnd_init(bool scan); int rnd_next(byte *buf); int rnd_pos(byte * buf, byte *pos); void position(const byte *record); - my_off_t row_position() { return nisam_position(file); } void info(uint); int extra(enum ha_extra_function operation); - int reset(void); int external_lock(THD *thd, int lock_type); - ha_rows records_in_range(int inx, - const byte *start_key,uint start_key_len, - enum ha_rkey_function start_search_flag, - const byte *end_key,uint end_key_len, - enum ha_rkey_function end_search_flag); + ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key); int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info); THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type); }; + diff --git a/sql/ha_isammrg.cc b/sql/ha_isammrg.cc index 94e394e7665..c0e6f665f08 100644 --- a/sql/ha_isammrg.cc +++ b/sql/ha_isammrg.cc @@ -15,7 +15,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_IMPLEMENTATION #pragma implementation // gcc: Class implementation #endif @@ -78,8 +78,8 @@ int ha_isammrg::write_row(byte * buf) int ha_isammrg::update_row(const byte * old_data, byte * new_data) { statistic_increment(ha_update_count,&LOCK_status); - if (table->time_stamp) - update_timestamp(new_data+table->time_stamp-1); + if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) + table->timestamp_field->set_time(); return !mrg_update(file,old_data,new_data) ? 0 : my_errno ? my_errno : -1; } @@ -157,7 +157,7 @@ void ha_isammrg::info(uint flag) deleted = (ha_rows) info.deleted; data_file_length=info.data_file_length; errkey = info.errkey; - table->keys_in_use=0; // No keys yet + table->keys_in_use.clear_all(); // No keys yet table->db_options_in_use = info.options; mean_rec_length=info.reclength; block_size=0; @@ -171,11 +171,6 @@ int ha_isammrg::extra(enum ha_extra_function operation) return !mrg_extra(file,operation) ? 0 : my_errno ? my_errno : -1; } -int ha_isammrg::reset(void) -{ - return !mrg_extra(file,HA_EXTRA_RESET) ? 0 : my_errno ? my_errno : -1; -} - int ha_isammrg::external_lock(THD *thd, int lock_type) { return !mrg_lock_database(file,lock_type) ? 0 : my_errno ? 
my_errno : -1; diff --git a/sql/ha_isammrg.h b/sql/ha_isammrg.h index e5846d20212..82a2e312ca3 100644 --- a/sql/ha_isammrg.h +++ b/sql/ha_isammrg.h @@ -15,7 +15,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_INTERFACE #pragma interface /* gcc class implementation */ #endif @@ -32,14 +32,12 @@ class ha_isammrg: public handler ~ha_isammrg() {} const char *table_type() const { return "MRG_ISAM"; } const char **bas_ext() const; - ulong table_flags() const { return (HA_READ_RND_SAME | HA_KEYPOS_TO_RNDPOS | + ulong table_flags() const { return (HA_READ_RND_SAME | HA_REC_NOT_IN_SEQ | HA_FILE_BASED); } - ulong index_flags(uint idx) const { return HA_NOT_READ_PREFIX_LAST; } + ulong index_flags(uint idx, uint part, bool all_parts) const + { DBUG_ASSERT(0); return 0; } - uint max_record_length() const { return HA_MAX_REC_LENGTH; } - uint max_keys() const { return 0; } - uint max_key_parts() const { return 0; } - uint max_key_length() const { return 0; } + uint max_supported_keys() const { return 0; } bool low_byte_first() const { return 0; } uint min_record_length(uint options) const; @@ -56,17 +54,16 @@ class ha_isammrg: public handler int index_prev(byte * buf); int index_first(byte * buf); int index_last(byte * buf); - int rnd_init(bool scan=1); + int rnd_init(bool scan); int rnd_next(byte *buf); int rnd_pos(byte * buf, byte *pos); void position(const byte *record); - my_off_t row_position() { return mrg_position(file); } void info(uint); int extra(enum ha_extra_function operation); - int reset(void); int external_lock(THD *thd, int lock_type); uint lock_count(void) const; int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info); THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type); + uint8 table_cache_type() { return HA_CACHE_TBL_NOCACHE; } }; diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc index 63a5508872e..9b84e48e970 100644 --- a/sql/ha_myisam.cc +++ b/sql/ha_myisam.cc @@ -1,4 +1,4 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000,2004 MySQL AB & MySQL Finland AB & TCX DataKonsult AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -15,7 +15,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_IMPLEMENTATION #pragma implementation // gcc: Class implementation #endif @@ -28,6 +28,7 @@ #include "../srclib/myisam/myisamdef.h" #else #include "../myisam/myisamdef.h" +#include "../myisam/rt_index.h" #endif ulong myisam_recover_options= HA_RECOVER_NONE; @@ -36,7 +37,13 @@ ulong myisam_recover_options= HA_RECOVER_NONE; const char *myisam_recover_names[] = { "DEFAULT", "BACKUP", "FORCE", "QUICK", NullS}; TYPELIB myisam_recover_typelib= {array_elements(myisam_recover_names)-1,"", - myisam_recover_names}; + myisam_recover_names, NULL}; + +const char *myisam_stats_method_names[] = {"nulls_unequal", "nulls_equal", + "nulls_ignored", NullS}; +TYPELIB myisam_stats_method_typelib= { + array_elements(myisam_stats_method_names) - 1, "", + myisam_stats_method_names, NULL}; /***************************************************************************** @@ -49,36 +56,36 @@ static void mi_check_print_msg(MI_CHECK *param, const char* msg_type, const char *fmt, va_list args) { THD* thd = (THD*)param->thd; - String* packet = &thd->packet; - uint length; + Protocol *protocol= thd->protocol; + uint 
length, msg_length; char msgbuf[MI_MAX_MSG_BUF]; char name[NAME_LEN*2+2]; - packet->length(0); - msgbuf[0] = 0; // healthy paranoia ? - my_vsnprintf(msgbuf, sizeof(msgbuf), fmt, args); + msg_length= my_vsnprintf(msgbuf, sizeof(msgbuf), fmt, args); msgbuf[sizeof(msgbuf) - 1] = 0; // healthy paranoia DBUG_PRINT(msg_type,("message: %s",msgbuf)); - if (thd->net.vio == 0) + if (!thd->vio_ok()) { sql_print_error(msgbuf); return; } - if (param->testflag & (T_CREATE_MISSING_KEYS | T_SAFE_REPAIR | T_AUTO_REPAIR)) + + if (param->testflag & (T_CREATE_MISSING_KEYS | T_SAFE_REPAIR | + T_AUTO_REPAIR)) { my_message(ER_NOT_KEYFILE,msgbuf,MYF(MY_WME)); return; } length=(uint) (strxmov(name, param->db_name,".",param->table_name,NullS) - name); - net_store_data(packet, name, length); - net_store_data(packet, param->op_name); - net_store_data(packet, msg_type); - - net_store_data(packet, msgbuf); - if (my_net_write(&thd->net, (char*)thd->packet.ptr(), thd->packet.length())) + protocol->prepare_for_resend(); + protocol->store(name, length, system_charset_info); + protocol->store(param->op_name, system_charset_info); + protocol->store(msg_type, system_charset_info); + protocol->store(msgbuf, msg_length, system_charset_info); + if (protocol->write()) sql_print_error("Failed on my_net_write, writing to stderr instead: %s\n", msgbuf); return; @@ -86,6 +93,11 @@ static void mi_check_print_msg(MI_CHECK *param, const char* msg_type, extern "C" { +volatile my_bool *killed_ptr(MI_CHECK *param) +{ + return &(((THD *)(param->thd))->killed); +} + void mi_check_print_error(MI_CHECK *param, const char *fmt,...) { param->error_printed|=1; @@ -122,11 +134,16 @@ const char **ha_myisam::bas_ext() const const char *ha_myisam::index_type(uint key_number) { - return ((table->key_info[key_number].flags & HA_FULLTEXT) ? + return ((table->key_info[key_number].flags & HA_FULLTEXT) ? "FULLTEXT" : + (table->key_info[key_number].flags & HA_SPATIAL) ? + "SPATIAL" : + (table->key_info[key_number].algorithm == HA_KEY_ALG_RTREE) ? + "RTREE" : "BTREE"); } +#ifdef HAVE_REPLICATION int ha_myisam::net_read_dump(NET* net) { int data_fd = file->dfile; @@ -151,7 +168,6 @@ int ha_myisam::net_read_dump(NET* net) goto err; } } - err: return error; } @@ -208,6 +224,7 @@ err: my_free((gptr) buf, MYF(0)); return error; } +#endif /* HAVE_REPLICATION */ /* Name is here without an extension */ @@ -215,7 +232,7 @@ int ha_myisam::open(const char *name, int mode, uint test_if_locked) { if (!(file=mi_open(name, mode, test_if_locked))) return (my_errno ? 
my_errno : -1); - + if (test_if_locked & (HA_OPEN_IGNORE_IF_LOCKED | HA_OPEN_TMP_TABLE)) VOID(mi_extra(file, HA_EXTRA_NO_WAIT_LOCK, 0)); info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST); @@ -223,6 +240,8 @@ int ha_myisam::open(const char *name, int mode, uint test_if_locked) VOID(mi_extra(file, HA_EXTRA_WAIT_LOCK, 0)); if (!table->db_record_offset) int_table_flags|=HA_REC_NOT_IN_SEQ; + if (file->s->options & (HA_OPTION_CHECKSUM | HA_OPTION_COMPRESS_RECORD)) + int_table_flags|=HA_HAS_CHECKSUM; return (0); } @@ -238,9 +257,8 @@ int ha_myisam::write_row(byte * buf) statistic_increment(ha_write_count,&LOCK_status); /* If we have a timestamp column, update it to the current time */ - - if (table->time_stamp) - update_timestamp(buf+table->time_stamp-1); + if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) + table->timestamp_field->set_time(); /* If we have an auto_increment column and we are writing a changed row @@ -266,6 +284,7 @@ int ha_myisam::check(THD* thd, HA_CHECK_OPT* check_opt) param.db_name = table->table_cache_key; param.table_name = table->table_name; param.testflag = check_opt->flags | T_CHECK | T_SILENT; + param.stats_method= (enum_mi_stats_method)thd->variables.myisam_stats_method; if (!(table->db_stat & HA_READ_ONLY)) param.testflag|= T_STATISTICS; @@ -324,7 +343,7 @@ int ha_myisam::check(THD* thd, HA_CHECK_OPT* check_opt) HA_STATUS_CONST); } } - else if (!mi_is_crashed(file)) + else if (!mi_is_crashed(file) && !thd->killed) { mi_mark_crashed(file); file->update |= HA_STATE_CHANGED | HA_STATE_ROW_CHANGED; @@ -355,6 +374,7 @@ int ha_myisam::analyze(THD *thd, HA_CHECK_OPT* check_opt) param.testflag=(T_FAST | T_CHECK | T_SILENT | T_STATISTICS | T_DONT_CHECK_CHECKSUM); param.using_global_keycache = 1; + param.stats_method= (enum_mi_stats_method)thd->variables.myisam_stats_method; if (!(share->state.changed & STATE_NOT_ANALYZED)) return HA_ADMIN_ALREADY_DONE; @@ -366,7 +386,7 @@ int ha_myisam::analyze(THD *thd, HA_CHECK_OPT* check_opt) error=update_state_info(¶m,file,UPDATE_STAT); pthread_mutex_unlock(&share->intern_lock); } - else if (!mi_is_crashed(file)) + else if (!mi_is_crashed(file) && !thd->killed) mi_mark_crashed(file); return error ? 
HA_ADMIN_CORRUPT : HA_ADMIN_OK; } @@ -375,7 +395,7 @@ int ha_myisam::analyze(THD *thd, HA_CHECK_OPT* check_opt) int ha_myisam::restore(THD* thd, HA_CHECK_OPT *check_opt) { HA_CHECK_OPT tmp_check_opt; - char* backup_dir = thd->lex.backup_dir; + char* backup_dir= thd->lex->backup_dir; char src_path[FN_REFLEN], dst_path[FN_REFLEN]; char* table_name = table->real_name; int error; @@ -415,7 +435,7 @@ int ha_myisam::restore(THD* thd, HA_CHECK_OPT *check_opt) int ha_myisam::backup(THD* thd, HA_CHECK_OPT *check_opt) { - char* backup_dir = thd->lex.backup_dir; + char* backup_dir= thd->lex->backup_dir; char src_path[FN_REFLEN], dst_path[FN_REFLEN]; char* table_name = table->real_name; int error; @@ -497,16 +517,16 @@ int ha_myisam::repair(THD* thd, HA_CHECK_OPT *check_opt) (uint) (T_RETRY_WITHOUT_QUICK | T_QUICK))) { param.testflag&= ~T_RETRY_WITHOUT_QUICK; - sql_print_error("Note: Retrying repair of: '%s' without quick", - table->path); + sql_print_information("Retrying repair of: '%s' without quick", + table->path); continue; } param.testflag&= ~T_QUICK; if ((param.testflag & T_REP_BY_SORT)) { param.testflag= (param.testflag & ~T_REP_BY_SORT) | T_REP; - sql_print_error("Note: Retrying repair of: '%s' with keycache", - table->path); + sql_print_information("Retrying repair of: '%s' with keycache", + table->path); continue; } break; @@ -515,10 +535,10 @@ int ha_myisam::repair(THD* thd, HA_CHECK_OPT *check_opt) !(check_opt->flags & T_VERY_SILENT)) { char llbuff[22],llbuff2[22]; - sql_print_error("Note: Found %s of %s rows when repairing '%s'", - llstr(file->state->records, llbuff), - llstr(start_records, llbuff2), - table->path); + sql_print_information("Found %s of %s rows when repairing '%s'", + llstr(file->state->records, llbuff), + llstr(start_records, llbuff2), + table->path); } return error; } @@ -562,7 +582,7 @@ int ha_myisam::repair(THD *thd, MI_CHECK ¶m, bool optimize) param.tmpfile_createflag = O_RDWR | O_TRUNC; param.using_global_keycache = 1; param.thd=thd; - param.tmpdir=mysql_tmpdir; + param.tmpdir=&mysql_tmpdir_list; param.out_flag=0; strmov(fixed_name,file->filename); @@ -685,73 +705,231 @@ int ha_myisam::repair(THD *thd, MI_CHECK ¶m, bool optimize) /* - Deactive all not unique index that can be recreated fast + Assign table indexes to a specific key cache. 
+*/ - SYNOPSIS - deactivate_non_unique_index() - rows Rows to be inserted - 0 if we don't know - HA_POS_ERROR if we want to disable all keys +int ha_myisam::assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt) +{ + KEY_CACHE *new_key_cache= check_opt->key_cache; + const char *errmsg= 0; + int error= HA_ADMIN_OK; + ulonglong map= ~(ulonglong) 0; + TABLE_LIST *table_list= table->pos_in_table_list; + DBUG_ENTER("ha_myisam::assign_to_keycache"); + + /* Check validity of the index references */ + if (table_list->use_index) + { + /* We only come here when the user did specify an index map */ + key_map kmap; + if (get_key_map_from_key_list(&kmap, table, table_list->use_index)) + { + errmsg= thd->net.last_error; + error= HA_ADMIN_FAILED; + goto err; + } + map= kmap.to_ulonglong(); + } + + if ((error= mi_assign_to_key_cache(file, map, new_key_cache))) + { + char buf[80]; + my_snprintf(buf, sizeof(buf), + "Failed to flush to index file (errno: %d)", error); + errmsg= buf; + error= HA_ADMIN_CORRUPT; + } + + err: + if (error != HA_ADMIN_OK) + { + /* Send error to user */ + MI_CHECK param; + myisamchk_init(¶m); + param.thd= thd; + param.op_name= (char*)"assign_to_keycache"; + param.db_name= table->table_cache_key; + param.table_name= table->table_name; + param.testflag= 0; + mi_check_print_error(¶m, errmsg); + } + DBUG_RETURN(error); +} + + +/* + Preload pages of the index file for a table into the key cache. */ -void ha_myisam::deactivate_non_unique_index(ha_rows rows) +int ha_myisam::preload_keys(THD* thd, HA_CHECK_OPT *check_opt) { - MYISAM_SHARE* share = file->s; - if (share->state.key_map == ((ulonglong) 1L << share->base.keys)-1) + int error; + const char *errmsg; + ulonglong map= ~(ulonglong) 0; + TABLE_LIST *table_list= table->pos_in_table_list; + my_bool ignore_leaves= table_list->ignore_leaves; + + DBUG_ENTER("ha_myisam::preload_keys"); + + /* Check validity of the index references */ + if (table_list->use_index) { - if (!(specialflag & SPECIAL_SAFE_MODE)) + key_map kmap; + get_key_map_from_key_list(&kmap, table, table_list->use_index); + if (kmap.is_set_all()) { - if (rows == HA_POS_ERROR) - mi_extra(file, HA_EXTRA_NO_KEYS, 0); - else - { - /* - Only disable old index if the table was empty and we are inserting - a lot of rows. - We should not do this for only a few rows as this is slower and - we don't want to update the key statistics based of only a few rows. 
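assign_to_keycache() above and preload_keys() just after it both reduce an optional USE INDEX list to the plain ulonglong bitmap that the mi_* calls expect. A condensed, hypothetical helper showing only that conversion (error reporting elided; key_list_to_bitmap() is not a server function):

// Hypothetical condensation of the shared USE INDEX handling.
static int key_list_to_bitmap(TABLE *table, TABLE_LIST *table_list,
                              ulonglong *map)
{
  *map= ~(ulonglong) 0;                     // default: operate on all indexes
  if (table_list->use_index)                // user named specific indexes
  {
    key_map kmap;
    if (get_key_map_from_key_list(&kmap, table, table_list->use_index))
      return HA_ADMIN_FAILED;               // unknown index name
    if (!kmap.is_clear_all())
      *map= kmap.to_ulonglong();            // engine calls take a raw bitmap
  }
  return HA_ADMIN_OK;
}
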
- */ - if (file->state->records == 0 && - (!rows || rows >= MI_MIN_ROWS_TO_USE_BULK_INSERT)) - mi_disable_non_unique_index(file,rows); - else - { - mi_init_bulk_insert(file, - current_thd->variables.bulk_insert_buff_size, - rows); - table->bulk_insert= 1; - } - } + errmsg= thd->net.last_error; + error= HA_ADMIN_FAILED; + goto err; } - enable_activate_all_index=1; - info(HA_STATUS_CONST); // Read new key info + if (!kmap.is_clear_all()) + map= kmap.to_ulonglong(); + } + + mi_extra(file, HA_EXTRA_PRELOAD_BUFFER_SIZE, + (void *) &thd->variables.preload_buff_size); + + if ((error= mi_preload(file, map, ignore_leaves))) + { + switch (error) { + case HA_ERR_NON_UNIQUE_BLOCK_SIZE: + errmsg= "Indexes use different block sizes"; + break; + case HA_ERR_OUT_OF_MEM: + errmsg= "Failed to allocate buffer"; + break; + default: + char buf[ERRMSGSIZE+20]; + my_snprintf(buf, ERRMSGSIZE, + "Failed to read from index file (errno: %d)", my_errno); + errmsg= buf; + } + error= HA_ADMIN_FAILED; + goto err; + } + + DBUG_RETURN(HA_ADMIN_OK); + + err: + { + MI_CHECK param; + myisamchk_init(¶m); + param.thd= thd; + param.op_name= (char*)"preload_keys"; + param.db_name= table->table_cache_key; + param.table_name= table->table_name; + param.testflag= 0; + mi_check_print_error(¶m, errmsg); + DBUG_RETURN(error); + } +} + + +/* + Disable indexes, making it persistent if requested. + + SYNOPSIS + disable_indexes() + mode mode of operation: + HA_KEY_SWITCH_NONUNIQ disable all non-unique keys + HA_KEY_SWITCH_ALL disable all keys + HA_KEY_SWITCH_NONUNIQ_SAVE dis. non-uni. and make persistent + HA_KEY_SWITCH_ALL_SAVE dis. all keys and make persistent + + IMPLEMENTATION + HA_KEY_SWITCH_NONUNIQ is not implemented. + HA_KEY_SWITCH_ALL_SAVE is not implemented. + + RETURN + 0 ok + HA_ERR_WRONG_COMMAND mode not implemented. +*/ + +int ha_myisam::disable_indexes(uint mode) +{ + int error; + + if (mode == HA_KEY_SWITCH_ALL) + { + /* call a storage engine function to switch the key map */ + error= mi_disable_indexes(file); + } + else if (mode == HA_KEY_SWITCH_NONUNIQ_SAVE) + { + mi_extra(file, HA_EXTRA_NO_KEYS, 0); + info(HA_STATUS_CONST); // Read new key info + error= 0; } else - enable_activate_all_index=0; + { + /* mode not implemented */ + error= HA_ERR_WRONG_COMMAND; + } + return error; } -bool ha_myisam::activate_all_index(THD *thd) +/* + Enable indexes, making it persistent if requested. + + SYNOPSIS + enable_indexes() + mode mode of operation: + HA_KEY_SWITCH_NONUNIQ enable all non-unique keys + HA_KEY_SWITCH_ALL enable all keys + HA_KEY_SWITCH_NONUNIQ_SAVE en. non-uni. and make persistent + HA_KEY_SWITCH_ALL_SAVE en. all keys and make persistent + + DESCRIPTION + Enable indexes, which might have been disabled by disable_index() before. + The modes without _SAVE work only if both data and indexes are empty, + since the MyISAM repair would enable them persistently. + To be sure in these cases, call handler::delete_all_rows() before. + + IMPLEMENTATION + HA_KEY_SWITCH_NONUNIQ is not implemented. + HA_KEY_SWITCH_ALL_SAVE is not implemented. + + RETURN + 0 ok + !=0 Error, among others: + HA_ERR_CRASHED data or index is non-empty. Delete all rows and retry. + HA_ERR_WRONG_COMMAND mode not implemented. 
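A caller-side view of the mode handling documented above may help. The sketch below assumes disable_indexes()/enable_indexes() are virtual on the handler base class (as these ha_myisam overrides suggest) and that only the modes listed in the comments are honoured; switch_keys_for_load() is hypothetical:

// Hypothetical bracket, e.g. around ALTER TABLE ... DISABLE/ENABLE KEYS.
static int switch_keys_for_load(handler *file, bool before_load)
{
  if (before_load)                                     // persistently disable
    return file->disable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE); // non-unique keys
  int error= file->enable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE); // rebuild by repair
  if (error == HA_ERR_WRONG_COMMAND)
    error= 0;                     // engine keeps its keys enabled at all times
  return error;
}
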
+*/ + +int ha_myisam::enable_indexes(uint mode) { - int error=0; - MI_CHECK param; - MYISAM_SHARE* share = file->s; - DBUG_ENTER("activate_all_index"); + int error; - mi_end_bulk_insert(file); - table->bulk_insert= 0; - if (enable_activate_all_index && - share->state.key_map != set_bits(ulonglong, share->base.keys)) + if (file->s->state.key_map == set_bits(ulonglong, file->s->base.keys)) + { + /* All indexes are enabled already. */ + return 0; + } + + if (mode == HA_KEY_SWITCH_ALL) { + error= mi_enable_indexes(file); + /* + Do not try to repair on error, + as this could make the enabled state persistent, + but mode==HA_KEY_SWITCH_ALL forbids it. + */ + } + else if (mode == HA_KEY_SWITCH_NONUNIQ_SAVE) + { + THD *thd=current_thd; + MI_CHECK param; const char *save_proc_info=thd->proc_info; thd->proc_info="Creating index"; myisamchk_init(¶m); param.op_name = (char*) "recreating_index"; param.testflag = (T_SILENT | T_REP_BY_SORT | T_QUICK | - T_CREATE_MISSING_KEYS); + T_CREATE_MISSING_KEYS); param.myf_rw&= ~MY_WAIT_IF_FULL; param.sort_buffer_length= thd->variables.myisam_sort_buff_size; - param.tmpdir=mysql_tmpdir; + param.stats_method= (enum_mi_stats_method)thd->variables.myisam_stats_method; + param.tmpdir=&mysql_tmpdir_list; if ((error= (repair(thd,param,0) != HA_ADMIN_OK)) && param.retry_repair) { sql_print_warning("Warning: Enabling keys got errno %d, retrying", @@ -763,8 +941,105 @@ bool ha_myisam::activate_all_index(THD *thd) thd->proc_info=save_proc_info; } else - enable_activate_all_index=1; - DBUG_RETURN(error); + { + /* mode not implemented */ + error= HA_ERR_WRONG_COMMAND; + } + return error; +} + + +/* + Test if indexes are disabled. + + + SYNOPSIS + indexes_are_disabled() + no parameters + + + RETURN + 0 indexes are not disabled + 1 all indexes are disabled + [2 non-unique indexes are disabled - NOT YET IMPLEMENTED] +*/ + +int ha_myisam::indexes_are_disabled(void) +{ + + return mi_indexes_are_disabled(file); +} + + +/* + prepare for a many-rows insert operation + e.g. - disable indexes (if they can be recreated fast) or + activate special bulk-insert optimizations + + SYNOPSIS + start_bulk_insert(rows) + rows Rows to be inserted + 0 if we don't know + + NOTICE + Do not forget to call end_bulk_insert() later! +*/ + +void ha_myisam::start_bulk_insert(ha_rows rows) +{ + DBUG_ENTER("ha_myisam::start_bulk_insert"); + THD *thd=current_thd; + ulong size= min(thd->variables.read_buff_size, table->avg_row_length*rows); + DBUG_PRINT("info",("start_bulk_insert: rows %lu size %lu", + (ulong) rows, size)); + + /* don't enable row cache if too few rows */ + if (! rows || (rows > MI_MIN_ROWS_TO_USE_WRITE_CACHE)) + mi_extra(file, HA_EXTRA_WRITE_CACHE, (void*) &size); + + can_enable_indexes= (file->s->state.key_map == + set_bits(ulonglong, file->s->base.keys)); + + if (!(specialflag & SPECIAL_SAFE_MODE)) + { + /* + Only disable old index if the table was empty and we are inserting + a lot of rows. + We should not do this for only a few rows as this is slower and + we don't want to update the key statistics based of only a few rows. + */ + if (file->state->records == 0 && can_enable_indexes && + (!rows || rows >= MI_MIN_ROWS_TO_DISABLE_INDEXES)) + mi_disable_non_unique_index(file,rows); + else + if (!file->bulk_insert && + (!rows || rows >= MI_MIN_ROWS_TO_USE_BULK_INSERT)) + { + mi_init_bulk_insert(file, thd->variables.bulk_insert_buff_size, rows); + } + } + DBUG_VOID_RETURN; +} + +/* + end special bulk-insert optimizations, + which have been activated by start_bulk_insert(). 
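start_bulk_insert()/end_bulk_insert() above replace deactivate_non_unique_index()/activate_all_index(). A hedged sketch of the bracket as a multi-row insert would drive it; bulk_load() and the single reused record buffer are illustrative only (a real caller prepares a fresh row each iteration and may pass rows == 0 when the count is unknown):

// Illustrative bulk-load bracket around repeated write_row() calls.
static int bulk_load(handler *file, byte *record, ha_rows rows)
{
  int error= 0;
  file->start_bulk_insert(rows);           // row cache, key switch or bulk tree
  for (ha_rows i= 0; !error && i < rows; i++)
    error= file->write_row(record);        // real code fills 'record' per row
  int end_error= file->end_bulk_insert();  // flush caches, re-enable keys
  return error ? error : end_error;
}
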
+ + SYNOPSIS + end_bulk_insert() + no arguments + + RETURN + 0 OK + != 0 Error +*/ + +int ha_myisam::end_bulk_insert() +{ + mi_end_bulk_insert(file); + int err=mi_extra(file, HA_EXTRA_NO_CACHE, 0); + return err ? err : can_enable_indexes ? + enable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE) : 0; } @@ -772,18 +1047,28 @@ bool ha_myisam::check_and_repair(THD *thd) { int error=0; int marked_crashed; + char *old_query; + uint old_query_length; HA_CHECK_OPT check_opt; - DBUG_ENTER("ha_myisam::auto_check_and_repair"); + DBUG_ENTER("ha_myisam::check_and_repair"); check_opt.init(); check_opt.flags= T_MEDIUM | T_AUTO_REPAIR; // Don't use quick if deleted rows if (!file->state->del && (myisam_recover_options & HA_RECOVER_QUICK)) check_opt.flags|=T_QUICK; - sql_print_error("Warning: Checking table: '%s'",table->path); - if ((marked_crashed=mi_is_crashed(file)) || check(thd, &check_opt)) + sql_print_warning("Checking table: '%s'",table->path); + + old_query= thd->query; + old_query_length= thd->query_length; + pthread_mutex_lock(&LOCK_thread_count); + thd->query= table->real_name; + thd->query_length= strlen(table->real_name); + pthread_mutex_unlock(&LOCK_thread_count); + + if ((marked_crashed= mi_is_crashed(file)) || check(thd, &check_opt)) { - sql_print_error("Warning: Recovering table: '%s'",table->path); + sql_print_warning("Recovering table: '%s'",table->path); check_opt.flags= ((myisam_recover_options & HA_RECOVER_BACKUP ? T_BACKUP_DATA : 0) | (marked_crashed ? 0 : T_QUICK) | @@ -792,6 +1077,10 @@ bool ha_myisam::check_and_repair(THD *thd) if (repair(thd, &check_opt)) error=1; } + pthread_mutex_lock(&LOCK_thread_count); + thd->query= old_query; + thd->query_length= old_query_length; + pthread_mutex_unlock(&LOCK_thread_count); DBUG_RETURN(error); } @@ -804,8 +1093,8 @@ bool ha_myisam::is_crashed() const int ha_myisam::update_row(const byte * old_data, byte * new_data) { statistic_increment(ha_update_count,&LOCK_status); - if (table->time_stamp) - update_timestamp(new_data+table->time_stamp-1); + if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) + table->timestamp_field->set_time(); return mi_update(file,old_data,new_data); } @@ -818,6 +1107,7 @@ int ha_myisam::delete_row(const byte * buf) int ha_myisam::index_read(byte * buf, const byte * key, uint key_len, enum ha_rkey_function find_flag) { + DBUG_ASSERT(inited==INDEX); statistic_increment(ha_read_key_count,&LOCK_status); int error=mi_rkey(file,buf,active_index, key, key_len, find_flag); table->status=error ? STATUS_NOT_FOUND: 0; @@ -835,6 +1125,7 @@ int ha_myisam::index_read_idx(byte * buf, uint index, const byte * key, int ha_myisam::index_read_last(byte * buf, const byte * key, uint key_len) { + DBUG_ASSERT(inited==INDEX); statistic_increment(ha_read_key_count,&LOCK_status); int error=mi_rkey(file,buf,active_index, key, key_len, HA_READ_PREFIX_LAST); table->status=error ? STATUS_NOT_FOUND: 0; @@ -843,6 +1134,7 @@ int ha_myisam::index_read_last(byte * buf, const byte * key, uint key_len) int ha_myisam::index_next(byte * buf) { + DBUG_ASSERT(inited==INDEX); statistic_increment(ha_read_next_count,&LOCK_status); int error=mi_rnext(file,buf,active_index); table->status=error ? STATUS_NOT_FOUND: 0; @@ -851,6 +1143,7 @@ int ha_myisam::index_next(byte * buf) int ha_myisam::index_prev(byte * buf) { + DBUG_ASSERT(inited==INDEX); statistic_increment(ha_read_prev_count,&LOCK_status); int error=mi_rprev(file,buf, active_index); table->status=error ? 
STATUS_NOT_FOUND: 0; @@ -859,6 +1152,7 @@ int ha_myisam::index_prev(byte * buf) int ha_myisam::index_first(byte * buf) { + DBUG_ASSERT(inited==INDEX); statistic_increment(ha_read_first_count,&LOCK_status); int error=mi_rfirst(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; @@ -867,6 +1161,7 @@ int ha_myisam::index_first(byte * buf) int ha_myisam::index_last(byte * buf) { + DBUG_ASSERT(inited==INDEX); statistic_increment(ha_read_last_count,&LOCK_status); int error=mi_rlast(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; @@ -877,6 +1172,7 @@ int ha_myisam::index_next_same(byte * buf, const byte *key __attribute__((unused)), uint length __attribute__((unused))) { + DBUG_ASSERT(inited==INDEX); statistic_increment(ha_read_next_count,&LOCK_status); int error=mi_rnext_same(file,buf); table->status=error ? STATUS_NOT_FOUND: 0; @@ -943,9 +1239,10 @@ void ha_myisam::info(uint flag) ref_length=info.reflength; table->db_options_in_use = info.options; block_size=myisam_block_size; - table->keys_in_use= (set_bits(key_map, table->keys) & - (key_map) info.key_map); - table->keys_for_keyread= table->keys_in_use & ~table->read_only_keys; + table->keys_in_use.set_prefix(table->keys); + table->keys_in_use.intersect(info.key_map); + table->keys_for_keyread= table->keys_in_use; + table->keys_for_keyread.subtract(table->read_only_keys); table->db_record_offset=info.record_offset; if (table->key_parts) memcpy((char*) table->key_info[0].rec_per_key, @@ -996,12 +1293,6 @@ int ha_myisam::extra_opt(enum ha_extra_function operation, ulong cache_size) return mi_extra(file, operation, (void*) &cache_size); } - -int ha_myisam::reset(void) -{ - return mi_extra(file, HA_EXTRA_RESET, 0); -} - int ha_myisam::delete_all_rows() { return mi_delete_all_rows(file); @@ -1059,7 +1350,7 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, KEY *pos; MI_KEYDEF *keydef; MI_COLUMNDEF *recinfo,*recinfo_pos; - MI_KEYSEG *keyseg; + HA_KEYSEG *keyseg; uint options=table_arg->db_options_in_use; DBUG_ENTER("ha_myisam::create"); @@ -1069,14 +1360,17 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, &keydef, table_arg->keys*sizeof(MI_KEYDEF), &keyseg, ((table_arg->key_parts + table_arg->keys) * - sizeof(MI_KEYSEG)), + sizeof(HA_KEYSEG)), NullS))) DBUG_RETURN(HA_ERR_OUT_OF_MEM); pos=table_arg->key_info; for (i=0; i < table_arg->keys ; i++, pos++) { - keydef[i].flag= (pos->flags & (HA_NOSAME | HA_FULLTEXT)); + keydef[i].flag= (pos->flags & (HA_NOSAME | HA_FULLTEXT | HA_SPATIAL)); + keydef[i].key_alg= pos->algorithm == HA_KEY_ALG_UNDEF ? + (pos->flags & HA_SPATIAL ? 
HA_KEY_ALG_RTREE : HA_KEY_ALG_BTREE) : + pos->algorithm; keydef[i].seg=keyseg; keydef[i].keysegs=pos->key_parts; for (j=0 ; j < pos->key_parts ; j++) @@ -1111,7 +1405,7 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, keydef[i].seg[j].start= pos->key_part[j].offset; keydef[i].seg[j].length= pos->key_part[j].length; keydef[i].seg[j].bit_start=keydef[i].seg[j].bit_end=0; - keydef[i].seg[j].language=MY_CHARSET_CURRENT; + keydef[i].seg[j].language = field->charset()->number; if (field->null_ptr) { @@ -1124,7 +1418,8 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, keydef[i].seg[j].null_bit=0; keydef[i].seg[j].null_pos=0; } - if (field->type() == FIELD_TYPE_BLOB) + if (field->type() == FIELD_TYPE_BLOB || + field->type() == FIELD_TYPE_GEOMETRY) { keydef[i].seg[j].flag|=HA_BLOB_PART; /* save number of bytes used to pack length */ @@ -1259,8 +1554,8 @@ longlong ha_myisam::get_auto_increment() return auto_increment_value; } - if (table->bulk_insert) - mi_flush_bulk_insert(file, table->next_number_index); + /* it's safe to call the following if bulk_insert isn't on */ + mi_flush_bulk_insert(file, table->next_number_index); longlong nr; int error; @@ -1286,19 +1581,15 @@ longlong ha_myisam::get_auto_increment() SYNOPSIS records_in_range() inx Index to use - start_key Start of range. Null pointer if from first key - start_key_len Length of start key - start_search_flag Flag if start key should be included or not - end_key End of range. Null pointer if to last key - end_key_len Length of end key - end_search_flag Flag if start key should be included or not + min_key Start of range. Null pointer if from first key + max_key End of range. Null pointer if to last key NOTES - start_search_flag can have one of the following values: + min_key.flag can have one of the following values: HA_READ_KEY_EXACT Include the key in the range HA_READ_AFTER_KEY Don't include key in range - end_search_flag can have one of the following values: + max_key.flag can have one of the following values: HA_READ_BEFORE_KEY Don't include key in range HA_READ_AFTER_KEY Include all 'end_key' values in the range @@ -1309,18 +1600,10 @@ longlong ha_myisam::get_auto_increment() the range. */ -ha_rows ha_myisam::records_in_range(int inx, - const byte *start_key,uint start_key_len, - enum ha_rkey_function start_search_flag, - const byte *end_key,uint end_key_len, - enum ha_rkey_function end_search_flag) +ha_rows ha_myisam::records_in_range(uint inx, key_range *min_key, + key_range *max_key) { - return (ha_rows) mi_records_in_range(file, - inx, - start_key,start_key_len, - start_search_flag, - end_key,end_key_len, - end_search_flag); + return (ha_rows) mi_records_in_range(file, (int) inx, min_key, max_key); } @@ -1338,3 +1621,9 @@ int ha_myisam::ft_read(byte * buf) table->status=error ? 
STATUS_NOT_FOUND: 0; return error; } + +uint ha_myisam::checksum() const +{ + return (uint)file->s->state.checksum; +} + diff --git a/sql/ha_myisam.h b/sql/ha_myisam.h index 212850d8f28..b256d4777f9 100644 --- a/sql/ha_myisam.h +++ b/sql/ha_myisam.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000,2004 MySQL AB & MySQL Finland AB & TCX DataKonsult AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -15,7 +15,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_INTERFACE #pragma interface /* gcc class implementation */ #endif @@ -37,34 +37,34 @@ extern ulong myisam_recover_options; class ha_myisam: public handler { MI_INFO *file; - uint int_table_flags; + ulong int_table_flags; char *data_file_name, *index_file_name; - bool enable_activate_all_index; + bool can_enable_indexes; int repair(THD *thd, MI_CHECK ¶m, bool optimize); public: ha_myisam(TABLE *table): handler(table), file(0), - int_table_flags(HA_READ_RND_SAME | HA_KEYPOS_TO_RNDPOS | HA_LASTKEY_ORDER | - HA_NULL_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER | - HA_DUPP_POS | HA_BLOB_KEY | HA_AUTO_PART_KEY | - HA_FILE_BASED), - enable_activate_all_index(1) + int_table_flags(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER | + HA_DUPP_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY | + HA_FILE_BASED | HA_CAN_GEOMETRY | HA_READ_RND_SAME | + HA_CAN_INSERT_DELAYED), + can_enable_indexes(1) {} ~ha_myisam() {} const char *table_type() const { return "MyISAM"; } const char *index_type(uint key_number); const char **bas_ext() const; ulong table_flags() const { return int_table_flags; } - ulong index_flags(uint inx) const + ulong index_flags(uint inx, uint part, bool all_parts) const { - ulong flags=(HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER); - return (flags | ((table->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ? - 0 : HA_KEY_READ_ONLY)); + return ((table->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ? 
+ 0 : HA_READ_NEXT | HA_READ_PREV | HA_READ_RANGE | + HA_READ_ORDER | HA_KEYREAD_ONLY); } - uint max_record_length() const { return HA_MAX_REC_LENGTH; } - uint max_keys() const { return MI_MAX_KEY; } - uint max_key_parts() const { return MAX_REF_PARTS; } - uint max_key_length() const { return MI_MAX_KEY_LENGTH; } + uint max_supported_keys() const { return MI_MAX_KEY; } + uint max_supported_key_length() const { return MI_MAX_KEY_LENGTH; } + uint max_supported_key_part_length() const { return MI_MAX_KEY_LENGTH; } + uint checksum() const; int open(const char *name, int mode, uint test_if_locked); int close(void); @@ -81,7 +81,6 @@ class ha_myisam: public handler int index_first(byte * buf); int index_last(byte * buf); int index_next_same(byte *buf, const byte *key, uint keylen); - int index_end() { ft_handler=NULL; return 0; } int ft_init() { if (!ft_handler) @@ -89,29 +88,29 @@ class ha_myisam: public handler ft_handler->please->reinit_search(ft_handler); return 0; } - FT_INFO *ft_init_ext(uint mode, uint inx,const byte *key, uint keylen, - bool presort) - { return ft_init_search(mode, file,inx,(byte*) key,keylen,presort); } + FT_INFO *ft_init_ext(uint flags, uint inx,String *key) + { + return ft_init_search(flags,file,inx, + (byte *)key->ptr(), key->length(), key->charset(), + table->record[0]); + } int ft_read(byte *buf); - int rnd_init(bool scan=1); + int rnd_init(bool scan); int rnd_next(byte *buf); int rnd_pos(byte * buf, byte *pos); int restart_rnd_next(byte *buf, byte *pos); void position(const byte *record); - my_off_t row_position() { return mi_position(file); } void info(uint); int extra(enum ha_extra_function operation); int extra_opt(enum ha_extra_function operation, ulong cache_size); - int reset(void); int external_lock(THD *thd, int lock_type); int delete_all_rows(void); - void deactivate_non_unique_index(ha_rows rows); - bool activate_all_index(THD *thd); - ha_rows records_in_range(int inx, - const byte *start_key,uint start_key_len, - enum ha_rkey_function start_search_flag, - const byte *end_key,uint end_key_len, - enum ha_rkey_function end_search_flag); + int disable_indexes(uint mode); + int enable_indexes(uint mode); + int indexes_are_disabled(void); + void start_bulk_insert(ha_rows rows); + int end_bulk_insert(); + ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key); void update_create_info(HA_CREATE_INFO *create_info); int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info); THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, @@ -128,6 +127,10 @@ class ha_myisam: public handler int optimize(THD* thd, HA_CHECK_OPT* check_opt); int restore(THD* thd, HA_CHECK_OPT* check_opt); int backup(THD* thd, HA_CHECK_OPT* check_opt); + int assign_to_keycache(THD* thd, HA_CHECK_OPT* check_opt); + int preload_keys(THD* thd, HA_CHECK_OPT* check_opt); +#ifdef HAVE_REPLICATION int dump(THD* thd, int fd); int net_read_dump(NET* net); +#endif }; diff --git a/sql/ha_myisammrg.cc b/sql/ha_myisammrg.cc index f65c9afc786..edb3521470f 100644 --- a/sql/ha_myisammrg.cc +++ b/sql/ha_myisammrg.cc @@ -15,7 +15,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_IMPLEMENTATION #pragma implementation // gcc: Class implementation #endif @@ -35,9 +35,22 @@ const char **ha_myisammrg::bas_ext() const { static const char *ext[]= { ".MRG", NullS }; return ext; } +const char *ha_myisammrg::index_type(uint key_number) +{ + return ((table->key_info[key_number].flags & HA_FULLTEXT) ? 
+ "FULLTEXT" : + (table->key_info[key_number].flags & HA_SPATIAL) ? + "SPATIAL" : + (table->key_info[key_number].algorithm == HA_KEY_ALG_RTREE) ? + "RTREE" : + "BTREE"); +} + + int ha_myisammrg::open(const char *name, int mode, uint test_if_locked) { char name_buff[FN_REFLEN]; + DBUG_PRINT("info", ("ha_myisammrg::open")); if (!(file=myrg_open(fn_format(name_buff,name,"","",2 | 4), mode, test_if_locked))) @@ -80,8 +93,8 @@ int ha_myisammrg::close(void) int ha_myisammrg::write_row(byte * buf) { statistic_increment(ha_write_count,&LOCK_status); - if (table->time_stamp) - update_timestamp(buf+table->time_stamp-1); + if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) + table->timestamp_field->set_time(); if (table->next_number_field && buf == table->record[0]) update_auto_increment(); return myrg_write(file,buf); @@ -90,8 +103,8 @@ int ha_myisammrg::write_row(byte * buf) int ha_myisammrg::update_row(const byte * old_data, byte * new_data) { statistic_increment(ha_update_count,&LOCK_status); - if (table->time_stamp) - update_timestamp(new_data+table->time_stamp-1); + if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) + table->timestamp_field->set_time(); return myrg_update(file,old_data,new_data); } @@ -160,6 +173,16 @@ int ha_myisammrg::index_last(byte * buf) return error; } +int ha_myisammrg::index_next_same(byte * buf, + const byte *key __attribute__((unused)), + uint length __attribute__((unused))) +{ + statistic_increment(ha_read_next_count,&LOCK_status); + int error=myrg_rnext_same(file,buf); + table->status=error ? STATUS_NOT_FOUND: 0; + return error; +} + int ha_myisammrg::rnd_init(bool scan) { return myrg_extra(file,HA_EXTRA_RESET,0); @@ -187,20 +210,14 @@ void ha_myisammrg::position(const byte *record) ha_store_ptr(ref, ref_length, (my_off_t) position); } -ha_rows ha_myisammrg::records_in_range(int inx, - const byte *start_key,uint start_key_len, - enum ha_rkey_function start_search_flag, - const byte *end_key,uint end_key_len, - enum ha_rkey_function end_search_flag) + +ha_rows ha_myisammrg::records_in_range(uint inx, key_range *min_key, + key_range *max_key) { - return (ha_rows) myrg_records_in_range(file, - inx, - start_key,start_key_len, - start_search_flag, - end_key,end_key_len, - end_search_flag); + return (ha_rows) myrg_records_in_range(file, (int) inx, min_key, max_key); } + void ha_myisammrg::info(uint flag) { MYMERGE_INFO info; @@ -218,7 +235,7 @@ void ha_myisammrg::info(uint flag) #endif data_file_length=info.data_file_length; errkey = info.errkey; - table->keys_in_use= set_bits(key_map, table->keys); + table->keys_in_use.set_prefix(table->keys); table->db_options_in_use = info.options; table->is_view=1; mean_rec_length=info.reclength; @@ -259,12 +276,6 @@ int ha_myisammrg::extra_opt(enum ha_extra_function operation, ulong cache_size) return myrg_extra(file, operation, (void*) &cache_size); } - -int ha_myisammrg::reset(void) -{ - return myrg_extra(file,HA_EXTRA_RESET,0); -} - int ha_myisammrg::external_lock(THD *thd, int lock_type) { return myrg_lock_database(file,lock_type); @@ -293,14 +304,40 @@ THR_LOCK_DATA **ha_myisammrg::store_lock(THD *thd, return to; } + +/* Find out database name and table name from a filename */ + +static void split_file_name(const char *file_name, + LEX_STRING *db, LEX_STRING *name) +{ + uint dir_length, prefix_length; + char buff[FN_REFLEN]; + + db->length= 0; + strmake(buff, file_name, sizeof(buff)-1); + dir_length= dirname_length(buff); + if (dir_length > 1) + { + /* Get database */ + buff[dir_length-1]= 0; // Remove 
end '/' + prefix_length= dirname_length(buff); + db->str= (char*) file_name+ prefix_length; + db->length= dir_length - prefix_length -1; + } + name->str= (char*) file_name+ dir_length; + name->length= (uint) (fn_ext(name->str) - name->str); +} + + void ha_myisammrg::update_create_info(HA_CREATE_INFO *create_info) { - // [phi] auto_increment stuff is missing (but currently not needed) DBUG_ENTER("ha_myisammrg::update_create_info"); + if (!(create_info->used_fields & HA_CREATE_USED_UNION)) { MYRG_TABLE *open_table; THD *thd=current_thd; + create_info->merge_list.next= &create_info->merge_list.first; create_info->merge_list.elements=0; @@ -308,14 +345,17 @@ void ha_myisammrg::update_create_info(HA_CREATE_INFO *create_info) open_table != file->end_table ; open_table++) { - char *name=open_table->table->filename; - char buff[FN_REFLEN]; TABLE_LIST *ptr; + LEX_STRING db, name; + if (!(ptr = (TABLE_LIST *) thd->calloc(sizeof(TABLE_LIST)))) goto err; - fn_format(buff,name,"","",3); - if (!(ptr->real_name=thd->strdup(buff))) + split_file_name(open_table->table->filename, &db, &name); + if (!(ptr->real_name= thd->strmake(name.str, name.length))) + goto err; + if (db.length && !(ptr->db= thd->strmake(db.str, db.length))) goto err; + create_info->merge_list.elements++; (*create_info->merge_list.next) = (byte*) ptr; create_info->merge_list.next= (byte**) &ptr->next; @@ -334,37 +374,55 @@ err: DBUG_VOID_RETURN; } + int ha_myisammrg::create(const char *name, register TABLE *form, HA_CREATE_INFO *create_info) { char buff[FN_REFLEN],**table_names,**pos; TABLE_LIST *tables= (TABLE_LIST*) create_info->merge_list.first; + THD *thd= current_thd; + uint dirlgt= dirname_length(name); DBUG_ENTER("ha_myisammrg::create"); - if (!(table_names= (char**) sql_alloc((create_info->merge_list.elements+1)* - sizeof(char*)))) + if (!(table_names= (char**) thd->alloc((create_info->merge_list.elements+1)* + sizeof(char*)))) DBUG_RETURN(HA_ERR_OUT_OF_MEM); for (pos=table_names ; tables ; tables=tables->next) { char *table_name; + TABLE **tbl= 0; if (create_info->options & HA_LEX_CREATE_TMP_TABLE) + tbl= find_temporary_table(thd, tables->db, tables->real_name); + if (!tbl) { - TABLE **tbl=find_temporary_table(current_thd, - tables->db, tables->real_name); - if (!tbl) - { - table_name=sql_alloc(1+ - my_snprintf(buff,FN_REFLEN,"%s/%s/%s",mysql_real_data_home, - tables->db, tables->real_name)); - if (!table_name) - DBUG_RETURN(HA_ERR_OUT_OF_MEM); - strcpy(table_name, buff); - } + /* + Construct the path to the MyISAM table. Try to meet two conditions: + 1.) Allow to include MyISAM tables from different databases, and + 2.) allow for moving DATADIR around in the file system. + The first means that we need paths in the .MRG file. The second + means that we should not have absolute paths in the .MRG file. + The best, we can do, is to use 'mysql_data_home', which is '.' + in mysqld and may be an absolute path in an embedded server. + This means that it might not be possible to move the DATADIR of + an embedded server without changing the paths in the .MRG file. + */ + uint length= my_snprintf(buff, FN_REFLEN, "%s/%s/%s", mysql_data_home, + tables->db, tables->real_name); + /* + If a MyISAM table is in the same directory as the MERGE table, + we use the table name without a path. This means that the + DATADIR can easily be moved even for an embedded server as long + as the MyISAM tables are from the same database as the MERGE table. + */ + if ((dirname_length(buff) == dirlgt) && ! 
memcmp(buff, name, dirlgt)) + table_name= tables->real_name; else - table_name=(*tbl)->path; + if (! (table_name= thd->strmake(buff, length))) + DBUG_RETURN(HA_ERR_OUT_OF_MEM); } else - table_name=tables->real_name; + table_name=(*tbl)->path; + DBUG_PRINT("info",("MyISAM table_name: '%s'", table_name)); *pos++= table_name; } *pos=0; @@ -374,9 +432,13 @@ int ha_myisammrg::create(const char *name, register TABLE *form, (my_bool) 0)); } + void ha_myisammrg::append_create_info(String *packet) { - char buff[FN_REFLEN]; + const char *current_db; + uint db_length; + THD *thd= current_thd; + if (file->merge_insert_method != MERGE_INSERT_DISABLED) { packet->append(" INSERT_METHOD=",15); @@ -385,15 +447,26 @@ void ha_myisammrg::append_create_info(String *packet) packet->append(" UNION=(",8); MYRG_TABLE *open_table,*first; + current_db= table->table_cache_key; + db_length= strlen(current_db); + for (first=open_table=file->open_tables ; open_table != file->end_table ; open_table++) { - char *name= open_table->table->filename; - fn_format(buff,name,"","",3); + LEX_STRING db, name; + split_file_name(open_table->table->filename, &db, &name); if (open_table != first) packet->append(','); - packet->append(buff,(uint) strlen(buff)); + /* Report database for mapped table if it isn't in current database */ + if (db.length && + (db_length != db.length || + strncmp(current_db, db.str, db.length))) + { + append_identifier(thd, packet, db.str, db.length); + packet->append('.'); + } + append_identifier(thd, packet, name.str, name.length); } packet->append(')'); } diff --git a/sql/ha_myisammrg.h b/sql/ha_myisammrg.h index db3c20bede2..7348096b695 100644 --- a/sql/ha_myisammrg.h +++ b/sql/ha_myisammrg.h @@ -15,7 +15,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_INTERFACE #pragma interface /* gcc class implementation */ #endif @@ -32,24 +32,23 @@ class ha_myisammrg: public handler ~ha_myisammrg() {} const char *table_type() const { return "MRG_MyISAM"; } const char **bas_ext() const; + const char *index_type(uint key_number); ulong table_flags() const { - return (HA_REC_NOT_IN_SEQ | HA_READ_RND_SAME | HA_AUTO_PART_KEY | - HA_KEYPOS_TO_RNDPOS | HA_LASTKEY_ORDER | - HA_NULL_KEY | HA_BLOB_KEY | HA_FILE_BASED); + return (HA_REC_NOT_IN_SEQ | HA_AUTO_PART_KEY | HA_READ_RND_SAME | + HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS | HA_FILE_BASED | + HA_CAN_INSERT_DELAYED | HA_ANY_INDEX_MAY_BE_UNIQUE); } - ulong index_flags(uint inx) const + ulong index_flags(uint inx, uint part, bool all_parts) const { - ulong flags=(HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | - HA_NOT_READ_PREFIX_LAST); // This - last - flag is ONLY for 4.0 !!! - return (flags | ((table->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ? - 0 : HA_KEY_READ_ONLY)); + return ((table->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ? 
+ 0 : HA_READ_NEXT | HA_READ_PREV | HA_READ_RANGE | + HA_READ_ORDER | HA_KEYREAD_ONLY); } - uint max_record_length() const { return HA_MAX_REC_LENGTH; } - uint max_keys() const { return MI_MAX_KEY; } - uint max_key_parts() const { return MAX_REF_PARTS; } - uint max_key_length() const { return MAX_KEY_LENGTH; } - virtual double scan_time() + uint max_supported_keys() const { return MI_MAX_KEY; } + uint max_supported_key_length() const { return MI_MAX_KEY_LENGTH; } + uint max_supported_key_part_length() const { return MI_MAX_KEY_LENGTH; } + double scan_time() { return ulonglong2double(data_file_length) / IO_SIZE + file->tables; } int open(const char *name, int mode, uint test_if_locked); @@ -66,20 +65,15 @@ class ha_myisammrg: public handler int index_prev(byte * buf); int index_first(byte * buf); int index_last(byte * buf); - int rnd_init(bool scan=1); + int index_next_same(byte *buf, const byte *key, uint keylen); + int rnd_init(bool scan); int rnd_next(byte *buf); int rnd_pos(byte * buf, byte *pos); void position(const byte *record); - ha_rows records_in_range(int inx, - const byte *start_key,uint start_key_len, - enum ha_rkey_function start_search_flag, - const byte *end_key,uint end_key_len, - enum ha_rkey_function end_search_flag); - my_off_t row_position() { return myrg_position(file); } + ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key); void info(uint); int extra(enum ha_extra_function operation); int extra_opt(enum ha_extra_function operation, ulong cache_size); - int reset(void); int external_lock(THD *thd, int lock_type); uint lock_count(void) const; int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info); diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc new file mode 100644 index 00000000000..876d5d2f8fd --- /dev/null +++ b/sql/ha_ndbcluster.cc @@ -0,0 +1,5192 @@ + /* Copyright (C) 2000-2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +/* + This file defines the NDB Cluster handler: the interface between MySQL and + NDB Cluster +*/ + +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation // gcc: Class implementation +#endif + +#include "mysql_priv.h" + +#ifdef HAVE_NDBCLUSTER_DB +#include <my_dir.h> +#include "ha_ndbcluster.h" +#include <ndbapi/NdbApi.hpp> +#include <ndbapi/NdbScanFilter.hpp> + +// options from from mysqld.cc +extern my_bool opt_ndb_optimized_node_selection; +extern const char *opt_ndbcluster_connectstring; + +// Default value for parallelism +static const int parallelism= 240; + +// Default value for max number of transactions +// createable against NDB from this handler +static const int max_transactions= 256; + +static const char *ha_ndb_ext=".ndb"; + +#define NDB_FAILED_AUTO_INCREMENT ~(Uint64)0 +#define NDB_AUTO_INCREMENT_RETRIES 10 + +#define NDB_INVALID_SCHEMA_OBJECT 241 + +#define ERR_PRINT(err) \ + DBUG_PRINT("error", ("%d message: %s", err.code, err.message)) + +#define ERR_RETURN(err) \ +{ \ + ERR_PRINT(err); \ + DBUG_RETURN(ndb_to_mysql_error(&err)); \ +} + +// Typedefs for long names +typedef NdbDictionary::Column NDBCOL; +typedef NdbDictionary::Table NDBTAB; +typedef NdbDictionary::Index NDBINDEX; +typedef NdbDictionary::Dictionary NDBDICT; + +bool ndbcluster_inited= FALSE; + +static Ndb* g_ndb= NULL; +static Ndb_cluster_connection* g_ndb_cluster_connection= NULL; + +// Handler synchronization +pthread_mutex_t ndbcluster_mutex; + +// Table lock handling +static HASH ndbcluster_open_tables; + +static byte *ndbcluster_get_key(NDB_SHARE *share,uint *length, + my_bool not_used __attribute__((unused))); +static NDB_SHARE *get_share(const char *table_name); +static void free_share(NDB_SHARE *share); + +static int packfrm(const void *data, uint len, const void **pack_data, uint *pack_len); +static int unpackfrm(const void **data, uint *len, + const void* pack_data); + +static int ndb_get_table_statistics(Ndb*, const char *, + Uint64* rows, Uint64* commits); + + +/* + Dummy buffer to read zero pack_length fields + which are mapped to 1 char +*/ +static byte dummy_buf[1]; + +/* + Error handling functions +*/ + +struct err_code_mapping +{ + int ndb_err; + int my_err; + int show_warning; +}; + +static const err_code_mapping err_map[]= +{ + { 626, HA_ERR_KEY_NOT_FOUND, 0 }, + { 630, HA_ERR_FOUND_DUPP_KEY, 0 }, + { 893, HA_ERR_FOUND_DUPP_KEY, 0 }, + { 721, HA_ERR_TABLE_EXIST, 1 }, + { 4244, HA_ERR_TABLE_EXIST, 1 }, + + { 709, HA_ERR_NO_SUCH_TABLE, 1 }, + + { 266, HA_ERR_LOCK_WAIT_TIMEOUT, 1 }, + { 274, HA_ERR_LOCK_WAIT_TIMEOUT, 1 }, + { 296, HA_ERR_LOCK_WAIT_TIMEOUT, 1 }, + { 297, HA_ERR_LOCK_WAIT_TIMEOUT, 1 }, + { 237, HA_ERR_LOCK_WAIT_TIMEOUT, 1 }, + + { 623, HA_ERR_RECORD_FILE_FULL, 1 }, + { 624, HA_ERR_RECORD_FILE_FULL, 1 }, + { 625, HA_ERR_RECORD_FILE_FULL, 1 }, + { 826, HA_ERR_RECORD_FILE_FULL, 1 }, + { 827, HA_ERR_RECORD_FILE_FULL, 1 }, + { 832, HA_ERR_RECORD_FILE_FULL, 1 }, + + { 0, 1, 0 }, + + { -1, -1, 1 } +}; + + +static int ndb_to_mysql_error(const NdbError *err) +{ + uint i; + for (i=0; err_map[i].ndb_err != err->code && err_map[i].my_err != -1; i++); + if (err_map[i].show_warning) + { + // Push the NDB error message as warning + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_GET_ERRMSG, ER(ER_GET_ERRMSG), + err->code, err->message, "NDB"); + } + if 
(err_map[i].my_err == -1) + return err->code; + return err_map[i].my_err; +} + + + +inline +int execute_no_commit(ha_ndbcluster *h, NdbConnection *trans) +{ + int m_batch_execute= 0; +#ifdef NOT_USED + if (m_batch_execute) + return 0; +#endif + return trans->execute(NoCommit,AbortOnError,h->m_force_send); +} + +inline +int execute_commit(ha_ndbcluster *h, NdbConnection *trans) +{ + int m_batch_execute= 0; +#ifdef NOT_USED + if (m_batch_execute) + return 0; +#endif + return trans->execute(Commit,AbortOnError,h->m_force_send); +} + +inline +int execute_commit(THD *thd, NdbConnection *trans) +{ + int m_batch_execute= 0; +#ifdef NOT_USED + if (m_batch_execute) + return 0; +#endif + return trans->execute(Commit,AbortOnError,thd->variables.ndb_force_send); +} + +inline +int execute_no_commit_ie(ha_ndbcluster *h, NdbConnection *trans) +{ + int m_batch_execute= 0; +#ifdef NOT_USED + if (m_batch_execute) + return 0; +#endif + return trans->execute(NoCommit, AO_IgnoreError,h->m_force_send); +} + +/* + Place holder for ha_ndbcluster thread specific data +*/ + +Thd_ndb::Thd_ndb() +{ + ndb= new Ndb(g_ndb_cluster_connection, ""); + lock_count= 0; + count= 0; + error= 0; +} + +Thd_ndb::~Thd_ndb() +{ + if (ndb) + { +#ifndef DBUG_OFF + Ndb::Free_list_usage tmp; tmp.m_name= 0; + while (ndb->get_free_list_usage(&tmp)) + { + uint leaked= (uint) tmp.m_created - tmp.m_free; + if (leaked) + fprintf(stderr, "NDB: Found %u %s%s that %s not been released\n", + leaked, tmp.m_name, + (leaked == 1)?"":"'s", + (leaked == 1)?"has":"have"); + } +#endif + delete ndb; + } + ndb= 0; +} + +inline +Ndb *ha_ndbcluster::get_ndb() +{ + return ((Thd_ndb*)current_thd->transaction.thd_ndb)->ndb; +} + +/* + * manage uncommitted insert/deletes during transactio to get records correct + */ + +struct Ndb_local_table_statistics { + int no_uncommitted_rows_count; + ulong last_count; + ha_rows records; +}; + +void ha_ndbcluster::set_rec_per_key() +{ + DBUG_ENTER("ha_ndbcluster::get_status_const"); + for (uint i=0 ; i < table->keys ; i++) + { + table->key_info[i].rec_per_key[table->key_info[i].key_parts-1]= 1; + } + DBUG_VOID_RETURN; +} + +void ha_ndbcluster::records_update() +{ + if (m_ha_not_exact_count) + return; + DBUG_ENTER("ha_ndbcluster::records_update"); + struct Ndb_local_table_statistics *info= + (struct Ndb_local_table_statistics *)m_table_info; + DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d", + ((const NDBTAB *)m_table)->getTableId(), + info->no_uncommitted_rows_count)); + // if (info->records == ~(ha_rows)0) + { + Ndb *ndb= get_ndb(); + Uint64 rows; + if(ndb_get_table_statistics(ndb, m_tabname, &rows, 0) == 0){ + info->records= rows; + } + } + { + THD *thd= current_thd; + if (((Thd_ndb*)(thd->transaction.thd_ndb))->error) + info->no_uncommitted_rows_count= 0; + } + records= info->records+ info->no_uncommitted_rows_count; + DBUG_VOID_RETURN; +} + +void ha_ndbcluster::no_uncommitted_rows_execute_failure() +{ + if (m_ha_not_exact_count) + return; + DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_execute_failure"); + THD *thd= current_thd; + ((Thd_ndb*)(thd->transaction.thd_ndb))->error= 1; + DBUG_VOID_RETURN; +} + +void ha_ndbcluster::no_uncommitted_rows_init(THD *thd) +{ + if (m_ha_not_exact_count) + return; + DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_init"); + struct Ndb_local_table_statistics *info= + (struct Ndb_local_table_statistics *)m_table_info; + Thd_ndb *thd_ndb= (Thd_ndb *)thd->transaction.thd_ndb; + if (info->last_count != thd_ndb->count) + { + info->last_count = thd_ndb->count; + 
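ndb_to_mysql_error() above walks a sentinel-terminated mapping table; the pattern is worth isolating. A self-contained sketch of the same technique with placeholder codes (the real pairs are in err_map[] above):

#include <cstdio>

// Stand-alone illustration of a sentinel-terminated error map: scan until the
// engine code matches or the {-1,-1} sentinel is reached, then either map the
// code or pass it through unchanged.
struct code_pair { int engine_err; int my_err; };

static const code_pair demo_map[]=
{
  { 1001, 200 },          // placeholder codes, not NDB's or MySQL's
  { 1002, 201 },
  {   -1,  -1 }           // sentinel
};

static int map_error(int engine_err)
{
  int i;
  for (i= 0; demo_map[i].engine_err != engine_err && demo_map[i].my_err != -1;
       i++) ;
  return (demo_map[i].my_err == -1) ? engine_err : demo_map[i].my_err;
}

int main()
{
  std::printf("%d %d\n", map_error(1001), map_error(4711));  // prints: 200 4711
  return 0;
}
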
info->no_uncommitted_rows_count= 0; + info->records= ~(ha_rows)0; + DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d", + ((const NDBTAB *)m_table)->getTableId(), + info->no_uncommitted_rows_count)); + } + DBUG_VOID_RETURN; +} + +void ha_ndbcluster::no_uncommitted_rows_update(int c) +{ + if (m_ha_not_exact_count) + return; + DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_update"); + struct Ndb_local_table_statistics *info= + (struct Ndb_local_table_statistics *)m_table_info; + info->no_uncommitted_rows_count+= c; + DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d", + ((const NDBTAB *)m_table)->getTableId(), + info->no_uncommitted_rows_count)); + DBUG_VOID_RETURN; +} + +void ha_ndbcluster::no_uncommitted_rows_reset(THD *thd) +{ + if (m_ha_not_exact_count) + return; + DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_reset"); + ((Thd_ndb*)(thd->transaction.thd_ndb))->count++; + ((Thd_ndb*)(thd->transaction.thd_ndb))->error= 0; + DBUG_VOID_RETURN; +} + +/* + Take care of the error that occured in NDB + + RETURN + 0 No error + # The mapped error code +*/ + +void ha_ndbcluster::invalidate_dictionary_cache(bool global) +{ + NDBDICT *dict= get_ndb()->getDictionary(); + DBUG_ENTER("invalidate_dictionary_cache"); + DBUG_PRINT("info", ("invalidating %s", m_tabname)); + + if (global) + { + const NDBTAB *tab= dict->getTable(m_tabname); + if (!tab) + DBUG_VOID_RETURN; + if (tab->getObjectStatus() == NdbDictionary::Object::Invalid) + { + // Global cache has already been invalidated + dict->removeCachedTable(m_tabname); + global= FALSE; + } + else + dict->invalidateTable(m_tabname); + } + else + dict->removeCachedTable(m_tabname); + table->version=0L; /* Free when thread is ready */ + /* Invalidate indexes */ + for (uint i= 0; i < table->keys; i++) + { + NDBINDEX *index = (NDBINDEX *) m_index[i].index; + NDBINDEX *unique_index = (NDBINDEX *) m_index[i].unique_index; + NDB_INDEX_TYPE idx_type= m_index[i].type; + + switch(idx_type) { + case(PRIMARY_KEY_ORDERED_INDEX): + case(ORDERED_INDEX): + if (global) + dict->invalidateIndex(index->getName(), m_tabname); + else + dict->removeCachedIndex(index->getName(), m_tabname); + break; + case(UNIQUE_ORDERED_INDEX): + if (global) + dict->invalidateIndex(index->getName(), m_tabname); + else + dict->removeCachedIndex(index->getName(), m_tabname); + case(UNIQUE_INDEX): + if (global) + dict->invalidateIndex(unique_index->getName(), m_tabname); + else + dict->removeCachedIndex(unique_index->getName(), m_tabname); + break; + case(PRIMARY_KEY_INDEX): + case(UNDEFINED_INDEX): + break; + } + } + DBUG_VOID_RETURN; +} + +int ha_ndbcluster::ndb_err(NdbConnection *trans) +{ + int res; + NdbError err= trans->getNdbError(); + DBUG_ENTER("ndb_err"); + + ERR_PRINT(err); + switch (err.classification) { + case NdbError::SchemaError: + { + invalidate_dictionary_cache(TRUE); + + if (err.code==284) + { + /* + Check if the table is _really_ gone or if the table has + been alterend and thus changed table id + */ + NDBDICT *dict= get_ndb()->getDictionary(); + DBUG_PRINT("info", ("Check if table %s is really gone", m_tabname)); + if (!(dict->getTable(m_tabname))) + { + err= dict->getNdbError(); + DBUG_PRINT("info", ("Table not found, error: %d", err.code)); + if (err.code != 709) + DBUG_RETURN(1); + } + else + { + DBUG_PRINT("info", ("Table exist but must have changed")); + /* In 5.0, this should be replaced with a mapping to a mysql error */ + my_printf_error(ER_UNKNOWN_ERROR, + "Table definition has changed, "\ + "please retry transaction", + MYF(0)); + DBUG_RETURN(1); + } + } 
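+      /*
+        Schema errors that reach this point are mapped to a MySQL error
+        by ndb_to_mysql_error() below, like any other NDB error.
+      */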
+ break; + } + default: + break; + } + res= ndb_to_mysql_error(&err); + DBUG_PRINT("info", ("transformed ndbcluster error %d to mysql error %d", + err.code, res)); + if (res == HA_ERR_FOUND_DUPP_KEY) + { + if (m_rows_to_insert == 1) + m_dupkey= table->primary_key; + else + { + /* We are batching inserts, offending key is not available */ + m_dupkey= (uint) -1; + } + } + DBUG_RETURN(res); +} + + +/* + Override the default get_error_message in order to add the + error message of NDB + */ + +bool ha_ndbcluster::get_error_message(int error, + String *buf) +{ + DBUG_ENTER("ha_ndbcluster::get_error_message"); + DBUG_PRINT("enter", ("error: %d", error)); + + Ndb *ndb= get_ndb(); + if (!ndb) + DBUG_RETURN(FALSE); + + const NdbError err= ndb->getNdbError(error); + bool temporary= err.status==NdbError::TemporaryError; + buf->set(err.message, strlen(err.message), &my_charset_bin); + DBUG_PRINT("exit", ("message: %s, temporary: %d", buf->ptr(), temporary)); + DBUG_RETURN(temporary); +} + + +#ifndef DBUG_OFF +/* + Check if type is supported by NDB. +*/ + +static bool ndb_supported_type(enum_field_types type) +{ + switch (type) { + case MYSQL_TYPE_DECIMAL: + case MYSQL_TYPE_TINY: + case MYSQL_TYPE_SHORT: + case MYSQL_TYPE_LONG: + case MYSQL_TYPE_INT24: + case MYSQL_TYPE_LONGLONG: + case MYSQL_TYPE_FLOAT: + case MYSQL_TYPE_DOUBLE: + case MYSQL_TYPE_TIMESTAMP: + case MYSQL_TYPE_DATETIME: + case MYSQL_TYPE_DATE: + case MYSQL_TYPE_NEWDATE: + case MYSQL_TYPE_TIME: + case MYSQL_TYPE_YEAR: + case MYSQL_TYPE_STRING: + case MYSQL_TYPE_VAR_STRING: + case MYSQL_TYPE_TINY_BLOB: + case MYSQL_TYPE_BLOB: + case MYSQL_TYPE_MEDIUM_BLOB: + case MYSQL_TYPE_LONG_BLOB: + case MYSQL_TYPE_ENUM: + case MYSQL_TYPE_SET: + return TRUE; + case MYSQL_TYPE_NULL: + case MYSQL_TYPE_GEOMETRY: + break; + } + return FALSE; +} +#endif /* !DBUG_OFF */ + + +/* + Instruct NDB to set the value of the hidden primary key +*/ + +bool ha_ndbcluster::set_hidden_key(NdbOperation *ndb_op, + uint fieldnr, const byte *field_ptr) +{ + DBUG_ENTER("set_hidden_key"); + DBUG_RETURN(ndb_op->equal(fieldnr, (char*)field_ptr, + NDB_HIDDEN_PRIMARY_KEY_LENGTH) != 0); +} + + +/* + Instruct NDB to set the value of one primary key attribute +*/ + +int ha_ndbcluster::set_ndb_key(NdbOperation *ndb_op, Field *field, + uint fieldnr, const byte *field_ptr) +{ + uint32 pack_len= field->pack_length(); + DBUG_ENTER("set_ndb_key"); + DBUG_PRINT("enter", ("%d: %s, ndb_type: %u, len=%d", + fieldnr, field->field_name, field->type(), + pack_len)); + DBUG_DUMP("key", (char*)field_ptr, pack_len); + + DBUG_ASSERT(ndb_supported_type(field->type())); + DBUG_ASSERT(! (field->flags & BLOB_FLAG)); + // Common implementation for most field types + DBUG_RETURN(ndb_op->equal(fieldnr, (char*) field_ptr, pack_len) != 0); +} + + +/* + Instruct NDB to set the value of one attribute +*/ + +int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field, + uint fieldnr, bool *set_blob_value) +{ + const byte* field_ptr= field->ptr; + uint32 pack_len= field->pack_length(); + DBUG_ENTER("set_ndb_value"); + DBUG_PRINT("enter", ("%d: %s, type: %u, len=%d, is_null=%s", + fieldnr, field->field_name, field->type(), + pack_len, field->is_null()?"Y":"N")); + DBUG_DUMP("value", (char*) field_ptr, pack_len); + + DBUG_ASSERT(ndb_supported_type(field->type())); + { + // ndb currently does not support size 0 + const byte *empty_field= ""; + if (pack_len == 0) + { + pack_len= 1; + field_ptr= empty_field; + } + if (! 
(field->flags & BLOB_FLAG)) + { + if (field->is_null()) + // Set value to NULL + DBUG_RETURN((ndb_op->setValue(fieldnr, (char*)NULL, pack_len) != 0)); + // Common implementation for most field types + DBUG_RETURN(ndb_op->setValue(fieldnr, (char*)field_ptr, pack_len) != 0); + } + + // Blob type + NdbBlob *ndb_blob= ndb_op->getBlobHandle(fieldnr); + if (ndb_blob != NULL) + { + if (field->is_null()) + DBUG_RETURN(ndb_blob->setNull() != 0); + + Field_blob *field_blob= (Field_blob*)field; + + // Get length and pointer to data + uint32 blob_len= field_blob->get_length(field_ptr); + char* blob_ptr= NULL; + field_blob->get_ptr(&blob_ptr); + + // Looks like NULL ptr signals length 0 blob + if (blob_ptr == NULL) { + DBUG_ASSERT(blob_len == 0); + blob_ptr= (char*)""; + } + + DBUG_PRINT("value", ("set blob ptr=%p len=%u", + blob_ptr, blob_len)); + DBUG_DUMP("value", (char*)blob_ptr, min(blob_len, 26)); + + if (set_blob_value) + *set_blob_value= TRUE; + // No callback needed to write value + DBUG_RETURN(ndb_blob->setValue(blob_ptr, blob_len) != 0); + } + DBUG_RETURN(1); + } +} + + +/* + Callback to read all blob values. + - not done in unpack_record because unpack_record is valid + after execute(Commit) but reading blobs is not + - may only generate read operations; they have to be executed + somewhere before the data is available + - due to single buffer for all blobs, we let the last blob + process all blobs (last so that all are active) + - null bit is still set in unpack_record + - TODO allocate blob part aligned buffers +*/ + +NdbBlob::ActiveHook g_get_ndb_blobs_value; + +int g_get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg) +{ + DBUG_ENTER("g_get_ndb_blobs_value"); + if (ndb_blob->blobsNextBlob() != NULL) + DBUG_RETURN(0); + ha_ndbcluster *ha= (ha_ndbcluster *)arg; + DBUG_RETURN(ha->get_ndb_blobs_value(ndb_blob)); +} + +int ha_ndbcluster::get_ndb_blobs_value(NdbBlob *last_ndb_blob) +{ + DBUG_ENTER("get_ndb_blobs_value"); + + // Field has no field number so cannot use TABLE blob_field + // Loop twice, first only counting total buffer size + for (int loop= 0; loop <= 1; loop++) + { + uint32 offset= 0; + for (uint i= 0; i < table->fields; i++) + { + Field *field= table->field[i]; + NdbValue value= m_value[i]; + if (value.ptr != NULL && (field->flags & BLOB_FLAG)) + { + Field_blob *field_blob= (Field_blob *)field; + NdbBlob *ndb_blob= value.blob; + Uint64 blob_len= 0; + if (ndb_blob->getLength(blob_len) != 0) + DBUG_RETURN(-1); + // Align to Uint64 + uint32 blob_size= blob_len; + if (blob_size % 8 != 0) + blob_size+= 8 - blob_size % 8; + if (loop == 1) + { + char *buf= m_blobs_buffer + offset; + uint32 len= 0xffffffff; // Max uint32 + DBUG_PRINT("value", ("read blob ptr=%x len=%u", + (UintPtr)buf, (uint)blob_len)); + if (ndb_blob->readData(buf, len) != 0) + DBUG_RETURN(-1); + DBUG_ASSERT(len == blob_len); + field_blob->set_ptr(len, buf); + } + offset+= blob_size; + } + } + if (loop == 0 && offset > m_blobs_buffer_size) + { + my_free(m_blobs_buffer, MYF(MY_ALLOW_ZERO_PTR)); + m_blobs_buffer_size= 0; + DBUG_PRINT("value", ("allocate blobs buffer size %u", offset)); + m_blobs_buffer= my_malloc(offset, MYF(MY_WME)); + if (m_blobs_buffer == NULL) + DBUG_RETURN(-1); + m_blobs_buffer_size= offset; + } + } + DBUG_RETURN(0); +} + + +/* + Instruct NDB to fetch one field + - data is read directly into buffer provided by field + if field is NULL, data is read into memory provided by NDBAPI +*/ + +int ha_ndbcluster::get_ndb_value(NdbOperation *ndb_op, Field *field, + uint fieldnr, byte* buf) +{ + 
DBUG_ENTER("get_ndb_value"); + DBUG_PRINT("enter", ("fieldnr: %d flags: %o", fieldnr, + (int)(field != NULL ? field->flags : 0))); + + if (field != NULL) + { + DBUG_ASSERT(buf); + DBUG_ASSERT(ndb_supported_type(field->type())); + DBUG_ASSERT(field->ptr != NULL); + if (! (field->flags & BLOB_FLAG)) + { + byte *field_buf; + if (field->pack_length() != 0) + field_buf= buf + (field->ptr - table->record[0]); + else + field_buf= dummy_buf; + m_value[fieldnr].rec= ndb_op->getValue(fieldnr, + field_buf); + DBUG_RETURN(m_value[fieldnr].rec == NULL); + } + + // Blob type + NdbBlob *ndb_blob= ndb_op->getBlobHandle(fieldnr); + m_value[fieldnr].blob= ndb_blob; + if (ndb_blob != NULL) + { + // Set callback + void *arg= (void *)this; + DBUG_RETURN(ndb_blob->setActiveHook(g_get_ndb_blobs_value, arg) != 0); + } + DBUG_RETURN(1); + } + + // Used for hidden key only + m_value[fieldnr].rec= ndb_op->getValue(fieldnr, m_ref); + DBUG_RETURN(m_value[fieldnr].rec == NULL); +} + + +/* + Check if any set or get of blob value in current query. +*/ +bool ha_ndbcluster::uses_blob_value(bool all_fields) +{ + if (table->blob_fields == 0) + return FALSE; + if (all_fields) + return TRUE; + { + uint no_fields= table->fields; + int i; + THD *thd= table->in_use; + // They always put blobs at the end.. + for (i= no_fields - 1; i >= 0; i--) + { + Field *field= table->field[i]; + if (thd->query_id == field->query_id) + { + return TRUE; + } + } + } + return FALSE; +} + + +/* + Get metadata for this table from NDB + + IMPLEMENTATION + - check that frm-file on disk is equal to frm-file + of table accessed in NDB +*/ + +int ha_ndbcluster::get_metadata(const char *path) +{ + Ndb *ndb= get_ndb(); + NDBDICT *dict= ndb->getDictionary(); + const NDBTAB *tab; + int error; + bool invalidating_ndb_table= FALSE; + + DBUG_ENTER("get_metadata"); + DBUG_PRINT("enter", ("m_tabname: %s, path: %s", m_tabname, path)); + + do { + const void *data, *pack_data; + uint length, pack_length; + + if (!(tab= dict->getTable(m_tabname))) + ERR_RETURN(dict->getNdbError()); + // Check if thread has stale local cache + if (tab->getObjectStatus() == NdbDictionary::Object::Invalid) + { + invalidate_dictionary_cache(FALSE); + if (!(tab= dict->getTable(m_tabname))) + ERR_RETURN(dict->getNdbError()); + DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion())); + } + /* + Compare FrmData in NDB with frm file from disk. 
+ */ + error= 0; + if (readfrm(path, &data, &length) || + packfrm(data, length, &pack_data, &pack_length)) + { + my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR)); + my_free((char*)pack_data, MYF(MY_ALLOW_ZERO_PTR)); + DBUG_RETURN(1); + } + + if ((pack_length != tab->getFrmLength()) || + (memcmp(pack_data, tab->getFrmData(), pack_length))) + { + if (!invalidating_ndb_table) + { + DBUG_PRINT("info", ("Invalidating table")); + invalidate_dictionary_cache(TRUE); + invalidating_ndb_table= TRUE; + } + else + { + DBUG_PRINT("error", + ("metadata, pack_length: %d getFrmLength: %d memcmp: %d", + pack_length, tab->getFrmLength(), + memcmp(pack_data, tab->getFrmData(), pack_length))); + DBUG_DUMP("pack_data", (char*)pack_data, pack_length); + DBUG_DUMP("frm", (char*)tab->getFrmData(), tab->getFrmLength()); + error= 3; + invalidating_ndb_table= FALSE; + } + } + else + { + invalidating_ndb_table= FALSE; + } + my_free((char*)data, MYF(0)); + my_free((char*)pack_data, MYF(0)); + } while (invalidating_ndb_table); + + if (error) + DBUG_RETURN(error); + + m_table_version= tab->getObjectVersion(); + m_table= (void *)tab; + m_table_info= NULL; // Set in external lock + + DBUG_RETURN(build_index_list(ndb, table, ILBP_OPEN)); +} + +static int fix_unique_index_attr_order(NDB_INDEX_DATA &data, + const NDBINDEX *index, + KEY *key_info) +{ + DBUG_ENTER("fix_unique_index_attr_order"); + unsigned sz= index->getNoOfIndexColumns(); + + if (data.unique_index_attrid_map) + my_free((char*)data.unique_index_attrid_map, MYF(0)); + data.unique_index_attrid_map= (unsigned char*)my_malloc(sz,MYF(MY_WME)); + + KEY_PART_INFO* key_part= key_info->key_part; + KEY_PART_INFO* end= key_part+key_info->key_parts; + DBUG_ASSERT(key_info->key_parts == sz); + for (unsigned i= 0; key_part != end; key_part++, i++) + { + const char *field_name= key_part->field->field_name; + unsigned name_sz= strlen(field_name); + if (name_sz >= NDB_MAX_ATTR_NAME_SIZE) + name_sz= NDB_MAX_ATTR_NAME_SIZE-1; +#ifndef DBUG_OFF + data.unique_index_attrid_map[i]= 255; +#endif + for (unsigned j= 0; j < sz; j++) + { + const NDBCOL *c= index->getColumn(j); + if (strncmp(field_name, c->getName(), name_sz) == 0) + { + data.unique_index_attrid_map[i]= j; + break; + } + } + DBUG_ASSERT(data.unique_index_attrid_map[i] != 255); + } + DBUG_RETURN(0); +} + +int ha_ndbcluster::build_index_list(Ndb *ndb, TABLE *tab, enum ILBP phase) +{ + uint i; + int error= 0; + const char *name, *index_name; + char unique_index_name[FN_LEN]; + static const char* unique_suffix= "$unique"; + KEY* key_info= tab->key_info; + const char **key_name= tab->keynames.type_names; + NDBDICT *dict= ndb->getDictionary(); + DBUG_ENTER("build_index_list"); + + // Save information about all known indexes + for (i= 0; i < tab->keys; i++, key_info++, key_name++) + { + index_name= *key_name; + NDB_INDEX_TYPE idx_type= get_index_type_from_table(i); + m_index[i].type= idx_type; + if (idx_type == UNIQUE_ORDERED_INDEX || idx_type == UNIQUE_INDEX) + { + strxnmov(unique_index_name, FN_LEN, index_name, unique_suffix, NullS); + DBUG_PRINT("info", ("Created unique index name \'%s\' for index %d", + unique_index_name, i)); + } + // Create secondary indexes if in create phase + if (phase == ILBP_CREATE) + { + DBUG_PRINT("info", ("Creating index %u: %s", i, index_name)); + switch (idx_type){ + + case PRIMARY_KEY_INDEX: + // Do nothing, already created + break; + case PRIMARY_KEY_ORDERED_INDEX: + error= create_ordered_index(index_name, key_info); + break; + case UNIQUE_ORDERED_INDEX: + if (!(error= 
create_ordered_index(index_name, key_info))) + error= create_unique_index(unique_index_name, key_info); + break; + case UNIQUE_INDEX: + if (!(error= check_index_fields_not_null(i))) + error= create_unique_index(unique_index_name, key_info); + break; + case ORDERED_INDEX: + error= create_ordered_index(index_name, key_info); + break; + default: + DBUG_ASSERT(FALSE); + break; + } + if (error) + { + DBUG_PRINT("error", ("Failed to create index %u", i)); + drop_table(); + break; + } + } + // Add handles to index objects + if (idx_type != PRIMARY_KEY_INDEX && idx_type != UNIQUE_INDEX) + { + DBUG_PRINT("info", ("Get handle to index %s", index_name)); + const NDBINDEX *index= dict->getIndex(index_name, m_tabname); + if (!index) DBUG_RETURN(1); + m_index[i].index= (void *) index; + } + if (idx_type == UNIQUE_ORDERED_INDEX || idx_type == UNIQUE_INDEX) + { + DBUG_PRINT("info", ("Get handle to unique_index %s", unique_index_name)); + const NDBINDEX *index= dict->getIndex(unique_index_name, m_tabname); + if (!index) DBUG_RETURN(1); + m_index[i].unique_index= (void *) index; + error= fix_unique_index_attr_order(m_index[i], index, key_info); + } + } + + DBUG_RETURN(error); +} + + +/* + Decode the type of an index from information + provided in table object +*/ +NDB_INDEX_TYPE ha_ndbcluster::get_index_type_from_table(uint inx) const +{ + bool is_hash_index= (table->key_info[inx].algorithm == HA_KEY_ALG_HASH); + if (inx == table->primary_key) + return is_hash_index ? PRIMARY_KEY_INDEX : PRIMARY_KEY_ORDERED_INDEX; + else + return ((table->key_info[inx].flags & HA_NOSAME) ? + (is_hash_index ? UNIQUE_INDEX : UNIQUE_ORDERED_INDEX) : + ORDERED_INDEX); +} + +int ha_ndbcluster::check_index_fields_not_null(uint inx) +{ + KEY* key_info= table->key_info + inx; + KEY_PART_INFO* key_part= key_info->key_part; + KEY_PART_INFO* end= key_part+key_info->key_parts; + DBUG_ENTER("check_index_fields_not_null"); + + for (; key_part != end; key_part++) + { + Field* field= key_part->field; + if (field->maybe_null()) + { + my_printf_error(ER_NULL_COLUMN_IN_INDEX,ER(ER_NULL_COLUMN_IN_INDEX), + MYF(0),field->field_name); + DBUG_RETURN(ER_NULL_COLUMN_IN_INDEX); + } + } + + DBUG_RETURN(0); +} + +void ha_ndbcluster::release_metadata() +{ + uint i; + + DBUG_ENTER("release_metadata"); + DBUG_PRINT("enter", ("m_tabname: %s", m_tabname)); + + m_table= NULL; + m_table_info= NULL; + + // Release index list + for (i= 0; i < MAX_KEY; i++) + { + m_index[i].unique_index= NULL; + m_index[i].index= NULL; + if (m_index[i].unique_index_attrid_map) + { + my_free((char *)m_index[i].unique_index_attrid_map, MYF(0)); + m_index[i].unique_index_attrid_map= NULL; + } + } + + DBUG_VOID_RETURN; +} + +int ha_ndbcluster::get_ndb_lock_type(enum thr_lock_type type) +{ + if (type >= TL_WRITE_ALLOW_WRITE) + return NdbOperation::LM_Exclusive; + else if (uses_blob_value(m_retrieve_all_fields)) + return NdbOperation::LM_Read; + else + return NdbOperation::LM_CommittedRead; +} + +static const ulong index_type_flags[]= +{ + /* UNDEFINED_INDEX */ + 0, + + /* PRIMARY_KEY_INDEX */ + HA_ONLY_WHOLE_INDEX, + + /* PRIMARY_KEY_ORDERED_INDEX */ + /* + Enable HA_KEYREAD_ONLY when "sorted" indexes are supported, + thus ORDERD BY clauses can be optimized by reading directly + through the index. 
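+     Until then HA_KEYREAD_ONLY is left commented out below.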
+ */ + // HA_KEYREAD_ONLY | + HA_READ_NEXT | + HA_READ_RANGE | + HA_READ_ORDER, + + /* UNIQUE_INDEX */ + HA_ONLY_WHOLE_INDEX, + + /* UNIQUE_ORDERED_INDEX */ + HA_READ_NEXT | + HA_READ_RANGE | + HA_READ_ORDER, + + /* ORDERED_INDEX */ + HA_READ_NEXT | + HA_READ_RANGE | + HA_READ_ORDER +}; + +static const int index_flags_size= sizeof(index_type_flags)/sizeof(ulong); + +inline NDB_INDEX_TYPE ha_ndbcluster::get_index_type(uint idx_no) const +{ + DBUG_ASSERT(idx_no < MAX_KEY); + return m_index[idx_no].type; +} + + +/* + Get the flags for an index + + RETURN + flags depending on the type of the index. +*/ + +inline ulong ha_ndbcluster::index_flags(uint idx_no, uint part, + bool all_parts) const +{ + DBUG_ENTER("index_flags"); + DBUG_PRINT("info", ("idx_no: %d", idx_no)); + DBUG_ASSERT(get_index_type_from_table(idx_no) < index_flags_size); + DBUG_RETURN(index_type_flags[get_index_type_from_table(idx_no)]); +} + + +int ha_ndbcluster::set_primary_key(NdbOperation *op, const byte *key) +{ + KEY* key_info= table->key_info + table->primary_key; + KEY_PART_INFO* key_part= key_info->key_part; + KEY_PART_INFO* end= key_part+key_info->key_parts; + DBUG_ENTER("set_primary_key"); + + for (; key_part != end; key_part++) + { + Field* field= key_part->field; + if (set_ndb_key(op, field, + key_part->fieldnr-1, key)) + ERR_RETURN(op->getNdbError()); + key += key_part->length; + } + DBUG_RETURN(0); +} + + +int ha_ndbcluster::set_primary_key_from_record(NdbOperation *op, const byte *record) +{ + KEY* key_info= table->key_info + table->primary_key; + KEY_PART_INFO* key_part= key_info->key_part; + KEY_PART_INFO* end= key_part+key_info->key_parts; + DBUG_ENTER("set_primary_key_from_record"); + + for (; key_part != end; key_part++) + { + Field* field= key_part->field; + if (set_ndb_key(op, field, + key_part->fieldnr-1, record+key_part->offset)) + ERR_RETURN(op->getNdbError()); + } + DBUG_RETURN(0); +} + +/* + Read one record from NDB using primary key +*/ + +int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf) +{ + uint no_fields= table->fields, i; + NdbConnection *trans= m_active_trans; + NdbOperation *op; + THD *thd= current_thd; + DBUG_ENTER("pk_read"); + DBUG_PRINT("enter", ("key_len: %u", key_len)); + DBUG_DUMP("key", (char*)key, key_len); + + NdbOperation::LockMode lm= + (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type); + if (!(op= trans->getNdbOperation((const NDBTAB *) m_table)) || + op->readTuple(lm) != 0) + ERR_RETURN(trans->getNdbError()); + + if (table->primary_key == MAX_KEY) + { + // This table has no primary key, use "hidden" primary key + DBUG_PRINT("info", ("Using hidden key")); + DBUG_DUMP("key", (char*)key, 8); + if (set_hidden_key(op, no_fields, key)) + ERR_RETURN(trans->getNdbError()); + + // Read key at the same time, for future reference + if (get_ndb_value(op, NULL, no_fields, NULL)) + ERR_RETURN(trans->getNdbError()); + } + else + { + int res; + if ((res= set_primary_key(op, key))) + return res; + } + + // Read all wanted non-key field(s) unless HA_EXTRA_RETRIEVE_ALL_COLS + for (i= 0; i < no_fields; i++) + { + Field *field= table->field[i]; + if ((thd->query_id == field->query_id) || + m_retrieve_all_fields || + (field->flags & PRI_KEY_FLAG) && m_retrieve_primary_key) + { + if (get_ndb_value(op, field, i, buf)) + ERR_RETURN(trans->getNdbError()); + } + else + { + // Attribute was not to be read + m_value[i].ptr= NULL; + } + } + + if (execute_no_commit_ie(this,trans) != 0) + { + table->status= STATUS_NOT_FOUND; + DBUG_RETURN(ndb_err(trans)); + } + + // The value have now 
been fetched from NDB + unpack_record(buf); + table->status= 0; + DBUG_RETURN(0); +} + + +/* + Read one complementing record from NDB using primary key from old_data +*/ + +int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data) +{ + uint no_fields= table->fields, i; + NdbConnection *trans= m_active_trans; + NdbOperation *op; + THD *thd= current_thd; + DBUG_ENTER("complemented_pk_read"); + + if (m_retrieve_all_fields) + // We have allready retrieved all fields, nothing to complement + DBUG_RETURN(0); + + NdbOperation::LockMode lm= + (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type); + if (!(op= trans->getNdbOperation((const NDBTAB *) m_table)) || + op->readTuple(lm) != 0) + ERR_RETURN(trans->getNdbError()); + + int res; + if ((res= set_primary_key_from_record(op, old_data))) + ERR_RETURN(trans->getNdbError()); + + // Read all unreferenced non-key field(s) + for (i= 0; i < no_fields; i++) + { + Field *field= table->field[i]; + if (!(field->flags & PRI_KEY_FLAG) && + (thd->query_id != field->query_id)) + { + if (get_ndb_value(op, field, i, new_data)) + ERR_RETURN(trans->getNdbError()); + } + } + + if (execute_no_commit(this,trans) != 0) + { + table->status= STATUS_NOT_FOUND; + DBUG_RETURN(ndb_err(trans)); + } + + // The value have now been fetched from NDB + unpack_record(new_data); + table->status= 0; + DBUG_RETURN(0); +} + +/* + Peek to check if a particular row already exists +*/ + +int ha_ndbcluster::peek_row(const byte *record) +{ + NdbConnection *trans= m_active_trans; + NdbOperation *op; + THD *thd= current_thd; + DBUG_ENTER("peek_row"); + + NdbOperation::LockMode lm= + (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type); + if (!(op= trans->getNdbOperation((const NDBTAB *) m_table)) || + op->readTuple(lm) != 0) + ERR_RETURN(trans->getNdbError()); + + int res; + if ((res= set_primary_key_from_record(op, record))) + ERR_RETURN(trans->getNdbError()); + + if (execute_no_commit_ie(this,trans) != 0) + { + table->status= STATUS_NOT_FOUND; + DBUG_RETURN(ndb_err(trans)); + } + DBUG_RETURN(0); +} + +/* + Read one record from NDB using unique secondary index +*/ + +int ha_ndbcluster::unique_index_read(const byte *key, + uint key_len, byte *buf) +{ + NdbConnection *trans= m_active_trans; + NdbIndexOperation *op; + THD *thd= current_thd; + byte *key_ptr; + KEY* key_info; + KEY_PART_INFO *key_part, *end; + uint i; + DBUG_ENTER("unique_index_read"); + DBUG_PRINT("enter", ("key_len: %u, index: %u", key_len, active_index)); + DBUG_DUMP("key", (char*)key, key_len); + + NdbOperation::LockMode lm= + (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type); + if (!(op= trans->getNdbIndexOperation((NDBINDEX *) + m_index[active_index].unique_index, + (const NDBTAB *) m_table)) || + op->readTuple(lm) != 0) + ERR_RETURN(trans->getNdbError()); + + // Set secondary index key(s) + key_ptr= (byte *) key; + key_info= table->key_info + active_index; + DBUG_ASSERT(key_info->key_length == key_len); + end= (key_part= key_info->key_part) + key_info->key_parts; + + for (i= 0; key_part != end; key_part++, i++) + { + if (set_ndb_key(op, key_part->field, + m_index[active_index].unique_index_attrid_map[i], + key_part->null_bit ? key_ptr + 1 : key_ptr)) + ERR_RETURN(trans->getNdbError()); + key_ptr+= key_part->store_length; + } + + // Get non-index attribute(s) + for (i= 0; i < table->fields; i++) + { + Field *field= table->field[i]; + if ((thd->query_id == field->query_id) || + (field->flags & PRI_KEY_FLAG)) // && m_retrieve_primary_key ?? 
+ { + if (get_ndb_value(op, field, i, buf)) + ERR_RETURN(op->getNdbError()); + } + else + { + // Attribute was not to be read + m_value[i].ptr= NULL; + } + } + + if (execute_no_commit_ie(this,trans) != 0) + { + table->status= STATUS_NOT_FOUND; + DBUG_RETURN(ndb_err(trans)); + } + // The value have now been fetched from NDB + unpack_record(buf); + table->status= 0; + DBUG_RETURN(0); +} + +/* + Get the next record of a started scan. Try to fetch + it locally from NdbApi cached records if possible, + otherwise ask NDB for more. + + NOTE + If this is a update/delete make sure to not contact + NDB before any pending ops have been sent to NDB. + +*/ + +inline int ha_ndbcluster::next_result(byte *buf) +{ + int check; + NdbConnection *trans= m_active_trans; + NdbResultSet *cursor= m_active_cursor; + DBUG_ENTER("next_result"); + + if (!cursor) + DBUG_RETURN(HA_ERR_END_OF_FILE); + + /* + If this an update or delete, call nextResult with false + to process any records already cached in NdbApi + */ + bool contact_ndb= m_lock.type < TL_WRITE_ALLOW_WRITE; + do { + DBUG_PRINT("info", ("Call nextResult, contact_ndb: %d", contact_ndb)); + /* + We can only handle one tuple with blobs at a time. + */ + if (m_ops_pending && m_blobs_pending) + { + if (execute_no_commit(this,trans) != 0) + DBUG_RETURN(ndb_err(trans)); + m_ops_pending= 0; + m_blobs_pending= FALSE; + } + check= cursor->nextResult(contact_ndb, m_force_send); + if (check == 0) + { + // One more record found + DBUG_PRINT("info", ("One more record found")); + + unpack_record(buf); + table->status= 0; + DBUG_RETURN(0); + } + else if (check == 1 || check == 2) + { + // 1: No more records + // 2: No more cached records + + /* + Before fetching more rows and releasing lock(s), + all pending update or delete operations should + be sent to NDB + */ + DBUG_PRINT("info", ("ops_pending: %d", m_ops_pending)); + if (m_ops_pending) + { + // if (current_thd->transaction.on) + if (m_transaction_on) + { + if (execute_no_commit(this,trans) != 0) + DBUG_RETURN(ndb_err(trans)); + } + else + { + if (execute_commit(this,trans) != 0) + DBUG_RETURN(ndb_err(trans)); + int res= trans->restart(); + DBUG_ASSERT(res == 0); + } + m_ops_pending= 0; + } + + contact_ndb= (check == 2); + } + } while (check == 2); + + table->status= STATUS_NOT_FOUND; + if (check == -1) + DBUG_RETURN(ndb_err(trans)); + + // No more records + DBUG_PRINT("info", ("No more records")); + DBUG_RETURN(HA_ERR_END_OF_FILE); +} + +/* + Set bounds for ordered index scan. 
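+  For each key part, the start and end keys (when present) are mapped to
+  NdbIndexScanOperation bounds (BoundEQ/LE/LT/GE/GT) based on the key
+  flag, and an LE/GE pair on identical values is collapsed into a single
+  BoundEQ.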
+*/ + +int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op, + const key_range *keys[2]) +{ + const KEY *const key_info= table->key_info + active_index; + const uint key_parts= key_info->key_parts; + uint key_tot_len[2]; + uint tot_len; + uint i, j; + + DBUG_ENTER("set_bounds"); + DBUG_PRINT("info", ("key_parts=%d", key_parts)); + + for (j= 0; j <= 1; j++) + { + const key_range *key= keys[j]; + if (key != NULL) + { + // for key->flag see ha_rkey_function + DBUG_PRINT("info", ("key %d length=%d flag=%d", + j, key->length, key->flag)); + key_tot_len[j]= key->length; + } + else + { + DBUG_PRINT("info", ("key %d not present", j)); + key_tot_len[j]= 0; + } + } + tot_len= 0; + + for (i= 0; i < key_parts; i++) + { + KEY_PART_INFO *key_part= &key_info->key_part[i]; + Field *field= key_part->field; + uint part_len= key_part->length; + uint part_store_len= key_part->store_length; + // Info about each key part + struct part_st { + bool part_last; + const key_range *key; + const byte *part_ptr; + bool part_null; + int bound_type; + const char* bound_ptr; + }; + struct part_st part[2]; + + for (j= 0; j <= 1; j++) + { + struct part_st &p = part[j]; + p.key= NULL; + p.bound_type= -1; + if (tot_len < key_tot_len[j]) + { + p.part_last= (tot_len + part_store_len >= key_tot_len[j]); + p.key= keys[j]; + p.part_ptr= &p.key->key[tot_len]; + p.part_null= key_part->null_bit && *p.part_ptr; + p.bound_ptr= (const char *) + p.part_null ? 0 : key_part->null_bit ? p.part_ptr + 1 : p.part_ptr; + + if (j == 0) + { + switch (p.key->flag) + { + case HA_READ_KEY_EXACT: + p.bound_type= NdbIndexScanOperation::BoundEQ; + break; + case HA_READ_KEY_OR_NEXT: + p.bound_type= NdbIndexScanOperation::BoundLE; + break; + case HA_READ_AFTER_KEY: + if (! p.part_last) + p.bound_type= NdbIndexScanOperation::BoundLE; + else + p.bound_type= NdbIndexScanOperation::BoundLT; + break; + default: + break; + } + } + if (j == 1) { + switch (p.key->flag) + { + case HA_READ_BEFORE_KEY: + if (! p.part_last) + p.bound_type= NdbIndexScanOperation::BoundGE; + else + p.bound_type= NdbIndexScanOperation::BoundGT; + break; + case HA_READ_AFTER_KEY: // weird + p.bound_type= NdbIndexScanOperation::BoundGE; + break; + default: + break; + } + } + + if (p.bound_type == -1) + { + DBUG_PRINT("error", ("key %d unknown flag %d", j, p.key->flag)); + DBUG_ASSERT(false); + // Stop setting bounds but continue with what we have + DBUG_RETURN(0); + } + } + } + + // Seen with e.g. 
b = 1 and c > 1 + if (part[0].bound_type == NdbIndexScanOperation::BoundLE && + part[1].bound_type == NdbIndexScanOperation::BoundGE && + memcmp(part[0].part_ptr, part[1].part_ptr, part_store_len) == 0) + { + DBUG_PRINT("info", ("replace LE/GE pair by EQ")); + part[0].bound_type= NdbIndexScanOperation::BoundEQ; + part[1].bound_type= -1; + } + // Not seen but was in previous version + if (part[0].bound_type == NdbIndexScanOperation::BoundEQ && + part[1].bound_type == NdbIndexScanOperation::BoundGE && + memcmp(part[0].part_ptr, part[1].part_ptr, part_store_len) == 0) + { + DBUG_PRINT("info", ("remove GE from EQ/GE pair")); + part[1].bound_type= -1; + } + + for (j= 0; j <= 1; j++) + { + struct part_st &p = part[j]; + // Set bound if not done with this key + if (p.key != NULL) + { + DBUG_PRINT("info", ("key %d:%d offset=%d length=%d last=%d bound=%d", + j, i, tot_len, part_len, p.part_last, p.bound_type)); + DBUG_DUMP("info", (const char*)p.part_ptr, part_store_len); + + // Set bound if not cancelled via type -1 + if (p.bound_type != -1) + { + if (op->setBound(i, p.bound_type, p.bound_ptr)) + ERR_RETURN(op->getNdbError()); + } + } + } + + tot_len+= part_store_len; + } + DBUG_RETURN(0); +} + +inline +int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op) +{ + uint i; + THD *thd= current_thd; + NdbConnection *trans= m_active_trans; + + DBUG_ENTER("define_read_attrs"); + + // Define attributes to read + for (i= 0; i < table->fields; i++) + { + Field *field= table->field[i]; + if ((thd->query_id == field->query_id) || + (field->flags & PRI_KEY_FLAG) || + m_retrieve_all_fields) + { + if (get_ndb_value(op, field, i, buf)) + ERR_RETURN(op->getNdbError()); + } + else + { + m_value[i].ptr= NULL; + } + } + + if (table->primary_key == MAX_KEY) + { + DBUG_PRINT("info", ("Getting hidden key")); + // Scanning table with no primary key + int hidden_no= table->fields; +#ifndef DBUG_OFF + const NDBTAB *tab= (const NDBTAB *) m_table; + if (!tab->getColumn(hidden_no)) + DBUG_RETURN(1); +#endif + if (get_ndb_value(op, NULL, hidden_no, NULL)) + ERR_RETURN(op->getNdbError()); + } + + if (execute_no_commit(this,trans) != 0) + DBUG_RETURN(ndb_err(trans)); + DBUG_PRINT("exit", ("Scan started successfully")); + DBUG_RETURN(next_result(buf)); +} + +/* + Start ordered index scan in NDB +*/ + +int ha_ndbcluster::ordered_index_scan(const key_range *start_key, + const key_range *end_key, + bool sorted, byte* buf) +{ + bool restart; + NdbConnection *trans= m_active_trans; + NdbResultSet *cursor; + NdbIndexScanOperation *op; + + DBUG_ENTER("ordered_index_scan"); + DBUG_PRINT("enter", ("index: %u, sorted: %d", active_index, sorted)); + DBUG_PRINT("enter", ("Starting new ordered scan on %s", m_tabname)); + + // Check that sorted seems to be initialised + DBUG_ASSERT(sorted == 0 || sorted == 1); + + if (m_active_cursor == 0) + { + restart= false; + NdbOperation::LockMode lm= + (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type); + if (!(op= trans->getNdbIndexScanOperation((NDBINDEX *) + m_index[active_index].index, + (const NDBTAB *) m_table)) || + !(cursor= op->readTuples(lm, 0, parallelism, sorted))) + ERR_RETURN(trans->getNdbError()); + m_active_cursor= cursor; + } else { + restart= true; + op= (NdbIndexScanOperation*)m_active_cursor->getOperation(); + + DBUG_ASSERT(op->getSorted() == sorted); + DBUG_ASSERT(op->getLockMode() == + (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type)); + if(op->reset_bounds(m_force_send)) + DBUG_RETURN(ndb_err(m_active_trans)); + } + + { + const key_range *keys[2]= { start_key, 
end_key }; + int ret= set_bounds(op, keys); + if (ret) + DBUG_RETURN(ret); + } + + if (!restart) + { + DBUG_RETURN(define_read_attrs(buf, op)); + } + else + { + if (execute_no_commit(this,trans) != 0) + DBUG_RETURN(ndb_err(trans)); + + DBUG_RETURN(next_result(buf)); + } +} + +/* + Start a filtered scan in NDB. + + NOTE + This function is here as an example of how to start a + filtered scan. It should be possible to replace full_table_scan + with this function and make a best effort attempt + at filtering out the irrelevant data by converting the "items" + into interpreted instructions. + This would speed up table scans where there is a limiting WHERE clause + that doesn't match any index in the table. + + */ + +int ha_ndbcluster::filtered_scan(const byte *key, uint key_len, + byte *buf, + enum ha_rkey_function find_flag) +{ + NdbConnection *trans= m_active_trans; + NdbResultSet *cursor; + NdbScanOperation *op; + + DBUG_ENTER("filtered_scan"); + DBUG_PRINT("enter", ("key_len: %u, index: %u", + key_len, active_index)); + DBUG_DUMP("key", (char*)key, key_len); + DBUG_PRINT("info", ("Starting a new filtered scan on %s", + m_tabname)); + + NdbOperation::LockMode lm= + (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type); + if (!(op= trans->getNdbScanOperation((const NDBTAB *) m_table)) || + !(cursor= op->readTuples(lm, 0, parallelism))) + ERR_RETURN(trans->getNdbError()); + m_active_cursor= cursor; + + { + // Start scan filter + NdbScanFilter sf(op); + sf.begin(); + + // Set filter using the supplied key data + byte *key_ptr= (byte *) key; + uint tot_len= 0; + KEY* key_info= table->key_info + active_index; + for (uint k= 0; k < key_info->key_parts; k++) + { + KEY_PART_INFO* key_part= key_info->key_part+k; + Field* field= key_part->field; + uint ndb_fieldnr= key_part->fieldnr-1; + DBUG_PRINT("key_part", ("fieldnr: %d", ndb_fieldnr)); + //const NDBCOL *col= ((const NDBTAB *) m_table)->getColumn(ndb_fieldnr); + uint32 field_len= field->pack_length(); + DBUG_DUMP("key", (char*)key, field_len); + + DBUG_PRINT("info", ("Column %s, type: %d, len: %d", + field->field_name, field->real_type(), field_len)); + + // Define scan filter + if (field->real_type() == MYSQL_TYPE_STRING) + sf.eq(ndb_fieldnr, key_ptr, field_len); + else + { + if (field_len == 8) + sf.eq(ndb_fieldnr, (Uint64)*key_ptr); + else if (field_len <= 4) + sf.eq(ndb_fieldnr, (Uint32)*key_ptr); + else + DBUG_RETURN(1); + } + + key_ptr += field_len; + tot_len += field_len; + + if (tot_len >= key_len) + break; + } + // End scan filter + sf.end(); + } + + DBUG_RETURN(define_read_attrs(buf, op)); +} + + +/* + Start full table scan in NDB + */ + +int ha_ndbcluster::full_table_scan(byte *buf) +{ + uint i; + NdbResultSet *cursor; + NdbScanOperation *op; + NdbConnection *trans= m_active_trans; + + DBUG_ENTER("full_table_scan"); + DBUG_PRINT("enter", ("Starting new scan on %s", m_tabname)); + + NdbOperation::LockMode lm= + (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type); + if (!(op=trans->getNdbScanOperation((const NDBTAB *) m_table)) || + !(cursor= op->readTuples(lm, 0, parallelism))) + ERR_RETURN(trans->getNdbError()); + m_active_cursor= cursor; + DBUG_RETURN(define_read_attrs(buf, op)); +} + +/* + Insert one record into NDB +*/ +int ha_ndbcluster::write_row(byte *record) +{ + bool has_auto_increment; + uint i; + NdbConnection *trans= m_active_trans; + NdbOperation *op; + int res; + DBUG_ENTER("write_row"); + + if(m_ignore_dup_key && table->primary_key != MAX_KEY) + { + int peek_res= peek_row(record); + + if (!peek_res) + { + m_dupkey= 
table->primary_key; + DBUG_RETURN(HA_ERR_FOUND_DUPP_KEY); + } + if (peek_res != HA_ERR_KEY_NOT_FOUND) + DBUG_RETURN(peek_res); + } + + statistic_increment(ha_write_count,&LOCK_status); + if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) + table->timestamp_field->set_time(); + has_auto_increment= (table->next_number_field && record == table->record[0]); + + if (!(op= trans->getNdbOperation((const NDBTAB *) m_table))) + ERR_RETURN(trans->getNdbError()); + + res= (m_use_write) ? op->writeTuple() :op->insertTuple(); + if (res != 0) + ERR_RETURN(trans->getNdbError()); + + if (table->primary_key == MAX_KEY) + { + // Table has hidden primary key + Ndb *ndb= get_ndb(); + Uint64 auto_value= NDB_FAILED_AUTO_INCREMENT; + uint retries= NDB_AUTO_INCREMENT_RETRIES; + do { + auto_value= ndb->getAutoIncrementValue((const NDBTAB *) m_table); + } while (auto_value == NDB_FAILED_AUTO_INCREMENT && + --retries && + ndb->getNdbError().status == NdbError::TemporaryError); + if (auto_value == NDB_FAILED_AUTO_INCREMENT) + ERR_RETURN(ndb->getNdbError()); + if (set_hidden_key(op, table->fields, (const byte*)&auto_value)) + ERR_RETURN(op->getNdbError()); + } + else + { + int res; + + if (has_auto_increment) + { + m_skip_auto_increment= FALSE; + update_auto_increment(); + m_skip_auto_increment= !auto_increment_column_changed; + } + + if ((res= set_primary_key_from_record(op, record))) + return res; + } + + // Set non-key attribute(s) + bool set_blob_value= FALSE; + for (i= 0; i < table->fields; i++) + { + Field *field= table->field[i]; + if (!(field->flags & PRI_KEY_FLAG) && + set_ndb_value(op, field, i, &set_blob_value)) + { + m_skip_auto_increment= TRUE; + ERR_RETURN(op->getNdbError()); + } + } + + /* + Execute write operation + NOTE When doing inserts with many values in + each INSERT statement it should not be necessary + to NoCommit the transaction between each row. + Find out how this is detected! 
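+      In the code below the batch is sent to NDB when a single-row insert
+      is done, when m_bulk_insert_rows rows have been buffered, when a
+      blob value was set, or when this insert is part of a primary key
+      update.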
+ */ + m_rows_inserted++; + no_uncommitted_rows_update(1); + m_bulk_insert_not_flushed= TRUE; + if ((m_rows_to_insert == (ha_rows) 1) || + ((m_rows_inserted % m_bulk_insert_rows) == 0) || + m_primary_key_update || + set_blob_value) + { + THD *thd= current_thd; + // Send rows to NDB + DBUG_PRINT("info", ("Sending inserts to NDB, "\ + "rows_inserted:%d, bulk_insert_rows: %d", + (int)m_rows_inserted, (int)m_bulk_insert_rows)); + + m_bulk_insert_not_flushed= FALSE; + // if (thd->transaction.on) + if (m_transaction_on) + { + if (execute_no_commit(this,trans) != 0) + { + m_skip_auto_increment= TRUE; + no_uncommitted_rows_execute_failure(); + DBUG_RETURN(ndb_err(trans)); + } + } + else + { + if (execute_commit(this,trans) != 0) + { + m_skip_auto_increment= TRUE; + no_uncommitted_rows_execute_failure(); + DBUG_RETURN(ndb_err(trans)); + } + int res= trans->restart(); + DBUG_ASSERT(res == 0); + } + } + if ((has_auto_increment) && (m_skip_auto_increment)) + { + Ndb *ndb= get_ndb(); + Uint64 next_val= (Uint64) table->next_number_field->val_int() + 1; + DBUG_PRINT("info", + ("Trying to set next auto increment value to %lu", + (ulong) next_val)); + if (ndb->setAutoIncrementValue((const NDBTAB *) m_table, next_val, TRUE)) + DBUG_PRINT("info", + ("Setting next auto increment value to %u", next_val)); + } + m_skip_auto_increment= TRUE; + + DBUG_RETURN(0); +} + + +/* Compare if a key in a row has changed */ + +int ha_ndbcluster::key_cmp(uint keynr, const byte * old_row, + const byte * new_row) +{ + KEY_PART_INFO *key_part=table->key_info[keynr].key_part; + KEY_PART_INFO *end=key_part+table->key_info[keynr].key_parts; + + for (; key_part != end ; key_part++) + { + if (key_part->null_bit) + { + if ((old_row[key_part->null_offset] & key_part->null_bit) != + (new_row[key_part->null_offset] & key_part->null_bit)) + return 1; + } + if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH)) + { + + if (key_part->field->cmp_binary((char*) (old_row + key_part->offset), + (char*) (new_row + key_part->offset), + (ulong) key_part->length)) + return 1; + } + else + { + if (memcmp(old_row+key_part->offset, new_row+key_part->offset, + key_part->length)) + return 1; + } + } + return 0; +} + +/* + Update one record in NDB using primary key +*/ + +int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) +{ + THD *thd= current_thd; + NdbConnection *trans= m_active_trans; + NdbResultSet* cursor= m_active_cursor; + NdbOperation *op; + uint i; + DBUG_ENTER("update_row"); + + statistic_increment(ha_update_count,&LOCK_status); + if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) + { + table->timestamp_field->set_time(); + // Set query_id so that field is really updated + table->timestamp_field->query_id= thd->query_id; + } + + /* Check for update of primary key for special handling */ + if ((table->primary_key != MAX_KEY) && + (key_cmp(table->primary_key, old_data, new_data))) + { + int read_res, insert_res, delete_res, undo_res; + + DBUG_PRINT("info", ("primary key update, doing pk read+delete+insert")); + // Get all old fields, since we optimize away fields not in query + read_res= complemented_pk_read(old_data, new_data); + if (read_res) + { + DBUG_PRINT("info", ("pk read failed")); + DBUG_RETURN(read_res); + } + // Delete old row + m_primary_key_update= TRUE; + delete_res= delete_row(old_data); + m_primary_key_update= FALSE; + if (delete_res) + { + DBUG_PRINT("info", ("delete failed")); + DBUG_RETURN(delete_res); + } + // Insert new row + DBUG_PRINT("info", ("delete succeded")); + 
m_primary_key_update= TRUE; + insert_res= write_row(new_data); + m_primary_key_update= FALSE; + if (insert_res) + { + DBUG_PRINT("info", ("insert failed")); + if (trans->commitStatus() == NdbConnection::Started) + { + // Undo delete_row(old_data) + m_primary_key_update= TRUE; + undo_res= write_row((byte *)old_data); + if (undo_res) + push_warning(current_thd, + MYSQL_ERROR::WARN_LEVEL_WARN, + undo_res, + "NDB failed undoing delete at primary key update"); + m_primary_key_update= FALSE; + } + DBUG_RETURN(insert_res); + } + DBUG_PRINT("info", ("delete+insert succeeded")); + DBUG_RETURN(0); + } + + if (cursor) + { + /* + We are scanning records and want to update the record + that was just found, call updateTuple on the cursor + to take over the lock to a new update operation + And thus setting the primary key of the record from + the active record in cursor + */ + DBUG_PRINT("info", ("Calling updateTuple on cursor")); + if (!(op= cursor->updateTuple())) + ERR_RETURN(trans->getNdbError()); + m_ops_pending++; + if (uses_blob_value(FALSE)) + m_blobs_pending= TRUE; + } + else + { + if (!(op= trans->getNdbOperation((const NDBTAB *) m_table)) || + op->updateTuple() != 0) + ERR_RETURN(trans->getNdbError()); + + if (table->primary_key == MAX_KEY) + { + // This table has no primary key, use "hidden" primary key + DBUG_PRINT("info", ("Using hidden key")); + + // Require that the PK for this record has previously been + // read into m_ref + DBUG_DUMP("key", m_ref, NDB_HIDDEN_PRIMARY_KEY_LENGTH); + + if (set_hidden_key(op, table->fields, m_ref)) + ERR_RETURN(op->getNdbError()); + } + else + { + int res; + if ((res= set_primary_key_from_record(op, old_data))) + DBUG_RETURN(res); + } + } + + // Set non-key attribute(s) + for (i= 0; i < table->fields; i++) + { + Field *field= table->field[i]; + if (((thd->query_id == field->query_id) || m_retrieve_all_fields) && + (!(field->flags & PRI_KEY_FLAG)) && + set_ndb_value(op, field, i)) + ERR_RETURN(op->getNdbError()); + } + + // Execute update operation + if (!cursor && execute_no_commit(this,trans) != 0) { + no_uncommitted_rows_execute_failure(); + DBUG_RETURN(ndb_err(trans)); + } + + DBUG_RETURN(0); +} + + +/* + Delete one record from NDB, using primary key +*/ + +int ha_ndbcluster::delete_row(const byte *record) +{ + NdbConnection *trans= m_active_trans; + NdbResultSet* cursor= m_active_cursor; + NdbOperation *op; + DBUG_ENTER("delete_row"); + + statistic_increment(ha_delete_count,&LOCK_status); + + if (cursor) + { + /* + We are scanning records and want to delete the record + that was just found, call deleteTuple on the cursor + to take over the lock to a new delete operation + And thus setting the primary key of the record from + the active record in cursor + */ + DBUG_PRINT("info", ("Calling deleteTuple on cursor")); + if (cursor->deleteTuple() != 0) + ERR_RETURN(trans->getNdbError()); + m_ops_pending++; + + no_uncommitted_rows_update(-1); + + if (!m_primary_key_update) + // If deleting from cursor, NoCommit will be handled in next_result + DBUG_RETURN(0); + } + else + { + + if (!(op=trans->getNdbOperation((const NDBTAB *) m_table)) || + op->deleteTuple() != 0) + ERR_RETURN(trans->getNdbError()); + + no_uncommitted_rows_update(-1); + + if (table->primary_key == MAX_KEY) + { + // This table has no primary key, use "hidden" primary key + DBUG_PRINT("info", ("Using hidden key")); + + if (set_hidden_key(op, table->fields, m_ref)) + ERR_RETURN(op->getNdbError()); + } + else + { + int res; + if ((res= set_primary_key_from_record(op, record))) + return res; + } + 
} + + // Execute delete operation + if (execute_no_commit(this,trans) != 0) { + no_uncommitted_rows_execute_failure(); + DBUG_RETURN(ndb_err(trans)); + } + DBUG_RETURN(0); +} + +/* + Unpack a record read from NDB + + SYNOPSIS + unpack_record() + buf Buffer to store read row + + NOTE + The data for each row is read directly into the + destination buffer. This function is primarily + called in order to check if any fields should be + set to null. +*/ + +void ha_ndbcluster::unpack_record(byte* buf) +{ + uint row_offset= (uint) (buf - table->record[0]); + Field **field, **end; + NdbValue *value= m_value; + DBUG_ENTER("unpack_record"); + + // Set null flag(s) + bzero(buf, table->null_bytes); + for (field= table->field, end= field+table->fields; + field < end; + field++, value++) + { + if ((*value).ptr) + { + if (! ((*field)->flags & BLOB_FLAG)) + { + if ((*value).rec->isNULL()) + (*field)->set_null(row_offset); + } + else + { + NdbBlob* ndb_blob= (*value).blob; + bool isNull= TRUE; + int ret= ndb_blob->getNull(isNull); + DBUG_ASSERT(ret == 0); + if (isNull) + (*field)->set_null(row_offset); + } + } + } + +#ifndef DBUG_OFF + // Read and print all values that was fetched + if (table->primary_key == MAX_KEY) + { + // Table with hidden primary key + int hidden_no= table->fields; + const NDBTAB *tab= (const NDBTAB *) m_table; + const NDBCOL *hidden_col= tab->getColumn(hidden_no); + NdbRecAttr* rec= m_value[hidden_no].rec; + DBUG_ASSERT(rec); + DBUG_PRINT("hidden", ("%d: %s \"%llu\"", hidden_no, + hidden_col->getName(), rec->u_64_value())); + } + print_results(); +#endif + DBUG_VOID_RETURN; +} + +/* + Utility function to print/dump the fetched field + */ + +void ha_ndbcluster::print_results() +{ + const NDBTAB *tab= (const NDBTAB*) m_table; + DBUG_ENTER("print_results"); + +#ifndef DBUG_OFF + if (!_db_on_) + DBUG_VOID_RETURN; + + for (uint f=0; f<table->fields;f++) + { + Field *field; + const NDBCOL *col; + NdbValue value; + + if (!(value= m_value[f]).ptr) + { + fprintf(DBUG_FILE, "Field %d was not read\n", f); + continue; + } + field= table->field[f]; + DBUG_DUMP("field->ptr", (char*)field->ptr, field->pack_length()); + col= tab->getColumn(f); + fprintf(DBUG_FILE, "%d: %s\t", f, col->getName()); + + NdbBlob *ndb_blob= NULL; + if (! 
(field->flags & BLOB_FLAG)) + { + if (value.rec->isNULL()) + { + fprintf(DBUG_FILE, "NULL\n"); + continue; + } + } + else + { + ndb_blob= value.blob; + bool isNull= TRUE; + ndb_blob->getNull(isNull); + if (isNull) { + fprintf(DBUG_FILE, "NULL\n"); + continue; + } + } + + switch (col->getType()) { + case NdbDictionary::Column::Tinyint: { + char value= *field->ptr; + fprintf(DBUG_FILE, "Tinyint\t%d", value); + break; + } + case NdbDictionary::Column::Tinyunsigned: { + unsigned char value= *field->ptr; + fprintf(DBUG_FILE, "Tinyunsigned\t%u", value); + break; + } + case NdbDictionary::Column::Smallint: { + short value= *field->ptr; + fprintf(DBUG_FILE, "Smallint\t%d", value); + break; + } + case NdbDictionary::Column::Smallunsigned: { + unsigned short value= *field->ptr; + fprintf(DBUG_FILE, "Smallunsigned\t%u", value); + break; + } + case NdbDictionary::Column::Mediumint: { + byte value[3]; + memcpy(value, field->ptr, 3); + fprintf(DBUG_FILE, "Mediumint\t%d,%d,%d", value[0], value[1], value[2]); + break; + } + case NdbDictionary::Column::Mediumunsigned: { + byte value[3]; + memcpy(value, field->ptr, 3); + fprintf(DBUG_FILE, "Mediumunsigned\t%u,%u,%u", value[0], value[1], value[2]); + break; + } + case NdbDictionary::Column::Int: { + fprintf(DBUG_FILE, "Int\t%lld", field->val_int()); + break; + } + case NdbDictionary::Column::Unsigned: { + Uint32 value= (Uint32) *field->ptr; + fprintf(DBUG_FILE, "Unsigned\t%u", value); + break; + } + case NdbDictionary::Column::Bigint: { + Int64 value= (Int64) *field->ptr; + fprintf(DBUG_FILE, "Bigint\t%lld", value); + break; + } + case NdbDictionary::Column::Bigunsigned: { + Uint64 value= (Uint64) *field->ptr; + fprintf(DBUG_FILE, "Bigunsigned\t%llu", value); + break; + } + case NdbDictionary::Column::Float: { + float value= (float) *field->ptr; + fprintf(DBUG_FILE, "Float\t%f", value); + break; + } + case NdbDictionary::Column::Double: { + double value= (double) *field->ptr; + fprintf(DBUG_FILE, "Double\t%f", value); + break; + } + case NdbDictionary::Column::Olddecimal: { + char *value= field->ptr; + fprintf(DBUG_FILE, "Olddecimal\t'%-*s'", field->pack_length(), value); + break; + } + case NdbDictionary::Column::Olddecimalunsigned: { + char *value= field->ptr; + fprintf(DBUG_FILE, "Olddecimalunsigned\t'%-*s'", field->pack_length(), value); + break; + } + case NdbDictionary::Column::Char:{ + const char *value= (char *) field->ptr; + fprintf(DBUG_FILE, "Char\t'%.*s'", field->pack_length(), value); + break; + } + case NdbDictionary::Column::Varchar: + case NdbDictionary::Column::Binary: + case NdbDictionary::Column::Varbinary: { + const char *value= (char *) field->ptr; + fprintf(DBUG_FILE, "Var\t'%.*s'", field->pack_length(), value); + break; + } + case NdbDictionary::Column::Datetime: { + Uint64 value= (Uint64) *field->ptr; + fprintf(DBUG_FILE, "Datetime\t%llu", value); + break; + } + case NdbDictionary::Column::Date: { + Uint64 value= (Uint64) *field->ptr; + fprintf(DBUG_FILE, "Date\t%llu", value); + break; + } + case NdbDictionary::Column::Time: { + Uint64 value= (Uint64) *field->ptr; + fprintf(DBUG_FILE, "Time\t%llu", value); + break; + } + case NdbDictionary::Column::Blob: { + Uint64 len= 0; + ndb_blob->getLength(len); + fprintf(DBUG_FILE, "Blob\t[len=%u]", (unsigned)len); + break; + } + case NdbDictionary::Column::Text: { + Uint64 len= 0; + ndb_blob->getLength(len); + fprintf(DBUG_FILE, "Text\t[len=%u]", (unsigned)len); + break; + } + case NdbDictionary::Column::Undefined: + default: + fprintf(DBUG_FILE, "Unknown type: %d", col->getType()); + break; + 
} + fprintf(DBUG_FILE, "\n"); + + } +#endif + DBUG_VOID_RETURN; +} + + +int ha_ndbcluster::index_init(uint index) +{ + DBUG_ENTER("index_init"); + DBUG_PRINT("enter", ("index: %u", index)); + DBUG_RETURN(handler::index_init(index)); +} + + +int ha_ndbcluster::index_end() +{ + DBUG_ENTER("index_end"); + DBUG_RETURN(close_scan()); +} + +/** + * Check if key contains null + */ +static +int +check_null_in_key(const KEY* key_info, const byte *key, uint key_len) +{ + KEY_PART_INFO *curr_part, *end_part; + const byte* end_ptr = key + key_len; + curr_part= key_info->key_part; + end_part= curr_part + key_info->key_parts; + + + for (; curr_part != end_part && key < end_ptr; curr_part++) + { + if(curr_part->null_bit && *key) + return 1; + + key += curr_part->store_length; + } + return 0; +} + +int ha_ndbcluster::index_read(byte *buf, + const byte *key, uint key_len, + enum ha_rkey_function find_flag) +{ + DBUG_ENTER("index_read"); + DBUG_PRINT("enter", ("active_index: %u, key_len: %u, find_flag: %d", + active_index, key_len, find_flag)); + + int error; + ndb_index_type type = get_index_type(active_index); + const KEY* key_info = table->key_info+active_index; + switch (type){ + case PRIMARY_KEY_ORDERED_INDEX: + case PRIMARY_KEY_INDEX: + if (find_flag == HA_READ_KEY_EXACT && key_info->key_length == key_len) + { + if(m_active_cursor && (error= close_scan())) + DBUG_RETURN(error); + DBUG_RETURN(pk_read(key, key_len, buf)); + } + else if (type == PRIMARY_KEY_INDEX) + { + DBUG_RETURN(1); + } + break; + case UNIQUE_ORDERED_INDEX: + case UNIQUE_INDEX: + if (find_flag == HA_READ_KEY_EXACT && key_info->key_length == key_len && + !check_null_in_key(key_info, key, key_len)) + { + if(m_active_cursor && (error= close_scan())) + DBUG_RETURN(error); + DBUG_RETURN(unique_index_read(key, key_len, buf)); + } + else if (type == UNIQUE_INDEX) + { + DBUG_RETURN(1); + } + break; + case ORDERED_INDEX: + break; + default: + case UNDEFINED_INDEX: + DBUG_ASSERT(FALSE); + DBUG_RETURN(1); + break; + } + + key_range start_key; + start_key.key = key; + start_key.length = key_len; + start_key.flag = find_flag; + error= ordered_index_scan(&start_key, 0, TRUE, buf); + DBUG_RETURN(error == HA_ERR_END_OF_FILE ? 
HA_ERR_KEY_NOT_FOUND : error); +} + + +int ha_ndbcluster::index_read_idx(byte *buf, uint index_no, + const byte *key, uint key_len, + enum ha_rkey_function find_flag) +{ + statistic_increment(ha_read_key_count,&LOCK_status); + DBUG_ENTER("index_read_idx"); + DBUG_PRINT("enter", ("index_no: %u, key_len: %u", index_no, key_len)); + index_init(index_no); + DBUG_RETURN(index_read(buf, key, key_len, find_flag)); +} + + +int ha_ndbcluster::index_next(byte *buf) +{ + DBUG_ENTER("index_next"); + + int error= 1; + statistic_increment(ha_read_next_count,&LOCK_status); + DBUG_RETURN(next_result(buf)); +} + + +int ha_ndbcluster::index_prev(byte *buf) +{ + DBUG_ENTER("index_prev"); + statistic_increment(ha_read_prev_count,&LOCK_status); + DBUG_RETURN(1); +} + + +int ha_ndbcluster::index_first(byte *buf) +{ + DBUG_ENTER("index_first"); + statistic_increment(ha_read_first_count,&LOCK_status); + // Start the ordered index scan and fetch the first row + + // Only HA_READ_ORDER indexes get called by index_first + DBUG_RETURN(ordered_index_scan(0, 0, TRUE, buf)); +} + + +int ha_ndbcluster::index_last(byte *buf) +{ + DBUG_ENTER("index_last"); + statistic_increment(ha_read_last_count,&LOCK_status); + int res; + if((res= ordered_index_scan(0, 0, TRUE, buf)) == 0){ + NdbResultSet *cursor= m_active_cursor; + while((res= cursor->nextResult(TRUE, m_force_send)) == 0); + if(res == 1){ + unpack_record(buf); + table->status= 0; + DBUG_RETURN(0); + } + } + DBUG_RETURN(res); +} + + +inline +int ha_ndbcluster::read_range_first_to_buf(const key_range *start_key, + const key_range *end_key, + bool eq_range, bool sorted, + byte* buf) +{ + KEY* key_info; + int error= 1; + DBUG_ENTER("ha_ndbcluster::read_range_first_to_buf"); + DBUG_PRINT("info", ("eq_range: %d, sorted: %d", eq_range, sorted)); + + switch (get_index_type(active_index)){ + case PRIMARY_KEY_ORDERED_INDEX: + case PRIMARY_KEY_INDEX: + key_info= table->key_info + active_index; + if (start_key && + start_key->length == key_info->key_length && + start_key->flag == HA_READ_KEY_EXACT) + { + if(m_active_cursor && (error= close_scan())) + DBUG_RETURN(error); + error= pk_read(start_key->key, start_key->length, buf); + DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ? HA_ERR_END_OF_FILE : error); + } + break; + case UNIQUE_ORDERED_INDEX: + case UNIQUE_INDEX: + key_info= table->key_info + active_index; + if (start_key && start_key->length == key_info->key_length && + start_key->flag == HA_READ_KEY_EXACT && + !check_null_in_key(key_info, start_key->key, start_key->length)) + { + if(m_active_cursor && (error= close_scan())) + DBUG_RETURN(error); + error= unique_index_read(start_key->key, start_key->length, buf); + DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ? 
HA_ERR_END_OF_FILE : error); + } + break; + default: + break; + } + + // Start the ordered index scan and fetch the first row + error= ordered_index_scan(start_key, end_key, sorted, buf); + DBUG_RETURN(error); +} + + +int ha_ndbcluster::read_range_first(const key_range *start_key, + const key_range *end_key, + bool eq_range, bool sorted) +{ + byte* buf= table->record[0]; + DBUG_ENTER("ha_ndbcluster::read_range_first"); + + DBUG_RETURN(read_range_first_to_buf(start_key, + end_key, + eq_range, + sorted, + buf)); +} + +int ha_ndbcluster::read_range_next() +{ + DBUG_ENTER("ha_ndbcluster::read_range_next"); + DBUG_RETURN(next_result(table->record[0])); +} + + +int ha_ndbcluster::rnd_init(bool scan) +{ + NdbResultSet *cursor= m_active_cursor; + DBUG_ENTER("rnd_init"); + DBUG_PRINT("enter", ("scan: %d", scan)); + // Check if scan is to be restarted + if (cursor) + { + if (!scan) + DBUG_RETURN(1); + int res= cursor->restart(m_force_send); + DBUG_ASSERT(res == 0); + } + index_init(table->primary_key); + DBUG_RETURN(0); +} + +int ha_ndbcluster::close_scan() +{ + NdbResultSet *cursor= m_active_cursor; + NdbConnection *trans= m_active_trans; + DBUG_ENTER("close_scan"); + + if (!cursor) + DBUG_RETURN(1); + + + if (m_ops_pending) + { + /* + Take over any pending transactions to the + deleteing/updating transaction before closing the scan + */ + DBUG_PRINT("info", ("ops_pending: %d", m_ops_pending)); + if (execute_no_commit(this,trans) != 0) { + no_uncommitted_rows_execute_failure(); + DBUG_RETURN(ndb_err(trans)); + } + m_ops_pending= 0; + } + + cursor->close(m_force_send); + m_active_cursor= NULL; + DBUG_RETURN(0); +} + +int ha_ndbcluster::rnd_end() +{ + DBUG_ENTER("rnd_end"); + DBUG_RETURN(close_scan()); +} + + +int ha_ndbcluster::rnd_next(byte *buf) +{ + DBUG_ENTER("rnd_next"); + statistic_increment(ha_read_rnd_next_count, &LOCK_status); + + if (!m_active_cursor) + DBUG_RETURN(full_table_scan(buf)); + DBUG_RETURN(next_result(buf)); +} + + +/* + An "interesting" record has been found and it's pk + retrieved by calling position + Now it's time to read the record from db once + again +*/ + +int ha_ndbcluster::rnd_pos(byte *buf, byte *pos) +{ + DBUG_ENTER("rnd_pos"); + statistic_increment(ha_read_rnd_count,&LOCK_status); + // The primary key for the record is stored in pos + // Perform a pk_read using primary key "index" + DBUG_RETURN(pk_read(pos, ref_length, buf)); +} + + +/* + Store the primary key of this record in ref + variable, so that the row can be retrieved again later + using "reference" in rnd_pos +*/ + +void ha_ndbcluster::position(const byte *record) +{ + KEY *key_info; + KEY_PART_INFO *key_part; + KEY_PART_INFO *end; + byte *buff; + DBUG_ENTER("position"); + + if (table->primary_key != MAX_KEY) + { + key_info= table->key_info + table->primary_key; + key_part= key_info->key_part; + end= key_part + key_info->key_parts; + buff= ref; + + for (; key_part != end; key_part++) + { + if (key_part->null_bit) { + /* Store 0 if the key part is a NULL part */ + if (record[key_part->null_offset] + & key_part->null_bit) { + *buff++= 1; + continue; + } + *buff++= 0; + } + memcpy(buff, record + key_part->offset, key_part->length); + buff += key_part->length; + } + } + else + { + // No primary key, get hidden key + DBUG_PRINT("info", ("Getting hidden key")); + int hidden_no= table->fields; + NdbRecAttr* rec= m_value[hidden_no].rec; + const NDBTAB *tab= (const NDBTAB *) m_table; + const NDBCOL *hidden_col= tab->getColumn(hidden_no); + DBUG_ASSERT(hidden_col->getPrimaryKey() && + hidden_col->getAutoIncrement() 
&& + rec != NULL && + ref_length == NDB_HIDDEN_PRIMARY_KEY_LENGTH); + memcpy(ref, m_ref, ref_length); + } + + DBUG_DUMP("ref", (char*)ref, ref_length); + DBUG_VOID_RETURN; +} + + +void ha_ndbcluster::info(uint flag) +{ + DBUG_ENTER("info"); + DBUG_PRINT("enter", ("flag: %d", flag)); + + if (flag & HA_STATUS_POS) + DBUG_PRINT("info", ("HA_STATUS_POS")); + if (flag & HA_STATUS_NO_LOCK) + DBUG_PRINT("info", ("HA_STATUS_NO_LOCK")); + if (flag & HA_STATUS_TIME) + DBUG_PRINT("info", ("HA_STATUS_TIME")); + if (flag & HA_STATUS_VARIABLE) + { + DBUG_PRINT("info", ("HA_STATUS_VARIABLE")); + if (m_table_info) + { + if (m_ha_not_exact_count) + records= 100; + else + records_update(); + } + else + { + if ((my_errno= check_ndb_connection())) + DBUG_VOID_RETURN; + Ndb *ndb= get_ndb(); + Uint64 rows= 100; + if (current_thd->variables.ndb_use_exact_count) + ndb_get_table_statistics(ndb, m_tabname, &rows, 0); + records= rows; + } + } + if (flag & HA_STATUS_CONST) + { + DBUG_PRINT("info", ("HA_STATUS_CONST")); + set_rec_per_key(); + } + if (flag & HA_STATUS_ERRKEY) + { + DBUG_PRINT("info", ("HA_STATUS_ERRKEY")); + errkey= m_dupkey; + } + if (flag & HA_STATUS_AUTO) + { + DBUG_PRINT("info", ("HA_STATUS_AUTO")); + if (m_table) + { + Ndb *ndb= get_ndb(); + + auto_increment_value= + ndb->readAutoIncrementValue((const NDBTAB *) m_table); + } + } + DBUG_VOID_RETURN; +} + + +int ha_ndbcluster::extra(enum ha_extra_function operation) +{ + DBUG_ENTER("extra"); + switch (operation) { + case HA_EXTRA_NORMAL: /* Optimize for space (def) */ + DBUG_PRINT("info", ("HA_EXTRA_NORMAL")); + break; + case HA_EXTRA_QUICK: /* Optimize for speed */ + DBUG_PRINT("info", ("HA_EXTRA_QUICK")); + break; + case HA_EXTRA_RESET: /* Reset database to after open */ + DBUG_PRINT("info", ("HA_EXTRA_RESET")); + break; + case HA_EXTRA_CACHE: /* Cash record in HA_rrnd() */ + DBUG_PRINT("info", ("HA_EXTRA_CACHE")); + break; + case HA_EXTRA_NO_CACHE: /* End cacheing of records (def) */ + DBUG_PRINT("info", ("HA_EXTRA_NO_CACHE")); + break; + case HA_EXTRA_NO_READCHECK: /* No readcheck on update */ + DBUG_PRINT("info", ("HA_EXTRA_NO_READCHECK")); + break; + case HA_EXTRA_READCHECK: /* Use readcheck (def) */ + DBUG_PRINT("info", ("HA_EXTRA_READCHECK")); + break; + case HA_EXTRA_KEYREAD: /* Read only key to database */ + DBUG_PRINT("info", ("HA_EXTRA_KEYREAD")); + break; + case HA_EXTRA_NO_KEYREAD: /* Normal read of records (def) */ + DBUG_PRINT("info", ("HA_EXTRA_NO_KEYREAD")); + break; + case HA_EXTRA_NO_USER_CHANGE: /* No user is allowed to write */ + DBUG_PRINT("info", ("HA_EXTRA_NO_USER_CHANGE")); + break; + case HA_EXTRA_KEY_CACHE: + DBUG_PRINT("info", ("HA_EXTRA_KEY_CACHE")); + break; + case HA_EXTRA_NO_KEY_CACHE: + DBUG_PRINT("info", ("HA_EXTRA_NO_KEY_CACHE")); + break; + case HA_EXTRA_WAIT_LOCK: /* Wait until file is avalably (def) */ + DBUG_PRINT("info", ("HA_EXTRA_WAIT_LOCK")); + break; + case HA_EXTRA_NO_WAIT_LOCK: /* If file is locked, return quickly */ + DBUG_PRINT("info", ("HA_EXTRA_NO_WAIT_LOCK")); + break; + case HA_EXTRA_WRITE_CACHE: /* Use write cache in ha_write() */ + DBUG_PRINT("info", ("HA_EXTRA_WRITE_CACHE")); + break; + case HA_EXTRA_FLUSH_CACHE: /* flush write_record_cache */ + DBUG_PRINT("info", ("HA_EXTRA_FLUSH_CACHE")); + break; + case HA_EXTRA_NO_KEYS: /* Remove all update of keys */ + DBUG_PRINT("info", ("HA_EXTRA_NO_KEYS")); + break; + case HA_EXTRA_KEYREAD_CHANGE_POS: /* Keyread, but change pos */ + DBUG_PRINT("info", ("HA_EXTRA_KEYREAD_CHANGE_POS")); /* xxxxchk -r must be used */ + break; + case HA_EXTRA_REMEMBER_POS: 
/* Remember pos for next/prev */ + DBUG_PRINT("info", ("HA_EXTRA_REMEMBER_POS")); + break; + case HA_EXTRA_RESTORE_POS: + DBUG_PRINT("info", ("HA_EXTRA_RESTORE_POS")); + break; + case HA_EXTRA_REINIT_CACHE: /* init cache from current record */ + DBUG_PRINT("info", ("HA_EXTRA_REINIT_CACHE")); + break; + case HA_EXTRA_FORCE_REOPEN: /* Datafile have changed on disk */ + DBUG_PRINT("info", ("HA_EXTRA_FORCE_REOPEN")); + break; + case HA_EXTRA_FLUSH: /* Flush tables to disk */ + DBUG_PRINT("info", ("HA_EXTRA_FLUSH")); + break; + case HA_EXTRA_NO_ROWS: /* Don't write rows */ + DBUG_PRINT("info", ("HA_EXTRA_NO_ROWS")); + break; + case HA_EXTRA_RESET_STATE: /* Reset positions */ + DBUG_PRINT("info", ("HA_EXTRA_RESET_STATE")); + break; + case HA_EXTRA_IGNORE_DUP_KEY: /* Dup keys don't rollback everything*/ + DBUG_PRINT("info", ("HA_EXTRA_IGNORE_DUP_KEY")); + if (current_thd->lex->sql_command == SQLCOM_REPLACE) + { + DBUG_PRINT("info", ("Turning ON use of write instead of insert")); + m_use_write= TRUE; + } else + { + DBUG_PRINT("info", ("Ignoring duplicate key")); + m_ignore_dup_key= TRUE; + } + break; + case HA_EXTRA_NO_IGNORE_DUP_KEY: + DBUG_PRINT("info", ("HA_EXTRA_NO_IGNORE_DUP_KEY")); + DBUG_PRINT("info", ("Turning OFF use of write instead of insert")); + m_use_write= FALSE; + m_ignore_dup_key= FALSE; + break; + case HA_EXTRA_RETRIEVE_ALL_COLS: /* Retrieve all columns, not just those + where field->query_id is the same as + the current query id */ + DBUG_PRINT("info", ("HA_EXTRA_RETRIEVE_ALL_COLS")); + m_retrieve_all_fields= TRUE; + break; + case HA_EXTRA_PREPARE_FOR_DELETE: + DBUG_PRINT("info", ("HA_EXTRA_PREPARE_FOR_DELETE")); + break; + case HA_EXTRA_PREPARE_FOR_UPDATE: /* Remove read cache if problems */ + DBUG_PRINT("info", ("HA_EXTRA_PREPARE_FOR_UPDATE")); + break; + case HA_EXTRA_PRELOAD_BUFFER_SIZE: + DBUG_PRINT("info", ("HA_EXTRA_PRELOAD_BUFFER_SIZE")); + break; + case HA_EXTRA_RETRIEVE_PRIMARY_KEY: + DBUG_PRINT("info", ("HA_EXTRA_RETRIEVE_PRIMARY_KEY")); + m_retrieve_primary_key= TRUE; + break; + case HA_EXTRA_CHANGE_KEY_TO_UNIQUE: + DBUG_PRINT("info", ("HA_EXTRA_CHANGE_KEY_TO_UNIQUE")); + break; + case HA_EXTRA_CHANGE_KEY_TO_DUP: + DBUG_PRINT("info", ("HA_EXTRA_CHANGE_KEY_TO_DUP")); + break; + + } + + DBUG_RETURN(0); +} + +/* + Start of an insert, remember number of rows to be inserted, it will + be used in write_row and get_autoincrement to send an optimal number + of rows in each roundtrip to the server + + SYNOPSIS + rows number of rows to insert, 0 if unknown + +*/ + +void ha_ndbcluster::start_bulk_insert(ha_rows rows) +{ + int bytes, batch; + const NDBTAB *tab= (const NDBTAB *) m_table; + + DBUG_ENTER("start_bulk_insert"); + DBUG_PRINT("enter", ("rows: %d", (int)rows)); + + m_rows_inserted= (ha_rows) 0; + if (rows == (ha_rows) 0) + { + /* We don't know how many will be inserted, guess */ + m_rows_to_insert= m_autoincrement_prefetch; + } + else + m_rows_to_insert= rows; + + /* + Calculate how many rows that should be inserted + per roundtrip to NDB. This is done in order to minimize the + number of roundtrips as much as possible. However performance will + degrade if too many bytes are inserted, thus it's limited by this + calculation. + */ + const int bytesperbatch= 8192; + bytes= 12 + tab->getRowSizeInBytes() + 4 * tab->getNoOfColumns(); + batch= bytesperbatch/bytes; + batch= batch == 0 ? 
1 : batch; + DBUG_PRINT("info", ("batch: %d, bytes: %d", batch, bytes)); + m_bulk_insert_rows= batch; + + DBUG_VOID_RETURN; +} + +/* + End of an insert + */ +int ha_ndbcluster::end_bulk_insert() +{ + int error= 0; + + DBUG_ENTER("end_bulk_insert"); + // Check if last inserts need to be flushed + if (m_bulk_insert_not_flushed) + { + NdbConnection *trans= m_active_trans; + // Send rows to NDB + DBUG_PRINT("info", ("Sending inserts to NDB, "\ + "rows_inserted:%d, bulk_insert_rows: %d", + (int) m_rows_inserted, (int) m_bulk_insert_rows)); + m_bulk_insert_not_flushed= FALSE; + if (m_transaction_on) + { + if (execute_no_commit(this, trans) != 0) + { + no_uncommitted_rows_execute_failure(); + my_errno= error= ndb_err(trans); + } + } + else + { + if (execute_commit(this, trans) != 0) + { + no_uncommitted_rows_execute_failure(); + my_errno= error= ndb_err(trans); + } + else + { + int res= trans->restart(); + DBUG_ASSERT(res == 0); + } + } + } + + m_rows_inserted= (ha_rows) 0; + m_rows_to_insert= (ha_rows) 1; + DBUG_RETURN(error); +} + + +int ha_ndbcluster::extra_opt(enum ha_extra_function operation, ulong cache_size) +{ + DBUG_ENTER("extra_opt"); + DBUG_PRINT("enter", ("cache_size: %lu", cache_size)); + DBUG_RETURN(extra(operation)); +} + + +int ha_ndbcluster::reset() +{ + DBUG_ENTER("reset"); + // Reset what? + DBUG_RETURN(1); +} + +static const char *ha_ndb_bas_exts[]= { ha_ndb_ext, NullS }; +const char **ha_ndbcluster::bas_ext() const +{ return ha_ndb_bas_exts; } + + +/* + How many seeks it will take to read through the table + This is to be comparable to the number returned by records_in_range so + that we can decide if we should scan the table or use keys. +*/ + +double ha_ndbcluster::scan_time() +{ + DBUG_ENTER("ha_ndbcluster::scan_time()"); + double res= rows2double(records*1000); + DBUG_PRINT("exit", ("table: %s value: %f", + m_tabname, res)); + DBUG_RETURN(res); +} + +/* + Convert MySQL table locks into locks supported by Ndb Cluster. + Note that MySQL Cluster does currently not support distributed + table locks, so to be safe one should set cluster in Single + User Mode, before relying on table locks when updating tables + from several MySQL servers +*/ + +THR_LOCK_DATA **ha_ndbcluster::store_lock(THD *thd, + THR_LOCK_DATA **to, + enum thr_lock_type lock_type) +{ + DBUG_ENTER("store_lock"); + if (lock_type != TL_IGNORE && m_lock.type == TL_UNLOCK) + { + + /* If we are not doing a LOCK TABLE, then allow multiple + writers */ + + /* Since NDB does not currently have table locks + this is treated as a ordinary lock */ + + if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && + lock_type <= TL_WRITE) && !thd->in_lock_tables) + lock_type= TL_WRITE_ALLOW_WRITE; + + /* In queries of type INSERT INTO t1 SELECT ... FROM t2 ... + MySQL would use the lock TL_READ_NO_INSERT on t2, and that + would conflict with TL_WRITE_ALLOW_WRITE, blocking all inserts + to t2. Convert the lock to a normal read lock to allow + concurrent inserts to t2. 
*/ + + if (lock_type == TL_READ_NO_INSERT && !thd->in_lock_tables) + lock_type= TL_READ; + + m_lock.type=lock_type; + } + *to++= &m_lock; + + DBUG_PRINT("exit", ("lock_type: %d", lock_type)); + + DBUG_RETURN(to); +} + +#ifndef DBUG_OFF +#define PRINT_OPTION_FLAGS(t) { \ + if (t->options & OPTION_NOT_AUTOCOMMIT) \ + DBUG_PRINT("thd->options", ("OPTION_NOT_AUTOCOMMIT")); \ + if (t->options & OPTION_BEGIN) \ + DBUG_PRINT("thd->options", ("OPTION_BEGIN")); \ + if (t->options & OPTION_TABLE_LOCK) \ + DBUG_PRINT("thd->options", ("OPTION_TABLE_LOCK")); \ +} +#else +#define PRINT_OPTION_FLAGS(t) +#endif + + +/* + As MySQL will execute an external lock for every new table it uses + we can use this to start the transactions. + If we are in auto_commit mode we just need to start a transaction + for the statement, this will be stored in transaction.stmt. + If not, we have to start a master transaction if there doesn't exist + one from before, this will be stored in transaction.all + + When a table lock is held one transaction will be started which holds + the table lock and for each statement a hupp transaction will be started + If we are locking the table then: + - save the NdbDictionary::Table for easy access + - save reference to table statistics + - refresh list of the indexes for the table if needed (if altered) + */ + +int ha_ndbcluster::external_lock(THD *thd, int lock_type) +{ + int error=0; + NdbConnection* trans= NULL; + + DBUG_ENTER("external_lock"); + /* + Check that this handler instance has a connection + set up to the Ndb object of thd + */ + if (check_ndb_connection()) + DBUG_RETURN(1); + + Thd_ndb *thd_ndb= (Thd_ndb*)thd->transaction.thd_ndb; + Ndb *ndb= thd_ndb->ndb; + + DBUG_PRINT("enter", ("transaction.thd_ndb->lock_count: %d", + thd_ndb->lock_count)); + + if (lock_type != F_UNLCK) + { + DBUG_PRINT("info", ("lock_type != F_UNLCK")); + if (!thd_ndb->lock_count++) + { + PRINT_OPTION_FLAGS(thd); + + if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) + { + // Autocommit transaction + DBUG_ASSERT(!thd->transaction.stmt.ndb_tid); + DBUG_PRINT("trans",("Starting transaction stmt")); + + trans= ndb->startTransaction(); + if (trans == NULL) + ERR_RETURN(ndb->getNdbError()); + no_uncommitted_rows_reset(thd); + thd->transaction.stmt.ndb_tid= trans; + } + else + { + if (!thd->transaction.all.ndb_tid) + { + // Not autocommit transaction + // A "master" transaction ha not been started yet + DBUG_PRINT("trans",("starting transaction, all")); + + trans= ndb->startTransaction(); + if (trans == NULL) + ERR_RETURN(ndb->getNdbError()); + no_uncommitted_rows_reset(thd); + + /* + If this is the start of a LOCK TABLE, a table look + should be taken on the table in NDB + + Check if it should be read or write lock + */ + if (thd->options & (OPTION_TABLE_LOCK)) + { + //lockThisTable(); + DBUG_PRINT("info", ("Locking the table..." )); + } + + thd->transaction.all.ndb_tid= trans; + } + } + } + /* + This is the place to make sure this handler instance + has a started transaction. + + The transaction is started by the first handler on which + MySQL Server calls external lock + + Other handlers in the same stmt or transaction should use + the same NDB transaction. This is done by setting up the m_active_trans + pointer to point to the NDB transaction. 
+ */ + + // store thread specific data first to set the right context + m_force_send= thd->variables.ndb_force_send; + m_ha_not_exact_count= !thd->variables.ndb_use_exact_count; + m_autoincrement_prefetch= + (ha_rows) thd->variables.ndb_autoincrement_prefetch_sz; + if (!thd->transaction.on) + m_transaction_on= FALSE; + else + m_transaction_on= thd->variables.ndb_use_transactions; + // m_use_local_query_cache= thd->variables.ndb_use_local_query_cache; + + m_active_trans= thd->transaction.all.ndb_tid ? + (NdbConnection*)thd->transaction.all.ndb_tid: + (NdbConnection*)thd->transaction.stmt.ndb_tid; + DBUG_ASSERT(m_active_trans); + // Start of transaction + m_retrieve_all_fields= FALSE; + m_retrieve_primary_key= FALSE; + m_ops_pending= 0; + { + NDBDICT *dict= ndb->getDictionary(); + const NDBTAB *tab; + void *tab_info; + if (!(tab= dict->getTable(m_tabname, &tab_info))) + ERR_RETURN(dict->getNdbError()); + DBUG_PRINT("info", ("Table schema version: %d", + tab->getObjectVersion())); + // Check if thread has stale local cache + // New transaction must not use old tables... (trans != 0) + // Running might... + if ((trans && tab->getObjectStatus() != NdbDictionary::Object::Retrieved) + || tab->getObjectStatus() == NdbDictionary::Object::Invalid) + { + invalidate_dictionary_cache(FALSE); + if (!(tab= dict->getTable(m_tabname, &tab_info))) + ERR_RETURN(dict->getNdbError()); + DBUG_PRINT("info", ("Table schema version: %d", + tab->getObjectVersion())); + } + if (m_table_version < tab->getObjectVersion()) + { + /* + The table has been altered, caller has to retry + */ + NdbError err= ndb->getNdbError(NDB_INVALID_SCHEMA_OBJECT); + DBUG_RETURN(ndb_to_mysql_error(&err)); + } + if (m_table != (void *)tab) + { + m_table= (void *)tab; + m_table_version = tab->getObjectVersion(); + if (!(my_errno= build_index_list(ndb, table, ILBP_OPEN))) + DBUG_RETURN(my_errno); + } + m_table_info= tab_info; + } + no_uncommitted_rows_init(thd); + } + else + { + DBUG_PRINT("info", ("lock_type == F_UNLCK")); + if (!--thd_ndb->lock_count) + { + DBUG_PRINT("trans", ("Last external_lock")); + PRINT_OPTION_FLAGS(thd); + + if (thd->transaction.stmt.ndb_tid) + { + /* + Unlock is done without a transaction commit / rollback. + This happens if the thread didn't update any rows + We must in this case close the transaction to release resources + */ + DBUG_PRINT("trans",("ending non-updating transaction")); + ndb->closeTransaction(m_active_trans); + thd->transaction.stmt.ndb_tid= 0; + } + } + m_table_info= NULL; + /* + This is the place to make sure this handler instance + no longer are connected to the active transaction. + + And since the handler is no longer part of the transaction + it can't have open cursors, ops or blobs pending. + */ + m_active_trans= NULL; + + if (m_active_cursor) + DBUG_PRINT("warning", ("m_active_cursor != NULL")); + m_active_cursor= NULL; + + if (m_blobs_pending) + DBUG_PRINT("warning", ("blobs_pending != 0")); + m_blobs_pending= 0; + + if (m_ops_pending) + DBUG_PRINT("warning", ("ops_pending != 0L")); + m_ops_pending= 0; + } + DBUG_RETURN(error); +} + +/* + Start a transaction for running a statement if one is not + already running in a transaction. This will be the case in + a BEGIN; COMMIT; block + When using LOCK TABLE's external_lock will start a transaction + since ndb does not currently does not support table locking +*/ + +int ha_ndbcluster::start_stmt(THD *thd) +{ + int error=0; + DBUG_ENTER("start_stmt"); + PRINT_OPTION_FLAGS(thd); + + NdbConnection *trans= + (thd->transaction.stmt.ndb_tid) + ? 
(NdbConnection *)(thd->transaction.stmt.ndb_tid) + : (NdbConnection *)(thd->transaction.all.ndb_tid); + if (!trans){ + Ndb *ndb= ((Thd_ndb*)thd->transaction.thd_ndb)->ndb; + DBUG_PRINT("trans",("Starting transaction stmt")); + trans= ndb->startTransaction(); + if (trans == NULL) + ERR_RETURN(ndb->getNdbError()); + no_uncommitted_rows_reset(thd); + thd->transaction.stmt.ndb_tid= trans; + } + m_active_trans= trans; + + // Start of statement + m_retrieve_all_fields= FALSE; + m_retrieve_primary_key= FALSE; + m_ops_pending= 0; + + DBUG_RETURN(error); +} + + +/* + Commit a transaction started in NDB + */ + +int ndbcluster_commit(THD *thd, void *ndb_transaction) +{ + int res= 0; + Ndb *ndb= ((Thd_ndb*)thd->transaction.thd_ndb)->ndb; + NdbConnection *trans= (NdbConnection*)ndb_transaction; + + DBUG_ENTER("ndbcluster_commit"); + DBUG_PRINT("transaction",("%s", + trans == thd->transaction.stmt.ndb_tid ? + "stmt" : "all")); + DBUG_ASSERT(ndb && trans); + + if (execute_commit(thd,trans) != 0) + { + const NdbError err= trans->getNdbError(); + const NdbOperation *error_op= trans->getNdbErrorOperation(); + ERR_PRINT(err); + res= ndb_to_mysql_error(&err); + if (res != -1) + ndbcluster_print_error(res, error_op); + } + ndb->closeTransaction(trans); + DBUG_RETURN(res); +} + + +/* + Rollback a transaction started in NDB + */ + +int ndbcluster_rollback(THD *thd, void *ndb_transaction) +{ + int res= 0; + Ndb *ndb= ((Thd_ndb*)thd->transaction.thd_ndb)->ndb; + NdbConnection *trans= (NdbConnection*)ndb_transaction; + + DBUG_ENTER("ndbcluster_rollback"); + DBUG_PRINT("transaction",("%s", + trans == thd->transaction.stmt.ndb_tid ? + "stmt" : "all")); + DBUG_ASSERT(ndb && trans); + + if (trans->execute(Rollback) != 0) + { + const NdbError err= trans->getNdbError(); + const NdbOperation *error_op= trans->getNdbErrorOperation(); + ERR_PRINT(err); + res= ndb_to_mysql_error(&err); + if (res != -1) + ndbcluster_print_error(res, error_op); + } + ndb->closeTransaction(trans); + DBUG_RETURN(0); +} + + +/* + Define NDB column based on Field. + Returns 0 or mysql error code. + Not member of ha_ndbcluster because NDBCOL cannot be declared. 
+ */ + +static int create_ndb_column(NDBCOL &col, + Field *field, + HA_CREATE_INFO *info) +{ + // Set name + { + char truncated_field_name[NDB_MAX_ATTR_NAME_SIZE]; + strnmov(truncated_field_name,field->field_name,sizeof(truncated_field_name)); + truncated_field_name[sizeof(truncated_field_name)-1]= '\0'; + col.setName(truncated_field_name); + } + // Get char set + CHARSET_INFO *cs= field->charset(); + // Set type and sizes + const enum enum_field_types mysql_type= field->real_type(); + switch (mysql_type) { + // Numeric types + case MYSQL_TYPE_TINY: + if (field->flags & UNSIGNED_FLAG) + col.setType(NDBCOL::Tinyunsigned); + else + col.setType(NDBCOL::Tinyint); + col.setLength(1); + break; + case MYSQL_TYPE_SHORT: + if (field->flags & UNSIGNED_FLAG) + col.setType(NDBCOL::Smallunsigned); + else + col.setType(NDBCOL::Smallint); + col.setLength(1); + break; + case MYSQL_TYPE_LONG: + if (field->flags & UNSIGNED_FLAG) + col.setType(NDBCOL::Unsigned); + else + col.setType(NDBCOL::Int); + col.setLength(1); + break; + case MYSQL_TYPE_INT24: + if (field->flags & UNSIGNED_FLAG) + col.setType(NDBCOL::Mediumunsigned); + else + col.setType(NDBCOL::Mediumint); + col.setLength(1); + break; + case MYSQL_TYPE_LONGLONG: + if (field->flags & UNSIGNED_FLAG) + col.setType(NDBCOL::Bigunsigned); + else + col.setType(NDBCOL::Bigint); + col.setLength(1); + break; + case MYSQL_TYPE_FLOAT: + col.setType(NDBCOL::Float); + col.setLength(1); + break; + case MYSQL_TYPE_DOUBLE: + col.setType(NDBCOL::Double); + col.setLength(1); + break; + case MYSQL_TYPE_DECIMAL: + { + Field_decimal *f= (Field_decimal*)field; + uint precision= f->pack_length(); + uint scale= f->decimals(); + if (field->flags & UNSIGNED_FLAG) + { + col.setType(NDBCOL::Olddecimalunsigned); + precision-= (scale > 0); + } + else + { + col.setType(NDBCOL::Olddecimal); + precision-= 1 + (scale > 0); + } + col.setPrecision(precision); + col.setScale(scale); + col.setLength(1); + } + break; + // Date types + case MYSQL_TYPE_DATETIME: + col.setType(NDBCOL::Datetime); + col.setLength(1); + break; + case MYSQL_TYPE_DATE: // ? 
+ col.setType(NDBCOL::Char); + col.setLength(field->pack_length()); + break; + case MYSQL_TYPE_NEWDATE: + col.setType(NDBCOL::Date); + col.setLength(1); + break; + case MYSQL_TYPE_TIME: + col.setType(NDBCOL::Time); + col.setLength(1); + break; + case MYSQL_TYPE_YEAR: + col.setType(NDBCOL::Year); + col.setLength(1); + break; + case MYSQL_TYPE_TIMESTAMP: + col.setType(NDBCOL::Timestamp); + col.setLength(1); + break; + // Char types + case MYSQL_TYPE_STRING: + if (field->flags & BINARY_FLAG) + col.setType(NDBCOL::Binary); + else { + col.setType(NDBCOL::Char); + col.setCharset(cs); + } + if (field->pack_length() == 0) + col.setLength(1); // currently ndb does not support size 0 + else + col.setLength(field->pack_length()); + break; + case MYSQL_TYPE_VAR_STRING: + if (field->flags & BINARY_FLAG) + col.setType(NDBCOL::Varbinary); + else { + col.setType(NDBCOL::Varchar); + col.setCharset(cs); + } + col.setLength(field->pack_length()); + break; + // Blob types (all come in as MYSQL_TYPE_BLOB) + mysql_type_tiny_blob: + case MYSQL_TYPE_TINY_BLOB: + if (field->flags & BINARY_FLAG) + col.setType(NDBCOL::Blob); + else { + col.setType(NDBCOL::Text); + col.setCharset(cs); + } + col.setInlineSize(256); + // No parts + col.setPartSize(0); + col.setStripeSize(0); + break; + mysql_type_blob: + case MYSQL_TYPE_BLOB: + if (field->flags & BINARY_FLAG) + col.setType(NDBCOL::Blob); + else { + col.setType(NDBCOL::Text); + col.setCharset(cs); + } + // Use "<=" even if "<" is the exact condition + if (field->max_length() <= (1 << 8)) + goto mysql_type_tiny_blob; + else if (field->max_length() <= (1 << 16)) + { + col.setInlineSize(256); + col.setPartSize(2000); + col.setStripeSize(16); + } + else if (field->max_length() <= (1 << 24)) + goto mysql_type_medium_blob; + else + goto mysql_type_long_blob; + break; + mysql_type_medium_blob: + case MYSQL_TYPE_MEDIUM_BLOB: + if (field->flags & BINARY_FLAG) + col.setType(NDBCOL::Blob); + else { + col.setType(NDBCOL::Text); + col.setCharset(cs); + } + col.setInlineSize(256); + col.setPartSize(4000); + col.setStripeSize(8); + break; + mysql_type_long_blob: + case MYSQL_TYPE_LONG_BLOB: + if (field->flags & BINARY_FLAG) + col.setType(NDBCOL::Blob); + else { + col.setType(NDBCOL::Text); + col.setCharset(cs); + } + col.setInlineSize(256); + col.setPartSize(8000); + col.setStripeSize(4); + break; + // Other types + case MYSQL_TYPE_ENUM: + col.setType(NDBCOL::Char); + col.setLength(field->pack_length()); + break; + case MYSQL_TYPE_SET: + col.setType(NDBCOL::Char); + col.setLength(field->pack_length()); + break; + case MYSQL_TYPE_NULL: + case MYSQL_TYPE_GEOMETRY: + goto mysql_type_unsupported; + mysql_type_unsupported: + default: + return HA_ERR_UNSUPPORTED; + } + // Set nullable and pk + col.setNullable(field->maybe_null()); + col.setPrimaryKey(field->flags & PRI_KEY_FLAG); + // Set autoincrement + if (field->flags & AUTO_INCREMENT_FLAG) + { + col.setAutoIncrement(TRUE); + ulonglong value= info->auto_increment_value ? 
+ info->auto_increment_value : (ulonglong) 1; + DBUG_PRINT("info", ("Autoincrement key, initial: %llu", value)); + col.setAutoIncrementInitialValue(value); + } + else + col.setAutoIncrement(FALSE); + return 0; +} + +/* + Create a table in NDB Cluster + */ + +static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length) +{ + if (form->max_rows == (ha_rows) 0) /* default setting, don't set fragmentation */ + return; + /** + * get the number of fragments right + */ + uint no_fragments; + { +#if MYSQL_VERSION_ID >= 50000 + uint acc_row_size= 25 + /*safety margin*/ 2; +#else + uint acc_row_size= pk_length*4; + /* add acc overhead */ + if (pk_length <= 8) /* main page will set the limit */ + acc_row_size+= 25 + /*safety margin*/ 2; + else /* overflow page will set the limit */ + acc_row_size+= 4 + /*safety margin*/ 4; +#endif + ulonglong acc_fragment_size= 512*1024*1024; + ulonglong max_rows= form->max_rows; +#if MYSQL_VERSION_ID >= 50100 + no_fragments= (max_rows*acc_row_size)/acc_fragment_size+1; +#else + no_fragments= ((max_rows*acc_row_size)/acc_fragment_size+1 + +1/*correct rounding*/)/2; +#endif + } + { + uint no_nodes= g_ndb_cluster_connection->no_db_nodes(); + NDBTAB::FragmentType ftype; + if (no_fragments > 2*no_nodes) + { + ftype= NDBTAB::FragAllLarge; + if (no_fragments > 4*no_nodes) + push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, + "Ndb might have problems storing the max amount of rows specified"); + } + else if (no_fragments > no_nodes) + ftype= NDBTAB::FragAllMedium; + else + ftype= NDBTAB::FragAllSmall; + tab.setFragmentType(ftype); + } +} + +int ha_ndbcluster::create(const char *name, + TABLE *form, + HA_CREATE_INFO *info) +{ + NDBTAB tab; + NDBCOL col; + uint pack_length, length, i, pk_length= 0; + const void *data, *pack_data; + const char **key_names= form->keynames.type_names; + char name2[FN_HEADLEN]; + bool create_from_engine= (info->table_options & HA_OPTION_CREATE_FROM_ENGINE); + + DBUG_ENTER("create"); + DBUG_PRINT("enter", ("name: %s", name)); + fn_format(name2, name, "", "",2); // Remove the .frm extension + set_dbname(name2); + set_tabname(name2); + + if (create_from_engine) + { + /* + Table alreay exists in NDB and frm file has been created by + caller. 
+ Do Ndb specific stuff, such as create a .ndb file + */ + my_errno= write_ndb_file(); + DBUG_RETURN(my_errno); + } + + DBUG_PRINT("table", ("name: %s", m_tabname)); + tab.setName(m_tabname); + tab.setLogging(!(info->options & HA_LEX_CREATE_TMP_TABLE)); + + // Save frm data for this table + if (readfrm(name, &data, &length)) + DBUG_RETURN(1); + if (packfrm(data, length, &pack_data, &pack_length)) + DBUG_RETURN(2); + + DBUG_PRINT("info", ("setFrm data=%x, len=%d", pack_data, pack_length)); + tab.setFrm(pack_data, pack_length); + my_free((char*)data, MYF(0)); + my_free((char*)pack_data, MYF(0)); + + for (i= 0; i < form->fields; i++) + { + Field *field= form->field[i]; + DBUG_PRINT("info", ("name: %s, type: %u, pack_length: %d", + field->field_name, field->real_type(), + field->pack_length())); + if ((my_errno= create_ndb_column(col, field, info))) + DBUG_RETURN(my_errno); + tab.addColumn(col); + if(col.getPrimaryKey()) + pk_length += (field->pack_length() + 3) / 4; + } + + // No primary key, create shadow key as 64 bit, auto increment + if (form->primary_key == MAX_KEY) + { + DBUG_PRINT("info", ("Generating shadow key")); + col.setName("$PK"); + col.setType(NdbDictionary::Column::Bigunsigned); + col.setLength(1); + col.setNullable(FALSE); + col.setPrimaryKey(TRUE); + col.setAutoIncrement(TRUE); + tab.addColumn(col); + pk_length += 2; + } + + // Make sure that blob tables don't have to big part size + for (i= 0; i < form->fields; i++) + { + /** + * The extra +7 concists + * 2 - words from pk in blob table + * 5 - from extra words added by tup/dict?? + */ + switch (form->field[i]->real_type()) { + case MYSQL_TYPE_BLOB: + case MYSQL_TYPE_MEDIUM_BLOB: + case MYSQL_TYPE_LONG_BLOB: + { + NdbDictionary::Column * col = tab.getColumn(i); + int size = pk_length + (col->getPartSize()+3)/4 + 7; + if(size > NDB_MAX_TUPLE_SIZE_IN_WORDS && + (pk_length+7) < NDB_MAX_TUPLE_SIZE_IN_WORDS) + { + size = NDB_MAX_TUPLE_SIZE_IN_WORDS - pk_length - 7; + col->setPartSize(4*size); + } + /** + * If size > NDB_MAX and pk_length+7 >= NDB_MAX + * then the table can't be created anyway, so skip + * changing part size, and have error later + */ + } + default: + break; + } + } + + ndb_set_fragmentation(tab, form, pk_length); + + if ((my_errno= check_ndb_connection())) + DBUG_RETURN(my_errno); + + // Create the table in NDB + Ndb *ndb= get_ndb(); + NDBDICT *dict= ndb->getDictionary(); + if (dict->createTable(tab) != 0) + { + const NdbError err= dict->getNdbError(); + ERR_PRINT(err); + my_errno= ndb_to_mysql_error(&err); + DBUG_RETURN(my_errno); + } + DBUG_PRINT("info", ("Table %s/%s created successfully", + m_dbname, m_tabname)); + + // Create secondary indexes + my_errno= build_index_list(ndb, form, ILBP_CREATE); + + if (!my_errno) + my_errno= write_ndb_file(); + + DBUG_RETURN(my_errno); +} + + +int ha_ndbcluster::create_ordered_index(const char *name, + KEY *key_info) +{ + DBUG_ENTER("create_ordered_index"); + DBUG_RETURN(create_index(name, key_info, FALSE)); +} + +int ha_ndbcluster::create_unique_index(const char *name, + KEY *key_info) +{ + + DBUG_ENTER("create_unique_index"); + DBUG_RETURN(create_index(name, key_info, TRUE)); +} + + +/* + Create an index in NDB Cluster + */ + +int ha_ndbcluster::create_index(const char *name, + KEY *key_info, + bool unique) +{ + Ndb *ndb= get_ndb(); + NdbDictionary::Dictionary *dict= ndb->getDictionary(); + KEY_PART_INFO *key_part= key_info->key_part; + KEY_PART_INFO *end= key_part + key_info->key_parts; + + DBUG_ENTER("create_index"); + DBUG_PRINT("enter", ("name: %s ", name)); + + 
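/*
  A worked example of the tuple-size arithmetic in create() above (all
  figures are illustrative; NDB_MAX_TUPLE_SIZE_IN_WORDS is taken as 500
  purely for the sake of the example, the real constant comes from the
  NDB headers):

    PRIMARY KEY (a INT, b CHAR(20)):
      pk_length        = (4 + 3)/4 + (20 + 3)/4 = 1 + 5 = 6 words
    MEDIUMBLOB column (part size 4000 from create_ndb_column()):
      part words       = (4000 + 3)/4           = 1000 words
      size             = 6 + 1000 + 7           = 1013 words
    Since 1013 > 500 and 6 + 7 < 500, the part size is capped to
      4 * (500 - 6 - 7) = 1948 bytes.

  When the table has no explicit primary key, create() adds the hidden
  8-byte "$PK" column instead and counts it as 2 words.
*/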
NdbDictionary::Index ndb_index(name); + if (unique) + ndb_index.setType(NdbDictionary::Index::UniqueHashIndex); + else + { + ndb_index.setType(NdbDictionary::Index::OrderedIndex); + // TODO Only temporary ordered indexes supported + ndb_index.setLogging(FALSE); + } + ndb_index.setTable(m_tabname); + + for (; key_part != end; key_part++) + { + Field *field= key_part->field; + DBUG_PRINT("info", ("attr: %s", field->field_name)); + { + char truncated_field_name[NDB_MAX_ATTR_NAME_SIZE]; + strnmov(truncated_field_name,field->field_name,sizeof(truncated_field_name)); + truncated_field_name[sizeof(truncated_field_name)-1]= '\0'; + ndb_index.addColumnName(truncated_field_name); + } + } + + if (dict->createIndex(ndb_index)) + ERR_RETURN(dict->getNdbError()); + + // Success + DBUG_PRINT("info", ("Created index %s", name)); + DBUG_RETURN(0); +} + + +/* + Rename a table in NDB Cluster +*/ + +int ha_ndbcluster::rename_table(const char *from, const char *to) +{ + NDBDICT *dict; + char new_tabname[FN_HEADLEN]; + const NDBTAB *orig_tab; + int result; + + DBUG_ENTER("ha_ndbcluster::rename_table"); + DBUG_PRINT("info", ("Renaming %s to %s", from, to)); + set_dbname(from); + set_tabname(from); + set_tabname(to, new_tabname); + + if (check_ndb_connection()) + DBUG_RETURN(my_errno= HA_ERR_NO_CONNECTION); + + Ndb *ndb= get_ndb(); + dict= ndb->getDictionary(); + if (!(orig_tab= dict->getTable(m_tabname))) + ERR_RETURN(dict->getNdbError()); + // Check if thread has stale local cache + if (orig_tab->getObjectStatus() == NdbDictionary::Object::Invalid) + { + dict->removeCachedTable(m_tabname); + if (!(orig_tab= dict->getTable(m_tabname))) + ERR_RETURN(dict->getNdbError()); + } + m_table= (void *)orig_tab; + // Change current database to that of target table + set_dbname(to); + ndb->setDatabaseName(m_dbname); + if (!(result= alter_table_name(new_tabname))) + { + // Rename .ndb file + result= handler::rename_table(from, to); + } + + DBUG_RETURN(result); +} + + +/* + Rename a table in NDB Cluster using alter table + */ + +int ha_ndbcluster::alter_table_name(const char *to) +{ + Ndb *ndb= get_ndb(); + NDBDICT *dict= ndb->getDictionary(); + const NDBTAB *orig_tab= (const NDBTAB *) m_table; + int ret; + DBUG_ENTER("alter_table_name_table"); + + NdbDictionary::Table new_tab= *orig_tab; + new_tab.setName(to); + if (dict->alterTable(new_tab) != 0) + ERR_RETURN(dict->getNdbError()); + + m_table= NULL; + m_table_info= NULL; + + DBUG_RETURN(0); +} + + +/* + Delete a table from NDB Cluster + */ + +int ha_ndbcluster::delete_table(const char *name) +{ + DBUG_ENTER("delete_table"); + DBUG_PRINT("enter", ("name: %s", name)); + set_dbname(name); + set_tabname(name); + + if (check_ndb_connection()) + DBUG_RETURN(HA_ERR_NO_CONNECTION); + // Remove .ndb file + handler::delete_table(name); + DBUG_RETURN(drop_table()); +} + + +/* + Drop a table in NDB Cluster + */ + +int ha_ndbcluster::drop_table() +{ + Ndb *ndb= get_ndb(); + NdbDictionary::Dictionary *dict= ndb->getDictionary(); + + DBUG_ENTER("drop_table"); + DBUG_PRINT("enter", ("Deleting %s", m_tabname)); + + if (dict->dropTable(m_tabname)) + { + const NdbError err= dict->getNdbError(); + if (err.code == 709) + ; // 709: No such table existed + else + ERR_RETURN(dict->getNdbError()); + } + release_metadata(); + DBUG_RETURN(0); +} + + +longlong ha_ndbcluster::get_auto_increment() +{ + DBUG_ENTER("get_auto_increment"); + DBUG_PRINT("enter", ("m_tabname: %s", m_tabname)); + Ndb *ndb= get_ndb(); + + if (m_rows_inserted > m_rows_to_insert) + { + /* We guessed too low */ + 
m_rows_to_insert+= m_autoincrement_prefetch; + } + int cache_size= + (int) + (m_rows_to_insert - m_rows_inserted < m_autoincrement_prefetch) ? + m_rows_to_insert - m_rows_inserted + : (m_rows_to_insert > m_autoincrement_prefetch) ? + m_rows_to_insert + : m_autoincrement_prefetch; + Uint64 auto_value= NDB_FAILED_AUTO_INCREMENT; + uint retries= NDB_AUTO_INCREMENT_RETRIES; + do { + auto_value= + (m_skip_auto_increment) ? + ndb->readAutoIncrementValue((const NDBTAB *) m_table) + : ndb->getAutoIncrementValue((const NDBTAB *) m_table, cache_size); + } while (auto_value == NDB_FAILED_AUTO_INCREMENT && + --retries && + ndb->getNdbError().status == NdbError::TemporaryError); + if (auto_value == NDB_FAILED_AUTO_INCREMENT) + { + const NdbError err= ndb->getNdbError(); + sql_print_error("Error %lu in ::get_auto_increment(): %s", + (ulong) err.code, err.message); + DBUG_RETURN(~(ulonglong) 0); + } + DBUG_RETURN((longlong)auto_value); +} + + +/* + Constructor for the NDB Cluster table handler + */ + +ha_ndbcluster::ha_ndbcluster(TABLE *table_arg): + handler(table_arg), + m_active_trans(NULL), + m_active_cursor(NULL), + m_table(NULL), + m_table_version(-1), + m_table_info(NULL), + m_table_flags(HA_REC_NOT_IN_SEQ | + HA_NULL_IN_KEY | + HA_AUTO_PART_KEY | + HA_NO_PREFIX_CHAR_KEYS), + m_share(0), + m_use_write(FALSE), + m_ignore_dup_key(FALSE), + m_primary_key_update(FALSE), + m_retrieve_all_fields(FALSE), + m_retrieve_primary_key(FALSE), + m_rows_to_insert((ha_rows) 1), + m_rows_inserted((ha_rows) 0), + m_bulk_insert_rows((ha_rows) 1024), + m_bulk_insert_not_flushed(FALSE), + m_ops_pending(0), + m_skip_auto_increment(TRUE), + m_blobs_pending(0), + m_blobs_buffer(0), + m_blobs_buffer_size(0), + m_dupkey((uint) -1), + m_ha_not_exact_count(FALSE), + m_force_send(TRUE), + m_autoincrement_prefetch((ha_rows) 32), + m_transaction_on(TRUE), + m_use_local_query_cache(FALSE) +{ + int i; + + DBUG_ENTER("ha_ndbcluster"); + + m_tabname[0]= '\0'; + m_dbname[0]= '\0'; + + records= ~(ha_rows)0; // uninitialized + block_size= 1024; + + for (i= 0; i < MAX_KEY; i++) + { + m_index[i].type= UNDEFINED_INDEX; + m_index[i].unique_index= NULL; + m_index[i].index= NULL; + m_index[i].unique_index_attrid_map= NULL; + } + + DBUG_VOID_RETURN; +} + + +/* + Destructor for NDB Cluster table handler + */ + +ha_ndbcluster::~ha_ndbcluster() +{ + DBUG_ENTER("~ha_ndbcluster"); + + if (m_share) + free_share(m_share); + release_metadata(); + my_free(m_blobs_buffer, MYF(MY_ALLOW_ZERO_PTR)); + m_blobs_buffer= 0; + + // Check for open cursor/transaction + if (m_active_cursor) { + } + DBUG_ASSERT(m_active_cursor == NULL); + if (m_active_trans) { + } + DBUG_ASSERT(m_active_trans == NULL); + + DBUG_VOID_RETURN; +} + + +/* + Open a table for further use + - fetch metadata for this table from NDB + - check that table exists +*/ + +int ha_ndbcluster::open(const char *name, int mode, uint test_if_locked) +{ + int res; + KEY *key; + DBUG_ENTER("open"); + DBUG_PRINT("enter", ("name: %s mode: %d test_if_locked: %d", + name, mode, test_if_locked)); + + // Setup ref_length to make room for the whole + // primary key to be written in the ref variable + + if (table->primary_key != MAX_KEY) + { + key= table->key_info+table->primary_key; + ref_length= key->key_length; + DBUG_PRINT("info", (" ref_length: %d", ref_length)); + } + // Init table lock structure + if (!(m_share=get_share(name))) + DBUG_RETURN(1); + thr_lock_data_init(&m_share->lock,&m_lock,(void*) 0); + + set_dbname(name); + set_tabname(name); + + if (check_ndb_connection()) { + free_share(m_share); 
m_share= 0; + DBUG_RETURN(HA_ERR_NO_CONNECTION); + } + + res= get_metadata(name); + if (!res) + info(HA_STATUS_VARIABLE | HA_STATUS_CONST); + + DBUG_RETURN(res); +} + + +/* + Close the table + - release resources setup by open() + */ + +int ha_ndbcluster::close(void) +{ + DBUG_ENTER("close"); + free_share(m_share); m_share= 0; + release_metadata(); + DBUG_RETURN(0); +} + + +Thd_ndb* ha_ndbcluster::seize_thd_ndb() +{ + Thd_ndb *thd_ndb; + DBUG_ENTER("seize_thd_ndb"); + + thd_ndb= new Thd_ndb(); + thd_ndb->ndb->getDictionary()->set_local_table_data_size( + sizeof(Ndb_local_table_statistics) + ); + if (thd_ndb->ndb->init(max_transactions) != 0) + { + ERR_PRINT(thd_ndb->ndb->getNdbError()); + /* + TODO + Alt.1 If init fails because to many allocated Ndb + wait on condition for a Ndb object to be released. + Alt.2 Seize/release from pool, wait until next release + */ + delete thd_ndb; + thd_ndb= NULL; + } + DBUG_RETURN(thd_ndb); +} + + +void ha_ndbcluster::release_thd_ndb(Thd_ndb* thd_ndb) +{ + DBUG_ENTER("release_thd_ndb"); + delete thd_ndb; + DBUG_VOID_RETURN; +} + + +/* + If this thread already has a Thd_ndb object allocated + in current THD, reuse it. Otherwise + seize a Thd_ndb object, assign it to current THD and use it. + +*/ + +Ndb* check_ndb_in_thd(THD* thd) +{ + DBUG_ENTER("check_ndb_in_thd"); + Thd_ndb *thd_ndb= (Thd_ndb*)thd->transaction.thd_ndb; + + if (!thd_ndb) + { + if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb())) + DBUG_RETURN(NULL); + thd->transaction.thd_ndb= thd_ndb; + } + DBUG_RETURN(thd_ndb->ndb); +} + + + +int ha_ndbcluster::check_ndb_connection() +{ + THD* thd= current_thd; + Ndb *ndb; + DBUG_ENTER("check_ndb_connection"); + + if (!(ndb= check_ndb_in_thd(thd))) + DBUG_RETURN(HA_ERR_NO_CONNECTION); + ndb->setDatabaseName(m_dbname); + DBUG_RETURN(0); +} + + +void ndbcluster_close_connection(THD *thd) +{ + Thd_ndb *thd_ndb= (Thd_ndb*)thd->transaction.thd_ndb; + DBUG_ENTER("ndbcluster_close_connection"); + if (thd_ndb) + { + ha_ndbcluster::release_thd_ndb(thd_ndb); + thd->transaction.thd_ndb= NULL; + } + DBUG_VOID_RETURN; +} + + +/* + Try to discover one table from NDB + */ + +int ndbcluster_discover(THD* thd, const char *db, const char *name, + const void** frmblob, uint* frmlen) +{ + uint len; + const void* data; + const NDBTAB* tab; + Ndb* ndb; + DBUG_ENTER("ndbcluster_discover"); + DBUG_PRINT("enter", ("db: %s, name: %s", db, name)); + + if (!(ndb= check_ndb_in_thd(thd))) + DBUG_RETURN(HA_ERR_NO_CONNECTION); + ndb->setDatabaseName(db); + + NDBDICT* dict= ndb->getDictionary(); + dict->set_local_table_data_size(sizeof(Ndb_local_table_statistics)); + dict->invalidateTable(name); + if (!(tab= dict->getTable(name))) + { + const NdbError err= dict->getNdbError(); + if (err.code == 709) + DBUG_RETURN(-1); + ERR_RETURN(err); + } + DBUG_PRINT("info", ("Found table %s", tab->getName())); + + len= tab->getFrmLength(); + if (len == 0 || tab->getFrmData() == NULL) + { + DBUG_PRINT("error", ("No frm data found.")); + DBUG_RETURN(1); + } + + if (unpackfrm(&data, &len, tab->getFrmData())) + { + DBUG_PRINT("error", ("Could not unpack table")); + DBUG_RETURN(1); + } + + *frmlen= len; + *frmblob= data; + + DBUG_RETURN(0); +} + +/* + Check if a table exists in NDB + + */ + +int ndbcluster_table_exists_in_engine(THD* thd, const char *db, const char *name) +{ + uint len; + const void* data; + const NDBTAB* tab; + Ndb* ndb; + DBUG_ENTER("ndbcluster_table_exists_in_engine"); + DBUG_PRINT("enter", ("db: %s, name: %s", db, name)); + + if (!(ndb= check_ndb_in_thd(thd))) + 
DBUG_RETURN(HA_ERR_NO_CONNECTION); + ndb->setDatabaseName(db); + + NDBDICT* dict= ndb->getDictionary(); + dict->set_local_table_data_size(sizeof(Ndb_local_table_statistics)); + dict->invalidateTable(name); + if (!(tab= dict->getTable(name))) + { + const NdbError err= dict->getNdbError(); + if (err.code == 709) + DBUG_RETURN(0); + ERR_RETURN(err); + } + + DBUG_PRINT("info", ("Found table %s", tab->getName())); + DBUG_RETURN(1); +} + + + +extern "C" byte* tables_get_key(const char *entry, uint *length, + my_bool not_used __attribute__((unused))) +{ + *length= strlen(entry); + return (byte*) entry; +} + + +/* + Drop a database in NDB Cluster + */ + +int ndbcluster_drop_database(const char *path) +{ + DBUG_ENTER("ndbcluster_drop_database"); + THD *thd= current_thd; + char dbname[FN_HEADLEN]; + Ndb* ndb; + NdbDictionary::Dictionary::List list; + uint i; + char *tabname; + List<char> drop_list; + int ret= 0; + ha_ndbcluster::set_dbname(path, (char *)&dbname); + DBUG_PRINT("enter", ("db: %s", dbname)); + + if (!(ndb= check_ndb_in_thd(thd))) + DBUG_RETURN(HA_ERR_NO_CONNECTION); + + // List tables in NDB + NDBDICT *dict= ndb->getDictionary(); + if (dict->listObjects(list, + NdbDictionary::Object::UserTable) != 0) + ERR_RETURN(dict->getNdbError()); + for (i= 0 ; i < list.count ; i++) + { + NdbDictionary::Dictionary::List::Element& t= list.elements[i]; + DBUG_PRINT("info", ("Found %s/%s in NDB", t.database, t.name)); + + // Add only tables that belongs to db + if (my_strcasecmp(system_charset_info, t.database, dbname)) + continue; + DBUG_PRINT("info", ("%s must be dropped", t.name)); + drop_list.push_back(thd->strdup(t.name)); + } + // Drop any tables belonging to database + ndb->setDatabaseName(dbname); + List_iterator_fast<char> it(drop_list); + while ((tabname=it++)) + { + if (dict->dropTable(tabname)) + { + const NdbError err= dict->getNdbError(); + if (err.code != 709) + { + ERR_PRINT(err); + ret= ndb_to_mysql_error(&err); + } + } + } + DBUG_RETURN(ret); +} + + +int ndbcluster_find_files(THD *thd,const char *db,const char *path, + const char *wild, bool dir, List<char> *files) +{ + DBUG_ENTER("ndbcluster_find_files"); + DBUG_PRINT("enter", ("db: %s", db)); + { // extra bracket to avoid gcc 2.95.3 warning + uint i; + Ndb* ndb; + char name[FN_REFLEN]; + HASH ndb_tables, ok_tables; + NdbDictionary::Dictionary::List list; + + if (!(ndb= check_ndb_in_thd(thd))) + DBUG_RETURN(HA_ERR_NO_CONNECTION); + + if (dir) + DBUG_RETURN(0); // Discover of databases not yet supported + + // List tables in NDB + NDBDICT *dict= ndb->getDictionary(); + if (dict->listObjects(list, + NdbDictionary::Object::UserTable) != 0) + ERR_RETURN(dict->getNdbError()); + + if (hash_init(&ndb_tables, system_charset_info,list.count,0,0, + (hash_get_key)tables_get_key,0,0)) + { + DBUG_PRINT("error", ("Failed to init HASH ndb_tables")); + DBUG_RETURN(-1); + } + + if (hash_init(&ok_tables, system_charset_info,32,0,0, + (hash_get_key)tables_get_key,0,0)) + { + DBUG_PRINT("error", ("Failed to init HASH ok_tables")); + hash_free(&ndb_tables); + DBUG_RETURN(-1); + } + + for (i= 0 ; i < list.count ; i++) + { + NdbDictionary::Dictionary::List::Element& t= list.elements[i]; + DBUG_PRINT("info", ("Found %s/%s in NDB", t.database, t.name)); + + // Add only tables that belongs to db + if (my_strcasecmp(system_charset_info, t.database, db)) + continue; + + // Apply wildcard to list of tables in NDB + if (wild) + { + if (lower_case_table_names) + { + if (wild_case_compare(files_charset_info, t.name, wild)) + continue; + } + else if 
(wild_compare(t.name,wild,0)) + continue; + } + DBUG_PRINT("info", ("Inserting %s into ndb_tables hash", t.name)); + my_hash_insert(&ndb_tables, (byte*)thd->strdup(t.name)); + } + + char *file_name; + List_iterator<char> it(*files); + List<char> delete_list; + while ((file_name=it++)) + { + DBUG_PRINT("info", ("%s", file_name)); + if (hash_search(&ndb_tables, file_name, strlen(file_name))) + { + DBUG_PRINT("info", ("%s existed in NDB _and_ on disk ", file_name)); + // File existed in NDB and as frm file, put in ok_tables list + my_hash_insert(&ok_tables, (byte*)file_name); + continue; + } + + // File is not in NDB, check for .ndb file with this name + (void)strxnmov(name, FN_REFLEN, + mysql_data_home,"/",db,"/",file_name,ha_ndb_ext,NullS); + DBUG_PRINT("info", ("Check access for %s", name)); + if (access(name, F_OK)) + { + DBUG_PRINT("info", ("%s did not exist on disk", name)); + // .ndb file did not exist on disk, another table type + continue; + } + + DBUG_PRINT("info", ("%s existed on disk", name)); + // The .ndb file exists on disk, but it's not in list of tables in ndb + // Verify that handler agrees table is gone. + if (ndbcluster_table_exists_in_engine(thd, db, file_name) == 0) + { + DBUG_PRINT("info", ("NDB says %s does not exists", file_name)); + it.remove(); + // Put in list of tables to remove from disk + delete_list.push_back(thd->strdup(file_name)); + } + } + + // Check for new files to discover + DBUG_PRINT("info", ("Checking for new files to discover")); + List<char> create_list; + for (i= 0 ; i < ndb_tables.records ; i++) + { + file_name= hash_element(&ndb_tables, i); + if (!hash_search(&ok_tables, file_name, strlen(file_name))) + { + DBUG_PRINT("info", ("%s must be discovered", file_name)); + // File is in list of ndb tables and not in ok_tables + // This table need to be created + create_list.push_back(thd->strdup(file_name)); + } + } + + // Lock mutex before deleting and creating frm files + pthread_mutex_lock(&LOCK_open); + + if (!global_read_lock) + { + // Delete old files + List_iterator_fast<char> it3(delete_list); + while ((file_name=it3++)) + { + DBUG_PRINT("info", ("Remove table %s/%s",db, file_name )); + // Delete the table and all related files + TABLE_LIST table_list; + bzero((char*) &table_list,sizeof(table_list)); + table_list.db= (char*) db; + table_list.alias=table_list.real_name=(char*)file_name; + (void)mysql_rm_table_part2(thd, &table_list, + /* if_exists */ TRUE, + /* drop_temporary */ FALSE, + /* dont_log_query*/ TRUE); + } + } + + // Create new files + List_iterator_fast<char> it2(create_list); + while ((file_name=it2++)) + { + DBUG_PRINT("info", ("Table %s need discovery", name)); + if (ha_create_table_from_engine(thd, db, file_name) == 0) + files->push_back(thd->strdup(file_name)); + } + + pthread_mutex_unlock(&LOCK_open); + + hash_free(&ok_tables); + hash_free(&ndb_tables); + } // extra bracket to avoid gcc 2.95.3 warning + DBUG_RETURN(0); +} + + +/* + Initialise all gloal variables before creating + a NDB Cluster table handler + */ + +bool ndbcluster_init() +{ + int res; + DBUG_ENTER("ndbcluster_init"); + // Set connectstring if specified + if (opt_ndbcluster_connectstring != 0) + DBUG_PRINT("connectstring", ("%s", opt_ndbcluster_connectstring)); + if ((g_ndb_cluster_connection= + new Ndb_cluster_connection(opt_ndbcluster_connectstring)) == 0) + { + DBUG_PRINT("error",("Ndb_cluster_connection(%s)", + opt_ndbcluster_connectstring)); + goto ndbcluster_init_error; + } + + g_ndb_cluster_connection->set_optimized_node_selection + 
(opt_ndb_optimized_node_selection); + + // Create a Ndb object to open the connection to NDB + g_ndb= new Ndb(g_ndb_cluster_connection, "sys"); + g_ndb->getDictionary()->set_local_table_data_size(sizeof(Ndb_local_table_statistics)); + if (g_ndb->init() != 0) + { + ERR_PRINT (g_ndb->getNdbError()); + goto ndbcluster_init_error; + } + + if ((res= g_ndb_cluster_connection->connect(0,0,0)) == 0) + { + DBUG_PRINT("info",("NDBCLUSTER storage engine at %s on port %d", + g_ndb_cluster_connection->get_connected_host(), + g_ndb_cluster_connection->get_connected_port())); + g_ndb_cluster_connection->wait_until_ready(10,3); + } + else if(res == 1) + { + if (g_ndb_cluster_connection->start_connect_thread()) { + DBUG_PRINT("error", ("g_ndb_cluster_connection->start_connect_thread()")); + goto ndbcluster_init_error; + } + { + char buf[1024]; + DBUG_PRINT("info",("NDBCLUSTER storage engine not started, will connect using %s", + g_ndb_cluster_connection->get_connectstring(buf,sizeof(buf)))); + } + } + else + { + DBUG_ASSERT(res == -1); + DBUG_PRINT("error", ("permanent error")); + goto ndbcluster_init_error; + } + + (void) hash_init(&ndbcluster_open_tables,system_charset_info,32,0,0, + (hash_get_key) ndbcluster_get_key,0,0); + pthread_mutex_init(&ndbcluster_mutex,MY_MUTEX_INIT_FAST); + + ndbcluster_inited= 1; + DBUG_RETURN(FALSE); + + ndbcluster_init_error: + ndbcluster_end(); + DBUG_RETURN(TRUE); +} + + +/* + End use of the NDB Cluster table handler + - free all global variables allocated by + ndcluster_init() +*/ + +bool ndbcluster_end() +{ + DBUG_ENTER("ndbcluster_end"); + if(g_ndb) + { +#ifndef DBUG_OFF + Ndb::Free_list_usage tmp; tmp.m_name= 0; + while (g_ndb->get_free_list_usage(&tmp)) + { + uint leaked= (uint) tmp.m_created - tmp.m_free; + if (leaked) + fprintf(stderr, "NDB: Found %u %s%s that %s not been released\n", + leaked, tmp.m_name, + (leaked == 1)?"":"'s", + (leaked == 1)?"has":"have"); + } +#endif + delete g_ndb; + } + g_ndb= NULL; + if (g_ndb_cluster_connection) + delete g_ndb_cluster_connection; + g_ndb_cluster_connection= NULL; + if (!ndbcluster_inited) + DBUG_RETURN(0); + hash_free(&ndbcluster_open_tables); + pthread_mutex_destroy(&ndbcluster_mutex); + ndbcluster_inited= 0; + DBUG_RETURN(0); +} + +/* + Static error print function called from + static handler method ndbcluster_commit + and ndbcluster_rollback +*/ + +void ndbcluster_print_error(int error, const NdbOperation *error_op) +{ + DBUG_ENTER("ndbcluster_print_error"); + TABLE tab; + const char *tab_name= (error_op) ? 
error_op->getTableName() : ""; + tab.table_name= (char *) tab_name; + ha_ndbcluster error_handler(&tab); + tab.file= &error_handler; + error_handler.print_error(error, MYF(0)); + DBUG_VOID_RETURN; +} + +/** + * Set a given location from full pathname to database name + * + */ +void ha_ndbcluster::set_dbname(const char *path_name, char *dbname) +{ + char *end, *ptr; + + /* Scan name from the end */ + ptr= strend(path_name)-1; + while (ptr >= path_name && *ptr != '\\' && *ptr != '/') { + ptr--; + } + ptr--; + end= ptr; + while (ptr >= path_name && *ptr != '\\' && *ptr != '/') { + ptr--; + } + uint name_len= end - ptr; + memcpy(dbname, ptr + 1, name_len); + dbname[name_len]= '\0'; +#ifdef __WIN__ + /* Put to lower case */ + + ptr= dbname; + + while (*ptr != '\0') { + *ptr= tolower(*ptr); + ptr++; + } +#endif +} + +/* + Set m_dbname from full pathname to table file + */ + +void ha_ndbcluster::set_dbname(const char *path_name) +{ + set_dbname(path_name, m_dbname); +} + +/** + * Set a given location from full pathname to table file + * + */ +void +ha_ndbcluster::set_tabname(const char *path_name, char * tabname) +{ + char *end, *ptr; + + /* Scan name from the end */ + end= strend(path_name)-1; + ptr= end; + while (ptr >= path_name && *ptr != '\\' && *ptr != '/') { + ptr--; + } + uint name_len= end - ptr; + memcpy(tabname, ptr + 1, end - ptr); + tabname[name_len]= '\0'; +#ifdef __WIN__ + /* Put to lower case */ + ptr= tabname; + + while (*ptr != '\0') { + *ptr= tolower(*ptr); + ptr++; + } +#endif +} + +/* + Set m_tabname from full pathname to table file + */ + +void ha_ndbcluster::set_tabname(const char *path_name) +{ + set_tabname(path_name, m_tabname); +} + + +ha_rows +ha_ndbcluster::records_in_range(uint inx, key_range *min_key, + key_range *max_key) +{ + KEY *key_info= table->key_info + inx; + uint key_length= key_info->key_length; + NDB_INDEX_TYPE idx_type= get_index_type(inx); + + DBUG_ENTER("records_in_range"); + // Prevent partial read of hash indexes by returning HA_POS_ERROR + if ((idx_type == UNIQUE_INDEX || idx_type == PRIMARY_KEY_INDEX) && + ((min_key && min_key->length < key_length) || + (max_key && max_key->length < key_length))) + DBUG_RETURN(HA_POS_ERROR); + + // Read from hash index with full key + // This is a "const" table which returns only one record! 
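  // For example (key and constants are illustrative): with
  // PRIMARY KEY (a, b) USING HASH, a condition such as a=1 AND b=2
  // supplies the full key, so the branch below estimates a single row,
  // whereas a=1 alone was already rejected above with HA_POS_ERROR
  // because a hash index cannot be scanned on a key prefix. Ranges
  // served by an ordered index fall through to the fixed guess of 10
  // rows at the end of this method.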
+ if ((idx_type != ORDERED_INDEX) && + ((min_key && min_key->length == key_length) || + (max_key && max_key->length == key_length))) + DBUG_RETURN(1); + + DBUG_RETURN(10); /* Good guess when you don't know anything */ +} + +ulong ha_ndbcluster::table_flags(void) const +{ + if (m_ha_not_exact_count) + return m_table_flags | HA_NOT_EXACT_COUNT; + else + return m_table_flags; +} +const char * ha_ndbcluster::table_type() const +{ + return("ndbcluster"); +} +uint ha_ndbcluster::max_supported_record_length() const +{ + return NDB_MAX_TUPLE_SIZE; +} +uint ha_ndbcluster::max_supported_keys() const +{ + return MAX_KEY; +} +uint ha_ndbcluster::max_supported_key_parts() const +{ + return NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY; +} +uint ha_ndbcluster::max_supported_key_length() const +{ + return NDB_MAX_KEY_SIZE; +} +bool ha_ndbcluster::low_byte_first() const +{ +#ifdef WORDS_BIGENDIAN + return FALSE; +#else + return TRUE; +#endif +} +bool ha_ndbcluster::has_transactions() +{ + return TRUE; +} +const char* ha_ndbcluster::index_type(uint key_number) +{ + switch (get_index_type(key_number)) { + case ORDERED_INDEX: + case UNIQUE_ORDERED_INDEX: + case PRIMARY_KEY_ORDERED_INDEX: + return "BTREE"; + case UNIQUE_INDEX: + case PRIMARY_KEY_INDEX: + default: + return "HASH"; + } +} +uint8 ha_ndbcluster::table_cache_type() +{ + if (m_use_local_query_cache) + return HA_CACHE_TBL_TRANSACT; + else + return HA_CACHE_TBL_NOCACHE; +} + +/* + Handling the shared NDB_SHARE structure that is needed to + provide table locking. + It's also used for sharing data with other NDB handlers + in the same MySQL Server. There is currently not much + data we want to or can share. + */ + +static byte* ndbcluster_get_key(NDB_SHARE *share,uint *length, + my_bool not_used __attribute__((unused))) +{ + *length=share->table_name_length; + return (byte*) share->table_name; +} + +static NDB_SHARE* get_share(const char *table_name) +{ + NDB_SHARE *share; + pthread_mutex_lock(&ndbcluster_mutex); + uint length=(uint) strlen(table_name); + if (!(share=(NDB_SHARE*) hash_search(&ndbcluster_open_tables, + (byte*) table_name, + length))) + { + if ((share=(NDB_SHARE *) my_malloc(sizeof(*share)+length+1, + MYF(MY_WME | MY_ZEROFILL)))) + { + share->table_name_length=length; + share->table_name=(char*) (share+1); + strmov(share->table_name,table_name); + if (my_hash_insert(&ndbcluster_open_tables, (byte*) share)) + { + pthread_mutex_unlock(&ndbcluster_mutex); + my_free((gptr) share,0); + return 0; + } + thr_lock_init(&share->lock); + pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST); + } + } + share->use_count++; + pthread_mutex_unlock(&ndbcluster_mutex); + return share; +} + + +static void free_share(NDB_SHARE *share) +{ + pthread_mutex_lock(&ndbcluster_mutex); + if (!--share->use_count) + { + hash_delete(&ndbcluster_open_tables, (byte*) share); + thr_lock_delete(&share->lock); + pthread_mutex_destroy(&share->mutex); + my_free((gptr) share, MYF(0)); + } + pthread_mutex_unlock(&ndbcluster_mutex); +} + + + +/* + Internal representation of the frm blob + +*/ + +struct frm_blob_struct +{ + struct frm_blob_header + { + uint ver; // Version of header + uint orglen; // Original length of compressed data + uint complen; // Compressed length of data, 0=uncompressed + } head; + char data[1]; +}; + + + +static int packfrm(const void *data, uint len, + const void **pack_data, uint *pack_len) +{ + int error; + ulong org_len, comp_len; + uint blob_len; + frm_blob_struct* blob; + DBUG_ENTER("packfrm"); + DBUG_PRINT("enter", ("data: %x, len: %d", data, len)); + + error= 
1; + org_len= len; + if (my_compress((byte*)data, &org_len, &comp_len)) + goto err; + + DBUG_PRINT("info", ("org_len: %d, comp_len: %d", org_len, comp_len)); + DBUG_DUMP("compressed", (char*)data, org_len); + + error= 2; + blob_len= sizeof(frm_blob_struct::frm_blob_header)+org_len; + if (!(blob= (frm_blob_struct*) my_malloc(blob_len,MYF(MY_WME)))) + goto err; + + // Store compressed blob in machine independent format + int4store((char*)(&blob->head.ver), 1); + int4store((char*)(&blob->head.orglen), comp_len); + int4store((char*)(&blob->head.complen), org_len); + + // Copy frm data into blob, already in machine independent format + memcpy(blob->data, data, org_len); + + *pack_data= blob; + *pack_len= blob_len; + error= 0; + + DBUG_PRINT("exit", ("pack_data: %x, pack_len: %d", *pack_data, *pack_len)); +err: + DBUG_RETURN(error); + +} + + +static int unpackfrm(const void **unpack_data, uint *unpack_len, + const void *pack_data) +{ + const frm_blob_struct *blob= (frm_blob_struct*)pack_data; + byte *data; + ulong complen, orglen, ver; + DBUG_ENTER("unpackfrm"); + DBUG_PRINT("enter", ("pack_data: %x", pack_data)); + + complen= uint4korr((char*)&blob->head.complen); + orglen= uint4korr((char*)&blob->head.orglen); + ver= uint4korr((char*)&blob->head.ver); + + DBUG_PRINT("blob",("ver: %d complen: %d orglen: %d", + ver,complen,orglen)); + DBUG_DUMP("blob->data", (char*) blob->data, complen); + + if (ver != 1) + DBUG_RETURN(1); + if (!(data= my_malloc(max(orglen, complen), MYF(MY_WME)))) + DBUG_RETURN(2); + memcpy(data, blob->data, complen); + + if (my_uncompress(data, &complen, &orglen)) + { + my_free((char*)data, MYF(0)); + DBUG_RETURN(3); + } + + *unpack_data= data; + *unpack_len= complen; + + DBUG_PRINT("exit", ("frmdata: %x, len: %d", *unpack_data, *unpack_len)); + + DBUG_RETURN(0); +} + +static +int +ndb_get_table_statistics(Ndb* ndb, const char * table, + Uint64* row_count, Uint64* commit_count) +{ + DBUG_ENTER("ndb_get_table_statistics"); + DBUG_PRINT("enter", ("table: %s", table)); + NdbConnection* pTrans= ndb->startTransaction(); + do + { + if (pTrans == NULL) + break; + + NdbScanOperation* pOp= pTrans->getNdbScanOperation(table); + if (pOp == NULL) + break; + + NdbResultSet* rs= pOp->readTuples(NdbOperation::LM_CommittedRead); + if (rs == 0) + break; + + int check= pOp->interpret_exit_last_row(); + if (check == -1) + break; + + Uint64 rows, commits; + pOp->getValue(NdbDictionary::Column::ROW_COUNT, (char*)&rows); + pOp->getValue(NdbDictionary::Column::COMMIT_COUNT, (char*)&commits); + + check= pTrans->execute(NoCommit, AbortOnError, TRUE); + if (check == -1) + break; + + Uint64 sum_rows= 0; + Uint64 sum_commits= 0; + while((check= rs->nextResult(TRUE, TRUE)) == 0) + { + sum_rows+= rows; + sum_commits+= commits; + } + + if (check == -1) + break; + + rs->close(TRUE); + + ndb->closeTransaction(pTrans); + if(row_count) + * row_count= sum_rows; + if(commit_count) + * commit_count= sum_commits; + DBUG_PRINT("exit", ("records: %u commits: %u", sum_rows, sum_commits)); + DBUG_RETURN(0); + } while(0); + + ndb->closeTransaction(pTrans); + DBUG_PRINT("exit", ("failed")); + DBUG_RETURN(-1); +} + +/* + Create a .ndb file to serve as a placeholder indicating + that the table with this name is a ndb table +*/ + +int ha_ndbcluster::write_ndb_file() +{ + File file; + bool error=1; + char path[FN_REFLEN]; + + DBUG_ENTER("write_ndb_file"); + DBUG_PRINT("enter", ("db: %s, name: %s", m_dbname, m_tabname)); + + (void)strxnmov(path, FN_REFLEN, + mysql_data_home,"/",m_dbname,"/",m_tabname,ha_ndb_ext,NullS); + + 
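As an aside on the packfrm()/unpackfrm() pair above: the packed blob is simply a 12-byte header (version, original length, compressed length, each written as a 4-byte little-endian value by int4store) followed by the my_compress()-ed frm image. A minimal standalone sketch of that header handling in plain C++, with store4()/load4()/pack_header() as hypothetical stand-ins for int4store()/uint4korr() and the real packing code:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

// Fixed little-endian 32-bit store/load, independent of host byte order,
// playing the role of int4store()/uint4korr().
static void store4(unsigned char *p, uint32_t v)
{
  p[0] = (unsigned char)(v);
  p[1] = (unsigned char)(v >> 8);
  p[2] = (unsigned char)(v >> 16);
  p[3] = (unsigned char)(v >> 24);
}

static uint32_t load4(const unsigned char *p)
{
  return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
         ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

// Build a blob shaped like frm_blob_struct: ver, orglen, complen, then the
// (already compressed) frm data.  Compression itself is left out; 'payload'
// stands for the my_compress() output.
static std::vector<unsigned char> pack_header(uint32_t orglen,
                                              const std::vector<unsigned char> &payload)
{
  std::vector<unsigned char> blob(12 + payload.size());
  store4(&blob[0], 1);                         // header version
  store4(&blob[4], orglen);                    // length before compression
  store4(&blob[8], (uint32_t)payload.size());  // length as stored
  std::memcpy(&blob[12], payload.data(), payload.size());
  return blob;
}

int main()
{
  std::vector<unsigned char> payload = {'f', 'r', 'm'};
  std::vector<unsigned char> blob = pack_header(100, payload);
  assert(load4(&blob[0]) == 1);    // version check, as unpackfrm() does first
  assert(load4(&blob[4]) == 100);
  assert(load4(&blob[8]) == 3);
  return 0;
}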
if ((file=my_create(path, CREATE_MODE,O_RDWR | O_TRUNC,MYF(MY_WME))) >= 0) + { + // It's an empty file + error=0; + my_close(file,MYF(0)); + } + DBUG_RETURN(error); +} + +int +ndbcluster_show_status(THD* thd) +{ + Protocol *protocol= thd->protocol; + + DBUG_ENTER("ndbcluster_show_status"); + + if (have_ndbcluster != SHOW_OPTION_YES) + { + my_message(ER_NOT_SUPPORTED_YET, + "Cannot call SHOW NDBCLUSTER STATUS because skip-ndbcluster is defined", + MYF(0)); + DBUG_RETURN(TRUE); + } + + List<Item> field_list; + field_list.push_back(new Item_empty_string("free_list", 255)); + field_list.push_back(new Item_return_int("created", 10,MYSQL_TYPE_LONG)); + field_list.push_back(new Item_return_int("free", 10,MYSQL_TYPE_LONG)); + field_list.push_back(new Item_return_int("sizeof", 10,MYSQL_TYPE_LONG)); + + if (protocol->send_fields(&field_list, 1)) + DBUG_RETURN(TRUE); + + if (thd->transaction.thd_ndb && + ((Thd_ndb*)thd->transaction.thd_ndb)->ndb) + { + Ndb* ndb= ((Thd_ndb*)thd->transaction.thd_ndb)->ndb; + Ndb::Free_list_usage tmp; tmp.m_name= 0; + while (ndb->get_free_list_usage(&tmp)) + { + protocol->prepare_for_resend(); + + protocol->store(tmp.m_name, &my_charset_bin); + protocol->store((uint)tmp.m_created); + protocol->store((uint)tmp.m_free); + protocol->store((uint)tmp.m_sizeof); + if (protocol->write()) + DBUG_RETURN(TRUE); + } + } + send_eof(thd); + + DBUG_RETURN(FALSE); +} + +#endif /* HAVE_NDBCLUSTER_DB */ diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h new file mode 100644 index 00000000000..83d9d87777a --- /dev/null +++ b/sql/ha_ndbcluster.h @@ -0,0 +1,286 @@ +/* Copyright (C) 2000-2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
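write_ndb_file() above only needs the .ndb file to exist; its contents stay empty. A rough standalone equivalent using the C++ standard library rather than my_create()/my_close() (the path handling is simplified and write_marker_file() is an illustrative name, not part of the patch):

#include <fstream>
#include <string>

// Create an empty "<datadir>/<db>/<table>.ndb" marker file whose mere
// presence tells the server that this table lives in NDB.  Returns 0 on
// success and 1 on failure, mirroring write_ndb_file()'s convention.
int write_marker_file(const std::string &datadir,
                      const std::string &db,
                      const std::string &table)
{
  std::string path = datadir + "/" + db + "/" + table + ".ndb";
  std::ofstream marker(path.c_str(), std::ios::out | std::ios::trunc);
  return marker ? 0 : 1;
}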
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* + This file defines the NDB Cluster handler: the interface between MySQL and + NDB Cluster +*/ + +/* The class defining a handle to an NDB Cluster table */ + +#ifdef USE_PRAGMA_INTERFACE +#pragma interface /* gcc class implementation */ +#endif + +#include <ndbapi_limits.h> + +#define NDB_HIDDEN_PRIMARY_KEY_LENGTH 8 + +class Ndb; // Forward declaration +class NdbOperation; // Forward declaration +class NdbConnection; // Forward declaration +class NdbRecAttr; // Forward declaration +class NdbResultSet; // Forward declaration +class NdbScanOperation; +class NdbIndexScanOperation; +class NdbBlob; + +// connectstring to cluster if given by mysqld +extern const char *ndbcluster_connectstring; + +typedef enum ndb_index_type { + UNDEFINED_INDEX = 0, + PRIMARY_KEY_INDEX = 1, + PRIMARY_KEY_ORDERED_INDEX = 2, + UNIQUE_INDEX = 3, + UNIQUE_ORDERED_INDEX = 4, + ORDERED_INDEX = 5 +} NDB_INDEX_TYPE; + +typedef struct ndb_index_data { + NDB_INDEX_TYPE type; + void *index; + void *unique_index; + unsigned char *unique_index_attrid_map; +} NDB_INDEX_DATA; + +typedef struct st_ndbcluster_share { + THR_LOCK lock; + pthread_mutex_t mutex; + char *table_name; + uint table_name_length,use_count; +} NDB_SHARE; + +/* + Place holder for ha_ndbcluster thread specific data +*/ + +class Thd_ndb { + public: + Thd_ndb(); + ~Thd_ndb(); + Ndb *ndb; + ulong count; + uint lock_count; + int error; +}; + +class ha_ndbcluster: public handler +{ + public: + ha_ndbcluster(TABLE *table); + ~ha_ndbcluster(); + + int open(const char *name, int mode, uint test_if_locked); + int close(void); + + int write_row(byte *buf); + int update_row(const byte *old_data, byte *new_data); + int delete_row(const byte *buf); + int index_init(uint index); + int index_end(); + int index_read(byte *buf, const byte *key, uint key_len, + enum ha_rkey_function find_flag); + int index_read_idx(byte *buf, uint index, const byte *key, uint key_len, + enum ha_rkey_function find_flag); + int index_next(byte *buf); + int index_prev(byte *buf); + int index_first(byte *buf); + int index_last(byte *buf); + int rnd_init(bool scan); + int rnd_end(); + int rnd_next(byte *buf); + int rnd_pos(byte *buf, byte *pos); + void position(const byte *record); + int read_range_first(const key_range *start_key, + const key_range *end_key, + bool eq_range, bool sorted); + int read_range_first_to_buf(const key_range *start_key, + const key_range *end_key, + bool eq_range, bool sorted, + byte* buf); + int read_range_next(); + + bool get_error_message(int error, String *buf); + void info(uint); + int extra(enum ha_extra_function operation); + int extra_opt(enum ha_extra_function operation, ulong cache_size); + int reset(); + int external_lock(THD *thd, int lock_type); + int start_stmt(THD *thd); + const char * table_type() const; + const char ** bas_ext() const; + ulong table_flags(void) const; + ulong index_flags(uint idx, uint part, bool all_parts) const; + uint max_supported_record_length() const; + uint max_supported_keys() const; + uint max_supported_key_parts() const; + uint max_supported_key_length() const; + + int rename_table(const char *from, const char *to); + int delete_table(const char *name); + int create(const char *name, TABLE *form, HA_CREATE_INFO *info); + THR_LOCK_DATA **store_lock(THD *thd, + THR_LOCK_DATA **to, + enum thr_lock_type lock_type); + + 
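The NDB_SHARE structure declared above, together with get_share()/free_share() earlier in ha_ndbcluster.cc, is a reference-counted registry keyed by table name. A compact sketch of the same idea using standard C++ containers instead of the mysys HASH and pthread primitives (all names below are illustrative, not MySQL's):

#include <map>
#include <mutex>
#include <string>

struct Share
{
  std::string table_name;
  unsigned use_count = 0;
  // per-table lock state would live here, as THR_LOCK does in NDB_SHARE
};

static std::mutex registry_mutex;              // role of ndbcluster_mutex
static std::map<std::string, Share> registry;  // role of ndbcluster_open_tables

// Look up (or create) the shared entry and bump its reference count.
Share *acquire_share(const std::string &name)
{
  std::lock_guard<std::mutex> guard(registry_mutex);
  Share &s = registry[name];       // default-constructed on first use
  if (s.use_count == 0)
    s.table_name = name;
  s.use_count++;
  return &s;
}

// Drop one reference; remove the entry when the last user is gone.
void release_share(Share *share)
{
  std::lock_guard<std::mutex> guard(registry_mutex);
  if (--share->use_count == 0)
  {
    std::string key = share->table_name;  // copy before the entry is destroyed
    registry.erase(key);
  }
}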
bool low_byte_first() const; + bool has_transactions(); + const char* index_type(uint key_number); + + double scan_time(); + ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key); + void start_bulk_insert(ha_rows rows); + int end_bulk_insert(); + + static Thd_ndb* seize_thd_ndb(); + static void release_thd_ndb(Thd_ndb* thd_ndb); + uint8 table_cache_type(); + + static void set_dbname(const char *pathname, char *dbname); + static void set_tabname(const char *pathname, char *tabname); + + private: + int alter_table_name(const char *to); + int drop_table(); + int create_index(const char *name, KEY *key_info, bool unique); + int create_ordered_index(const char *name, KEY *key_info); + int create_unique_index(const char *name, KEY *key_info); + int initialize_autoincrement(const void *table); + enum ILBP {ILBP_CREATE = 0, ILBP_OPEN = 1}; // Index List Build Phase + int build_index_list(Ndb *ndb, TABLE *tab, enum ILBP phase); + int get_metadata(const char* path); + void release_metadata(); + NDB_INDEX_TYPE get_index_type(uint idx_no) const; + NDB_INDEX_TYPE get_index_type_from_table(uint index_no) const; + int check_index_fields_not_null(uint index_no); + + int pk_read(const byte *key, uint key_len, byte *buf); + int complemented_pk_read(const byte *old_data, byte *new_data); + int peek_row(const byte *record); + int unique_index_read(const byte *key, uint key_len, + byte *buf); + int ordered_index_scan(const key_range *start_key, + const key_range *end_key, + bool sorted, byte* buf); + int full_table_scan(byte * buf); + int next_result(byte *buf); + int define_read_attrs(byte* buf, NdbOperation* op); + int filtered_scan(const byte *key, uint key_len, + byte *buf, + enum ha_rkey_function find_flag); + int close_scan(); + void unpack_record(byte *buf); + int get_ndb_lock_type(enum thr_lock_type type); + + void set_dbname(const char *pathname); + void set_tabname(const char *pathname); + + bool set_hidden_key(NdbOperation*, + uint fieldnr, const byte* field_ptr); + int set_ndb_key(NdbOperation*, Field *field, + uint fieldnr, const byte* field_ptr); + int set_ndb_value(NdbOperation*, Field *field, uint fieldnr, bool *set_blob_value= 0); + int get_ndb_value(NdbOperation*, Field *field, uint fieldnr, byte*); + friend int g_get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg); + int get_ndb_blobs_value(NdbBlob *last_ndb_blob); + int set_primary_key(NdbOperation *op, const byte *key); + int set_primary_key_from_record(NdbOperation *op, const byte *record); + int set_bounds(NdbIndexScanOperation *ndb_op, const key_range *keys[2]); + int key_cmp(uint keynr, const byte * old_row, const byte * new_row); + void print_results(); + + longlong get_auto_increment(); + void invalidate_dictionary_cache(bool global); + int ndb_err(NdbConnection*); + bool uses_blob_value(bool all_fields); + + int write_ndb_file(); + + private: + int check_ndb_connection(); + + NdbConnection *m_active_trans; + NdbResultSet *m_active_cursor; + void *m_table; + int m_table_version; + void *m_table_info; + char m_dbname[FN_HEADLEN]; + //char m_schemaname[FN_HEADLEN]; + char m_tabname[FN_HEADLEN]; + ulong m_table_flags; + THR_LOCK_DATA m_lock; + NDB_SHARE *m_share; + NDB_INDEX_DATA m_index[MAX_KEY]; + // NdbRecAttr has no reference to blob + typedef union { NdbRecAttr *rec; NdbBlob *blob; void *ptr; } NdbValue; + NdbValue m_value[NDB_MAX_ATTRIBUTES_IN_TABLE]; + byte m_ref[NDB_HIDDEN_PRIMARY_KEY_LENGTH]; + bool m_use_write; + bool m_ignore_dup_key; + bool m_primary_key_update; + bool m_retrieve_all_fields; + bool 
m_retrieve_primary_key; + ha_rows m_rows_to_insert; + ha_rows m_rows_inserted; + ha_rows m_bulk_insert_rows; + bool m_bulk_insert_not_flushed; + ha_rows m_ops_pending; + bool m_skip_auto_increment; + bool m_blobs_pending; + // memory for blobs in one tuple + char *m_blobs_buffer; + uint32 m_blobs_buffer_size; + uint m_dupkey; + // set from thread variables at external lock + bool m_ha_not_exact_count; + bool m_force_send; + ha_rows m_autoincrement_prefetch; + bool m_transaction_on; + bool m_use_local_query_cache; + + Ndb *get_ndb(); + void set_rec_per_key(); + void records_update(); + void no_uncommitted_rows_execute_failure(); + void no_uncommitted_rows_update(int); + void no_uncommitted_rows_init(THD *); + void no_uncommitted_rows_reset(THD *); + + friend int execute_no_commit(ha_ndbcluster*, NdbConnection*); + friend int execute_commit(ha_ndbcluster*, NdbConnection*); + friend int execute_no_commit_ie(ha_ndbcluster*, NdbConnection*); +}; + +bool ndbcluster_init(void); +bool ndbcluster_end(void); + +int ndbcluster_commit(THD *thd, void* ndb_transaction); +int ndbcluster_rollback(THD *thd, void* ndb_transaction); + +void ndbcluster_close_connection(THD *thd); + +int ndbcluster_discover(THD* thd, const char* dbname, const char* name, + const void** frmblob, uint* frmlen); +int ndbcluster_find_files(THD *thd,const char *db,const char *path, + const char *wild, bool dir, List<char> *files); +int ndbcluster_table_exists_in_engine(THD* thd, + const char *db, const char *name); +int ndbcluster_drop_database(const char* path); + +void ndbcluster_print_error(int error, const NdbOperation *error_op); + +int ndbcluster_show_status(THD*); diff --git a/sql/handler.cc b/sql/handler.cc index 65078a485c5..e166f9885fc 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -17,7 +17,7 @@ /* Handler-calling-functions */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_IMPLEMENTATION #pragma implementation // gcc: Class implementation #endif @@ -32,9 +32,24 @@ #ifdef HAVE_BERKELEY_DB #include "ha_berkeley.h" #endif +#ifdef HAVE_BLACKHOLE_DB +#include "ha_blackhole.h" +#endif +#ifdef HAVE_EXAMPLE_DB +#include "examples/ha_example.h" +#endif +#ifdef HAVE_ARCHIVE_DB +#include "examples/ha_archive.h" +#endif +#ifdef HAVE_CSV_DB +#include "examples/ha_tina.h" +#endif #ifdef HAVE_INNOBASE_DB #include "ha_innodb.h" #endif +#ifdef HAVE_NDBCLUSTER_DB +#include "ha_ndbcluster.h" +#endif #include <myisampack.h> #include <errno.h> @@ -46,16 +61,47 @@ ulong ha_read_count, ha_write_count, ha_delete_count, ha_update_count, ha_read_key_count, ha_read_next_count, ha_read_prev_count, ha_read_first_count, ha_read_last_count, ha_commit_count, ha_rollback_count, - ha_read_rnd_count, ha_read_rnd_next_count; + ha_read_rnd_count, ha_read_rnd_next_count, ha_discover_count; -const char *ha_table_type[] = { - "", "DIAB_ISAM","HASH","MISAM","PISAM","RMS_ISAM","HEAP", "ISAM", - "MRG_ISAM","MYISAM", "MRG_MYISAM", "BDB", "INNODB", "GEMINI", "?", "?",NullS -}; +static SHOW_COMP_OPTION have_yes= SHOW_OPTION_YES; -TYPELIB ha_table_typelib= +struct show_table_type_st sys_table_types[]= { - array_elements(ha_table_type)-3, "", ha_table_type + {"MyISAM", &have_yes, + "Default engine as of MySQL 3.23 with great performance", DB_TYPE_MYISAM}, + {"HEAP", &have_yes, + "Alias for MEMORY", DB_TYPE_HEAP}, + {"MEMORY", &have_yes, + "Hash based, stored in memory, useful for temporary tables", DB_TYPE_HEAP}, + {"MERGE", &have_yes, + "Collection of identical MyISAM tables", DB_TYPE_MRG_MYISAM}, + {"MRG_MYISAM",&have_yes, + "Alias for MERGE", DB_TYPE_MRG_MYISAM}, + 
{"ISAM", &have_isam, + "Obsolete storage engine, now replaced by MyISAM", DB_TYPE_ISAM}, + {"MRG_ISAM", &have_isam, + "Obsolete storage engine, now replaced by MERGE", DB_TYPE_MRG_ISAM}, + {"InnoDB", &have_innodb, + "Supports transactions, row-level locking, and foreign keys", DB_TYPE_INNODB}, + {"INNOBASE", &have_innodb, + "Alias for INNODB", DB_TYPE_INNODB}, + {"BDB", &have_berkeley_db, + "Supports transactions and page-level locking", DB_TYPE_BERKELEY_DB}, + {"BERKELEYDB",&have_berkeley_db, + "Alias for BDB", DB_TYPE_BERKELEY_DB}, + {"NDBCLUSTER", &have_ndbcluster, + "Clustered, fault-tolerant, memory-based tables", DB_TYPE_NDBCLUSTER}, + {"NDB", &have_ndbcluster, + "Alias for NDBCLUSTER", DB_TYPE_NDBCLUSTER}, + {"EXAMPLE",&have_example_db, + "Example storage engine", DB_TYPE_EXAMPLE_DB}, + {"ARCHIVE",&have_archive_db, + "Archive storage engine", DB_TYPE_ARCHIVE_DB}, + {"CSV",&have_csv_db, + "CSV storage engine", DB_TYPE_CSV_DB}, + {"BLACKHOLE",&have_blackhole_db, + "Storage engine designed to act as null storage", DB_TYPE_BLACKHOLE_DB}, + {NullS, NULL, NullS, DB_TYPE_UNKNOWN} }; const char *ha_row_type[] = { @@ -66,36 +112,77 @@ const char *tx_isolation_names[] = { "READ-UNCOMMITTED", "READ-COMMITTED", "REPEATABLE-READ", "SERIALIZABLE", NullS}; TYPELIB tx_isolation_typelib= {array_elements(tx_isolation_names)-1,"", - tx_isolation_names}; + tx_isolation_names, NULL}; + +static TYPELIB known_extensions= {0,"known_exts", NULL, NULL}; +uint known_extensions_id= 0; + +enum db_type ha_resolve_by_name(const char *name, uint namelen) +{ + THD *thd=current_thd; + if (thd && !my_strcasecmp(&my_charset_latin1, name, "DEFAULT")) { + return (enum db_type) thd->variables.table_type; + } + + show_table_type_st *types; + for (types= sys_table_types; types->type; types++) + { + if (!my_strcasecmp(&my_charset_latin1, name, types->type)) + return (enum db_type) types->db_type; + } + return DB_TYPE_UNKNOWN; +} + +const char *ha_get_storage_engine(enum db_type db_type) +{ + show_table_type_st *types; + for (types= sys_table_types; types->type; types++) + { + if (db_type == types->db_type) + return types->type; + } + + return "none"; +} + + +my_bool ha_storage_engine_is_enabled(enum db_type database_type) +{ + show_table_type_st *types; + for (types= sys_table_types; types->type; types++) + { + if ((database_type == types->db_type) && + (*types->value == SHOW_OPTION_YES)) + return TRUE; + } + return FALSE; +} + /* Use other database handler if databasehandler is not incompiled */ enum db_type ha_checktype(enum db_type database_type) { + if (ha_storage_engine_is_enabled(database_type)) + return database_type; + switch (database_type) { -#ifdef HAVE_BERKELEY_DB - case DB_TYPE_BERKELEY_DB: - return(berkeley_skip ? DB_TYPE_MYISAM : database_type); -#endif -#ifdef HAVE_INNOBASE_DB - case DB_TYPE_INNODB: - return(innodb_skip ? DB_TYPE_MYISAM : database_type); -#endif #ifndef NO_HASH case DB_TYPE_HASH: + return (database_type); #endif -#ifdef HAVE_ISAM - case DB_TYPE_ISAM: case DB_TYPE_MRG_ISAM: -#endif - case DB_TYPE_HEAP: - case DB_TYPE_MYISAM: - case DB_TYPE_MRG_MYISAM: - return (database_type); /* Database exists on system */ + return (DB_TYPE_MRG_MYISAM); default: break; } - return(DB_TYPE_MYISAM); /* Use this as default */ + + return + DB_TYPE_UNKNOWN != (enum db_type) current_thd->variables.table_type ? + (enum db_type) current_thd->variables.table_type : + DB_TYPE_UNKNOWN != (enum db_type) global_system_variables.table_type ? 
+ (enum db_type) global_system_variables.table_type : + DB_TYPE_MYISAM; } /* ha_checktype */ @@ -103,13 +190,17 @@ handler *get_new_handler(TABLE *table, enum db_type db_type) { switch (db_type) { #ifndef NO_HASH - return new ha_hash(table); + case DB_TYPE_HASH: + return new ha_hash(table); #endif #ifdef HAVE_ISAM case DB_TYPE_MRG_ISAM: return new ha_isammrg(table); case DB_TYPE_ISAM: return new ha_isam(table); +#else + case DB_TYPE_MRG_ISAM: + return new ha_myisammrg(table); #endif #ifdef HAVE_BERKELEY_DB case DB_TYPE_BERKELEY_DB: @@ -119,6 +210,26 @@ handler *get_new_handler(TABLE *table, enum db_type db_type) case DB_TYPE_INNODB: return new ha_innobase(table); #endif +#ifdef HAVE_EXAMPLE_DB + case DB_TYPE_EXAMPLE_DB: + return new ha_example(table); +#endif +#ifdef HAVE_ARCHIVE_DB + case DB_TYPE_ARCHIVE_DB: + return new ha_archive(table); +#endif +#ifdef HAVE_BLACKHOLE_DB + case DB_TYPE_BLACKHOLE_DB: + return new ha_blackhole(table); +#endif +#ifdef HAVE_CSV_DB + case DB_TYPE_CSV_DB: + return new ha_tina(table); +#endif +#ifdef HAVE_NDBCLUSTER_DB + case DB_TYPE_NDBCLUSTER: + return new ha_ndbcluster(table); +#endif case DB_TYPE_HEAP: return new ha_heap(table); default: // should never happen @@ -136,32 +247,66 @@ handler *get_new_handler(TABLE *table, enum db_type db_type) } } +bool ha_caching_allowed(THD* thd, char* table_key, + uint key_length, uint8 cache_type) +{ +#ifdef HAVE_INNOBASE_DB + if (cache_type == HA_CACHE_TBL_ASKTRANSACT) + return innobase_query_caching_of_table_permitted(thd, table_key, key_length); +#endif + return 1; +} + int ha_init() { + int error= 0; #ifdef HAVE_BERKELEY_DB - if (!berkeley_skip) + if (have_berkeley_db == SHOW_OPTION_YES) { - int error; - if ((error=berkeley_init())) - return error; - if (!berkeley_skip) // If we couldn't use handler - opt_using_transactions=1; + if (berkeley_init()) + { + have_berkeley_db= SHOW_OPTION_DISABLED; // If we couldn't use handler + error= 1; + } else - have_berkeley_db=SHOW_OPTION_DISABLED; + opt_using_transactions=1; } #endif #ifdef HAVE_INNOBASE_DB - if (!innodb_skip) + if (have_innodb == SHOW_OPTION_YES) { if (innobase_init()) - return -1; - if (!innodb_skip) // If we couldn't use handler + { + have_innodb= SHOW_OPTION_DISABLED; // If we couldn't use handler + error= 1; + } + else opt_using_transactions=1; + } +#endif +#ifdef HAVE_NDBCLUSTER_DB + if (have_ndbcluster == SHOW_OPTION_YES) + { + if (ndbcluster_init()) + { + have_ndbcluster= SHOW_OPTION_DISABLED; + error= 1; + } else - have_innodb=SHOW_OPTION_DISABLED; + opt_using_transactions=1; } #endif - return 0; +#ifdef HAVE_ARCHIVE_DB + if (have_archive_db == SHOW_OPTION_YES) + { + if (archive_db_init()) + { + have_archive_db= SHOW_OPTION_DISABLED; + error= 1; + } + } +#endif + return error; } /* close, flush or restart databases */ @@ -181,30 +326,46 @@ int ha_panic(enum ha_panic_function flag) error|=mi_panic(flag); error|=myrg_panic(flag); #ifdef HAVE_BERKELEY_DB - if (!berkeley_skip) + if (have_berkeley_db == SHOW_OPTION_YES) error|=berkeley_end(); #endif #ifdef HAVE_INNOBASE_DB - if (!innodb_skip) + if (have_innodb == SHOW_OPTION_YES) error|=innobase_end(); #endif +#ifdef HAVE_NDBCLUSTER_DB + if (have_ndbcluster == SHOW_OPTION_YES) + error|=ndbcluster_end(); +#endif +#ifdef HAVE_ARCHIVE_DB + if (have_archive_db == SHOW_OPTION_YES) + error|= archive_db_end(); +#endif return error; } /* ha_panic */ void ha_drop_database(char* path) { #ifdef HAVE_INNOBASE_DB - if (!innodb_skip) + if (have_innodb == SHOW_OPTION_YES) innobase_drop_database(path); #endif +#ifdef 
HAVE_NDBCLUSTER_DB + if (have_ndbcluster == SHOW_OPTION_YES) + ndbcluster_drop_database(path); +#endif } void ha_close_connection(THD* thd) { #ifdef HAVE_INNOBASE_DB - if (!innodb_skip) + if (have_innodb == SHOW_OPTION_YES) innobase_close_connection(thd); #endif +#ifdef HAVE_NDBCLUSTER_DB + if (have_ndbcluster == SHOW_OPTION_YES) + ndbcluster_close_connection(thd); +#endif } /* @@ -266,17 +427,25 @@ int ha_report_binlog_offset_and_commit(THD *thd, #ifdef HAVE_INNOBASE_DB THD_TRANS *trans; trans = &thd->transaction.all; - if (trans->innobase_tid) + if (trans->innodb_active_trans) { + /* + If we updated some InnoDB tables (innodb_active_trans is true), the + binlog coords will be reported into InnoDB during the InnoDB commit + (innobase_report_binlog_offset_and_commit). But if we updated only + non-InnoDB tables, we need an explicit call to report it. + */ if ((error=innobase_report_binlog_offset_and_commit(thd, - trans->innobase_tid, - log_file_name, - end_offset))) + trans->innobase_tid, + log_file_name, + end_offset))) { my_error(ER_ERROR_DURING_COMMIT, MYF(0), error); error=1; } } + else if (opt_innodb_safe_binlog) // Don't report if not useful + innobase_store_binlog_offset_and_flush_log(log_file_name, end_offset); #endif return error; } @@ -338,15 +507,16 @@ int ha_release_temporary_latches(THD *thd) int ha_commit_trans(THD *thd, THD_TRANS* trans) { int error=0; - DBUG_ENTER("ha_commit"); + DBUG_ENTER("ha_commit_trans"); #ifdef USING_TRANSACTIONS if (opt_using_transactions) { - bool operation_done= 0, need_start_waiters= 0; bool transaction_commited= 0; + bool operation_done= 0, need_start_waiters= 0; + /* If transaction has done some updates to tables */ - if (trans == &thd->transaction.all && - my_b_tell(&thd->transaction.trans_log)) + if (trans == &thd->transaction.all && mysql_bin_log.is_open() && + my_b_tell(&thd->transaction.trans_log)) { if ((error= wait_if_global_read_lock(thd, 0, 0))) { @@ -362,11 +532,35 @@ int ha_commit_trans(THD *thd, THD_TRANS* trans) if (mysql_bin_log.is_open()) { mysql_bin_log.write(thd, &thd->transaction.trans_log, 1); + statistic_increment(binlog_cache_use, &LOCK_status); + if (thd->transaction.trans_log.disk_writes != 0) + { + /* + We have to do this after addition of trans_log to main binlog since + this operation can cause flushing of end of trans_log to disk. 
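The two status counters touched here work as a pair: binlog_cache_use counts every transaction flushed from the transaction cache into the binlog, and binlog_cache_disk_use additionally counts those whose cache had spilled to disk, after which the spill indicator is reset. A toy sketch of that accounting (TransCache and the counter names below are illustrative, not the real THD/IO_CACHE types):

// Simplified picture of the accounting around the transaction cache: one
// counter per flushed cache, one more when that cache had overflowed to
// temporary disk storage.
struct TransCache
{
  unsigned long disk_writes = 0;   // how often the in-memory cache hit disk
};

static unsigned long binlog_cache_use_count = 0;
static unsigned long binlog_cache_disk_use_count = 0;

void account_cache_flush(TransCache &cache)
{
  binlog_cache_use_count++;          // every commit that used the cache
  if (cache.disk_writes != 0)
  {
    binlog_cache_disk_use_count++;   // cache was too small for this transaction
    cache.disk_writes = 0;           // reset for the next transaction
  }
}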
+ */ + statistic_increment(binlog_cache_disk_use, &LOCK_status); + thd->transaction.trans_log.disk_writes= 0; + } reinit_io_cache(&thd->transaction.trans_log, WRITE_CACHE, (my_off_t) 0, 0, 1); thd->transaction.trans_log.end_of_file= max_binlog_cache_size; } } +#ifdef HAVE_NDBCLUSTER_DB + if (trans->ndb_tid) + { + if ((error=ndbcluster_commit(thd,trans->ndb_tid))) + { + if (error == -1) + my_error(ER_ERROR_DURING_COMMIT, MYF(0)); + error=1; + } + if (trans == &thd->transaction.all) + operation_done= transaction_commited= 1; + trans->ndb_tid=0; + } +#endif #ifdef HAVE_BERKELEY_DB if (trans->bdb_tid) { @@ -399,7 +593,7 @@ int ha_commit_trans(THD *thd, THD_TRANS* trans) query_cache.invalidate(thd->transaction.changed_tables); #endif /*HAVE_QUERY_CACHE*/ if (error && trans == &thd->transaction.all && mysql_bin_log.is_open()) - sql_print_error("Error: Got error during commit; Binlog is not up to date!"); + sql_print_error("Got error during commit; Binlog is not up to date!"); thd->variables.tx_isolation=thd->session_tx_isolation; if (operation_done) { @@ -417,11 +611,30 @@ int ha_commit_trans(THD *thd, THD_TRANS* trans) int ha_rollback_trans(THD *thd, THD_TRANS *trans) { int error=0; - DBUG_ENTER("ha_rollback"); + DBUG_ENTER("ha_rollback_trans"); #ifdef USING_TRANSACTIONS if (opt_using_transactions) { bool operation_done=0; + /* + As rollback can be 30 times slower than insert in InnoDB, and user may + not know there's rollback (if it's because of a dupl row), better warn. + */ + const char *save_proc_info= thd->proc_info; + thd->proc_info= "Rolling back"; +#ifdef HAVE_NDBCLUSTER_DB + if (trans->ndb_tid) + { + if ((error=ndbcluster_rollback(thd, trans->ndb_tid))) + { + if (error == -1) + my_error(ER_ERROR_DURING_ROLLBACK, MYF(0)); + error=1; + } + trans->ndb_tid = 0; + operation_done=1; + } +#endif #ifdef HAVE_BERKELEY_DB if (trans->bdb_tid) { @@ -446,17 +659,30 @@ int ha_rollback_trans(THD *thd, THD_TRANS *trans) operation_done=1; } #endif - if (trans == &thd->transaction.all) + if ((trans == &thd->transaction.all) && mysql_bin_log.is_open()) { /* - Update the binary log with a BEGIN/ROLLBACK block if we have cached some - queries and we updated some non-transactional table. Such cases should - be rare (updating a non-transactional table inside a transaction...). + Update the binary log with a BEGIN/ROLLBACK block if we have + cached some queries and we updated some non-transactional + table. Such cases should be rare (updating a + non-transactional table inside a transaction...). Count disk + writes to trans_log in any case. */ - if (unlikely((thd->options & OPTION_STATUS_NO_TRANS_UPDATE) && - mysql_bin_log.is_open() && - my_b_tell(&thd->transaction.trans_log))) - mysql_bin_log.write(thd, &thd->transaction.trans_log, 0); + if (my_b_tell(&thd->transaction.trans_log)) + { + if (unlikely(thd->options & OPTION_STATUS_NO_TRANS_UPDATE)) + mysql_bin_log.write(thd, &thd->transaction.trans_log, 0); + statistic_increment(binlog_cache_use, &LOCK_status); + if (thd->transaction.trans_log.disk_writes != 0) + { + /* + We have to do this after addition of trans_log to main binlog since + this operation can cause flushing of end of trans_log to disk. 
+ */ + statistic_increment(binlog_cache_disk_use, &LOCK_status); + thd->transaction.trans_log.disk_writes= 0; + } + } /* Flushed or not, empty the binlog cache */ reinit_io_cache(&thd->transaction.trans_log, WRITE_CACHE, (my_off_t) 0, 0, 1); @@ -467,6 +693,7 @@ int ha_rollback_trans(THD *thd, THD_TRANS *trans) thd->variables.tx_isolation=thd->session_tx_isolation; if (operation_done) statistic_increment(ha_rollback_count,&LOCK_status); + thd->proc_info= save_proc_info; } #endif /* USING_TRANSACTIONS */ DBUG_RETURN(error); @@ -484,12 +711,12 @@ int ha_rollback_trans(THD *thd, THD_TRANS *trans) simply truncate the binlog cache, we lose the part of the binlog cache where the update is. If we want to not lose it, we need to write the SAVEPOINT command and the ROLLBACK TO SAVEPOINT command to the binlog cache. The latter - is easy: it's just write at the end of the binlog cache, but the former should - be *inserted* to the place where the user called SAVEPOINT. The solution is - that when the user calls SAVEPOINT, we write it to the binlog cache (so no - need to later insert it). As transactions are never intermixed in the binary log - (i.e. they are serialized), we won't have conflicts with savepoint names when - using mysqlbinlog or in the slave SQL thread. + is easy: it's just write at the end of the binlog cache, but the former + should be *inserted* to the place where the user called SAVEPOINT. The + solution is that when the user calls SAVEPOINT, we write it to the binlog + cache (so no need to later insert it). As transactions are never intermixed + in the binary log (i.e. they are serialized), we won't have conflicts with + savepoint names when using mysqlbinlog or in the slave SQL thread. Then when ROLLBACK TO SAVEPOINT is called, if we updated some non-transactional table, we don't truncate the binlog cache but instead write ROLLBACK TO SAVEPOINT to it; otherwise we truncate the binlog cache (which @@ -518,7 +745,7 @@ int ha_rollback_to_savepoint(THD *thd, char *savepoint_name) my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), error); error=1; } - else + else if (mysql_bin_log.is_open()) { /* Write ROLLBACK TO SAVEPOINT to the binlog cache if we have updated some @@ -526,10 +753,9 @@ int ha_rollback_to_savepoint(THD *thd, char *savepoint_name) from the SAVEPOINT command. */ if (unlikely((thd->options & OPTION_STATUS_NO_TRANS_UPDATE) && - mysql_bin_log.is_open() && my_b_tell(&thd->transaction.trans_log))) { - Query_log_event qinfo(thd, thd->query, thd->query_length, TRUE); + Query_log_event qinfo(thd, thd->query, thd->query_length, TRUE, FALSE); if (mysql_bin_log.write(&qinfo)) error= 1; } @@ -555,37 +781,61 @@ Return value: always 0, that is, succeeds always int ha_savepoint(THD *thd, char *savepoint_name) { - my_off_t binlog_cache_pos=0; int error=0; DBUG_ENTER("ha_savepoint"); #ifdef USING_TRANSACTIONS if (opt_using_transactions) { - binlog_cache_pos=my_b_tell(&thd->transaction.trans_log); -#ifdef HAVE_INNOBASE_DB - innobase_savepoint(thd,savepoint_name, binlog_cache_pos); -#endif - /* Write it to the binary log (see comments of ha_rollback_to_savepoint). 
*/ + /* Write it to the binary log (see comments of ha_rollback_to_savepoint) */ if (mysql_bin_log.is_open()) { - Query_log_event qinfo(thd, thd->query, thd->query_length, TRUE); +#ifdef HAVE_INNOBASE_DB + innobase_savepoint(thd,savepoint_name, + my_b_tell(&thd->transaction.trans_log)); +#endif + Query_log_event qinfo(thd, thd->query, thd->query_length, TRUE, FALSE); if (mysql_bin_log.write(&qinfo)) error= 1; } +#ifdef HAVE_INNOBASE_DB + else + innobase_savepoint(thd,savepoint_name,0); +#endif } #endif /* USING_TRANSACTIONS */ DBUG_RETURN(error); } + +int ha_start_consistent_snapshot(THD *thd) +{ +#ifdef HAVE_INNOBASE_DB + if ((have_innodb == SHOW_OPTION_YES) && + !innobase_start_trx_and_assign_read_view(thd)) + return 0; +#endif + /* + Same idea as when one wants to CREATE TABLE in one engine which does not + exist: + */ + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, + "This MySQL server does not support any " + "consistent-read capable storage engine"); + return 0; +} + + bool ha_flush_logs() { bool result=0; #ifdef HAVE_BERKELEY_DB - if (!berkeley_skip && berkeley_flush_logs()) + if ((have_berkeley_db == SHOW_OPTION_YES) && + berkeley_flush_logs()) result=1; #endif #ifdef HAVE_INNOBASE_DB - if (!innodb_skip && innobase_flush_logs()) + if ((have_innodb == SHOW_OPTION_YES) && + innobase_flush_logs()) result=1; #endif return result; @@ -606,7 +856,7 @@ int ha_delete_table(enum db_type table_type, const char *path) { /* Ensure that table handler get path in lower case */ strmov(tmp_path, path); - casedn_str(tmp_path); + my_casedn_str(files_charset_info, tmp_path); path= tmp_path; } int error=file->delete_table(path); @@ -679,7 +929,7 @@ my_off_t ha_get_ptr(byte *ptr, uint pack_length) int handler::ha_open(const char *name, int mode, int test_if_locked) { int error; - DBUG_ENTER("handler::open"); + DBUG_ENTER("handler::ha_open"); DBUG_PRINT("enter",("name: %s db_type: %d db_stat: %d mode: %d lock_test: %d", name, table->db_type, table->db_stat, mode, test_if_locked)); @@ -719,36 +969,6 @@ int handler::ha_open(const char *name, int mode, int test_if_locked) DBUG_RETURN(error); } -int handler::check(THD* thd, HA_CHECK_OPT* check_opt) -{ - return HA_ADMIN_NOT_IMPLEMENTED; -} - -int handler::backup(THD* thd, HA_CHECK_OPT* check_opt) -{ - return HA_ADMIN_NOT_IMPLEMENTED; -} - -int handler::restore(THD* thd, HA_CHECK_OPT* check_opt) -{ - return HA_ADMIN_NOT_IMPLEMENTED; -} - -int handler::repair(THD* thd, HA_CHECK_OPT* check_opt) -{ - return HA_ADMIN_NOT_IMPLEMENTED; -} - -int handler::optimize(THD* thd, HA_CHECK_OPT* check_opt) -{ - return HA_ADMIN_NOT_IMPLEMENTED; -} - -int handler::analyze(THD* thd, HA_CHECK_OPT* check_opt) -{ - return HA_ADMIN_NOT_IMPLEMENTED; -} - /* Read first row (only) from a table This is never called for InnoDB or BDB tables, as these table types @@ -765,53 +985,27 @@ int handler::read_first_row(byte * buf, uint primary_key) /* If there is very few deleted rows in the table, find the first row by scanning the table. 
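The logic being set up in read_first_row() boils down to a two-way choice: with only a few deleted rows a plain scan finds the first row cheaply, otherwise an ordered primary key (HA_READ_ORDER) is used to skip the holes. A sketch of that decision, with Cursor as a hypothetical stand-in for the handler calls:

struct Cursor
{
  unsigned long deleted_rows = 0;
  bool primary_key_ordered = false;          // i.e. index_flags(pk) & HA_READ_ORDER

  int scan_first_row(char *)  { return 0; }  // rnd_init/rnd_next/rnd_end path
  int index_first_row(char *) { return 0; }  // index_init/index_first/index_end path
};

int read_first_row(Cursor &c, char *buf)
{
  if (c.deleted_rows < 10 || !c.primary_key_ordered)
    return c.scan_first_row(buf);   // few holes: take the first live row by scanning
  return c.index_first_row(buf);    // many holes: go through the primary key instead
}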
+ TODO remove the test for HA_READ_ORDER */ - if (deleted < 10 || primary_key >= MAX_KEY || - !(index_flags(primary_key) & HA_READ_ORDER)) + if (deleted < 10 || primary_key >= MAX_KEY || + !(index_flags(primary_key, 0, 0) & HA_READ_ORDER)) { - (void) rnd_init(); + (void) ha_rnd_init(1); while ((error= rnd_next(buf)) == HA_ERR_RECORD_DELETED) ; - (void) rnd_end(); + (void) ha_rnd_end(); } else { /* Find the first row through the primary key */ - (void) index_init(primary_key); + (void) ha_index_init(primary_key); error=index_first(buf); - (void) index_end(); + (void) ha_index_end(); } DBUG_RETURN(error); } /* - The following function is only needed for tables that may be temporary tables - during joins -*/ - -int handler::restart_rnd_next(byte *buf, byte *pos) -{ - return HA_ERR_WRONG_COMMAND; -} - - -/* Set a timestamp in record */ - -void handler::update_timestamp(byte *record) -{ - long skr= (long) current_thd->query_start(); -#ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) - { - int4store(record,skr); - } - else -#endif - longstore(record,skr); - return; -} - -/* Updates field with field_type NEXT_NUMBER according to following: if field = 0 change field to the next free key in database. */ @@ -820,19 +1014,25 @@ void handler::update_auto_increment() { longlong nr; THD *thd; - DBUG_ENTER("update_auto_increment"); - if (table->next_number_field->val_int() != 0) + DBUG_ENTER("handler::update_auto_increment"); + if (table->next_number_field->val_int() != 0 || + table->auto_increment_field_not_null && + current_thd->variables.sql_mode & MODE_NO_AUTO_VALUE_ON_ZERO) { + table->auto_increment_field_not_null= FALSE; auto_increment_column_changed=0; DBUG_VOID_RETURN; } + table->auto_increment_field_not_null= FALSE; thd=current_thd; if ((nr=thd->next_insert_id)) thd->next_insert_id=0; // Clear after use else nr=get_auto_increment(); - thd->insert_id((ulonglong) nr); - table->next_number_field->store(nr); + if (!table->next_number_field->store(nr)) + thd->insert_id((ulonglong) nr); + else + thd->insert_id(table->next_number_field->val_int()); auto_increment_column_changed=1; DBUG_VOID_RETURN; } @@ -872,7 +1072,7 @@ longlong handler::get_auto_increment() void handler::print_error(int error, myf errflag) { - DBUG_ENTER("print_error"); + DBUG_ENTER("handler::print_error"); DBUG_PRINT("enter",("error: %d",error)); int textno=ER_GET_ERRNO; @@ -901,7 +1101,7 @@ void handler::print_error(int error, myf errflag) { /* Write the dupplicated key in the error message */ char key[MAX_KEY_LENGTH]; - String str(key,sizeof(key)); + String str(key,sizeof(key),system_charset_info); key_unpack(&str,table,(uint) key_nr); uint max_length=MYSQL_ERRMSG_SIZE-(uint) strlen(ER(ER_DUP_ENTRY)); if (str.length() >= max_length) @@ -915,6 +1115,9 @@ void handler::print_error(int error, myf errflag) textno=ER_DUP_KEY; break; } + case HA_ERR_NULL_IN_SPATIAL: + textno= ER_UNKNOWN_ERROR; + DBUG_VOID_RETURN; case HA_ERR_FOUND_DUPP_UNIQUE: textno=ER_DUP_UNIQUE; break; @@ -966,9 +1169,38 @@ void handler::print_error(int error, myf errflag) case HA_ERR_NO_REFERENCED_ROW: textno=ER_NO_REFERENCED_ROW; break; + case HA_ERR_NO_SUCH_TABLE: + { + /* + We have to use path to find database name instead of using + table->table_cache_key because if the table didn't exist, then + table_cache_key was not set up + */ + char *db; + char buff[FN_REFLEN]; + uint length=dirname_part(buff,table->path); + buff[length-1]=0; + db=buff+dirname_length(buff); + my_error(ER_NO_SUCH_TABLE,MYF(0),db,table->table_name); + break; + } default: { - 
my_error(ER_GET_ERRNO,errflag,error); + /* The error was "unknown" to this function. + Ask handler if it has got a message for this error */ + bool temporary= FALSE; + String str; + temporary= get_error_message(error, &str); + if (!str.is_empty()) + { + const char* engine= table_type(); + if (temporary) + my_error(ER_GET_TEMPORARY_ERRMSG,MYF(0),error,str.ptr(),engine); + else + my_error(ER_GET_ERRMSG,MYF(0),error,str.ptr(),engine); + } + else + my_error(ER_GET_ERRNO,errflag,error); DBUG_VOID_RETURN; } } @@ -977,13 +1209,30 @@ void handler::print_error(int error, myf errflag) } +/* + Return an error message specific to this handler + + SYNOPSIS + error error code previously returned by handler + buf Pointer to String where to add error message + + Returns true if this is a temporary error + */ + +bool handler::get_error_message(int error, String* buf) +{ + return FALSE; +} + + /* Return key if error because of duplicated keys */ uint handler::get_dup_key(int error) { - DBUG_ENTER("get_dup_key"); + DBUG_ENTER("handler::get_dup_key"); table->file->errkey = (uint) -1; - if (error == HA_ERR_FOUND_DUPP_KEY || error == HA_ERR_FOUND_DUPP_UNIQUE) + if (error == HA_ERR_FOUND_DUPP_KEY || error == HA_ERR_FOUND_DUPP_UNIQUE || + error == HA_ERR_NULL_IN_SPATIAL) info(HA_STATUS_ERRKEY | HA_STATUS_NO_LOCK); DBUG_RETURN(table->file->errkey); } @@ -1006,24 +1255,29 @@ int handler::delete_table(const char *name) int handler::rename_table(const char * from, const char * to) { - DBUG_ENTER("handler::rename_table"); - for (const char **ext=bas_ext(); *ext ; ext++) + int error= 0; + for (const char **ext= bas_ext(); *ext ; ext++) { - if (rename_file_ext(from,to,*ext)) - DBUG_RETURN(my_errno); + if (rename_file_ext(from, to, *ext)) + { + if ((error=my_errno) != ENOENT) + break; + error= 0; + } } - DBUG_RETURN(0); + return error; } /* - Tell the handler to turn on or off logging to the handler's recovery log + Tell the handler to turn on or off transaction in the handler */ -int ha_recovery_logging(THD *thd, bool on) +int ha_enable_transaction(THD *thd, bool on) { int error=0; - DBUG_ENTER("ha_recovery_logging"); + DBUG_ENTER("ha_enable_transaction"); + thd->transaction.on= on; DBUG_RETURN(error); } @@ -1032,7 +1286,7 @@ int handler::index_next_same(byte *buf, const byte *key, uint keylen) int error; if (!(error=index_next(buf))) { - if (key_cmp(table, key, active_index, keylen)) + if (key_cmp_if_same(table, key, active_index, keylen)) { table->status=STATUS_NOT_FOUND; error=HA_ERR_END_OF_FILE; @@ -1042,24 +1296,14 @@ int handler::index_next_same(byte *buf, const byte *key, uint keylen) } -/* - This is called to delete all rows in a table - If the handler don't support this, then this function will - return HA_ERR_WRONG_COMMAND and MySQL will delete the rows one - by one. 
-*/ - -int handler::delete_all_rows() -{ - return (my_errno=HA_ERR_WRONG_COMMAND); -} - /**************************************************************************** ** Some general functions that isn't in the handler class ****************************************************************************/ - /* Initiates table-file and calls apropriate database-creator */ - /* Returns 1 if something got wrong */ +/* + Initiates table-file and calls apropriate database-creator + Returns 1 if something got wrong +*/ int ha_create_table(const char *name, HA_CREATE_INFO *create_info, bool update_create_info) @@ -1074,15 +1318,13 @@ int ha_create_table(const char *name, HA_CREATE_INFO *create_info, if (update_create_info) { update_create_info_from_table(create_info, &table); - if (table.file->table_flags() & HA_DROP_BEFORE_CREATE) - table.file->delete_table(name); // Needed for BDB tables } if (lower_case_table_names == 2 && !(table.file->table_flags() & HA_FILE_BASED)) { /* Ensure that handler gets name in lower case */ strmov(name_buff, name); - casedn_str(name_buff); + my_casedn_str(files_charset_info, name_buff); name= name_buff; } @@ -1093,31 +1335,66 @@ int ha_create_table(const char *name, HA_CREATE_INFO *create_info, DBUG_RETURN(error != 0); } - /* Use key cacheing on all databases */ +/* + Try to discover table from engine and + if found, write the frm file to disk. -void ha_key_cache(void) + RETURN VALUES: + -1 : Table did not exists + 0 : Table created ok + > 0 : Error, table existed but could not be created + +*/ + +int ha_create_table_from_engine(THD* thd, + const char *db, + const char *name) { + int error; + const void *frmblob; + uint frmlen; + char path[FN_REFLEN]; + HA_CREATE_INFO create_info; + TABLE table; + DBUG_ENTER("ha_create_table_from_engine"); + DBUG_PRINT("enter", ("name '%s'.'%s'", db, name)); + + bzero((char*) &create_info,sizeof(create_info)); + if ((error= ha_discover(thd, db, name, &frmblob, &frmlen))) + { + /* Table could not be discovered and thus not created */ + DBUG_RETURN(error); + } + /* - The following mutex is not really needed as long as keybuff_size is - treated as a long value, but we use the mutex here to guard for future - changes. 
+ Table exists in handler and could be discovered + frmblob and frmlen are set, write the frm to disk */ - pthread_mutex_lock(&LOCK_global_system_variables); - long tmp= (long) keybuff_size; - pthread_mutex_unlock(&LOCK_global_system_variables); - if (tmp) - (void) init_key_cache(tmp); -} + (void)strxnmov(path,FN_REFLEN,mysql_data_home,"/",db,"/",name,NullS); + // Save the frm file + error= writefrm(path, frmblob, frmlen); + my_free((char*) frmblob, MYF(0)); + if (error) + DBUG_RETURN(2); -void ha_resize_key_cache(void) -{ - pthread_mutex_lock(&LOCK_global_system_variables); - long tmp= (long) keybuff_size; - pthread_mutex_unlock(&LOCK_global_system_variables); - (void) resize_key_cache(tmp); -} + if (openfrm(path,"",0,(uint) READ_ALL, 0, &table)) + DBUG_RETURN(3); + + update_create_info_from_table(&create_info, &table); + create_info.table_options|= HA_OPTION_CREATE_FROM_ENGINE; + if (lower_case_table_names == 2 && + !(table.file->table_flags() & HA_FILE_BASED)) + { + /* Ensure that handler gets name in lower case */ + my_casedn_str(files_charset_info, path); + } + error=table.file->create(path,&table,&create_info); + VOID(closefrm(&table)); + + DBUG_RETURN(error != 0); +} static int NEAR_F delete_file(const char *name,const char *ext,int extflag) { @@ -1131,3 +1408,355 @@ void st_ha_check_opt::init() flags= sql_flags= 0; sort_buffer_size = current_thd->variables.myisam_sort_buff_size; } + + +/***************************************************************************** + Key cache handling. + + This code is only relevant for ISAM/MyISAM tables + + key_cache->cache may be 0 only in the case where a key cache is not + initialized or when we where not able to init the key cache in a previous + call to ha_init_key_cache() (probably out of memory) +*****************************************************************************/ + +/* Init a key cache if it has not been initied before */ + + +int ha_init_key_cache(const char *name, KEY_CACHE *key_cache) +{ + DBUG_ENTER("ha_init_key_cache"); + + if (!key_cache->key_cache_inited) + { + pthread_mutex_lock(&LOCK_global_system_variables); + long tmp_buff_size= (long) key_cache->param_buff_size; + long tmp_block_size= (long) key_cache->param_block_size; + uint division_limit= key_cache->param_division_limit; + uint age_threshold= key_cache->param_age_threshold; + pthread_mutex_unlock(&LOCK_global_system_variables); + DBUG_RETURN(!init_key_cache(key_cache, + tmp_block_size, + tmp_buff_size, + division_limit, age_threshold)); + } + DBUG_RETURN(0); +} + + +/* Resize key cache */ + +int ha_resize_key_cache(KEY_CACHE *key_cache) +{ + DBUG_ENTER("ha_resize_key_cache"); + + if (key_cache->key_cache_inited) + { + pthread_mutex_lock(&LOCK_global_system_variables); + long tmp_buff_size= (long) key_cache->param_buff_size; + long tmp_block_size= (long) key_cache->param_block_size; + uint division_limit= key_cache->param_division_limit; + uint age_threshold= key_cache->param_age_threshold; + pthread_mutex_unlock(&LOCK_global_system_variables); + DBUG_RETURN(!resize_key_cache(key_cache, tmp_block_size, + tmp_buff_size, + division_limit, age_threshold)); + } + DBUG_RETURN(0); +} + + +/* Change parameters for key cache (like size) */ + +int ha_change_key_cache_param(KEY_CACHE *key_cache) +{ + if (key_cache->key_cache_inited) + { + pthread_mutex_lock(&LOCK_global_system_variables); + uint division_limit= key_cache->param_division_limit; + uint age_threshold= key_cache->param_age_threshold; + pthread_mutex_unlock(&LOCK_global_system_variables); + 
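All three key-cache helpers above follow the same pattern: hold LOCK_global_system_variables just long enough to copy the tunables, then run the actual (possibly slow) cache operation without the lock. A minimal sketch of that pattern, with KeyCacheParams and resize_cache() as illustrative stand-ins for the real KEY_CACHE fields and resize_key_cache():

#include <mutex>

struct KeyCacheParams
{
  long buff_size;
  long block_size;
  unsigned division_limit;
  unsigned age_threshold;
};

static std::mutex global_sysvar_mutex;  // role of LOCK_global_system_variables

int resize_cache(const KeyCacheParams &) { return 0; }  // placeholder for the slow work

int resize_with_snapshot(const KeyCacheParams &shared)
{
  KeyCacheParams snapshot;
  {
    std::lock_guard<std::mutex> guard(global_sysvar_mutex);
    snapshot = shared;               // read the tunables atomically
  }
  return resize_cache(snapshot);     // slow part runs without the global lock
}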
change_key_cache_param(key_cache, division_limit, age_threshold); + } + return 0; +} + +/* Free memory allocated by a key cache */ + +int ha_end_key_cache(KEY_CACHE *key_cache) +{ + end_key_cache(key_cache, 1); // Can never fail + return 0; +} + +/* Move all tables from one key cache to another one */ + +int ha_change_key_cache(KEY_CACHE *old_key_cache, + KEY_CACHE *new_key_cache) +{ + mi_change_key_cache(old_key_cache, new_key_cache); + return 0; +} + + +/* + Try to discover one table from handler(s) + + RETURN + -1 : Table did not exists + 0 : OK. In this case *frmblob and *frmlen are set + >0 : error. frmblob and frmlen may not be set +*/ + +int ha_discover(THD *thd, const char *db, const char *name, + const void **frmblob, uint *frmlen) +{ + int error= -1; // Table does not exist in any handler + DBUG_ENTER("ha_discover"); + DBUG_PRINT("enter", ("db: %s, name: %s", db, name)); + if (is_prefix(name,tmp_file_prefix)) /* skip temporary tables */ + DBUG_RETURN(error); +#ifdef HAVE_NDBCLUSTER_DB + if (have_ndbcluster == SHOW_OPTION_YES) + error= ndbcluster_discover(thd, db, name, frmblob, frmlen); +#endif + if (!error) + statistic_increment(ha_discover_count,&LOCK_status); + DBUG_RETURN(error); +} + + +/* + Call this function in order to give the handler the possiblity + to ask engine if there are any new tables that should be written to disk + or any dropped tables that need to be removed from disk +*/ + +int +ha_find_files(THD *thd,const char *db,const char *path, + const char *wild, bool dir, List<char> *files) +{ + int error= 0; + DBUG_ENTER("ha_find_files"); + DBUG_PRINT("enter", ("db: %s, path: %s, wild: %s, dir: %d", + db, path, wild, dir)); +#ifdef HAVE_NDBCLUSTER_DB + if (have_ndbcluster == SHOW_OPTION_YES) + error= ndbcluster_find_files(thd, db, path, wild, dir, files); +#endif + DBUG_RETURN(error); +} + + +/* + Ask handler if the table exists in engine + + RETURN + 0 Table does not exist + 1 Table exists + # Error code + + */ +int ha_table_exists_in_engine(THD* thd, const char* db, const char* name) +{ + int error= 0; + DBUG_ENTER("ha_table_exists_in_engine"); + DBUG_PRINT("enter", ("db: %s, name: %s", db, name)); +#ifdef HAVE_NDBCLUSTER_DB + if (have_ndbcluster == SHOW_OPTION_YES) + error= ndbcluster_table_exists_in_engine(thd, db, name); +#endif + DBUG_PRINT("exit", ("error: %d", error)); + DBUG_RETURN(error); +} + + +/* + Read first row between two ranges. + Store ranges for future calls to read_range_next + + SYNOPSIS + read_range_first() + start_key Start key. Is 0 if no min range + end_key End key. Is 0 if no max range + eq_range_arg Set to 1 if start_key == end_key + sorted Set to 1 if result should be sorted per key + + NOTES + Record is read into table->record[0] + + RETURN + 0 Found row + HA_ERR_END_OF_FILE No rows in range + # Error code +*/ + +int handler::read_range_first(const key_range *start_key, + const key_range *end_key, + bool eq_range_arg, bool sorted) +{ + int result; + DBUG_ENTER("handler::read_range_first"); + + eq_range= eq_range_arg; + end_range= 0; + if (end_key) + { + end_range= &save_end_range; + save_end_range= *end_key; + key_compare_result_on_equal= ((end_key->flag == HA_READ_BEFORE_KEY) ? 1 : + (end_key->flag == HA_READ_AFTER_KEY) ? 
-1 : 0); + } + range_key_part= table->key_info[active_index].key_part; + + if (!start_key) // Read first record + result= index_first(table->record[0]); + else + result= index_read(table->record[0], + start_key->key, + start_key->length, + start_key->flag); + if (result) + DBUG_RETURN((result == HA_ERR_KEY_NOT_FOUND) + ? HA_ERR_END_OF_FILE + : result); + + DBUG_RETURN (compare_key(end_range) <= 0 ? 0 : HA_ERR_END_OF_FILE); +} + + +/* + Read next row between two ranges. + + SYNOPSIS + read_range_next() + + NOTES + Record is read into table->record[0] + + RETURN + 0 Found row + HA_ERR_END_OF_FILE No rows in range + # Error code +*/ + +int handler::read_range_next() +{ + int result; + DBUG_ENTER("handler::read_range_next"); + + if (eq_range) + { + /* We trust that index_next_same always gives a row in range */ + DBUG_RETURN(index_next_same(table->record[0], + end_range->key, + end_range->length)); + } + result= index_next(table->record[0]); + if (result) + DBUG_RETURN(result); + DBUG_RETURN(compare_key(end_range) <= 0 ? 0 : HA_ERR_END_OF_FILE); +} + + +/* + Compare if found key (in row) is over max-value + + SYNOPSIS + compare_key + range range to compare to row. May be 0 for no range + + NOTES + See key.cc::key_cmp() for details + + RETURN + The return value is SIGN(key_in_row - range_key): + + 0 Key is equal to range or 'range' == 0 (no range) + -1 Key is less than range + 1 Key is larger than range +*/ + +int handler::compare_key(key_range *range) +{ + int cmp; + if (!range) + return 0; // No max range + cmp= key_cmp(range_key_part, range->key, range->length); + if (!cmp) + cmp= key_compare_result_on_equal; + return cmp; +} + +int handler::index_read_idx(byte * buf, uint index, const byte * key, + uint key_len, enum ha_rkey_function find_flag) +{ + int error= ha_index_init(index); + if (!error) + error= index_read(buf, key, key_len, find_flag); + if (!error) + error= ha_index_end(); + return error; +} + + +/* + Returns a list of all known extensions. 
+ + SYNOPSIS + ha_known_exts() + + NOTES + No mutexes, worst case race is a minor surplus memory allocation + We have to recreate the extension map if mysqld is restarted (for example + within libmysqld) + + RETURN VALUE + pointer pointer to TYPELIB structure +*/ + +TYPELIB *ha_known_exts(void) +{ + if (!known_extensions.type_names || mysys_usage_id != known_extensions_id) + { + show_table_type_st *types; + List<char> found_exts; + List_iterator_fast<char> it(found_exts); + const char **ext, *old_ext; + + known_extensions_id= mysys_usage_id; + found_exts.push_back((char*) ".db"); + for (types= sys_table_types; types->type; types++) + { + if (*types->value == SHOW_OPTION_YES) + { + handler *file= get_new_handler(0,(enum db_type) types->db_type); + for (ext= file->bas_ext(); *ext; ext++) + { + while ((old_ext= it++)) + { + if (!strcmp(old_ext, *ext)) + break; + } + if (!old_ext) + found_exts.push_back((char *) *ext); + + it.rewind(); + } + delete file; + } + } + ext= (const char **) my_once_alloc(sizeof(char *)* + (found_exts.elements+1), + MYF(MY_WME | MY_FAE)); + + DBUG_ASSERT(ext); + known_extensions.count= found_exts.elements; + known_extensions.type_names= ext; + + while ((old_ext= it++)) + *ext++= old_ext; + *ext= 0; + } + return &known_extensions; +} diff --git a/sql/handler.h b/sql/handler.h index e52164a871a..d4bb19dd7b2 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000,2004 MySQL AB & MySQL Finland AB & TCX DataKonsult AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -17,17 +17,19 @@ /* Definitions for parameters to do with handler-routines */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_INTERFACE #pragma interface /* gcc class implementation */ #endif #include <ft_global.h> +#include <keycache.h> #ifndef NO_HASH #define NO_HASH /* Not yet implemented */ #endif -#if defined(HAVE_BERKELEY_DB) || defined(HAVE_INNOBASE_DB) +#if defined(HAVE_BERKELEY_DB) || defined(HAVE_INNOBASE_DB) || \ + defined(HAVE_NDBCLUSTER_DB) #define USING_TRANSACTIONS #endif @@ -41,51 +43,63 @@ #define HA_ADMIN_INTERNAL_ERROR -4 #define HA_ADMIN_INVALID -5 #define HA_ADMIN_REJECT -6 +#define HA_ADMIN_TRY_ALTER -7 /* Bits in table_flags() to show what database can do */ -#define HA_READ_RND_SAME 1 /* Read RND-record to KEY-record - (To update with RND-read) */ -#define HA_KEYPOS_TO_RNDPOS 2 /* ha_info gives pos to record */ -#define HA_TABLE_SCAN_ON_INDEX 4 /* No separate data/index file */ -#define HA_REC_NOT_IN_SEQ 8 /* ha_info don't return recnumber; - It returns a position to ha_r_rnd */ -#define HA_NO_INDEX 32 /* No index needed for next/prev */ -#define HA_KEY_READ_WRONG_STR 64 /* keyread returns converted strings */ -#define HA_NULL_KEY 128 /* One can have keys with NULL */ -#define HA_DUPP_POS 256 /* ha_position() gives dupp row */ -#define HA_NO_BLOBS 512 /* Doesn't support blobs */ -#define HA_BLOB_KEY (HA_NO_BLOBS*2) /* key on blob */ -#define HA_AUTO_PART_KEY (HA_BLOB_KEY*2) -#define HA_REQUIRE_PRIMARY_KEY (HA_AUTO_PART_KEY*2) -#define HA_NOT_EXACT_COUNT (HA_REQUIRE_PRIMARY_KEY*2) -#define HA_NO_WRITE_DELAYED (HA_NOT_EXACT_COUNT*2) -#define HA_PRIMARY_KEY_IN_READ_INDEX (HA_NO_WRITE_DELAYED*2) -#define HA_DROP_BEFORE_CREATE (HA_PRIMARY_KEY_IN_READ_INDEX*2) -#define HA_NOT_READ_AFTER_KEY (HA_DROP_BEFORE_CREATE*2) -#define HA_NOT_DELETE_WITH_CACHE (HA_NOT_READ_AFTER_KEY*2) -#define HA_NO_TEMP_TABLES 
(HA_NOT_DELETE_WITH_CACHE*2) -#define HA_NO_PREFIX_CHAR_KEYS (HA_NO_TEMP_TABLES*2) -#define HA_CAN_FULLTEXT (HA_NO_PREFIX_CHAR_KEYS*2) -#define HA_CAN_SQL_HANDLER (HA_CAN_FULLTEXT*2) -#define HA_NO_AUTO_INCREMENT (HA_CAN_SQL_HANDLER*2) -/* Table data are stored in separate files */ -#define HA_FILE_BASED (HA_NO_AUTO_INCREMENT*2) - -/* - Next record gives next record according last record read (even - if database is updated after read). Not used at this point. -*/ -#define HA_LASTKEY_ORDER (HA_FILE_BASED*2) +#define HA_READ_RND_SAME (1 << 0) /* can switch index during the scan + with ::rnd_same() - not used yet. + see mi_rsame/heap_rsame/myrg_rsame */ +#define HA_TABLE_SCAN_ON_INDEX (1 << 2) /* No separate data/index file */ +#define HA_REC_NOT_IN_SEQ (1 << 3) /* ha_info don't return recnumber; + It returns a position to ha_r_rnd */ +#define HA_CAN_GEOMETRY (1 << 4) +#define HA_FAST_KEY_READ (1 << 5) /* no need for a record cache in filesort */ +#define HA_NULL_IN_KEY (1 << 7) /* One can have keys with NULL */ +#define HA_DUPP_POS (1 << 8) /* ha_position() gives dup row */ +#define HA_NO_BLOBS (1 << 9) /* Doesn't support blobs */ +#define HA_CAN_INDEX_BLOBS (1 << 10) +#define HA_AUTO_PART_KEY (1 << 11) /* auto-increment in multi-part key */ +#define HA_REQUIRE_PRIMARY_KEY (1 << 12) /* .. and can't create a hidden one */ +#define HA_NOT_EXACT_COUNT (1 << 13) +#define HA_CAN_INSERT_DELAYED (1 << 14) /* only handlers with table-level locks + need no special code to support + INSERT DELAYED */ +#define HA_PRIMARY_KEY_IN_READ_INDEX (1 << 15) +#define HA_NOT_DELETE_WITH_CACHE (1 << 18) +#define HA_NO_PREFIX_CHAR_KEYS (1 << 20) +#define HA_CAN_FULLTEXT (1 << 21) +#define HA_CAN_SQL_HANDLER (1 << 22) +#define HA_NO_AUTO_INCREMENT (1 << 23) +#define HA_HAS_CHECKSUM (1 << 24) +/* Table data are stored in separate files (for lower_case_table_names) */ +#define HA_FILE_BASED (1 << 26) +#define HA_ANY_INDEX_MAY_BE_UNIQUE (1 << 30) /* bits in index_flags(index_number) for what you can do with index */ -#define HA_WRONG_ASCII_ORDER 1 /* Can't use sorting through key */ -#define HA_READ_NEXT 2 /* Read next record with same key */ -#define HA_READ_PREV 4 /* Read prev. record with same key */ -#define HA_READ_ORDER 8 /* Read through record-keys in order */ +#define HA_READ_NEXT 1 /* TODO really use this flag */ +#define HA_READ_PREV 2 /* supports ::index_prev */ +#define HA_READ_ORDER 4 /* index_next/prev follow sort order */ +#define HA_READ_RANGE 8 /* can find all records in a range */ #define HA_ONLY_WHOLE_INDEX 16 /* Can't use part key searches */ -#define HA_NOT_READ_PREFIX_LAST 32 /* No support for index_read_last() */ -#define HA_KEY_READ_ONLY 64 /* Support HA_EXTRA_KEYREAD */ +#define HA_KEYREAD_ONLY 64 /* Support HA_EXTRA_KEYREAD */ + +/* operations for disable/enable indexes */ +#define HA_KEY_SWITCH_NONUNIQ 0 +#define HA_KEY_SWITCH_ALL 1 +#define HA_KEY_SWITCH_NONUNIQ_SAVE 2 +#define HA_KEY_SWITCH_ALL_SAVE 3 + + +/* + Bits in index_ddl_flags(KEY *wanted_index) + for what ddl you can do with index + If none is set, the wanted type of index is not supported + by the handler at all. See WorkLog 1563. 
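A side note on the reworked constants above: table_flags()/index_flags() return a bitmask built by ORing (1 << n) capability bits together, and callers probe individual capabilities with '&'. A tiny self-contained illustration (the CAP_* names and values are local to this example, not the real HA_* macros):

#include <cstdio>

enum : unsigned long
{
  CAP_CAN_GEOMETRY    = 1UL << 4,
  CAP_NULL_IN_KEY     = 1UL << 7,
  CAP_NOT_EXACT_COUNT = 1UL << 13
};

int main()
{
  unsigned long flags = CAP_NULL_IN_KEY | CAP_NOT_EXACT_COUNT;  // what one engine reports
  if (flags & CAP_NOT_EXACT_COUNT)
    std::printf("records() is an estimate, not an exact count\n");
  if (!(flags & CAP_CAN_GEOMETRY))
    std::printf("no spatial column support\n");
  return 0;
}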
+*/ +#define HA_DDL_SUPPORT 1 /* Supported by handler */ +#define HA_DDL_WITH_LOCK 2 /* Can create/drop with locked table */ +#define HA_DDL_ONLINE 4 /* Can create/drop without lock */ /* Parameters for open() (in register form->filestat) @@ -119,12 +133,35 @@ #define HA_OPTION_NO_DELAY_KEY_WRITE (1L << 18) #define HA_MAX_REC_LENGTH 65535 -enum db_type { DB_TYPE_UNKNOWN=0,DB_TYPE_DIAB_ISAM=1, - DB_TYPE_HASH,DB_TYPE_MISAM,DB_TYPE_PISAM, - DB_TYPE_RMS_ISAM, DB_TYPE_HEAP, DB_TYPE_ISAM, - DB_TYPE_MRG_ISAM, DB_TYPE_MYISAM, DB_TYPE_MRG_MYISAM, - DB_TYPE_BERKELEY_DB, DB_TYPE_INNODB, DB_TYPE_GEMINI, - DB_TYPE_DEFAULT }; +/* Table caching type */ +#define HA_CACHE_TBL_NONTRANSACT 0 +#define HA_CACHE_TBL_NOCACHE 1 +#define HA_CACHE_TBL_ASKTRANSACT 2 +#define HA_CACHE_TBL_TRANSACT 4 + +/* Options of START TRANSACTION statement (and later of SET TRANSACTION stmt) */ +#define MYSQL_START_TRANS_OPT_WITH_CONS_SNAPSHOT 1 + +enum db_type +{ + DB_TYPE_UNKNOWN=0,DB_TYPE_DIAB_ISAM=1, + DB_TYPE_HASH,DB_TYPE_MISAM,DB_TYPE_PISAM, + DB_TYPE_RMS_ISAM, DB_TYPE_HEAP, DB_TYPE_ISAM, + DB_TYPE_MRG_ISAM, DB_TYPE_MYISAM, DB_TYPE_MRG_MYISAM, + DB_TYPE_BERKELEY_DB, DB_TYPE_INNODB, + DB_TYPE_GEMINI, DB_TYPE_NDBCLUSTER, + DB_TYPE_EXAMPLE_DB, DB_TYPE_ARCHIVE_DB, DB_TYPE_CSV_DB, + DB_TYPE_FEDERATED_DB, + DB_TYPE_BLACKHOLE_DB, + DB_TYPE_DEFAULT // Must be last +}; + +struct show_table_type_st { + const char *type; + SHOW_COMP_OPTION *value; + const char *comment; + enum db_type db_type; +}; enum row_type { ROW_TYPE_NOT_USED=-1, ROW_TYPE_DEFAULT, ROW_TYPE_FIXED, ROW_TYPE_DYNAMIC, ROW_TYPE_COMPRESSED}; @@ -140,11 +177,14 @@ enum row_type { ROW_TYPE_NOT_USED=-1, ROW_TYPE_DEFAULT, ROW_TYPE_FIXED, #define HA_CREATE_USED_MAX_ROWS 32 #define HA_CREATE_USED_AVG_ROW_LENGTH 64 #define HA_CREATE_USED_PACK_KEYS 128 +#define HA_CREATE_USED_CHARSET 256 +#define HA_CREATE_USED_DEFAULT_CHARSET 512 typedef struct st_thd_trans { void *bdb_tid; void *innobase_tid; bool innodb_active_trans; + void *ndb_tid; } THD_TRANS; enum enum_tx_isolation { ISO_READ_UNCOMMITTED, ISO_READ_COMMITTED, @@ -152,6 +192,7 @@ enum enum_tx_isolation { ISO_READ_UNCOMMITTED, ISO_READ_COMMITTED, typedef struct st_ha_create_information { + CHARSET_INFO *table_charset, *default_table_charset; const char *comment,*password; const char *data_file_name, *index_file_name; const char *alias; @@ -182,14 +223,28 @@ typedef struct st_ha_check_opt ulong sort_buffer_size; uint flags; /* isam layer flags (e.g. for myisamchk) */ uint sql_flags; /* sql layer flags - for something myisamchk cannot do */ + KEY_CACHE *key_cache; /* new key cache when changing key cache */ void init(); } HA_CHECK_OPT; + class handler :public Sql_alloc { protected: struct st_table *table; /* The table definition */ + virtual int index_init(uint idx) { active_index=idx; return 0; } + virtual int index_end() { active_index=MAX_KEY; return 0; } + /* + rnd_init() can be called two times without rnd_end() in between + (it only makes sense if scan=1). 
+ then the second call should prepare for the new table scan (e.g + if rnd_init allocates the cursor, second call should position it + to the start of the table, no need to deallocate and allocate it again + */ + virtual int rnd_init(bool scan) =0; + virtual int rnd_end() { return 0; } + public: byte *ref; /* Pointer to current row */ byte *dupp_ref; /* Pointer to dupp row */ @@ -206,6 +261,13 @@ public: time_t create_time; /* When table was created */ time_t check_time; time_t update_time; + + /* The following are for read_range() */ + key_range save_end_range, *end_range; + KEY_PART_INFO *range_key_part; + int key_compare_result_on_equal; + bool eq_range; + uint errkey; /* Last dup key */ uint sortkey, key_used_on_scan; uint active_index; @@ -214,150 +276,253 @@ public: uint block_size; /* index block size */ uint raid_type,raid_chunks; FT_INFO *ft_handler; + enum {NONE=0, INDEX, RND} inited; bool auto_increment_column_changed; bool implicit_emptied; /* Can be !=0 only if HEAP */ + handler(TABLE *table_arg) :table(table_arg), ref(0), data_file_length(0), max_data_file_length(0), index_file_length(0), delete_length(0), auto_increment_value(0), records(0), deleted(0), mean_rec_length(0), create_time(0), check_time(0), update_time(0), - key_used_on_scan(MAX_KEY), active_index(MAX_REF_PARTS), + key_used_on_scan(MAX_KEY), active_index(MAX_KEY), ref_length(sizeof(my_off_t)), block_size(0), - raid_type(0), ft_handler(0), implicit_emptied(0) + raid_type(0), ft_handler(0), inited(NONE), implicit_emptied(0) {} - virtual ~handler(void) {} + virtual ~handler(void) { /* TODO: DBUG_ASSERT(inited == NONE); */ } int ha_open(const char *name, int mode, int test_if_locked); - void update_timestamp(byte *record); void update_auto_increment(); virtual void print_error(int error, myf errflag); + virtual bool get_error_message(int error, String *buf); uint get_dup_key(int error); void change_table_ptr(TABLE *table_arg) { table=table_arg; } virtual double scan_time() { return ulonglong2double(data_file_length) / IO_SIZE + 2; } virtual double read_time(uint index, uint ranges, ha_rows rows) { return rows2double(ranges+rows); } - virtual bool fast_key_read() { return 0;} - virtual key_map keys_to_use_for_scanning() { return 0; } + virtual const key_map *keys_to_use_for_scanning() { return &key_map_empty; } virtual bool has_transactions(){ return 0;} virtual uint extra_rec_buf_length() { return 0; } - virtual ha_rows estimate_number_of_rows() { return records+EXTRA_RECORDS; } - virtual const char *index_type(uint key_number) { return "";} + + /* + Return upper bound of current number of records in the table + (max. of how many records one will retrieve when doing a full table scan) + If upper bound is not known, HA_POS_ERROR should be returned as a max + possible upper bound. + */ + virtual ha_rows estimate_rows_upper_bound() + { return records+EXTRA_RECORDS; } + + virtual const char *index_type(uint key_number) { DBUG_ASSERT(0); return "";} - virtual int index_init(uint idx) { active_index=idx; return 0;} - virtual int index_end() {return 0; } + int ha_index_init(uint idx) + { + DBUG_ASSERT(inited==NONE); + inited=INDEX; + return index_init(idx); + } + int ha_index_end() + { + DBUG_ASSERT(inited==INDEX); + inited=NONE; + return index_end(); + } + int ha_rnd_init(bool scan) + { + DBUG_ASSERT(inited==NONE || (inited==RND && scan)); + inited=RND; + return rnd_init(scan); + } + int ha_rnd_end() + { + DBUG_ASSERT(inited==RND); + inited=NONE; + return rnd_end(); + } + /* this is neseccary in many places, e.g. 
in HANDLER command */ + int ha_index_or_rnd_end() + { + return inited == INDEX ? ha_index_end() : inited == RND ? ha_rnd_end() : 0; + } uint get_index(void) const { return active_index; } virtual int open(const char *name, int mode, uint test_if_locked)=0; - virtual void initialize(void) {} virtual int close(void)=0; - virtual int write_row(byte * buf)=0; - virtual int update_row(const byte * old_data, byte * new_data)=0; - virtual int delete_row(const byte * buf)=0; + virtual int write_row(byte * buf) { return HA_ERR_WRONG_COMMAND; } + virtual int update_row(const byte * old_data, byte * new_data) + { return HA_ERR_WRONG_COMMAND; } + virtual int delete_row(const byte * buf) + { return HA_ERR_WRONG_COMMAND; } virtual int index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag)=0; + uint key_len, enum ha_rkey_function find_flag) + { return HA_ERR_WRONG_COMMAND; } virtual int index_read_idx(byte * buf, uint index, const byte * key, - uint key_len, enum ha_rkey_function find_flag)=0; - virtual int index_next(byte * buf)=0; - virtual int index_prev(byte * buf)=0; - virtual int index_first(byte * buf)=0; - virtual int index_last(byte * buf)=0; + uint key_len, enum ha_rkey_function find_flag); + virtual int index_next(byte * buf) + { return HA_ERR_WRONG_COMMAND; } + virtual int index_prev(byte * buf) + { return HA_ERR_WRONG_COMMAND; } + virtual int index_first(byte * buf) + { return HA_ERR_WRONG_COMMAND; } + virtual int index_last(byte * buf) + { return HA_ERR_WRONG_COMMAND; } virtual int index_next_same(byte *buf, const byte *key, uint keylen); virtual int index_read_last(byte * buf, const byte * key, uint key_len) - { - return (my_errno=HA_ERR_WRONG_COMMAND); - } - virtual int ft_init() - { return -1; } - virtual FT_INFO *ft_init_ext(uint mode,uint inx,const byte *key, uint keylen, - bool presort) + { return (my_errno=HA_ERR_WRONG_COMMAND); } + virtual int read_range_first(const key_range *start_key, + const key_range *end_key, + bool eq_range, bool sorted); + virtual int read_range_next(); + int compare_key(key_range *range); + virtual int ft_init() { return HA_ERR_WRONG_COMMAND; } + void ft_end() { ft_handler=NULL; } + virtual FT_INFO *ft_init_ext(uint flags, uint inx,String *key) { return NULL; } - virtual int ft_read(byte *buf) { return -1; } - virtual int rnd_init(bool scan=1)=0; - virtual int rnd_end() { return 0; } + virtual int ft_read(byte *buf) { return HA_ERR_WRONG_COMMAND; } virtual int rnd_next(byte *buf)=0; virtual int rnd_pos(byte * buf, byte *pos)=0; virtual int read_first_row(byte *buf, uint primary_key); - virtual int restart_rnd_next(byte *buf, byte *pos); - virtual ha_rows records_in_range(int inx, - const byte *start_key,uint start_key_len, - enum ha_rkey_function start_search_flag, - const byte *end_key,uint end_key_len, - enum ha_rkey_function end_search_flag) + /* + The following function is only needed for tables that may be temporary + tables during joins + */ + virtual int restart_rnd_next(byte *buf, byte *pos) + { return HA_ERR_WRONG_COMMAND; } + virtual int rnd_same(byte *buf, uint inx) + { return HA_ERR_WRONG_COMMAND; } + virtual ha_rows records_in_range(uint inx, key_range *min_key, + key_range *max_key) { return (ha_rows) 10; } virtual void position(const byte *record)=0; - virtual my_off_t row_position() { return HA_OFFSET_ERROR; } virtual void info(uint)=0; - virtual int extra(enum ha_extra_function operation)=0; + virtual int extra(enum ha_extra_function operation) + { return 0; } virtual int extra_opt(enum ha_extra_function 
operation, ulong cache_size) - { - return extra(operation); - } - virtual int reset()=0; + { return extra(operation); } + virtual int reset() { return extra(HA_EXTRA_RESET); } virtual int external_lock(THD *thd, int lock_type)=0; virtual void unlock_row() {} virtual int start_stmt(THD *thd) {return 0;} - virtual int delete_all_rows(); + /* + This is called to delete all rows in a table + If the handler don't support this, then this function will + return HA_ERR_WRONG_COMMAND and MySQL will delete the rows one + by one. + */ + virtual int delete_all_rows() + { return (my_errno=HA_ERR_WRONG_COMMAND); } virtual longlong get_auto_increment(); virtual void update_create_info(HA_CREATE_INFO *create_info) {} - virtual int check(THD* thd, HA_CHECK_OPT* check_opt ); - virtual int repair(THD* thd, HA_CHECK_OPT* check_opt); - virtual bool check_and_repair(THD *thd) {return 1;} - virtual int optimize(THD* thd,HA_CHECK_OPT* check_opt); - virtual int analyze(THD* thd, HA_CHECK_OPT* check_opt); - virtual int backup(THD* thd, HA_CHECK_OPT* check_opt); + + /* admin commands - called from mysql_admin_table */ + virtual int check(THD* thd, HA_CHECK_OPT* check_opt) + { return HA_ADMIN_NOT_IMPLEMENTED; } + virtual int backup(THD* thd, HA_CHECK_OPT* check_opt) + { return HA_ADMIN_NOT_IMPLEMENTED; } /* restore assumes .frm file must exist, and that generate_table() has been called; It will just copy the data file and run repair. */ - virtual int restore(THD* thd, HA_CHECK_OPT* check_opt); - virtual int dump(THD* thd, int fd = -1) { return ER_DUMP_NOT_IMPLEMENTED; } - virtual void deactivate_non_unique_index(ha_rows rows) {} - virtual bool activate_all_index(THD *thd) {return 0;} - // not implemented by default - virtual int net_read_dump(NET* net) - { return ER_DUMP_NOT_IMPLEMENTED; } + virtual int restore(THD* thd, HA_CHECK_OPT* check_opt) + { return HA_ADMIN_NOT_IMPLEMENTED; } + virtual int repair(THD* thd, HA_CHECK_OPT* check_opt) + { return HA_ADMIN_NOT_IMPLEMENTED; } + virtual int optimize(THD* thd, HA_CHECK_OPT* check_opt) + { return HA_ADMIN_NOT_IMPLEMENTED; } + virtual int analyze(THD* thd, HA_CHECK_OPT* check_opt) + { return HA_ADMIN_NOT_IMPLEMENTED; } + virtual int assign_to_keycache(THD* thd, HA_CHECK_OPT* check_opt) + { return HA_ADMIN_NOT_IMPLEMENTED; } + virtual int preload_keys(THD* thd, HA_CHECK_OPT* check_opt) + { return HA_ADMIN_NOT_IMPLEMENTED; } + /* end of the list of admin commands */ + + virtual bool check_and_repair(THD *thd) { return HA_ERR_WRONG_COMMAND; } + virtual int dump(THD* thd, int fd = -1) { return HA_ERR_WRONG_COMMAND; } + virtual int disable_indexes(uint mode) { return HA_ERR_WRONG_COMMAND; } + virtual int enable_indexes(uint mode) { return HA_ERR_WRONG_COMMAND; } + virtual int indexes_are_disabled(void) {return 0;} + virtual void start_bulk_insert(ha_rows rows) {} + virtual int end_bulk_insert() {return 0; } + virtual int discard_or_import_tablespace(my_bool discard) + {return HA_ERR_WRONG_COMMAND;} + virtual int net_read_dump(NET* net) { return HA_ERR_WRONG_COMMAND; } virtual char *update_table_comment(const char * comment) { return (char*) comment;} virtual void append_create_info(String *packet) {} virtual char* get_foreign_key_create_info() { return(NULL);} /* gets foreign key create string from InnoDB */ - + /* used in ALTER TABLE; 1 if changing storage engine is allowed */ + virtual bool can_switch_engines() { return 1; } /* used in REPLACE; is > 0 if table is referred by a FOREIGN KEY */ virtual uint referenced_by_foreign_key() { return 0;} virtual void 
init_table_handle_for_HANDLER() - { return; } /* prepare InnoDB for HANDLER */ - virtual void free_foreign_key_create_info(char* str) {} + { return; } /* prepare InnoDB for HANDLER */ + virtual void free_foreign_key_create_info(char* str) {} /* The following can be called without an open handler */ virtual const char *table_type() const =0; virtual const char **bas_ext() const =0; virtual ulong table_flags(void) const =0; - virtual ulong index_flags(uint idx) const - { - return (HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | HA_KEY_READ_ONLY); - } - virtual uint max_record_length() const =0; - virtual uint max_keys() const =0; - virtual uint max_key_parts() const =0; - virtual uint max_key_length()const =0; - virtual uint max_key_part_length() { return 255; } + virtual ulong index_flags(uint idx, uint part, bool all_parts) const =0; + virtual ulong index_ddl_flags(KEY *wanted_index) const + { return (HA_DDL_SUPPORT); } + virtual int add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys) + { return (HA_ERR_WRONG_COMMAND); } + virtual int drop_index(TABLE *table_arg, uint *key_num, uint num_of_keys) + { return (HA_ERR_WRONG_COMMAND); } + + uint max_record_length() const + { return min(HA_MAX_REC_LENGTH, max_supported_record_length()); } + uint max_keys() const + { return min(MAX_KEY, max_supported_keys()); } + uint max_key_parts() const + { return min(MAX_REF_PARTS, max_supported_key_parts()); } + uint max_key_length() const + { return min(MAX_KEY_LENGTH, max_supported_key_length()); } + uint max_key_part_length() const + { return min(MAX_KEY_LENGTH, max_supported_key_part_length()); } + + virtual uint max_supported_record_length() const { return HA_MAX_REC_LENGTH; } + virtual uint max_supported_keys() const { return 0; } + virtual uint max_supported_key_parts() const { return MAX_REF_PARTS; } + virtual uint max_supported_key_length() const { return MAX_KEY_LENGTH; } + virtual uint max_supported_key_part_length() const { return 255; } virtual uint min_record_length(uint options) const { return 1; } + virtual bool low_byte_first() const { return 1; } + virtual uint checksum() const { return 0; } virtual bool is_crashed() const { return 0; } virtual bool auto_repair() const { return 0; } + /* + default rename_table() and delete_table() rename/delete files with a + given name and extensions from bas_ext() + */ virtual int rename_table(const char *from, const char *to); virtual int delete_table(const char *name); + virtual int create(const char *name, TABLE *form, HA_CREATE_INFO *info)=0; + + /* lock_count() can be more than one if the table is a MERGE */ virtual uint lock_count(void) const { return 1; } virtual THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type)=0; + + /* Type of table for caching query */ + virtual uint8 table_cache_type() { return HA_CACHE_TBL_NONTRANSACT; } + /* + Is query with this table cachable (have sense only for ASKTRANSACT + tables) + */ }; /* Some extern variables used with handlers */ +extern struct show_table_type_st sys_table_types[]; extern const char *ha_row_type[]; -extern TYPELIB ha_table_typelib, tx_isolation_typelib; +extern TYPELIB tx_isolation_typelib; +extern TYPELIB myisam_stats_method_typelib; /* Wrapper functions */ #define ha_commit_stmt(thd) (ha_commit_trans((thd), &((thd)->transaction.stmt))) @@ -365,8 +530,16 @@ extern TYPELIB ha_table_typelib, tx_isolation_typelib; #define ha_commit(thd) (ha_commit_trans((thd), &((thd)->transaction.all))) #define ha_rollback(thd) (ha_rollback_trans((thd), 
&((thd)->transaction.all))) -#define ha_supports_generate(T) (T != DB_TYPE_INNODB) +#define ha_supports_generate(T) (T != DB_TYPE_INNODB && \ + T != DB_TYPE_BERKELEY_DB && \ + T != DB_TYPE_ARCHIVE_DB && \ + T != DB_TYPE_FEDERATED_DB && \ + T != DB_TYPE_NDBCLUSTER) +bool ha_caching_allowed(THD* thd, char* table_key, + uint key_length, uint8 cache_type); +enum db_type ha_resolve_by_name(const char *name, uint namelen); +const char *ha_get_storage_engine(enum db_type db_type); handler *get_new_handler(TABLE *table, enum db_type db_type); my_off_t ha_get_ptr(byte *ptr, uint pack_length); void ha_store_ptr(byte *buff, uint pack_length, my_off_t pos); @@ -374,13 +547,17 @@ int ha_init(void); int ha_panic(enum ha_panic_function flag); void ha_close_connection(THD* thd); enum db_type ha_checktype(enum db_type database_type); +my_bool ha_storage_engine_is_enabled(enum db_type database_type); int ha_create_table(const char *name, HA_CREATE_INFO *create_info, bool update_create_info); +int ha_create_table_from_engine(THD* thd, const char *db, const char *name); int ha_delete_table(enum db_type db_type, const char *path); void ha_drop_database(char* path); -void ha_key_cache(void); -void ha_resize_key_cache(void); -int ha_start_stmt(THD *thd); +int ha_init_key_cache(const char *name, KEY_CACHE *key_cache); +int ha_resize_key_cache(KEY_CACHE *key_cache); +int ha_change_key_cache_param(KEY_CACHE *key_cache); +int ha_end_key_cache(KEY_CACHE *key_cache); +int ha_start_stmt(THD *thd); int ha_report_binlog_offset_and_commit(THD *thd, char *log_file_name, my_off_t end_offset); int ha_commit_complete(THD *thd); @@ -392,4 +569,13 @@ int ha_savepoint(THD *thd, char *savepoint_name); int ha_autocommit_or_rollback(THD *thd, int error); void ha_set_spin_retries(uint retries); bool ha_flush_logs(void); -int ha_recovery_logging(THD *thd, bool on); +int ha_enable_transaction(THD *thd, bool on); +int ha_change_key_cache(KEY_CACHE *old_key_cache, + KEY_CACHE *new_key_cache); +int ha_discover(THD* thd, const char* dbname, const char* name, + const void** frmblob, uint* frmlen); +int ha_find_files(THD *thd,const char *db,const char *path, + const char *wild, bool dir,List<char>* files); +int ha_table_exists_in_engine(THD* thd, const char* db, const char* name); +TYPELIB *ha_known_exts(void); +int ha_start_consistent_snapshot(THD *thd); diff --git a/sql/hash_filo.cc b/sql/hash_filo.cc index b85f8054f10..ec200768222 100644 --- a/sql/hash_filo.cc +++ b/sql/hash_filo.cc @@ -20,7 +20,7 @@ ** to usage. 
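
As an illustration of what ha_known_exts() (declared above, implemented in handler.cc earlier in this patch) does: collect each engine's bas_ext() extensions once, preserving first-seen order, and null-terminate the result. This sketch uses standard containers instead of List<char> and my_once_alloc; collect_known_exts and its argument layout are assumptions made for the example.

#include <cstring>
#include <vector>

/* engines[e] is a bas_ext()-style null-terminated array of extensions. */
std::vector<const char *> collect_known_exts(const char *const *const *engines,
                                             size_t n_engines)
{
  std::vector<const char *> exts;
  exts.push_back(".db");                         /* always scanned, as in the original */
  for (size_t e= 0; e < n_engines; e++)
  {
    for (const char *const *ext= engines[e]; *ext; ext++)
    {
      bool seen= false;
      for (size_t i= 0; i < exts.size() && !seen; i++)
        seen= !strcmp(exts[i], *ext);
      if (!seen)
        exts.push_back(*ext);                    /* keep each extension only once */
    }
  }
  exts.push_back(NULL);                          /* null-terminated, TYPELIB-style */
  return exts;
}
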
*/ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_IMPLEMENTATION #pragma implementation // gcc: Class implementation #endif diff --git a/sql/hash_filo.h b/sql/hash_filo.h index 34584b45d8c..fc48c3b1540 100644 --- a/sql/hash_filo.h +++ b/sql/hash_filo.h @@ -23,7 +23,7 @@ #ifndef HASH_FILO_H #define HASH_FILO_H -#ifdef __GNUC__ +#ifdef USE_PRAGMA_INTERFACE #pragma interface /* gcc class interface */ #endif @@ -42,6 +42,7 @@ class hash_filo const hash_get_key get_key; hash_free_key free_element; bool init; + CHARSET_INFO *hash_charset; hash_filo_element *first_link,*last_link; public: @@ -49,9 +50,11 @@ public: HASH cache; hash_filo(uint size_arg, uint key_offset_arg , uint key_length_arg, - hash_get_key get_key_arg, hash_free_key free_element_arg) + hash_get_key get_key_arg, hash_free_key free_element_arg, + CHARSET_INFO *hash_charset_arg) :size(size_arg), key_offset(key_offset_arg), key_length(key_length_arg), - get_key(get_key_arg), free_element(free_element_arg),init(0) + get_key(get_key_arg), free_element(free_element_arg),init(0), + hash_charset(hash_charset_arg) { bzero((char*) &cache,sizeof(cache)); } @@ -75,8 +78,8 @@ public: if (!locked) (void) pthread_mutex_lock(&lock); (void) hash_free(&cache); - (void) hash_init(&cache,size,key_offset, key_length, get_key, free_element, - 0); + (void) hash_init(&cache,hash_charset,size,key_offset, + key_length, get_key, free_element,0); if (!locked) (void) pthread_mutex_unlock(&lock); first_link=last_link=0; @@ -113,7 +116,7 @@ public: last_link=last_link->prev_used; hash_delete(&cache,(byte*) tmp); } - if (hash_insert(&cache,(byte*) entry)) + if (my_hash_insert(&cache,(byte*) entry)) { if (free_element) (*free_element)(entry); // This should never happen diff --git a/sql/hostname.cc b/sql/hostname.cc index ed56e199c3c..32c4bb8533d 100644 --- a/sql/hostname.cc +++ b/sql/hostname.cc @@ -27,9 +27,6 @@ extern "C" { // Because of SCO 3.2V4.2 #endif #if !defined( __WIN__) && !defined(OS2) -#if !defined(__NETWARE__) -#include <sys/resource.h> -#endif /* __NETWARE__ */ #ifdef HAVE_SYS_UN_H #include <sys/un.h> #endif @@ -61,22 +58,27 @@ bool hostname_cache_init() { host_entry tmp; uint offset= (uint) ((char*) (&tmp.ip) - (char*) &tmp); - (void) pthread_mutex_init(&LOCK_hostname,MY_MUTEX_INIT_SLOW); - if (!(hostname_cache=new hash_filo(HOST_CACHE_SIZE, offset, sizeof(struct in_addr),NULL, - (hash_free_key) free))) + (hash_free_key) free, + &my_charset_bin))) return 1; hostname_cache->clear(); + (void) pthread_mutex_init(&LOCK_hostname,MY_MUTEX_INIT_SLOW); return 0; } void hostname_cache_free() { - (void) pthread_mutex_destroy(&LOCK_hostname); - delete hostname_cache; + if (hostname_cache) + { + (void) pthread_mutex_destroy(&LOCK_hostname); + delete hostname_cache; + hostname_cache= 0; + } } + static void add_hostname(struct in_addr *in,const char *name) { if (!(specialflag & SPECIAL_NO_HOST_CACHE)) @@ -128,15 +130,23 @@ void reset_host_errors(struct in_addr *in) VOID(pthread_mutex_unlock(&hostname_cache->lock)); } +/* Deal with systems that don't defined INADDR_LOOPBACK */ +#ifndef INADDR_LOOPBACK +#define INADDR_LOOPBACK 0x7f000001UL +#endif my_string ip_to_hostname(struct in_addr *in, uint *errors) { uint i; host_entry *entry; DBUG_ENTER("ip_to_hostname"); + *errors= 0; + + /* We always treat the loopback address as "localhost". 
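
A standalone sketch of this loopback shortcut, including the INADDR_LOOPBACK fallback; is_loopback is a hypothetical helper, the comparison itself is the one the hunk performs before consulting the host cache or DNS.

#include <arpa/inet.h>
#include <netinet/in.h>

#ifndef INADDR_LOOPBACK
#define INADDR_LOOPBACK 0x7f000001UL    /* same fallback as in the hunk */
#endif

/* Non-zero when the peer is 127.0.0.1 and no reverse lookup is needed. */
static int is_loopback(const struct in_addr *in)
{
  return in->s_addr == htonl(INADDR_LOOPBACK);
}
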
*/ + if (in->s_addr == htonl(INADDR_LOOPBACK)) + DBUG_RETURN((char *)my_localhost); /* Check first if we have name in cache */ - *errors=0; if (!(specialflag & SPECIAL_NO_HOST_CACHE)) { VOID(pthread_mutex_lock(&hostname_cache->lock)); @@ -175,7 +185,14 @@ my_string ip_to_hostname(struct in_addr *in, uint *errors) &tmp_errno))) { DBUG_PRINT("error",("gethostbyname_r returned %d",tmp_errno)); - add_wrong_ip(in); + /* + Don't cache responses when the DSN server is down, as otherwise + transient DNS failure may leave any number of clients (those + that attempted to connect during the outage) unable to connect + indefinitely. + */ + if (tmp_errno == HOST_NOT_FOUND || tmp_errno == NO_DATA) + add_wrong_ip(in); my_gethostbyname_r_free(); DBUG_RETURN(0); } @@ -198,13 +215,17 @@ my_string ip_to_hostname(struct in_addr *in, uint *errors) { VOID(pthread_mutex_unlock(&LOCK_hostname)); DBUG_PRINT("error",("gethostbyaddr returned %d",errno)); - goto err; + + if (errno == HOST_NOT_FOUND || errno == NO_DATA) + goto add_wrong_ip_and_return; + /* Failure, don't cache responce */ + DBUG_RETURN(0); } if (!hp->h_name[0]) // Don't allow empty hostnames { VOID(pthread_mutex_unlock(&LOCK_hostname)); DBUG_PRINT("error",("Got an empty hostname")); - goto err; + goto add_wrong_ip_and_return; } if (!(name=my_strdup(hp->h_name,MYF(0)))) { @@ -223,15 +244,15 @@ my_string ip_to_hostname(struct in_addr *in, uint *errors) /* Don't accept hostnames that starts with digits because they may be false ip:s */ - if (isdigit(name[0])) + if (my_isdigit(&my_charset_latin1,name[0])) { char *pos; - for (pos= name+1 ; isdigit(*pos); pos++) ; + for (pos= name+1 ; my_isdigit(&my_charset_latin1,*pos); pos++) ; if (*pos == '.') { DBUG_PRINT("error",("mysqld doesn't accept hostnames that starts with a number followed by a '.'")); my_free(name,MYF(0)); - goto err; + goto add_wrong_ip_and_return; } } @@ -247,7 +268,7 @@ my_string ip_to_hostname(struct in_addr *in, uint *errors) DBUG_PRINT("error",("Couldn't verify hostname with gethostbyname")); my_free(name,MYF(0)); -err: +add_wrong_ip_and_return: add_wrong_ip(in); DBUG_RETURN(0); } diff --git a/sql/init.cc b/sql/init.cc index 7d90cc564a1..4beb8db0c6f 100644 --- a/sql/init.cc +++ b/sql/init.cc @@ -34,14 +34,11 @@ void unireg_init(ulong options) current_pid=(ulong) getpid(); /* Save for later ref */ init_time(); /* Init time-functions (read zone) */ -#ifdef USE_MY_ATOF - init_my_atof(); /* use our atof */ -#endif +#ifndef EMBEDDED_LIBRARY my_abort_hook=unireg_abort; /* Abort with close of databases */ +#endif VOID(strmov(reg_ext,".frm")); - for (i=0 ; i < 6 ; i++) // YYMMDDHHMMSS - dayord.pos[i]=i; specialflag=SPECIAL_SAME_DB_NAME; /* Make a tab of powers of 10 */ for (i=0,nr=1.0; i < array_elements(log_10) ; i++) @@ -49,18 +46,5 @@ void unireg_init(ulong options) log_10[i]= nr ; nr*= 10.0; } specialflag|=options; /* Set options from argv */ - - // The following is needed because of like optimization in select.cc - - uchar max_char=my_sort_order[(uchar) max_sort_char]; - for (i = 0; i < 256; i++) - { - if ((uchar) my_sort_order[i] > max_char) - { - max_char=(uchar) my_sort_order[i]; - max_sort_char= (char) i; - } - } - thread_stack_min=thread_stack - STACK_MIN_SIZE; DBUG_VOID_RETURN; } diff --git a/sql/item.cc b/sql/item.cc index 38b6516b742..5964ed388c6 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -15,14 +15,19 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_IMPLEMENTATION #pragma implementation // gcc: Class implementation 
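
The negative-caching policy above can be summarised in one predicate: only resolver errors that are definitive should be cached as "wrong IP". failure_is_permanent is a hypothetical helper; the error codes are the standard <netdb.h> ones tested in the hunk.

#include <netdb.h>

/*
  A transient failure (e.g. the DNS server is down, TRY_AGAIN) must not
  poison the host cache, or clients that happened to connect during the
  outage would stay blocked indefinitely.
*/
static bool failure_is_permanent(int resolver_errno)
{
  return resolver_errno == HOST_NOT_FOUND || resolver_errno == NO_DATA;
}
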
#endif - #include "mysql_priv.h" #include <m_ctype.h> #include "my_dir.h" +static void mark_as_dependent(THD *thd, + SELECT_LEX *last, SELECT_LEX *current, + Item_ident *item); + +const String my_null_string("NULL", 4, default_charset_info); + /***************************************************************************** ** Item functions *****************************************************************************/ @@ -34,48 +39,276 @@ void item_init(void) item_user_lock_init(); } -Item::Item() +Item::Item(): + fixed(0) +{ + marker= 0; + maybe_null=null_value=with_sum_func=unsigned_flag=0; + collation.set(&my_charset_bin, DERIVATION_COERCIBLE); + name= 0; + decimals= 0; max_length= 0; + + /* Put item in free list so that we can free all items at end */ + THD *thd= current_thd; + next= thd->free_list; + thd->free_list= this; + /* + Item constructor can be called during execution other then SQL_COM + command => we should check thd->lex->current_select on zero (thd->lex + can be uninitialised) + */ + if (thd->lex->current_select) + { + enum_parsing_place place= + thd->lex->current_select->parsing_place; + if (place == SELECT_LIST || + place == IN_HAVING) + thd->lex->current_select->select_n_having_items++; + } +} + +/* + Constructor used by Item_field, Item_*_ref & agregate (sum) functions. + Used for duplicating lists in processing queries with temporary + tables +*/ +Item::Item(THD *thd, Item *item): + str_value(item->str_value), + name(item->name), + max_length(item->max_length), + marker(item->marker), + decimals(item->decimals), + maybe_null(item->maybe_null), + null_value(item->null_value), + unsigned_flag(item->unsigned_flag), + with_sum_func(item->with_sum_func), + fixed(item->fixed), + collation(item->collation) +{ + next= thd->free_list; // Put in free list + thd->free_list= this; +} + + +void Item::print_item_w_name(String *str) { - marker=0; - binary=maybe_null=null_value=with_sum_func=unsigned_flag=0; - name=0; - decimals=0; max_length=0; - next=current_thd->free_list; // Put in free list - current_thd->free_list=this; + print(str); + if (name) + { + str->append(" AS `", 5); + str->append(name); + str->append('`'); + } } -void Item::set_name(char *str,uint length) + +Item_ident::Item_ident(const char *db_name_par,const char *table_name_par, + const char *field_name_par) + :orig_db_name(db_name_par), orig_table_name(table_name_par), + orig_field_name(field_name_par), + db_name(db_name_par), table_name(table_name_par), + field_name(field_name_par), cached_field_index(NO_CACHED_FIELD_INDEX), + cached_table(0), depended_from(0) +{ + name = (char*) field_name_par; +} + +// Constructor used by Item_field & Item_*_ref (see Item comment) +Item_ident::Item_ident(THD *thd, Item_ident *item) + :Item(thd, item), + orig_db_name(item->orig_db_name), + orig_table_name(item->orig_table_name), + orig_field_name(item->orig_field_name), + db_name(item->db_name), + table_name(item->table_name), + field_name(item->field_name), + cached_field_index(item->cached_field_index), + cached_table(item->cached_table), + depended_from(item->depended_from) +{} + +void Item_ident::cleanup() +{ + DBUG_ENTER("Item_ident::cleanup"); + DBUG_PRINT("enter", ("b:%s(%s), t:%s(%s), f:%s(%s)", + db_name, orig_db_name, + table_name, orig_table_name, + field_name, orig_field_name)); + Item::cleanup(); + db_name= orig_db_name; + table_name= orig_table_name; + field_name= orig_field_name; + DBUG_VOID_RETURN; +} + +bool Item_ident::remove_dependence_processor(byte * arg) +{ + 
DBUG_ENTER("Item_ident::remove_dependence_processor"); + if (depended_from == (st_select_lex *) arg) + depended_from= 0; + DBUG_RETURN(0); +} + + +bool Item::check_cols(uint c) +{ + if (c != 1) + { + my_error(ER_OPERAND_COLUMNS, MYF(0), c); + return 1; + } + return 0; +} + + +void Item::set_name(const char *str, uint length, CHARSET_INFO *cs) { if (!length) - name=str; // Used by AS - else { - while (length && !isgraph(*str)) + /* Empty string, used by AS or internal function like last_insert_id() */ + name= (char*) str; + return; + } + if (cs->ctype) + { + // This will probably need a better implementation in the future: + // a function in CHARSET_INFO structure. + while (length && !my_isgraph(cs,*str)) { // Fix problem with yacc length--; str++; } - name=sql_strmake(str,min(length,MAX_FIELD_WIDTH)); } + if (!my_charset_same(cs, system_charset_info)) + { + uint32 res_length; + name= sql_strmake_with_convert(str, length, cs, + MAX_ALIAS_NAME, system_charset_info, + &res_length); + } + else + name=sql_strmake(str, min(length,MAX_ALIAS_NAME)); } + /* - This function is only called when comparing items in the WHERE clause + This function is called when: + - Comparing items in the WHERE clause (when doing where optimization) + - When trying to find an ORDER BY/GROUP BY item in the SELECT part */ bool Item::eq(const Item *item, bool binary_cmp) const { + /* + Note, that this is never TRUE if item is a Item_param: + for all basic constants we have special checks, and Item_param's + type() can be only among basic constant types. + */ return type() == item->type() && name && item->name && - !my_strcasecmp(name,item->name); + !my_strcasecmp(system_charset_info,name,item->name); +} + + +Item *Item::safe_charset_converter(CHARSET_INFO *tocs) +{ + Item_func_conv_charset *conv= new Item_func_conv_charset(this, tocs, 1); + return conv->safe ? conv : NULL; +} + + +/* + Created mostly for mysql_prepare_table(). Important + when a string ENUM/SET column is described with a numeric default value: + + CREATE TABLE t1(a SET('a') DEFAULT 1); + + We cannot use generic Item::safe_charset_converter(), because + the latter returns a non-fixed Item, so val_str() crashes afterwards. + Override Item_num method, to return a fixed item. +*/ +Item *Item_num::safe_charset_converter(CHARSET_INFO *tocs) +{ + Item_string *conv; + char buf[64]; + String *s, tmp(buf, sizeof(buf), &my_charset_bin); + s= val_str(&tmp); + if ((conv= new Item_string(s->ptr(), s->length(), s->charset()))) + { + conv->str_value.copy(); + conv->str_value.shrink_to_length(); + } + return conv; +} + + +Item *Item_string::safe_charset_converter(CHARSET_INFO *tocs) +{ + Item_string *conv; + uint conv_errors; + String tmp, cstr, *ostr= val_str(&tmp); + cstr.copy(ostr->ptr(), ostr->length(), ostr->charset(), tocs, &conv_errors); + if (conv_errors || !(conv= new Item_string(cstr.ptr(), cstr.length(), + cstr.charset(), + collation.derivation))) + { + /* + Safe conversion is not possible (or EOM). + We could not convert a string into the requested character set + without data loss. The target charset does not cover all the + characters from the string. Operation cannot be done correctly. + */ + return NULL; + } + conv->str_value.copy(); + /* + The above line executes str_value.realloc() internally, + which alligns Alloced_length using ALLIGN_SIZE. + In the case of Item_string::str_value we don't want + Alloced_length to be longer than str_length. 
+ Otherwise, some functions like Item_func_concat::val_str() + try to reuse str_value as a buffer for concatenation result + for optimization purposes, so our string constant become + corrupted. See bug#8785 for more details. + Let's shrink Alloced_length to str_length to avoid this problem. + */ + conv->str_value.shrink_to_length(); + return conv; +} + + +Item *Item_param::safe_charset_converter(CHARSET_INFO *tocs) +{ + if (const_item()) + { + Item_string *conv; + uint conv_errors; + char buf[MAX_FIELD_WIDTH]; + String tmp(buf, sizeof(buf), &my_charset_bin); + String cstr, *ostr= val_str(&tmp); + /* + As safe_charset_converter is not executed for + a parameter bound to NULL, ostr should never be 0. + */ + cstr.copy(ostr->ptr(), ostr->length(), ostr->charset(), tocs, &conv_errors); + if (conv_errors || !(conv= new Item_string(cstr.ptr(), cstr.length(), + cstr.charset(), + collation.derivation))) + return NULL; + conv->str_value.copy(); + conv->str_value.shrink_to_length(); + return conv; + } + return NULL; } + bool Item_string::eq(const Item *item, bool binary_cmp) const { - if (type() == item->type()) + if (type() == item->type() && item->basic_const_item()) { if (binary_cmp) return !stringcmp(&str_value, &item->str_value); - return !sortcmp(&str_value, &item->str_value); + return !sortcmp(&str_value, &item->str_value, collation.collation); } return 0; } @@ -86,12 +319,13 @@ bool Item_string::eq(const Item *item, bool binary_cmp) const As a extra convenience the time structure is reset on error! */ -bool Item::get_date(TIME *ltime,bool fuzzydate) +bool Item::get_date(TIME *ltime,uint fuzzydate) { char buff[40]; - String tmp(buff,sizeof(buff)),*res; + String tmp(buff,sizeof(buff), &my_charset_bin),*res; if (!(res=val_str(&tmp)) || - str_to_TIME(res->ptr(),res->length(),ltime,fuzzydate) == TIMESTAMP_NONE) + str_to_datetime_with_warn(res->ptr(), res->length(), + ltime, fuzzydate) <= MYSQL_TIMESTAMP_ERROR) { bzero((char*) ltime,sizeof(*ltime)); return 1; @@ -107,9 +341,9 @@ bool Item::get_date(TIME *ltime,bool fuzzydate) bool Item::get_time(TIME *ltime) { char buff[40]; - String tmp(buff,sizeof(buff)),*res; + String tmp(buff,sizeof(buff),&my_charset_bin),*res; if (!(res=val_str(&tmp)) || - str_to_time(res->ptr(),res->length(),ltime)) + str_to_time_with_warn(res->ptr(), res->length(), ltime)) { bzero((char*) ltime,sizeof(*ltime)); return 1; @@ -117,38 +351,452 @@ bool Item::get_time(TIME *ltime) return 0; } -Item_ident::Item_ident(const char *db_name_par,const char *table_name_par, - const char *field_name_par) - :db_name(db_name_par),table_name(table_name_par),field_name(field_name_par) +CHARSET_INFO *Item::default_charset() { - name = (char*) field_name_par; + return current_thd->variables.collation_connection; +} + + +/* + Move SUM items out from item tree and replace with reference + + SYNOPSIS + split_sum_func2() + thd Thread handler + ref_pointer_array Pointer to array of reference fields + fields All fields in select + ref Pointer to item + + NOTES + This is from split_sum_func2() for items that should be split + + All found SUM items are added FIRST in the fields list and + we replace the item with a reference. 
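
A toy model of the hoist-and-reference step described in this comment: aggregated sub-expressions are parked in a preallocated pointer array (the real code also prepends them to the select's field list), and the original slot is replaced by a node that evaluates through the array entry. Expr, Ref and hoist_if_aggregate are illustrative stand-ins, not the server's Item classes.

#include <cstddef>

struct Expr
{
  virtual ~Expr() {}
  virtual bool is_sum() const { return false; }
  virtual long eval() const= 0;
};

struct Ref : public Expr
{
  Expr **slot;
  Ref(Expr **s) : slot(s) {}
  long eval() const { return (*slot)->eval(); }   /* just follow the pointer */
};

/* ref_pointer_array is preallocated by the caller, as in the server. */
static void hoist_if_aggregate(Expr *&item, Expr **ref_pointer_array, size_t &used)
{
  if (!item->is_sum())
    return;                             /* simple items are left in place */
  ref_pointer_array[used]= item;        /* the executor computes the sum here */
  item= new Ref(&ref_pointer_array[used]);
  used++;
}
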
+ + thd->fatal_error() may be called if we are out of memory +*/ + + +void Item::split_sum_func2(THD *thd, Item **ref_pointer_array, + List<Item> &fields, Item **ref) +{ + if (type() != SUM_FUNC_ITEM && with_sum_func) + { + /* Will split complicated items and ignore simple ones */ + split_sum_func(thd, ref_pointer_array, fields); + } + else if ((type() == SUM_FUNC_ITEM || + (used_tables() & ~PARAM_TABLE_BIT)) && + type() != REF_ITEM) + { + /* + Replace item with a reference so that we can easily calculate + it (in case of sum functions) or copy it (in case of fields) + + The test above is to ensure we don't do a reference for things + that are constants (PARAM_TABLE_BIT is in effect a constant) + or already referenced (for example an item in HAVING) + */ + uint el= fields.elements; + Item *new_item; + ref_pointer_array[el]= this; + if (!(new_item= new Item_ref(ref_pointer_array + el, 0, name))) + return; // fatal_error is set + fields.push_front(this); + ref_pointer_array[el]= this; + thd->change_item_tree(ref, new_item); + } +} + + +/* + Aggregate two collations together taking + into account their coercibility (aka derivation): + + 0 == DERIVATION_EXPLICIT - an explicitely written COLLATE clause + 1 == DERIVATION_NONE - a mix of two different collations + 2 == DERIVATION_IMPLICIT - a column + 3 == DERIVATION_COERCIBLE - a string constant + + The most important rules are: + + 1. If collations are the same: + chose this collation, and the strongest derivation. + + 2. If collations are different: + - Character sets may differ, but only if conversion without + data loss is possible. The caller provides flags whether + character set conversion attempts should be done. If no + flags are substituted, then the character sets must be the same. + Currently processed flags are: + MY_COLL_ALLOW_SUPERSET_CONV - allow conversion to a superset + MY_COLL_ALLOW_COERCIBLE_CONV - allow conversion of a coercible value + - two EXPLICIT collations produce an error, e.g. this is wrong: + CONCAT(expr1 collate latin1_swedish_ci, expr2 collate latin1_german_ci) + - the side with smaller derivation value wins, + i.e. a column is stronger than a string constant, + an explicit COLLATE clause is stronger than a column. + - if derivations are the same, we have DERIVATION_NONE, + we'll wait for an explicit COLLATE clause which possibly can + come from another argument later: for example, this is valid, + but we don't know yet when collecting the first two arguments: + CONCAT(latin1_swedish_ci_column, + latin1_german1_ci_column, + expr COLLATE latin1_german2_ci) +*/ +bool DTCollation::aggregate(DTCollation &dt, uint flags) +{ + if (!my_charset_same(collation, dt.collation)) + { + /* + We do allow to use binary strings (like BLOBS) + together with character strings. + Binaries have more precedance than a character + string of the same derivation. 
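
The derivation rules listed above, reduced to a toy aggregate() over a name/derivation pair; the character-set-conversion cases (the MY_COLL_ALLOW_*_CONV flags) are omitted and the types are illustrative stand-ins, not the server's DTCollation.

#include <cstring>

/* Lower value = stronger source, mirroring the numbering above. */
enum Derivation { DER_EXPLICIT= 0, DER_NONE= 1, DER_IMPLICIT= 2, DER_COERCIBLE= 3 };

struct Coll { const char *name; Derivation der; };

/* Returns false on an unresolvable mix (two different explicit COLLATEs). */
static bool aggregate(Coll &a, const Coll &b)
{
  if (!strcmp(a.name, b.name))
  {
    if (b.der < a.der)
      a.der= b.der;                    /* same collation: keep the strongest derivation */
    return true;
  }
  if (a.der == b.der)
  {
    if (a.der == DER_EXPLICIT)
      return false;                    /* CONCAT(x COLLATE c1, y COLLATE c2): illegal mix */
    a.name= "?"; a.der= DER_NONE;      /* undecided; a later explicit COLLATE may resolve it */
    return true;
  }
  if (b.der < a.der)
    a= b;                              /* the side with the smaller derivation value wins */
  return true;
}
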
+ */ + if (collation == &my_charset_bin) + { + if (derivation <= dt.derivation) + ; // Do nothing + else + { + set(dt); + } + } + else if (dt.collation == &my_charset_bin) + { + if (dt.derivation <= derivation) + { + set(dt); + } + else + ; // Do nothing + } + else if ((flags & MY_COLL_ALLOW_SUPERSET_CONV) && + collation->state & MY_CS_UNICODE && + (derivation < dt.derivation || + (derivation == dt.derivation && + !(dt.collation->state & MY_CS_UNICODE)))) + { + // Do nothing + } + else if ((flags & MY_COLL_ALLOW_SUPERSET_CONV) && + dt.collation->state & MY_CS_UNICODE && + (dt.derivation < derivation || + (dt.derivation == derivation && + !(collation->state & MY_CS_UNICODE)))) + { + set(dt); + } + else if ((flags & MY_COLL_ALLOW_COERCIBLE_CONV) && + derivation < dt.derivation && + dt.derivation >= DERIVATION_SYSCONST) + { + // Do nothing; + } + else if ((flags & MY_COLL_ALLOW_COERCIBLE_CONV) && + dt.derivation < derivation && + derivation >= DERIVATION_SYSCONST) + { + set(dt); + } + else + { + // Cannot apply conversion + set(0, DERIVATION_NONE); + return 1; + } + } + else if (derivation < dt.derivation) + { + // Do nothing + } + else if (dt.derivation < derivation) + { + set(dt); + } + else + { + if (collation == dt.collation) + { + // Do nothing + } + else + { + if (derivation == DERIVATION_EXPLICIT) + { + set(0, DERIVATION_NONE); + return 1; + } + if (collation->state & MY_CS_BINSORT) + { + return 0; + } + else if (dt.collation->state & MY_CS_BINSORT) + { + set(dt); + return 0; + } + CHARSET_INFO *bin= get_charset_by_csname(collation->csname, + MY_CS_BINSORT,MYF(0)); + set(bin, DERIVATION_NONE); + } + } + return 0; +} + +/******************************/ +static +void my_coll_agg_error(DTCollation &c1, DTCollation &c2, const char *fname) +{ + my_error(ER_CANT_AGGREGATE_2COLLATIONS,MYF(0), + c1.collation->name,c1.derivation_name(), + c2.collation->name,c2.derivation_name(), + fname); +} + + +static +void my_coll_agg_error(DTCollation &c1, DTCollation &c2, DTCollation &c3, + const char *fname) +{ + my_error(ER_CANT_AGGREGATE_3COLLATIONS,MYF(0), + c1.collation->name,c1.derivation_name(), + c2.collation->name,c2.derivation_name(), + c3.collation->name,c3.derivation_name(), + fname); +} + + +static +void my_coll_agg_error(Item** args, uint count, const char *fname) +{ + if (count == 2) + my_coll_agg_error(args[0]->collation, args[1]->collation, fname); + else if (count == 3) + my_coll_agg_error(args[0]->collation, args[1]->collation, + args[2]->collation, fname); + else + my_error(ER_CANT_AGGREGATE_NCOLLATIONS,MYF(0),fname); +} + + +bool agg_item_collations(DTCollation &c, const char *fname, + Item **av, uint count, uint flags) +{ + uint i; + c.set(av[0]->collation); + for (i= 1; i < count; i++) + { + if (c.aggregate(av[i]->collation, flags)) + { + my_coll_agg_error(av, count, fname); + return TRUE; + } + } + if ((flags & MY_COLL_DISALLOW_NONE) && + c.derivation == DERIVATION_NONE) + { + my_coll_agg_error(av, count, fname); + return TRUE; + } + return FALSE; } -Item_field::Item_field(Field *f) :Item_ident(NullS,f->table_name,f->field_name) +bool agg_item_collations_for_comparison(DTCollation &c, const char *fname, + Item **av, uint count, uint flags) { + return (agg_item_collations(c, fname, av, count, + flags | MY_COLL_DISALLOW_NONE)); +} + + +/* + Collect arguments' character sets together. + We allow to apply automatic character set conversion in some cases. 
+ The conditions when conversion is possible are: + - arguments A and B have different charsets + - A wins according to coercibility rules + (i.e. a column is stronger than a string constant, + an explicit COLLATE clause is stronger than a column) + - character set of A is either superset for character set of B, + or B is a string constant which can be converted into the + character set of A without data loss. + + If all of the above is true, then it's possible to convert + B into the character set of A, and then compare according + to the collation of A. + + For functions with more than two arguments: + + collect(A,B,C) ::= collect(collect(A,B),C) +*/ + +bool agg_item_charsets(DTCollation &coll, const char *fname, + Item **args, uint nargs, uint flags) +{ + Item **arg, **last, *safe_args[2]; + if (agg_item_collations(coll, fname, args, nargs, flags)) + return TRUE; + + /* + For better error reporting: save the first and the second argument. + We need this only if the the number of args is 3 or 2: + - for a longer argument list, "Illegal mix of collations" + doesn't display each argument's characteristics. + - if nargs is 1, then this error cannot happen. + */ + if (nargs >=2 && nargs <= 3) + { + safe_args[0]= args[0]; + safe_args[1]= args[1]; + } + + THD *thd= current_thd; + Item_arena *arena, backup; + bool res= FALSE; + /* + In case we're in statement prepare, create conversion item + in its memory: it will be reused on each execute. + */ + arena= thd->change_arena_if_needed(&backup); + + for (arg= args, last= args + nargs; arg < last; arg++) + { + Item* conv; + uint32 dummy_offset; + if (!String::needs_conversion(0, coll.collation, + (*arg)->collation.collation, + &dummy_offset)) + continue; + + if (!(conv= (*arg)->safe_charset_converter(coll.collation))) + { + if (nargs >=2 && nargs <= 3) + { + /* restore the original arguments for better error message */ + args[0]= safe_args[0]; + args[1]= safe_args[1]; + } + my_coll_agg_error(args, nargs, fname); + res= TRUE; + break; // we cannot return here, we need to restore "arena". + } + conv->fix_fields(thd, 0, &conv); + /* + If in statement prepare, then we create a converter for two + constant items, do it once and then reuse it. + If we're in execution of a prepared statement, arena is NULL, + and the conv was created in runtime memory. This can be + the case only if the argument is a parameter marker ('?'), + because for all true constants the charset converter has already + been created in prepare. In this case register the change for + rollback. + */ + if (arena) + *arg= conv; + else + thd->change_item_tree(arg, conv); + } + if (arena) + thd->restore_backup_item_arena(arena, &backup); + return res; +} + + + + +/**********************************************/ + +Item_field::Item_field(Field *f) + :Item_ident(NullS, f->table_name, f->field_name) +{ + set_field(f); + /* + field_name and talbe_name should not point to garbage + if this item is to be reused + */ + orig_table_name= orig_field_name= ""; +} + +Item_field::Item_field(THD *thd, Field *f) + :Item_ident(f->table->table_cache_key, f->table_name, f->field_name) +{ + /* + We always need to provide Item_field with a fully qualified field + name to avoid ambiguity when executing prepared statements like + SELECT * from d1.t1, d2.t1; (assuming d1.t1 and d2.t1 have columns + with same names). + This is because prepared statements never deal with wildcards in + select list ('*') and always fix fields using fully specified path + (i.e. db.table.column). 
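
A toy version of the conversion pass implemented above: once the collations have been aggregated, every argument whose character set still differs from the result is wrapped in a converter node. Arg, ConvertArg and wrap_args are illustrative stand-ins, not the server's Item or Item_func_conv_charset.

#include <cstring>

struct Arg
{
  const char *charset;
  Arg(const char *cs) : charset(cs) {}
  virtual ~Arg() {}
};

struct ConvertArg : public Arg           /* converts 'inner' to the target charset */
{
  Arg *inner;
  ConvertArg(Arg *a, const char *to) : Arg(to), inner(a) {}
};

static void wrap_args(Arg **args, unsigned nargs, const char *target_cs)
{
  for (unsigned i= 0; i < nargs; i++)
  {
    if (!strcmp(args[i]->charset, target_cs))
      continue;                          /* already comparable: leave as is */
    args[i]= new ConvertArg(args[i], target_cs);   /* compare/concat through the converter */
  }
}
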
+ No check for OOM: if db_name is NULL, we'll just get + "Field not found" error. + We need to copy db_name, table_name and field_name because they must + be allocated in the statement memory, not in table memory (the table + structure can go away and pop up again between subsequent executions + of a prepared statement). + */ + if (thd->current_arena->is_stmt_prepare()) + { + if (db_name) + orig_db_name= thd->strdup(db_name); + orig_table_name= thd->strdup(table_name); + orig_field_name= thd->strdup(field_name); + /* + We don't restore 'name' in cleanup because it's not changed + during execution. Still we need it to point to persistent + memory if this item is to be reused. + */ + name= (char*) orig_field_name; + } set_field(f); } +// Constructor need to process subselect with temporary tables (see Item) +Item_field::Item_field(THD *thd, Item_field *item) + :Item_ident(thd, item), + field(item->field), + result_field(item->result_field) +{ + collation.set(DERIVATION_IMPLICIT); +} void Item_field::set_field(Field *field_par) { field=result_field=field_par; // for easy coding with fields maybe_null=field->maybe_null(); - max_length=field_par->field_length; + max_length=field_par->max_length(); decimals= field->decimals(); table_name=field_par->table_name; field_name=field_par->field_name; - binary=field_par->binary(); + db_name=field_par->table->table_cache_key; unsigned_flag=test(field_par->flags & UNSIGNED_FLAG); + collation.set(field_par->charset(), DERIVATION_IMPLICIT); + fixed= 1; +} + + +/* + Reset this item to point to a field from the new temporary table. + This is used when we create a new temporary table for each execution + of prepared statement. +*/ + +void Item_field::reset_field(Field *f) +{ + set_field(f); + /* 'name' is pointing at field->field_name of old field */ + name= (char*) f->field_name; } const char *Item_ident::full_name() const { char *tmp; - if (!table_name) + if (!table_name || !field_name) return field_name ? field_name : name ? 
name : "tmp_field"; - if (db_name) + if (db_name && db_name[0]) { tmp=(char*) sql_alloc((uint) strlen(db_name)+(uint) strlen(table_name)+ (uint) strlen(field_name)+3); @@ -156,9 +804,14 @@ const char *Item_ident::full_name() const } else { - tmp=(char*) sql_alloc((uint) strlen(table_name)+ - (uint) strlen(field_name)+2); - strxmov(tmp,table_name,".",field_name,NullS); + if (table_name[0]) + { + tmp= (char*) sql_alloc((uint) strlen(table_name) + + (uint) strlen(field_name) + 2); + strxmov(tmp, table_name, ".", field_name, NullS); + } + else + tmp= (char*) field_name; } return tmp; } @@ -166,13 +819,16 @@ const char *Item_ident::full_name() const /* ARGSUSED */ String *Item_field::val_str(String *str) { + DBUG_ASSERT(fixed == 1); if ((null_value=field->is_null())) return 0; + str->set_charset(str_value.charset()); return field->val_str(str,&str_value); } double Item_field::val() { + DBUG_ASSERT(fixed == 1); if ((null_value=field->is_null())) return 0.0; return field->val_real(); @@ -180,6 +836,7 @@ double Item_field::val() longlong Item_field::val_int() { + DBUG_ASSERT(fixed == 1); if ((null_value=field->is_null())) return 0; return field->val_int(); @@ -190,10 +847,11 @@ String *Item_field::str_result(String *str) { if ((null_value=result_field->is_null())) return 0; + str->set_charset(str_value.charset()); return result_field->val_str(str,&str_value); } -bool Item_field::get_date(TIME *ltime,bool fuzzydate) +bool Item_field::get_date(TIME *ltime,uint fuzzydate) { if ((null_value=field->is_null()) || field->get_date(ltime,fuzzydate)) { @@ -203,7 +861,7 @@ bool Item_field::get_date(TIME *ltime,bool fuzzydate) return 0; } -bool Item_field::get_date_result(TIME *ltime,bool fuzzydate) +bool Item_field::get_date_result(TIME *ltime,uint fuzzydate) { if ((null_value=result_field->is_null()) || result_field->get_date(ltime,fuzzydate)) @@ -238,72 +896,741 @@ longlong Item_field::val_int_result() return result_field->val_int(); } + bool Item_field::eq(const Item *item, bool binary_cmp) const { - return item->type() == FIELD_ITEM && ((Item_field*) item)->field == field; + if (item->type() != FIELD_ITEM) + return 0; + + Item_field *item_field= (Item_field*) item; + if (item_field->field) + return item_field->field == field; + /* + We may come here when we are trying to find a function in a GROUP BY + clause from the select list. + In this case the '100 % correct' way to do this would be to first + run fix_fields() on the GROUP BY item and then retry this function, but + I think it's better to relax the checking a bit as we will in + most cases do the correct thing by just checking the field name. + (In cases where we would choose wrong we would have to generate a + ER_NON_UNIQ_ERROR). + */ + return (!my_strcasecmp(system_charset_info, item_field->name, + field_name) && + (!item_field->table_name || + (!my_strcasecmp(table_alias_charset, item_field->table_name, + table_name) && + (!item_field->db_name || + (item_field->db_name && !strcmp(item_field->db_name, + db_name)))))); } + table_map Item_field::used_tables() const { if (field->table->const_table) return 0; // const item - return field->table->map; + return (depended_from ? 
OUTER_REF_TABLE_BIT : field->table->map); +} + + +Item *Item_field::get_tmp_table_item(THD *thd) +{ + Item_field *new_item= new Item_field(thd, this); + if (new_item) + new_item->field= new_item->result_field; + return new_item; +} + + +/* + Create an item from a string we KNOW points to a valid longlong/ulonglong + end \0 terminated number string +*/ + +Item_int::Item_int(const char *str_arg, uint length) +{ + char *end_ptr= (char*) str_arg + length; + int error; + value= my_strtoll10(str_arg, &end_ptr, &error); + max_length= (uint) (end_ptr - str_arg); + name= (char*) str_arg; + fixed= 1; } String *Item_int::val_str(String *str) { - str->set(value); + // following assert is redundant, because fixed=1 assigned in constructor + DBUG_ASSERT(fixed == 1); + str->set(value, &my_charset_bin); return str; } void Item_int::print(String *str) { - if (!name) - { - str_value.set(value); - name=str_value.c_ptr(); - } - str->append(name); + // my_charset_bin is good enough for numbers + str_value.set(value, &my_charset_bin); + str->append(str_value); +} + + +Item_uint::Item_uint(const char *str_arg, uint length): + Item_int(str_arg, length) +{ + unsigned_flag= 1; +} + + +Item_uint::Item_uint(const char *str_arg, longlong i, uint length): + Item_int(str_arg, i, length) +{ + unsigned_flag= 1; } + String *Item_uint::val_str(String *str) { - str->set((ulonglong) value); + // following assert is redundant, because fixed=1 assigned in constructor + DBUG_ASSERT(fixed == 1); + str->set((ulonglong) value, &my_charset_bin); return str; } + void Item_uint::print(String *str) { - if (!name) - { - str_value.set((ulonglong) value); - name=str_value.c_ptr(); - } - str->append(name); + // latin1 is good enough for numbers + str_value.set((ulonglong) value, default_charset()); + str->append(str_value); } String *Item_real::val_str(String *str) { - str->set(value,decimals); + // following assert is redundant, because fixed=1 assigned in constructor + DBUG_ASSERT(fixed == 1); + str->set(value,decimals,&my_charset_bin); return str; } + void Item_string::print(String *str) { + str->append('_'); + str->append(collation.collation->csname); str->append('\''); - str->append(full_name()); + str_value.print(str); str->append('\''); } bool Item_null::eq(const Item *item, bool binary_cmp) const { return item->type() == type(); } -double Item_null::val() { null_value=1; return 0.0; } -longlong Item_null::val_int() { null_value=1; return 0; } +double Item_null::val() +{ + // following assert is redundant, because fixed=1 assigned in constructor + DBUG_ASSERT(fixed == 1); + null_value=1; + return 0.0; +} +longlong Item_null::val_int() +{ + // following assert is redundant, because fixed=1 assigned in constructor + DBUG_ASSERT(fixed == 1); + null_value=1; + return 0; +} /* ARGSUSED */ String *Item_null::val_str(String *str) -{ null_value=1; return 0;} +{ + // following assert is redundant, because fixed=1 assigned in constructor + DBUG_ASSERT(fixed == 1); + null_value=1; + return 0; +} + + +Item *Item_null::safe_charset_converter(CHARSET_INFO *tocs) +{ + collation.set(tocs); + return this; +} + +/*********************** Item_param related ******************************/ + +/* + Default function of Item_param::set_param_func, so in case + of malformed packet the server won't SIGSEGV +*/ + +static void +default_set_param_func(Item_param *param, + uchar **pos __attribute__((unused)), + ulong len __attribute__((unused))) +{ + param->set_null(); +} + +Item_param::Item_param(unsigned pos_in_query_arg) : + state(NO_VALUE), + 
item_result_type(STRING_RESULT), + /* Don't pretend to be a literal unless value for this item is set. */ + item_type(PARAM_ITEM), + param_type(MYSQL_TYPE_STRING), + pos_in_query(pos_in_query_arg), + set_param_func(default_set_param_func) +{ + name= (char*) "?"; + /* + Since we can't say whenever this item can be NULL or cannot be NULL + before mysql_stmt_execute(), so we assuming that it can be NULL until + value is set. + */ + maybe_null= 1; +} + +void Item_param::set_null() +{ + DBUG_ENTER("Item_param::set_null"); + /* These are cleared after each execution by reset() method */ + max_length= 0; + null_value= 1; + /* + Because of NULL and string values we need to set max_length for each new + placeholder value: user can submit NULL for any placeholder type, and + string length can be different in each execution. + */ + max_length= 0; + decimals= 0; + state= NULL_VALUE; + item_type= Item::NULL_ITEM; + DBUG_VOID_RETURN; +} + +void Item_param::set_int(longlong i, uint32 max_length_arg) +{ + DBUG_ENTER("Item_param::set_int"); + value.integer= (longlong) i; + state= INT_VALUE; + max_length= max_length_arg; + decimals= 0; + maybe_null= 0; + DBUG_VOID_RETURN; +} + +void Item_param::set_double(double d) +{ + DBUG_ENTER("Item_param::set_double"); + value.real= d; + state= REAL_VALUE; + max_length= DBL_DIG + 8; + decimals= NOT_FIXED_DEC; + maybe_null= 0; + DBUG_VOID_RETURN; +} + + +/* + Set parameter value from TIME value. + + SYNOPSIS + set_time() + tm - datetime value to set (time_type is ignored) + type - type of datetime value + max_length_arg - max length of datetime value as string + + NOTE + If we value to be stored is not normalized, zero value will be stored + instead and proper warning will be produced. This function relies on + the fact that even wrong value sent over binary protocol fits into + MAX_DATE_STRING_REP_LENGTH buffer. +*/ +void Item_param::set_time(TIME *tm, timestamp_type type, uint32 max_length_arg) +{ + DBUG_ENTER("Item_param::set_time"); + + value.time= *tm; + value.time.time_type= type; + + if (value.time.year > 9999 || value.time.month > 12 || + value.time.day > 31 || + type != MYSQL_TIMESTAMP_TIME && value.time.hour > 23 || + value.time.minute > 59 || value.time.second > 59) + { + char buff[MAX_DATE_STRING_REP_LENGTH]; + uint length= my_TIME_to_str(&value.time, buff); + make_truncated_value_warning(current_thd, buff, length, type); + set_zero_time(&value.time, MYSQL_TIMESTAMP_ERROR); + } + + state= TIME_VALUE; + maybe_null= 0; + max_length= max_length_arg; + decimals= 0; + DBUG_VOID_RETURN; +} + + +bool Item_param::set_str(const char *str, ulong length) +{ + DBUG_ENTER("Item_param::set_str"); + /* + Assign string with no conversion: data is converted only after it's + been written to the binary log. + */ + uint dummy_errors; + if (str_value.copy(str, length, &my_charset_bin, &my_charset_bin, + &dummy_errors)) + DBUG_RETURN(TRUE); + state= STRING_VALUE; + maybe_null= 0; + /* max_length and decimals are set after charset conversion */ + /* sic: str may be not null-terminated, don't add DBUG_PRINT here */ + DBUG_RETURN(FALSE); +} + + +bool Item_param::set_longdata(const char *str, ulong length) +{ + DBUG_ENTER("Item_param::set_longdata"); + + /* + If client character set is multibyte, end of long data packet + may hit at the middle of a multibyte character. Additionally, + if binary log is open we must write long data value to the + binary log in character set of client. 
This is why we can't + convert long data to connection character set as it comes + (here), and first have to concatenate all pieces together, + write query to the binary log and only then perform conversion. + */ + if (str_value.append(str, length, &my_charset_bin)) + DBUG_RETURN(TRUE); + state= LONG_DATA_VALUE; + maybe_null= 0; + + DBUG_RETURN(FALSE); +} + + +/* + Set parameter value from user variable value. + + SYNOPSIS + set_from_user_var + thd Current thread + entry User variable structure (NULL means use NULL value) + + RETURN + 0 OK + 1 Out of memort +*/ + +bool Item_param::set_from_user_var(THD *thd, const user_var_entry *entry) +{ + DBUG_ENTER("Item_param::set_from_user_var"); + if (entry && entry->value) + { + item_result_type= entry->type; + switch (entry->type) { + case REAL_RESULT: + set_double(*(double*)entry->value); + item_type= Item::REAL_ITEM; + item_result_type= REAL_RESULT; + break; + case INT_RESULT: + set_int(*(longlong*)entry->value, 21); + item_type= Item::INT_ITEM; + item_result_type= INT_RESULT; + break; + case STRING_RESULT: + { + CHARSET_INFO *fromcs= entry->collation.collation; + CHARSET_INFO *tocs= thd->variables.collation_connection; + uint32 dummy_offset; + + value.cs_info.character_set_of_placeholder= fromcs; + /* + Setup source and destination character sets so that they + are different only if conversion is necessary: this will + make later checks easier. + */ + value.cs_info.final_character_set_of_str_value= + String::needs_conversion(0, fromcs, tocs, &dummy_offset) ? + tocs : fromcs; + /* + Exact value of max_length is not known unless data is converted to + charset of connection, so we have to set it later. + */ + item_type= Item::STRING_ITEM; + item_result_type= STRING_RESULT; + + if (set_str((const char *)entry->value, entry->length)) + DBUG_RETURN(1); + break; + } + default: + DBUG_ASSERT(0); + set_null(); + } + } + else + set_null(); + + DBUG_RETURN(0); +} + +/* + Resets parameter after execution. + + SYNOPSIS + Item_param::reset() + + NOTES + We clear null_value here instead of setting it in set_* methods, + because we want more easily handle case for long data. +*/ + +void Item_param::reset() +{ + /* Shrink string buffer if it's bigger than max possible CHAR column */ + if (str_value.alloced_length() > MAX_CHAR_WIDTH) + str_value.free(); + else + str_value.length(0); + str_value_ptr.length(0); + /* + We must prevent all charset conversions untill data has been written + to the binary log. + */ + str_value.set_charset(&my_charset_bin); + collation.set(&my_charset_bin, DERIVATION_COERCIBLE); + state= NO_VALUE; + maybe_null= 1; + null_value= 0; + /* + Don't reset item_type to PARAM_ITEM: it's only needed to guard + us from item optimizations at prepare stage, when item doesn't yet + contain a literal of some kind. + In all other cases when this object is accessed its value is + set (this assumption is guarded by 'state' and + DBUG_ASSERTS(state != NO_VALUE) in all Item_param::get_* + methods). 
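
    An illustrative sketch of one execution cycle (param is the Item_param
    created for a '?' placeholder; the methods are the ones defined in this
    file):

      param->set_int(10, 21);          // state= INT_VALUE, maybe_null= 0
      s= param->query_val_str(&buf);   // "10" goes into the binlog query
      param->reset();                  // state= NO_VALUE for the next execute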
+ */ +} + + +int Item_param::save_in_field(Field *field, bool no_conversions) +{ + field->set_notnull(); + + switch (state) { + case INT_VALUE: + return field->store(value.integer); + case REAL_VALUE: + return field->store(value.real); + case TIME_VALUE: + field->store_time(&value.time, value.time.time_type); + return 0; + case STRING_VALUE: + case LONG_DATA_VALUE: + return field->store(str_value.ptr(), str_value.length(), + str_value.charset()); + case NULL_VALUE: + return set_field_to_null_with_conversions(field, no_conversions); + case NO_VALUE: + default: + DBUG_ASSERT(0); + } + return 1; +} + + +bool Item_param::get_time(TIME *res) +{ + if (state == TIME_VALUE) + { + *res= value.time; + return 0; + } + /* + If parameter value isn't supplied assertion will fire in val_str() + which is called from Item::get_time(). + */ + return Item::get_time(res); +} + + +bool Item_param::get_date(TIME *res, uint fuzzydate) +{ + if (state == TIME_VALUE) + { + *res= value.time; + return 0; + } + return Item::get_date(res, fuzzydate); +} + + +double Item_param::val() +{ + switch (state) { + case REAL_VALUE: + return value.real; + case INT_VALUE: + return (double) value.integer; + case STRING_VALUE: + case LONG_DATA_VALUE: + { + int dummy_err; + char *end_not_used; + return my_strntod(str_value.charset(), (char*) str_value.ptr(), + str_value.length(), &end_not_used, &dummy_err); + } + case TIME_VALUE: + /* + This works for example when user says SELECT ?+0.0 and supplies + time value for the placeholder. + */ + return ulonglong2double(TIME_to_ulonglong(&value.time)); + case NULL_VALUE: + return 0.0; + default: + DBUG_ASSERT(0); + } + return 0.0; +} + + +longlong Item_param::val_int() +{ + switch (state) { + case REAL_VALUE: + return (longlong) (value.real + (value.real > 0 ? 0.5 : -0.5)); + case INT_VALUE: + return value.integer; + case STRING_VALUE: + case LONG_DATA_VALUE: + { + int dummy_err; + return my_strntoll(str_value.charset(), str_value.ptr(), + str_value.length(), 10, (char**) 0, &dummy_err); + } + case TIME_VALUE: + return (longlong) TIME_to_ulonglong(&value.time); + case NULL_VALUE: + return 0; + default: + DBUG_ASSERT(0); + } + return 0; +} + + +String *Item_param::val_str(String* str) +{ + switch (state) { + case STRING_VALUE: + case LONG_DATA_VALUE: + return &str_value_ptr; + case REAL_VALUE: + str->set(value.real, NOT_FIXED_DEC, &my_charset_bin); + return str; + case INT_VALUE: + str->set(value.integer, &my_charset_bin); + return str; + case TIME_VALUE: + { + if (str->reserve(MAX_DATE_STRING_REP_LENGTH)) + break; + str->length((uint) my_TIME_to_str(&value.time, (char*) str->ptr())); + str->set_charset(&my_charset_bin); + return str; + } + case NULL_VALUE: + return NULL; + default: + DBUG_ASSERT(0); + } + return str; +} + +/* + Return Param item values in string format, for generating the dynamic + query used in update/binary logs + TODO: change interface and implementation to fill log data in place + and avoid one more memcpy/alloc between str and log string. 
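
  Illustrative results by state (the values are examples only):

    INT_VALUE     10            ->  10
    REAL_VALUE    3.5           ->  3.5
    STRING_VALUE  ab'c          ->  'ab\'c'   (or a hex literal when the
                                    client charset makes backslash escaping
                                    dangerous)
    TIME_VALUE    2004-02-29    ->  '2004-02-29'
    NULL_VALUE                  ->  NULL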
+*/ + +const String *Item_param::query_val_str(String* str) const +{ + switch (state) { + case INT_VALUE: + str->set(value.integer, &my_charset_bin); + break; + case REAL_VALUE: + str->set(value.real, NOT_FIXED_DEC, &my_charset_bin); + break; + case TIME_VALUE: + { + char *buf, *ptr; + str->length(0); + /* + TODO: in case of error we need to notify replication + that binary log contains wrong statement + */ + if (str->reserve(MAX_DATE_STRING_REP_LENGTH+3)) + break; + + /* Create date string inplace */ + buf= str->c_ptr_quick(); + ptr= buf; + *ptr++= '\''; + ptr+= (uint) my_TIME_to_str(&value.time, ptr); + *ptr++= '\''; + str->length((uint32) (ptr - buf)); + break; + } + case STRING_VALUE: + case LONG_DATA_VALUE: + { + char *buf, *ptr; + str->length(0); + if (str->reserve(str_value.length()*2+3)) + break; + + buf= str->c_ptr_quick(); + ptr= buf; + if (value.cs_info.character_set_client->escape_with_backslash_is_dangerous) + { + ptr= str_to_hex(ptr, str_value.ptr(), str_value.length()); + } + else + { + *ptr++= '\''; + ptr+= escape_string_for_mysql(str_value.charset(), ptr, + str_value.ptr(), str_value.length()); + *ptr++='\''; + } + str->length(ptr - buf); + break; + } + case NULL_VALUE: + return &my_null_string; + default: + DBUG_ASSERT(0); + } + return str; +} + + +/* + Convert string from client character set to the character set of + connection. +*/ + +bool Item_param::convert_str_value(THD *thd) +{ + bool rc= FALSE; + if (state == STRING_VALUE || state == LONG_DATA_VALUE) + { + /* + Check is so simple because all charsets were set up properly + in setup_one_conversion_function, where typecode of + placeholder was also taken into account: the variables are different + here only if conversion is really necessary. + */ + if (value.cs_info.final_character_set_of_str_value != + value.cs_info.character_set_of_placeholder) + { + rc= thd->convert_string(&str_value, + value.cs_info.character_set_of_placeholder, + value.cs_info.final_character_set_of_str_value); + } + else + str_value.set_charset(value.cs_info.final_character_set_of_str_value); + /* Here str_value is guaranteed to be in final_character_set_of_str_value */ + + max_length= str_value.length(); + decimals= 0; + /* + str_value_ptr is returned from val_str(). It must be not alloced + to prevent it's modification by val_str() invoker. + */ + str_value_ptr.set(str_value.ptr(), str_value.length(), + str_value.charset()); + /* Synchronize item charset with value charset */ + collation.set(str_value.charset(), DERIVATION_COERCIBLE); + } + return rc; +} + + +bool Item_param::basic_const_item() const +{ + if (state == NO_VALUE || state == TIME_VALUE) + return FALSE; + return TRUE; +} + +Item * +Item_param::new_item() +{ + /* see comments in the header file */ + switch (state) { + case NULL_VALUE: + return new Item_null(name); + case INT_VALUE: + return (unsigned_flag ? + new Item_uint(name, value.integer, max_length) : + new Item_int(name, value.integer, max_length)); + case REAL_VALUE: + return new Item_real(name, value.real, decimals, max_length); + case STRING_VALUE: + case LONG_DATA_VALUE: + return new Item_string(name, str_value.c_ptr_quick(), str_value.length(), + str_value.charset()); + case TIME_VALUE: + break; + case NO_VALUE: + default: + DBUG_ASSERT(0); + }; + return 0; +} + + +bool +Item_param::eq(const Item *arg, bool binary_cmp) const +{ + Item *item; + if (!basic_const_item() || !arg->basic_const_item() || arg->type() != type()) + return FALSE; + /* + We need to cast off const to call val_int(). 
This should be OK for + a basic constant. + */ + item= (Item*) arg; + + switch (state) { + case NULL_VALUE: + return TRUE; + case INT_VALUE: + return value.integer == item->val_int() && + unsigned_flag == item->unsigned_flag; + case REAL_VALUE: + return value.real == item->val(); + case STRING_VALUE: + case LONG_DATA_VALUE: + if (binary_cmp) + return !stringcmp(&str_value, &item->str_value); + return !sortcmp(&str_value, &item->str_value, collation.collation); + default: + break; + } + return FALSE; +} + +/* End of Item_param related */ void Item_copy_string::copy() @@ -317,69 +1644,327 @@ void Item_copy_string::copy() /* ARGSUSED */ String *Item_copy_string::val_str(String *str) { + // Item_copy_string is used without fix_fields call if (null_value) return (String*) 0; return &str_value; } -bool Item_copy_string::save_in_field(Field *field, bool no_conversions) + +int Item_copy_string::save_in_field(Field *field, bool no_conversions) { if (null_value) return set_field_to_null(field); field->set_notnull(); - field->store(str_value.ptr(), str_value.length()); - return 0; + return field->store(str_value.ptr(),str_value.length(), + collation.collation); } /* -** Functions to convert item to field (for send_fields) + Functions to convert item to field (for send_fields) */ /* ARGSUSED */ bool Item::fix_fields(THD *thd, - struct st_table_list *list) + struct st_table_list *list, + Item ** ref) { + + // We do not check fields which are fixed during construction + DBUG_ASSERT(fixed == 0 || basic_const_item()); + fixed= 1; return 0; } -bool Item_field::fix_fields(THD *thd,TABLE_LIST *tables) +double Item_ref_null_helper::val() +{ + DBUG_ASSERT(fixed == 1); + double tmp= (*ref)->val_result(); + owner->was_null|= null_value= (*ref)->null_value; + return tmp; +} + + +longlong Item_ref_null_helper::val_int() +{ + DBUG_ASSERT(fixed == 1); + longlong tmp= (*ref)->val_int_result(); + owner->was_null|= null_value= (*ref)->null_value; + return tmp; +} + + +String* Item_ref_null_helper::val_str(String* s) { + DBUG_ASSERT(fixed == 1); + String* tmp= (*ref)->str_result(s); + owner->was_null|= null_value= (*ref)->null_value; + return tmp; +} + + +bool Item_ref_null_helper::get_date(TIME *ltime, uint fuzzydate) +{ + return (owner->was_null|= null_value= (*ref)->get_date(ltime, fuzzydate)); +} + + +/* + Mark item and SELECT_LEXs as dependent if it is not outer resolving + + SYNOPSIS + mark_as_dependent() + thd - thread handler + last - select from which current item depend + current - current select + item - item which should be marked +*/ + +static void mark_as_dependent(THD *thd, SELECT_LEX *last, SELECT_LEX *current, + Item_ident *item) +{ + // store pointer on SELECT_LEX from which item is dependent + item->depended_from= last; + current->mark_as_dependent(last); + if (thd->lex->describe & DESCRIBE_EXTENDED) + { + char warn_buff[MYSQL_ERRMSG_SIZE]; + sprintf(warn_buff, ER(ER_WARN_FIELD_RESOLVED), + (item->db_name?item->db_name:""), (item->db_name?".":""), + (item->table_name?item->table_name:""), (item->table_name?".":""), + item->field_name, + current->select_number, last->select_number); + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_WARN_FIELD_RESOLVED, warn_buff); + } +} + + +bool Item_field::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) +{ + enum_parsing_place place= NO_MATTER; + DBUG_ASSERT(fixed == 0); if (!field) // If field is not checked { - Field *tmp; - if (!(tmp=find_field_in_tables(thd,this,tables))) + TABLE_LIST *where= 0; + bool upward_lookup= 0; + Field *tmp= (Field 
*)not_found_field; + if ((tmp= find_field_in_tables(thd, this, tables, &where, 0)) == + not_found_field) { - if (thd->lex.select_lex.is_item_list_lookup) + /* Look up in current select's item_list to find aliased fields */ + if (thd->lex->current_select->is_item_list_lookup) { - Item **res= find_item_in_list(this, thd->lex.select_lex.item_list); - if (res && (*res)->type() == Item::FIELD_ITEM) + uint counter; + bool not_used; + Item** res= find_item_in_list(this, thd->lex->current_select->item_list, + &counter, REPORT_EXCEPT_NOT_FOUND, + ¬_used); + if (res != (Item **)not_found_item && (*res)->type() == Item::FIELD_ITEM) { set_field((*((Item_field**)res))->field); return 0; } } - return 1; + + /* + We can't find table field in table list of current select, + consequently we have to find it in outer subselect(s). + We can't join lists of outer & current select, because of scope + of view rules. For example if both tables (outer & current) have + field 'field' it is not mistake to refer to this field without + mention of table name, but if we join tables in one list it will + cause error ER_NON_UNIQ_ERROR in find_field_in_tables. + */ + SELECT_LEX *last= 0; +#ifdef EMBEDDED_LIBRARY + thd->net.last_errno= 0; +#endif + TABLE_LIST *table_list; + Item **refer= (Item **)not_found_item; + uint counter; + bool not_used; + // Prevent using outer fields in subselects, that is not supported now + SELECT_LEX *cursel= (SELECT_LEX *) thd->lex->current_select; + if (cursel->master_unit()->first_select()->linkage != DERIVED_TABLE_TYPE) + { + SELECT_LEX_UNIT *prev_unit= cursel->master_unit(); + for (SELECT_LEX *sl= prev_unit->outer_select(); + sl; + sl= (prev_unit= sl->master_unit())->outer_select()) + { + upward_lookup= 1; + table_list= (last= sl)->get_table_list(); + if (sl->resolve_mode == SELECT_LEX::INSERT_MODE && table_list) + { + /* + it is primary INSERT st_select_lex => skip first table + resolving + */ + table_list= table_list->next; + } + + Item_subselect *prev_subselect_item= prev_unit->item; + place= prev_subselect_item->parsing_place; + /* + check table fields only if subquery used somewhere out of HAVING + or outer SELECT do not use groupping (i.e. 
tables are + accessable) + */ + if ((place != IN_HAVING || + (sl->with_sum_func == 0 && sl->group_list.elements == 0)) && + (tmp= find_field_in_tables(thd, this, + table_list, &where, + 0)) != not_found_field) + { + if (!tmp) + return -1; + prev_subselect_item->used_tables_cache|= tmp->table->map; + prev_subselect_item->const_item_cache= 0; + break; + } + if (sl->resolve_mode == SELECT_LEX::SELECT_MODE && + (refer= find_item_in_list(this, sl->item_list, &counter, + REPORT_EXCEPT_NOT_FOUND, + ¬_used)) != + (Item **) not_found_item) + { + if (refer && (*refer)->fixed) // Avoid crash in case of error + { + prev_subselect_item->used_tables_cache|= (*refer)->used_tables(); + prev_subselect_item->const_item_cache&= (*refer)->const_item(); + } + break; + } + + // Reference is not found => depend from outer (or just error) + prev_subselect_item->used_tables_cache|= OUTER_REF_TABLE_BIT; + prev_subselect_item->const_item_cache= 0; + + if (sl->master_unit()->first_select()->linkage == + DERIVED_TABLE_TYPE) + break; // do not look over derived table + } + } + if (!tmp) + return -1; + else if (!refer) + return 1; + else if (tmp == not_found_field && refer == (Item **)not_found_item) + { + if (upward_lookup) + { + // We can't say exactly what absend table or field + my_printf_error(ER_BAD_FIELD_ERROR, ER(ER_BAD_FIELD_ERROR), MYF(0), + full_name(), thd->where); + } + else + { + // Call to report error + find_field_in_tables(thd, this, tables, &where, 1); + } + return -1; + } + else if (refer != (Item **)not_found_item) + { + if (!last->ref_pointer_array[counter]) + { + my_error(ER_ILLEGAL_REFERENCE, MYF(0), name, + "forward reference in item list"); + return -1; + } + DBUG_ASSERT((*refer)->fixed); + /* + Here, a subset of actions performed by Item_ref::set_properties + is not enough. So we pass ptr to NULL into Item_[direct]_ref + constructor, so no initialization is performed, and call + fix_fields() below. + */ + Item *save= last->ref_pointer_array[counter]; + last->ref_pointer_array[counter]= NULL; + Item_ref *rf= (place == IN_HAVING ? + new Item_ref(last->ref_pointer_array + counter, + (char *)table_name, + (char *)field_name) : + new Item_direct_ref(last->ref_pointer_array + counter, + (char *)table_name, + (char *)field_name)); + if (!rf) + return 1; + thd->change_item_tree(ref, rf); + last->ref_pointer_array[counter]= save; + /* + rf is Item_ref => never substitute other items (in this case) + during fix_fields() => we can use rf after fix_fields() + */ + if (rf->fix_fields(thd, tables, ref) || rf->check_cols(1)) + return 1; + + mark_as_dependent(thd, last, cursel, rf); + return 0; + } + else + { + mark_as_dependent(thd, last, cursel, this); + if (last->having_fix_field) + { + Item_ref *rf; + rf= new Item_ref((where->db[0] ? 
where->db : 0), + (char*) where->alias, (char*) field_name); + if (!rf) + return 1; + thd->change_item_tree(ref, rf); + /* + rf is Item_ref => never substitute other items (in this case) + during fix_fields() => we can use rf after fix_fields() + */ + return rf->fix_fields(thd, tables, ref) || rf->check_cols(1); + } + } } + else if (!tmp) + return -1; + set_field(tmp); } - else if (thd && thd->set_query_id && field->query_id != thd->query_id) + else if (thd->set_query_id && field->query_id != thd->query_id) { /* We only come here in unions */ TABLE *table=field->table; field->query_id=thd->query_id; table->used_fields++; - table->used_keys&=field->part_of_key; + table->used_keys.intersect(field->part_of_key); + fixed= 1; } return 0; } +void Item_field::cleanup() +{ + DBUG_ENTER("Item_field::cleanup"); + Item_ident::cleanup(); + /* + Even if this object was created by direct link to field in setup_wild() + it will be linked correctly next tyme by name of field and table alias. + I.e. we can drop 'field'. + */ + field= result_field= 0; + DBUG_VOID_RETURN; +} void Item::init_make_field(Send_field *tmp_field, enum enum_field_types field_type) { - tmp_field->table_name=(char*) ""; - tmp_field->col_name=name; - tmp_field->flags=maybe_null ? 0 : NOT_NULL_FLAG; + char *empty_name= (char*) ""; + tmp_field->db_name= empty_name; + tmp_field->org_table_name= empty_name; + tmp_field->org_col_name= empty_name; + tmp_field->table_name= empty_name; + tmp_field->col_name= name; + tmp_field->charsetnr= collation.collation->number; + tmp_field->flags= (maybe_null ? 0 : NOT_NULL_FLAG) | + (my_binary_compare(collation.collation) ? + BINARY_FLAG : 0); tmp_field->type=field_type; tmp_field->length=max_length; tmp_field->decimals=decimals; @@ -387,77 +1972,120 @@ void Item::init_make_field(Send_field *tmp_field, tmp_field->flags |= UNSIGNED_FLAG; } -/* ARGSUSED */ -void Item_field::make_field(Send_field *tmp_field) +void Item::make_field(Send_field *tmp_field) { - field->make_field(tmp_field); - if (name) - tmp_field->col_name=name; // Use user supplied name + init_make_field(tmp_field, field_type()); } -void Item_int::make_field(Send_field *tmp_field) -{ - init_make_field(tmp_field,FIELD_TYPE_LONGLONG); -} - -void Item_uint::make_field(Send_field *tmp_field) -{ - init_make_field(tmp_field,FIELD_TYPE_LONGLONG); - tmp_field->flags|= UNSIGNED_FLAG; - unsigned_flag=1; -} - -void Item_real::make_field(Send_field *tmp_field) -{ - init_make_field(tmp_field,FIELD_TYPE_DOUBLE); -} - -void Item_string::make_field(Send_field *tmp_field) -{ - init_make_field(tmp_field,FIELD_TYPE_STRING); -} void Item_empty_string::make_field(Send_field *tmp_field) { init_make_field(tmp_field,FIELD_TYPE_VAR_STRING); } -void Item_datetime::make_field(Send_field *tmp_field) + +enum_field_types Item::field_type() const { - init_make_field(tmp_field,FIELD_TYPE_DATETIME); + return ((result_type() == STRING_RESULT) ? FIELD_TYPE_VAR_STRING : + (result_type() == INT_RESULT) ? FIELD_TYPE_LONGLONG : + FIELD_TYPE_DOUBLE); } -void Item_null::make_field(Send_field *tmp_field) +Field *Item::tmp_table_field_from_field_type(TABLE *table) { - init_make_field(tmp_field,FIELD_TYPE_NULL); - tmp_field->length=4; -} + /* + The field functions defines a field to be not null if null_ptr is not 0 + */ + uchar *null_ptr= maybe_null ? 
(uchar*) "" : 0; + switch (field_type()) { + case MYSQL_TYPE_DECIMAL: + return new Field_decimal((char*) 0, max_length, null_ptr, 0, Field::NONE, + name, table, decimals, 0, unsigned_flag); + case MYSQL_TYPE_TINY: + return new Field_tiny((char*) 0, max_length, null_ptr, 0, Field::NONE, + name, table, 0, unsigned_flag); + case MYSQL_TYPE_SHORT: + return new Field_short((char*) 0, max_length, null_ptr, 0, Field::NONE, + name, table, 0, unsigned_flag); + case MYSQL_TYPE_LONG: + return new Field_long((char*) 0, max_length, null_ptr, 0, Field::NONE, + name, table, 0, unsigned_flag); +#ifdef HAVE_LONG_LONG + case MYSQL_TYPE_LONGLONG: + return new Field_longlong((char*) 0, max_length, null_ptr, 0, Field::NONE, + name, table, 0, unsigned_flag); +#endif + case MYSQL_TYPE_FLOAT: + return new Field_float((char*) 0, max_length, null_ptr, 0, Field::NONE, + name, table, decimals, 0, unsigned_flag); + case MYSQL_TYPE_DOUBLE: + return new Field_double((char*) 0, max_length, null_ptr, 0, Field::NONE, + name, table, decimals, 0, unsigned_flag); + case MYSQL_TYPE_NULL: + return new Field_null((char*) 0, max_length, Field::NONE, + name, table, &my_charset_bin); + case MYSQL_TYPE_INT24: + return new Field_medium((char*) 0, max_length, null_ptr, 0, Field::NONE, + name, table, 0, unsigned_flag); + case MYSQL_TYPE_NEWDATE: + case MYSQL_TYPE_DATE: + return new Field_date(maybe_null, name, table, &my_charset_bin); + case MYSQL_TYPE_TIME: + return new Field_time(maybe_null, name, table, &my_charset_bin); + case MYSQL_TYPE_TIMESTAMP: + case MYSQL_TYPE_DATETIME: + return new Field_datetime(maybe_null, name, table, &my_charset_bin); + case MYSQL_TYPE_YEAR: + return new Field_year((char*) 0, max_length, null_ptr, 0, Field::NONE, + name, table); + default: + /* This case should never be choosen */ + DBUG_ASSERT(0); + /* If something goes awfully wrong, it's better to get a string than die */ + case MYSQL_TYPE_ENUM: + case MYSQL_TYPE_SET: + case MYSQL_TYPE_VAR_STRING: + DBUG_ASSERT(collation.collation); + if (max_length/collation.collation->mbmaxlen > 255) + break; // If blob + return new Field_varstring(max_length, maybe_null, name, table, + collation.collation); + case MYSQL_TYPE_STRING: + DBUG_ASSERT(collation.collation); + if (max_length/collation.collation->mbmaxlen > 255) // If blob + break; + return new Field_string(max_length, maybe_null, name, table, + collation.collation); + case MYSQL_TYPE_TINY_BLOB: + case MYSQL_TYPE_MEDIUM_BLOB: + case MYSQL_TYPE_LONG_BLOB: + case MYSQL_TYPE_BLOB: + case MYSQL_TYPE_GEOMETRY: + break; // Blob handled outside of case + } -void Item_func::make_field(Send_field *tmp_field) -{ - init_make_field(tmp_field, ((result_type() == STRING_RESULT) ? - FIELD_TYPE_VAR_STRING : - (result_type() == INT_RESULT) ? 
- FIELD_TYPE_LONGLONG : FIELD_TYPE_DOUBLE)); + /* blob is special as it's generated for both blobs and long strings */ + return new Field_blob(max_length, maybe_null, name, table, + collation.collation); } -void Item_avg_field::make_field(Send_field *tmp_field) -{ - init_make_field(tmp_field,FIELD_TYPE_DOUBLE); -} -void Item_std_field::make_field(Send_field *tmp_field) +/* ARGSUSED */ +void Item_field::make_field(Send_field *tmp_field) { - init_make_field(tmp_field,FIELD_TYPE_DOUBLE); + field->make_field(tmp_field); + DBUG_ASSERT(tmp_field->table_name); + if (name) + tmp_field->col_name=name; // Use user supplied name } + /* -** Set a field:s value from a item + Set a field:s value from a item */ - void Item_field::save_org_in_field(Field *to) { if (field->is_null()) @@ -473,7 +2101,7 @@ void Item_field::save_org_in_field(Field *to) } } -bool Item_field::save_in_field(Field *to, bool no_conversions) +int Item_field::save_in_field(Field *to, bool no_conversions) { if (result_field->is_null()) { @@ -489,6 +2117,7 @@ bool Item_field::save_in_field(Field *to, bool no_conversions) return 0; } + /* Store null in field @@ -505,7 +2134,7 @@ bool Item_field::save_in_field(Field *to, bool no_conversions) 1 Field doesn't support NULL values and can't handle 'field = NULL' */ -bool Item_null::save_in_field(Field *field, bool no_conversions) +int Item_null::save_in_field(Field *field, bool no_conversions) { return set_field_to_null_with_conversions(field, no_conversions); } @@ -523,30 +2152,32 @@ bool Item_null::save_in_field(Field *field, bool no_conversions) 1 Field doesn't support NULL values */ -bool Item_null::save_safe_in_field(Field *field) +int Item_null::save_safe_in_field(Field *field) { return set_field_to_null(field); } -bool Item::save_in_field(Field *field, bool no_conversions) +int Item::save_in_field(Field *field, bool no_conversions) { + int error; if (result_type() == STRING_RESULT || result_type() == REAL_RESULT && field->result_type() == STRING_RESULT) { String *result; + CHARSET_INFO *cs= collation.collation; char buff[MAX_FIELD_WIDTH]; // Alloc buffer for small columns - str_value.set_quick(buff,sizeof(buff)); + str_value.set_quick(buff, sizeof(buff), cs); result=val_str(&str_value); if (null_value) { - str_value.set_quick(0, 0); + str_value.set_quick(0, 0, cs); return set_field_to_null_with_conversions(field, no_conversions); } field->set_notnull(); - field->store(result->ptr(),result->length()); - str_value.set_quick(0, 0); + error=field->store(result->ptr(),result->length(),cs); + str_value.set_quick(0, 0, cs); } else if (result_type() == REAL_RESULT) { @@ -554,7 +2185,7 @@ bool Item::save_in_field(Field *field, bool no_conversions) if (null_value) return set_field_to_null(field); field->set_notnull(); - field->store(nr); + error=field->store(nr); } else { @@ -562,42 +2193,98 @@ bool Item::save_in_field(Field *field, bool no_conversions) if (null_value) return set_field_to_null_with_conversions(field, no_conversions); field->set_notnull(); - field->store(nr); + error=field->store(nr); } - return 0; + return error; } -bool Item_string::save_in_field(Field *field, bool no_conversions) +int Item_string::save_in_field(Field *field, bool no_conversions) { String *result; result=val_str(&str_value); if (null_value) return set_field_to_null(field); field->set_notnull(); - field->store(result->ptr(),result->length()); - return 0; + return field->store(result->ptr(),result->length(),collation.collation); +} + +int Item_uint::save_in_field(Field *field, bool no_conversions) +{ + /* + 
TODO: To be fixed when wen have a + field->store(longlong, unsigned_flag) method + */ + return Item_int::save_in_field(field, no_conversions); } -bool Item_int::save_in_field(Field *field, bool no_conversions) +int Item_int::save_in_field(Field *field, bool no_conversions) { longlong nr=val_int(); if (null_value) return set_field_to_null(field); field->set_notnull(); - field->store(nr); - return 0; + return field->store(nr); +} + + +bool Item_int::eq(const Item *arg, bool binary_cmp) const +{ + /* No need to check for null value as basic constant can't be NULL */ + if (arg->basic_const_item() && arg->type() == type()) + { + /* + We need to cast off const to call val_int(). This should be OK for + a basic constant. + */ + Item *item= (Item*) arg; + return item->val_int() == value && item->unsigned_flag == unsigned_flag; + } + return FALSE; +} + + +Item *Item_int_with_ref::new_item() +{ + DBUG_ASSERT(ref->const_item()); + /* + We need to evaluate the constant to make sure it works with + parameter markers. + */ + return (ref->unsigned_flag ? + new Item_uint(ref->name, ref->val_int(), ref->max_length) : + new Item_int(ref->name, ref->val_int(), ref->max_length)); +} + + +Item_num *Item_uint::neg() +{ + return new Item_real(name, - ((double) value), 0, max_length); } -bool Item_real::save_in_field(Field *field, bool no_conversions) +int Item_real::save_in_field(Field *field, bool no_conversions) { double nr=val(); if (null_value) return set_field_to_null(field); field->set_notnull(); - field->store(nr); - return 0; + return field->store(nr); +} + + +bool Item_real::eq(const Item *arg, bool binary_cmp) const +{ + if (arg->basic_const_item() && arg->type() == type()) + { + /* + We need to cast off const to call val_int(). This should be OK for + a basic constant. + */ + Item *item= (Item*) arg; + return item->val() == value; + } + return FALSE; } /**************************************************************************** @@ -613,13 +2300,6 @@ inline uint char_val(char X) X-'a'+10); } -/* In MySQL 4.1 this will always return STRING_RESULT */ - -enum Item_result Item_varbinary::result_type () const -{ - return (current_thd->variables.new_mode) ? 
STRING_RESULT : INT_RESULT; -} - Item_varbinary::Item_varbinary(const char *str, uint str_length) { @@ -628,7 +2308,7 @@ Item_varbinary::Item_varbinary(const char *str, uint str_length) char *ptr=(char*) sql_alloc(max_length+1); if (!ptr) return; - str_value.set(ptr,max_length); + str_value.set(ptr,max_length,&my_charset_bin); char *end=ptr+max_length; if (max_length*2 != str_length) *ptr++=char_val(*str++); // Not even, assume 0 prefix @@ -638,11 +2318,15 @@ Item_varbinary::Item_varbinary(const char *str, uint str_length) str+=2; } *ptr=0; // Keep purify happy - binary=1; // Binary is default + collation.set(&my_charset_bin, DERIVATION_COERCIBLE); + fixed= 1; + unsigned_flag= 1; } longlong Item_varbinary::val_int() { + // following assert is redundant, because fixed=1 assigned in constructor + DBUG_ASSERT(fixed == 1); char *end=(char*) str_value.ptr()+str_value.length(), *ptr=end-min(str_value.length(),sizeof(longlong)); @@ -653,69 +2337,503 @@ longlong Item_varbinary::val_int() } -bool Item_varbinary::save_in_field(Field *field, bool no_conversions) +int Item_varbinary::save_in_field(Field *field, bool no_conversions) { + int error; field->set_notnull(); if (field->result_type() == STRING_RESULT) { - field->store(str_value.ptr(),str_value.length()); + error=field->store(str_value.ptr(),str_value.length(),collation.collation); } else { longlong nr=val_int(); - field->store(nr); + error=field->store(nr); } - return 0; + return error; } -void Item_varbinary::make_field(Send_field *tmp_field) +bool Item_varbinary::eq(const Item *arg, bool binary_cmp) const { - init_make_field(tmp_field,FIELD_TYPE_STRING); + if (arg->basic_const_item() && arg->type() == type()) + { + if (binary_cmp) + return !stringcmp(&str_value, &arg->str_value); + return !sortcmp(&str_value, &arg->str_value, collation.collation); + } + return FALSE; +} + + +Item *Item_varbinary::safe_charset_converter(CHARSET_INFO *tocs) +{ + Item_string *conv; + String tmp, *str= val_str(&tmp); + + if (!(conv= new Item_string(str->ptr(), str->length(), tocs))) + return NULL; + conv->str_value.copy(); + conv->str_value.shrink_to_length(); + return conv; } + /* -** pack data in buffer for sending + Pack data in buffer for sending */ -bool Item::send(THD *thd, String *packet) +bool Item_null::send(Protocol *protocol, String *packet) { - char buff[MAX_FIELD_WIDTH]; - CONVERT *convert; - String s(buff,sizeof(buff)),*res; - if (!(res=val_str(&s))) - return net_store_null(packet); - if ((convert=thd->variables.convert_set)) - return convert->store(packet,res->ptr(),res->length()); - return net_store_data(packet,res->ptr(),res->length()); + return protocol->store_null(); } -bool Item_null::send(THD *thd, String *packet) +/* + This is only called from items that is not of type item_field +*/ + +bool Item::send(Protocol *protocol, String *buffer) +{ + bool result; + enum_field_types type; + LINT_INIT(result); + + switch ((type=field_type())) { + default: + case MYSQL_TYPE_NULL: + case MYSQL_TYPE_DECIMAL: + case MYSQL_TYPE_ENUM: + case MYSQL_TYPE_SET: + case MYSQL_TYPE_TINY_BLOB: + case MYSQL_TYPE_MEDIUM_BLOB: + case MYSQL_TYPE_LONG_BLOB: + case MYSQL_TYPE_BLOB: + case MYSQL_TYPE_GEOMETRY: + case MYSQL_TYPE_STRING: + case MYSQL_TYPE_VAR_STRING: + { + String *res; + if ((res=val_str(buffer))) + result= protocol->store(res->ptr(),res->length(),res->charset()); + break; + } + case MYSQL_TYPE_TINY: + { + longlong nr; + nr= val_int(); + if (!null_value) + result= protocol->store_tiny(nr); + break; + } + case MYSQL_TYPE_SHORT: + { + longlong nr; + nr= 
val_int(); + if (!null_value) + result= protocol->store_short(nr); + break; + } + case MYSQL_TYPE_INT24: + case MYSQL_TYPE_LONG: + { + longlong nr; + nr= val_int(); + if (!null_value) + result= protocol->store_long(nr); + break; + } + case MYSQL_TYPE_LONGLONG: + { + longlong nr; + nr= val_int(); + if (!null_value) + result= protocol->store_longlong(nr, unsigned_flag); + break; + } + case MYSQL_TYPE_FLOAT: + { + float nr; + nr= (float) val(); + if (!null_value) + result= protocol->store(nr, decimals, buffer); + break; + } + case MYSQL_TYPE_DOUBLE: + { + double nr; + nr= val(); + if (!null_value) + result= protocol->store(nr, decimals, buffer); + break; + } + case MYSQL_TYPE_DATETIME: + case MYSQL_TYPE_DATE: + case MYSQL_TYPE_TIMESTAMP: + { + TIME tm; + get_date(&tm, TIME_FUZZY_DATE); + if (!null_value) + { + if (type == MYSQL_TYPE_DATE) + return protocol->store_date(&tm); + else + result= protocol->store(&tm); + } + break; + } + case MYSQL_TYPE_TIME: + { + TIME tm; + get_time(&tm); + if (!null_value) + result= protocol->store_time(&tm); + break; + } + } + if (null_value) + result= protocol->store_null(); + return result; +} + + +bool Item_field::send(Protocol *protocol, String *buffer) { - return net_store_null(packet); + return protocol->store(result_field); } + /* This is used for HAVING clause Find field in select list having the same name - This is not always called, see also Item_ref::Item_ref - */ +*/ -bool Item_ref::fix_fields(THD *thd,TABLE_LIST *tables) +bool Item_ref::fix_fields(THD *thd,TABLE_LIST *tables, Item **reference) { + DBUG_ASSERT(fixed == 0); + uint counter; + enum_parsing_place place= NO_MATTER; + bool not_used; if (!ref) { - if (!(ref=find_item_in_list(this,thd->lex.select->item_list))) + TABLE_LIST *where= 0, *table_list; + SELECT_LEX_UNIT *prev_unit= thd->lex->current_select->master_unit(); + SELECT_LEX *sl= prev_unit->outer_select(); + /* + Finding only in current select will be performed for selects that have + not outer one and for derived tables (which not support using outer + fields for now) + */ + if ((ref= find_item_in_list(this, + *(thd->lex->current_select->get_item_list()), + &counter, + ((sl && + thd->lex->current_select->master_unit()-> + first_select()->linkage != + DERIVED_TABLE_TYPE) ? + REPORT_EXCEPT_NOT_FOUND : + REPORT_ALL_ERRORS ), ¬_used)) == + (Item **)not_found_item) + { + Field *tmp= (Field*) not_found_field; + SELECT_LEX *last= 0; + /* + We can't find table field in select list of current select, + consequently we have to find it in outer subselect(s). + We can't join lists of outer & current select, because of scope + of view rules. For example if both tables (outer & current) have + field 'field' it is not mistake to refer to this field without + mention of table name, but if we join tables in one list it will + cause error ER_NON_UNIQ_ERROR in find_item_in_list. 
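
      A concrete (illustrative) case: if both t1 and t2 have a column a,
      then in

        SELECT a FROM t1 WHERE b IN (SELECT a FROM t2)

      the inner 'a' is resolved inside the subquery first and only then,
      if not found there, in the enclosing select; keeping the two scopes
      separate is what prevents a false ER_NON_UNIQ_ERROR here.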
+ */ + for ( ; sl ; sl= (prev_unit= sl->master_unit())->outer_select()) + { + last= sl; + Item_subselect *prev_subselect_item= prev_unit->item; + if (sl->resolve_mode == SELECT_LEX::SELECT_MODE && + (ref= find_item_in_list(this, sl->item_list, + &counter, REPORT_EXCEPT_NOT_FOUND, + ¬_used)) != + (Item **)not_found_item) + { + if (ref && (*ref)->fixed) // Avoid crash in case of error + { + prev_subselect_item->used_tables_cache|= (*ref)->used_tables(); + prev_subselect_item->const_item_cache&= (*ref)->const_item(); + } + break; + } + table_list= sl->get_table_list(); + if (sl->resolve_mode == SELECT_LEX::INSERT_MODE && table_list) + { + // it is primary INSERT st_select_lex => skip first table resolving + table_list= table_list->next; + } + place= prev_subselect_item->parsing_place; + /* + check table fields only if subquery used somewhere out of HAVING + or SELECT list or outer SELECT do not use groupping (i.e. tables + are accessable) + */ + if ((place != IN_HAVING || + (sl->with_sum_func == 0 && sl->group_list.elements == 0)) && + (tmp= find_field_in_tables(thd, this, + table_list, &where, + 0)) != not_found_field) + { + prev_subselect_item->used_tables_cache|= tmp->table->map; + prev_subselect_item->const_item_cache= 0; + break; + } + // Reference is not found => depend from outer (or just error) + prev_subselect_item->used_tables_cache|= OUTER_REF_TABLE_BIT; + prev_subselect_item->const_item_cache= 0; + + if (sl->master_unit()->first_select()->linkage == + DERIVED_TABLE_TYPE) + break; // do not look over derived table + } + + if (!ref) + return 1; + if (!tmp) + return -1; + if (ref == (Item **)not_found_item && tmp == not_found_field) + { + // We can't say exactly what absend (table or field) + my_printf_error(ER_BAD_FIELD_ERROR, ER(ER_BAD_FIELD_ERROR), MYF(0), + full_name(), thd->where); + ref= 0; // Safety + return 1; + } + if (tmp != not_found_field) + { + Item_field* fld; + /* + Set ref to 0 as we are replacing this item with the found item + and this will ensure we get an error if this item would be + used elsewhere + */ + ref= 0; // Safety + if (!(fld= new Item_field(tmp))) + return 1; + thd->change_item_tree(reference, fld); + mark_as_dependent(thd, last, thd->lex->current_select, fld); + return 0; + } + if (!last->ref_pointer_array[counter]) + { + my_error(ER_ILLEGAL_REFERENCE, MYF(0), name, + "forward reference in item list"); + return -1; + } + DBUG_ASSERT((*ref)->fixed); + mark_as_dependent(thd, last, thd->lex->current_select, + this); + if (place == IN_HAVING) + { + Item_ref *rf; + if (!(rf= new Item_direct_ref(last->ref_pointer_array + counter, + (char *)table_name, + (char *)field_name))) + return 1; + ref= 0; // Safety + if (rf->fix_fields(thd, tables, ref) || rf->check_cols(1)) + return 1; + thd->change_item_tree(reference, rf); + return 0; + } + ref= last->ref_pointer_array + counter; + } + else if (!ref) + return 1; + else + { + if (!(*ref)->fixed) + { + my_error(ER_ILLEGAL_REFERENCE, MYF(0), name, + "forward reference in item list"); + return -1; + } + ref= thd->lex->current_select->ref_pointer_array + counter; + } + } + + /* + The following conditional is changed as to correctly identify + incorrect references in group functions or forward references + with sub-select's / derived tables, while it prevents this + check when Item_ref is created in an expression involving + summing function, which is to be placed in the user variable. 
+ */ + if (((*ref)->with_sum_func && name && + (depended_from || + !(thd->lex->current_select->linkage != GLOBAL_OPTIONS_TYPE && + thd->lex->current_select->having_fix_field))) || + !(*ref)->fixed) + { + my_error(ER_ILLEGAL_REFERENCE, MYF(0), name, + ((*ref)->with_sum_func? + "reference on group function": + "forward reference in item list")); + return 1; + } + + set_properties(); + + if (ref && (*ref)->check_cols(1)) + return 1; + return 0; +} + +void Item_ref::set_properties() +{ + max_length= (*ref)->max_length; + maybe_null= (*ref)->maybe_null; + decimals= (*ref)->decimals; + collation.set((*ref)->collation); + with_sum_func= (*ref)->with_sum_func; + unsigned_flag= (*ref)->unsigned_flag; + fixed= 1; +} + +void Item_ref::print(String *str) +{ + if (ref && *ref) + (*ref)->print(str); + else + Item_ident::print(str); +} + + +void Item_ref_null_helper::print(String *str) +{ + str->append("<ref_null_helper>(", 18); + if (ref && *ref) + (*ref)->print(str); + else + str->append('?'); + str->append(')'); +} + + +void Item_null_helper::print(String *str) +{ + str->append("<null_helper>(", 14); + store->print(str); + str->append(')'); +} + + +bool Item_default_value::eq(const Item *item, bool binary_cmp) const +{ + return item->type() == DEFAULT_VALUE_ITEM && + ((Item_default_value *)item)->arg->eq(arg, binary_cmp); +} + + +bool Item_default_value::fix_fields(THD *thd, + struct st_table_list *table_list, + Item **items) +{ + DBUG_ASSERT(fixed == 0); + if (!arg) + { + fixed= 1; + return 0; + } + if (!arg->fixed && arg->fix_fields(thd, table_list, &arg)) + return 1; + + if (arg->type() == REF_ITEM) + { + Item_ref *ref= (Item_ref *)arg; + if (ref->ref[0]->type() != FIELD_ITEM) + { + return 1; + } + arg= ref->ref[0]; + } + Item_field *field_arg= (Item_field *)arg; + Field *def_field= (Field*) sql_alloc(field_arg->field->size_of()); + if (!def_field) + return 1; + memcpy(def_field, field_arg->field, field_arg->field->size_of()); + def_field->move_field(def_field->table->default_values - + def_field->table->record[0]); + set_field(def_field); + return 0; +} + +void Item_default_value::print(String *str) +{ + if (!arg) + { + str->append("default", 7); + return; + } + str->append("default(", 8); + arg->print(str); + str->append(')'); +} + +bool Item_insert_value::eq(const Item *item, bool binary_cmp) const +{ + return item->type() == INSERT_VALUE_ITEM && + ((Item_default_value *)item)->arg->eq(arg, binary_cmp); +} + + +bool Item_insert_value::fix_fields(THD *thd, + struct st_table_list *table_list, + Item **items) +{ + DBUG_ASSERT(fixed == 0); + st_table_list *orig_next_table= table_list->next; + table_list->next= 0; + if (!arg->fixed && arg->fix_fields(thd, table_list, &arg)) + { + table_list->next= orig_next_table; + return 1; + } + table_list->next= orig_next_table; + + if (arg->type() == REF_ITEM) + { + Item_ref *ref= (Item_ref *)arg; + if (ref->ref[0]->type() != FIELD_ITEM) + { + return 1; + } + arg= ref->ref[0]; + } + Item_field *field_arg= (Item_field *)arg; + + if (field_arg->field->table->insert_values) + { + Field *def_field= (Field*) sql_alloc(field_arg->field->size_of()); + if (!def_field) return 1; - max_length= (*ref)->max_length; - maybe_null= (*ref)->maybe_null; - decimals= (*ref)->decimals; - binary= (*ref)->binary; - with_sum_func= (*ref)->with_sum_func; + memcpy(def_field, field_arg->field, field_arg->field->size_of()); + def_field->move_field(def_field->table->insert_values - + def_field->table->record[0]); + set_field(def_field); + } + else + { + Field *tmp_field= field_arg->field; 
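    /*
      Item_insert_value is used for VALUES(col); a typical (illustrative)
      use is INSERT ... ON DUPLICATE KEY UPDATE col= VALUES(col)+1.
      This branch is taken when the table has no insert_values buffer,
      so the placeholder field below simply makes VALUES(col) read as
      NULL instead of dereferencing a missing buffer.
    */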
+ /* charset doesn't matter here, it's to avoid sigsegv only */ + set_field(new Field_null(0, 0, Field::NONE, tmp_field->field_name, + tmp_field->table, &my_charset_bin)); } return 0; } +void Item_insert_value::print(String *str) +{ + str->append("values(", 7); + arg->print(str); + str->append(')'); +} /* If item is a const function, calculate it and return a const item @@ -728,14 +2846,18 @@ Item_result item_cmp_type(Item_result a,Item_result b) return STRING_RESULT; if (a == INT_RESULT && b == INT_RESULT) return INT_RESULT; + else if (a == ROW_RESULT || b == ROW_RESULT) + return ROW_RESULT; return REAL_RESULT; } -Item *resolve_const_item(Item *item,Item *comp_item) +void resolve_const_item(THD *thd, Item **ref, Item *comp_item) { + Item *item= *ref; + Item *new_item= NULL; if (item->basic_const_item()) - return item; // Can't be better + return; // Can't be better Item_result res_type=item_cmp_type(comp_item->result_type(), item->result_type()); char *name=item->name; // Alloced by sql_alloc @@ -743,44 +2865,62 @@ Item *resolve_const_item(Item *item,Item *comp_item) if (res_type == STRING_RESULT) { char buff[MAX_FIELD_WIDTH]; - String tmp(buff,sizeof(buff)),*result; + String tmp(buff,sizeof(buff),&my_charset_bin),*result; result=item->val_str(&tmp); if (item->null_value) + new_item= new Item_null(name); + else { -#ifdef DELETE_ITEMS - delete item; -#endif - return new Item_null(name); + uint length= result->length(); + char *tmp_str= sql_strmake(result->ptr(), length); + new_item= new Item_string(name, tmp_str, length, result->charset()); } - uint length=result->length(); - char *tmp_str=sql_strmake(result->ptr(),length); -#ifdef DELETE_ITEMS - delete item; -#endif - return new Item_string(name,tmp_str,length); } - if (res_type == INT_RESULT) + else if (res_type == INT_RESULT) { longlong result=item->val_int(); uint length=item->max_length; bool null_value=item->null_value; -#ifdef DELETE_ITEMS - delete item; -#endif - return (null_value ? (Item*) new Item_null(name) : - (Item*) new Item_int(name,result,length)); + new_item= (null_value ? (Item*) new Item_null(name) : + (Item*) new Item_int(name, result, length)); } - else + else if (res_type == ROW_RESULT && item->type() == Item::ROW_ITEM && + comp_item->type() == Item::ROW_ITEM) + { + /* + Substitute constants only in Item_rows. Don't affect other Items + with ROW_RESULT (eg Item_singlerow_subselect). + + For such Items more optimal is to detect if it is constant and replace + it with Item_row. This would optimize queries like this: + SELECT * FROM t1 WHERE (a,b) = (SELECT a,b FROM t2 LIMIT 1); + */ + Item_row *item_row= (Item_row*) item; + Item_row *comp_item_row= (Item_row*) comp_item; + uint col; + new_item= 0; + /* + If item and comp_item are both Item_rows and have same number of cols + then process items in Item_row one by one. + We can't ignore NULL values here as this item may be used with <=>, in + which case NULL's are significant. + */ + DBUG_ASSERT(item->result_type() == comp_item->result_type()); + DBUG_ASSERT(item_row->cols() == comp_item_row->cols()); + col= item_row->cols(); + while (col-- > 0) + resolve_const_item(thd, item_row->addr(col), comp_item_row->el(col)); + } + else if (res_type == REAL_RESULT) { // It must REAL_RESULT double result=item->val(); uint length=item->max_length,decimals=item->decimals; bool null_value=item->null_value; -#ifdef DELETE_ITEMS - delete item; -#endif - return (null_value ? (Item*) new Item_null(name) : - (Item*) new Item_real(name,result,decimals,length)); + new_item= (null_value ? 
(Item*) new Item_null(name) : (Item*) + new Item_real(name, result, decimals, length)); } + if (new_item) + thd->change_item_tree(ref, new_item); } /* @@ -798,12 +2938,12 @@ bool field_is_equal_to_item(Field *field,Item *item) { char item_buff[MAX_FIELD_WIDTH]; char field_buff[MAX_FIELD_WIDTH]; - String item_tmp(item_buff,sizeof(item_buff)),*item_result; - String field_tmp(field_buff,sizeof(field_buff)); + String item_tmp(item_buff,sizeof(item_buff),&my_charset_bin),*item_result; + String field_tmp(field_buff,sizeof(field_buff),&my_charset_bin); item_result=item->val_str(&item_tmp); if (item->null_value) return 1; // This must be true - field->val_str(&field_tmp,&field_tmp); + field->val_str(&field_tmp); return !stringcmp(&field_tmp,item_result); } if (res_type == INT_RESULT) @@ -814,6 +2954,508 @@ bool field_is_equal_to_item(Field *field,Item *item) return result == field->val_real(); } +Item_cache* Item_cache::get_cache(Item_result type) +{ + switch (type) + { + case INT_RESULT: + return new Item_cache_int(); + case REAL_RESULT: + return new Item_cache_real(); + case STRING_RESULT: + return new Item_cache_str(); + case ROW_RESULT: + return new Item_cache_row(); + default: + // should never be in real life + DBUG_ASSERT(0); + return 0; + } +} + + +void Item_cache::print(String *str) +{ + str->append("<cache>(", 8); + if (example) + example->print(str); + else + Item::print(str); + str->append(')'); +} + + +void Item_cache_int::store(Item *item) +{ + value= item->val_int_result(); + null_value= item->null_value; +} + + +void Item_cache_real::store(Item *item) +{ + value= item->val_result(); + null_value= item->null_value; +} + + +void Item_cache_str::store(Item *item) +{ + value_buff.set(buffer, sizeof(buffer), item->collation.collation); + value= item->str_result(&value_buff); + if ((null_value= item->null_value)) + value= 0; + else if (value != &value_buff) + { + /* + We copy string value to avoid changing value if 'item' is table field + in queries like following (where t1.c is varchar): + select a, + (select a,b,c from t1 where t1.a=t2.a) = ROW(a,2,'a'), + (select c from t1 where a=t2.a) + from t2; + */ + value_buff.copy(*value); + value= &value_buff; + } +} + + +double Item_cache_str::val() +{ + DBUG_ASSERT(fixed == 1); + int err; + if (value) + { + char *end_not_used; + return my_strntod(value->charset(), (char*) value->ptr(), + value->length(), &end_not_used, &err); + } + return (double)0; +} + + +longlong Item_cache_str::val_int() +{ + DBUG_ASSERT(fixed == 1); + int err; + if (value) + return my_strntoll(value->charset(), value->ptr(), + value->length(), 10, (char**) 0, &err); + else + return (longlong)0; +} + + +bool Item_cache_row::allocate(uint num) +{ + item_count= num; + THD *thd= current_thd; + return (!(values= + (Item_cache **) thd->calloc(sizeof(Item_cache *)*item_count))); +} + + +bool Item_cache_row::setup(Item * item) +{ + example= item; + if (!values && allocate(item->cols())) + return 1; + for (uint i= 0; i < item_count; i++) + { + Item *el= item->el(i); + Item_cache *tmp; + if (!(tmp= values[i]= Item_cache::get_cache(el->result_type()))) + return 1; + tmp->setup(el); + } + return 0; +} + + +void Item_cache_row::store(Item * item) +{ + null_value= 0; + item->bring_value(); + for (uint i= 0; i < item_count; i++) + { + values[i]->store(item->el(i)); + null_value|= values[i]->null_value; + } +} + + +void Item_cache_row::illegal_method_call(const char *method) +{ + DBUG_ENTER("Item_cache_row::illegal_method_call"); + DBUG_PRINT("error", ("!!! 
%s method was called for row item", method)); + DBUG_ASSERT(0); + my_error(ER_OPERAND_COLUMNS, MYF(0), 1); + DBUG_VOID_RETURN; +} + + +bool Item_cache_row::check_cols(uint c) +{ + if (c != item_count) + { + my_error(ER_OPERAND_COLUMNS, MYF(0), c); + return 1; + } + return 0; +} + + +bool Item_cache_row::null_inside() +{ + for (uint i= 0; i < item_count; i++) + { + if (values[i]->cols() > 1) + { + if (values[i]->null_inside()) + return 1; + } + else + { + values[i]->val_int(); + if (values[i]->null_value) + return 1; + } + } + return 0; +} + + +void Item_cache_row::bring_value() +{ + for (uint i= 0; i < item_count; i++) + values[i]->bring_value(); + return; +} + + +Item_type_holder::Item_type_holder(THD *thd, Item *item) + :Item(thd, item), enum_set_typelib(0), fld_type(get_real_type(item)) +{ + DBUG_ASSERT(item->fixed); + + max_length= display_length(item); + maybe_null= item->maybe_null; + collation.set(item->collation); + get_full_info(item); +} + + +/* + Return expression type of Item_type_holder + + SYNOPSIS + Item_type_holder::result_type() + + RETURN + Item_result (type of internal MySQL expression result) +*/ + +Item_result Item_type_holder::result_type() const +{ + return Field::result_merge_type(fld_type); +} + + +/* + Find real field type of item + + SYNOPSIS + Item_type_holder::get_real_type() + + RETURN + type of field which should be created to store item value +*/ + +enum_field_types Item_type_holder::get_real_type(Item *item) +{ + switch(item->type()) + { + case FIELD_ITEM: + { + /* + Item_fields::field_type ask Field_type() but sometimes field return + a different type, like for enum/set, so we need to ask real type. + */ + Field *field= ((Item_field *) item)->field; + enum_field_types type= field->real_type(); + /* work around about varchar type field detection */ + if (type == MYSQL_TYPE_STRING && field->type() == MYSQL_TYPE_VAR_STRING) + return MYSQL_TYPE_VAR_STRING; + return type; + } + case SUM_FUNC_ITEM: + { + /* + Argument of aggregate function sometimes should be asked about field + type + */ + Item_sum *item_sum= (Item_sum *) item; + if (item_sum->keep_field_type()) + return get_real_type(item_sum->args[0]); + break; + } + case FUNC_ITEM: + if (((Item_func *) item)->functype() == Item_func::VAR_VALUE_FUNC) + { + /* + There are work around of problem with changing variable type on the + fly and variable always report "string" as field type to get + acceptable information for client in send_field, so we make field + type from expression type. + */ + switch (item->result_type()) + { + case STRING_RESULT: + return MYSQL_TYPE_VAR_STRING; + case INT_RESULT: + return MYSQL_TYPE_LONGLONG; + case REAL_RESULT: + return MYSQL_TYPE_DOUBLE; + case ROW_RESULT: + default: + DBUG_ASSERT(0); + return MYSQL_TYPE_VAR_STRING; + } + } + break; + default: + break; + } + return item->field_type(); +} + +/* + Find field type which can carry current Item_type_holder type and + type of given Item. 
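
  For example (illustrative; this assumes Field::field_type_merge() maps
  the LONGLONG/VAR_STRING pair to VAR_STRING):

    SELECT 1 UNION SELECT 'abc'

  starts with fld_type= MYSQL_TYPE_LONGLONG and max_length= 20 (the
  display length of a LONGLONG) from the first select; join_types() with
  the string item changes fld_type to MYSQL_TYPE_VAR_STRING and keeps
  max_length= max(20, 3)= 20, so one result column fits both values.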
+ + SYNOPSIS + Item_type_holder::join_types() + thd thread handler + item given item to join its parameters with this item ones + + RETURN + TRUE error - types are incompatible + FALSE OK +*/ + +bool Item_type_holder::join_types(THD *thd, Item *item) +{ + uint max_length_orig= max_length; + uint decimals_orig= decimals; + max_length= max(max_length, display_length(item)); + decimals= max(decimals, item->decimals); + fld_type= Field::field_type_merge(fld_type, get_real_type(item)); + switch (Field::result_merge_type(fld_type)) + { + case STRING_RESULT: + { + const char *old_cs, *old_derivation; + old_cs= collation.collation->name; + old_derivation= collation.derivation_name(); + if (collation.aggregate(item->collation, MY_COLL_ALLOW_CONV)) + { + my_error(ER_CANT_AGGREGATE_2COLLATIONS, MYF(0), + old_cs, old_derivation, + item->collation.collation->name, + item->collation.derivation_name(), + "UNION"); + return TRUE; + } + break; + } + case REAL_RESULT: + { + if (decimals != NOT_FIXED_DEC) + { + int delta1= max_length_orig - decimals_orig; + int delta2= item->max_length - item->decimals; + if (fld_type == MYSQL_TYPE_DECIMAL) + max_length= max(delta1, delta2) + decimals; + else + max_length= min(max(delta1, delta2) + decimals, + (fld_type == MYSQL_TYPE_FLOAT) ? FLT_DIG+6 : DBL_DIG+7); + } + else + max_length= (fld_type == MYSQL_TYPE_FLOAT) ? FLT_DIG+6 : DBL_DIG+7; + break; + } + default:; + }; + maybe_null|= item->maybe_null; + get_full_info(item); + return FALSE; +} + +/* + Calculate lenth for merging result for given Item type + + SYNOPSIS + Item_type_holder::real_length() + item Item for lrngth detection + + RETURN + length +*/ + +uint32 Item_type_holder::display_length(Item *item) +{ + if (item->type() == Item::FIELD_ITEM) + return ((Item_field *)item)->max_disp_length(); + + switch (item->field_type()) + { + case MYSQL_TYPE_DECIMAL: + case MYSQL_TYPE_TIMESTAMP: + case MYSQL_TYPE_DATE: + case MYSQL_TYPE_TIME: + case MYSQL_TYPE_DATETIME: + case MYSQL_TYPE_YEAR: + case MYSQL_TYPE_NEWDATE: + case MYSQL_TYPE_ENUM: + case MYSQL_TYPE_SET: + case MYSQL_TYPE_TINY_BLOB: + case MYSQL_TYPE_MEDIUM_BLOB: + case MYSQL_TYPE_LONG_BLOB: + case MYSQL_TYPE_BLOB: + case MYSQL_TYPE_VAR_STRING: + case MYSQL_TYPE_STRING: + case MYSQL_TYPE_GEOMETRY: + return item->max_length; + case MYSQL_TYPE_TINY: + return 4; + case MYSQL_TYPE_SHORT: + return 6; + case MYSQL_TYPE_LONG: + return 11; + case MYSQL_TYPE_FLOAT: + return 25; + case MYSQL_TYPE_DOUBLE: + return 53; + case MYSQL_TYPE_NULL: + return 4; + case MYSQL_TYPE_LONGLONG: + return 20; + case MYSQL_TYPE_INT24: + return 8; + default: + DBUG_ASSERT(0); // we should never go there + return 0; + } +} + + +/* + Make temporary table field according collected information about type + of UNION result + + SYNOPSIS + Item_type_holder::make_field_by_type() + table temporary table for which we create fields + + RETURN + created field +*/ + +Field *Item_type_holder::make_field_by_type(TABLE *table) +{ + /* + The field functions defines a field to be not null if null_ptr is not 0 + */ + uchar *null_ptr= maybe_null ? 
(uchar*) "" : 0; + switch (fld_type) + { + case MYSQL_TYPE_ENUM: + DBUG_ASSERT(enum_set_typelib); + return new Field_enum((char *) 0, max_length, null_ptr, 0, + Field::NONE, name, + table, get_enum_pack_length(enum_set_typelib->count), + enum_set_typelib, collation.collation); + case MYSQL_TYPE_SET: + DBUG_ASSERT(enum_set_typelib); + return new Field_set((char *) 0, max_length, null_ptr, 0, + Field::NONE, name, + table, get_set_pack_length(enum_set_typelib->count), + enum_set_typelib, collation.collation); + case MYSQL_TYPE_VAR_STRING: + table->db_create_options|= HA_OPTION_PACK_RECORD; + fld_type= MYSQL_TYPE_STRING; + break; + default: + break; + } + return tmp_table_field_from_field_type(table); +} + + +/* + Get full information from Item about enum/set fields to be able to create + them later + + SYNOPSIS + Item_type_holder::get_full_info + item Item for information collection +*/ +void Item_type_holder::get_full_info(Item *item) +{ + if (fld_type == MYSQL_TYPE_ENUM || + fld_type == MYSQL_TYPE_SET) + { + if (item->type() == Item::SUM_FUNC_ITEM && + (((Item_sum*)item)->sum_func() == Item_sum::MAX_FUNC || + ((Item_sum*)item)->sum_func() == Item_sum::MIN_FUNC)) + item = ((Item_sum*)item)->args[0]; + /* + We can have enum/set type after merging only if we have one enum|set + field (or MIN|MAX(enum|set field)) and number of NULL fields + */ + DBUG_ASSERT((enum_set_typelib && + get_real_type(item) == MYSQL_TYPE_NULL) || + (!enum_set_typelib && + item->type() == Item::FIELD_ITEM && + (get_real_type(item) == MYSQL_TYPE_ENUM || + get_real_type(item) == MYSQL_TYPE_SET) && + ((Field_enum*)((Item_field *) item)->field)->typelib)); + if (!enum_set_typelib) + { + enum_set_typelib= ((Field_enum*)((Item_field *) item)->field)->typelib; + } + } +} + + +double Item_type_holder::val() +{ + DBUG_ASSERT(0); // should never be called + return 0.0; +} + + +longlong Item_type_holder::val_int() +{ + DBUG_ASSERT(0); // should never be called + return 0; +} + + +String *Item_type_holder::val_str(String*) +{ + DBUG_ASSERT(0); // should never be called + return 0; +} + +void Item_result_field::cleanup() +{ + DBUG_ENTER("Item_result_field::cleanup()"); + Item::cleanup(); + result_field= 0; + DBUG_VOID_RETURN; +} /***************************************************************************** ** Instantiate templates diff --git a/sql/item.h b/sql/item.h index cc6a846d6c1..1d01ce0d3f3 100644 --- a/sql/item.h +++ b/sql/item.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2003 MySQL AB & MySQL Finland AB & TCX DataKonsult AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -15,58 +15,206 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_INTERFACE #pragma interface /* gcc class implementation */ #endif +class Protocol; struct st_table_list; -void item_init(void); /* Init item functions */ +void item_init(void); /* Init item functions */ + + +/* + "Declared Type Collation" + A combination of collation and its deriviation. +*/ + +enum Derivation +{ + DERIVATION_IGNORABLE= 5, + DERIVATION_COERCIBLE= 4, + DERIVATION_SYSCONST= 3, + DERIVATION_IMPLICIT= 2, + DERIVATION_NONE= 1, + DERIVATION_EXPLICIT= 0 +}; + +/* + Flags for collation aggregation modes: + MY_COLL_ALLOW_SUPERSET_CONV - allow conversion to a superset + MY_COLL_ALLOW_COERCIBLE_CONV - allow conversion of a coercible value + (i.e. constant). 
+ MY_COLL_ALLOW_CONV - allow any kind of conversion + (combintion of the above two) + MY_COLL_DISALLOW_NONE - don't allow return DERIVATION_NONE + (e.g. when aggregating for comparison) + MY_COLL_CMP_CONV - combination of MY_COLL_ALLOW_CONV + and MY_COLL_DISALLOW_NONE +*/ + +#define MY_COLL_ALLOW_SUPERSET_CONV 1 +#define MY_COLL_ALLOW_COERCIBLE_CONV 2 +#define MY_COLL_ALLOW_CONV 3 +#define MY_COLL_DISALLOW_NONE 4 +#define MY_COLL_CMP_CONV 7 + +class DTCollation { +public: + CHARSET_INFO *collation; + enum Derivation derivation; + + DTCollation() + { + collation= &my_charset_bin; + derivation= DERIVATION_NONE; + } + DTCollation(CHARSET_INFO *collation_arg, Derivation derivation_arg) + { + collation= collation_arg; + derivation= derivation_arg; + } + void set(DTCollation &dt) + { + collation= dt.collation; + derivation= dt.derivation; + } + void set(CHARSET_INFO *collation_arg, Derivation derivation_arg) + { + collation= collation_arg; + derivation= derivation_arg; + } + void set(CHARSET_INFO *collation_arg) + { collation= collation_arg; } + void set(Derivation derivation_arg) + { derivation= derivation_arg; } + bool aggregate(DTCollation &dt, uint flags= 0); + bool set(DTCollation &dt1, DTCollation &dt2, uint flags= 0) + { set(dt1); return aggregate(dt2, flags); } + const char *derivation_name() const + { + switch(derivation) + { + case DERIVATION_IGNORABLE: return "IGNORABLE"; + case DERIVATION_COERCIBLE: return "COERCIBLE"; + case DERIVATION_IMPLICIT: return "IMPLICIT"; + case DERIVATION_SYSCONST: return "SYSCONST"; + case DERIVATION_EXPLICIT: return "EXPLICIT"; + case DERIVATION_NONE: return "NONE"; + default: return "UNKNOWN"; + } + } +}; + +typedef bool (Item::*Item_processor)(byte *arg); class Item { - Item(const Item &); /* Prevent use of these */ + Item(const Item &); /* Prevent use of these */ void operator=(Item &); public: static void *operator new(size_t size) {return (void*) sql_alloc((uint) size); } - static void operator delete(void *ptr,size_t size) {} /*lint -e715 */ + static void *operator new(size_t size, MEM_ROOT *mem_root) + { return (void*) alloc_root(mem_root, (uint) size); } + static void operator delete(void *ptr,size_t size) {} + static void operator delete(void *ptr, MEM_ROOT *mem_root) {} - enum Type {FIELD_ITEM,FUNC_ITEM,SUM_FUNC_ITEM,STRING_ITEM, - INT_ITEM,REAL_ITEM,NULL_ITEM,VARBIN_ITEM, - COPY_STR_ITEM,FIELD_AVG_ITEM, DEFAULT_ITEM, - PROC_ITEM,COND_ITEM,REF_ITEM,FIELD_STD_ITEM, CONST_ITEM}; - enum cond_result { COND_UNDEF,COND_OK,COND_TRUE,COND_FALSE }; + enum Type {FIELD_ITEM, FUNC_ITEM, SUM_FUNC_ITEM, STRING_ITEM, + INT_ITEM, REAL_ITEM, NULL_ITEM, VARBIN_ITEM, + COPY_STR_ITEM, FIELD_AVG_ITEM, DEFAULT_VALUE_ITEM, + PROC_ITEM,COND_ITEM, REF_ITEM, FIELD_STD_ITEM, + FIELD_VARIANCE_ITEM, INSERT_VALUE_ITEM, + SUBSELECT_ITEM, ROW_ITEM, CACHE_ITEM, TYPE_HOLDER, + PARAM_ITEM}; - String str_value; /* used to store value */ + enum cond_result { COND_UNDEF,COND_OK,COND_TRUE,COND_FALSE }; + + /* + str_values's main purpose is to be used to cache the value in + save_in_field + */ + String str_value; my_string name; /* Name from select */ Item *next; uint32 max_length; uint8 marker,decimals; my_bool maybe_null; /* If item may be null */ my_bool null_value; /* if item is null */ - my_bool binary; my_bool unsigned_flag; my_bool with_sum_func; - + my_bool fixed; /* If item fixed with fix_fields */ + DTCollation collation; // alloc & destruct is done as start of select using sql_alloc Item(); + /* + Constructor used by Item_field, Item_ref & agregate (sum) functions. 
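// Illustrative sketch of the precedence idea behind DTCollation::aggregate():
// a numerically smaller Derivation is "stronger" and wins outright, while a
// tie between two different collations degrades to DERIVATION_NONE unless a
// charset conversion is permitted by the MY_COLL_* flags. Conversion handling
// is omitted here; this is a simplified model, not the server implementation.

#include <string.h>                     // strcmp, for this standalone sketch

struct ToyCollation
{
  const char *name;                     // collation name, e.g. "latin1_bin"
  Derivation derivation;                // the Derivation enum declared above
};

ToyCollation toy_aggregate(ToyCollation a, ToyCollation b)
{
  if (a.derivation < b.derivation)
    return a;                           // stronger (smaller) derivation wins
  if (b.derivation < a.derivation)
    return b;
  if (strcmp(a.name, b.name) == 0)
    return a;                           // same collation on both sides
  ToyCollation none= { a.name, DERIVATION_NONE };
  return none;                          // ambiguous mix; cf. MY_COLL_DISALLOW_NONE
}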
+ Used for duplicating lists in processing queries with temporary + tables + Also it used for Item_cond_and/Item_cond_or for creating + top AND/OR ctructure of WHERE clause to protect it of + optimisation changes in prepared statements + */ + Item(THD *thd, Item *item); virtual ~Item() { name=0; } /*lint -e1509 */ - void set_name(char* str,uint length=0); + void set_name(const char *str,uint length, CHARSET_INFO *cs); void init_make_field(Send_field *tmp_field,enum enum_field_types type); - virtual bool fix_fields(THD *,struct st_table_list *); - virtual bool save_in_field(Field *field, bool no_conversions); + virtual void cleanup() + { + DBUG_ENTER("Item::cleanup"); + DBUG_PRINT("info", ("Type: %d", (int)type())); + fixed=0; + marker= 0; + DBUG_VOID_RETURN; + } + virtual void make_field(Send_field *field); + virtual bool fix_fields(THD *, struct st_table_list *, Item **); + /* + should be used in case where we are sure that we do not need + complete fix_fields() procedure. + */ + inline void quick_fix_field() { fixed= 1; } + /* Function returns 1 on overflow and -1 on fatal errors */ + virtual int save_in_field(Field *field, bool no_conversions); virtual void save_org_in_field(Field *field) { (void) save_in_field(field, 1); } - virtual bool save_safe_in_field(Field *field) + virtual int save_safe_in_field(Field *field) { return save_in_field(field, 1); } - virtual bool send(THD *thd, String *str); + virtual bool send(Protocol *protocol, String *str); virtual bool eq(const Item *, bool binary_cmp) const; - virtual Item_result result_type () const { return REAL_RESULT; } + virtual Item_result result_type() const { return REAL_RESULT; } + virtual Item_result cast_to_int_type() const { return result_type(); } + virtual enum_field_types field_type() const; virtual enum Type type() const =0; + /* valXXX methods must return NULL or 0 or 0.0 if null_value is set. */ virtual double val()=0; virtual longlong val_int()=0; + /* + Return string representation of this item object. + + The argument to val_str() is an allocated buffer this or any + nested Item object can use to store return value of this method. + This buffer should only be used if the item itself doesn't have an + own String buffer. In case when the item maintains it's own string + buffer, it's preferrable to return it instead to minimize number of + mallocs/memcpys. + The caller of this method can modify returned string, but only in + case when it was allocated on heap, (is_alloced() is true). This + allows the caller to efficiently use a buffer allocated by a child + without having to allocate a buffer of it's own. The buffer, given + to val_str() as agrument, belongs to the caller and is later used + by the caller at it's own choosing. + A few implications from the above: + - unless you return a string object which only points to your buffer + but doesn't manages it you should be ready that it will be + modified. + - even for not allocated strings (is_alloced() == false) the caller + can change charset (see Item_func_{typecast/binary}. XXX: is this + a bug? + - still you should try to minimize data copying and return internal + object whenever possible. + */ virtual String *val_str(String*)=0; - virtual void make_field(Send_field *field)=0; - virtual Field *tmp_table_field(TABLE *t_arg=(TABLE *)0) { return 0; } + virtual Field *get_tmp_table_field() { return 0; } + virtual Field *tmp_table_field(TABLE *t_arg) { return 0; } virtual const char *full_name() const { return name ? 
name : "???"; } virtual double val_result() { return val(); } virtual longlong val_int_result() { return val_int(); } @@ -87,41 +235,155 @@ public: virtual table_map not_null_tables() const { return used_tables(); } /* Returns true if this is a simple constant item like an integer, not - a constant expression + a constant expression. Used in the optimizer to propagate basic constants. */ virtual bool basic_const_item() const { return 0; } - virtual Item *new_item() { return 0; } /* Only for const items */ + /* cloning of constant items (0 if it is not const) */ + virtual Item *new_item() { return 0; } virtual cond_result eq_cmp_result() const { return COND_OK; } inline uint float_length(uint decimals_par) const { return decimals != NOT_FIXED_DEC ? (DBL_DIG+2+decimals_par) : DBL_DIG+8;} + /* + Returns true if this is constant (during query execution, i.e. its value + will not change until next fix_fields) and its value is known. + */ virtual bool const_item() const { return used_tables() == 0; } + /* + Returns true if this is constant but its value may be not known yet. + (Can be used for parameters of prep. stmts or of stored procedures.) + */ + virtual bool const_during_execution() const + { return (used_tables() & ~PARAM_TABLE_BIT) == 0; } virtual void print(String *str_arg) { str_arg->append(full_name()); } + void print_item_w_name(String *); virtual void update_used_tables() {} - virtual void split_sum_func(List<Item> &fields) {} - virtual bool get_date(TIME *ltime,bool fuzzydate); + virtual void split_sum_func(THD *thd, Item **ref_pointer_array, + List<Item> &fields) {} + /* Called for items that really have to be split */ + void split_sum_func2(THD *thd, Item **ref_pointer_array, List<Item> &fields, + Item **ref); + virtual bool get_date(TIME *ltime,uint fuzzydate); virtual bool get_time(TIME *ltime); - virtual bool get_date_result(TIME *ltime,bool fuzzydate) + virtual bool get_date_result(TIME *ltime,uint fuzzydate) { return get_date(ltime,fuzzydate); } + /* + This function is used only in Item_func_isnull/Item_func_isnotnull + (implementations of IS NULL/IS NOT NULL clauses). Item_func_is{not}null + calls this method instead of one of val/result*() methods, which + normally will set null_value. This allows to determine nullness of + a complex expression without fully evaluating it. + Any new item which can be NULL must implement this call. 
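// Standalone toy of the idea described above: IS NULL can often be answered
// from the operands' nullness alone, without computing the expression's
// value. Illustrative only; not the Item hierarchy itself.

struct ToyExpr
{
  virtual ~ToyExpr() {}
  virtual double eval()= 0;             // full (possibly expensive) evaluation
  virtual bool is_null()= 0;            // cheap nullness probe
};

struct ToyAdd :public ToyExpr
{
  ToyExpr *left, *right;
  ToyAdd(ToyExpr *l, ToyExpr *r) :left(l), right(r) {}
  double eval() { return left->eval() + right->eval(); }
  // NULL-propagating operator: nullness of the sum follows from the
  // operands' nullness, so eval() is never called here.
  bool is_null() { return left->is_null() || right->is_null(); }
};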
+ */ virtual bool is_null() { return 0; } - virtual unsigned int size_of()= 0; + /* + it is "top level" item of WHERE clause and we do not need correct NULL + handling + */ virtual void top_level_item() {} + /* + set field of temporary table for Item which can be switched on temporary + table during query processing (groupping and so on) + */ virtual void set_result_field(Field *field) {} virtual bool is_result_field() { return 0; } + virtual bool is_bool_func() { return 0; } virtual void save_in_result_field(bool no_conversions) {} + /* + set value of aggegate function in case of no rows for groupping were found + */ virtual void no_rows_in_result() {} + virtual Item *copy_or_same(THD *thd) { return this; } + virtual Item *copy_andor_structure(THD *thd) { return this; } + virtual Item *real_item() { return this; } + virtual Item *get_tmp_table_item(THD *thd) { return copy_or_same(thd); } + + static CHARSET_INFO *default_charset(); + virtual CHARSET_INFO *compare_collation() { return NULL; } + + virtual bool walk(Item_processor processor, byte *arg) + { + return (this->*processor)(arg); + } + + virtual bool remove_dependence_processor(byte * arg) { return 0; } + virtual bool remove_fixed(byte * arg) { fixed= 0; return 0; } + + // Row emulation + virtual uint cols() { return 1; } + virtual Item* el(uint i) { return this; } + virtual Item** addr(uint i) { return 0; } + virtual bool check_cols(uint c); + // It is not row => null inside is impossible + virtual bool null_inside() { return 0; } + // used in row subselects to get value of elements + virtual void bring_value() {} + + Field *tmp_table_field_from_field_type(TABLE *table); + + virtual Item *neg_transformer(THD *thd) { return NULL; } + virtual Item *safe_charset_converter(CHARSET_INFO *tocs); + void delete_self() + { + cleanup(); + delete this; + } +}; + + +bool agg_item_collations(DTCollation &c, const char *name, + Item **items, uint nitems, uint flags= 0); +bool agg_item_collations_for_comparison(DTCollation &c, const char *name, + Item **items, uint nitems, + uint flags= 0); +bool agg_item_charsets(DTCollation &c, const char *name, + Item **items, uint nitems, uint flags= 0); + + +class Item_num: public Item +{ +public: + virtual Item_num *neg()= 0; + Item *safe_charset_converter(CHARSET_INFO *tocs); }; +#define NO_CACHED_FIELD_INDEX ((uint)(-1)) +class st_select_lex; class Item_ident :public Item { +protected: + /* + We have to store initial values of db_name, table_name and field_name + to be able to restore them during cleanup() because they can be + updated during fix_fields() to values from Field object and life-time + of those is shorter than life-time of Item_field. + */ + const char *orig_db_name; + const char *orig_table_name; + const char *orig_field_name; public: const char *db_name; const char *table_name; const char *field_name; + /* + Cached value of index for this field in table->field array, used by prep. + stmts for speeding up their re-execution. Holds NO_CACHED_FIELD_INDEX + if index value is not known. + */ + uint cached_field_index; + /* + Cached pointer to table which contains this field, used for the same reason + by prep. stmt. too in case then we have not-fully qualified field. + 0 - means no cached value. 
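// Standalone sketch of the walk()/Item_processor hook declared above: a
// pointer-to-member "processor" is applied to every node of an expression
// tree, and a processor returning true aborts the traversal. Names are
// illustrative; this is not the server's Item class.

#include <vector>

class ToyNode
{
  std::vector<ToyNode*> children;
public:
  typedef bool (ToyNode::*Processor)(void *arg);   // cf. Item_processor

  void add_child(ToyNode *child) { children.push_back(child); }

  bool walk(Processor processor, void *arg)
  {
    for (unsigned i= 0; i < children.size(); i++)
      if (children[i]->walk(processor, arg))
        return true;                               // abort on first "true"
    return (this->*processor)(arg);                // finally visit self
  }

  // Example processor (analogous to Item::remove_fixed): count the nodes.
  bool count_processor(void *arg) { ++*(int*) arg; return false; }
};

// Usage:  ToyNode root, leaf;  root.add_child(&leaf);
//         int n= 0;  root.walk(&ToyNode::count_processor, &n);   // n == 2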
+ */ + TABLE_LIST *cached_table; + st_select_lex *depended_from; Item_ident(const char *db_name_par,const char *table_name_par, const char *field_name_par); + Item_ident(THD *thd, Item_ident *item); const char *full_name() const; - unsigned int size_of() { return sizeof(*this);} + void cleanup(); + bool remove_dependence_processor(byte * arg); }; @@ -130,12 +392,27 @@ class Item_field :public Item_ident void set_field(Field *field); public: Field *field,*result_field; - // Item_field() {} Item_field(const char *db_par,const char *table_name_par, const char *field_name_par) - :Item_ident(db_par,table_name_par,field_name_par),field(0),result_field(0) - {} + :Item_ident(db_par,table_name_par,field_name_par), + field(0), result_field(0) + { collation.set(DERIVATION_IMPLICIT); } + /* + Constructor needed to process subselect with temporary tables (see Item) + */ + Item_field(THD *thd, Item_field *item); + /* + Constructor used inside setup_wild(), ensures that field, table, + and database names will live as long as Item_field (this is important + in prepared statements). + */ + Item_field(THD *thd, Field *field); + /* + If this constructor is used, fix_fields() won't work, because + db_name, table_name and column_name are unknown. It's necessary to call + reset_field() before fix_fields() for all fields created this way. + */ Item_field(Field *field); enum Type type() const { return FIELD_ITEM; } bool eq(const Item *item, bool binary_cmp) const; @@ -145,111 +422,270 @@ public: double val_result(); longlong val_int_result(); String *str_result(String* tmp); - bool send(THD *thd, String *str_arg) - { - return result_field->send(thd,str_arg); - } - void make_field(Send_field *field); - bool fix_fields(THD *,struct st_table_list *); - bool save_in_field(Field *field,bool no_conversions); + bool send(Protocol *protocol, String *str_arg); + void reset_field(Field *f); + bool fix_fields(THD *, struct st_table_list *, Item **); + void make_field(Send_field *tmp_field); + int save_in_field(Field *field,bool no_conversions); void save_org_in_field(Field *field); table_map used_tables() const; enum Item_result result_type () const { return field->result_type(); } - Field *tmp_table_field(TABLE *t_arg=(TABLE *)0) { return result_field; } - bool get_date(TIME *ltime,bool fuzzydate); - bool get_date_result(TIME *ltime,bool fuzzydate); + Item_result cast_to_int_type() const + { + return field->cast_to_int_type(); + } + enum_field_types field_type() const + { + return field->type(); + } + Field *get_tmp_table_field() { return result_field; } + Field *tmp_table_field(TABLE *t_arg) { return result_field; } + bool get_date(TIME *ltime,uint fuzzydate); + bool get_date_result(TIME *ltime,uint fuzzydate); bool get_time(TIME *ltime); bool is_null() { return field->is_null(); } - unsigned int size_of() { return sizeof(*this);} + Item *get_tmp_table_item(THD *thd); + void cleanup(); + inline uint32 max_disp_length() { return field->max_length(); } + friend class Item_default_value; + friend class Item_insert_value; + friend class st_select_lex_unit; }; - class Item_null :public Item { public: Item_null(char *name_par=0) - { maybe_null=null_value=TRUE; name= name_par ? name_par : (char*) "NULL";} + { + maybe_null= null_value= TRUE; + max_length= 0; + name= name_par ? 
name_par : (char*) "NULL"; + fixed= 1; + collation.set(&my_charset_bin, DERIVATION_IGNORABLE); + } enum Type type() const { return NULL_ITEM; } bool eq(const Item *item, bool binary_cmp) const; double val(); longlong val_int(); String *val_str(String *str); - void make_field(Send_field *field); - bool save_in_field(Field *field, bool no_conversions); - bool save_safe_in_field(Field *field); - enum Item_result result_type () const - { return STRING_RESULT; } - bool send(THD *thd, String *str); + int save_in_field(Field *field, bool no_conversions); + int save_safe_in_field(Field *field); + bool send(Protocol *protocol, String *str); + enum Item_result result_type () const { return STRING_RESULT; } + enum_field_types field_type() const { return MYSQL_TYPE_NULL; } + // to prevent drop fixed flag (no need parent cleanup call) + void cleanup() {} bool basic_const_item() const { return 1; } Item *new_item() { return new Item_null(name); } bool is_null() { return 1; } - unsigned int size_of() { return sizeof(*this);} + void print(String *str) { str->append("NULL", 4); } + Item *safe_charset_converter(CHARSET_INFO *tocs); }; +class Item_null_result :public Item_null +{ +public: + Field *result_field; + Item_null_result() : Item_null(), result_field(0) {} + bool is_result_field() { return result_field != 0; } + void save_in_result_field(bool no_conversions) + { + save_in_field(result_field, no_conversions); + } +}; + +/* Item represents one placeholder ('?') of prepared statement */ -class Item_int :public Item +class Item_param :public Item { public: - const longlong value; + enum enum_item_param_state + { + NO_VALUE, NULL_VALUE, INT_VALUE, REAL_VALUE, + STRING_VALUE, TIME_VALUE, LONG_DATA_VALUE + } state; + + /* + A buffer for string and long data values. Historically all allocated + values returned from val_str() were treated as eligible to + modification. I. e. in some cases Item_func_concat can append it's + second argument to return value of the first one. Because of that we + can't return the original buffer holding string data from val_str(), + and have to have one buffer for data and another just pointing to + the data. This is the latter one and it's returned from val_str(). + Can not be declared inside the union as it's not a POD type. + */ + String str_value_ptr; + union + { + longlong integer; + double real; + /* + Character sets conversion info for string values. + Character sets of client and connection defined at bind time are used + for all conversions, even if one of them is later changed (i.e. + between subsequent calls to mysql_stmt_execute). + */ + struct CONVERSION_INFO + { + CHARSET_INFO *character_set_client; + CHARSET_INFO *character_set_of_placeholder; + /* + This points at character set of connection if conversion + to it is required (i. e. if placeholder typecode is not BLOB). + Otherwise it's equal to character_set_client (to simplify + check in convert_str_value()). + */ + CHARSET_INFO *final_character_set_of_str_value; + } cs_info; + TIME time; + } value; + + /* Cached values for virtual methods to save us one switch. */ + enum Item_result item_result_type; + enum Type item_type; + + /* + Used when this item is used in a temporary table. + This is NOT placeholder metadata sent to client, as this value + is assigned after sending metadata (in setup_one_conversion_function). + For example in case of 'SELECT ?' you'll get MYSQL_TYPE_STRING both + in result set and placeholders metadata, no matter what type you will + supply for this placeholder in mysql_stmt_execute. 
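// Client-side counterpart of Item_param, as a hedged sketch from memory of
// the C API (not taken from this patch): each '?' in the statement text
// becomes one MYSQL_BIND whose typecode and buffer are supplied at execute
// time. The table name 't' is illustrative; error handling is omitted.

#include <mysql.h>
#include <string.h>

void query_with_placeholder(MYSQL *conn)
{
  const char *query= "SELECT id FROM t WHERE id > ?";
  MYSQL_STMT *stmt= mysql_stmt_init(conn);
  mysql_stmt_prepare(stmt, query, strlen(query));

  int value= 42;
  MYSQL_BIND bind[1];
  memset(bind, 0, sizeof(bind));
  bind[0].buffer_type= MYSQL_TYPE_LONG;   // the typecode the server sees at
  bind[0].buffer= (char*) &value;         // mysql_stmt_execute time

  mysql_stmt_bind_param(stmt, bind);
  mysql_stmt_execute(stmt);
  mysql_stmt_close(stmt);
}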
+ */ + enum enum_field_types param_type; + /* + Offset of placeholder inside statement text. Used to create + no-placeholders version of this statement for the binary log. + */ + uint pos_in_query; + + Item_param(uint pos_in_query_arg); + + enum Item_result result_type () const { return item_result_type; } + enum Type type() const { return item_type; } + enum_field_types field_type() const { return param_type; } + + double val(); + longlong val_int(); + String *val_str(String*); + bool get_time(TIME *tm); + bool get_date(TIME *tm, uint fuzzydate); + int save_in_field(Field *field, bool no_conversions); + + void set_null(); + void set_int(longlong i, uint32 max_length_arg); + void set_double(double i); + bool set_str(const char *str, ulong length); + bool set_longdata(const char *str, ulong length); + void set_time(TIME *tm, timestamp_type type, uint32 max_length_arg); + bool set_from_user_var(THD *thd, const user_var_entry *entry); + void reset(); + /* + Assign placeholder value from bind data. + Note, that 'len' has different semantics in embedded library (as we + don't need to check that packet is not broken there). See + sql_prepare.cc for details. + */ + void (*set_param_func)(Item_param *param, uchar **pos, ulong len); + + const String *query_val_str(String *str) const; + + bool convert_str_value(THD *thd); + + /* + If value for parameter was not set we treat it as non-const + so noone will use parameters value in fix_fields still + parameter is constant during execution. + */ + virtual table_map used_tables() const + { return state != NO_VALUE ? (table_map)0 : PARAM_TABLE_BIT; } + void print(String *str) { str->append('?'); } + bool is_null() + { DBUG_ASSERT(state != NO_VALUE); return state == NULL_VALUE; } + bool basic_const_item() const; + /* + This method is used to make a copy of a basic constant item when + propagating constants in the optimizer. The reason to create a new + item and not use the existing one is not precisely known (2005/04/16). + Probably we are trying to preserve tree structure of items, in other + words, avoid pointing at one item from two different nodes of the tree. + Return a new basic constant item if parameter value is a basic + constant, assert otherwise. This method is called only if + basic_const_item returned TRUE. + */ + Item *new_item(); + Item *safe_charset_converter(CHARSET_INFO *tocs); + /* + Implement by-value equality evaluation if parameter value + is set and is a basic constant (integer, real or string). + Otherwise return FALSE. + */ + bool eq(const Item *item, bool binary_cmp) const; +}; + + +class Item_int :public Item_num +{ +public: + longlong value; Item_int(int32 i,uint length=11) :value((longlong) i) - { max_length=length;} + { max_length=length; fixed= 1; } #ifdef HAVE_LONG_LONG Item_int(longlong i,uint length=21) :value(i) - { max_length=length;} + { max_length=length; fixed= 1;} #endif Item_int(const char *str_arg,longlong i,uint length) :value(i) - { max_length=length; name=(char*) str_arg;} - Item_int(const char *str_arg) : - value(str_arg[0] == '-' ? 
strtoll(str_arg,(char**) 0,10) : - (longlong) strtoull(str_arg,(char**) 0,10)) - { max_length= (uint) strlen(str_arg); name=(char*) str_arg;} + { max_length=length; name=(char*) str_arg; fixed= 1; } + Item_int(const char *str_arg, uint length=64); enum Type type() const { return INT_ITEM; } - virtual enum Item_result result_type () const { return INT_RESULT; } - longlong val_int() { return value; } - double val() { return (double) value; } + enum Item_result result_type () const { return INT_RESULT; } + enum_field_types field_type() const { return MYSQL_TYPE_LONGLONG; } + longlong val_int() { DBUG_ASSERT(fixed == 1); return value; } + double val() { DBUG_ASSERT(fixed == 1); return (double) value; } String *val_str(String*); - void make_field(Send_field *field); - bool save_in_field(Field *field, bool no_conversions); + int save_in_field(Field *field, bool no_conversions); bool basic_const_item() const { return 1; } Item *new_item() { return new Item_int(name,value,max_length); } + // to prevent drop fixed flag (no need parent cleanup call) + void cleanup() {} void print(String *str); - unsigned int size_of() { return sizeof(*this);} + Item_num *neg() { value= -value; return this; } + bool eq(const Item *, bool binary_cmp) const; }; class Item_uint :public Item_int { public: - Item_uint(const char *str_arg, uint length) : - Item_int(str_arg, (longlong) strtoull(str_arg,(char**) 0,10), length) {} - Item_uint(uint32 i) :Item_int((longlong) i, 10) {} - double val() { return ulonglong2double(value); } + Item_uint(const char *str_arg, uint length); + Item_uint(const char *str_arg, longlong i, uint length); + Item_uint(uint32 i) :Item_int((longlong) i, 10) + { unsigned_flag= 1; } + double val() + { DBUG_ASSERT(fixed == 1); return ulonglong2double((ulonglong)value); } String *val_str(String*); - void make_field(Send_field *field); Item *new_item() { return new Item_uint(name,max_length); } - bool fix_fields(THD *thd,struct st_table_list *table_list) - { - unsigned_flag= 1; - return 0; - } + int save_in_field(Field *field, bool no_conversions); void print(String *str); - unsigned int size_of() { return sizeof(*this);} + Item_num *neg (); }; -class Item_real :public Item +class Item_real :public Item_num { public: - const double value; + double value; // Item_real() :value(0) {} - Item_real(const char *str_arg,uint length) :value(atof(str_arg)) + Item_real(const char *str_arg, uint length) :value(my_atof(str_arg)) { name=(char*) str_arg; decimals=(uint8) nr_of_decimals(str_arg); max_length=length; + fixed= 1; } Item_real(const char *str,double val_arg,uint decimal_par,uint length) :value(val_arg) @@ -257,17 +693,33 @@ public: name=(char*) str; decimals=(uint8) decimal_par; max_length=length; + fixed= 1; } - Item_real(double value_par) :value(value_par) {} - bool save_in_field(Field *field, bool no_conversions); + Item_real(double value_par) :value(value_par) { fixed= 1; } + int save_in_field(Field *field, bool no_conversions); enum Type type() const { return REAL_ITEM; } - double val() { return value; } - longlong val_int() { return (longlong) (value+(value > 0 ? 0.5 : -0.5));} + enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE; } + double val() { DBUG_ASSERT(fixed == 1); return value; } + longlong val_int() + { + DBUG_ASSERT(fixed == 1); + if (value <= (double) LONGLONG_MIN) + { + return LONGLONG_MIN; + } + else if (value >= (double) (ulonglong) LONGLONG_MAX) + { + return LONGLONG_MAX; + } + return (longlong) (value+(value > 0 ? 
0.5 : -0.5)); + } String *val_str(String*); - void make_field(Send_field *field); bool basic_const_item() const { return 1; } + // to prevent drop fixed flag (no need parent cleanup call) + void cleanup() {} Item *new_item() { return new Item_real(name,value,decimals,max_length); } - unsigned int size_of() { return sizeof(*this);} + Item_num *neg() { value= -value; return this; } + bool eq(const Item *, bool binary_cmp) const; }; @@ -279,98 +731,131 @@ public: decimals=NOT_FIXED_DEC; max_length=DBL_DIG+8; } - unsigned int size_of() { return sizeof(*this);} }; class Item_string :public Item { public: - Item_string(const char *str,uint length) + Item_string(const char *str,uint length, + CHARSET_INFO *cs, Derivation dv= DERIVATION_COERCIBLE) { - str_value.set(str,length); - max_length=length; - name=(char*) str_value.ptr(); + collation.set(cs, dv); + str_value.set_or_copy_aligned(str,length,cs); + /* + We have to have a different max_length than 'length' here to + ensure that we get the right length if we do use the item + to create a new table. In this case max_length must be the maximum + number of chars for a string of this type because we in create_field:: + divide the max_length with mbmaxlen). + */ + max_length= str_value.numchars()*cs->mbmaxlen; + set_name(str, length, cs); decimals=NOT_FIXED_DEC; + // it is constant => can be used without fix_fields (and frequently used) + fixed= 1; } - Item_string(const char *name_par,const char *str,uint length) + Item_string(const char *name_par, const char *str, uint length, + CHARSET_INFO *cs, Derivation dv= DERIVATION_COERCIBLE) { - str_value.set(str,length); - max_length=length; - name=(char*) name_par; + collation.set(cs, dv); + str_value.set_or_copy_aligned(str,length,cs); + max_length= str_value.numchars()*cs->mbmaxlen; + set_name(name_par,0,cs); decimals=NOT_FIXED_DEC; + // it is constant => can be used without fix_fields (and frequently used) + fixed= 1; } - ~Item_string() {} enum Type type() const { return STRING_ITEM; } - double val() { return atof(str_value.ptr()); } - longlong val_int() { return strtoll(str_value.ptr(),(char**) 0,10); } - String *val_str(String*) { return (String*) &str_value; } - bool save_in_field(Field *field, bool no_conversions); - void make_field(Send_field *field); + double val() + { + DBUG_ASSERT(fixed == 1); + int err; + char *end_not_used; + return my_strntod(str_value.charset(), (char*) str_value.ptr(), + str_value.length(), &end_not_used, &err); + } + longlong val_int() + { + DBUG_ASSERT(fixed == 1); + int err; + return my_strntoll(str_value.charset(), str_value.ptr(), + str_value.length(), 10, (char**) 0, &err); + } + String *val_str(String*) + { + DBUG_ASSERT(fixed == 1); + return (String*) &str_value; + } + int save_in_field(Field *field, bool no_conversions); enum Item_result result_type () const { return STRING_RESULT; } + enum_field_types field_type() const { return MYSQL_TYPE_STRING; } bool basic_const_item() const { return 1; } bool eq(const Item *item, bool binary_cmp) const; - Item *new_item() { return new Item_string(name,str_value.ptr(),max_length); } - String *const_string() { return &str_value; } - inline void append(char *str,uint length) { str_value.append(str,length); } - void print(String *str); - unsigned int size_of() { return sizeof(*this);} -}; - - -/* For INSERT ... 
VALUES (DEFAULT) */ - -class Item_default :public Item -{ -public: - Item_default() { name= (char*) "DEFAULT"; } - enum Type type() const { return DEFAULT_ITEM; } - void make_field(Send_field *field) {} - bool save_in_field(Field *field, bool no_conversions) + Item *new_item() { - field->set_default(); - return 0; + return new Item_string(name, str_value.ptr(), + str_value.length(), collation.collation); } - virtual double val() { return 0.0; } - virtual longlong val_int() { return 0; } - virtual String *val_str(String *str) { return 0; } - bool basic_const_item() const { return 1; } - unsigned int size_of() { return sizeof(*this);} + Item *safe_charset_converter(CHARSET_INFO *tocs); + String *const_string() { return &str_value; } + inline void append(char *str, uint length) { str_value.append(str, length); } + void print(String *str); + // to prevent drop fixed flag (no need parent cleanup call) + void cleanup() {} }; - /* for show tables */ class Item_datetime :public Item_string { public: - Item_datetime(const char *item_name): Item_string(item_name,"",0) + Item_datetime(const char *item_name): Item_string(item_name,"",0, + &my_charset_bin) { max_length=19;} - void make_field(Send_field *field); - unsigned int size_of() { return sizeof(*this);} + enum_field_types field_type() const { return MYSQL_TYPE_DATETIME; } }; class Item_empty_string :public Item_string { public: - Item_empty_string(const char *header,uint length) :Item_string("",0) - { name=(char*) header; max_length=length;} + Item_empty_string(const char *header,uint length, CHARSET_INFO *cs= NULL) : + Item_string("",0, cs ? cs : &my_charset_bin) + { name=(char*) header; max_length= cs ? length * cs->mbmaxlen : length; } void make_field(Send_field *field); - unsigned int size_of() { return sizeof(*this);} }; +class Item_return_int :public Item_int +{ + enum_field_types int_field_type; +public: + Item_return_int(const char *name, uint length, + enum_field_types field_type_arg) + :Item_int(name, 0, length), int_field_type(field_type_arg) + { + unsigned_flag=1; + } + enum_field_types field_type() const { return int_field_type; } +}; + + class Item_varbinary :public Item { public: Item_varbinary(const char *str,uint str_length); - ~Item_varbinary() {} enum Type type() const { return VARBIN_ITEM; } - double val() { return (double) Item_varbinary::val_int(); } + double val() + { DBUG_ASSERT(fixed == 1); return (double) Item_varbinary::val_int(); } longlong val_int(); - String *val_str(String*) { return &str_value; } - bool save_in_field(Field *field, bool no_conversions); - void make_field(Send_field *field); - enum Item_result result_type () const; - unsigned int size_of() { return sizeof(*this);} + bool basic_const_item() const { return 1; } + String *val_str(String*) { DBUG_ASSERT(fixed == 1); return &str_value; } + int save_in_field(Field *field, bool no_conversions); + enum Item_result result_type () const { return STRING_RESULT; } + enum Item_result cast_to_int_type() const { return INT_RESULT; } + enum_field_types field_type() const { return MYSQL_TYPE_STRING; } + // to prevent drop fixed flag (no need parent cleanup call) + void cleanup() {} + bool eq(const Item *item, bool binary_cmp) const; + virtual Item *safe_charset_converter(CHARSET_INFO *tocs); }; @@ -379,94 +864,207 @@ class Item_result_field :public Item /* Item with result field */ public: Field *result_field; /* Save result here */ Item_result_field() :result_field(0) {} + // Constructor used for Item_sum/Item_cond_and/or (see Item comment) + Item_result_field(THD 
*thd, Item_result_field *item): + Item(thd, item), result_field(item->result_field) + {} ~Item_result_field() {} /* Required with gcc 2.95 */ - Field *tmp_table_field(TABLE *t_arg=(TABLE *)0) { return result_field; } + Field *get_tmp_table_field() { return result_field; } + Field *tmp_table_field(TABLE *t_arg) { return result_field; } table_map used_tables() const { return 1; } virtual void fix_length_and_dec()=0; - unsigned int size_of() { return sizeof(*this);} void set_result_field(Field *field) { result_field= field; } bool is_result_field() { return 1; } void save_in_result_field(bool no_conversions) { save_in_field(result_field, no_conversions); } + void cleanup(); }; class Item_ref :public Item_ident { +protected: + void set_properties(); public: - Field *result_field; /* Save result here */ + Field *result_field; /* Save result here */ Item **ref; - Item_ref(char *db_par,char *table_name_par,char *field_name_par) - :Item_ident(db_par,table_name_par,field_name_par),ref(0) {} - Item_ref(Item **item, char *table_name_par,char *field_name_par) - :Item_ident(NullS,table_name_par,field_name_par),ref(item) + Item_ref(const char *db_par, const char *table_name_par, + const char *field_name_par) + :Item_ident(db_par, table_name_par, field_name_par), ref(0) {} + /* + This constructor is used in two scenarios: + A) *item = NULL + No initialization is performed, fix_fields() call will be necessary. + + B) *item points to an Item this Item_ref will refer to. This is + used for GROUP BY. fix_fields() will not be called in this case, + so we call set_properties to make this item "fixed". set_properties + performs a subset of action Item_ref::fix_fields does, and this subset + is enough for Item_ref's used in GROUP BY. + + TODO we probably fix a superset of problems like in BUG#6658. Check this + with Bar, and if we have a more broader set of problems like this. + */ + Item_ref(Item **item, const char *table_name_par, const char *field_name_par) + :Item_ident(NullS, table_name_par, field_name_par), ref(item) { - /* - This ctor is called from Item_XXX::split_sum_func, and fix_fields will - not be called for *this, so we must setup everything here. **ref is - already fixed at this point. 
- */ - max_length= (*ref)->max_length; - decimals= (*ref)->decimals; - binary= (*ref)->binary; - with_sum_func= (*ref)->with_sum_func; - maybe_null= (*ref)->maybe_null; + DBUG_ASSERT(item); + if (*item) + set_properties(); } + + /* Constructor need to process subselect with temporary tables (see Item) */ + Item_ref(THD *thd, Item_ref *item) :Item_ident(thd, item), ref(item->ref) {} enum Type type() const { return REF_ITEM; } bool eq(const Item *item, bool binary_cmp) const - { return (*ref)->eq(item, binary_cmp); } - ~Item_ref() { if (ref) delete *ref; } + { return ref && (*ref)->eq(item, binary_cmp); } double val() { + DBUG_ASSERT(fixed); double tmp=(*ref)->val_result(); null_value=(*ref)->null_value; return tmp; } longlong val_int() { + DBUG_ASSERT(fixed); longlong tmp=(*ref)->val_int_result(); null_value=(*ref)->null_value; return tmp; } String *val_str(String* tmp) { + DBUG_ASSERT(fixed); tmp=(*ref)->str_result(tmp); null_value=(*ref)->null_value; return tmp; } bool is_null() { + DBUG_ASSERT(fixed); (void) (*ref)->val_int_result(); return (*ref)->null_value; } - bool get_date(TIME *ltime,bool fuzzydate) + bool get_date(TIME *ltime,uint fuzzydate) { + DBUG_ASSERT(fixed); return (null_value=(*ref)->get_date_result(ltime,fuzzydate)); } - bool send(THD *thd, String *tmp) { return (*ref)->send(thd, tmp); } + bool send(Protocol *prot, String *tmp){ return (*ref)->send(prot, tmp); } void make_field(Send_field *field) { (*ref)->make_field(field); } - bool fix_fields(THD *,struct st_table_list *); - bool save_in_field(Field *field, bool no_conversions) + bool fix_fields(THD *, struct st_table_list *, Item **); + int save_in_field(Field *field, bool no_conversions) { return (*ref)->save_in_field(field, no_conversions); } void save_org_in_field(Field *field) { (*ref)->save_org_in_field(field); } enum Item_result result_type () const { return (*ref)->result_type(); } - table_map used_tables() const { return (*ref)->used_tables(); } - unsigned int size_of() { return sizeof(*this);} - void set_result_field(Field *field) { result_field= field; } + enum_field_types field_type() const { return (*ref)->field_type(); } + Field *get_tmp_table_field() { return result_field; } + table_map used_tables() const + { + return depended_from ? OUTER_REF_TABLE_BIT : (*ref)->used_tables(); + } + void set_result_field(Field *field) { result_field= field; } bool is_result_field() { return 1; } void save_in_result_field(bool no_conversions) { (*ref)->save_in_field(result_field, no_conversions); } + Item *real_item() { return *ref; } + void print(String *str); +}; + + +/* + The same as Item_ref, but get value from val_* family of method to get + value of item on which it referred instead of result* family. 
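// Both flavours of reference forward through an Item **, not a plain Item *:
// the referenced slot (say, an entry in the select list) may be replaced
// during fixing/optimization, and every reference must observe the
// replacement. Minimal standalone sketch, not the server's classes.

struct ToyVal
{
  virtual ~ToyVal() {}
  virtual long value()= 0;
};

struct ToyInt :public ToyVal
{
  long v;
  ToyInt(long v_arg) :v(v_arg) {}
  long value() { return v; }
};

struct ToyRef :public ToyVal
{
  ToyVal **slot;                              // points at the owner's slot
  ToyRef(ToyVal **slot_arg) :slot(slot_arg) {}
  long value() { return (*slot)->value(); }   // always the current item
};

// Usage:  ToyInt a(1), b(2);  ToyVal *slot= &a;  ToyRef ref(&slot);
//         ref.value();   // 1
//         slot= &b;      // item replaced behind the reference's back
//         ref.value();   // 2, without touching the reference itself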
+*/ +class Item_direct_ref :public Item_ref +{ +public: + Item_direct_ref(Item **item, const char *table_name_par, + const char *field_name_par) + :Item_ref(item, table_name_par, field_name_par) {} + /* Constructor need to process subselect with temporary tables (see Item) */ + Item_direct_ref(THD *thd, Item_direct_ref *item) : Item_ref(thd, item) {} + + double val() + { + double tmp=(*ref)->val(); + null_value=(*ref)->null_value; + return tmp; + } + longlong val_int() + { + longlong tmp=(*ref)->val_int(); + null_value=(*ref)->null_value; + return tmp; + } + String *val_str(String* tmp) + { + tmp=(*ref)->val_str(tmp); + null_value=(*ref)->null_value; + return tmp; + } + bool is_null() + { + (void) (*ref)->val_int(); + return (*ref)->null_value; + } + bool get_date(TIME *ltime,uint fuzzydate) + { + return (null_value=(*ref)->get_date(ltime,fuzzydate)); + } +}; + + +class Item_in_subselect; + +class Item_ref_null_helper: public Item_ref +{ +protected: + Item_in_subselect* owner; +public: + Item_ref_null_helper(Item_in_subselect* master, Item **item, + const char *table_name_par, const char *field_name_par): + Item_ref(item, table_name_par, field_name_par), owner(master) {} + double val(); + longlong val_int(); + String* val_str(String* s); + bool get_date(TIME *ltime, uint fuzzydate); + void print(String *str); + /* + we add RAND_TABLE_BIT to prevent moving this item from HAVING to WHERE + */ + table_map used_tables() const + { + return (depended_from ? + OUTER_REF_TABLE_BIT : + (*ref)->used_tables() | RAND_TABLE_BIT); + } +}; + +class Item_null_helper :public Item_ref_null_helper +{ + Item *store; +public: + Item_null_helper(Item_in_subselect* master, Item *item, + const char *table_name_par, const char *field_name_par) + :Item_ref_null_helper(master, &item, table_name_par, field_name_par), + store(item) + { ref= &store; } + void print(String *str); }; /* - The following class is used to optimize comparing of date columns - We need to save the original item, to be able to set the field to the - original value in 'opt_range'. + The following class is used to optimize comparing of date and bigint columns + We need to save the original item ('ref') to be able to call + ref->save_in_field(). This is used to create index search keys. + + An instance of Item_int_with_ref may have signed or unsigned integer value. 
+ */ class Item_int_with_ref :public Item_int @@ -474,24 +1072,32 @@ class Item_int_with_ref :public Item_int Item *ref; public: Item_int_with_ref(longlong i, Item *ref_arg) :Item_int(i), ref(ref_arg) - {} - bool save_in_field(Field *field, bool no_conversions) + { + unsigned_flag= ref_arg->unsigned_flag; + } + int save_in_field(Field *field, bool no_conversions) { return ref->save_in_field(field, no_conversions); } - unsigned int size_of() { return sizeof(*this);} + Item *new_item(); }; +#include "gstream.h" +#include "spatial.h" #include "item_sum.h" #include "item_func.h" +#include "item_row.h" #include "item_cmpfunc.h" #include "item_strfunc.h" +#include "item_geofunc.h" #include "item_timefunc.h" #include "item_uniq.h" +#include "item_subselect.h" class Item_copy_string :public Item { + enum enum_field_types cached_field_type; public: Item *item; Item_copy_string(Item *i) :item(i) @@ -500,22 +1106,31 @@ public: decimals=item->decimals; max_length=item->max_length; name=item->name; + cached_field_type= item->field_type(); } - ~Item_copy_string() { delete item; } enum Type type() const { return COPY_STR_ITEM; } enum Item_result result_type () const { return STRING_RESULT; } + enum_field_types field_type() const { return cached_field_type; } double val() - { return null_value ? 0.0 : atof(str_value.c_ptr()); } + { + int err; + char *end_not_used; + return (null_value ? 0.0 : + my_strntod(str_value.charset(), (char*) str_value.ptr(), + str_value.length(), &end_not_used, &err)); + } longlong val_int() - { return null_value ? LL(0) : strtoll(str_value.c_ptr(),(char**) 0,10); } + { + int err; + return null_value ? LL(0) : my_strntoll(str_value.charset(),str_value.ptr(),str_value.length(),10, (char**) 0,&err); + } String *val_str(String*); void make_field(Send_field *field) { item->make_field(field); } void copy(); - bool save_in_field(Field *field, bool no_conversions); + int save_in_field(Field *field, bool no_conversions); table_map used_tables() const { return (table_map) 1L; } bool const_item() const { return 0; } bool is_null() { return null_value; } - unsigned int size_of() { return sizeof(*this);} }; @@ -526,7 +1141,6 @@ public: Item_buff() :null_value(0) {} virtual bool cmp(void)=0; virtual ~Item_buff(); /*line -e1509 */ - unsigned int size_of() { return sizeof(*this);} }; class Item_str_buff :public Item_buff @@ -534,10 +1148,9 @@ class Item_str_buff :public Item_buff Item *item; String value,tmp_value; public: - Item_str_buff(Item *arg) :item(arg),value(arg->max_length) {} + Item_str_buff(THD *thd, Item *arg); bool cmp(void); ~Item_str_buff(); // Deallocate String:s - unsigned int size_of() { return sizeof(*this);} }; @@ -548,7 +1161,6 @@ class Item_real_buff :public Item_buff public: Item_real_buff(Item *item_par) :item(item_par),value(0.0) {} bool cmp(void); - unsigned int size_of() { return sizeof(*this);} }; class Item_int_buff :public Item_buff @@ -558,7 +1170,6 @@ class Item_int_buff :public Item_buff public: Item_int_buff(Item *item_par) :item(item_par),value(0) {} bool cmp(void); - unsigned int size_of() { return sizeof(*this);} }; @@ -575,10 +1186,236 @@ public: buff= (char*) sql_calloc(length=field->pack_length()); } bool cmp(void); - unsigned int size_of() { return sizeof(*this);} }; -extern Item_buff *new_Item_buff(Item *item); +class Item_default_value : public Item_field +{ +public: + Item *arg; + Item_default_value() : + Item_field((const char *)NULL, (const char *)NULL, (const char *)NULL), arg(NULL) {} + Item_default_value(Item *a) : + Item_field((const char 
*)NULL, (const char *)NULL, (const char *)NULL), arg(a) {} + enum Type type() const { return DEFAULT_VALUE_ITEM; } + bool eq(const Item *item, bool binary_cmp) const; + bool fix_fields(THD *, struct st_table_list *, Item **); + void print(String *str); + int save_in_field(Field *field_arg, bool no_conversions) + { + if (!arg) + { + field_arg->set_default(); + return 0; + } + return Item_field::save_in_field(field_arg, no_conversions); + } + table_map used_tables() const { return (table_map)0L; } + + bool walk(Item_processor processor, byte *args) + { + return arg->walk(processor, args) || + (this->*processor)(args); + } +}; + +class Item_insert_value : public Item_field +{ +public: + Item *arg; + Item_insert_value(Item *a) : + Item_field((const char *)NULL, (const char *)NULL, (const char *)NULL), arg(a) {} + bool eq(const Item *item, bool binary_cmp) const; + bool fix_fields(THD *, struct st_table_list *, Item **); + void print(String *str); + int save_in_field(Field *field_arg, bool no_conversions) + { + return Item_field::save_in_field(field_arg, no_conversions); + } + table_map used_tables() const { return (table_map)0L; } + + bool walk(Item_processor processor, byte *args) + { + return arg->walk(processor, args) || + (this->*processor)(args); + } +}; + +class Item_cache: public Item +{ +protected: + Item *example; + table_map used_table_map; +public: + Item_cache(): example(0), used_table_map(0) {fixed= 1; null_value= 1;} + + void set_used_tables(table_map map) { used_table_map= map; } + + virtual bool allocate(uint i) { return 0; } + virtual bool setup(Item *item) + { + example= item; + max_length= item->max_length; + decimals= item->decimals; + collation.set(item->collation); + return 0; + }; + virtual void store(Item *)= 0; + enum Type type() const { return CACHE_ITEM; } + static Item_cache* get_cache(Item_result type); + table_map used_tables() const { return used_table_map; } + virtual void keep_array() {} + // to prevent drop fixed flag (no need parent cleanup call) + void cleanup() {} + void print(String *str); +}; + +class Item_cache_int: public Item_cache +{ + longlong value; +public: + Item_cache_int(): Item_cache(), value(0) {} + + void store(Item *item); + double val() { DBUG_ASSERT(fixed == 1); return (double) value; } + longlong val_int() { DBUG_ASSERT(fixed == 1); return value; } + String* val_str(String *str) + { + DBUG_ASSERT(fixed == 1); + str->set(value, default_charset()); + return str; + } + enum Item_result result_type() const { return INT_RESULT; } +}; + +class Item_cache_real: public Item_cache +{ + double value; +public: + Item_cache_real(): Item_cache(), value(0) {} + + void store(Item *item); + double val() { DBUG_ASSERT(fixed == 1); return value; } + longlong val_int() + { + DBUG_ASSERT(fixed == 1); + return (longlong) (value+(value > 0 ? 
0.5 : -0.5)); + } + String* val_str(String *str) + { + str->set(value, decimals, default_charset()); + return str; + } + enum Item_result result_type() const { return REAL_RESULT; } +}; + +class Item_cache_str: public Item_cache +{ + char buffer[80]; + String *value, value_buff; +public: + Item_cache_str(): Item_cache(), value(0) { } + + void store(Item *item); + double val(); + longlong val_int(); + String* val_str(String *) { DBUG_ASSERT(fixed == 1); return value; } + enum Item_result result_type() const { return STRING_RESULT; } + CHARSET_INFO *charset() const { return value->charset(); }; +}; + +class Item_cache_row: public Item_cache +{ + Item_cache **values; + uint item_count; + bool save_array; +public: + Item_cache_row() + :Item_cache(), values(0), item_count(2), save_array(0) {} + + /* + 'allocate' used only in row transformer, to preallocate space for row + cache. + */ + bool allocate(uint num); + /* + 'setup' is needed only by row => it not called by simple row subselect + (only by IN subselect (in subselect optimizer)) + */ + bool setup(Item *item); + void store(Item *item); + void illegal_method_call(const char *); + void make_field(Send_field *) + { + illegal_method_call((const char*)"make_field"); + }; + double val() + { + illegal_method_call((const char*)"val"); + return 0; + }; + longlong val_int() + { + illegal_method_call((const char*)"val_int"); + return 0; + }; + String *val_str(String *) + { + illegal_method_call((const char*)"val_str"); + return 0; + }; + enum Item_result result_type() const { return ROW_RESULT; } + + uint cols() { return item_count; } + Item* el(uint i) { return values[i]; } + Item** addr(uint i) { return (Item **) (values + i); } + bool check_cols(uint c); + bool null_inside(); + void bring_value(); + void keep_array() { save_array= 1; } + void cleanup() + { + DBUG_ENTER("Item_cache_row::cleanup"); + Item_cache::cleanup(); + if (save_array) + bzero(values, item_count*sizeof(Item**)); + else + values= 0; + DBUG_VOID_RETURN; + } +}; + + +/* + Item_type_holder used to store type. name, length of Item for UNIONS & + derived tables. + + Item_type_holder do not need cleanup() because its time of live limited by + single SP/PS execution. 
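// Standalone sketch of the per-column aggregation join_types() performs for
// a UNION (see the item.cc hunk at the top of this change): widen the
// display length, keep the largest number of decimals and OR the nullability
// over all SELECT branches. Field::field_type_merge() itself is omitted;
// names here are illustrative only.

#include <algorithm>                    // std::max, for this standalone sketch

struct ToyColumnMeta
{
  unsigned max_length;
  unsigned decimals;
  bool maybe_null;
  ToyColumnMeta() :max_length(0), decimals(0), maybe_null(false) {}

  // Called once per SELECT branch of the UNION, cf. join_types().
  void join(unsigned branch_length, unsigned branch_decimals,
            bool branch_nullable)
  {
    max_length= std::max(max_length, branch_length);
    decimals=   std::max(decimals, branch_decimals);
    maybe_null= maybe_null || branch_nullable;
  }
};

// e.g. SELECT int_col ... UNION SELECT varchar40_col:
//   ToyColumnMeta m;
//   m.join(11, 0, false);   // INT: display_length() gives 11
//   m.join(40, 0, true);    // VARCHAR(40), nullable
//   // m.max_length == 40, m.maybe_null == true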
+*/ +class Item_type_holder: public Item +{ +protected: + TYPELIB *enum_set_typelib; + enum_field_types fld_type; + + void get_full_info(Item *item); +public: + Item_type_holder(THD*, Item*); + + Item_result result_type() const; + virtual enum_field_types field_type() const { return fld_type; }; + enum Type type() const { return TYPE_HOLDER; } + double val(); + longlong val_int(); + String *val_str(String*); + bool join_types(THD *thd, Item *); + Field *make_field_by_type(TABLE *table); + static uint32 display_length(Item *item); + static enum_field_types get_real_type(Item *); +}; + + +extern Item_buff *new_Item_buff(THD *thd, Item *item); extern Item_result item_cmp_type(Item_result a,Item_result b); -extern Item *resolve_const_item(Item *item,Item *cmp_item); +extern void resolve_const_item(THD *thd, Item **ref, Item *cmp_item); extern bool field_is_equal_to_item(Field *field,Item *item); diff --git a/sql/item_buff.cc b/sql/item_buff.cc index b55a4dc66a0..8298ce2cfb7 100644 --- a/sql/item_buff.cc +++ b/sql/item_buff.cc @@ -23,13 +23,13 @@ ** Create right type of item_buffer for an item */ -Item_buff *new_Item_buff(Item *item) +Item_buff *new_Item_buff(THD *thd, Item *item) { if (item->type() == Item::FIELD_ITEM && !(((Item_field *) item)->field->flags & BLOB_FLAG)) return new Item_field_buff((Item_field *) item); if (item->result_type() == STRING_RESULT) - return new Item_str_buff((Item_field *) item); + return new Item_str_buff(thd, (Item_field *) item); if (item->result_type() == INT_RESULT) return new Item_int_buff((Item_field *) item); return new Item_real_buff(item); @@ -42,12 +42,17 @@ Item_buff::~Item_buff() {} ** Return true if values have changed */ +Item_str_buff::Item_str_buff(THD *thd, Item *arg) + :item(arg), value(min(arg->max_length, thd->variables.max_sort_length)) +{} + bool Item_str_buff::cmp(void) { String *res; bool tmp; - res=item->val_str(&tmp_value); + if ((res=item->val_str(&tmp_value))) + res->length(min(res->length(), value.alloced_length())); if (null_value != item->null_value) { if ((null_value= item->null_value)) @@ -56,10 +61,8 @@ bool Item_str_buff::cmp(void) } else if (null_value) return 0; // new and old value was null - else if (!item->binary) - tmp= sortcmp(&value,res) != 0; else - tmp= stringcmp(&value,res) != 0; + tmp= sortcmp(&value,res,item->collation.collation) != 0; if (tmp) value.copy(*res); // Remember for next cmp return tmp; @@ -99,7 +102,7 @@ bool Item_field_buff::cmp(void) { bool tmp= field->cmp(buff) != 0; // This is not a blob! 
if (tmp) - field->get_image(buff,length); + field->get_image(buff,length,field->charset()); if (null_value != field->is_null()) { null_value= !null_value; diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index db6c4d9789b..3c41fb56d89 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -1,4 +1,4 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2003 MySQL AB & MySQL Finland AB & TCX DataKonsult AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -17,12 +17,99 @@ /* This file defines all compare functions */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_IMPLEMENTATION #pragma implementation // gcc: Class implementation #endif #include "mysql_priv.h" #include <m_ctype.h> +#include "sql_select.h" + +static Item_result item_store_type(Item_result a,Item_result b) +{ + if (a == STRING_RESULT || b == STRING_RESULT) + return STRING_RESULT; + else if (a == REAL_RESULT || b == REAL_RESULT) + return REAL_RESULT; + else + return INT_RESULT; +} + +static void agg_result_type(Item_result *type, Item **items, uint nitems) +{ + Item **item, **item_end; + + *type= STRING_RESULT; + /* Skip beginning NULL items */ + for (item= items, item_end= item + nitems; item < item_end; item++) + { + if ((*item)->type() != Item::NULL_ITEM) + { + *type= (*item)->result_type(); + item++; + break; + } + } + /* Combine result types. Note: NULL items don't affect the result */ + for (; item < item_end; item++) + { + if ((*item)->type() != Item::NULL_ITEM) + *type= item_store_type(type[0], (*item)->result_type()); + } +} + +static void agg_cmp_type(Item_result *type, Item **items, uint nitems) +{ + uint i; + type[0]= items[0]->result_type(); + for (i=1 ; i < nitems ; i++) + type[0]= item_cmp_type(type[0], items[i]->result_type()); +} + +static void my_coll_agg_error(DTCollation &c1, DTCollation &c2, + const char *fname) +{ + my_error(ER_CANT_AGGREGATE_2COLLATIONS,MYF(0), + c1.collation->name,c1.derivation_name(), + c2.collation->name,c2.derivation_name(), + fname); +} + + +Item_bool_func2* Eq_creator::create(Item *a, Item *b) const +{ + return new Item_func_eq(a, b); +} + + +Item_bool_func2* Ne_creator::create(Item *a, Item *b) const +{ + return new Item_func_ne(a, b); +} + + +Item_bool_func2* Gt_creator::create(Item *a, Item *b) const +{ + return new Item_func_gt(a, b); +} + + +Item_bool_func2* Lt_creator::create(Item *a, Item *b) const +{ + return new Item_func_lt(a, b); +} + + +Item_bool_func2* Ge_creator::create(Item *a, Item *b) const +{ + return new Item_func_ge(a, b); +} + + +Item_bool_func2* Le_creator::create(Item *a, Item *b) const +{ + return new Item_func_le(a, b); +} /* Test functions @@ -32,28 +119,91 @@ longlong Item_func_not::val_int() { + DBUG_ASSERT(fixed == 1); double value=args[0]->val(); null_value=args[0]->null_value; - return !null_value && value == 0 ? 1 : 0; + return ((!null_value && value == 0) ? 1 : 0); +} + +/* + special NOT for ALL subquery +*/ + +longlong Item_func_not_all::val_int() +{ + DBUG_ASSERT(fixed == 1); + double value= args[0]->val(); + + /* + return TRUE if there was records in underlaying select in max/min + optimisation (ALL subquery) + */ + if (empty_underlying_subquery()) + return 1; + + null_value= args[0]->null_value; + return ((!null_value && value == 0) ? 
1 : 0); +} + + +bool Item_func_not_all::empty_underlying_subquery() +{ + return ((test_sum_item && !test_sum_item->any_value()) || + (test_sub_item && !test_sub_item->any_value())); +} + +void Item_func_not_all::print(String *str) +{ + if (show) + Item_func::print(str); + else + args[0]->print(str); } + +/* + Special NOP (No OPeration) for ALL subquery it is like Item_func_not_all + (return TRUE if underlaying sudquery do not return rows) but if subquery + returns some rows it return same value as argument (TRUE/FALSE). +*/ + +longlong Item_func_nop_all::val_int() +{ + DBUG_ASSERT(fixed == 1); + double value= args[0]->val(); + + /* + return FALSE if there was records in underlaying select in max/min + optimisation (SAME/ANY subquery) + */ + if (empty_underlying_subquery()) + return 0; + + null_value= args[0]->null_value; + return (null_value || value == 0) ? 0 : 1; +} + + /* Convert a constant expression or string to an integer. This is done when comparing DATE's of different formats and also when comparing bigint to strings (in which case the string is converted once to a bigint). + + RESULT VALUES + 0 Can't convert item + 1 Item was replaced with an integer version of the item */ -static bool convert_constant_item(Field *field, Item **item) +static bool convert_constant_item(THD *thd, Field *field, Item **item) { if ((*item)->const_item()) { - if (!(*item)->save_in_field(field, 1) && - !((*item)->null_value)) + if (!(*item)->save_in_field(field, 1) && !((*item)->null_value)) { Item *tmp=new Item_int_with_ref(field->val_int(), *item); if (tmp) - *item=tmp; + thd->change_item_tree(item, tmp); return 1; // Item was replaced } } @@ -63,23 +213,39 @@ static bool convert_constant_item(Field *field, Item **item) void Item_bool_func2::fix_length_and_dec() { - max_length=1; // Function returns 0 or 1 + max_length= 1; // Function returns 0 or 1 + THD *thd= current_thd; /* As some compare functions are generated after sql_yacc, - we have to check for out of memory conditons here + we have to check for out of memory conditions here */ if (!args[0] || !args[1]) return; + + DTCollation coll; + if (args[0]->result_type() == STRING_RESULT && + args[1]->result_type() == STRING_RESULT && + agg_arg_charsets(coll, args, 2, MY_COLL_CMP_CONV)) + return; + // Make a special case of compare with fields to get nicer DATE comparisons + + if (functype() == LIKE_FUNC) // Disable conversion in case of LIKE function. + { + set_cmp_func(); + return; + } + if (args[0]->type() == FIELD_ITEM) { Field *field=((Item_field*) args[0])->field; - if (field->store_for_compare()) + if (field->can_be_compared_as_longlong()) { - if (convert_constant_item(field,&args[1])) + if (convert_constant_item(thd, field,&args[1])) { - cmp_func= &Item_bool_func2::compare_int; // Works for all types. + cmp.set_cmp_func(this, tmp_arg, tmp_arg+1, + INT_RESULT); // Works for all types. return; } } @@ -87,174 +253,510 @@ void Item_bool_func2::fix_length_and_dec() if (args[1]->type() == FIELD_ITEM) { Field *field=((Item_field*) args[1])->field; - if (field->store_for_compare()) + if (field->can_be_compared_as_longlong()) { - if (convert_constant_item(field,&args[0])) + if (convert_constant_item(thd, field,&args[0])) { - cmp_func= &Item_bool_func2::compare_int; // Works for all types. + cmp.set_cmp_func(this, tmp_arg, tmp_arg+1, + INT_RESULT); // Works for all types. 
return; } } } - set_cmp_func(item_cmp_type(args[0]->result_type(),args[1]->result_type())); + set_cmp_func(); } -void Item_bool_func2::set_cmp_func(Item_result type) +int Arg_comparator::set_compare_func(Item_bool_func2 *item, Item_result type) { - switch (type) { - case STRING_RESULT: - cmp_func=&Item_bool_func2::compare_string; - break; - case REAL_RESULT: - cmp_func=&Item_bool_func2::compare_real; - break; - case INT_RESULT: - cmp_func=&Item_bool_func2::compare_int; - break; + owner= item; + func= comparator_matrix[type] + [test(owner->functype() == Item_func::EQUAL_FUNC)]; + if (type == ROW_RESULT) + { + uint n= (*a)->cols(); + if (n != (*b)->cols()) + { + my_error(ER_OPERAND_COLUMNS, MYF(0), n); + comparators= 0; + return 1; + } + if (!(comparators= new Arg_comparator[n])) + return 1; + for (uint i=0; i < n; i++) + { + if ((*a)->el(i)->cols() != (*b)->el(i)->cols()) + { + my_error(ER_OPERAND_COLUMNS, MYF(0), (*a)->el(i)->cols()); + return 1; + } + comparators[i].set_cmp_func(owner, (*a)->addr(i), (*b)->addr(i)); + } + } + else if (type == STRING_RESULT) + { + /* + We must set cmp_charset here as we may be called from for an automatic + generated item, like in natural join + */ + if (cmp_collation.set((*a)->collation, (*b)->collation) || + cmp_collation.derivation == DERIVATION_NONE) + { + my_coll_agg_error((*a)->collation, (*b)->collation, owner->func_name()); + return 1; + } + if (cmp_collation.collation == &my_charset_bin) + { + /* + We are using BLOB/BINARY/VARBINARY, change to compare byte by byte, + without removing end space + */ + if (func == &Arg_comparator::compare_string) + func= &Arg_comparator::compare_binary_string; + else if (func == &Arg_comparator::compare_e_string) + func= &Arg_comparator::compare_e_binary_string; + } } + else if (type == INT_RESULT) + { + if (func == &Arg_comparator::compare_int_signed) + { + if ((*a)->unsigned_flag) + func= ((*b)->unsigned_flag)? &Arg_comparator::compare_int_unsigned : + &Arg_comparator::compare_int_unsigned_signed; + else if ((*b)->unsigned_flag) + func= &Arg_comparator::compare_int_signed_unsigned; + } + else if (func== &Arg_comparator::compare_e_int) + { + if ((*a)->unsigned_flag ^ (*b)->unsigned_flag) + func= &Arg_comparator::compare_e_int_diff_signedness; + } + } + return 0; } -int Item_bool_func2::compare_string() +int Arg_comparator::compare_string() { String *res1,*res2; - if ((res1=args[0]->val_str(&tmp_value1))) + if ((res1= (*a)->val_str(&owner->tmp_value1))) { - if ((res2=args[1]->val_str(&tmp_value2))) + if ((res2= (*b)->val_str(&owner->tmp_value2))) { - null_value=0; - return binary ? stringcmp(res1,res2) : sortcmp(res1,res2); + owner->null_value= 0; + return sortcmp(res1,res2,cmp_collation.collation); } } - null_value=1; + owner->null_value= 1; return -1; } -int Item_bool_func2::compare_real() + +/* + Compare strings byte by byte. End spaces are also compared. + + RETURN + < 0 *a < *b + 0 *b == *b + > 0 *a > *b +*/ + +int Arg_comparator::compare_binary_string() { - double val1=args[0]->val(); - if (!args[0]->null_value) + String *res1,*res2; + if ((res1= (*a)->val_str(&owner->tmp_value1))) { - double val2=args[1]->val(); - if (!args[1]->null_value) + if ((res2= (*b)->val_str(&owner->tmp_value2))) { - null_value=0; + owner->null_value= 0; + uint res1_length= res1->length(); + uint res2_length= res2->length(); + int cmp= memcmp(res1->ptr(), res2->ptr(), min(res1_length,res2_length)); + return cmp ? 
cmp : (int) (res1_length - res2_length); + } + } + owner->null_value= 1; + return -1; +} + + +/* + Compare strings, but take into account that NULL == NULL +*/ + +int Arg_comparator::compare_e_string() +{ + String *res1,*res2; + res1= (*a)->val_str(&owner->tmp_value1); + res2= (*b)->val_str(&owner->tmp_value2); + if (!res1 || !res2) + return test(res1 == res2); + return test(sortcmp(res1, res2, cmp_collation.collation) == 0); +} + + +int Arg_comparator::compare_e_binary_string() +{ + String *res1,*res2; + res1= (*a)->val_str(&owner->tmp_value1); + res2= (*b)->val_str(&owner->tmp_value2); + if (!res1 || !res2) + return test(res1 == res2); + return test(stringcmp(res1, res2) == 0); +} + + +int Arg_comparator::compare_real() +{ + /* + Fix yet another manifestation of Bug#2338. 'Volatile' will instruct + gcc to flush double values out of 80-bit Intel FPU registers before + performing the comparison. + */ + volatile double val1, val2; + val1= (*a)->val(); + if (!(*a)->null_value) + { + val2= (*b)->val(); + if (!(*b)->null_value) + { + owner->null_value= 0; if (val1 < val2) return -1; if (val1 == val2) return 0; return 1; } } - null_value=1; + owner->null_value= 1; return -1; } +int Arg_comparator::compare_e_real() +{ + double val1= (*a)->val(); + double val2= (*b)->val(); + if ((*a)->null_value || (*b)->null_value) + return test((*a)->null_value && (*b)->null_value); + return test(val1 == val2); +} -int Item_bool_func2::compare_int() +int Arg_comparator::compare_int_signed() { - longlong val1=args[0]->val_int(); - if (!args[0]->null_value) + longlong val1= (*a)->val_int(); + if (!(*a)->null_value) { - longlong val2=args[1]->val_int(); - if (!args[1]->null_value) + longlong val2= (*b)->val_int(); + if (!(*b)->null_value) { - null_value=0; + owner->null_value= 0; if (val1 < val2) return -1; if (val1 == val2) return 0; return 1; } } - null_value=1; + owner->null_value= 1; return -1; } +/* + Compare values as BIGINT UNSIGNED. +*/ -longlong Item_func_eq::val_int() +int Arg_comparator::compare_int_unsigned() { - int value=(this->*cmp_func)(); - return value == 0 ? 1 : 0; + ulonglong val1= (*a)->val_int(); + if (!(*a)->null_value) + { + ulonglong val2= (*b)->val_int(); + if (!(*b)->null_value) + { + owner->null_value= 0; + if (val1 < val2) return -1; + if (val1 == val2) return 0; + return 1; + } + } + owner->null_value= 1; + return -1; } -/* Same as Item_func_eq, but NULL = NULL */ -void Item_func_equal::fix_length_and_dec() +/* + Compare signed (*a) with unsigned (*B) +*/ + +int Arg_comparator::compare_int_signed_unsigned() { - Item_bool_func2::fix_length_and_dec(); - cmp_result_type=item_cmp_type(args[0]->result_type(),args[1]->result_type()); - maybe_null=null_value=0; + longlong sval1= (*a)->val_int(); + if (!(*a)->null_value) + { + ulonglong uval2= (ulonglong)(*b)->val_int(); + if (!(*b)->null_value) + { + owner->null_value= 0; + if (sval1 < 0 || (ulonglong)sval1 < uval2) + return -1; + if ((ulonglong)sval1 == uval2) + return 0; + return 1; + } + } + owner->null_value= 1; + return -1; } -longlong Item_func_equal::val_int() + +/* + Compare unsigned (*a) with signed (*B) +*/ + +int Arg_comparator::compare_int_unsigned_signed() { - switch (cmp_result_type) { - case STRING_RESULT: + ulonglong uval1= (ulonglong)(*a)->val_int(); + if (!(*a)->null_value) { - String *res1,*res2; - res1=args[0]->val_str(&tmp_value1); - res2=args[1]->val_str(&tmp_value2); - if (!res1 || !res2) - return test(res1 == res2); - return (binary ? 
test(stringcmp(res1,res2) == 0) : - test(sortcmp(res1,res2) == 0)); + longlong sval2= (*b)->val_int(); + if (!(*b)->null_value) + { + owner->null_value= 0; + if (sval2 < 0) + return 1; + if (uval1 < (ulonglong)sval2) + return -1; + if (uval1 == (ulonglong)sval2) + return 0; + return 1; + } } - case REAL_RESULT: + owner->null_value= 1; + return -1; +} + + +int Arg_comparator::compare_e_int() +{ + longlong val1= (*a)->val_int(); + longlong val2= (*b)->val_int(); + if ((*a)->null_value || (*b)->null_value) + return test((*a)->null_value && (*b)->null_value); + return test(val1 == val2); +} + +/* + Compare unsigned *a with signed *b or signed *a with unsigned *b. +*/ +int Arg_comparator::compare_e_int_diff_signedness() +{ + longlong val1= (*a)->val_int(); + longlong val2= (*b)->val_int(); + if ((*a)->null_value || (*b)->null_value) + return test((*a)->null_value && (*b)->null_value); + return (val1 >= 0) && test(val1 == val2); +} + +int Arg_comparator::compare_row() +{ + int res= 0; + (*a)->bring_value(); + (*b)->bring_value(); + uint n= (*a)->cols(); + for (uint i= 0; i<n; i++) { - double val1=args[0]->val(); - double val2=args[1]->val(); - if (args[0]->null_value || args[1]->null_value) - return test(args[0]->null_value && args[1]->null_value); - return test(val1 == val2); + if ((res= comparators[i].compare())) + return res; + if (owner->null_value) + return -1; } - case INT_RESULT: + return res; +} + +int Arg_comparator::compare_e_row() +{ + (*a)->bring_value(); + (*b)->bring_value(); + uint n= (*a)->cols(); + for (uint i= 0; i<n; i++) + { + if (!comparators[i].compare()) + return 0; + } + return 1; +} + + +bool Item_in_optimizer::fix_left(THD *thd, + struct st_table_list *tables, + Item **ref) +{ + if (!args[0]->fixed && args[0]->fix_fields(thd, tables, args) || + !cache && !(cache= Item_cache::get_cache(args[0]->result_type()))) + return 1; + + cache->setup(args[0]); + /* + If it is preparation PS only then we do not know values of parameters => + cant't get there values and do not need that values. + */ + if (! 
thd->current_arena->is_stmt_prepare()) + cache->store(args[0]); + if (cache->cols() == 1) + { + if ((used_tables_cache= args[0]->used_tables())) + cache->set_used_tables(OUTER_REF_TABLE_BIT); + else + cache->set_used_tables(0); + } + else + { + uint n= cache->cols(); + for (uint i= 0; i < n; i++) + { + if (args[0]->el(i)->used_tables()) + ((Item_cache *)cache->el(i))->set_used_tables(OUTER_REF_TABLE_BIT); + else + ((Item_cache *)cache->el(i))->set_used_tables(0); + } + used_tables_cache= args[0]->used_tables(); + } + not_null_tables_cache= args[0]->not_null_tables(); + with_sum_func= args[0]->with_sum_func; + const_item_cache= args[0]->const_item(); + return 0; +} + + +bool Item_in_optimizer::fix_fields(THD *thd, struct st_table_list *tables, + Item ** ref) +{ + DBUG_ASSERT(fixed == 0); + if (fix_left(thd, tables, ref)) + return 1; + if (args[0]->maybe_null) + maybe_null=1; + + if (!args[1]->fixed && args[1]->fix_fields(thd, tables, args+1)) + return 1; + Item_in_subselect * sub= (Item_in_subselect *)args[1]; + if (args[0]->cols() != sub->engine->cols()) { - longlong val1=args[0]->val_int(); - longlong val2=args[1]->val_int(); - if (args[0]->null_value || args[1]->null_value) - return test(args[0]->null_value && args[1]->null_value); - return test(val1 == val2); + my_error(ER_OPERAND_COLUMNS, MYF(0), args[0]->cols()); + return 1; } + if (args[1]->maybe_null) + maybe_null=1; + with_sum_func= with_sum_func || args[1]->with_sum_func; + used_tables_cache|= args[1]->used_tables(); + not_null_tables_cache|= args[1]->not_null_tables(); + const_item_cache&= args[1]->const_item(); + fixed= 1; + return 0; +} + + +longlong Item_in_optimizer::val_int() +{ + DBUG_ASSERT(fixed == 1); + cache->store(args[0]); + if (cache->null_value) + { + null_value= 1; + return 0; } - return 0; // Impossible + longlong tmp= args[1]->val_int_result(); + null_value= args[1]->null_value; + return tmp; +} + + +void Item_in_optimizer::keep_top_level_cache() +{ + cache->keep_array(); + save_cache= 1; +} + + +void Item_in_optimizer::cleanup() +{ + DBUG_ENTER("Item_in_optimizer::cleanup"); + Item_bool_func::cleanup(); + if (!save_cache) + cache= 0; + DBUG_VOID_RETURN; +} + + +bool Item_in_optimizer::is_null() +{ + cache->store(args[0]); + return (null_value= (cache->null_value || args[1]->is_null())); +} + + +longlong Item_func_eq::val_int() +{ + DBUG_ASSERT(fixed == 1); + int value= cmp.compare(); + return value == 0 ? 1 : 0; +} + + +/* Same as Item_func_eq, but NULL = NULL */ + +void Item_func_equal::fix_length_and_dec() +{ + Item_bool_func2::fix_length_and_dec(); + maybe_null=null_value=0; } +longlong Item_func_equal::val_int() +{ + DBUG_ASSERT(fixed == 1); + return cmp.compare(); +} longlong Item_func_ne::val_int() { - int value=(this->*cmp_func)(); + DBUG_ASSERT(fixed == 1); + int value= cmp.compare(); return value != 0 && !null_value ? 1 : 0; } longlong Item_func_ge::val_int() { - int value=(this->*cmp_func)(); + DBUG_ASSERT(fixed == 1); + int value= cmp.compare(); return value >= 0 ? 1 : 0; } longlong Item_func_gt::val_int() { - int value=(this->*cmp_func)(); + DBUG_ASSERT(fixed == 1); + int value= cmp.compare(); return value > 0 ? 1 : 0; } longlong Item_func_le::val_int() { - int value=(this->*cmp_func)(); + DBUG_ASSERT(fixed == 1); + int value= cmp.compare(); return value <= 0 && !null_value ? 1 : 0; } longlong Item_func_lt::val_int() { - int value=(this->*cmp_func)(); + DBUG_ASSERT(fixed == 1); + int value= cmp.compare(); return value < 0 && !null_value ? 
1 : 0; } longlong Item_func_strcmp::val_int() { + DBUG_ASSERT(fixed == 1); String *a=args[0]->val_str(&tmp_value1); String *b=args[1]->val_str(&tmp_value2); if (!a || !b) @@ -262,7 +764,7 @@ longlong Item_func_strcmp::val_int() null_value=1; return 0; } - int value= binary ? stringcmp(a,b) : sortcmp(a,b); + int value= sortcmp(a,b,cmp.cmp_collation.collation); null_value=0; return !value ? 0 : (value < 0 ? (longlong) -1 : (longlong) 1); } @@ -270,62 +772,51 @@ longlong Item_func_strcmp::val_int() void Item_func_interval::fix_length_and_dec() { - bool nums=1; - uint i; - for (i=0 ; i < arg_count ; i++) + if (row->cols() > 8) { - if (!args[i]) - return; // End of memory - if (args[i]->type() != Item::INT_ITEM && - args[i]->type() != Item::REAL_ITEM) + bool consts=1; + + for (uint i=1 ; consts && i < row->cols() ; i++) { - nums=0; - break; + consts&= row->el(i)->const_item(); } - } - if (nums && arg_count >= 8) - { - if ((intervals=(double*) sql_alloc(sizeof(double)*arg_count))) + + if (consts && + (intervals=(double*) sql_alloc(sizeof(double)*(row->cols()-1)))) { - for (i=0 ; i < arg_count ; i++) - intervals[i]=args[i]->val(); + for (uint i=1 ; i < row->cols(); i++) + intervals[i-1]=row->el(i)->val(); } } - maybe_null=0; max_length=2; - used_tables_cache|= item->used_tables(); - not_null_tables_cache= item->not_null_tables(); - with_sum_func= with_sum_func || item->with_sum_func; - const_item_cache&= item->const_item(); + maybe_null= 0; + max_length= 2; + used_tables_cache|= row->used_tables(); + not_null_tables_cache= row->not_null_tables(); + with_sum_func= with_sum_func || row->with_sum_func; + const_item_cache&= row->const_item(); } -void Item_func_interval::split_sum_func(List<Item> &fields) -{ - if (item->with_sum_func && item->type() != SUM_FUNC_ITEM) - item->split_sum_func(fields); - else if (item->used_tables() || item->type() == SUM_FUNC_ITEM) - { - fields.push_front(item); - item= new Item_ref((Item**) fields.head_ref(), 0, item->name); - } - Item_int_func::split_sum_func(fields); -} - /* return -1 if null value, 0 if lower than lowest - 1 - arg_count if between args[n] and args[n+1] - arg_count+1 if higher than biggest argument + 1 - arg_count-1 if between args[n] and args[n+1] + arg_count if higher than biggest argument */ longlong Item_func_interval::val_int() { - double value= item->val(); - if (item->null_value) - return -1; // -1 if NULL + DBUG_ASSERT(fixed == 1); + double value= row->el(0)->val(); + uint i; + + if (row->el(0)->null_value) + return -1; // -1 if null if (intervals) { // Use binary search to find interval - uint start= 0, end= arg_count - 1; + uint start,end; + start= 0; + end= row->cols()-2; while (start != end) { uint mid= (start + end + 1) / 2; @@ -336,28 +827,68 @@ longlong Item_func_interval::val_int() } return (value < intervals[start]) ? 
0 : start + 1; } - if (args[0]->val() > value) - return 0; - for (uint i= 1; i < arg_count; i++) + + for (i=1 ; i < row->cols() ; i++) { - if (args[i]->val() > value) - return i; + if (row->el(i)->val() > value) + return i-1; } - return (longlong) arg_count; + return i-1; } -void Item_func_interval::update_used_tables() +/* + Perform context analysis of a BETWEEN item tree + + SYNOPSIS: + fix_fields() + thd reference to the global context of the query thread + tables list of all open tables involved in the query + ref pointer to Item* variable where pointer to resulting "fixed" + item is to be assigned + + DESCRIPTION + This function performs context analysis (name resolution) and calculates + various attributes of the item tree with Item_func_between as its root. + The function saves in ref the pointer to the item or to a newly created + item that is considered as a replacement for the original one. + + NOTES + Let T0(e)/T1(e) be the value of not_null_tables(e) when e is used on + a predicate/function level. Then it's easy to show that: + T0(e BETWEEN e1 AND e2) = union(T1(e),T1(e1),T1(e2)) + T1(e BETWEEN e1 AND e2) = union(T1(e),intersection(T1(e1),T1(e2))) + T0(e NOT BETWEEN e1 AND e2) = union(T1(e),intersection(T1(e1),T1(e2))) + T1(e NOT BETWEEN e1 AND e2) = union(T1(e),intersection(T1(e1),T1(e2))) + + RETURN + 0 ok + 1 got error +*/ + +bool Item_func_between::fix_fields(THD *thd, struct st_table_list *tables, + Item **ref) { - Item_func::update_used_tables(); - item->update_used_tables(); - used_tables_cache|=item->used_tables(); - const_item_cache&=item->const_item(); + if (Item_func_opt_neg::fix_fields(thd, tables, ref)) + return 1; + + /* not_null_tables_cache == union(T1(e),T1(e1),T1(e2)) */ + if (pred_level && !negated) + return 0; + + /* not_null_tables_cache == union(T1(e), intersection(T1(e1),T1(e2))) */ + not_null_tables_cache= (args[0]->not_null_tables() | + (args[1]->not_null_tables() & + args[2]->not_null_tables())); + + return 0; } + void Item_func_between::fix_length_and_dec() { - max_length=1; + max_length= 1; + THD *thd= current_thd; /* As some compare functions are generated after sql_yacc, @@ -365,13 +896,10 @@ void Item_func_between::fix_length_and_dec() */ if (!args[0] || !args[1] || !args[2]) return; - cmp_type=item_cmp_type(args[0]->result_type(), - item_cmp_type(args[1]->result_type(), - args[2]->result_type())); - if (args[0]->binary | args[1]->binary | args[2]->binary) - string_compare=stringcmp; - else - string_compare=sortcmp; + agg_cmp_type(&cmp_type, args, 3); + if (cmp_type == STRING_RESULT && + agg_arg_charsets(cmp_collation, args, 3, MY_COLL_CMP_CONV)) + return; /* Make a special case of compare with date/time and longlong fields. @@ -381,11 +909,15 @@ void Item_func_between::fix_length_and_dec() if (args[0]->type() == FIELD_ITEM) { Field *field=((Item_field*) args[0])->field; - if (field->store_for_compare()) + if (field->can_be_compared_as_longlong()) { - if (convert_constant_item(field,&args[1])) + /* + The following can't be recoded with || as convert_constant_item + changes the argument + */ + if (convert_constant_item(thd, field,&args[1])) cmp_type=INT_RESULT; // Works for all types. - if (convert_constant_item(field,&args[2])) + if (convert_constant_item(thd, field,&args[2])) cmp_type=INT_RESULT; // Works for all types. 
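(Illustration, not part of the patch.) Earlier in this hunk, Item_func_interval::val_int caches all-constant boundaries in a double array at fix time and answers each call with a binary search. A self-contained sketch of that lookup over a sorted boundary array, as a simplified stand-in rather than the MySQL code itself:

#include <cstdio>

/* Return how many of the sorted boundaries are <= value,
   i.e. the index INTERVAL() reports. */
static unsigned interval_index(const double *boundaries, unsigned count,
                               double value)
{
  if (count == 0 || value < boundaries[0])
    return 0;
  unsigned start = 0, end = count - 1;
  while (start != end)              /* invariant: boundaries[start] <= value */
  {
    unsigned mid = (start + end + 1) / 2;
    if (value < boundaries[mid])
      end = mid - 1;
    else
      start = mid;
  }
  return start + 1;
}

int main()
{
  const double b[] = { 10.0, 20.0, 30.0 };
  std::printf("%u %u %u\n",
              interval_index(b, 3, 5.0),    /* 0 */
              interval_index(b, 3, 25.0),   /* 2 */
              interval_index(b, 3, 100.0)); /* 3 */
  return 0;
}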
} } @@ -394,6 +926,7 @@ void Item_func_between::fix_length_and_dec() longlong Item_func_between::val_int() { // ANSI BETWEEN + DBUG_ASSERT(fixed == 1); if (cmp_type == STRING_RESULT) { String *value,*a,*b; @@ -403,17 +936,20 @@ longlong Item_func_between::val_int() a=args[1]->val_str(&value1); b=args[2]->val_str(&value2); if (!args[1]->null_value && !args[2]->null_value) - return (string_compare(value,a) >= 0 && string_compare(value,b) <= 0) ? - 1 : 0; + return (longlong) ((sortcmp(value,a,cmp_collation.collation) >= 0 && + sortcmp(value,b,cmp_collation.collation) <= 0) != + negated); if (args[1]->null_value && args[2]->null_value) null_value=1; else if (args[1]->null_value) { - null_value= string_compare(value,b) <= 0; // not null if false range. + // Set to not null if false range. + null_value= sortcmp(value,b,cmp_collation.collation) <= 0; } else { - null_value= string_compare(value,a) >= 0; // not null if false range. + // Set to not null if false range. + null_value= sortcmp(value,a,cmp_collation.collation) >= 0; } } else if (cmp_type == INT_RESULT) @@ -424,7 +960,7 @@ longlong Item_func_between::val_int() a=args[1]->val_int(); b=args[2]->val_int(); if (!args[1]->null_value && !args[2]->null_value) - return (value >= a && value <= b) ? 1 : 0; + return (longlong) ((value >= a && value <= b) != negated); if (args[1]->null_value && args[2]->null_value) null_value=1; else if (args[1]->null_value) @@ -444,7 +980,7 @@ longlong Item_func_between::val_int() a=args[1]->val(); b=args[2]->val(); if (!args[1]->null_value && !args[2]->null_value) - return (value >= a && value <= b) ? 1 : 0; + return (longlong) ((value >= a && value <= b) != negated); if (args[1]->null_value && args[2]->null_value) null_value=1; else if (args[1]->null_value) @@ -456,16 +992,21 @@ longlong Item_func_between::val_int() null_value= value >= a; } } - return 0; + return (longlong) (!null_value && negated); } -static Item_result item_store_type(Item_result a,Item_result b) + +void Item_func_between::print(String *str) { - if (a == STRING_RESULT || b == STRING_RESULT) - return STRING_RESULT; - if (a == REAL_RESULT || b == REAL_RESULT) - return REAL_RESULT; - return INT_RESULT; + str->append('('); + args[0]->print(str); + if (negated) + str->append(" not", 4); + str->append(" between ", 9); + args[1]->print(str); + str->append(" and ", 5); + args[2]->print(str); + str->append(')'); } void @@ -474,16 +1015,31 @@ Item_func_ifnull::fix_length_and_dec() maybe_null=args[1]->maybe_null; max_length=max(args[0]->max_length,args[1]->max_length); decimals=max(args[0]->decimals,args[1]->decimals); - if ((cached_result_type=item_store_type(args[0]->result_type(), - args[1]->result_type())) != - REAL_RESULT) + agg_result_type(&cached_result_type, args, 2); + if (cached_result_type == STRING_RESULT) + agg_arg_charsets(collation, args, arg_count, MY_COLL_CMP_CONV); + else if (cached_result_type != REAL_RESULT) decimals= 0; + + cached_field_type= args[0]->field_type(); + if (cached_field_type != args[1]->field_type()) + cached_field_type= Item_func::field_type(); } +enum_field_types Item_func_ifnull::field_type() const +{ + return cached_field_type; +} + +Field *Item_func_ifnull::tmp_table_field(TABLE *table) +{ + return tmp_table_field_from_field_type(table); +} double Item_func_ifnull::val() { + DBUG_ASSERT(fixed == 1); double value=args[0]->val(); if (!args[0]->null_value) { @@ -499,6 +1055,7 @@ Item_func_ifnull::val() longlong Item_func_ifnull::val_int() { + DBUG_ASSERT(fixed == 1); longlong value=args[0]->val_int(); if 
(!args[0]->null_value) { @@ -514,19 +1071,65 @@ Item_func_ifnull::val_int() String * Item_func_ifnull::val_str(String *str) { + DBUG_ASSERT(fixed == 1); String *res =args[0]->val_str(str); if (!args[0]->null_value) { null_value=0; + res->set_charset(collation.collation); return res; } res=args[1]->val_str(str); if ((null_value=args[1]->null_value)) return 0; + res->set_charset(collation.collation); return res; } +/* + Perform context analysis of an IF item tree + + SYNOPSIS: + fix_fields() + thd reference to the global context of the query thread + tables list of all open tables involved in the query + ref pointer to Item* variable where pointer to resulting "fixed" + item is to be assigned + + DESCRIPTION + This function performs context analysis (name resolution) and calculates + various attributes of the item tree with Item_func_if as its root. + The function saves in ref the pointer to the item or to a newly created + item that is considered as a replacement for the original one. + + NOTES + Let T0(e)/T1(e) be the value of not_null_tables(e) when e is used on + a predicate/function level. Then it's easy to show that: + T0(IF(e,e1,e2) = T1(IF(e,e1,e2)) + T1(IF(e,e1,e2)) = intersection(T1(e1),T1(e2)) + + RETURN + 0 ok + 1 got error +*/ + +bool +Item_func_if::fix_fields(THD *thd, struct st_table_list *tlist, Item **ref) +{ + DBUG_ASSERT(fixed == 0); + args[0]->top_level_item(); + + if (Item_func::fix_fields(thd, tlist, ref)) + return 1; + + not_null_tables_cache= (args[1]->not_null_tables() & + args[2]->not_null_tables()); + + return 0; +} + + void Item_func_if::fix_length_and_dec() { @@ -541,25 +1144,25 @@ Item_func_if::fix_length_and_dec() if (null1) { cached_result_type= arg2_type; - binary= args[2]->binary; + collation.set(args[2]->collation.collation); } else if (null2) { cached_result_type= arg1_type; - binary= args[1]->binary; - } - else if (arg1_type == STRING_RESULT || arg2_type == STRING_RESULT) - { - cached_result_type = STRING_RESULT; - binary=args[1]->binary | args[2]->binary; + collation.set(args[1]->collation.collation); } else { - binary=1; // Number - if (arg1_type == REAL_RESULT || arg2_type == REAL_RESULT) - cached_result_type = REAL_RESULT; + agg_result_type(&cached_result_type, args+1, 2); + if (cached_result_type == STRING_RESULT) + { + if (agg_arg_charsets(collation, args+1, 2, MY_COLL_ALLOW_CONV)) + return; + } else - cached_result_type=arg1_type; // Should be INT_RESULT + { + collation.set(&my_charset_bin); // Number + } } } @@ -567,6 +1170,7 @@ Item_func_if::fix_length_and_dec() double Item_func_if::val() { + DBUG_ASSERT(fixed == 1); Item *arg= args[0]->val_int() ? args[1] : args[2]; double value=arg->val(); null_value=arg->null_value; @@ -576,6 +1180,7 @@ Item_func_if::val() longlong Item_func_if::val_int() { + DBUG_ASSERT(fixed == 1); Item *arg= args[0]->val_int() ? args[1] : args[2]; longlong value=arg->val_int(); null_value=arg->null_value; @@ -585,8 +1190,11 @@ Item_func_if::val_int() String * Item_func_if::val_str(String *str) { + DBUG_ASSERT(fixed == 1); Item *arg= args[0]->val_int() ? 
args[1] : args[2]; String *res=arg->val_str(str); + if (res) + res->set_charset(collation.collation); null_value=arg->null_value; return res; } @@ -601,12 +1209,15 @@ Item_func_nullif::fix_length_and_dec() { max_length=args[0]->max_length; decimals=args[0]->decimals; - cached_result_type=args[0]->result_type(); + agg_result_type(&cached_result_type, args, 2); + if (cached_result_type == STRING_RESULT && + agg_arg_charsets(collation, args, arg_count, MY_COLL_CMP_CONV)) + return; } } /* - nullif () returns NULL if arguments are different, else it returns the + nullif () returns NULL if arguments are equal, else it returns the first argument. Note that we have to evaluate the first argument twice as the compare may have been done with a different type than return value @@ -615,8 +1226,9 @@ Item_func_nullif::fix_length_and_dec() double Item_func_nullif::val() { + DBUG_ASSERT(fixed == 1); double value; - if (!(this->*cmp_func)()) + if (!cmp.compare()) { null_value=1; return 0.0; @@ -629,8 +1241,9 @@ Item_func_nullif::val() longlong Item_func_nullif::val_int() { + DBUG_ASSERT(fixed == 1); longlong value; - if (!(this->*cmp_func)()) + if (!cmp.compare()) { null_value=1; return 0; @@ -643,8 +1256,9 @@ Item_func_nullif::val_int() String * Item_func_nullif::val_str(String *str) { + DBUG_ASSERT(fixed == 1); String *res; - if (!(this->*cmp_func)()) + if (!cmp.compare()) { null_value=1; return 0; @@ -658,9 +1272,7 @@ Item_func_nullif::val_str(String *str) bool Item_func_nullif::is_null() { - if (!(this->*cmp_func)()) - return (null_value= 1); - return 0; + return (null_value= (!cmp.compare() ? 1 : args[0]->null_value)); } /* @@ -673,75 +1285,81 @@ Item *Item_func_case::find_item(String *str) String *first_expr_str,*tmp; longlong first_expr_int; double first_expr_real; - bool int_used, real_used,str_used; - int_used=real_used=str_used=0; - + char buff[MAX_FIELD_WIDTH]; + String buff_str(buff,sizeof(buff),default_charset()); + /* These will be initialized later */ LINT_INIT(first_expr_str); LINT_INIT(first_expr_int); LINT_INIT(first_expr_real); + if (first_expr_num != -1) + { + switch (cmp_type) + { + case STRING_RESULT: + // We can't use 'str' here as this may be overwritten + if (!(first_expr_str= args[first_expr_num]->val_str(&buff_str))) + return else_expr_num != -1 ? args[else_expr_num] : 0; // Impossible + break; + case INT_RESULT: + first_expr_int= args[first_expr_num]->val_int(); + if (args[first_expr_num]->null_value) + return else_expr_num != -1 ? args[else_expr_num] : 0; + break; + case REAL_RESULT: + first_expr_real= args[first_expr_num]->val(); + if (args[first_expr_num]->null_value) + return else_expr_num != -1 ? 
args[else_expr_num] : 0; + break; + case ROW_RESULT: + default: + // This case should never be choosen + DBUG_ASSERT(0); + break; + } + } + // Compare every WHEN argument with it and return the first match - for (uint i=0 ; i < arg_count ; i+=2) + for (uint i=0 ; i < ncases ; i+=2) { - if (!first_expr) + if (first_expr_num == -1) { - // No expression between CASE and first WHEN + // No expression between CASE and the first WHEN if (args[i]->val_int()) return args[i+1]; continue; } - switch (args[i]->result_type()) { + switch (cmp_type) { case STRING_RESULT: - if (!str_used) - { - str_used=1; - // We can't use 'str' here as this may be overwritten - if (!(first_expr_str= first_expr->val_str(&str_value))) - return else_expr; // Impossible - } if ((tmp=args[i]->val_str(str))) // If not null - { - if (first_expr->binary || args[i]->binary) - { - if (stringcmp(tmp,first_expr_str)==0) - return args[i+1]; - } - else if (sortcmp(tmp,first_expr_str)==0) + if (sortcmp(tmp,first_expr_str,cmp_collation.collation)==0) return args[i+1]; - } break; case INT_RESULT: - if (!int_used) - { - int_used=1; - first_expr_int= first_expr->val_int(); - if (first_expr->null_value) - return else_expr; - } if (args[i]->val_int()==first_expr_int && !args[i]->null_value) return args[i+1]; break; case REAL_RESULT: - if (!real_used) - { - real_used=1; - first_expr_real= first_expr->val(); - if (first_expr->null_value) - return else_expr; - } if (args[i]->val()==first_expr_real && !args[i]->null_value) return args[i+1]; + break; + case ROW_RESULT: + default: + // This case should never be choosen + DBUG_ASSERT(0); + break; } } // No, WHEN clauses all missed, return ELSE expression - return else_expr; + return else_expr_num != -1 ? args[else_expr_num] : 0; } String *Item_func_case::val_str(String *str) { + DBUG_ASSERT(fixed == 1); String *res; Item *item=find_item(str); @@ -759,8 +1377,9 @@ String *Item_func_case::val_str(String *str) longlong Item_func_case::val_int() { + DBUG_ASSERT(fixed == 1); char buff[MAX_FIELD_WIDTH]; - String dummy_str(buff,sizeof(buff)); + String dummy_str(buff,sizeof(buff),default_charset()); Item *item=find_item(&dummy_str); longlong res; @@ -776,8 +1395,9 @@ longlong Item_func_case::val_int() double Item_func_case::val() { + DBUG_ASSERT(fixed == 1); char buff[MAX_FIELD_WIDTH]; - String dummy_str(buff,sizeof(buff)); + String dummy_str(buff,sizeof(buff),default_charset()); Item *item=find_item(&dummy_str); double res; @@ -791,96 +1411,88 @@ double Item_func_case::val() return res; } - -bool -Item_func_case::fix_fields(THD *thd,TABLE_LIST *tables) -{ - if (first_expr && first_expr->fix_fields(thd,tables) || - else_expr && else_expr->fix_fields(thd,tables)) - return 1; - if (Item_func::fix_fields(thd,tables)) - return 1; - if (first_expr) - { - used_tables_cache|=(first_expr)->used_tables(); - const_item_cache&= (first_expr)->const_item(); - with_sum_func= with_sum_func || (first_expr)->with_sum_func; - } - if (else_expr) - { - used_tables_cache|=(else_expr)->used_tables(); - const_item_cache&= (else_expr)->const_item(); - with_sum_func= with_sum_func || (else_expr)->with_sum_func; - } - if (!else_expr || else_expr->maybe_null) - maybe_null=1; // The result may be NULL - return 0; -} - -void Item_func_case::split_sum_func(List<Item> &fields) -{ - if (first_expr) - { - if (first_expr->with_sum_func && first_expr->type() != SUM_FUNC_ITEM) - first_expr->split_sum_func(fields); - else if (first_expr->used_tables() || first_expr->type() == SUM_FUNC_ITEM) - { - fields.push_front(first_expr); - first_expr= 
new Item_ref((Item**) fields.head_ref(), 0, - first_expr->name); - } - } - if (else_expr) - { - if (else_expr->with_sum_func && else_expr->type() != SUM_FUNC_ITEM) - else_expr->split_sum_func(fields); - else if (else_expr->used_tables() || else_expr->type() == SUM_FUNC_ITEM) - { - fields.push_front(else_expr); - else_expr= new Item_ref((Item**) fields.head_ref(), 0, else_expr->name); - } - } - Item_func::split_sum_func(fields); -} - -void Item_func_case::update_used_tables() -{ - Item_func::update_used_tables(); - if (first_expr) - { - used_tables_cache|=(first_expr)->used_tables(); - const_item_cache&= (first_expr)->const_item(); - } - if (else_expr) - { - used_tables_cache|=(else_expr)->used_tables(); - const_item_cache&= (else_expr)->const_item(); - } -} - - void Item_func_case::fix_length_and_dec() { + Item **agg; + uint nagg; + + if (!(agg= (Item**) sql_alloc(sizeof(Item*)*(ncases+1)))) + return; + + // Aggregate all THEN and ELSE expression types + // and collations when string result + + for (nagg= 0 ; nagg < ncases/2 ; nagg++) + agg[nagg]= args[nagg*2+1]; + + if (else_expr_num != -1) + agg[nagg++]= args[else_expr_num]; + + agg_result_type(&cached_result_type, agg, nagg); + if ((cached_result_type == STRING_RESULT) && + agg_arg_charsets(collation, agg, nagg, MY_COLL_ALLOW_CONV)) + return; + + + /* + Aggregate first expression and all THEN expression types + and collations when string comparison + */ + if (first_expr_num != -1) + { + agg[0]= args[first_expr_num]; + for (nagg= 0; nagg < ncases/2 ; nagg++) + agg[nagg+1]= args[nagg*2]; + nagg++; + agg_cmp_type(&cmp_type, agg, nagg); + if ((cmp_type == STRING_RESULT) && + agg_arg_charsets(cmp_collation, agg, nagg, MY_COLL_CMP_CONV)) + return; + } + + if (else_expr_num == -1 || args[else_expr_num]->maybe_null) + maybe_null=1; + max_length=0; decimals=0; - cached_result_type = args[1]->result_type(); - for (uint i=0 ; i < arg_count ; i+=2) + for (uint i=0 ; i < ncases ; i+=2) { set_if_bigger(max_length,args[i+1]->max_length); set_if_bigger(decimals,args[i+1]->decimals); } - if (else_expr != NULL) + if (else_expr_num != -1) { - set_if_bigger(max_length,else_expr->max_length); - set_if_bigger(decimals,else_expr->decimals); + set_if_bigger(max_length,args[else_expr_num]->max_length); + set_if_bigger(decimals,args[else_expr_num]->decimals); } } + /* TODO: Fix this so that it prints the whole CASE expression */ void Item_func_case::print(String *str) { - str->append("case "); // Not yet complete + str->append("(case ", 6); + if (first_expr_num != -1) + { + args[first_expr_num]->print(str); + str->append(' '); + } + for (uint i=0 ; i < ncases ; i+=2) + { + str->append("when ", 5); + args[i]->print(str); + str->append(" then ", 6); + args[i+1]->print(str); + str->append(' '); + } + if (else_expr_num != -1) + { + str->append("else ", 5); + args[else_expr_num]->print(str); + str->append(' '); + } + str->append("end)", 4); } /* @@ -889,6 +1501,7 @@ void Item_func_case::print(String *str) String *Item_func_coalesce::val_str(String *str) { + DBUG_ASSERT(fixed == 1); null_value=0; for (uint i=0 ; i < arg_count ; i++) { @@ -902,6 +1515,7 @@ String *Item_func_coalesce::val_str(String *str) longlong Item_func_coalesce::val_int() { + DBUG_ASSERT(fixed == 1); null_value=0; for (uint i=0 ; i < arg_count ; i++) { @@ -915,6 +1529,7 @@ longlong Item_func_coalesce::val_int() double Item_func_coalesce::val() { + DBUG_ASSERT(fixed == 1); null_value=0; for (uint i=0 ; i < arg_count ; i++) { @@ -929,30 +1544,39 @@ double Item_func_coalesce::val() void 
Item_func_coalesce::fix_length_and_dec() { - max_length=0; - decimals=0; - cached_result_type = args[0]->result_type(); + max_length= 0; + decimals= 0; + agg_result_type(&cached_result_type, args, arg_count); for (uint i=0 ; i < arg_count ; i++) { set_if_bigger(max_length,args[i]->max_length); set_if_bigger(decimals,args[i]->decimals); } + if (cached_result_type == STRING_RESULT) + agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV); + else if (cached_result_type != REAL_RESULT) + decimals= 0; } /**************************************************************************** Classes and function for the IN operator ****************************************************************************/ -static int cmp_longlong(longlong *a,longlong *b) +static int cmp_longlong(void *cmp_arg, longlong *a,longlong *b) { return *a < *b ? -1 : *a == *b ? 0 : 1; } -static int cmp_double(double *a,double *b) +static int cmp_double(void *cmp_arg, double *a,double *b) { return *a < *b ? -1 : *a == *b ? 0 : 1; } +static int cmp_row(void *cmp_arg, cmp_item_row* a, cmp_item_row* b) +{ + return a->compare(b); +} + int in_vector::find(Item *item) { byte *result=get_value(item); @@ -965,25 +1589,29 @@ int in_vector::find(Item *item) { uint mid=(start+end+1)/2; int res; - if ((res=(*compare)(base+mid*size,result)) == 0) + if ((res=(*compare)(collation, base+mid*size, result)) == 0) return 1; if (res < 0) start=mid; else end=mid-1; } - return (int) ((*compare)(base+start*size,result) == 0); + return (int) ((*compare)(collation, base+start*size, result) == 0); } - -in_string::in_string(uint elements,qsort_cmp cmp_func) - :in_vector(elements,sizeof(String),cmp_func),tmp(buff,sizeof(buff)) +in_string::in_string(uint elements,qsort2_cmp cmp_func, CHARSET_INFO *cs) + :in_vector(elements, sizeof(String), cmp_func, cs), + tmp(buff, sizeof(buff), &my_charset_bin) {} in_string::~in_string() { - for (uint i=0 ; i < count ; i++) - ((String*) base)[i].free(); + if (base) + { + // base was allocated with help of sql_alloc => following is OK + for (uint i=0 ; i < count ; i++) + ((String*) base)[i].free(); + } } void in_string::set(uint pos,Item *item) @@ -991,17 +1619,64 @@ void in_string::set(uint pos,Item *item) String *str=((String*) base)+pos; String *res=item->val_str(str); if (res && res != str) + { + if (res->uses_buffer_owned_by(str)) + res->copy(); *str= *res; + } + if (!str->charset()) + { + CHARSET_INFO *cs; + if (!(cs= item->collation.collation)) + cs= &my_charset_bin; // Should never happen for STR items + str->set_charset(cs); + } } + byte *in_string::get_value(Item *item) { return (byte*) item->val_str(&tmp); } +in_row::in_row(uint elements, Item * item) +{ + base= (char*) new cmp_item_row[count= elements]; + size= sizeof(cmp_item_row); + compare= (qsort2_cmp) cmp_row; + tmp.store_value(item); + /* + We need to reset these as otherwise we will call sort() with + uninitialized (even if not used) elements + */ + used_count= elements; + collation= 0; +} + +in_row::~in_row() +{ + if (base) + delete [] (cmp_item_row*) base; +} + +byte *in_row::get_value(Item *item) +{ + tmp.store_value(item); + if (item->is_null()) + return 0; + return (byte *)&tmp; +} + +void in_row::set(uint pos, Item *item) +{ + DBUG_ENTER("in_row::set"); + DBUG_PRINT("enter", ("pos %u item 0x%lx", pos, (ulong) item)); + ((cmp_item_row*) base)[pos].store_value_by_template(&tmp, item); + DBUG_VOID_RETURN; +} in_longlong::in_longlong(uint elements) - :in_vector(elements,sizeof(longlong),(qsort_cmp) cmp_longlong) + 
:in_vector(elements,sizeof(longlong),(qsort2_cmp) cmp_longlong, 0) {} void in_longlong::set(uint pos,Item *item) @@ -1011,15 +1686,14 @@ void in_longlong::set(uint pos,Item *item) byte *in_longlong::get_value(Item *item) { - tmp=item->val_int(); + tmp= item->val_int(); if (item->null_value) - return 0; /* purecov: inspected */ + return 0; return (byte*) &tmp; } - in_double::in_double(uint elements) - :in_vector(elements,sizeof(double),(qsort_cmp) cmp_double) + :in_vector(elements,sizeof(double),(qsort2_cmp) cmp_double, 0) {} void in_double::set(uint pos,Item *item) @@ -1029,39 +1703,271 @@ void in_double::set(uint pos,Item *item) byte *in_double::get_value(Item *item) { - tmp=item->val(); + tmp= item->val(); if (item->null_value) return 0; /* purecov: inspected */ return (byte*) &tmp; } +cmp_item* cmp_item::get_comparator(Item *item) +{ + switch (item->result_type()) { + case STRING_RESULT: + return new cmp_item_sort_string(item->collation.collation); + case INT_RESULT: + return new cmp_item_int; + case REAL_RESULT: + return new cmp_item_real; + case ROW_RESULT: + return new cmp_item_row; + default: + DBUG_ASSERT(0); + break; + } + return 0; // to satisfy compiler :) +} + + +cmp_item* cmp_item_sort_string::make_same() +{ + return new cmp_item_sort_string_in_static(cmp_charset); +} + +cmp_item* cmp_item_int::make_same() +{ + return new cmp_item_int(); +} + +cmp_item* cmp_item_real::make_same() +{ + return new cmp_item_real(); +} + +cmp_item* cmp_item_row::make_same() +{ + return new cmp_item_row(); +} + + +cmp_item_row::~cmp_item_row() +{ + DBUG_ENTER("~cmp_item_row"); + DBUG_PRINT("enter",("this: %lx", this)); + if (comparators) + { + for (uint i= 0; i < n; i++) + { + if (comparators[i]) + delete comparators[i]; + } + } + DBUG_VOID_RETURN; +} + + +void cmp_item_row::store_value(Item *item) +{ + DBUG_ENTER("cmp_item_row::store_value"); + n= item->cols(); + if (!comparators) + comparators= (cmp_item **) current_thd->calloc(sizeof(cmp_item *)*n); + if (comparators) + { + item->bring_value(); + item->null_value= 0; + for (uint i=0; i < n; i++) + { + if (!comparators[i]) + if (!(comparators[i]= cmp_item::get_comparator(item->el(i)))) + break; // new failed + comparators[i]->store_value(item->el(i)); + item->null_value|= item->el(i)->null_value; + } + } + DBUG_VOID_RETURN; +} + + +void cmp_item_row::store_value_by_template(cmp_item *t, Item *item) +{ + cmp_item_row *tmpl= (cmp_item_row*) t; + if (tmpl->n != item->cols()) + { + my_error(ER_OPERAND_COLUMNS, MYF(0), tmpl->n); + return; + } + n= tmpl->n; + if ((comparators= (cmp_item **) sql_alloc(sizeof(cmp_item *)*n))) + { + item->bring_value(); + item->null_value= 0; + for (uint i=0; i < n; i++) + { + if (!(comparators[i]= tmpl->comparators[i]->make_same())) + break; // new failed + comparators[i]->store_value_by_template(tmpl->comparators[i], + item->el(i)); + item->null_value|= item->el(i)->null_value; + } + } +} + + +int cmp_item_row::cmp(Item *arg) +{ + arg->null_value= 0; + if (arg->cols() != n) + { + my_error(ER_OPERAND_COLUMNS, MYF(0), n); + return 1; + } + bool was_null= 0; + arg->bring_value(); + for (uint i=0; i < n; i++) + { + if (comparators[i]->cmp(arg->el(i))) + { + if (!arg->el(i)->null_value) + return 1; + was_null= 1; + } + } + return (arg->null_value= was_null); +} + + +int cmp_item_row::compare(cmp_item *c) +{ + cmp_item_row *cmp= (cmp_item_row *) c; + for (uint i=0; i < n; i++) + { + int res; + if ((res= comparators[i]->compare(cmp->comparators[i]))) + return res; + } + return 0; +} + + +bool Item_func_in::nulls_in_row() +{ + 
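(Illustration, not part of the patch.) The in_vector classes above store a constant IN list in a sorted, typed array and locate a probe value by binary search, while Item_func_in::val_int below turns a miss into NULL rather than FALSE when the list contained NULLs (tracked by have_null). A minimal standalone sketch of that three-valued probe, using a plain sorted list of longs in place of MySQL's typed vectors:

#include <algorithm>
#include <cstdio>
#include <vector>

enum Tristate { FALSE3, TRUE3, UNKNOWN3 };

/* value IN (list): TRUE on a match, otherwise UNKNOWN if the list
   held a NULL (the missing value might have matched), else FALSE. */
static Tristate in_sorted_list(long value, const std::vector<long> &sorted,
                               bool list_had_null)
{
  if (std::binary_search(sorted.begin(), sorted.end(), value))
    return TRUE3;
  return list_had_null ? UNKNOWN3 : FALSE3;
}

int main()
{
  std::vector<long> list = {1, 3, 5};          /* sorted, NULLs stripped */
  std::printf("%d %d\n",
              in_sorted_list(3, list, true),   /* TRUE3 */
              in_sorted_list(4, list, true));  /* UNKNOWN3 */
  return 0;
}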
Item **arg,**arg_end; + for (arg= args+1, arg_end= args+arg_count; arg != arg_end ; arg++) + { + if ((*arg)->null_inside()) + return 1; + } + return 0; +} + + +/* + Perform context analysis of an IN item tree + + SYNOPSIS: + fix_fields() + thd reference to the global context of the query thread + tables list of all open tables involved in the query + ref pointer to Item* variable where pointer to resulting "fixed" + item is to be assigned + + DESCRIPTION + This function performs context analysis (name resolution) and calculates + various attributes of the item tree with Item_func_in as its root. + The function saves in ref the pointer to the item or to a newly created + item that is considered as a replacement for the original one. + + NOTES + Let T0(e)/T1(e) be the value of not_null_tables(e) when e is used on + a predicate/function level. Then it's easy to show that: + T0(e IN(e1,...,en)) = union(T1(e),intersection(T1(ei))) + T1(e IN(e1,...,en)) = union(T1(e),intersection(T1(ei))) + T0(e NOT IN(e1,...,en)) = union(T1(e),union(T1(ei))) + T1(e NOT IN(e1,...,en)) = union(T1(e),intersection(T1(ei))) + + RETURN + 0 ok + 1 got error +*/ + +bool +Item_func_in::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) +{ + Item **arg, **arg_end; + + if (Item_func_opt_neg::fix_fields(thd, tables, ref)) + return 1; + + /* not_null_tables_cache == union(T1(e),union(T1(ei))) */ + if (pred_level && negated) + return 0; + + /* not_null_tables_cache = union(T1(e),intersection(T1(ei))) */ + not_null_tables_cache= ~(table_map) 0; + for (arg= args + 1, arg_end= args + arg_count; arg != arg_end; arg++) + not_null_tables_cache&= (*arg)->not_null_tables(); + not_null_tables_cache|= (*args)->not_null_tables(); + return 0; +} + + +static int srtcmp_in(CHARSET_INFO *cs, const String *x,const String *y) +{ + return cs->coll->strnncollsp(cs, + (uchar *) x->ptr(),x->length(), + (uchar *) y->ptr(),y->length()); +} + void Item_func_in::fix_length_and_dec() { - if (const_item()) + Item **arg, **arg_end; + uint const_itm= 1; + THD *thd= current_thd; + + agg_cmp_type(&cmp_type, args, arg_count); + + if (cmp_type == STRING_RESULT && + agg_arg_charsets(cmp_collation, args, arg_count, MY_COLL_CMP_CONV)) + return; + + for (arg=args+1, arg_end=args+arg_count; arg != arg_end ; arg++) + const_itm&= arg[0]->const_item(); + + /* + Row item with NULLs inside can return NULL or FALSE => + they can't be processed as static + */ + if (const_itm && !nulls_in_row()) { - switch (item->result_type()) { + switch (cmp_type) { case STRING_RESULT: - if (item->binary) - array=new in_string(arg_count,(qsort_cmp) stringcmp); /* purecov: inspected */ - else - array=new in_string(arg_count,(qsort_cmp) sortcmp); + array=new in_string(arg_count-1,(qsort2_cmp) srtcmp_in, + cmp_collation.collation); break; case INT_RESULT: - array= new in_longlong(arg_count); + array= new in_longlong(arg_count-1); break; case REAL_RESULT: - array= new in_double(arg_count); + array= new in_double(arg_count-1); + break; + case ROW_RESULT: + array= new in_row(arg_count-1, args[0]); break; + default: + DBUG_ASSERT(0); + return; } - if (array && !(current_thd->fatal_error)) // If not EOM + if (array && !(thd->is_fatal_error)) // If not EOM { uint j=0; - for (uint i=0 ; i < arg_count ; i++) + for (uint i=1 ; i < arg_count ; i++) { array->set(j,args[i]); if (!args[i]->null_value) // Skip NULL values j++; + else + have_null= 1; } if ((array->used_count=j)) array->sort(); @@ -1069,82 +1975,54 @@ void Item_func_in::fix_length_and_dec() } else { - switch (item->result_type()) { - 
case STRING_RESULT: - if (item->binary) - in_item= new cmp_item_binary_string; - else - in_item= new cmp_item_sort_string; - break; - case INT_RESULT: - in_item= new cmp_item_int; - break; - case REAL_RESULT: - in_item= new cmp_item_real; - break; - } + in_item= cmp_item::get_comparator(args[0]); + if (cmp_type == STRING_RESULT) + in_item->cmp_charset= cmp_collation.collation; } - maybe_null= item->maybe_null; - max_length=2; - used_tables_cache|= item->used_tables(); - /* not_null_tables_cache is only dependent on the argument to in */ - not_null_tables_cache= item->not_null_tables(); - const_item_cache&= item->const_item(); + maybe_null= args[0]->maybe_null; + max_length= 1; } void Item_func_in::print(String *str) { str->append('('); - item->print(str); - Item_func::print(str); - str->append(')'); + args[0]->print(str); + if (negated) + str->append(" not", 4); + str->append(" in (", 5); + print_args(str, 1); + str->append("))", 2); } longlong Item_func_in::val_int() { + DBUG_ASSERT(fixed == 1); if (array) { - int tmp=array->find(item); - null_value=item->null_value; - return tmp; + int tmp=array->find(args[0]); + null_value=args[0]->null_value || (!tmp && have_null); + return (longlong) (!null_value && tmp != negated); } - in_item->store_value(item); - if ((null_value=item->null_value)) + in_item->store_value(args[0]); + if ((null_value=args[0]->null_value)) return 0; - for (uint i=0 ; i < arg_count ; i++) + have_null= 0; + for (uint i=1 ; i < arg_count ; i++) { if (!in_item->cmp(args[i]) && !args[i]->null_value) - return 1; // Would maybe be nice with i ? + return (longlong) (!negated); + have_null|= args[i]->null_value; } - return 0; -} - - -void Item_func_in::update_used_tables() -{ - Item_func::update_used_tables(); - item->update_used_tables(); - used_tables_cache|=item->used_tables(); - const_item_cache&=item->const_item(); -} - -void Item_func_in::split_sum_func(List<Item> &fields) -{ - if (item->with_sum_func && item->type() != SUM_FUNC_ITEM) - item->split_sum_func(fields); - else if (item->used_tables() || item->type() == SUM_FUNC_ITEM) - { - fields.push_front(item); - item= new Item_ref((Item**) fields.head_ref(), 0, item->name); - } - Item_func::split_sum_func(fields); + null_value= have_null; + return (longlong) (!null_value && negated); } longlong Item_func_bit_or::val_int() { + DBUG_ASSERT(fixed == 1); ulonglong arg1= (ulonglong) args[0]->val_int(); if (args[0]->null_value) { @@ -1164,6 +2042,7 @@ longlong Item_func_bit_or::val_int() longlong Item_func_bit_and::val_int() { + DBUG_ASSERT(fixed == 1); ulonglong arg1= (ulonglong) args[0]->val_int(); if (args[0]->null_value) { @@ -1180,16 +2059,34 @@ longlong Item_func_bit_and::val_int() return (longlong) (arg1 & arg2); } +Item_cond::Item_cond(THD *thd, Item_cond *item) + :Item_bool_func(thd, item), + abort_on_null(item->abort_on_null), + and_tables_cache(item->and_tables_cache) +{ + /* + item->list will be copied by copy_andor_arguments() call + */ +} + + +void Item_cond::copy_andor_arguments(THD *thd, Item_cond *item) +{ + List_iterator_fast<Item> li(item->list); + while (Item *it= li++) + list.push_back(it->copy_andor_structure(thd)); +} + bool -Item_cond::fix_fields(THD *thd,TABLE_LIST *tables) +Item_cond::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) { + DBUG_ASSERT(fixed == 0); List_iterator<Item> li(list); Item *item; -#ifndef EMBEDDED_LIBRARY // Avoid compiler warning +#ifndef EMBEDDED_LIBRARY char buff[sizeof(char*)]; // Max local vars in function #endif - not_null_tables_cache= used_tables_cache= 0; 
const_item_cache= 0; /* @@ -1198,7 +2095,7 @@ Item_cond::fix_fields(THD *thd,TABLE_LIST *tables) */ and_tables_cache= ~(table_map) 0; - if (thd && check_stack_overrun(thd,buff)) + if (check_stack_overrun(thd, buff)) return 1; // Fatal error flag is set! while ((item=li++)) { @@ -1206,17 +2103,17 @@ Item_cond::fix_fields(THD *thd,TABLE_LIST *tables) while (item->type() == Item::COND_ITEM && ((Item_cond*) item)->functype() == functype()) { // Identical function - li.replace(((Item_cond*) item)->list); ((Item_cond*) item)->list.empty(); -#ifdef DELETE_ITEMS - delete (Item_cond*) item; -#endif item= *li.ref(); // new current item } if (abort_on_null) item->top_level_item(); - if (item->fix_fields(thd,tables)) + + // item can be substituted in fix_fields + if ((!item->fixed && + item->fix_fields(thd, tables, li.ref())) || + (item= *li.ref())->check_cols(1)) return 1; /* purecov: inspected */ used_tables_cache|= item->used_tables(); tmp_table_map= item->not_null_tables(); @@ -1227,32 +2124,49 @@ Item_cond::fix_fields(THD *thd,TABLE_LIST *tables) if (item->maybe_null) maybe_null=1; } - if (thd) - thd->cond_count+=list.elements; + thd->lex->current_select->cond_count+= list.elements; fix_length_and_dec(); + fixed= 1; return 0; } +bool Item_cond::walk(Item_processor processor, byte *arg) +{ + List_iterator_fast<Item> li(list); + Item *item; + while ((item= li++)) + if (item->walk(processor, arg)) + return 1; + return Item_func::walk(processor, arg); +} + + +/* + Move SUM items out from item tree and replace with reference + + SYNOPSIS + split_sum_func() + thd Thread handler + ref_pointer_array Pointer to array of reference fields + fields All fields in select + + NOTES + This function is run on all expression (SELECT list, WHERE, HAVING etc) + that have or refer (HAVING) to a SUM expression. + + The split is done to get an unique item for each SUM function + so that we can easily find and calculate them. + (Calculation done by update_sum_func() and copy_sum_funcs() in + sql_select.cc) +*/ -void Item_cond::split_sum_func(List<Item> &fields) +void Item_cond::split_sum_func(THD *thd, Item **ref_pointer_array, + List<Item> &fields) { List_iterator<Item> li(list); Item *item; - used_tables_cache=0; - const_item_cache=0; - while ((item=li++)) - { - if (item->with_sum_func && item->type() != SUM_FUNC_ITEM) - item->split_sum_func(fields); - else if (item->used_tables() || item->type() == SUM_FUNC_ITEM) - { - fields.push_front(item); - li.replace(new Item_ref((Item**) fields.head_ref(),0,item->name)); - } - item->update_used_tables(); - used_tables_cache|=item->used_tables(); - const_item_cache&=item->const_item(); - } + while ((item= li++)) + item->split_sum_func2(thd, ref_pointer_array, fields, li.ref()); } @@ -1296,6 +2210,24 @@ void Item_cond::print(String *str) str->append(')'); } + +void Item_cond::neg_arguments(THD *thd) +{ + List_iterator<Item> li(list); + Item *item; + while ((item= li++)) /* Apply not transformation to the arguments */ + { + Item *new_item= item->neg_transformer(thd); + if (!new_item) + { + if (!(new_item= new Item_func_not(item))) + return; // Fatal OEM error + } + VOID(li.replace(new_item)); + } +} + + /* Evalution of AND(expr, expr, expr ...) 
@@ -1316,6 +2248,7 @@ void Item_cond::print(String *str) longlong Item_cond_and::val_int() { + DBUG_ASSERT(fixed == 1); List_iterator_fast<Item> li(list); Item *item; null_value= 0; @@ -1333,6 +2266,7 @@ longlong Item_cond_and::val_int() longlong Item_cond_or::val_int() { + DBUG_ASSERT(fixed == 1); List_iterator_fast<Item> li(list); Item *item; null_value=0; @@ -1394,6 +2328,7 @@ Item *and_expressions(Item *a, Item *b, Item **org_item) longlong Item_func_isnull::val_int() { + DBUG_ASSERT(fixed == 1); /* Handle optimization if the argument can't be null This has to be here because of the test in update_used_tables(). @@ -1403,20 +2338,64 @@ longlong Item_func_isnull::val_int() return args[0]->is_null() ? 1: 0; } +longlong Item_is_not_null_test::val_int() +{ + DBUG_ASSERT(fixed == 1); + DBUG_ENTER("Item_is_not_null_test::val_int"); + if (!used_tables_cache) + { + owner->was_null|= (!cached_value); + DBUG_PRINT("info", ("cached :%d", cached_value)); + DBUG_RETURN(cached_value); + } + if (args[0]->is_null()) + { + DBUG_PRINT("info", ("null")) + owner->was_null|= 1; + DBUG_RETURN(0); + } + else + DBUG_RETURN(1); +} + +/* Optimize case of not_null_column IS NULL */ +void Item_is_not_null_test::update_used_tables() +{ + if (!args[0]->maybe_null) + { + used_tables_cache= 0; /* is always true */ + cached_value= (longlong) 1; + } + else + { + args[0]->update_used_tables(); + if (!(used_tables_cache=args[0]->used_tables())) + { + /* Remember if the value is always NULL or never NULL */ + cached_value= (longlong) !args[0]->is_null(); + } + } +} + + longlong Item_func_isnotnull::val_int() { + DBUG_ASSERT(fixed == 1); return args[0]->is_null() ? 0 : 1; } -void Item_func_like::fix_length_and_dec() +void Item_func_isnotnull::print(String *str) { - decimals=0; max_length=1; - // cmp_type=STRING_RESULT; // For quick select + str->append('('); + args[0]->print(str); + str->append(" is not null)", 13); } + longlong Item_func_like::val_int() { + DBUG_ASSERT(fixed == 1); String* res = args[0]->val_str(&tmp_value1); if (args[0]->null_value) { @@ -1432,10 +2411,10 @@ longlong Item_func_like::val_int() null_value=0; if (canDoTurboBM) return turboBM_matches(res->ptr(), res->length()) ? 1 : 0; - if (binary) - return wild_compare(*res,*res2,escape) ? 0 : 1; - else - return wild_case_compare(*res,*res2,escape) ? 0 : 1; + return my_wildcmp(cmp.cmp_collation.collation, + res->ptr(),res->ptr()+res->length(), + res2->ptr(),res2->ptr()+res2->length(), + escape,wild_one,wild_many) ? 0 : 1; } @@ -1459,69 +2438,131 @@ Item_func::optimize_type Item_func_like::select_optimize() const return OPTIMIZE_NONE; } -bool Item_func_like::fix_fields(THD *thd,struct st_table_list *tlist) + +bool Item_func_like::fix_fields(THD *thd, TABLE_LIST *tlist, Item ** ref) { - if (Item_bool_func2::fix_fields(thd, tlist)) + DBUG_ASSERT(fixed == 0); + if (Item_bool_func2::fix_fields(thd, tlist, ref) || + escape_item->fix_fields(thd, tlist, &escape_item)) return 1; - /* - TODO--we could do it for non-const, but we'd have to - recompute the tables for each row--probably not worth it. 
- */ - if (args[1]->const_item() && !(specialflag & SPECIAL_NO_NEW_FUNC)) + if (!escape_item->const_during_execution()) { - String* res2 = args[1]->val_str(&tmp_value2); - if (!res2) - return 0; // Null argument - - const size_t len = res2->length(); - const char* first = res2->ptr(); - const char* last = first + len - 1; - /* - len must be > 2 ('%pattern%') - heuristic: only do TurboBM for pattern_len > 2 - */ - - if (len > MIN_TURBOBM_PATTERN_LEN + 2 && - *first == wild_many && - *last == wild_many) + my_error(ER_WRONG_ARGUMENTS,MYF(0),"ESCAPE"); + return 1; + } + + if (escape_item->const_item()) + { + /* If we are on execution stage */ + String *escape_str= escape_item->val_str(&tmp_value1); + if (escape_str) { - const char* tmp = first + 1; - for (; *tmp != wild_many && *tmp != wild_one && *tmp != escape; tmp++) ; -#ifdef USE_MB - canDoTurboBM = (tmp == last) && !use_mb(default_charset_info); -#else - canDoTurboBM = tmp == last; -#endif + if (use_mb(cmp.cmp_collation.collation)) + { + CHARSET_INFO *cs= escape_str->charset(); + my_wc_t wc; + int rc= cs->cset->mb_wc(cs, &wc, + (const uchar*) escape_str->ptr(), + (const uchar*) escape_str->ptr() + + escape_str->length()); + escape= (int) (rc > 0 ? wc : '\\'); + } + else + { + /* + In the case of 8bit character set, we pass native + code instead of Unicode code as "escape" argument. + Convert to "cs" if charset of escape differs. + */ + CHARSET_INFO *cs= cmp.cmp_collation.collation; + uint32 unused; + if (escape_str->needs_conversion(escape_str->length(), + escape_str->charset(), cs, &unused)) + { + char ch; + uint errors; + uint32 cnvlen= copy_and_convert(&ch, 1, cs, escape_str->ptr(), + escape_str->length(), + escape_str->charset(), &errors); + escape= cnvlen ? ch : '\\'; + } + else + escape= *(escape_str->ptr()); + } } - - if (canDoTurboBM) + else + escape= '\\'; + + /* + We could also do boyer-more for non-const items, but as we would have to + recompute the tables for each row it's not worth it. 
+ */ + if (args[1]->const_item() && !use_strnxfrm(collation.collation) && + !(specialflag & SPECIAL_NO_NEW_FUNC)) { - pattern = first + 1; - pattern_len = len - 2; - DBUG_PRINT("info", ("Initializing pattern: '%s'", first)); - int *suff = (int*) thd->alloc(sizeof(int)*((pattern_len + 1)*2+ - alphabet_size)); - bmGs = suff + pattern_len + 1; - bmBc = bmGs + pattern_len + 1; - turboBM_compute_good_suffix_shifts(suff); - turboBM_compute_bad_character_shifts(); - DBUG_PRINT("info",("done")); + String* res2 = args[1]->val_str(&tmp_value2); + if (!res2) + return 0; // Null argument + + const size_t len = res2->length(); + const char* first = res2->ptr(); + const char* last = first + len - 1; + /* + len must be > 2 ('%pattern%') + heuristic: only do TurboBM for pattern_len > 2 + */ + + if (len > MIN_TURBOBM_PATTERN_LEN + 2 && + *first == wild_many && + *last == wild_many) + { + const char* tmp = first + 1; + for (; *tmp != wild_many && *tmp != wild_one && *tmp != escape; tmp++) ; + canDoTurboBM = (tmp == last) && !use_mb(args[0]->collation.collation); + } + if (canDoTurboBM) + { + pattern = first + 1; + pattern_len = len - 2; + DBUG_PRINT("info", ("Initializing pattern: '%s'", first)); + int *suff = (int*) thd->alloc(sizeof(int)*((pattern_len + 1)*2+ + alphabet_size)); + bmGs = suff + pattern_len + 1; + bmBc = bmGs + pattern_len + 1; + turboBM_compute_good_suffix_shifts(suff); + turboBM_compute_bad_character_shifts(); + DBUG_PRINT("info",("done")); + } } } return 0; } +void Item_func_like::cleanup() +{ + canDoTurboBM= FALSE; + Item_bool_func2::cleanup(); +} + #ifdef USE_REGEX bool -Item_func_regex::fix_fields(THD *thd,TABLE_LIST *tables) +Item_func_regex::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) { - if (args[0]->fix_fields(thd,tables) || args[1]->fix_fields(thd,tables)) + DBUG_ASSERT(fixed == 0); + if ((!args[0]->fixed && + args[0]->fix_fields(thd, tables, args)) || args[0]->check_cols(1) || + (!args[1]->fixed && + args[1]->fix_fields(thd,tables, args + 1)) || args[1]->check_cols(1)) return 1; /* purecov: inspected */ with_sum_func=args[0]->with_sum_func || args[1]->with_sum_func; - max_length=1; decimals=0; - binary=args[0]->binary || args[1]->binary; + max_length= 1; + decimals= 0; + + if (agg_arg_charsets(cmp_collation, args, 2, MY_COLL_CMP_CONV)) + return 1; + used_tables_cache=args[0]->used_tables() | args[1]->used_tables(); not_null_tables_cache= (args[0]->not_null_tables() | args[1]->not_null_tables()); @@ -1529,7 +2570,7 @@ Item_func_regex::fix_fields(THD *thd,TABLE_LIST *tables) if (!regex_compiled && args[1]->const_item()) { char buff[MAX_FIELD_WIDTH]; - String tmp(buff,sizeof(buff)); + String tmp(buff,sizeof(buff),&my_charset_bin); String *res=args[1]->val_str(&tmp); if (args[1]->null_value) { // Will always return NULL @@ -1537,11 +2578,14 @@ Item_func_regex::fix_fields(THD *thd,TABLE_LIST *tables) return 0; } int error; - if ((error=regcomp(&preg,res->c_ptr(), - binary ? REG_EXTENDED | REG_NOSUB : - REG_EXTENDED | REG_NOSUB | REG_ICASE))) + if ((error= my_regcomp(&preg,res->c_ptr(), + ((cmp_collation.collation->state & + (MY_CS_BINSORT | MY_CS_CSSORT)) ? 
+ REG_EXTENDED | REG_NOSUB : + REG_EXTENDED | REG_NOSUB | REG_ICASE), + cmp_collation.collation))) { - (void) regerror(error,&preg,buff,sizeof(buff)); + (void) my_regerror(error,&preg,buff,sizeof(buff)); my_printf_error(ER_REGEXP_ERROR,ER(ER_REGEXP_ERROR),MYF(0),buff); return 1; } @@ -1550,13 +2594,16 @@ Item_func_regex::fix_fields(THD *thd,TABLE_LIST *tables) } else maybe_null=1; + fixed= 1; return 0; } + longlong Item_func_regex::val_int() { + DBUG_ASSERT(fixed == 1); char buff[MAX_FIELD_WIDTH]; - String *res, tmp(buff,sizeof(buff)); + String *res, tmp(buff,sizeof(buff),&my_charset_bin); res=args[0]->val_str(&tmp); if (args[0]->null_value) @@ -1567,7 +2614,7 @@ longlong Item_func_regex::val_int() if (!regex_is_const) { char buff2[MAX_FIELD_WIDTH]; - String *res2, tmp2(buff2,sizeof(buff2)); + String *res2, tmp2(buff2,sizeof(buff2),&my_charset_bin); res2= args[1]->val_str(&tmp2); if (args[1]->null_value) @@ -1580,13 +2627,15 @@ longlong Item_func_regex::val_int() prev_regexp.copy(*res2); if (regex_compiled) { - regfree(&preg); + my_regfree(&preg); regex_compiled=0; } - if (regcomp(&preg,res2->c_ptr(), - binary ? REG_EXTENDED | REG_NOSUB : - REG_EXTENDED | REG_NOSUB | REG_ICASE)) - + if (my_regcomp(&preg,res2->c_ptr(), + ((cmp_collation.collation->state & + (MY_CS_BINSORT | MY_CS_CSSORT)) ? + REG_EXTENDED | REG_NOSUB : + REG_EXTENDED | REG_NOSUB | REG_ICASE), + cmp_collation.collation)) { null_value=1; return 0; @@ -1595,26 +2644,30 @@ longlong Item_func_regex::val_int() } } null_value=0; - return regexec(&preg,res->c_ptr(),0,(regmatch_t*) 0,0) ? 0 : 1; + return my_regexec(&preg,res->c_ptr(),0,(my_regmatch_t*) 0,0) ? 0 : 1; } -Item_func_regex::~Item_func_regex() +void Item_func_regex::cleanup() { + DBUG_ENTER("Item_func_regex::cleanup"); + Item_bool_func::cleanup(); if (regex_compiled) { - regfree(&preg); + my_regfree(&preg); regex_compiled=0; } + DBUG_VOID_RETURN; } + #endif /* USE_REGEX */ #ifdef LIKE_CMP_TOUPPER -#define likeconv(A) (uchar) toupper(A) +#define likeconv(cs,A) (uchar) (cs)->toupper(A) #else -#define likeconv(A) (uchar) my_sort_order[(uchar) (A)] +#define likeconv(cs,A) (uchar) (cs)->sort_order[(uchar) (A)] #endif @@ -1629,10 +2682,11 @@ void Item_func_like::turboBM_compute_suffixes(int *suff) int f = 0; int g = plm1; int *const splm1 = suff + plm1; + CHARSET_INFO *cs= cmp.cmp_collation.collation; *splm1 = pattern_len; - if (binary) + if (!cs->sort_order) { int i; for (i = pattern_len - 2; i >= 0; i--) @@ -1665,7 +2719,7 @@ void Item_func_like::turboBM_compute_suffixes(int *suff) g = i; // g = min(i, g) f = i; while (g >= 0 && - likeconv(pattern[g]) == likeconv(pattern[g + plm1 - f])) + likeconv(cs, pattern[g]) == likeconv(cs, pattern[g + plm1 - f])) g--; suff[i] = f - g; } @@ -1728,12 +2782,14 @@ void Item_func_like::turboBM_compute_bad_character_shifts() { int *i; int *end = bmBc + alphabet_size; + int j; + const int plm1 = pattern_len - 1; + CHARSET_INFO *cs= cmp.cmp_collation.collation; + for (i = bmBc; i < end; i++) *i = pattern_len; - int j; - const int plm1 = pattern_len - 1; - if (binary) + if (!cs->sort_order) { for (j = 0; j < plm1; j++) bmBc[(uint) (uchar) pattern[j]] = plm1 - j; @@ -1741,7 +2797,7 @@ void Item_func_like::turboBM_compute_bad_character_shifts() else { for (j = 0; j < plm1; j++) - bmBc[(uint) likeconv(pattern[j])] = plm1 - j; + bmBc[(uint) likeconv(cs,pattern[j])] = plm1 - j; } } @@ -1758,12 +2814,13 @@ bool Item_func_like::turboBM_matches(const char* text, int text_len) const int shift = pattern_len; int j = 0; int u = 0; + CHARSET_INFO *cs= 
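
The turboBM_* routines above precompute shift tables so that a constant '%pattern%' LIKE can scan the subject string without re-examining every byte. As a rough, standalone illustration of the bad-character ("skip") table idea only, here is plain Boyer-Moore-Horspool over single-byte, case-sensitive data; the server's version additionally builds the good-suffix table and routes every byte through likeconv()/the collation's sort_order, which this sketch does not attempt.

// Minimal, self-contained Boyer-Moore-Horspool sketch (not the server's TurboBM).
#include <cstring>
#include <cstddef>

static bool horspool_contains(const char *text, size_t text_len,
                              const char *pat, size_t pat_len)
{
  if (pat_len == 0 || pat_len > text_len)
    return pat_len == 0;

  size_t skip[256];
  for (size_t i = 0; i < 256; i++)
    skip[i] = pat_len;                       // default: shift by whole pattern
  for (size_t i = 0; i + 1 < pat_len; i++)
    skip[(unsigned char) pat[i]] = pat_len - 1 - i;

  for (size_t pos = 0; pos + pat_len <= text_len;
       pos += skip[(unsigned char) text[pos + pat_len - 1]])
  {
    if (memcmp(text + pos, pat, pat_len) == 0)
      return true;                           // pattern found at 'pos'
  }
  return false;
}

As in the code above, this only pays off when the literal between the leading and trailing '%' contains no further wildcards or escape characters; otherwise the generic my_wildcmp() path is used.
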
cmp.cmp_collation.collation; const int plm1= pattern_len - 1; const int tlmpl= text_len - pattern_len; /* Searching */ - if (binary) + if (!cs->sort_order) { while (j <= tlmpl) { @@ -1799,7 +2856,7 @@ bool Item_func_like::turboBM_matches(const char* text, int text_len) const while (j <= tlmpl) { register int i = plm1; - while (i >= 0 && likeconv(pattern[i]) == likeconv(text[i + j])) + while (i >= 0 && likeconv(cs,pattern[i]) == likeconv(cs,text[i + j])) { i--; if (i == plm1 - shift) @@ -1810,7 +2867,7 @@ bool Item_func_like::turboBM_matches(const char* text, int text_len) const register const int v = plm1 - i; turboShift = u - v; - bcShift = bmBc[(uint) likeconv(text[i + j])] - plm1 + i; + bcShift = bmBc[(uint) likeconv(cs, text[i + j])] - plm1 + i; shift = max(turboShift, bcShift); shift = max(shift, bmGs[i]); if (shift == bmGs[i]) @@ -1850,6 +2907,7 @@ bool Item_func_like::turboBM_matches(const char* text, int text_len) const longlong Item_cond_xor::val_int() { + DBUG_ASSERT(fixed == 1); List_iterator<Item> li(list); Item *item; int result=0; @@ -1865,3 +2923,118 @@ longlong Item_cond_xor::val_int() } return (longlong) result; } + +/* + Apply NOT transformation to the item and return a new one. + + SYNPOSIS + neg_transformer() + thd thread handler + + DESCRIPTION + Transform the item using next rules: + a AND b AND ... -> NOT(a) OR NOT(b) OR ... + a OR b OR ... -> NOT(a) AND NOT(b) AND ... + NOT(a) -> a + a = b -> a != b + a != b -> a = b + a < b -> a >= b + a >= b -> a < b + a > b -> a <= b + a <= b -> a > b + IS NULL(a) -> IS NOT NULL(a) + IS NOT NULL(a) -> IS NULL(a) + + RETURN + New item or + NULL if we cannot apply NOT transformation (see Item::neg_transformer()). +*/ + +Item *Item_func_not::neg_transformer(THD *thd) /* NOT(x) -> x */ +{ + return args[0]; +} + + +Item *Item_bool_rowready_func2::neg_transformer(THD *thd) +{ + Item *item= negated_item(); + return item; +} + + +/* a IS NULL -> a IS NOT NULL */ +Item *Item_func_isnull::neg_transformer(THD *thd) +{ + Item *item= new Item_func_isnotnull(args[0]); + return item; +} + + +/* a IS NOT NULL -> a IS NULL */ +Item *Item_func_isnotnull::neg_transformer(THD *thd) +{ + Item *item= new Item_func_isnull(args[0]); + return item; +} + + +Item *Item_cond_and::neg_transformer(THD *thd) /* NOT(a AND b AND ...) -> */ + /* NOT a OR NOT b OR ... */ +{ + neg_arguments(thd); + Item *item= new Item_cond_or(list); + return item; +} + + +Item *Item_cond_or::neg_transformer(THD *thd) /* NOT(a OR b OR ...) -> */ + /* NOT a AND NOT b AND ... 
*/ +{ + neg_arguments(thd); + Item *item= new Item_cond_and(list); + return item; +} + + +Item *Item_func_eq::negated_item() /* a = b -> a != b */ +{ + return new Item_func_ne(args[0], args[1]); +} + + +Item *Item_func_ne::negated_item() /* a != b -> a = b */ +{ + return new Item_func_eq(args[0], args[1]); +} + + +Item *Item_func_lt::negated_item() /* a < b -> a >= b */ +{ + return new Item_func_ge(args[0], args[1]); +} + + +Item *Item_func_ge::negated_item() /* a >= b -> a < b */ +{ + return new Item_func_lt(args[0], args[1]); +} + + +Item *Item_func_gt::negated_item() /* a > b -> a <= b */ +{ + return new Item_func_le(args[0], args[1]); +} + + +Item *Item_func_le::negated_item() /* a <= b -> a > b */ +{ + return new Item_func_gt(args[0], args[1]); +} + +// just fake method, should never be called +Item *Item_bool_rowready_func2::negated_item() +{ + DBUG_ASSERT(0); + return 0; +} diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index a1977e76f67..ade09113c63 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2003 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -17,65 +17,271 @@ /* compare and test functions */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_INTERFACE #pragma interface /* gcc class implementation */ #endif +extern Item_result item_cmp_type(Item_result a,Item_result b); +class Item_bool_func2; +class Arg_comparator; + +typedef int (Arg_comparator::*arg_cmp_func)(); + +class Arg_comparator: public Sql_alloc +{ + Item **a, **b; + arg_cmp_func func; + Item_bool_func2 *owner; + Arg_comparator *comparators; // used only for compare_row() + +public: + DTCollation cmp_collation; + + Arg_comparator() {}; + Arg_comparator(Item **a1, Item **a2): a(a1), b(a2) {}; + + int set_compare_func(Item_bool_func2 *owner, Item_result type); + inline int set_compare_func(Item_bool_func2 *owner_arg) + { + return set_compare_func(owner_arg, item_cmp_type((*a)->result_type(), + (*b)->result_type())); + } + inline int set_cmp_func(Item_bool_func2 *owner_arg, + Item **a1, Item **a2, + Item_result type) + { + a= a1; + b= a2; + return set_compare_func(owner_arg, type); + } + inline int set_cmp_func(Item_bool_func2 *owner_arg, + Item **a1, Item **a2) + { + return set_cmp_func(owner_arg, a1, a2, item_cmp_type((*a1)->result_type(), + (*a2)->result_type())); + } + inline int compare() { return (this->*func)(); } + + int compare_string(); // compare args[0] & args[1] + int compare_binary_string(); // compare args[0] & args[1] + int compare_real(); // compare args[0] & args[1] + int compare_int_signed(); // compare args[0] & args[1] + int compare_int_signed_unsigned(); + int compare_int_unsigned_signed(); + int compare_int_unsigned(); + int compare_row(); // compare args[0] & args[1] + int compare_e_string(); // compare args[0] & args[1] + int compare_e_binary_string(); // compare args[0] & args[1] + int compare_e_real(); // compare args[0] & args[1] + int compare_e_int(); // compare args[0] & args[1] + int compare_e_int_diff_signedness(); + int compare_e_row(); // compare args[0] & args[1] + + static arg_cmp_func comparator_matrix [4][2]; + + friend class Item_func; +}; + class Item_bool_func :public Item_int_func { public: Item_bool_func() :Item_int_func() {} Item_bool_func(Item *a) :Item_int_func(a) {} Item_bool_func(Item *a,Item *b) :Item_int_func(a,b) {} + Item_bool_func(THD *thd, Item_bool_func 
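
Arg_comparator, declared just above, decides once (in set_compare_func(), from the result types of both operands) which comparison routine to use and stores it as a pointer to member function, so per-row evaluation is a single indirect call instead of a type switch. A minimal standalone sketch of that dispatch pattern follows; Value and Cmp are invented stand-ins, not the server's types, and the hard-coded ternary plays the role that comparator_matrix appears to play in the real class.

// Pointer-to-member dispatch: choose the comparison routine once, call it per row.
#include <string>
#include <cstdio>

struct Value { bool is_str; double num; std::string str; };

class Cmp
{
  typedef int (Cmp::*cmp_func)(const Value&, const Value&) const;
  cmp_func func;

  int cmp_real(const Value &a, const Value &b) const
  { return a.num < b.num ? -1 : (a.num > b.num ? 1 : 0); }
  int cmp_string(const Value &a, const Value &b) const
  { int r= a.str.compare(b.str); return r < 0 ? -1 : (r > 0 ? 1 : 0); }

public:
  void set_cmp_func(bool both_strings)       // decided once, at "fix" time
  { func= both_strings ? &Cmp::cmp_string : &Cmp::cmp_real; }
  int compare(const Value &a, const Value &b) const
  { return (this->*func)(a, b); }            // one indirect call per row
};

int main()
{
  Cmp c;
  Value a= {false, 1.5, ""}, b= {false, 2.0, ""};
  c.set_cmp_func(false);
  std::printf("%d\n", c.compare(a, b));      // prints -1
  return 0;
}
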
*item) :Item_int_func(thd, item) {} + bool is_bool_func() { return 1; } void fix_length_and_dec() { decimals=0; max_length=1; } - unsigned int size_of() { return sizeof(*this);} +}; + +class Item_cache; +class Item_in_optimizer: public Item_bool_func +{ +protected: + Item_cache *cache; + bool save_cache; +public: + Item_in_optimizer(Item *a, Item_in_subselect *b): + Item_bool_func(a, my_reinterpret_cast(Item *)(b)), cache(0), save_cache(0) + {} + bool fix_fields(THD *, struct st_table_list *, Item **); + bool fix_left(THD *thd, struct st_table_list *tables, Item **ref); + bool is_null(); + /* + Item_in_optimizer item is special boolean function. On value request + (one of val, val_int or val_str methods) it evaluate left expression + of IN by storing it value in cache item (one of Item_cache* items), + then it test cache is it NULL. If left expression (cache) is NULL then + Item_in_optimizer return NULL, else it evaluate Item_in_subselect. + */ + longlong val_int(); + void cleanup(); + const char *func_name() const { return "<in_optimizer>"; } + Item_cache **get_cache() { return &cache; } + void keep_top_level_cache(); +}; + +class Comp_creator +{ +public: + virtual Item_bool_func2* create(Item *a, Item *b) const = 0; + virtual const char* symbol(bool invert) const = 0; + virtual bool eqne_op() const = 0; + virtual bool l_op() const = 0; +}; + +class Eq_creator :public Comp_creator +{ +public: + virtual Item_bool_func2* create(Item *a, Item *b) const; + virtual const char* symbol(bool invert) const { return invert? "<>" : "="; } + virtual bool eqne_op() const { return 1; } + virtual bool l_op() const { return 0; } +}; + +class Ne_creator :public Comp_creator +{ +public: + virtual Item_bool_func2* create(Item *a, Item *b) const; + virtual const char* symbol(bool invert) const { return invert? "=" : "<>"; } + virtual bool eqne_op() const { return 1; } + virtual bool l_op() const { return 0; } +}; + +class Gt_creator :public Comp_creator +{ +public: + virtual Item_bool_func2* create(Item *a, Item *b) const; + virtual const char* symbol(bool invert) const { return invert? "<=" : ">"; } + virtual bool eqne_op() const { return 0; } + virtual bool l_op() const { return 0; } +}; + +class Lt_creator :public Comp_creator +{ +public: + virtual Item_bool_func2* create(Item *a, Item *b) const; + virtual const char* symbol(bool invert) const { return invert? ">=" : "<"; } + virtual bool eqne_op() const { return 0; } + virtual bool l_op() const { return 1; } +}; + +class Ge_creator :public Comp_creator +{ +public: + virtual Item_bool_func2* create(Item *a, Item *b) const; + virtual const char* symbol(bool invert) const { return invert? "<" : ">="; } + virtual bool eqne_op() const { return 0; } + virtual bool l_op() const { return 0; } +}; + +class Le_creator :public Comp_creator +{ +public: + virtual Item_bool_func2* create(Item *a, Item *b) const; + virtual const char* symbol(bool invert) const { return invert? 
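
The Comp_creator hierarchy above gives the parser one small singleton per comparison operator that can both build the corresponding item and report the operator's symbol, optionally inverted (note how every symbol(invert) returns the complementary operator). A reduced sketch of the pattern, with Expr and Node as invented stand-ins for Item and Item_bool_func2:

// One object per operator: builds the node and knows its (inverted) symbol.
#include <cstdio>

struct Expr {};
struct Node { const char *op; Expr *l, *r; };

class Creator
{
public:
  virtual ~Creator() {}
  virtual Node *create(Expr *a, Expr *b) const = 0;
  virtual const char *symbol(bool invert) const = 0;
};

class Gt_creator : public Creator
{
public:
  Node *create(Expr *a, Expr *b) const { return new Node{">", a, b}; }
  const char *symbol(bool invert) const { return invert ? "<=" : ">"; }
};

int main()
{
  Gt_creator gt;
  Expr a, b;
  Node *n= gt.create(&a, &b);
  std::printf("%s (inverted: %s)\n", n->op, gt.symbol(true)); // "> (inverted: <=)"
  delete n;
  return 0;
}
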
">" : "<="; } + virtual bool eqne_op() const { return 0; } + virtual bool l_op() const { return 1; } }; class Item_bool_func2 :public Item_int_func { /* Bool with 2 string args */ protected: + Arg_comparator cmp; String tmp_value1,tmp_value2; + public: - Item_bool_func2(Item *a,Item *b) :Item_int_func(a,b) {} + Item_bool_func2(Item *a,Item *b) + :Item_int_func(a,b), cmp(tmp_arg, tmp_arg+1) {} void fix_length_and_dec(); - void set_cmp_func(Item_result type); - int (Item_bool_func2::*cmp_func)(); - int compare_string(); /* compare arg[0] & arg[1] */ - int compare_real(); /* compare arg[0] & arg[1] */ - int compare_int(); /* compare arg[0] & arg[1] */ + void set_cmp_func() + { + cmp.set_cmp_func(this, tmp_arg, tmp_arg+1); + } optimize_type select_optimize() const { return OPTIMIZE_OP; } virtual enum Functype rev_functype() const { return UNKNOWN_FUNC; } bool have_rev_func() const { return rev_functype() != UNKNOWN_FUNC; } void print(String *str) { Item_func::print_op(str); } bool is_null() { return test(args[0]->is_null() || args[1]->is_null()); } - unsigned int size_of() { return sizeof(*this);} + bool is_bool_func() { return 1; } + CHARSET_INFO *compare_collation() { return cmp.cmp_collation.collation; } + + friend class Arg_comparator; }; +class Item_bool_rowready_func2 :public Item_bool_func2 +{ +public: + Item_bool_rowready_func2(Item *a, Item *b) :Item_bool_func2(a, b) + { + allowed_arg_cols= 0; // Fetch this value from first argument + } + Item *neg_transformer(THD *thd); + virtual Item *negated_item(); +}; class Item_func_not :public Item_bool_func { public: Item_func_not(Item *a) :Item_bool_func(a) {} longlong val_int(); + enum Functype functype() const { return NOT_FUNC; } const char *func_name() const { return "not"; } + Item *neg_transformer(THD *thd); +}; + +class Item_maxmin_subselect; +class Item_func_not_all :public Item_func_not +{ + /* allow to check presence od values in max/min optimisation */ + Item_sum_hybrid *test_sum_item; + Item_maxmin_subselect *test_sub_item; + + bool abort_on_null; +public: + bool show; + + Item_func_not_all(Item *a) + :Item_func_not(a), test_sum_item(0), test_sub_item(0), abort_on_null(0), + show(0) + {} + virtual void top_level_item() { abort_on_null= 1; } + bool top_level() { return abort_on_null; } + longlong val_int(); + enum Functype functype() const { return NOT_ALL_FUNC; } + const char *func_name() const { return "<not>"; } + void print(String *str); + void set_sum_test(Item_sum_hybrid *item) { test_sum_item= item; }; + void set_sub_test(Item_maxmin_subselect *item) { test_sub_item= item; }; + bool empty_underlying_subquery(); +}; + + +class Item_func_nop_all :public Item_func_not_all +{ +public: + + Item_func_nop_all(Item *a) :Item_func_not_all(a) {} + longlong val_int(); + const char *func_name() const { return "<nop>"; } }; -class Item_func_eq :public Item_bool_func2 + +class Item_func_eq :public Item_bool_rowready_func2 { public: - Item_func_eq(Item *a,Item *b) :Item_bool_func2(a,b) { }; + Item_func_eq(Item *a,Item *b) :Item_bool_rowready_func2(a,b) {} longlong val_int(); enum Functype functype() const { return EQ_FUNC; } enum Functype rev_functype() const { return EQ_FUNC; } cond_result eq_cmp_result() const { return COND_TRUE; } const char *func_name() const { return "="; } + Item *negated_item(); }; -class Item_func_equal :public Item_bool_func2 +class Item_func_equal :public Item_bool_rowready_func2 { - Item_result cmp_result_type; public: - Item_func_equal(Item *a,Item *b) :Item_bool_func2(a,b) { }; + Item_func_equal(Item *a,Item 
*b) :Item_bool_rowready_func2(a,b) {}; longlong val_int(); void fix_length_and_dec(); table_map not_null_tables() const { return 0; } @@ -83,82 +289,121 @@ public: enum Functype rev_functype() const { return EQUAL_FUNC; } cond_result eq_cmp_result() const { return COND_TRUE; } const char *func_name() const { return "<=>"; } - unsigned int size_of() { return sizeof(*this);} + Item *neg_transformer(THD *thd) { return 0; } }; -class Item_func_ge :public Item_bool_func2 +class Item_func_ge :public Item_bool_rowready_func2 { public: - Item_func_ge(Item *a,Item *b) :Item_bool_func2(a,b) { }; + Item_func_ge(Item *a,Item *b) :Item_bool_rowready_func2(a,b) {}; longlong val_int(); enum Functype functype() const { return GE_FUNC; } enum Functype rev_functype() const { return LE_FUNC; } cond_result eq_cmp_result() const { return COND_TRUE; } const char *func_name() const { return ">="; } + Item *negated_item(); }; -class Item_func_gt :public Item_bool_func2 +class Item_func_gt :public Item_bool_rowready_func2 { public: - Item_func_gt(Item *a,Item *b) :Item_bool_func2(a,b) { }; + Item_func_gt(Item *a,Item *b) :Item_bool_rowready_func2(a,b) {}; longlong val_int(); enum Functype functype() const { return GT_FUNC; } enum Functype rev_functype() const { return LT_FUNC; } cond_result eq_cmp_result() const { return COND_FALSE; } const char *func_name() const { return ">"; } + Item *negated_item(); }; -class Item_func_le :public Item_bool_func2 +class Item_func_le :public Item_bool_rowready_func2 { public: - Item_func_le(Item *a,Item *b) :Item_bool_func2(a,b) { }; + Item_func_le(Item *a,Item *b) :Item_bool_rowready_func2(a,b) {}; longlong val_int(); enum Functype functype() const { return LE_FUNC; } enum Functype rev_functype() const { return GE_FUNC; } cond_result eq_cmp_result() const { return COND_TRUE; } const char *func_name() const { return "<="; } + Item *negated_item(); }; -class Item_func_lt :public Item_bool_func2 +class Item_func_lt :public Item_bool_rowready_func2 { public: - Item_func_lt(Item *a,Item *b) :Item_bool_func2(a,b) { } + Item_func_lt(Item *a,Item *b) :Item_bool_rowready_func2(a,b) {} longlong val_int(); enum Functype functype() const { return LT_FUNC; } enum Functype rev_functype() const { return GT_FUNC; } cond_result eq_cmp_result() const { return COND_FALSE; } const char *func_name() const { return "<"; } + Item *negated_item(); }; -class Item_func_ne :public Item_bool_func2 +class Item_func_ne :public Item_bool_rowready_func2 { public: - Item_func_ne(Item *a,Item *b) :Item_bool_func2(a,b) { } + Item_func_ne(Item *a,Item *b) :Item_bool_rowready_func2(a,b) {} longlong val_int(); enum Functype functype() const { return NE_FUNC; } cond_result eq_cmp_result() const { return COND_FALSE; } - optimize_type select_optimize() const { return OPTIMIZE_NONE; } + optimize_type select_optimize() const { return OPTIMIZE_KEY; } const char *func_name() const { return "<>"; } + Item *negated_item(); }; -class Item_func_between :public Item_int_func +/* + The class Item_func_opt_neg is defined to factor out the functionality + common for the classes Item_func_between and Item_func_in. The objects + of these classes can express predicates or there negations. + The alternative approach would be to create pairs Item_func_between, + Item_func_notbetween and Item_func_in, Item_func_notin. 
+ +*/ + +class Item_func_opt_neg :public Item_int_func { - int (*string_compare)(const String *x,const String *y); +public: + bool negated; /* <=> the item represents NOT <func> */ + bool pred_level; /* <=> [NOT] <func> is used on a predicate level */ +public: + Item_func_opt_neg(Item *a, Item *b, Item *c) + :Item_int_func(a, b, c), negated(0), pred_level(0) {} + Item_func_opt_neg(List<Item> &list) + :Item_int_func(list), negated(0), pred_level(0) {} +public: + inline void negate() { negated= !negated; } + inline void top_level_item() { pred_level= 1; } + Item *neg_transformer(THD *thd) + { + negated= !negated; + return this; + } +}; + + +class Item_func_between :public Item_func_opt_neg +{ + DTCollation cmp_collation; public: Item_result cmp_type; String value0,value1,value2; - Item_func_between(Item *a,Item *b,Item *c) :Item_int_func(a,b,c) {} + Item_func_between(Item *a, Item *b, Item *c) + :Item_func_opt_neg(a, b, c) {} longlong val_int(); optimize_type select_optimize() const { return OPTIMIZE_KEY; } enum Functype functype() const { return BETWEEN; } const char *func_name() const { return "between"; } + bool fix_fields(THD *, struct st_table_list *, Item **); void fix_length_and_dec(); + void print(String *str); + CHARSET_INFO *compare_collation() { return cmp_collation.collation; } }; @@ -167,7 +412,6 @@ class Item_func_strcmp :public Item_bool_func2 public: Item_func_strcmp(Item *a,Item *b) :Item_bool_func2(a,b) {} longlong val_int(); - void fix_length_and_dec() { max_length=2; } optimize_type select_optimize() const { return OPTIMIZE_NONE; } const char *func_name() const { return "strcmp"; } }; @@ -175,36 +419,37 @@ public: class Item_func_interval :public Item_int_func { - Item *item; + Item_row *row; double *intervals; public: - Item_func_interval(Item *a,List<Item> &list) - :Item_int_func(list),item(a),intervals(0) {} - longlong val_int(); - bool fix_fields(THD *thd,struct st_table_list *tlist) + Item_func_interval(Item_row *a) + :Item_int_func(a),row(a),intervals(0) { - return (item->fix_fields(thd,tlist) || Item_func::fix_fields(thd,tlist)); + allowed_arg_cols= 0; // Fetch this value from first argument } - void split_sum_func(List<Item> &fields); + longlong val_int(); void fix_length_and_dec(); - ~Item_func_interval() { delete item; } const char *func_name() const { return "interval"; } - void update_used_tables(); - unsigned int size_of() { return sizeof(*this);} }; class Item_func_ifnull :public Item_func { enum Item_result cached_result_type; + enum_field_types cached_field_type; + bool field_type_defined; public: - Item_func_ifnull(Item *a,Item *b) :Item_func(a,b) { } + Item_func_ifnull(Item *a,Item *b) + :Item_func(a,b), cached_result_type(INT_RESULT) + {} double val(); longlong val_int(); String *val_str(String *str); enum Item_result result_type () const { return cached_result_type; } + enum_field_types field_type() const; void fix_length_and_dec(); const char *func_name() const { return "ifnull"; } + Field *tmp_table_field(TABLE *table); table_map not_null_tables() const { return 0; } }; @@ -213,19 +458,16 @@ class Item_func_if :public Item_func { enum Item_result cached_result_type; public: - Item_func_if(Item *a,Item *b,Item *c) :Item_func(a,b,c) { } + Item_func_if(Item *a,Item *b,Item *c) + :Item_func(a,b,c), cached_result_type(INT_RESULT) + {} double val(); longlong val_int(); String *val_str(String *str); enum Item_result result_type () const { return cached_result_type; } - bool fix_fields(THD *thd,struct st_table_list *tlist) - { - args[0]->top_level_item(); - 
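
Item_func_opt_neg above lets BETWEEN and IN carry their own negation as a flag instead of introducing separate Item_func_notbetween/Item_func_notin classes, and the neg_transformer()/negated_item() methods earlier in the .cc file push NOT down through AND/OR and the comparison operators. A standalone sketch of that pushdown on a toy expression tree (node kinds invented for the example):

// NOT pushdown: De Morgan on AND/OR, operator inversion, a flag flip for BETWEEN.
#include <memory>
#include <vector>

enum Kind { AND_NODE, OR_NODE, LT_NODE, GE_NODE, BETWEEN_NODE };

struct Node
{
  Kind kind;
  bool negated= false;                       // like Item_func_opt_neg::negated
  std::vector<std::unique_ptr<Node> > kids;
};

static void push_not(Node &n)
{
  switch (n.kind)
  {
  case AND_NODE:                             // NOT(a AND b) -> NOT a OR NOT b
    n.kind= OR_NODE;
    for (auto &k : n.kids) push_not(*k);
    break;
  case OR_NODE:                              // NOT(a OR b) -> NOT a AND NOT b
    n.kind= AND_NODE;
    for (auto &k : n.kids) push_not(*k);
    break;
  case LT_NODE: n.kind= GE_NODE; break;      // a < b  ->  a >= b
  case GE_NODE: n.kind= LT_NODE; break;      // a >= b ->  a < b
  case BETWEEN_NODE:
    n.negated= !n.negated;                   // [NOT] BETWEEN stays one node
    break;
  }
}

In the server the transformation fires when a NOT wraps the item, so a double negation simply collapses: Item_func_not::neg_transformer() above just returns args[0].
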
return Item_func::fix_fields(thd,tlist); - } + bool fix_fields(THD *, struct st_table_list *, Item **); void fix_length_and_dec(); const char *func_name() const { return "if"; } - table_map not_null_tables() const { return 0; } }; @@ -233,13 +475,16 @@ class Item_func_nullif :public Item_bool_func2 { enum Item_result cached_result_type; public: - Item_func_nullif(Item *a,Item *b) :Item_bool_func2(a,b) { } + Item_func_nullif(Item *a,Item *b) + :Item_bool_func2(a,b), cached_result_type(INT_RESULT) + {} double val(); longlong val_int(); String *val_str(String *str); enum Item_result result_type () const { return cached_result_type; } void fix_length_and_dec(); const char *func_name() const { return "nullif"; } + void print(String *str) { Item_func::print(str); } table_map not_null_tables() const { return 0; } bool is_null(); }; @@ -249,7 +494,9 @@ class Item_func_coalesce :public Item_func { enum Item_result cached_result_type; public: - Item_func_coalesce(List<Item> &list) :Item_func(list) {} + Item_func_coalesce(List<Item> &list) + :Item_func(list),cached_result_type(INT_RESULT) + {} double val(); longlong val_int(); String *val_str(String *); @@ -262,25 +509,40 @@ public: class Item_func_case :public Item_func { - Item * first_expr, *else_expr; + int first_expr_num, else_expr_num; enum Item_result cached_result_type; String tmp_value; + uint ncases; + Item_result cmp_type; + DTCollation cmp_collation; public: - Item_func_case(List<Item> &list, Item *first_expr_, Item *else_expr_) - :Item_func(list), first_expr(first_expr_), else_expr(else_expr_) {} + Item_func_case(List<Item> &list, Item *first_expr_arg, Item *else_expr_arg) + :Item_func(), first_expr_num(-1), else_expr_num(-1), + cached_result_type(INT_RESULT) + { + ncases= list.elements; + if (first_expr_arg) + { + first_expr_num= list.elements; + list.push_back(first_expr_arg); + } + if (else_expr_arg) + { + else_expr_num= list.elements; + list.push_back(else_expr_arg); + } + set_arguments(list); + } double val(); longlong val_int(); String *val_str(String *); void fix_length_and_dec(); - void update_used_tables(); table_map not_null_tables() const { return 0; } enum Item_result result_type () const { return cached_result_type; } const char *func_name() const { return "case"; } void print(String *str); - bool fix_fields(THD *thd,struct st_table_list *tlist); - void split_sum_func(List<Item> &fields); Item *find_item(String *str); - unsigned int size_of() { return sizeof(*this);} + CHARSET_INFO *compare_collation() { return cmp_collation.collation; } }; @@ -291,37 +553,38 @@ class in_vector :public Sql_alloc protected: char *base; uint size; - qsort_cmp compare; + qsort2_cmp compare; + CHARSET_INFO *collation; uint count; public: uint used_count; - in_vector(uint elements,uint element_length,qsort_cmp cmp_func) + in_vector() {} + in_vector(uint elements,uint element_length,qsort2_cmp cmp_func, + CHARSET_INFO *cmp_coll) :base((char*) sql_calloc(elements*element_length)), - size(element_length), compare(cmp_func), count(elements), - used_count(elements) {} + size(element_length), compare(cmp_func), collation(cmp_coll), + count(elements), used_count(elements) {} virtual ~in_vector() {} virtual void set(uint pos,Item *item)=0; virtual byte *get_value(Item *item)=0; void sort() { - qsort(base,used_count,size,compare); + qsort2(base,used_count,size,compare,collation); } int find(Item *item); }; - class in_string :public in_vector { char buff[80]; String tmp; public: - in_string(uint elements,qsort_cmp cmp_func); + in_string(uint 
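
Item_func_case, declared above, no longer keeps the CASE operand and the ELSE branch as separate Item pointers: its constructor appends them to the ordinary argument list and records their slots in first_expr_num/else_expr_num, so the generic argument machinery (fix_fields, walk, print, ...) covers them automatically. A small sketch of that "append the optional operands and remember their indexes" layout, with invented names and std::string standing in for expressions:

// Optional operands stored in the common argument array, located by index.
#include <string>
#include <utility>
#include <vector>

struct CaseExpr
{
  std::vector<std::string> args;             // when1, then1, when2, then2, ...
  int first_expr_num;                        // slot of the CASE operand, -1 if none
  int else_expr_num;                         // slot of the ELSE value, -1 if none

  CaseExpr(std::vector<std::string> when_then,
           const std::string *operand, const std::string *else_val)
    : args(std::move(when_then)), first_expr_num(-1), else_expr_num(-1)
  {
    if (operand)
    {
      first_expr_num= (int) args.size();     // remember where it lives
      args.push_back(*operand);
    }
    if (else_val)
    {
      else_expr_num= (int) args.size();
      args.push_back(*else_val);
    }
  }
};
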
elements,qsort2_cmp cmp_func, CHARSET_INFO *cs); ~in_string(); void set(uint pos,Item *item); byte *get_value(Item *item); }; - class in_longlong :public in_vector { longlong tmp; @@ -331,7 +594,6 @@ public: byte *get_value(Item *item); }; - class in_double :public in_vector { double tmp; @@ -341,7 +603,6 @@ public: byte *get_value(Item *item); }; - /* ** Classes for easy comparing of non const items */ @@ -349,111 +610,199 @@ public: class cmp_item :public Sql_alloc { public: - cmp_item() {} + CHARSET_INFO *cmp_charset; + cmp_item() { cmp_charset= &my_charset_bin; } virtual ~cmp_item() {} - virtual void store_value(Item *item)=0; - virtual int cmp(Item *item)=0; + virtual void store_value(Item *item)= 0; + virtual int cmp(Item *item)= 0; + // for optimized IN with row + virtual int compare(cmp_item *item)= 0; + static cmp_item* get_comparator(Item *); + virtual cmp_item *make_same()= 0; + virtual void store_value_by_template(cmp_item *tmpl, Item *item) + { + store_value(item); + } }; - -class cmp_item_sort_string :public cmp_item { - protected: - char value_buff[80]; - String value,*value_res; +class cmp_item_string :public cmp_item +{ +protected: + String *value_res; public: - cmp_item_sort_string() :value(value_buff,sizeof(value_buff)) {} - void store_value(Item *item) - { - value_res=item->val_str(&value); - } - int cmp(Item *arg) - { - char buff[80]; - String tmp(buff,sizeof(buff)),*res; - if (!(res=arg->val_str(&tmp))) - return 1; /* Can't be right */ - return sortcmp(value_res,res); - } + cmp_item_string (CHARSET_INFO *cs) { cmp_charset= cs; } + friend class cmp_item_sort_string; + friend class cmp_item_sort_string_in_static; }; -class cmp_item_binary_string :public cmp_item_sort_string { +class cmp_item_sort_string :public cmp_item_string +{ +protected: + char value_buff[80]; + String value; public: - cmp_item_binary_string() {} + cmp_item_sort_string(CHARSET_INFO *cs): + cmp_item_string(cs), + value(value_buff, sizeof(value_buff), cs) {} + void store_value(Item *item) + { + value_res= item->val_str(&value); + } int cmp(Item *arg) - { - char buff[80]; - String tmp(buff,sizeof(buff)),*res; - if (!(res=arg->val_str(&tmp))) - return 1; /* Can't be right */ - return stringcmp(value_res,res); - } + { + char buff[80]; + String tmp(buff, sizeof(buff), cmp_charset), *res; + if (!(res= arg->val_str(&tmp))) + return 1; /* Can't be right */ + return sortcmp(value_res, res, cmp_charset); + } + int compare(cmp_item *c) + { + cmp_item_string *cmp= (cmp_item_string *)c; + return sortcmp(value_res, cmp->value_res, cmp_charset); + } + cmp_item *make_same(); }; - class cmp_item_int :public cmp_item { longlong value; public: void store_value(Item *item) - { - value=item->val_int(); - } + { + value= item->val_int(); + } int cmp(Item *arg) - { - return value != arg->val_int(); - } + { + return value != arg->val_int(); + } + int compare(cmp_item *c) + { + cmp_item_int *cmp= (cmp_item_int *)c; + return (value < cmp->value) ? -1 : ((value == cmp->value) ? 0 : 1); + } + cmp_item *make_same(); }; - class cmp_item_real :public cmp_item { double value; public: void store_value(Item *item) - { - value= item->val(); - } + { + value= item->val(); + } int cmp(Item *arg) - { - return value != arg->val(); - } + { + return value != arg->val(); + } + int compare(cmp_item *c) + { + cmp_item_real *cmp= (cmp_item_real *)c; + return (value < cmp->value)? -1 : ((value == cmp->value) ? 
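
The in_vector subclasses above (in_string, in_longlong, in_double, and in_row below) hold a constant IN list as a flat array that is sorted once with a collation-aware qsort2 comparator and then probed per row by in_vector::find(). The idea in isolation, using standard containers instead of sql_calloc'd arrays and ignoring collations:

// Sort the constant IN list once, answer each row with a binary search.
#include <algorithm>
#include <vector>

class in_longlong_sketch
{
  std::vector<long long> values;
public:
  explicit in_longlong_sketch(std::vector<long long> v) : values(std::move(v))
  {
    std::sort(values.begin(), values.end()); // done once, at fix time
  }
  bool find(long long x) const
  {
    return std::binary_search(values.begin(), values.end(), x); // per row
  }
};
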
0 : 1); + } + cmp_item *make_same(); }; +class cmp_item_row :public cmp_item +{ + cmp_item **comparators; + uint n; +public: + cmp_item_row(): comparators(0), n(0) {} + ~cmp_item_row(); + void store_value(Item *item); + int cmp(Item *arg); + int compare(cmp_item *arg); + cmp_item *make_same(); + void store_value_by_template(cmp_item *tmpl, Item *); +}; + + +class in_row :public in_vector +{ + cmp_item_row tmp; +public: + in_row(uint elements, Item *); + ~in_row(); + void set(uint pos,Item *item); + byte *get_value(Item *item); +}; + +/* + cmp_item for optimized IN with row (right part string, which never + be changed) +*/ + +class cmp_item_sort_string_in_static :public cmp_item_string +{ + protected: + String value; +public: + cmp_item_sort_string_in_static(CHARSET_INFO *cs): + cmp_item_string(cs) {} + void store_value(Item *item) + { + value_res= item->val_str(&value); + } + int cmp(Item *item) + { + // Should never be called + DBUG_ASSERT(0); + return 1; + } + int compare(cmp_item *c) + { + cmp_item_string *cmp= (cmp_item_string *)c; + return sortcmp(value_res, cmp->value_res, cmp_charset); + } + cmp_item *make_same() + { + return new cmp_item_sort_string_in_static(cmp_charset); + } +}; -class Item_func_in :public Item_int_func +class Item_func_in :public Item_func_opt_neg { - Item *item; + Item_result cmp_type; in_vector *array; cmp_item *in_item; + bool have_null; + DTCollation cmp_collation; public: - Item_func_in(Item *a,List<Item> &list) - :Item_int_func(list),item(a),array(0),in_item(0) {} - longlong val_int(); - bool fix_fields(THD *thd,struct st_table_list *tlist) + Item_func_in(List<Item> &list) + :Item_func_opt_neg(list), array(0), in_item(0), have_null(0) { - bool res= (item->fix_fields(thd,tlist) || Item_func::fix_fields(thd,tlist)); - with_sum_func= with_sum_func || item->with_sum_func; - return res; + allowed_arg_cols= 0; // Fetch this value from first argument } + longlong val_int(); + bool fix_fields(THD *, struct st_table_list *, Item **); void fix_length_and_dec(); - ~Item_func_in() { delete item; delete array; delete in_item; } + void cleanup() + { + DBUG_ENTER("Item_func_in::cleanup"); + Item_int_func::cleanup(); + delete array; + delete in_item; + array= 0; + in_item= 0; + DBUG_VOID_RETURN; + } optimize_type select_optimize() const { return array ? 
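
Item_func_in here keeps both strategies: the pre-sorted array when every list element was constant (in which case select_optimize() can report OPTIMIZE_KEY), and per-item cmp_item comparison otherwise, while have_null records whether the list contained NULLs so that a miss yields UNKNOWN rather than FALSE. A standalone sketch of just the three-valued result handling, with std::optional (C++17) standing in for a nullable SQL value:

// SQL IN semantics with NULLs: a miss against a list containing NULL is UNKNOWN.
#include <optional>
#include <vector>

static std::optional<bool>
in_list(const std::optional<long> &left,
        const std::vector<std::optional<long> > &list)
{
  if (!left)
    return std::nullopt;                     // NULL IN (...) is UNKNOWN
  bool have_null= false;
  for (const std::optional<long> &v : list)
  {
    if (!v) { have_null= true; continue; }   // remember NULLs, keep scanning
    if (*v == *left)
      return true;
  }
  if (have_null)
    return std::nullopt;                     // e.g. 1 IN (2, NULL) is UNKNOWN
  return false;
}
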
OPTIMIZE_KEY : OPTIMIZE_NONE; } - Item *key_item() const { return item; } void print(String *str); enum Functype functype() const { return IN_FUNC; } const char *func_name() const { return " IN "; } - void update_used_tables(); - void split_sum_func(List<Item> &fields); - unsigned int size_of() { return sizeof(*this);} + bool nulls_in_row(); + bool is_bool_func() { return 1; } + CHARSET_INFO *compare_collation() { return cmp_collation.collation; } }; - - /* Functions used by where clause */ class Item_func_isnull :public Item_bool_func { +protected: longlong cached_value; public: Item_func_isnull(Item *a) :Item_bool_func(a) {} @@ -462,11 +811,11 @@ public: void fix_length_and_dec() { decimals=0; max_length=1; maybe_null=0; - Item_func_isnull::update_used_tables(); + update_used_tables(); } const char *func_name() const { return "isnull"; } /* Optimize case of not_null_column IS NULL */ - void update_used_tables() + virtual void update_used_tables() { if (!args[0]->maybe_null) { @@ -477,7 +826,7 @@ public: else { args[0]->update_used_tables(); - if (!(used_tables_cache=args[0]->used_tables())) + if ((const_item_cache= !(used_tables_cache= args[0]->used_tables()))) { /* Remember if the value is always NULL or never NULL */ cached_value= (longlong) args[0]->is_null(); @@ -486,7 +835,29 @@ public: } table_map not_null_tables() const { return 0; } optimize_type select_optimize() const { return OPTIMIZE_NULL; } - unsigned int size_of() { return sizeof(*this);} + Item *neg_transformer(THD *thd); + CHARSET_INFO *compare_collation() { return args[0]->collation.collation; } +}; + +/* Functions used by HAVING for rewriting IN subquery */ + +class Item_in_subselect; +class Item_is_not_null_test :public Item_func_isnull +{ + Item_in_subselect* owner; +public: + Item_is_not_null_test(Item_in_subselect* ow, Item *a) + :Item_func_isnull(a), owner(ow) + {} + enum Functype functype() const { return ISNOTNULLTEST_FUNC; } + longlong val_int(); + const char *func_name() const { return "<is_not_null_test>"; } + void update_used_tables(); + /* + we add RAND_TABLE_BIT to prevent moving this item from HAVING to WHERE + */ + table_map used_tables() const + { return used_tables_cache | RAND_TABLE_BIT; } }; @@ -503,6 +874,9 @@ public: const char *func_name() const { return "isnotnull"; } optimize_type select_optimize() const { return OPTIMIZE_NULL; } table_map not_null_tables() const { return 0; } + Item *neg_transformer(THD *thd); + void print(String *str); + CHARSET_INFO *compare_collation() { return args[0]->collation.collation; } }; @@ -523,40 +897,43 @@ class Item_func_like :public Item_bool_func2 bool turboBM_matches(const char* text, int text_len) const; enum { alphabet_size = 256 }; + Item *escape_item; + public: - char escape; + int escape; - Item_func_like(Item *a,Item *b, char* escape_arg) - :Item_bool_func2(a,b), canDoTurboBM(false), pattern(0), pattern_len(0), - bmGs(0), bmBc(0), escape(*escape_arg) {} + Item_func_like(Item *a,Item *b, Item *escape_arg) + :Item_bool_func2(a,b), canDoTurboBM(FALSE), pattern(0), pattern_len(0), + bmGs(0), bmBc(0), escape_item(escape_arg) {} longlong val_int(); enum Functype functype() const { return LIKE_FUNC; } optimize_type select_optimize() const; cond_result eq_cmp_result() const { return COND_TRUE; } const char *func_name() const { return "like"; } - void fix_length_and_dec(); - bool fix_fields(THD *thd,struct st_table_list *tlist); - unsigned int size_of() { return sizeof(*this);} + bool fix_fields(THD *thd, struct st_table_list *tlist, Item **ref); + void cleanup(); 
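
Item_func_like above now stores the ESCAPE clause as an item (escape_item) plus a decoded int escape; its fix_fields(), earlier in this diff, requires the escape expression to be constant during execution and, for multi-byte character sets, decodes the first character with the charset's mb_wc handler rather than taking the first byte, falling back to '\\'. A standalone illustration of that decoding step for UTF-8 only, not the server's charset framework:

// Decode the first code point of the ESCAPE string; default to '\\' on trouble.
#include <string>
#include <cstdint>

static int escape_code_point_utf8(const std::string &esc)
{
  if (esc.empty())
    return '\\';
  unsigned char c = (unsigned char) esc[0];
  if (c < 0x80)
    return c;                                // single-byte character
  int len = (c & 0xE0) == 0xC0 ? 2 : (c & 0xF0) == 0xE0 ? 3 :
            (c & 0xF8) == 0xF0 ? 4 : 0;
  if (!len || esc.size() < (size_t) len)
    return '\\';                             // malformed sequence: fall back
  std::uint32_t cp = c & (0xFF >> (len + 1));
  for (int i = 1; i < len; i++)
  {
    if (((unsigned char) esc[i] & 0xC0) != 0x80)
      return '\\';                           // broken continuation byte
    cp = (cp << 6) | ((unsigned char) esc[i] & 0x3F);
  }
  return (int) cp;
}
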
}; #ifdef USE_REGEX -#include <regex.h> +#include "my_regex.h" class Item_func_regex :public Item_bool_func { - regex_t preg; + my_regex_t preg; bool regex_compiled; bool regex_is_const; String prev_regexp; + DTCollation cmp_collation; public: Item_func_regex(Item *a,Item *b) :Item_bool_func(a,b), regex_compiled(0),regex_is_const(0) {} - ~Item_func_regex(); + void cleanup(); longlong val_int(); - bool fix_fields(THD *thd,struct st_table_list *tlist); - const char *func_name() const { return "regex"; } - unsigned int size_of() { return sizeof(*this);} + bool fix_fields(THD *thd, struct st_table_list *tlist, Item **ref); + const char *func_name() const { return "regexp"; } + void print(String *str) { print_op(str); } + CHARSET_INFO *compare_collation() { return cmp_collation.collation; } }; #else @@ -567,6 +944,7 @@ public: Item_func_regex(Item *a,Item *b) :Item_bool_func(a,b) {} longlong val_int() { return 0;} const char *func_name() const { return "regex"; } + void print(String *str) { print_op(str); } }; #endif /* USE_REGEX */ @@ -583,21 +961,31 @@ protected: public: /* Item_cond() is only used to create top level items */ - Item_cond() : Item_bool_func(), abort_on_null(1) { const_item_cache=0; } - Item_cond(Item *i1,Item *i2) :Item_bool_func(), abort_on_null(0) - { list.push_back(i1); list.push_back(i2); } + Item_cond(): Item_bool_func(), abort_on_null(1) + { const_item_cache=0; } + Item_cond(Item *i1,Item *i2) + :Item_bool_func(), abort_on_null(0) + { + list.push_back(i1); + list.push_back(i2); + } + Item_cond(THD *thd, Item_cond *item); + Item_cond(List<Item> &nlist) + :Item_bool_func(), list(nlist), abort_on_null(0) {} bool add(Item *item) { return list.push_back(item); } - bool fix_fields(THD *,struct st_table_list *); + bool fix_fields(THD *, struct st_table_list *, Item **ref); enum Type type() const { return COND_ITEM; } List<Item>* argument_list() { return &list; } table_map used_tables() const; void update_used_tables(); void print(String *str); - void split_sum_func(List<Item> &fields); + void split_sum_func(THD *thd, Item **ref_pointer_array, List<Item> &fields); friend int setup_conds(THD *thd,TABLE_LIST *tables,COND **conds); - unsigned int size_of() { return sizeof(*this);} void top_level_item() { abort_on_null=1; } + void copy_andor_arguments(THD *thd, Item_cond *item); + bool walk(Item_processor processor, byte *arg); + void neg_arguments(THD *thd); }; @@ -606,9 +994,21 @@ class Item_cond_and :public Item_cond public: Item_cond_and() :Item_cond() {} Item_cond_and(Item *i1,Item *i2) :Item_cond(i1,i2) {} + Item_cond_and(THD *thd, Item_cond_and *item) :Item_cond(thd, item) {} + Item_cond_and(List<Item> &list): Item_cond(list) {} enum Functype functype() const { return COND_AND_FUNC; } longlong val_int(); const char *func_name() const { return "and"; } + table_map not_null_tables() const + { return abort_on_null ? 
not_null_tables_cache: and_tables_cache; } + Item* copy_andor_structure(THD *thd) + { + Item_cond_and *item; + if ((item= new Item_cond_and(thd, this))) + item->copy_andor_arguments(thd, this); + return item; + } + Item *neg_transformer(THD *thd); }; class Item_cond_or :public Item_cond @@ -616,29 +1016,28 @@ class Item_cond_or :public Item_cond public: Item_cond_or() :Item_cond() {} Item_cond_or(Item *i1,Item *i2) :Item_cond(i1,i2) {} + Item_cond_or(THD *thd, Item_cond_or *item) :Item_cond(thd, item) {} + Item_cond_or(List<Item> &list): Item_cond(list) {} enum Functype functype() const { return COND_OR_FUNC; } longlong val_int(); const char *func_name() const { return "or"; } table_map not_null_tables() const { return and_tables_cache; } + Item* copy_andor_structure(THD *thd) + { + Item_cond_or *item; + if ((item= new Item_cond_or(thd, this))) + item->copy_andor_arguments(thd, this); + return item; + } + Item *neg_transformer(THD *thd); }; -/* Some usefull inline functions */ - -inline Item *and_conds(Item *a,Item *b) -{ - if (!b) return a; - if (!a) return b; - Item *cond=new Item_cond_and(a,b); - if (cond) - cond->update_used_tables(); - return cond; -} - /* XOR is Item_cond, not an Item_int_func bevause we could like to optimize (a XOR b) later on. It's low prio, though */ + class Item_cond_xor :public Item_cond { public: @@ -649,7 +1048,17 @@ public: enum Type type() const { return FUNC_ITEM; } longlong val_int(); const char *func_name() const { return "xor"; } + void top_level_item() {} }; +/* Some usefull inline functions */ + +inline Item *and_conds(Item *a, Item *b) +{ + if (!b) return a; + if (!a) return b; + return new Item_cond_and(a, b); +} + Item *and_expressions(Item *a, Item *b, Item **org_item); diff --git a/sql/item_create.cc b/sql/item_create.cc index 877e63047d6..2b12a1310b9 100644 --- a/sql/item_create.cc +++ b/sql/item_create.cc @@ -1,4 +1,4 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2003 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -34,14 +34,14 @@ Item *create_func_acos(Item* a) Item *create_func_aes_encrypt(Item* a, Item* b) { - return new Item_func_aes_encrypt(a, b); + return new Item_func_aes_encrypt(a, b); } - + Item *create_func_aes_decrypt(Item* a, Item* b) { return new Item_func_aes_decrypt(a, b); } - + Item *create_func_ascii(Item* a) { return new Item_func_ascii(a); @@ -76,9 +76,13 @@ Item *create_func_ceiling(Item* a) Item *create_func_connection_id(void) { THD *thd=current_thd; - thd->safe_to_cache_query=0; - return new Item_int(NullS,(longlong) thd->thread_id,10); -} + thd->lex->safe_to_cache_query= 0; + return new Item_int(NullS,(longlong) + ((thd->slave_thread) ? 
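
The create_func_* factories in item_create.cc, such as create_func_connection_id() being built around this point and create_func_get_lock() just below, flag the statement at parse time when its result is session-dependent or has side effects, so the query cache will not store it; CONNECTION_ID() additionally uses pseudo_thread_id in a slave thread, which keeps the replayed binlogged statement consistent with the originating connection. A toy sketch of that factory-time flagging, with ParseCtx and Expr as invented stand-ins for LEX and Item:

// Mark the statement uncacheable while constructing a session-dependent item.
struct ParseCtx
{
  bool safe_to_cache_query;                  // cleared for session-dependent items
  bool slave_thread;
  unsigned long thread_id;
  unsigned long pseudo_thread_id;            // id recorded for replication replay
};

struct Expr { unsigned long value; };

static Expr *create_connection_id(ParseCtx *ctx)
{
  ctx->safe_to_cache_query= false;           // CONNECTION_ID() differs per session
  unsigned long id= ctx->slave_thread ? ctx->pseudo_thread_id : ctx->thread_id;
  return new Expr{id};
}
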
+ thd->variables.pseudo_thread_id : + thd->thread_id), + 10); +} Item *create_func_conv(Item* a, Item *b, Item *c) { @@ -144,8 +148,8 @@ Item *create_func_floor(Item* a) Item *create_func_found_rows(void) { THD *thd=current_thd; - thd->safe_to_cache_query=0; - return new Item_int(NullS,(longlong) thd->found_rows(),21); + thd->lex->safe_to_cache_query= 0; + return new Item_func_found_rows(); } Item *create_func_from_days(Item* a) @@ -155,7 +159,7 @@ Item *create_func_from_days(Item* a) Item *create_func_get_lock(Item* a, Item *b) { - current_thd->safe_to_cache_query=0; + current_thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT); return new Item_func_get_lock(a, b); } @@ -215,6 +219,11 @@ Item *create_func_bit_length(Item* a) return new Item_func_bit_length(a); } +Item *create_func_coercibility(Item* a) +{ + return new Item_func_coercibility(a); +} + Item *create_func_char_length(Item* a) { return new Item_func_char_length(a); @@ -242,7 +251,7 @@ Item *create_func_lpad(Item* a, Item *b, Item *c) Item *create_func_ltrim(Item* a) { - return new Item_func_ltrim(a,new Item_string(" ",1)); + return new Item_func_ltrim(a); } Item *create_func_md5(Item* a) @@ -283,7 +292,7 @@ Item *create_func_period_diff(Item* a, Item *b) Item *create_func_pi(void) { - return new Item_real(NullS,M_PI,6,8); + return new Item_real("pi()",M_PI,6,8); } Item *create_func_pow(Item* a, Item *b) @@ -297,10 +306,11 @@ Item *create_func_current_user() char buff[HOSTNAME_LENGTH+USERNAME_LENGTH+2]; uint length; - thd->safe_to_cache_query= 0; + thd->lex->safe_to_cache_query= 0; length= (uint) (strxmov(buff, thd->priv_user, "@", thd->priv_host, NullS) - buff); - return new Item_string(NullS, thd->memdup(buff, length), length); + return new Item_string(NullS, thd->memdup(buff, length), length, + system_charset_info); } Item *create_func_quarter(Item* a) @@ -308,11 +318,6 @@ Item *create_func_quarter(Item* a) return new Item_func_quarter(a); } -Item *create_func_password(Item* a) -{ - return new Item_func_password(a); -} - Item *create_func_radians(Item *a) { return new Item_func_units((char*) "radians",a,M_PI/180,0.0); @@ -320,7 +325,7 @@ Item *create_func_radians(Item *a) Item *create_func_release_lock(Item* a) { - current_thd->safe_to_cache_query=0; + current_thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT); return new Item_func_release_lock(a); } @@ -341,7 +346,7 @@ Item *create_func_rpad(Item* a, Item *b, Item *c) Item *create_func_rtrim(Item* a) { - return new Item_func_rtrim(a,new Item_string(" ",1)); + return new Item_func_rtrim(a); } Item *create_func_sec_to_time(Item* a) @@ -361,12 +366,26 @@ Item *create_func_sin(Item* a) Item *create_func_sha(Item* a) { - return new Item_func_sha(a); + return new Item_func_sha(a); } - + Item *create_func_space(Item *a) { - return new Item_func_repeat(new Item_string(" ",1),a); + CHARSET_INFO *cs= current_thd->variables.collation_connection; + Item *sp; + + if (cs->mbminlen > 1) + { + uint dummy_errors; + sp= new Item_string("",0,cs); + if (sp) + sp->str_value.copy(" ", 1, &my_charset_latin1, cs, &dummy_errors); + } + else + { + sp= new Item_string(" ",1,cs); + } + return sp ? 
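
create_func_space() here builds its single-space string in the connection collation, and when that character set has mbminlen > 1 (a fixed-width set such as ucs2) it cannot reuse the one-byte latin1 literal: the space must be converted, since in ucs2 it occupies the two bytes 0x00 0x20. A standalone illustration of that kind of conversion for an ASCII-to-UCS-2BE case, not the server's copy_and_convert():

// Widen an ASCII string to UCS-2 big-endian code units.
#include <string>

static std::string ascii_to_ucs2be(const std::string &ascii)
{
  std::string out;
  out.reserve(ascii.size() * 2);
  for (unsigned char c : ascii)
  {
    out.push_back('\0');                     // high byte of the UCS-2 code unit
    out.push_back((char) c);                 // low byte: the ASCII value
  }
  return out;                                // " " becomes the bytes 0x00 0x20
}
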
new Item_func_repeat(sp, a) : 0; } Item *create_func_soundex(Item* a) @@ -409,9 +428,21 @@ Item *create_func_ucase(Item* a) return new Item_func_ucase(a); } +Item *create_func_unhex(Item* a) +{ + return new Item_func_unhex(a); +} + +Item *create_func_uuid(void) +{ + return new Item_func_uuid(); +} + Item *create_func_version(void) { - return new Item_string(NullS,server_version, strlen(server_version)); + return new Item_string(NullS,server_version, + (uint) strlen(server_version), + system_charset_info, DERIVATION_SYSCONST); } Item *create_func_weekday(Item* a) @@ -426,33 +457,273 @@ Item *create_func_year(Item* a) Item *create_load_file(Item* a) { - current_thd->safe_to_cache_query=0; + current_thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT); return new Item_load_file(a); } -Item *create_func_cast(Item *a, Item_cast cast_type) + +Item *create_func_cast(Item *a, Cast_target cast_type, int len, + CHARSET_INFO *cs) { Item *res; LINT_INIT(res); + switch (cast_type) { case ITEM_CAST_BINARY: res= new Item_func_binary(a); break; - case ITEM_CAST_CHAR: res= new Item_char_typecast(a); break; case ITEM_CAST_SIGNED_INT: res= new Item_func_signed(a); break; case ITEM_CAST_UNSIGNED_INT: res= new Item_func_unsigned(a); break; case ITEM_CAST_DATE: res= new Item_date_typecast(a); break; case ITEM_CAST_TIME: res= new Item_time_typecast(a); break; case ITEM_CAST_DATETIME: res= new Item_datetime_typecast(a); break; + case ITEM_CAST_CHAR: + res= new Item_char_typecast(a, len, cs ? cs : + current_thd->variables.collation_connection); + break; } return res; } Item *create_func_is_free_lock(Item* a) { - current_thd->safe_to_cache_query=0; + current_thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT); return new Item_func_is_free_lock(a); } +Item *create_func_is_used_lock(Item* a) +{ + current_thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT); + return new Item_func_is_used_lock(a); +} + Item *create_func_quote(Item* a) { return new Item_func_quote(a); } + +#ifdef HAVE_SPATIAL +Item *create_func_as_wkt(Item *a) +{ + return new Item_func_as_wkt(a); +} + +Item *create_func_as_wkb(Item *a) +{ + return new Item_func_as_wkb(a); +} + +Item *create_func_srid(Item *a) +{ + return new Item_func_srid(a); +} + +Item *create_func_startpoint(Item *a) +{ + return new Item_func_spatial_decomp(a, Item_func::SP_STARTPOINT); +} + +Item *create_func_endpoint(Item *a) +{ + return new Item_func_spatial_decomp(a, Item_func::SP_ENDPOINT); +} + +Item *create_func_exteriorring(Item *a) +{ + return new Item_func_spatial_decomp(a, Item_func::SP_EXTERIORRING); +} + +Item *create_func_pointn(Item *a, Item *b) +{ + return new Item_func_spatial_decomp_n(a, b, Item_func::SP_POINTN); +} + +Item *create_func_interiorringn(Item *a, Item *b) +{ + return new Item_func_spatial_decomp_n(a, b, Item_func::SP_INTERIORRINGN); +} + +Item *create_func_geometryn(Item *a, Item *b) +{ + return new Item_func_spatial_decomp_n(a, b, Item_func::SP_GEOMETRYN); +} + +Item *create_func_centroid(Item *a) +{ + return new Item_func_centroid(a); +} + +Item *create_func_envelope(Item *a) +{ + return new Item_func_envelope(a); +} + +Item *create_func_equals(Item *a, Item *b) +{ + return new Item_func_spatial_rel(a, b, Item_func::SP_EQUALS_FUNC); +} + +Item *create_func_disjoint(Item *a, Item *b) +{ + return new Item_func_spatial_rel(a, b, Item_func::SP_DISJOINT_FUNC); +} + +Item *create_func_intersects(Item *a, Item *b) +{ + return new Item_func_spatial_rel(a, b, Item_func::SP_INTERSECTS_FUNC); +} + +Item *create_func_touches(Item *a, Item *b) +{ + return new 
Item_func_spatial_rel(a, b, Item_func::SP_TOUCHES_FUNC); +} + +Item *create_func_crosses(Item *a, Item *b) +{ + return new Item_func_spatial_rel(a, b, Item_func::SP_CROSSES_FUNC); +} + +Item *create_func_within(Item *a, Item *b) +{ + return new Item_func_spatial_rel(a, b, Item_func::SP_WITHIN_FUNC); +} + +Item *create_func_contains(Item *a, Item *b) +{ + return new Item_func_spatial_rel(a, b, Item_func::SP_CONTAINS_FUNC); +} + +Item *create_func_overlaps(Item *a, Item *b) +{ + return new Item_func_spatial_rel(a, b, Item_func::SP_OVERLAPS_FUNC); +} + +Item *create_func_isempty(Item *a) +{ + return new Item_func_isempty(a); +} + +Item *create_func_issimple(Item *a) +{ + return new Item_func_issimple(a); +} + +Item *create_func_isclosed(Item *a) +{ + return new Item_func_isclosed(a); +} + +Item *create_func_geometry_type(Item *a) +{ + return new Item_func_geometry_type(a); +} + +Item *create_func_dimension(Item *a) +{ + return new Item_func_dimension(a); +} + +Item *create_func_x(Item *a) +{ + return new Item_func_x(a); +} + +Item *create_func_y(Item *a) +{ + return new Item_func_y(a); +} + +Item *create_func_numpoints(Item *a) +{ + return new Item_func_numpoints(a); +} + +Item *create_func_numinteriorring(Item *a) +{ + return new Item_func_numinteriorring(a); +} + +Item *create_func_numgeometries(Item *a) +{ + return new Item_func_numgeometries(a); +} + +Item *create_func_area(Item *a) +{ + return new Item_func_area(a); +} + +Item *create_func_glength(Item *a) +{ + return new Item_func_glength(a); +} + +Item *create_func_point(Item *a, Item *b) +{ + return new Item_func_point(a, b); +} +#endif /*HAVE_SPATIAL*/ + +Item *create_func_crc32(Item* a) +{ + return new Item_func_crc32(a); +} + +Item *create_func_compress(Item* a) +{ + return new Item_func_compress(a); +} + +Item *create_func_uncompress(Item* a) +{ + return new Item_func_uncompress(a); +} + +Item *create_func_uncompressed_length(Item* a) +{ + return new Item_func_uncompressed_length(a); +} + +Item *create_func_datediff(Item *a, Item *b) +{ + return new Item_func_minus(new Item_func_to_days(a), + new Item_func_to_days(b)); +} + +Item *create_func_weekofyear(Item *a) +{ + return new Item_func_week(a, new Item_int((char*) "0", 3, 1)); +} + +Item *create_func_makedate(Item* a,Item* b) +{ + return new Item_func_makedate(a, b); +} + +Item *create_func_addtime(Item* a,Item* b) +{ + return new Item_func_add_time(a, b, 0, 0); +} + +Item *create_func_subtime(Item* a,Item* b) +{ + return new Item_func_add_time(a, b, 0, 1); +} + +Item *create_func_timediff(Item* a,Item* b) +{ + return new Item_func_timediff(a, b); +} + +Item *create_func_maketime(Item* a,Item* b,Item* c) +{ + return new Item_func_maketime(a, b, c); +} + +Item *create_func_str_to_date(Item* a,Item* b) +{ + return new Item_func_str_to_date(a, b); +} + +Item *create_func_last_day(Item *a) +{ + return new Item_func_last_day(a); +} diff --git a/sql/item_create.h b/sql/item_create.h index 5381ad946ae..faff6f45220 100644 --- a/sql/item_create.h +++ b/sql/item_create.h @@ -25,12 +25,15 @@ Item *create_func_asin(Item* a); Item *create_func_bin(Item* a); Item *create_func_bit_count(Item* a); Item *create_func_bit_length(Item* a); +Item *create_func_coercibility(Item* a); Item *create_func_ceiling(Item* a); Item *create_func_char_length(Item* a); +Item *create_func_cast(Item *a, Cast_target cast_type, int len, CHARSET_INFO *cs); Item *create_func_connection_id(void); Item *create_func_conv(Item* a, Item *b, Item *c); Item *create_func_cos(Item* a); Item *create_func_cot(Item* a); +Item 
*create_func_crc32(Item* a);
Item *create_func_date_format(Item* a,Item *b);
Item *create_func_dayname(Item* a);
Item *create_func_dayofmonth(Item* a);
@@ -70,7 +73,6 @@ Item *create_func_pi(void);
Item *create_func_pow(Item* a, Item *b);
Item *create_func_current_user(void);
Item *create_func_quarter(Item* a);
-Item *create_func_password(Item* a);
Item *create_func_radians(Item *a);
Item *create_func_release_lock(Item* a);
Item *create_func_repeat(Item* a, Item *b);
@@ -85,13 +87,73 @@ Item *create_func_soundex(Item* a);
Item *create_func_space(Item *);
Item *create_func_sqrt(Item* a);
Item *create_func_strcmp(Item* a, Item *b);
-Item *create_func_tan(Item* a);;
+Item *create_func_tan(Item* a);
Item *create_func_time_format(Item *a, Item *b);
Item *create_func_time_to_sec(Item* a);
Item *create_func_to_days(Item* a);
Item *create_func_ucase(Item* a);
+Item *create_func_unhex(Item* a);
+Item *create_func_uuid(void);
Item *create_func_version(void);
Item *create_func_weekday(Item* a);
Item *create_load_file(Item* a);
Item *create_func_is_free_lock(Item* a);
+Item *create_func_is_used_lock(Item* a);
Item *create_func_quote(Item* a);
+
+#ifdef HAVE_SPATIAL
+
+Item *create_func_geometry_from_text(Item *a);
+Item *create_func_as_wkt(Item *a);
+Item *create_func_as_wkb(Item *a);
+Item *create_func_srid(Item *a);
+Item *create_func_startpoint(Item *a);
+Item *create_func_endpoint(Item *a);
+Item *create_func_exteriorring(Item *a);
+Item *create_func_centroid(Item *a);
+Item *create_func_envelope(Item *a);
+Item *create_func_pointn(Item *a, Item *b);
+Item *create_func_interiorringn(Item *a, Item *b);
+Item *create_func_geometryn(Item *a, Item *b);
+
+Item *create_func_equals(Item *a, Item *b);
+Item *create_func_disjoint(Item *a, Item *b);
+Item *create_func_intersects(Item *a, Item *b);
+Item *create_func_touches(Item *a, Item *b);
+Item *create_func_crosses(Item *a, Item *b);
+Item *create_func_within(Item *a, Item *b);
+Item *create_func_contains(Item *a, Item *b);
+Item *create_func_overlaps(Item *a, Item *b);
+
+Item *create_func_isempty(Item *a);
+Item *create_func_issimple(Item *a);
+Item *create_func_isclosed(Item *a);
+
+Item *create_func_geometry_type(Item *a);
+Item *create_func_dimension(Item *a);
+Item *create_func_x(Item *a);
+Item *create_func_y(Item *a);
+Item *create_func_area(Item *a);
+Item *create_func_glength(Item *a);
+
+Item *create_func_numpoints(Item *a);
+Item *create_func_numinteriorring(Item *a);
+Item *create_func_numgeometries(Item *a);
+
+Item *create_func_point(Item *a, Item *b);
+
+#endif /*HAVE_SPATIAL*/
+
+Item *create_func_compress(Item *a);
+Item *create_func_uncompress(Item *a);
+Item *create_func_uncompressed_length(Item *a);
+
+Item *create_func_datediff(Item *a, Item *b);
+Item *create_func_weekofyear(Item *a);
+Item *create_func_makedate(Item* a,Item* b);
+Item *create_func_addtime(Item* a,Item* b);
+Item *create_func_subtime(Item* a,Item* b);
+Item *create_func_timediff(Item* a,Item* b);
+Item *create_func_maketime(Item* a,Item* b,Item* c);
+Item *create_func_str_to_date(Item* a,Item* b);
+Item *create_func_last_day(Item *a);
diff --git a/sql/item_func.cc b/sql/item_func.cc
index e83d1f35db8..174a8c55d01 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -1,4 +1,4 @@
-/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
+/* Copyright (C) 2000-2003 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -17,16 +17,27 @@
/* This file defines all numerical functions */
-#ifdef __GNUC__
+#ifdef USE_PRAGMA_IMPLEMENTATION
#pragma implementation // gcc: Class implementation
#endif
#include "mysql_priv.h"
+#include "slave.h" // for wait_for_master_pos
#include <m_ctype.h>
#include <hash.h>
#include <time.h>
#include <ft_global.h>
-#include "slave.h" // for wait_for_master_pos
+
+
+bool check_reserved_words(LEX_STRING *name)
+{
+ if (!my_strcasecmp(system_charset_info, name->str, "GLOBAL") ||
+ !my_strcasecmp(system_charset_info, name->str, "LOCAL") ||
+ !my_strcasecmp(system_charset_info, name->str, "SESSION"))
+ return TRUE;
+ return FALSE;
+}
+
/*
return TRUE if item is a constant
*/
@@ -37,8 +48,9 @@ eval_const_cond(COND *cond)
}
-Item_func::Item_func(List<Item> &list)
+void Item_func::set_arguments(List<Item> &list)
{
+ allowed_arg_cols= 1;
arg_count=list.elements;
if ((args=(Item**) sql_alloc(sizeof(Item*)*arg_count)))
{
@@ -55,31 +67,112 @@ Item_func::Item_func(List<Item> &list)
list.empty(); // Fields are used
}
+Item_func::Item_func(List<Item> &list)
+ :allowed_arg_cols(1)
+{
+ set_arguments(list);
+}
+
+Item_func::Item_func(THD *thd, Item_func *item)
+ :Item_result_field(thd, item),
+ allowed_arg_cols(item->allowed_arg_cols),
+ arg_count(item->arg_count),
+ used_tables_cache(item->used_tables_cache),
+ not_null_tables_cache(item->not_null_tables_cache),
+ const_item_cache(item->const_item_cache)
+{
+ if (arg_count)
+ {
+ if (arg_count <=2)
+ args= tmp_arg;
+ else
+ {
+ if (!(args=(Item**) thd->alloc(sizeof(Item*)*arg_count)))
+ return;
+ }
+ memcpy((char*) args, (char*) item->args, sizeof(Item*)*arg_count);
+ }
+}
+
+
+/*
+ Resolve references to table column for a function and it's argument
+
+ SYNOPSIS:
+ fix_fields()
+ thd Thread object
+ tables List of all open tables involved in the query
+ ref Pointer to where this object is used. This reference
+ is used if we want to replace this object with another
+ one (for example in the summary functions).
+
+ DESCRIPTION
+ Call fix_fields() for all arguments to the function. The main intention
+ is to allow all Item_field() objects to setup pointers to the table fields.
+
+ Sets as a side effect the following class variables:
+ maybe_null Set if any argument may return NULL
+ with_sum_func Set if any of the arguments contains a sum function
+ used_tables_cache Set to union of the tables used by arguments
+
+ str_value.charset If this is a string function, set this to the
+ character set for the first argument.
+ If any argument is binary, this is set to binary
+
+ If for any item any of the defaults are wrong, then this can
+ be fixed in the fix_length_and_dec() function that is called
+ after this one or by writing a specialized fix_fields() for the
+ item.
+
+ RETURN VALUES
+ 0 ok
+ 1 Got error. Stored with my_error().
+*/
+
bool
-Item_func::fix_fields(THD *thd,TABLE_LIST *tables)
+Item_func::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref)
{
+ DBUG_ASSERT(fixed == 0);
Item **arg,**arg_end;
#ifndef EMBEDDED_LIBRARY // Avoid compiler warning
char buff[STACK_BUFF_ALLOC]; // Max argument in function
#endif
- binary=0;
used_tables_cache= not_null_tables_cache= 0;
const_item_cache=1;
- if (thd && check_stack_overrun(thd,buff))
+ if (check_stack_overrun(thd, buff))
return 1; // Fatal error if flag is set!
if (arg_count) { // Print purify happy for (arg=args, arg_end=args+arg_count; arg != arg_end ; arg++) { - Item *item=*arg; - if (item->fix_fields(thd,tables)) + Item *item; + /* + We can't yet set item to *arg as fix_fields may change *arg + We shouldn't call fix_fields() twice, so check 'fixed' field first + */ + if ((!(*arg)->fixed && (*arg)->fix_fields(thd, tables, arg))) return 1; /* purecov: inspected */ + + item= *arg; + + if (allowed_arg_cols) + { + if (item->check_cols(allowed_arg_cols)) + return 1; + } + else + { + /* we have to fetch allowed_arg_cols from first argument */ + DBUG_ASSERT(arg == args); // it is first argument + allowed_arg_cols= item->cols(); + DBUG_ASSERT(allowed_arg_cols); // Can't be 0 any more + } + if (item->maybe_null) maybe_null=1; - if (item->binary) - binary=1; + with_sum_func= with_sum_func || item->with_sum_func; used_tables_cache|= item->used_tables(); not_null_tables_cache|= item->not_null_tables(); @@ -87,24 +180,35 @@ Item_func::fix_fields(THD *thd,TABLE_LIST *tables) } } fix_length_and_dec(); + if (thd->net.last_errno) // An error inside fix_length_and_dec occured + return 1; + fixed= 1; return 0; } - -void Item_func::split_sum_func(List<Item> &fields) +bool Item_func::walk (Item_processor processor, byte *argument) { - Item **arg,**arg_end; - for (arg=args, arg_end=args+arg_count; arg != arg_end ; arg++) + if (arg_count) { - Item *item=*arg; - if (item->with_sum_func && item->type() != SUM_FUNC_ITEM) - item->split_sum_func(fields); - else if (item->used_tables() || item->type() == SUM_FUNC_ITEM) + Item **arg,**arg_end; + for (arg= args, arg_end= args+arg_count; arg != arg_end; arg++) { - fields.push_front(*arg); - *arg=new Item_ref((Item**) fields.head_ref(),0,item->name); + if ((*arg)->walk(processor, argument)) + return 1; } } + return (this->*processor)(argument); +} + + +/* See comments in Item_cmp_func::split_sum_func() */ + +void Item_func::split_sum_func(THD *thd, Item **ref_pointer_array, + List<Item> &fields) +{ + Item **arg, **arg_end; + for (arg= args, arg_end= args+arg_count; arg != arg_end ; arg++) + (*arg)->split_sum_func2(thd, ref_pointer_array, fields, arg); } @@ -137,13 +241,19 @@ void Item_func::print(String *str) { str->append(func_name()); str->append('('); - for (uint i=0 ; i < arg_count ; i++) + print_args(str, 0); + str->append(')'); +} + + +void Item_func::print_args(String *str, uint from) +{ + for (uint i=from ; i < arg_count ; i++) { - if (i) + if (i != from) str->append(','); args[i]->print(str); } - str->append(')'); } @@ -161,6 +271,7 @@ void Item_func::print_op(String *str) str->append(')'); } + bool Item_func::eq(const Item *item, bool binary_cmp) const { /* Assume we don't have rtti */ @@ -178,14 +289,11 @@ bool Item_func::eq(const Item *item, bool binary_cmp) const return 1; } - Field *Item_func::tmp_table_field(TABLE *t_arg) { Field *res; LINT_INIT(res); - if (!t_arg) - return result_field; switch (result_type()) { case INT_RESULT: if (max_length > 11) @@ -199,10 +307,15 @@ Field *Item_func::tmp_table_field(TABLE *t_arg) res= new Field_double(max_length, maybe_null, name, t_arg, decimals); break; case STRING_RESULT: - if (max_length > 255) - res= new Field_blob(max_length, maybe_null, name, t_arg, binary); + if (max_length/collation.collation->mbmaxlen > CONVERT_IF_BIGGER_TO_BLOB) + res= new Field_blob(max_length, maybe_null, name, t_arg, collation.collation); else - res= new Field_string(max_length, maybe_null, name, t_arg, binary); + res= new Field_string(max_length, maybe_null, name, t_arg, 
collation.collation); + break; + case ROW_RESULT: + default: + // This case should never be choosen + DBUG_ASSERT(0); break; } return res; @@ -211,32 +324,34 @@ Field *Item_func::tmp_table_field(TABLE *t_arg) String *Item_real_func::val_str(String *str) { + DBUG_ASSERT(fixed == 1); double nr=val(); if (null_value) return 0; /* purecov: inspected */ - str->set(nr,decimals); + str->set(nr,decimals, &my_charset_bin); return str; } String *Item_num_func::val_str(String *str) { + DBUG_ASSERT(fixed == 1); if (hybrid_type == INT_RESULT) { longlong nr=val_int(); if (null_value) return 0; /* purecov: inspected */ if (!unsigned_flag) - str->set(nr); + str->set(nr,&my_charset_bin); else - str->set((ulonglong) nr); + str->set((ulonglong) nr,&my_charset_bin); } else { double nr=val(); if (null_value) return 0; /* purecov: inspected */ - str->set(nr,decimals); + str->set(nr,decimals,&my_charset_bin); } return str; } @@ -244,21 +359,38 @@ String *Item_num_func::val_str(String *str) void Item_func::fix_num_length_and_dec() { + uint fl_length= 0; decimals=0; for (uint i=0 ; i < arg_count ; i++) + { set_if_bigger(decimals,args[i]->decimals); + set_if_bigger(fl_length, args[i]->max_length); + } max_length=float_length(decimals); + if (fl_length > max_length) + { + decimals= NOT_FIXED_DEC; + max_length= float_length(NOT_FIXED_DEC); + } +} + +Item *Item_func::get_tmp_table_item(THD *thd) +{ + if (!with_sum_func && !const_item()) + return new Item_field(result_field); + return copy_or_same(thd); } String *Item_int_func::val_str(String *str) { + DBUG_ASSERT(fixed == 1); longlong nr=val_int(); if (null_value) return 0; if (!unsigned_flag) - str->set(nr); + str->set(nr,&my_charset_bin); else - str->set((ulonglong) nr); + str->set((ulonglong) nr,&my_charset_bin); return str; } @@ -279,29 +411,122 @@ void Item_num_op::find_num_type(void) String *Item_num_op::val_str(String *str) { + DBUG_ASSERT(fixed == 1); if (hybrid_type == INT_RESULT) { longlong nr=val_int(); if (null_value) return 0; /* purecov: inspected */ if (!unsigned_flag) - str->set(nr); + str->set(nr,&my_charset_bin); else - str->set((ulonglong) nr); + str->set((ulonglong) nr,&my_charset_bin); } else { double nr=val(); if (null_value) return 0; /* purecov: inspected */ - str->set(nr,decimals); + str->set(nr,decimals,&my_charset_bin); } return str; } +void Item_func_signed::print(String *str) +{ + str->append("cast(", 5); + args[0]->print(str); + str->append(" as signed)", 11); + +} + + +longlong Item_func_signed::val_int_from_str(int *error) +{ + char buff[MAX_FIELD_WIDTH], *end; + String tmp(buff,sizeof(buff), &my_charset_bin), *res; + longlong value; + + /* + For a string result, we must first get the string and then convert it + to a longlong + */ + + if (!(res= args[0]->val_str(&tmp))) + { + null_value= 1; + *error= 0; + return 0; + } + null_value= 0; + end= (char*) res->ptr()+ res->length(); + value= my_strtoll10(res->ptr(), &end, error); + if (*error > 0 || end != res->ptr()+ res->length()) + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_TRUNCATED_WRONG_VALUE, + ER(ER_TRUNCATED_WRONG_VALUE), "INTEGER", + res->c_ptr()); + return value; +} + + +longlong Item_func_signed::val_int() +{ + longlong value; + int error; + + if (args[0]->cast_to_int_type() != STRING_RESULT) + { + value= args[0]->val_int(); + null_value= args[0]->null_value; + return value; + } + + value= val_int_from_str(&error); + if (value < 0 && error == 0) + { + push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, + "Cast to signed converted 
positive out-of-range integer to " + "it's negative complement"); + } + return value; +} + + +void Item_func_unsigned::print(String *str) +{ + str->append("cast(", 5); + args[0]->print(str); + str->append(" as unsigned)", 13); + +} + + +longlong Item_func_unsigned::val_int() +{ + longlong value; + int error; + + if (args[0]->cast_to_int_type() != STRING_RESULT) + { + value= args[0]->val_int(); + null_value= args[0]->null_value; + return value; + } + + value= val_int_from_str(&error); + if (error < 0) + push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, + "Cast to unsigned converted negative integer to it's " + "positive complement"); + return value; +} + + double Item_func_plus::val() { + DBUG_ASSERT(fixed == 1); double value=args[0]->val()+args[1]->val(); if ((null_value=args[0]->null_value || args[1]->null_value)) return 0.0; @@ -310,6 +535,7 @@ double Item_func_plus::val() longlong Item_func_plus::val_int() { + DBUG_ASSERT(fixed == 1); if (hybrid_type == INT_RESULT) { longlong value=args[0]->val_int()+args[1]->val_int(); @@ -330,13 +556,14 @@ void Item_func_minus::fix_length_and_dec() { Item_num_op::fix_length_and_dec(); if (unsigned_flag && - (current_thd->sql_mode & MODE_NO_UNSIGNED_SUBTRACTION)) + (current_thd->variables.sql_mode & MODE_NO_UNSIGNED_SUBTRACTION)) unsigned_flag=0; } double Item_func_minus::val() { + DBUG_ASSERT(fixed == 1); double value=args[0]->val() - args[1]->val(); if ((null_value=args[0]->null_value || args[1]->null_value)) return 0.0; @@ -345,6 +572,7 @@ double Item_func_minus::val() longlong Item_func_minus::val_int() { + DBUG_ASSERT(fixed == 1); if (hybrid_type == INT_RESULT) { longlong value=args[0]->val_int() - args[1]->val_int(); @@ -358,6 +586,7 @@ longlong Item_func_minus::val_int() double Item_func_mul::val() { + DBUG_ASSERT(fixed == 1); double value=args[0]->val()*args[1]->val(); if ((null_value=args[0]->null_value || args[1]->null_value)) return 0.0; /* purecov: inspected */ @@ -366,6 +595,7 @@ double Item_func_mul::val() longlong Item_func_mul::val_int() { + DBUG_ASSERT(fixed == 1); if (hybrid_type == INT_RESULT) { longlong value=args[0]->val_int()*args[1]->val_int(); @@ -379,6 +609,7 @@ longlong Item_func_mul::val_int() double Item_func_div::val() { + DBUG_ASSERT(fixed == 1); double value=args[0]->val(); double val2=args[1]->val(); if ((null_value= val2 == 0.0 || args[0]->null_value || args[1]->null_value)) @@ -388,6 +619,7 @@ double Item_func_div::val() longlong Item_func_div::val_int() { + DBUG_ASSERT(fixed == 1); if (hybrid_type == INT_RESULT) { longlong value=args[0]->val_int(); @@ -409,17 +641,42 @@ void Item_func_div::fix_length_and_dec() maybe_null=1; } + +/* Integer division */ +longlong Item_func_int_div::val_int() +{ + DBUG_ASSERT(fixed == 1); + longlong value=args[0]->val_int(); + longlong val2=args[1]->val_int(); + if ((null_value= val2 == 0 || args[0]->null_value || args[1]->null_value)) + return 0; + return (unsigned_flag ? 
+ (ulonglong) value / (ulonglong) val2 : + value / val2); +} + + +void Item_func_int_div::fix_length_and_dec() +{ + find_num_type(); + max_length=args[0]->max_length - args[0]->decimals; + maybe_null=1; +} + + double Item_func_mod::val() { - double value= floor(args[0]->val()+0.5); - double val2=floor(args[1]->val()+0.5); - if ((null_value=val2 == 0.0 || args[0]->null_value || args[1]->null_value)) + DBUG_ASSERT(fixed == 1); + double x= args[0]->val(); + double y= args[1]->val(); + if ((null_value= (y == 0.0) || args[0]->null_value || args[1]->null_value)) return 0.0; /* purecov: inspected */ - return fmod(value,val2); + return fmod(x, y); } longlong Item_func_mod::val_int() { + DBUG_ASSERT(fixed == 1); longlong value= args[0]->val_int(); longlong val2= args[1]->val_int(); if ((null_value=val2 == 0 || args[0]->null_value || args[1]->null_value)) @@ -429,58 +686,102 @@ longlong Item_func_mod::val_int() void Item_func_mod::fix_length_and_dec() { - max_length=args[1]->max_length; - decimals=0; - maybe_null=1; - find_num_type(); + Item_num_op::fix_length_and_dec(); } double Item_func_neg::val() { + DBUG_ASSERT(fixed == 1); double value=args[0]->val(); null_value=args[0]->null_value; return -value; } + longlong Item_func_neg::val_int() { + DBUG_ASSERT(fixed == 1); longlong value=args[0]->val_int(); null_value=args[0]->null_value; return -value; } + void Item_func_neg::fix_length_and_dec() { + enum Item_result arg_result= args[0]->result_type(); + enum Item::Type arg_type= args[0]->type(); decimals=args[0]->decimals; max_length=args[0]->max_length; - hybrid_type= args[0]->result_type() == INT_RESULT ? INT_RESULT : REAL_RESULT; + hybrid_type= REAL_RESULT; + + /* + We need to account for added '-' in the following cases: + A) argument is a real or integer positive constant - in this case + argument's max_length is set to actual number of bytes occupied, and not + maximum number of bytes real or integer may require. Note that all + constants are non negative so we don't need to account for removed '-'. + B) argument returns a string. + Use val() to get value as arg_type doesn't mean that item is + Item_int or Item_real due to existence of Item_param. + */ + if (arg_result == STRING_RESULT || + (arg_type == REAL_ITEM && args[0]->val() >= 0) || + (arg_type == INT_ITEM && args[0]->val_int() > 0)) + max_length++; + + if (args[0]->result_type() == INT_RESULT) + { + /* + If this is in integer context keep the context as integer + (This is how multiplication and other integer functions works) + + We must however do a special case in the case where the argument + is a unsigned bigint constant as in this case the only safe + number to convert in integer context is 9223372036854775808. + (This is needed because the lex parser doesn't anymore handle + signed integers) + */ + if (args[0]->type() != INT_ITEM || + (((ulonglong) args[0]->val_int()) <= (ulonglong) LONGLONG_MIN)) + hybrid_type= INT_RESULT; + } } + double Item_func_abs::val() { + DBUG_ASSERT(fixed == 1); double value=args[0]->val(); null_value=args[0]->null_value; return fabs(value); } + longlong Item_func_abs::val_int() { + DBUG_ASSERT(fixed == 1); longlong value=args[0]->val_int(); null_value=args[0]->null_value; return value >= 0 ? value : -value; } + void Item_func_abs::fix_length_and_dec() { decimals=args[0]->decimals; max_length=args[0]->max_length; - hybrid_type= args[0]->result_type() == INT_RESULT ? 
INT_RESULT : REAL_RESULT; + hybrid_type= REAL_RESULT; + if (args[0]->result_type() == INT_RESULT) + hybrid_type= INT_RESULT; } + /* Gateway to natural LOG function */ double Item_func_ln::val() { + DBUG_ASSERT(fixed == 1); double value=args[0]->val(); if ((null_value=(args[0]->null_value || value <= 0.0))) return 0.0; @@ -494,6 +795,7 @@ double Item_func_ln::val() */ double Item_func_log::val() { + DBUG_ASSERT(fixed == 1); double value=args[0]->val(); if ((null_value=(args[0]->null_value || value <= 0.0))) return 0.0; @@ -509,6 +811,7 @@ double Item_func_log::val() double Item_func_log2::val() { + DBUG_ASSERT(fixed == 1); double value=args[0]->val(); if ((null_value=(args[0]->null_value || value <= 0.0))) return 0.0; @@ -517,6 +820,7 @@ double Item_func_log2::val() double Item_func_log10::val() { + DBUG_ASSERT(fixed == 1); double value=args[0]->val(); if ((null_value=(args[0]->null_value || value <= 0.0))) return 0.0; /* purecov: inspected */ @@ -525,6 +829,7 @@ double Item_func_log10::val() double Item_func_exp::val() { + DBUG_ASSERT(fixed == 1); double value=args[0]->val(); if ((null_value=args[0]->null_value)) return 0.0; /* purecov: inspected */ @@ -533,6 +838,7 @@ double Item_func_exp::val() double Item_func_sqrt::val() { + DBUG_ASSERT(fixed == 1); double value=args[0]->val(); if ((null_value=(args[0]->null_value || value < 0))) return 0.0; /* purecov: inspected */ @@ -541,6 +847,7 @@ double Item_func_sqrt::val() double Item_func_pow::val() { + DBUG_ASSERT(fixed == 1); double value=args[0]->val(); double val2=args[1]->val(); if ((null_value=(args[0]->null_value || args[1]->null_value))) @@ -552,6 +859,7 @@ double Item_func_pow::val() double Item_func_acos::val() { + DBUG_ASSERT(fixed == 1); // the volatile's for BUG #2338 to calm optimizer down (because of gcc's bug) volatile double value=args[0]->val(); if ((null_value=(args[0]->null_value || (value < -1.0 || value > 1.0)))) @@ -561,6 +869,7 @@ double Item_func_acos::val() double Item_func_asin::val() { + DBUG_ASSERT(fixed == 1); // the volatile's for BUG #2338 to calm optimizer down (because of gcc's bug) volatile double value=args[0]->val(); if ((null_value=(args[0]->null_value || (value < -1.0 || value > 1.0)))) @@ -570,6 +879,7 @@ double Item_func_asin::val() double Item_func_atan::val() { + DBUG_ASSERT(fixed == 1); double value=args[0]->val(); if ((null_value=args[0]->null_value)) return 0.0; @@ -585,6 +895,7 @@ double Item_func_atan::val() double Item_func_cos::val() { + DBUG_ASSERT(fixed == 1); double value=args[0]->val(); if ((null_value=args[0]->null_value)) return 0.0; @@ -593,6 +904,7 @@ double Item_func_cos::val() double Item_func_sin::val() { + DBUG_ASSERT(fixed == 1); double value=args[0]->val(); if ((null_value=args[0]->null_value)) return 0.0; @@ -601,6 +913,7 @@ double Item_func_sin::val() double Item_func_tan::val() { + DBUG_ASSERT(fixed == 1); double value=args[0]->val(); if ((null_value=args[0]->null_value)) return 0.0; @@ -613,6 +926,7 @@ double Item_func_tan::val() longlong Item_func_shift_left::val_int() { + DBUG_ASSERT(fixed == 1); uint shift; ulonglong res= ((ulonglong) args[0]->val_int() << (shift=(uint) args[1]->val_int())); @@ -627,6 +941,7 @@ longlong Item_func_shift_left::val_int() longlong Item_func_shift_right::val_int() { + DBUG_ASSERT(fixed == 1); uint shift; ulonglong res= (ulonglong) args[0]->val_int() >> (shift=(uint) args[1]->val_int()); @@ -642,6 +957,7 @@ longlong Item_func_shift_right::val_int() longlong Item_func_bit_neg::val_int() { + DBUG_ASSERT(fixed == 1); ulonglong res= (ulonglong) 
args[0]->val_int(); if ((null_value=args[0]->null_value)) return 0; @@ -661,6 +977,7 @@ void Item_func_integer::fix_length_and_dec() longlong Item_func_ceiling::val_int() { + DBUG_ASSERT(fixed == 1); double value=args[0]->val(); null_value=args[0]->null_value; return (longlong) ceil(value); @@ -668,6 +985,7 @@ longlong Item_func_ceiling::val_int() longlong Item_func_floor::val_int() { + DBUG_ASSERT(fixed == 1); // the volatile's for BUG #3051 to calm optimizer down (because of gcc's bug) volatile double value=args[0]->val(); null_value=args[0]->null_value; @@ -685,11 +1003,14 @@ void Item_func_round::fix_length_and_dec() decimals=0; else decimals=min(tmp,NOT_FIXED_DEC); + if ((tmp= decimals - args[0]->decimals) > 0) + max_length+= tmp; } } double Item_func_round::val() { + DBUG_ASSERT(fixed == 1); double value=args[0]->val(); int dec=(int) args[1]->val_int(); uint abs_dec=abs(dec); @@ -718,20 +1039,44 @@ double Item_func_round::val() } -void Item_func_rand::fix_length_and_dec() +bool Item_func_rand::fix_fields(THD *thd, struct st_table_list *tables, + Item **ref) { - decimals=NOT_FIXED_DEC; - max_length=float_length(decimals); + if (Item_real_func::fix_fields(thd, tables, ref)) + return TRUE; + used_tables_cache|= RAND_TABLE_BIT; if (arg_count) { // Only use argument once in query - uint32 tmp= (uint32) (args[0]->val_int()); - if ((rand= (struct rand_struct*) sql_alloc(sizeof(*rand)))) - randominit(rand,(uint32) (tmp*0x10001L+55555555L), - (uint32) (tmp*0x10000001L)); + if (!args[0]->const_during_execution()) + { + my_error(ER_WRONG_ARGUMENTS, MYF(0), "RAND"); + return TRUE; + } + /* + Allocate rand structure once: we must use thd->current_arena + to create rand in proper mem_root if it's a prepared statement or + stored procedure. + */ + if (!rand && !(rand= (struct rand_struct*) + thd->current_arena->alloc(sizeof(*rand)))) + return TRUE; + /* + PARAM_ITEM is returned if we're in statement prepare and consequently + no placeholder value is set yet. + */ + if (args[0]->type() != PARAM_ITEM) + { + /* + TODO: do not do reinit 'rand' for every execute of PS/SP if + args[0] is a constant. + */ + uint32 tmp= (uint32) args[0]->val_int(); + randominit(rand, (uint32) (tmp*0x10001L+55555555L), + (uint32) (tmp*0x10000001L)); + } } else { - THD *thd= current_thd; /* No need to send a Rand log event if seed was given eg: RAND(seed), as it will be replicated in the query as such. @@ -745,16 +1090,25 @@ void Item_func_rand::fix_length_and_dec() thd->rand_saved_seed2=thd->rand.seed2; rand= &thd->rand; } + return FALSE; +} + +void Item_func_rand::update_used_tables() +{ + Item_real_func::update_used_tables(); + used_tables_cache|= RAND_TABLE_BIT; } double Item_func_rand::val() { + DBUG_ASSERT(fixed == 1); return my_rnd(rand); } longlong Item_func_sign::val_int() { + DBUG_ASSERT(fixed == 1); double value=args[0]->val(); null_value=args[0]->null_value; return value < 0.0 ? -1 : (value > 0 ? 
1 : 0); @@ -763,6 +1117,7 @@ longlong Item_func_sign::val_int() double Item_func_units::val() { + DBUG_ASSERT(fixed == 1); double value=args[0]->val(); if ((null_value=args[0]->null_value)) return 0; @@ -775,8 +1130,8 @@ void Item_func_min_max::fix_length_and_dec() decimals=0; max_length=0; maybe_null=1; - binary=0; cmp_type=args[0]->result_type(); + for (uint i=0 ; i < arg_count ; i++) { if (max_length < args[i]->max_length) @@ -786,14 +1141,15 @@ void Item_func_min_max::fix_length_and_dec() if (!args[i]->maybe_null) maybe_null=0; cmp_type=item_cmp_type(cmp_type,args[i]->result_type()); - if (args[i]->binary) - binary=1; } + if (cmp_type == STRING_RESULT) + agg_arg_charsets(collation, args, arg_count, MY_COLL_CMP_CONV); } String *Item_func_min_max::val_str(String *str) { + DBUG_ASSERT(fixed == 1); switch (cmp_type) { case INT_RESULT: { @@ -801,9 +1157,9 @@ String *Item_func_min_max::val_str(String *str) if (null_value) return 0; if (!unsigned_flag) - str->set(nr); + str->set(nr,&my_charset_bin); else - str->set((ulonglong) nr); + str->set((ulonglong) nr,&my_charset_bin); return str; } case REAL_RESULT: @@ -811,7 +1167,7 @@ String *Item_func_min_max::val_str(String *str) double nr=val(); if (null_value) return 0; /* purecov: inspected */ - str->set(nr,decimals); + str->set(nr,decimals,&my_charset_bin); return str; } case STRING_RESULT: @@ -832,14 +1188,21 @@ String *Item_func_min_max::val_str(String *str) res2= args[i]->val_str(res == str ? &tmp_value : str); if (res2) { - int cmp=binary ? stringcmp(res,res2) : sortcmp(res,res2); + int cmp= sortcmp(res,res2,collation.collation); if ((cmp_sign < 0 ? cmp : -cmp) < 0) res=res2; } } } + if (res) // If !NULL + res->set_charset(collation.collation); return res; } + case ROW_RESULT: + default: + // This case should never be choosen + DBUG_ASSERT(0); + return 0; } return 0; // Keep compiler happy } @@ -847,6 +1210,7 @@ String *Item_func_min_max::val_str(String *str) double Item_func_min_max::val() { + DBUG_ASSERT(fixed == 1); double value=0.0; null_value=1; for (uint i=0; i < arg_count ; i++) @@ -869,6 +1233,7 @@ double Item_func_min_max::val() longlong Item_func_min_max::val_int() { + DBUG_ASSERT(fixed == 1); longlong value=0; null_value=1; for (uint i=0; i < arg_count ; i++) @@ -888,9 +1253,9 @@ longlong Item_func_min_max::val_int() return value; } - longlong Item_func_length::val_int() { + DBUG_ASSERT(fixed == 1); String *res=args[0]->val_str(&value); if (!res) { @@ -901,8 +1266,10 @@ longlong Item_func_length::val_int() return (longlong) res->length(); } + longlong Item_func_char_length::val_int() { + DBUG_ASSERT(fixed == 1); String *res=args[0]->val_str(&value); if (!res) { @@ -910,15 +1277,30 @@ longlong Item_func_char_length::val_int() return 0; /* purecov: inspected */ } null_value=0; - return (longlong) (!args[0]->binary) ? 
res->numchars() : res->length(); + return (longlong) res->numchars(); +} + + +longlong Item_func_coercibility::val_int() +{ + DBUG_ASSERT(fixed == 1); + null_value= 0; + return (longlong) args[0]->collation.derivation; +} + + +void Item_func_locate::fix_length_and_dec() +{ + maybe_null=0; max_length=11; + agg_arg_charsets(cmp_collation, args, 2, MY_COLL_CMP_CONV); } longlong Item_func_locate::val_int() { + DBUG_ASSERT(fixed == 1); String *a=args[0]->val_str(&value1); String *b=args[1]->val_str(&value2); - bool binary_str = args[0]->binary || args[1]->binary; if (!a || !b) { null_value=1; @@ -926,86 +1308,101 @@ longlong Item_func_locate::val_int() } null_value=0; uint start=0; -#ifdef USE_MB uint start0=0; -#endif + my_match_t match; + if (arg_count == 3) { - start=(uint) args[2]->val_int()-1; -#ifdef USE_MB - if (use_mb(default_charset_info)) - { - start0=start; - if (!binary_str) - start=a->charpos(start); - } -#endif + start0= start =(uint) args[2]->val_int()-1; + start=a->charpos(start); + if (start > a->length() || start+b->length() > a->length()) return 0; } + if (!b->length()) // Found empty string at start return (longlong) (start+1); -#ifdef USE_MB - if (use_mb(default_charset_info) && !binary_str) - { - const char *ptr=a->ptr()+start; - const char *search=b->ptr(); - const char *strend = ptr+a->length(); - const char *end=strend-b->length()+1; - const char *search_end=search+b->length(); - register uint32 l; - while (ptr < end) - { - if (*ptr == *search) - { - register char *i,*j; - i=(char*) ptr+1; j=(char*) search+1; - while (j != search_end) - if (*i++ != *j++) goto skipp; - return (longlong) start0+1; - } - skipp: - if ((l=my_ismbchar(default_charset_info,ptr,strend))) ptr+=l; - else ++ptr; - ++start0; - } + + if (!cmp_collation.collation->coll->instr(cmp_collation.collation, + a->ptr()+start, a->length()-start, + b->ptr(), b->length(), + &match, 1)) return 0; + return (longlong) match.mblen + start0 + 1; +} + + +void Item_func_locate::print(String *str) +{ + str->append("locate(", 7); + args[1]->print(str); + str->append(','); + args[0]->print(str); + if (arg_count == 3) + { + str->append(','); + args[2]->print(str); } -#endif /* USE_MB */ - return (longlong) (binary ? a->strstr(*b,start) : - (a->strstr_case(*b,start)))+1; + str->append(')'); } longlong Item_func_field::val_int() { - String *field; - if (!(field=item->val_str(&value))) - return 0; // -1 if null ? - for (uint i=0 ; i < arg_count ; i++) + DBUG_ASSERT(fixed == 1); + + if (cmp_type == STRING_RESULT) + { + String *field; + if (!(field=args[0]->val_str(&value))) + return 0; // -1 if null ? 
+ for (uint i=1 ; i < arg_count ; i++) + { + String *tmp_value=args[i]->val_str(&tmp); + if (tmp_value && !sortcmp(field,tmp_value,cmp_collation.collation)) + return (longlong) (i); + } + } + else if (cmp_type == INT_RESULT) { - String *tmp_value=args[i]->val_str(&tmp); - if (tmp_value && field->length() == tmp_value->length() && - !memcmp(field->ptr(),tmp_value->ptr(),tmp_value->length())) - return (longlong) (i+1); + longlong val= args[0]->val_int(); + if (args[0]->null_value) + return 0; + for (uint i=1; i < arg_count ; i++) + { + if (val == args[i]->val_int() && !args[i]->null_value) + return (longlong) (i); + } + } + else + { + double val= args[0]->val(); + if (args[0]->null_value) + return 0; + for (uint i=1; i < arg_count ; i++) + { + if (val == args[i]->val() && !args[i]->null_value) + return (longlong) (i); + } } return 0; } -void Item_func_field::split_sum_func(List<Item> &fields) + +void Item_func_field::fix_length_and_dec() { - if (item->with_sum_func && item->type() != SUM_FUNC_ITEM) - item->split_sum_func(fields); - else if (item->used_tables() || item->type() == SUM_FUNC_ITEM) - { - fields.push_front(item); - item= new Item_ref((Item**) fields.head_ref(), 0, item->name); - } - Item_func::split_sum_func(fields); + maybe_null=0; max_length=3; + cmp_type= args[0]->result_type(); + for (uint i=1; i < arg_count ; i++) + cmp_type= item_cmp_type(cmp_type, args[i]->result_type()); + if (cmp_type == STRING_RESULT) + agg_arg_charsets(cmp_collation, args, arg_count, MY_COLL_CMP_CONV); } + longlong Item_func_ascii::val_int() { + DBUG_ASSERT(fixed == 1); String *res=args[0]->val_str(&value); if (!res) { @@ -1018,6 +1415,7 @@ longlong Item_func_ascii::val_int() longlong Item_func_ord::val_int() { + DBUG_ASSERT(fixed == 1); String *res=args[0]->val_str(&value); if (!res) { @@ -1027,12 +1425,12 @@ longlong Item_func_ord::val_int() null_value=0; if (!res->length()) return 0; #ifdef USE_MB - if (use_mb(default_charset_info) && !args[0]->binary) + if (use_mb(res->charset())) { register const char *str=res->ptr(); - register uint32 n=0, l=my_ismbchar(default_charset_info, - str,str+res->length()); - if (!l) return (longlong)((uchar) *str); + register uint32 n=0, l=my_ismbchar(res->charset(),str,str+res->length()); + if (!l) + return (longlong)((uchar) *str); while (l--) n=(n<<8)|(uint32)((uchar) *str++); return (longlong) n; @@ -1057,21 +1455,22 @@ void Item_func_find_in_set::fix_length_and_dec() String *find=args[0]->val_str(&value); if (find) { - enum_value=find_enum(((Field_enum*) field)->typelib,find->ptr(), - find->length()); + enum_value= find_type(((Field_enum*) field)->typelib,find->ptr(), + find->length(), 0); enum_bit=0; if (enum_value) enum_bit=LL(1) << (enum_value-1); } } } + agg_arg_charsets(cmp_collation, args, 2, MY_COLL_CMP_CONV); } static const char separator=','; longlong Item_func_find_in_set::val_int() { - bool binary_cmp= args[0]->binary || args[1]->binary; + DBUG_ASSERT(fixed == 1); if (enum_value) { ulonglong tmp=(ulonglong) args[1]->val_int(); @@ -1095,48 +1494,51 @@ longlong Item_func_find_in_set::val_int() int diff; if ((diff=buffer->length() - find->length()) >= 0) { - const char *f_pos=find->ptr(); - const char *f_end=f_pos+find->length(); - const char *str=buffer->ptr(); - const char *end=str+diff+1; - const char *real_end=str+buffer->length(); - uint position=1; - do + my_wc_t wc; + CHARSET_INFO *cs= cmp_collation.collation; + const char *str_begin= buffer->ptr(); + const char *str_end= buffer->ptr(); + const char *real_end= str_end+buffer->length(); + const uchar 
*find_str= (const uchar *) find->ptr(); + uint find_str_len= find->length(); + int position= 0; + while (1) { - const char *pos= f_pos; - if (binary_cmp) + int symbol_len; + if ((symbol_len= cs->cset->mb_wc(cs, &wc, (uchar*) str_end, + (uchar*) real_end)) > 0) { - while (pos != f_end) + const char *substr_end= str_end + symbol_len; + bool is_last_item= (substr_end == real_end); + bool is_separator= (wc == (my_wc_t) separator); + if (is_separator || is_last_item) { - if (*str != *pos) - goto not_found; - str++; - pos++; + position++; + if (is_last_item && !is_separator) + str_end= substr_end; + if (!my_strnncoll(cs, (const uchar *) str_begin, + str_end - str_begin, + find_str, find_str_len)) + return (longlong) position; + else + str_begin= substr_end; } + str_end= substr_end; } + else if (str_end - str_begin == 0 && + find_str_len == 0 && + wc == (my_wc_t) separator) + return (longlong) ++position; else - { - while (pos != f_end) - { - if (toupper(*str) != toupper(*pos)) - goto not_found; - str++; - pos++; - } - } - if (str == real_end || str[0] == separator) - return (longlong) position; - not_found: - while (str < end && str[0] != separator) - str++; - position++; - } while (++str <= end); + return (longlong) 0; + } } return 0; } longlong Item_func_bit_count::val_int() { + DBUG_ASSERT(fixed == 1); ulonglong value= (ulonglong) args[0]->val_int(); if ((null_value= args[0]->null_value)) return 0; /* purecov: inspected */ @@ -1154,23 +1556,35 @@ longlong Item_func_bit_count::val_int() udf_handler::~udf_handler() { - if (initialized) + /* Everything should be properly cleaned up by this moment. */ + DBUG_ASSERT(not_original || !(initialized || buffers)); +} + + +void udf_handler::cleanup() +{ + if (!not_original) { - if (u_d->func_deinit != NULL) + if (initialized) { - void (*deinit)(UDF_INIT *) = (void (*)(UDF_INIT*)) - u_d->func_deinit; - (*deinit)(&initid); + if (u_d->func_deinit != NULL) + { + void (*deinit)(UDF_INIT *) = (void (*)(UDF_INIT*)) + u_d->func_deinit; + (*deinit)(&initid); + } + free_udf(u_d); + initialized= FALSE; } - free_udf(u_d); + if (buffers) // Because of bug in ecc + delete [] buffers; + buffers= 0; } - if (buffers) // Because of bug in ecc - delete [] buffers; } bool -udf_handler::fix_fields(THD *thd,TABLE_LIST *tables,Item_result_field *func, +udf_handler::fix_fields(THD *thd, TABLE_LIST *tables, Item_result_field *func, uint arg_count, Item **arguments) { #ifndef EMBEDDED_LIBRARY // Avoid compiler warning @@ -1178,18 +1592,14 @@ udf_handler::fix_fields(THD *thd,TABLE_LIST *tables,Item_result_field *func, #endif DBUG_ENTER("Item_udf_func::fix_fields"); - if (thd) - { - if (check_stack_overrun(thd,buff)) - DBUG_RETURN(1); // Fatal error flag is set! - } - else - thd=current_thd; // In WHERE / const clause - udf_func *tmp_udf=find_udf(u_d->name,(uint) strlen(u_d->name),1); + if (check_stack_overrun(thd, buff)) + DBUG_RETURN(1); // Fatal error flag is set! 
+ + udf_func *tmp_udf=find_udf(u_d->name.str,(uint) u_d->name.length,1); if (!tmp_udf) { - my_printf_error(ER_CANT_FIND_UDF,ER(ER_CANT_FIND_UDF),MYF(0),u_d->name, + my_printf_error(ER_CANT_FIND_UDF,ER(ER_CANT_FIND_UDF),MYF(0),u_d->name.str, errno); DBUG_RETURN(1); } @@ -1197,7 +1607,7 @@ udf_handler::fix_fields(THD *thd,TABLE_LIST *tables,Item_result_field *func, args=arguments; /* Fix all arguments */ - func->binary=func->maybe_null=0; + func->maybe_null=0; used_tables_cache=0; const_item_cache=1; @@ -1216,11 +1626,26 @@ udf_handler::fix_fields(THD *thd,TABLE_LIST *tables,Item_result_field *func, arg != arg_end ; arg++,i++) { - Item *item=*arg; - if (item->fix_fields(thd,tables)) - return 1; - if (item->binary) - func->binary=1; + if (!(*arg)->fixed && + (*arg)->fix_fields(thd, tables, arg)) + DBUG_RETURN(1); + // we can't assign 'item' before, because fix_fields() can change arg + Item *item= *arg; + if (item->check_cols(1)) + DBUG_RETURN(1); + /* + TODO: We should think about this. It is not always + right way just to set an UDF result to return my_charset_bin + if one argument has binary sorting order. + The result collation should be calculated according to arguments + derivations in some cases and should not in other cases. + Moreover, some arguments can represent a numeric input + which doesn't effect the result character set and collation. + There is no a general rule for UDF. Everything depends on + the particular user definted function. + */ + if (item->collation.collation->state & MY_CS_BINSORT) + func->collation.set(&my_charset_bin); if (item->maybe_null) func->maybe_null=1; func->with_sum_func= func->with_sum_func || item->with_sum_func; @@ -1290,7 +1715,7 @@ udf_handler::fix_fields(THD *thd,TABLE_LIST *tables,Item_result_field *func, if ((error=(uchar) init(&initid, &f_args, thd->net.last_error))) { my_printf_error(ER_CANT_INITIALIZE_UDF,ER(ER_CANT_INITIALIZE_UDF),MYF(0), - u_d->name,thd->net.last_error); + u_d->name.str, thd->net.last_error); free_udf(u_d); DBUG_RETURN(1); } @@ -1303,7 +1728,7 @@ udf_handler::fix_fields(THD *thd,TABLE_LIST *tables,Item_result_field *func, if (error) { my_printf_error(ER_CANT_INITIALIZE_UDF,ER(ER_CANT_INITIALIZE_UDF),MYF(0), - u_d->name, ER(ER_UNKNOWN_ERROR)); + u_d->name.str, ER(ER_UNKNOWN_ERROR)); DBUG_RETURN(1); } DBUG_RETURN(0); @@ -1346,6 +1771,11 @@ bool udf_handler::get_arguments() to+= ALIGN_SIZE(sizeof(double)); } break; + case ROW_RESULT: + default: + // This case should never be choosen + DBUG_ASSERT(0); + break; } } return 0; @@ -1355,7 +1785,7 @@ bool udf_handler::get_arguments() String *udf_handler::val_str(String *str,String *save_str) { - uchar is_null=0; + uchar is_null_tmp=0; ulong res_length; if (get_arguments()) @@ -1372,9 +1802,9 @@ String *udf_handler::val_str(String *str,String *save_str) return 0; } } - char *res=func(&initid, &f_args, (char*) str->ptr(), &res_length, &is_null, - &error); - if (is_null || !res || error) // The !res is for safety + char *res=func(&initid, &f_args, (char*) str->ptr(), &res_length, + &is_null_tmp, &error); + if (is_null_tmp || !res || error) // The !res is for safety { return 0; } @@ -1383,14 +1813,21 @@ String *udf_handler::val_str(String *str,String *save_str) str->length(res_length); return str; } - save_str->set(res, res_length); + save_str->set(res, res_length, str->charset()); return save_str; } +void Item_udf_func::cleanup() +{ + udf.cleanup(); + Item_func::cleanup(); +} + double Item_func_udf_float::val() { + DBUG_ASSERT(fixed == 1); DBUG_ENTER("Item_func_udf_float::val"); 
DBUG_PRINT("info",("result_type: %d arg_count: %d", args[0]->result_type(), arg_count)); @@ -1400,16 +1837,18 @@ double Item_func_udf_float::val() String *Item_func_udf_float::val_str(String *str) { + DBUG_ASSERT(fixed == 1); double nr=val(); if (null_value) return 0; /* purecov: inspected */ - str->set(nr,decimals); + str->set(nr,decimals,&my_charset_bin); return str; } longlong Item_func_udf_int::val_int() { + DBUG_ASSERT(fixed == 1); DBUG_ENTER("Item_func_udf_int::val_int"); DBUG_PRINT("info",("result_type: %d arg_count: %d", args[0]->result_type(), arg_count)); @@ -1420,13 +1859,14 @@ longlong Item_func_udf_int::val_int() String *Item_func_udf_int::val_str(String *str) { + DBUG_ASSERT(fixed == 1); longlong nr=val_int(); if (null_value) return 0; if (!unsigned_flag) - str->set(nr); + str->set(nr,&my_charset_bin); else - str->set((ulonglong) nr); + str->set((ulonglong) nr,&my_charset_bin); return str; } @@ -1443,6 +1883,7 @@ void Item_func_udf_str::fix_length_and_dec() String *Item_func_udf_str::val_str(String *str) { + DBUG_ASSERT(fixed == 1); String *res=udf.val_str(str,&str_value); null_value = !res; return res; @@ -1459,7 +1900,7 @@ bool udf_handler::get_arguments() { return 0; } pthread_mutex_t LOCK_user_locks; static HASH hash_user_locks; -class ULL +class User_level_lock { char *key; uint key_length; @@ -1469,21 +1910,23 @@ public: bool locked; pthread_cond_t cond; pthread_t thread; + ulong thread_id; - ULL(const char *key_arg,uint length) :key_length(length),count(1),locked(1) + User_level_lock(const char *key_arg,uint length, ulong id) + :key_length(length),count(1),locked(1), thread_id(id) { key=(char*) my_memdup((byte*) key_arg,length,MYF(0)); pthread_cond_init(&cond,NULL); if (key) { - if (hash_insert(&hash_user_locks,(byte*) this)) + if (my_hash_insert(&hash_user_locks,(byte*) this)) { my_free((gptr) key,MYF(0)); key=0; } } } - ~ULL() + ~User_level_lock() { if (key) { @@ -1493,41 +1936,51 @@ public: pthread_cond_destroy(&cond); } inline bool initialized() { return key != 0; } - friend void item_user_lock_release(ULL *ull); - friend char *ull_get_key(const ULL *ull,uint *length,my_bool not_used); + friend void item_user_lock_release(User_level_lock *ull); + friend char *ull_get_key(const User_level_lock *ull, uint *length, + my_bool not_used); }; -char *ull_get_key(const ULL *ull,uint *length, +char *ull_get_key(const User_level_lock *ull, uint *length, my_bool not_used __attribute__((unused))) { *length=(uint) ull->key_length; return (char*) ull->key; } + +static bool item_user_lock_inited= 0; + void item_user_lock_init(void) { pthread_mutex_init(&LOCK_user_locks,MY_MUTEX_INIT_SLOW); - hash_init(&hash_user_locks,16,0,0,(hash_get_key) ull_get_key,NULL,0); + hash_init(&hash_user_locks,system_charset_info, + 16,0,0,(hash_get_key) ull_get_key,NULL,0); + item_user_lock_inited= 1; } void item_user_lock_free(void) { - hash_free(&hash_user_locks); - pthread_mutex_destroy(&LOCK_user_locks); + if (item_user_lock_inited) + { + item_user_lock_inited= 0; + hash_free(&hash_user_locks); + pthread_mutex_destroy(&LOCK_user_locks); + } } -void item_user_lock_release(ULL *ull) +void item_user_lock_release(User_level_lock *ull) { ull->locked=0; if (mysql_bin_log.is_open()) { char buf[256]; - String tmp(buf,sizeof(buf)); - tmp.length(0); - tmp.append("DO RELEASE_LOCK(\""); + const char *command="DO RELEASE_LOCK(\""; + String tmp(buf,sizeof(buf), system_charset_info); + tmp.copy(command, strlen(command), tmp.charset()); tmp.append(ull->key,ull->key_length); - tmp.append("\")"); - Query_log_event 
qev(current_thd, tmp.ptr(), tmp.length(),1); + tmp.append("\")", 2); + Query_log_event qev(current_thd, tmp.ptr(), tmp.length(),0, FALSE); qev.error_code=0; // this query is always safe to run on slave mysql_bin_log.write(&qev); } @@ -1544,9 +1997,10 @@ void item_user_lock_release(ULL *ull) longlong Item_master_pos_wait::val_int() { + DBUG_ASSERT(fixed == 1); THD* thd = current_thd; String *log_name = args[0]->val_str(&value); - int event_count; + int event_count= 0; null_value=0; if (thd->slave_thread || !log_name || !log_name->length()) @@ -1554,13 +2008,15 @@ longlong Item_master_pos_wait::val_int() null_value = 1; return 0; } - longlong pos = args[1]->val_int(); + longlong pos = (ulong)args[1]->val_int(); longlong timeout = (arg_count==3) ? args[2]->val_int() : 0 ; +#ifdef HAVE_REPLICATION if ((event_count = active_mi->rli.wait_for_pos(thd, log_name, pos, timeout)) == -2) { null_value = 1; event_count=0; } +#endif return event_count; } @@ -1568,7 +2024,7 @@ longlong Item_master_pos_wait::val_int() void debug_sync_point(const char* lock_name, uint lock_timeout) { THD* thd=current_thd; - ULL* ull; + User_level_lock* ull; struct timespec abstime; int lock_name_len,error=0; lock_name_len=strlen(lock_name); @@ -1586,7 +2042,7 @@ void debug_sync_point(const char* lock_name, uint lock_timeout) this case, we will not be waiting, but rather, just waste CPU and memory on the whole deal */ - if (!(ull= ((ULL*) hash_search(&hash_user_locks,lock_name, + if (!(ull= ((User_level_lock*) hash_search(&hash_user_locks, lock_name, lock_name_len)))) { pthread_mutex_unlock(&LOCK_user_locks); @@ -1643,11 +2099,12 @@ void debug_sync_point(const char* lock_name, uint lock_timeout) longlong Item_func_get_lock::val_int() { + DBUG_ASSERT(fixed == 1); String *res=args[0]->val_str(&value); longlong timeout=args[1]->val_int(); struct timespec abstime; THD *thd=current_thd; - ULL *ull; + User_level_lock *ull; int error=0; pthread_mutex_lock(&LOCK_user_locks); @@ -1666,10 +2123,11 @@ longlong Item_func_get_lock::val_int() thd->ull=0; } - if (!(ull= ((ULL*) hash_search(&hash_user_locks,(byte*) res->ptr(), - res->length())))) + if (!(ull= ((User_level_lock *) hash_search(&hash_user_locks, + (byte*) res->ptr(), + res->length())))) { - ull=new ULL(res->ptr(),res->length()); + ull=new User_level_lock(res->ptr(),res->length(), thd->thread_id); if (!ull || !ull->initialized()) { delete ull; @@ -1737,8 +2195,9 @@ longlong Item_func_get_lock::val_int() longlong Item_func_release_lock::val_int() { + DBUG_ASSERT(fixed == 1); String *res=args[0]->val_str(&value); - ULL *ull; + User_level_lock *ull; longlong result; if (!res || !res->length()) { @@ -1749,8 +2208,9 @@ longlong Item_func_release_lock::val_int() result=0; pthread_mutex_lock(&LOCK_user_locks); - if (!(ull= ((ULL*) hash_search(&hash_user_locks,(const byte*) res->ptr(), - res->length())))) + if (!(ull= ((User_level_lock*) hash_search(&hash_user_locks, + (const byte*) res->ptr(), + res->length())))) { null_value=1; } @@ -1768,20 +2228,27 @@ longlong Item_func_release_lock::val_int() } -longlong Item_func_set_last_insert_id::val_int() +longlong Item_func_last_insert_id::val_int() { - longlong value=args[0]->val_int(); - current_thd->insert_id(value); - null_value=args[0]->null_value; - return value; + DBUG_ASSERT(fixed == 1); + if (arg_count) + { + longlong value=args[0]->val_int(); + current_thd->insert_id(value); + null_value=args[0]->null_value; + } + else + current_thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT); + return current_thd->insert_id(); } /* This function is 
just used to test speed of different functions */ longlong Item_func_benchmark::val_int() { + DBUG_ASSERT(fixed == 1); char buff[MAX_FIELD_WIDTH]; - String tmp(buff,sizeof(buff)); + String tmp(buff,sizeof(buff), &my_charset_bin); THD *thd=current_thd; for (ulong loop=0 ; loop < loop_count && !thd->killed; loop++) @@ -1796,12 +2263,30 @@ longlong Item_func_benchmark::val_int() case STRING_RESULT: (void) args[0]->val_str(&tmp); break; + case ROW_RESULT: + default: + // This case should never be choosen + DBUG_ASSERT(0); + return 0; } } return 0; } +void Item_func_benchmark::print(String *str) +{ + str->append("benchmark(", 10); + char buffer[20]; + // my_charset_bin is good enough for numbers + String st(buffer, sizeof(buffer), &my_charset_bin); + st.set((ulonglong)loop_count, &my_charset_bin); + str->append(st); + str->append(','); + args[0]->print(str); + str->append(')'); +} + #define extra_size sizeof(double) static user_var_entry *get_variable(HASH *hash, LEX_STRING &name, @@ -1824,9 +2309,21 @@ static user_var_entry *get_variable(HASH *hash, LEX_STRING &name, entry->value=0; entry->length=0; entry->update_query_id=0; + entry->collation.set(NULL, DERIVATION_IMPLICIT); + /* + If we are here, we were called from a SET or a query which sets a + variable. Imagine it is this: + INSERT INTO t SELECT @a:=10, @a:=@a+1. + Then when we have a Item_func_get_user_var (because of the @a+1) so we + think we have to write the value of @a to the binlog. But before that, + we have a Item_func_set_user_var to create @a (@a:=10), in this we mark + the variable as "already logged" (line below) so that it won't be logged + by Item_func_get_user_var (because that's not necessary). + */ + entry->used_query_id=current_thd->query_id; entry->type=STRING_RESULT; memcpy(entry->name.str, name.str, name.length+1); - if (hash_insert(hash,(byte*) entry)) + if (my_hash_insert(hash,(byte*) entry)) { my_free((char*) entry,MYF(0)); return 0; @@ -1835,15 +2332,43 @@ static user_var_entry *get_variable(HASH *hash, LEX_STRING &name, return entry; } +/* + When a user variable is updated (in a SET command or a query like + SELECT @a:= ). +*/ -bool Item_func_set_user_var::fix_fields(THD *thd,TABLE_LIST *tables) +bool Item_func_set_user_var::fix_fields(THD *thd, TABLE_LIST *tables, + Item **ref) { - if (!thd) - thd=current_thd; // Should never happen - if (Item_func::fix_fields(thd,tables) || + DBUG_ASSERT(fixed == 0); + /* fix_fields will call Item_func_set_user_var::fix_length_and_dec */ + if (Item_func::fix_fields(thd, tables, ref) || !(entry= get_variable(&thd->user_vars, name, 1))) return 1; + /* + Remember the last query which updated it, this way a query can later know + if this variable is a constant item in the query (it is if update_query_id + is different from query_id). + */ entry->update_query_id= thd->query_id; + /* + As it is wrong and confusing to associate any + character set with NULL, @a should be latin2 + after this query sequence: + + SET @a=_latin2'string'; + SET @a=NULL; + + I.e. the second query should not change the charset + to the current default value, but should keep the + original value assigned during the first query. + In order to do it, we don't copy charset + from the argument if the argument is NULL + and the variable has previously been initialized. 
+ */ + if (!entry->collation.collation || !args[0]->null_value) + entry->collation.set(args[0]->collation.collation, DERIVATION_IMPLICIT); + collation.set(entry->collation.collation, DERIVATION_IMPLICIT); cached_result_type= args[0]->result_type(); return 0; } @@ -1855,11 +2380,14 @@ Item_func_set_user_var::fix_length_and_dec() maybe_null=args[0]->maybe_null; max_length=args[0]->max_length; decimals=args[0]->decimals; + collation.set(args[0]->collation.collation, DERIVATION_IMPLICIT); } -bool Item_func_set_user_var::update_hash(const void *ptr, uint length, - Item_result type) +bool Item_func_set_user_var::update_hash(void *ptr, uint length, + Item_result type, + CHARSET_INFO *cs, + Derivation dv) { if ((null_value=args[0]->null_value)) { @@ -1905,13 +2433,14 @@ bool Item_func_set_user_var::update_hash(const void *ptr, uint length, memcpy(entry->value,ptr,length); entry->length= length; entry->type=type; + entry->collation.set(cs, dv); } return 0; err: - current_thd->fatal_error=1; // Probably end of memory - null_value=1; - return 1; // Error + current_thd->fatal_error(); // Probably end of memory + null_value= 1; + return 1; } @@ -1928,7 +2457,10 @@ double user_var_entry::val(my_bool *null_value) case INT_RESULT: return (double) *(longlong*) value; case STRING_RESULT: - return atof(value); // This is null terminated + return my_atof(value); // This is null terminated + case ROW_RESULT: + DBUG_ASSERT(1); // Impossible + break; } return 0.0; // Impossible } @@ -1947,7 +2479,13 @@ longlong user_var_entry::val_int(my_bool *null_value) case INT_RESULT: return *(longlong*) value; case STRING_RESULT: - return strtoull(value,NULL,10); // String is null terminated + { + int error; + return my_strtoll10(value, (char**) 0, &error);// String is null terminated + } + case ROW_RESULT: + DBUG_ASSERT(1); // Impossible + break; } return LL(0); // Impossible } @@ -1963,18 +2501,66 @@ String *user_var_entry::val_str(my_bool *null_value, String *str, switch (type) { case REAL_RESULT: - str->set(*(double*) value, decimals); + str->set(*(double*) value, decimals, &my_charset_bin); break; case INT_RESULT: - str->set(*(longlong*) value); + str->set(*(longlong*) value, &my_charset_bin); break; case STRING_RESULT: - if (str->copy(value, length)) + if (str->copy(value, length, collation.collation)) str= 0; // EOM error + case ROW_RESULT: + DBUG_ASSERT(1); // Impossible + break; } return(str); } +/* + This functions is invoked on SET @variable or @variable:= expression. + Evaluete (and check expression), store results. + + SYNOPSYS + Item_func_set_user_var::check() + + NOTES + For now it always return OK. All problem with value evalueting + will be catched by thd->net.report_error check in sql_set_variables(). + + RETURN + 0 - OK. +*/ + +bool +Item_func_set_user_var::check() +{ + DBUG_ENTER("Item_func_set_user_var::check"); + + switch (cached_result_type) { + case REAL_RESULT: + { + save_result.vreal= args[0]->val(); + break; + } + case INT_RESULT: + { + save_result.vint= args[0]->val_int(); + break; + } + case STRING_RESULT: + { + save_result.vstr= args[0]->val_str(&value); + break; + } + case ROW_RESULT: + default: + // This case should never be choosen + DBUG_ASSERT(0); + break; + } + DBUG_RETURN(0); +} + /* This functions is invoked on SET @variable or @variable:= expression. 
@@ -1992,7 +2578,6 @@ String *user_var_entry::val_str(my_bool *null_value, String *str, */ - bool Item_func_set_user_var::update() { @@ -2003,23 +2588,32 @@ Item_func_set_user_var::update() switch (cached_result_type) { case REAL_RESULT: { - double value=args[0]->val(); - res= update_hash((void*) &value,sizeof(value), REAL_RESULT); + res= update_hash((void*) &save_result.vreal,sizeof(save_result.vreal), + REAL_RESULT, &my_charset_bin, DERIVATION_IMPLICIT); break; } case INT_RESULT: { - longlong value=args[0]->val_int(); - res= update_hash((void*) &value,sizeof(longlong), INT_RESULT); + res= update_hash((void*) &save_result.vint, sizeof(save_result.vint), + INT_RESULT, &my_charset_bin, DERIVATION_IMPLICIT); break; } case STRING_RESULT: - String *tmp; - tmp=args[0]->val_str(&value); - if (!tmp) // Null value - res= update_hash((void*) 0,0,STRING_RESULT); + { + if (!save_result.vstr) // Null value + res= update_hash((void*) 0, 0, STRING_RESULT, &my_charset_bin, + DERIVATION_IMPLICIT); else - res= update_hash((void*) tmp->ptr(),tmp->length(),STRING_RESULT); + res= update_hash((void*) save_result.vstr->ptr(), + save_result.vstr->length(), STRING_RESULT, + save_result.vstr->charset(), + DERIVATION_IMPLICIT); + break; + } + case ROW_RESULT: + default: + // This case should never be choosen + DBUG_ASSERT(0); break; } DBUG_RETURN(res); @@ -2028,18 +2622,24 @@ Item_func_set_user_var::update() double Item_func_set_user_var::val() { + DBUG_ASSERT(fixed == 1); + check(); update(); // Store expression return entry->val(&null_value); } longlong Item_func_set_user_var::val_int() { + DBUG_ASSERT(fixed == 1); + check(); update(); // Store expression return entry->val_int(&null_value); } String *Item_func_set_user_var::val_str(String *str) { + DBUG_ASSERT(fixed == 1); + check(); update(); // Store expression return entry->val_str(&null_value, str, decimals); } @@ -2047,27 +2647,28 @@ String *Item_func_set_user_var::val_str(String *str) void Item_func_set_user_var::print(String *str) { - str->append('('); - str->append(name.str,name.length); - str->append(":=",2); + str->append("(@", 2); + str->append(name.str, name.length); + str->append(":=", 2); args[0]->print(str); str->append(')'); } - String * Item_func_get_user_var::val_str(String *str) { + DBUG_ASSERT(fixed == 1); DBUG_ENTER("Item_func_get_user_var::val_str"); if (!var_entry) - return (String*) 0; // No such variable + DBUG_RETURN((String*) 0); // No such variable DBUG_RETURN(var_entry->val_str(&null_value, str, decimals)); } double Item_func_get_user_var::val() { + DBUG_ASSERT(fixed == 1); if (!var_entry) return 0.0; // No such variable return (var_entry->val(&null_value)); @@ -2076,20 +2677,163 @@ double Item_func_get_user_var::val() longlong Item_func_get_user_var::val_int() { + DBUG_ASSERT(fixed == 1); if (!var_entry) return LL(0); // No such variable return (var_entry->val_int(&null_value)); } +/* + Get variable by name and, if necessary, put the record of variable + use into the binary log. + + SYNOPSIS + get_var_with_binlog() + thd Current thread + name Variable name + out_entry [out] variable structure or NULL. The pointer is set + regardless of whether function succeeded or not. + + When a user variable is invoked from an update query (INSERT, UPDATE etc), + stores this variable and its value in thd->user_var_events, so that it can be + written to the binlog (will be written just before the query is written, see + log.cc). 
+ + RETURN + 0 OK + 1 Failed to put appropiate record into binary log + +*/ + +int get_var_with_binlog(THD *thd, LEX_STRING &name, + user_var_entry **out_entry) +{ + BINLOG_USER_VAR_EVENT *user_var_event; + user_var_entry *var_entry; + var_entry= get_variable(&thd->user_vars, name, 0); + + if (!(opt_bin_log && is_update_query(thd->lex->sql_command))) + { + *out_entry= var_entry; + return 0; + } + + if (!var_entry) + { + /* + If the variable does not exist, it's NULL, but we want to create it so + that it gets into the binlog (if it didn't, the slave could be + influenced by a variable of the same name previously set by another + thread). + We create it like if it had been explicitely set with SET before. + The 'new' mimicks what sql_yacc.yy does when 'SET @a=10;'. + sql_set_variables() is what is called from 'case SQLCOM_SET_OPTION' + in dispatch_command()). Instead of building a one-element list to pass to + sql_set_variables(), we could instead manually call check() and update(); + this would save memory and time; but calling sql_set_variables() makes + one unique place to maintain (sql_set_variables()). + */ + + List<set_var_base> tmp_var_list; + tmp_var_list.push_back(new set_var_user(new Item_func_set_user_var(name, + new Item_null()))); + /* Create the variable */ + if (sql_set_variables(thd, &tmp_var_list)) + goto err; + if (!(var_entry= get_variable(&thd->user_vars, name, 0))) + goto err; + } + else if (var_entry->used_query_id == thd->query_id) + { + /* + If this variable was already stored in user_var_events by this query + (because it's used in more than one place in the query), don't store + it. + */ + *out_entry= var_entry; + return 0; + } + + uint size; + /* + First we need to store value of var_entry, when the next situation + appers: + > set @a:=1; + > insert into t1 values (@a), (@a:=@a+1), (@a:=@a+1); + We have to write to binlog value @a= 1; + */ + size= ALIGN_SIZE(sizeof(BINLOG_USER_VAR_EVENT)) + var_entry->length; + if (!(user_var_event= (BINLOG_USER_VAR_EVENT *) thd->alloc(size))) + goto err; + + user_var_event->value= (char*) user_var_event + + ALIGN_SIZE(sizeof(BINLOG_USER_VAR_EVENT)); + user_var_event->user_var_event= var_entry; + user_var_event->type= var_entry->type; + user_var_event->charset_number= var_entry->collation.collation->number; + if (!var_entry->value) + { + /* NULL value*/ + user_var_event->length= 0; + user_var_event->value= 0; + } + else + { + user_var_event->length= var_entry->length; + memcpy(user_var_event->value, var_entry->value, + var_entry->length); + } + /* Mark that this variable has been used by this query */ + var_entry->used_query_id= thd->query_id; + if (insert_dynamic(&thd->user_var_events, (gptr) &user_var_event)) + goto err; + + *out_entry= var_entry; + return 0; + +err: + *out_entry= var_entry; + return 1; +} + + void Item_func_get_user_var::fix_length_and_dec() { THD *thd=current_thd; + int error; maybe_null=1; decimals=NOT_FIXED_DEC; max_length=MAX_BLOB_WIDTH; - if (!(var_entry= get_variable(&thd->user_vars, name, 0))) + + error= get_var_with_binlog(thd, name, &var_entry); + + if (var_entry) + { + collation.set(var_entry->collation); + switch (var_entry->type) { + case REAL_RESULT: + max_length= DBL_DIG + 8; + case INT_RESULT: + max_length= MAX_BIGINT_WIDTH; + break; + case STRING_RESULT: + max_length= MAX_BLOB_WIDTH; + break; + case ROW_RESULT: // Keep compiler happy + break; + } + } + else + { + collation.set(&my_charset_bin, DERIVATION_IMPLICIT); null_value= 1; + } + + if (error) + thd->fatal_error(); + + return; } @@ -2112,7 
+2856,7 @@ enum Item_result Item_func_get_user_var::result_type() const void Item_func_get_user_var::print(String *str) { - str->append('@'); + str->append("(@", 2); str->append(name.str,name.length); str->append(')'); } @@ -2124,29 +2868,57 @@ bool Item_func_get_user_var::eq(const Item *item, bool binary_cmp) const if (this == item) return 1; // Same item is same. /* Check if other type is also a get_user_var() object */ -#ifdef FIX_THIS - if (item->eq == &Item_func_get_user_var::eq) - return 0; -#else if (item->type() != FUNC_ITEM || ((Item_func*) item)->func_name() != func_name()) return 0; -#endif Item_func_get_user_var *other=(Item_func_get_user_var*) item; return (name.length == other->name.length && !memcmp(name.str, other->name.str, name.length)); } +Item_func_get_system_var:: +Item_func_get_system_var(sys_var *var_arg, enum_var_type var_type_arg, + LEX_STRING *component_arg, const char *name_arg, + size_t name_len_arg) + :var(var_arg), var_type(var_type_arg), component(*component_arg) +{ + /* set_name() will allocate the name */ + set_name(name_arg, name_len_arg, system_charset_info); +} + + +bool +Item_func_get_system_var::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) +{ + Item *item; + DBUG_ENTER("Item_func_get_system_var::fix_fields"); + + /* + Evaluate the system variable and substitute the result (a basic constant) + instead of this item. If the variable can not be evaluated, + the error is reported in sys_var::item(). + */ + if (!(item= var->item(thd, var_type, &component))) + DBUG_RETURN(1); // Impossible + item->set_name(name, 0, system_charset_info); // don't allocate a new name + thd->change_item_tree(ref, item); + + DBUG_RETURN(0); +} + + longlong Item_func_inet_aton::val_int() { + DBUG_ASSERT(fixed == 1); uint byte_result = 0; ulonglong result = 0; // We are ready for 64 bit addresses const char *p,* end; char c = '.'; // we mark c to indicate invalid IP in case length is 0 char buff[36]; + int dot_count= 0; - String *s,tmp(buff,sizeof(buff)); + String *s,tmp(buff,sizeof(buff),&my_charset_bin); if (!(s = args[0]->val_str(&tmp))) // If null value goto err; null_value=0; @@ -2163,6 +2935,7 @@ longlong Item_func_inet_aton::val_int() } else if (c == '.') { + dot_count++; result= (result << 8) + (ulonglong) byte_result; byte_result = 0; } @@ -2170,7 +2943,19 @@ longlong Item_func_inet_aton::val_int() goto err; // Invalid character } if (c != '.') // IP number can't end on '.' + { + /* + Handle short-forms addresses according to standard. 
Examples: + 127 -> 0.0.0.127 + 127.1 -> 127.0.0.1 + 127.2.1 -> 127.2.0.1 + */ + switch (dot_count) { + case 1: result<<= 8; /* Fall through */ + case 2: result<<= 8; /* Fall through */ + } return (result << 8) + (ulonglong) byte_result; + } err: null_value=1; @@ -2181,11 +2966,26 @@ err: void Item_func_match::init_search(bool no_order) { DBUG_ENTER("Item_func_match::init_search"); + + /* Check if init_search() has been called before */ if (ft_handler) DBUG_VOID_RETURN; if (key == NO_SUCH_KEY) - concat= new Item_func_concat_ws(new Item_string(" ",1), fields); + { + List<Item> fields; + fields.push_back(new Item_string(" ",1, cmp_collation.collation)); + for (uint i=1; i < arg_count; i++) + fields.push_back(args[i]); + concat=new Item_func_concat_ws(fields); + /* + Above function used only to get value and do not need fix_fields for it: + Item_string - basic constant + fields - fix_fields() was already called for this arguments + Item_func_concat_ws - do not need fix_fields() to produce value + */ + concat->quick_fix_field(); + } if (master) { @@ -2196,21 +2996,26 @@ void Item_func_match::init_search(bool no_order) DBUG_VOID_RETURN; } - String *ft_tmp=0; - char tmp1[FT_QUERY_MAXLEN]; - String tmp2(tmp1,sizeof(tmp1)); + String *ft_tmp= 0; // MATCH ... AGAINST (NULL) is meaningless, but possible - if (!(ft_tmp=key_item()->val_str(&tmp2))) + if (!(ft_tmp=key_item()->val_str(&value))) { - ft_tmp= &tmp2; - tmp2.set("",0); + ft_tmp= &value; + value.set("",0,cmp_collation.collation); } - ft_handler=table->file->ft_init_ext(mode, key, - (byte*) ft_tmp->ptr(), - ft_tmp->length(), - join_key && !no_order); + if (ft_tmp->charset() != cmp_collation.collation) + { + uint dummy_errors; + search_value.copy(ft_tmp->ptr(), ft_tmp->length(), ft_tmp->charset(), + cmp_collation.collation, &dummy_errors); + ft_tmp= &search_value; + } + + if (join_key && !no_order) + flags|=FT_SORTED; + ft_handler=table->file->ft_init_ext(flags, key, ft_tmp); if (join_key) table->file->ft_handler=ft_handler; @@ -2219,10 +3024,11 @@ void Item_func_match::init_search(bool no_order) } -bool Item_func_match::fix_fields(THD *thd,struct st_table_list *tlist) +bool Item_func_match::fix_fields(THD *thd, TABLE_LIST *tlist, Item **ref) { - List_iterator<Item> li(fields); + DBUG_ASSERT(fixed == 0); Item *item; + LINT_INIT(item); // Safe as arg_count is > 1 maybe_null=1; join_key=0; @@ -2233,58 +3039,61 @@ bool Item_func_match::fix_fields(THD *thd,struct st_table_list *tlist) modifications to find_best and auto_close as complement to auto_init code above. */ - if (Item_func::fix_fields(thd,tlist) || !const_item()) + if (Item_func::fix_fields(thd, tlist, ref) || + !args[0]->const_during_execution()) { my_error(ER_WRONG_ARGUMENTS,MYF(0),"AGAINST"); return 1; } - while ((item=li++)) + const_item_cache=0; + for (uint i=1 ; i < arg_count ; i++) { - if (item->fix_fields(thd,tlist)) - return 1; + item=args[i]; if (item->type() == Item::REF_ITEM) - li.replace(item= *((Item_ref *)item)->ref); - if (item->type() != Item::FIELD_ITEM || !item->used_tables()) + args[i]= item= *((Item_ref *)item)->ref; + if (item->type() != Item::FIELD_ITEM) key=NO_SUCH_KEY; - used_tables_cache|=item->used_tables(); } - /* check that all columns come from the same table */ - if (my_count_bits(used_tables_cache) != 1) + /* + Check that all columns come from the same table. + We've already checked that columns in MATCH are fields so + PARAM_TABLE_BIT can only appear from AGAINST argument. 
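/*
  Editorial sketch, not part of the patch: the dot_count logic added to
  Item_func_inet_aton::val_int() above gives short-form addresses their classic
  inet_aton(3) meaning (127 -> 0.0.0.127, 127.1 -> 127.0.0.1,
  127.2.1 -> 127.2.0.1).  A minimal standalone version of that parsing loop,
  with invalid input collapsed to 0 instead of a SQL NULL result:
*/
#include <cstdio>

static unsigned long long short_form_aton(const char *s)
{
  unsigned long long result= 0;
  unsigned int byte_result= 0;
  int dot_count= 0;
  char c= '.';                           /* marks an empty string as invalid */

  for (; *s; s++)
  {
    c= *s;
    if (c >= '0' && c <= '9')
      byte_result= byte_result * 10 + (unsigned int) (c - '0');
    else if (c == '.')
    {
      dot_count++;
      result= (result << 8) + byte_result;
      byte_result= 0;
    }
    else
      return 0;                          /* invalid character */
  }
  if (c == '.')                          /* address can't end on '.' */
    return 0;
  switch (dot_count) {
  case 1: result<<= 8;                   /* fall through */
  case 2: result<<= 8;                   /* fall through */
  }
  return (result << 8) + byte_result;
}

int main()
{
  printf("%llu\n", short_form_aton("127.0.0.1")); /* 2130706433 */
  printf("%llu\n", short_form_aton("127.1"));     /* also 2130706433 */
  printf("%llu\n", short_form_aton("127"));       /* 127, i.e. 0.0.0.127 */
  return 0;
}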
+ */ + if ((used_tables_cache & ~PARAM_TABLE_BIT) != item->used_tables()) key=NO_SUCH_KEY; - const_item_cache=0; - table=((Item_field *)fields.head())->field->table; - if (!(table->file->table_flags() & HA_CAN_FULLTEXT)) + + if (key == NO_SUCH_KEY && !(flags & FT_BOOL)) { - my_error(ER_TABLE_CANT_HANDLE_FULLTEXT, MYF(0)); + my_error(ER_WRONG_ARGUMENTS,MYF(0),"MATCH"); return 1; } - table->fulltext_searched=1; - record=table->record[0]; - if (key == NO_SUCH_KEY && mode != FT_BOOL) + table=((Item_field *)item)->field->table; + if (!(table->file->table_flags() & HA_CAN_FULLTEXT)) { - my_error(ER_WRONG_ARGUMENTS,MYF(0),"MATCH"); + my_error(ER_TABLE_CANT_HANDLE_FT, MYF(0)); return 1; } - - return 0; + table->fulltext_searched=1; + return agg_arg_collations_for_comparison(cmp_collation, args+1, arg_count-1); } - bool Item_func_match::fix_index() { - List_iterator_fast<Item> li(fields); Item_field *item; uint ft_to_key[MAX_KEY], ft_cnt[MAX_KEY], fts=0, keynr; - uint max_cnt=0, mkeys=0; + uint max_cnt=0, mkeys=0, i; if (key == NO_SUCH_KEY) return 0; + + if (!table) + goto err; for (keynr=0 ; keynr < table->keys ; keynr++) { if ((table->key_info[keynr].flags & HA_FULLTEXT) && - (table->keys_in_use_for_query & (((key_map)1) << keynr))) + (table->keys_in_use_for_query.is_set(keynr))) { ft_to_key[fts]=keynr; ft_cnt[fts]=0; @@ -2295,8 +3104,9 @@ bool Item_func_match::fix_index() if (!fts) goto err; - while ((item=(Item_field*)(li++))) + for (i=1; i < arg_count; i++) { + item=(Item_field*)args[i]; for (keynr=0 ; keynr < fts ; keynr++) { KEY *ft_key=&table->key_info[ft_to_key[keynr]]; @@ -2330,8 +3140,8 @@ bool Item_func_match::fix_index() for (keynr=0 ; keynr <= mkeys ; keynr++) { - // for now, partial keys won't work. SerG - if (max_cnt < fields.elements || + // partial keys doesn't work + if (max_cnt < arg_count-1 || max_cnt < table->key_info[ft_to_key[keynr]].key_parts) continue; @@ -2341,21 +3151,20 @@ bool Item_func_match::fix_index() } err: - if (mode == FT_BOOL) + if (flags & FT_BOOL) { key=NO_SUCH_KEY; return 0; } - my_printf_error(ER_FT_MATCHING_KEY_NOT_FOUND, - ER(ER_FT_MATCHING_KEY_NOT_FOUND),MYF(0)); + my_error(ER_FT_MATCHING_KEY_NOT_FOUND,MYF(0)); return 1; } bool Item_func_match::eq(const Item *item, bool binary_cmp) const { - if (item->type() != FUNC_ITEM || - func_name() != ((Item_func*)item)->func_name()) + if (item->type() != FUNC_ITEM || ((Item_func*)item)->functype() != FT_FUNC || + flags != ((Item_func_match*)item)->flags) return 0; Item_func_match *ifm=(Item_func_match*) item; @@ -2370,6 +3179,7 @@ bool Item_func_match::eq(const Item *item, bool binary_cmp) const double Item_func_match::val() { + DBUG_ASSERT(fixed == 1); DBUG_ENTER("Item_func_match::val"); if (ft_handler == NULL) DBUG_RETURN(-1.0); @@ -2393,11 +3203,26 @@ double Item_func_match::val() (byte *)a->ptr(), a->length())); } else - DBUG_RETURN(ft_handler->please->find_relevance(ft_handler, record, 0)); + DBUG_RETURN(ft_handler->please->find_relevance(ft_handler, + table->record[0], 0)); +} + +void Item_func_match::print(String *str) +{ + str->append("(match ", 7); + print_args(str, 1); + str->append(" against (", 10); + args[0]->print(str); + if (flags & FT_BOOL) + str->append(" in boolean mode", 16); + else if (flags & FT_EXPAND) + str->append(" with query expansion", 21); + str->append("))", 2); } longlong Item_func_bit_xor::val_int() { + DBUG_ASSERT(fixed == 1); ulonglong arg1= (ulonglong) args[0]->val_int(); ulonglong arg2= (ulonglong) args[1]->val_int(); if ((null_value= (args[0]->null_value || 
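/*
  Editorial sketch, not part of the patch: Item_func_match::fix_index() above
  only accepts a fulltext index whose key parts exactly cover the MATCH(...)
  column list ("partial keys don't work").  The toy selector below reproduces
  that rule on plain string vectors; the names and types are invented for
  illustration and are not the server's structures.
*/
#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

struct ft_index
{
  std::string name;
  std::vector<std::string> columns;      /* columns covered by the FT index */
};

/* Return the name of a usable index, or "" when none qualifies. */
static std::string pick_ft_index(const std::vector<ft_index> &indexes,
                                 const std::vector<std::string> &match_cols)
{
  for (const ft_index &idx : indexes)
  {
    size_t covered= 0;
    for (const std::string &col : match_cols)
      if (std::find(idx.columns.begin(), idx.columns.end(), col) !=
          idx.columns.end())
        covered++;
    /* every MATCH column must be in the index, and the index must not be wider */
    if (covered == match_cols.size() && covered == idx.columns.size())
      return idx.name;
  }
  return "";
}

int main()
{
  std::vector<ft_index> idx= { {"ft_title",      {"title"}},
                               {"ft_title_body", {"title", "body"}} };
  printf("[%s]\n", pick_ft_index(idx, {"title", "body"}).c_str()); /* [ft_title_body] */
  printf("[%s]\n", pick_ft_index(idx, {"body"}).c_str());          /* [] - no exact cover */
  return 0;
}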
args[1]->null_value))) @@ -2410,33 +3235,64 @@ longlong Item_func_bit_xor::val_int() System variables ****************************************************************************/ -Item *get_system_var(enum_var_type var_type, LEX_STRING name) -{ - if (!my_strcasecmp(name.str,"VERSION")) - return new Item_string("@@VERSION",server_version, - (uint) strlen(server_version)); +/* + Return value of an system variable base[.name] as a constant item - THD *thd=current_thd; - Item *item; + SYNOPSIS + get_system_var() + thd Thread handler + var_type global / session + name Name of base or system variable + component Component. + + NOTES + If component.str = 0 then the variable name is in 'name' + + RETURN + 0 error + # constant item +*/ + + +Item *get_system_var(THD *thd, enum_var_type var_type, LEX_STRING name, + LEX_STRING component) +{ sys_var *var; - char buff[MAX_SYS_VAR_LENGTH+3+8], *pos; + LEX_STRING *base_name, *component_name; + + if (component.str == 0 && + !my_strcasecmp(system_charset_info, name.str, "VERSION")) + return new Item_string(NULL, server_version, + (uint) strlen(server_version), + system_charset_info, DERIVATION_SYSCONST); + + if (component.str) + { + base_name= &component; + component_name= &name; + } + else + { + base_name= &name; + component_name= &component; // Empty string + } - if (!(var= find_sys_var(name.str))) + if (!(var= find_sys_var(base_name->str, base_name->length))) return 0; - if (!(item=var->item(thd, var_type))) - return 0; // Impossible - thd->safe_to_cache_query=0; - buff[0]='@'; - buff[1]='@'; - pos=buff+2; - if (var_type == OPT_SESSION) - pos=strmov(pos,"session."); - else if (var_type == OPT_GLOBAL) - pos=strmov(pos,"global."); - memcpy(pos, var->name, var->name_length+1); - // set_name() will allocate the name - item->set_name(buff,(uint) (pos-buff)+var->name_length); - return item; + if (component.str) + { + if (!var->is_struct()) + { + net_printf(thd, ER_VARIABLE_IS_NOT_STRUCT, base_name->str); + return 0; + } + } + thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT); + + set_if_smaller(component_name->length, MAX_SYS_VAR_LENGTH); + + return new Item_func_get_system_var(var, var_type, component_name, + NULL, 0); } @@ -2454,10 +3310,9 @@ Item *get_system_var(enum_var_type var_type, LEX_STRING name) longlong Item_func_is_free_lock::val_int() { + DBUG_ASSERT(fixed == 1); String *res=args[0]->val_str(&value); - THD *thd=current_thd; - ULL *ull; - int error=0; + User_level_lock *ull; null_value=0; if (!res || !res->length()) @@ -2467,10 +3322,40 @@ longlong Item_func_is_free_lock::val_int() } pthread_mutex_lock(&LOCK_user_locks); - ull= (ULL*) hash_search(&hash_user_locks,(byte*) res->ptr(), + ull= (User_level_lock *) hash_search(&hash_user_locks, (byte*) res->ptr(), res->length()); pthread_mutex_unlock(&LOCK_user_locks); if (!ull || !ull->locked) return 1; return 0; } + +longlong Item_func_is_used_lock::val_int() +{ + DBUG_ASSERT(fixed == 1); + String *res=args[0]->val_str(&value); + User_level_lock *ull; + + null_value=1; + if (!res || !res->length()) + return 0; + + pthread_mutex_lock(&LOCK_user_locks); + ull= (User_level_lock *) hash_search(&hash_user_locks, (byte*) res->ptr(), + res->length()); + pthread_mutex_unlock(&LOCK_user_locks); + if (!ull || !ull->locked) + return 0; + + null_value=0; + return ull->thread_id; +} + + +longlong Item_func_found_rows::val_int() +{ + DBUG_ASSERT(fixed == 1); + THD *thd= current_thd; + + return thd->found_rows(); +} diff --git a/sql/item_func.h b/sql/item_func.h index 3627af4ebb1..51f9d3fb36f 100644 --- 
a/sql/item_func.h +++ b/sql/item_func.h @@ -17,7 +17,7 @@ /* Function items used by mysql */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_INTERFACE #pragma interface /* gcc class implementation */ #endif @@ -31,7 +31,12 @@ extern "C" /* Bug in BSDI include file */ class Item_func :public Item_result_field { protected: - Item **args,*tmp_arg[2]; + Item **args, *tmp_arg[2]; + /* + Allowed numbers of columns in result (usually 1, which means scalar value) + 0 means get this number from first argument + */ + uint allowed_arg_cols; public: uint arg_count; table_map used_tables_cache, not_null_tables_cache; @@ -39,63 +44,74 @@ public: enum Functype { UNKNOWN_FUNC,EQ_FUNC,EQUAL_FUNC,NE_FUNC,LT_FUNC,LE_FUNC, GE_FUNC,GT_FUNC,FT_FUNC, LIKE_FUNC,NOTLIKE_FUNC,ISNULL_FUNC,ISNOTNULL_FUNC, - COND_AND_FUNC,COND_OR_FUNC,COND_XOR_FUNC,BETWEEN,IN_FUNC,INTERVAL_FUNC}; + COND_AND_FUNC, COND_OR_FUNC, COND_XOR_FUNC, BETWEEN, IN_FUNC, + INTERVAL_FUNC, ISNOTNULLTEST_FUNC, + SP_EQUALS_FUNC, SP_DISJOINT_FUNC,SP_INTERSECTS_FUNC, + SP_TOUCHES_FUNC,SP_CROSSES_FUNC,SP_WITHIN_FUNC, + SP_CONTAINS_FUNC,SP_OVERLAPS_FUNC, + SP_STARTPOINT,SP_ENDPOINT,SP_EXTERIORRING, + SP_POINTN,SP_GEOMETRYN,SP_INTERIORRINGN, + NOT_FUNC, NOT_ALL_FUNC, NOW_FUNC, VAR_VALUE_FUNC}; enum optimize_type { OPTIMIZE_NONE,OPTIMIZE_KEY,OPTIMIZE_OP, OPTIMIZE_NULL }; enum Type type() const { return FUNC_ITEM; } virtual enum Functype functype() const { return UNKNOWN_FUNC; } - Item_func(void) + Item_func(void): + allowed_arg_cols(1), arg_count(0) { - arg_count=0; with_sum_func=0; + with_sum_func= 0; } - Item_func(Item *a) + Item_func(Item *a): + allowed_arg_cols(1), arg_count(1) { - arg_count=1; - args=tmp_arg; - args[0]=a; - with_sum_func=a->with_sum_func; + args= tmp_arg; + args[0]= a; + with_sum_func= a->with_sum_func; } - Item_func(Item *a,Item *b) + Item_func(Item *a,Item *b): + allowed_arg_cols(1), arg_count(2) { - arg_count=2; - args=tmp_arg; - args[0]=a; args[1]=b; - with_sum_func=a->with_sum_func || b->with_sum_func; + args= tmp_arg; + args[0]= a; args[1]= b; + with_sum_func= a->with_sum_func || b->with_sum_func; } - Item_func(Item *a,Item *b,Item *c) + Item_func(Item *a,Item *b,Item *c): + allowed_arg_cols(1) { - arg_count=0; - if ((args=(Item**) sql_alloc(sizeof(Item*)*3))) + arg_count= 0; + if ((args= (Item**) sql_alloc(sizeof(Item*)*3))) { - arg_count=3; - args[0]=a; args[1]=b; args[2]=c; - with_sum_func=a->with_sum_func || b->with_sum_func || c->with_sum_func; + arg_count= 3; + args[0]= a; args[1]= b; args[2]= c; + with_sum_func= a->with_sum_func || b->with_sum_func || c->with_sum_func; } } - Item_func(Item *a,Item *b,Item *c,Item *d) + Item_func(Item *a,Item *b,Item *c,Item *d): + allowed_arg_cols(1) { - arg_count=0; - if ((args=(Item**) sql_alloc(sizeof(Item*)*4))) + arg_count= 0; + if ((args= (Item**) sql_alloc(sizeof(Item*)*4))) { - arg_count=4; - args[0]=a; args[1]=b; args[2]=c; args[3]=d; - with_sum_func=a->with_sum_func || b->with_sum_func || c->with_sum_func || - d->with_sum_func; + arg_count= 4; + args[0]= a; args[1]= b; args[2]= c; args[3]= d; + with_sum_func= a->with_sum_func || b->with_sum_func || + c->with_sum_func || d->with_sum_func; } } - Item_func(Item *a,Item *b,Item *c,Item *d,Item* e) + Item_func(Item *a,Item *b,Item *c,Item *d,Item* e): + allowed_arg_cols(1) { - arg_count=5; - if ((args=(Item**) sql_alloc(sizeof(Item*)*5))) + arg_count= 5; + if ((args= (Item**) sql_alloc(sizeof(Item*)*5))) { - args[0]=a; args[1]=b; args[2]=c; args[3]=d; args[4]=e; - with_sum_func=a->with_sum_func || b->with_sum_func || c->with_sum_func || - 
d->with_sum_func || e->with_sum_func ; + args[0]= a; args[1]= b; args[2]= c; args[3]= d; args[4]= e; + with_sum_func= a->with_sum_func || b->with_sum_func || + c->with_sum_func || d->with_sum_func || e->with_sum_func ; } } Item_func(List<Item> &list); - ~Item_func() {} /* Nothing to do; Items are freed automaticly */ - bool fix_fields(THD *,struct st_table_list *); - void make_field(Send_field *field); + // Constructor used for Item_cond_and/or (see Item comment) + Item_func(THD *thd, Item_func *item); + bool fix_fields(THD *,struct st_table_list *, Item **ref); table_map used_tables() const; table_map not_null_tables() const; void update_used_tables(); @@ -106,15 +122,17 @@ public: virtual const char *func_name() const { return "?"; } virtual bool const_item() const { return const_item_cache; } inline Item **arguments() const { return args; } + void set_arguments(List<Item> &list); inline uint argument_count() const { return arg_count; } inline void remove_arguments() { arg_count=0; } - virtual void split_sum_func(List<Item> &fields); + void split_sum_func(THD *thd, Item **ref_pointer_array, List<Item> &fields); void print(String *str); void print_op(String *str); + void print_args(String *str, uint from); void fix_num_length_and_dec(); - inline bool get_arg0_date(TIME *ltime,bool fuzzy_date) + inline bool get_arg0_date(TIME *ltime, uint fuzzy_date) { - return (null_value=args[0]->get_date(ltime,fuzzy_date)); + return (null_value=args[0]->get_date(ltime, fuzzy_date)); } inline bool get_arg0_time(TIME *ltime) { @@ -122,8 +140,28 @@ public: } bool is_null() { (void) val_int(); return null_value; } friend class udf_handler; - unsigned int size_of() { return sizeof(*this);} + Field *tmp_table_field() { return result_field; } Field *tmp_table_field(TABLE *t_arg); + Item *get_tmp_table_item(THD *thd); + + bool agg_arg_collations(DTCollation &c, Item **items, uint nitems, + uint flags= 0) + { + return agg_item_collations(c, func_name(), items, nitems, flags); + } + bool agg_arg_collations_for_comparison(DTCollation &c, + Item **items, uint nitems, + uint flags= 0) + { + return agg_item_collations_for_comparison(c, func_name(), + items, nitems, flags); + } + bool agg_arg_charsets(DTCollation &c, Item **items, uint nitems, + uint flags= 0) + { + return agg_item_charsets(c, func_name(), items, nitems, flags); + } + bool walk(Item_processor processor, byte *arg); }; @@ -135,10 +173,9 @@ public: Item_real_func(Item *a,Item *b) :Item_func(a,b) {} Item_real_func(List<Item> &list) :Item_func(list) {} String *val_str(String*str); - longlong val_int() { return (longlong) val(); } + longlong val_int() { DBUG_ASSERT(fixed == 1); return (longlong) val(); } enum Item_result result_type () const { return REAL_RESULT; } void fix_length_and_dec() { decimals=NOT_FIXED_DEC; max_length=float_length(decimals); } - unsigned int size_of() { return sizeof(*this);} }; @@ -150,11 +187,10 @@ public: Item_num_func(Item *a) :Item_func(a),hybrid_type(REAL_RESULT) {} Item_num_func(Item *a,Item *b) :Item_func(a,b),hybrid_type(REAL_RESULT) {} String *val_str(String*str); - longlong val_int() { return (longlong) val(); } + longlong val_int() { DBUG_ASSERT(fixed == 1); return (longlong) val(); } enum Item_result result_type () const { return hybrid_type; } void fix_length_and_dec() { fix_num_length_and_dec(); } bool is_null() { (void) val(); return null_value; } - unsigned int size_of() { return sizeof(*this);} }; @@ -170,7 +206,6 @@ class Item_num_op :public Item_func void fix_length_and_dec() { fix_num_length_and_dec(); 
find_num_type(); } void find_num_type(void); bool is_null() { (void) val(); return null_value; } - unsigned int size_of() { return sizeof(*this);} }; @@ -182,7 +217,8 @@ public: Item_int_func(Item *a,Item *b) :Item_func(a,b) { max_length=21; } Item_int_func(Item *a,Item *b,Item *c) :Item_func(a,b,c) { max_length=21; } Item_int_func(List<Item> &list) :Item_func(list) { max_length=21; } - double val() { return (double) val_int(); } + Item_int_func(THD *thd, Item_int_func *item) :Item_func(thd, item) {} + double val() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } String *val_str(String*str); enum Item_result result_type () const { return INT_RESULT; } void fix_length_and_dec() {} @@ -193,21 +229,30 @@ class Item_func_signed :public Item_int_func { public: Item_func_signed(Item *a) :Item_int_func(a) {} - double val() { return args[0]->val(); } - longlong val_int() { return args[0]->val_int(); } + const char *func_name() const { return "cast_as_signed"; } + double val() + { + double tmp= args[0]->val(); + null_value= args[0]->null_value; + return tmp; + } + longlong val_int(); + longlong val_int_from_str(int *error); void fix_length_and_dec() { max_length=args[0]->max_length; unsigned_flag=0; } + void print(String *str); }; -class Item_func_unsigned :public Item_int_func +class Item_func_unsigned :public Item_func_signed { public: - Item_func_unsigned(Item *a) :Item_int_func(a) {} - double val() { return args[0]->val(); } - longlong val_int() { return args[0]->val_int(); } + Item_func_unsigned(Item *a) :Item_func_signed(a) {} + const char *func_name() const { return "cast_as_unsigned"; } void fix_length_and_dec() { max_length=args[0]->max_length; unsigned_flag=1; } + longlong val_int(); + void print(String *str); }; @@ -252,6 +297,18 @@ public: }; +class Item_func_int_div :public Item_num_op +{ +public: + Item_func_int_div(Item *a,Item *b) :Item_num_op(a,b) + { hybrid_type=INT_RESULT; } + double val() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } + longlong val_int(); + const char *func_name() const { return "DIV"; } + void fix_length_and_dec(); +}; + + class Item_func_mod :public Item_num_op { public: @@ -303,6 +360,7 @@ class Item_dec_func :public Item_real_func #ifndef HAVE_FINITE return value; #else + /* The following should be safe, even if we compare doubles */ if (finite(value) && value != POSTFIX_ERROR) return value; null_value=1; @@ -467,13 +525,13 @@ class Item_func_rand :public Item_real_func { struct rand_struct *rand; public: - Item_func_rand(Item *a) :Item_real_func(a) {} - Item_func_rand() :Item_real_func() {} + Item_func_rand(Item *a) :Item_real_func(a), rand(0) {} + Item_func_rand() :Item_real_func() {} double val(); const char *func_name() const { return "rand"; } bool const_item() const { return 0; } - table_map used_tables() const { return RAND_TABLE_BIT; } - void fix_length_and_dec(); + void update_used_tables(); + bool fix_fields(THD *thd, struct st_table_list *tables, Item **ref); }; @@ -496,7 +554,6 @@ class Item_func_units :public Item_real_func double val(); const char *func_name() const { return name; } void fix_length_and_dec() { decimals=NOT_FIXED_DEC; max_length=float_length(decimals); } - unsigned int size_of() { return sizeof(*this);} }; @@ -507,13 +564,12 @@ class Item_func_min_max :public Item_func int cmp_sign; public: Item_func_min_max(List<Item> &list,int cmp_sign_arg) :Item_func(list), - cmp_sign(cmp_sign_arg) {} + cmp_type(INT_RESULT), cmp_sign(cmp_sign_arg) {} double val(); longlong val_int(); String *val_str(String *); void 
fix_length_and_dec(); enum Item_result result_type () const { return cmp_type; } - unsigned int size_of() { return sizeof(*this);} table_map not_null_tables() const { return 0; } }; @@ -540,14 +596,14 @@ public: longlong val_int(); const char *func_name() const { return "length"; } void fix_length_and_dec() { max_length=10; } - unsigned int size_of() { return sizeof(*this);} }; class Item_func_bit_length :public Item_func_length { public: Item_func_bit_length(Item *a) :Item_func_length(a) {} - longlong val_int() { return Item_func_length::val_int()*8; } + longlong val_int() + { DBUG_ASSERT(fixed == 1); return Item_func_length::val_int()*8; } const char *func_name() const { return "bit_length"; } }; @@ -559,52 +615,42 @@ public: longlong val_int(); const char *func_name() const { return "char_length"; } void fix_length_and_dec() { max_length=10; } - unsigned int size_of() { return sizeof(*this);} +}; + +class Item_func_coercibility :public Item_int_func +{ +public: + Item_func_coercibility(Item *a) :Item_int_func(a) {} + longlong val_int(); + const char *func_name() const { return "coercibility"; } + void fix_length_and_dec() { max_length=10; maybe_null= 0; } + table_map not_null_tables() const { return 0; } }; class Item_func_locate :public Item_int_func { String value1,value2; + DTCollation cmp_collation; public: Item_func_locate(Item *a,Item *b) :Item_int_func(a,b) {} Item_func_locate(Item *a,Item *b,Item *c) :Item_int_func(a,b,c) {} const char *func_name() const { return "locate"; } longlong val_int(); - void fix_length_and_dec() { maybe_null=0; max_length=11; } - unsigned int size_of() { return sizeof(*this);} + void fix_length_and_dec(); + void print(String *str); }; class Item_func_field :public Item_int_func { - Item *item; String value,tmp; + Item_result cmp_type; + DTCollation cmp_collation; public: - Item_func_field(Item *a,List<Item> &list) :Item_int_func(list),item(a) {} - ~Item_func_field() { delete item; } + Item_func_field(List<Item> &list) :Item_int_func(list) {} longlong val_int(); - bool fix_fields(THD *thd,struct st_table_list *tlist) - { - return (item->fix_fields(thd,tlist) || Item_func::fix_fields(thd,tlist)); - } - void split_sum_func(List<Item> &fields); - void update_used_tables() - { - item->update_used_tables(); - Item_func::update_used_tables(); - used_tables_cache|= item->used_tables(); - const_item_cache&= item->const_item(); - } const char *func_name() const { return "field"; } - void fix_length_and_dec() - { - maybe_null=0; max_length=3; - used_tables_cache|= item->used_tables(); - not_null_tables_cache= item->not_null_tables(); - const_item_cache&= item->const_item(); - with_sum_func= with_sum_func || item->with_sum_func; - } - unsigned int size_of() { return sizeof(*this);} + void fix_length_and_dec(); }; @@ -616,7 +662,6 @@ public: longlong val_int(); const char *func_name() const { return "ascii"; } void fix_length_and_dec() { max_length=3; } - unsigned int size_of() { return sizeof(*this);} }; class Item_func_ord :public Item_int_func @@ -626,7 +671,6 @@ public: Item_func_ord(Item *a) :Item_int_func(a) {} longlong val_int(); const char *func_name() const { return "ord"; } - unsigned int size_of() { return sizeof(*this);} }; class Item_func_find_in_set :public Item_int_func @@ -634,12 +678,12 @@ class Item_func_find_in_set :public Item_int_func String value,value2; uint enum_value; ulonglong enum_bit; + DTCollation cmp_collation; public: Item_func_find_in_set(Item *a,Item *b) :Item_int_func(a,b),enum_value(0) {} longlong val_int(); const char 
*func_name() const { return "find_in_set"; } void fix_length_and_dec(); - unsigned int size_of() { return sizeof(*this);} }; /* Base class for all bit functions: '~', '|', '^', '&', '>>', '<<' */ @@ -650,6 +694,7 @@ public: Item_func_bit(Item *a, Item *b) :Item_int_func(a, b) {} Item_func_bit(Item *a) :Item_int_func(a) {} void fix_length_and_dec() { unsigned_flag= 1; } + void print(String *str) { print_op(str); } }; class Item_func_bit_or :public Item_func_bit @@ -699,15 +744,18 @@ public: Item_func_bit_neg(Item *a) :Item_func_bit(a) {} longlong val_int(); const char *func_name() const { return "~"; } + void print(String *str) { Item_func::print(str); } }; -class Item_func_set_last_insert_id :public Item_int_func + +class Item_func_last_insert_id :public Item_int_func { public: - Item_func_set_last_insert_id(Item *a) :Item_int_func(a) {} + Item_func_last_insert_id() :Item_int_func() {} + Item_func_last_insert_id(Item *a) :Item_int_func(a) {} longlong val_int(); const char *func_name() const { return "last_insert_id"; } - void fix_length_and_dec() { max_length=args[0]->max_length; } + void fix_length_and_dec() { if (arg_count) max_length= args[0]->max_length; } }; class Item_func_benchmark :public Item_int_func @@ -720,7 +768,7 @@ class Item_func_benchmark :public Item_int_func longlong val_int(); const char *func_name() const { return "benchmark"; } void fix_length_and_dec() { max_length=1; maybe_null=0; } - unsigned int size_of() { return sizeof(*this);} + void print(String *str); }; @@ -735,18 +783,19 @@ public: Item_udf_func(udf_func *udf_arg) :Item_func(), udf(udf_arg) {} Item_udf_func(udf_func *udf_arg, List<Item> &list) :Item_func(list), udf(udf_arg) {} - ~Item_udf_func() {} const char *func_name() const { return udf.name(); } - bool fix_fields(THD *thd,struct st_table_list *tables) + bool fix_fields(THD *thd, struct st_table_list *tables, Item **ref) { - bool res=udf.fix_fields(thd,tables,this,arg_count,args); - used_tables_cache=udf.used_tables_cache; - const_item_cache=udf.const_item_cache; + DBUG_ASSERT(fixed == 0); + bool res= udf.fix_fields(thd, tables, this, arg_count, args); + used_tables_cache= udf.used_tables_cache; + const_item_cache= udf.const_item_cache; + fixed= 1; return res; } + void cleanup(); Item_result result_type () const { return udf.result_type(); } table_map not_null_tables() const { return 0; } - unsigned int size_of() { return sizeof(*this);} }; @@ -756,8 +805,8 @@ class Item_func_udf_float :public Item_udf_func Item_func_udf_float(udf_func *udf_arg) :Item_udf_func(udf_arg) {} Item_func_udf_float(udf_func *udf_arg, List<Item> &list) :Item_udf_func(udf_arg,list) {} - ~Item_func_udf_float() {} - longlong val_int() { return (longlong) Item_func_udf_float::val(); } + longlong val_int() + { DBUG_ASSERT(fixed == 1); return (longlong) Item_func_udf_float::val(); } double val(); String *val_str(String *str); void fix_length_and_dec() { fix_num_length_and_dec(); } @@ -770,7 +819,6 @@ public: Item_func_udf_int(udf_func *udf_arg) :Item_udf_func(udf_arg) {} Item_func_udf_int(udf_func *udf_arg, List<Item> &list) :Item_udf_func(udf_arg,list) {} - ~Item_func_udf_int() {} longlong val_int(); double val() { return (double) Item_func_udf_int::val_int(); } String *val_str(String *str); @@ -785,17 +833,21 @@ public: Item_func_udf_str(udf_func *udf_arg) :Item_udf_func(udf_arg) {} Item_func_udf_str(udf_func *udf_arg, List<Item> &list) :Item_udf_func(udf_arg,list) {} - ~Item_func_udf_str() {} String *val_str(String *); double val() { - String *res; res=val_str(&str_value); - 
return res ? atof(res->c_ptr()) : 0.0; + int err; + String *res; + char *end_not_used; + res=val_str(&str_value); + return res ? my_strntod(res->charset(), (char*) res->ptr(), res->length(), + &end_not_used, &err) : 0.0; } longlong val_int() { + int err; String *res; res=val_str(&str_value); - return res ? strtoll(res->c_ptr(),(char**) 0,10) : (longlong) 0; + return res ? my_strntoll(res->charset(),res->ptr(),res->length(),10,(char**) 0,&err) : (longlong) 0; } enum Item_result result_type () const { return STRING_RESULT; } void fix_length_and_dec(); @@ -808,8 +860,7 @@ class Item_func_udf_float :public Item_real_func public: Item_func_udf_float(udf_func *udf_arg) :Item_real_func() {} Item_func_udf_float(udf_func *udf_arg, List<Item> &list) :Item_real_func(list) {} - ~Item_func_udf_float() {} - double val() { return 0.0; } + double val() { DBUG_ASSERT(fixed == 1); return 0.0; } }; @@ -818,8 +869,7 @@ class Item_func_udf_int :public Item_int_func public: Item_func_udf_int(udf_func *udf_arg) :Item_int_func() {} Item_func_udf_int(udf_func *udf_arg, List<Item> &list) :Item_int_func(list) {} - ~Item_func_udf_int() {} - longlong val_int() { return 0; } + longlong val_int() { DBUG_ASSERT(fixed == 1); return 0; } }; @@ -828,10 +878,10 @@ class Item_func_udf_str :public Item_func public: Item_func_udf_str(udf_func *udf_arg) :Item_func() {} Item_func_udf_str(udf_func *udf_arg, List<Item> &list) :Item_func(list) {} - ~Item_func_udf_str() {} - String *val_str(String *) { null_value=1; return 0; } - double val() { null_value=1; return 0.0; } - longlong val_int() { null_value=1; return 0; } + String *val_str(String *) + { DBUG_ASSERT(fixed == 1); null_value=1; return 0; } + double val() { DBUG_ASSERT(fixed == 1); null_value=1; return 0.0; } + longlong val_int() { DBUG_ASSERT(fixed == 1); null_value=1; return 0; } enum Item_result result_type () const { return STRING_RESULT; } void fix_length_and_dec() { maybe_null=1; max_length=0; } }; @@ -842,9 +892,9 @@ public: ** User level locks */ -class ULL; +class User_level_lock; void item_user_lock_init(void); -void item_user_lock_release(ULL *ull); +void item_user_lock_release(User_level_lock *ull); void item_user_lock_free(void); class Item_func_get_lock :public Item_int_func @@ -855,7 +905,6 @@ class Item_func_get_lock :public Item_int_func longlong val_int(); const char *func_name() const { return "get_lock"; } void fix_length_and_dec() { max_length=1; maybe_null=1;} - unsigned int size_of() { return sizeof(*this);} }; class Item_func_release_lock :public Item_int_func @@ -866,7 +915,6 @@ class Item_func_release_lock :public Item_int_func longlong val_int(); const char *func_name() const { return "release_lock"; } void fix_length_and_dec() { max_length=1; maybe_null=1;} - unsigned int size_of() { return sizeof(*this);} }; /* replication functions */ @@ -880,7 +928,6 @@ class Item_master_pos_wait :public Item_int_func longlong val_int(); const char *func_name() const { return "master_pos_wait"; } void fix_length_and_dec() { max_length=21; maybe_null=1;} - unsigned int size_of() { return sizeof(*this);} }; @@ -891,34 +938,44 @@ class user_var_entry; class Item_func_set_user_var :public Item_func { enum Item_result cached_result_type; - LEX_STRING name; user_var_entry *entry; char buffer[MAX_FIELD_WIDTH]; String value; + union + { + longlong vint; + double vreal; + String *vstr; + } save_result; + String save_buff; + public: - Item_func_set_user_var(LEX_STRING a,Item *b): - Item_func(b), name(a), value(buffer,sizeof(buffer)) {} + LEX_STRING name; // keep it 
public + Item_func_set_user_var(LEX_STRING a,Item *b) + :Item_func(b), cached_result_type(INT_RESULT), name(a) + {} double val(); longlong val_int(); String *val_str(String *str); - bool update_hash(const void *ptr, uint length, enum Item_result type); + bool update_hash(void *ptr, uint length, enum Item_result type, + CHARSET_INFO *cs, Derivation dv); + bool check(); bool update(); enum Item_result result_type () const { return cached_result_type; } - bool fix_fields(THD *thd,struct st_table_list *tables); + bool fix_fields(THD *thd, struct st_table_list *tables, Item **ref); void fix_length_and_dec(); void print(String *str); const char *func_name() const { return "set_user_var"; } - unsigned int size_of() { return sizeof(*this);} }; class Item_func_get_user_var :public Item_func { - LEX_STRING name; user_var_entry *var_entry; public: + LEX_STRING name; // keep it public Item_func_get_user_var(LEX_STRING a): Item_func(), name(a) {} double val(); @@ -927,12 +984,40 @@ public: void fix_length_and_dec(); void print(String *str); enum Item_result result_type() const; + /* + We must always return variables as strings to guard against selects of type + select @t1:=1,@t1,@t:="hello",@t from foo where (@t1:= t2.b) + */ + enum_field_types field_type() const { return MYSQL_TYPE_STRING; } + enum Functype functype() const { return VAR_VALUE_FUNC; } const char *func_name() const { return "get_user_var"; } bool const_item() const; table_map used_tables() const { return const_item() ? 0 : RAND_TABLE_BIT; } bool eq(const Item *item, bool binary_cmp) const; - unsigned int size_of() { return sizeof(*this);} +}; + + +/* A system variable */ + +class Item_func_get_system_var :public Item_func +{ + sys_var *var; + enum_var_type var_type; + LEX_STRING component; +public: + Item_func_get_system_var(sys_var *var_arg, enum_var_type var_type_arg, + LEX_STRING *component_arg, const char *name_arg, + size_t name_len_arg); + bool fix_fields(THD *thd, TABLE_LIST *tables, Item **ref); + /* + Stubs for pure virtual methods. Should never be called: this + item is always substituted with a constant in fix_fields(). 
+ */ + double val() { DBUG_ASSERT(0); return 0.0; } + longlong val_int() { DBUG_ASSERT(0); return 0; } + String* val_str(String*) { DBUG_ASSERT(0); return 0; } + void fix_length_and_dec() { DBUG_ASSERT(0); } }; @@ -952,92 +1037,92 @@ public: class Item_func_match :public Item_real_func { public: - List<Item> fields; - String value; - TABLE *table; - Item_func_match *master; - FT_INFO * ft_handler; - Item *concat; - byte *record; - uint key, mode; + uint key, flags; bool join_key; - - Item_func_match(List<Item> &a, Item *b): Item_real_func(b), - fields(a), table(0), master(0), ft_handler(0), - concat(0), key(0), join_key(0) - {} - ~Item_func_match() + DTCollation cmp_collation; + FT_INFO *ft_handler; + TABLE *table; + Item_func_match *master; // for master-slave optimization + Item *concat; // Item_func_concat_ws + String value; // value of concat + String search_value; // key_item()'s value converted to cmp_collation + + Item_func_match(List<Item> &a, uint b): Item_real_func(a), key(0), flags(b), + join_key(0), ft_handler(0), table(0), master(0), concat(0) { } + void cleanup() { + DBUG_ENTER("Item_func_match"); + Item_real_func::cleanup(); if (!master && ft_handler) { ft_handler->please->close_search(ft_handler); - ft_handler=0; - if(join_key) - table->file->ft_handler=0; - table->fulltext_searched=0; } if (concat) + { delete concat; + concat= 0; + } + ft_handler= 0; + DBUG_VOID_RETURN; } enum Functype functype() const { return FT_FUNC; } + const char *func_name() const { return "match"; } void update_used_tables() {} table_map not_null_tables() const { return 0; } - bool fix_fields(THD *thd,struct st_table_list *tlist); + bool fix_fields(THD *thd, struct st_table_list *tlist, Item **ref); bool eq(const Item *, bool binary_cmp) const; - longlong val_int() { return val()!=0.0; } + /* The following should be safe, even if we compare doubles */ + longlong val_int() { DBUG_ASSERT(fixed == 1); return val()!=0.0; } double val(); + void print(String *str); bool fix_index(); void init_search(bool no_order); - unsigned int size_of() { return sizeof(*this);} }; -class Item_func_match_nl :public Item_func_match +class Item_func_bit_xor : public Item_func_bit { public: - Item_func_match_nl(List<Item> &a, Item *b) - :Item_func_match(a,b) - { mode=FT_NL; } - const char *func_name() const { return "match_nl"; } + Item_func_bit_xor(Item *a, Item *b) :Item_func_bit(a, b) {} + longlong val_int(); + const char *func_name() const { return "^"; } }; +class Item_func_is_free_lock :public Item_int_func +{ + String value; +public: + Item_func_is_free_lock(Item *a) :Item_int_func(a) {} + longlong val_int(); + const char *func_name() const { return "is_free_lock"; } + void fix_length_and_dec() { decimals=0; max_length=1; maybe_null=1;} +}; -class Item_func_match_bool :public Item_func_match +class Item_func_is_used_lock :public Item_int_func { + String value; public: - Item_func_match_bool(List<Item> &a, Item *b) - :Item_func_match(a,b) - { mode=FT_BOOL; } - const char *func_name() const { return "match_bool"; } + Item_func_is_used_lock(Item *a) :Item_int_func(a) {} + longlong val_int(); + const char *func_name() const { return "is_used_lock"; } + void fix_length_and_dec() { decimals=0; max_length=10; maybe_null=1;} }; /* For type casts */ -enum Item_cast +enum Cast_target { ITEM_CAST_BINARY, ITEM_CAST_SIGNED_INT, ITEM_CAST_UNSIGNED_INT, ITEM_CAST_DATE, ITEM_CAST_TIME, ITEM_CAST_DATETIME, ITEM_CAST_CHAR }; -Item *create_func_cast(Item *a, Item_cast cast_type); - - -class Item_func_bit_xor : public Item_func_bit -{ 
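/*
  Editorial sketch, not part of the patch: Item_func_get_system_var above is a
  placeholder whose fix_fields() evaluates the system variable and then replaces
  the item itself, through the parent's reference, with the resulting constant
  (thd->change_item_tree(ref, item)), which is why its val*() methods are
  unreachable stubs.  The toy classes below show the same
  substitute-through-a-back-reference pattern; they share nothing with the real
  Item hierarchy.
*/
#include <cstdio>

struct Node
{
  virtual ~Node() {}
  virtual int value() const = 0;
  /* Resolve this node; may replace *ref, the parent's pointer to us. */
  virtual void fix(Node **ref) { (void) ref; }
};

struct Const_node : Node
{
  int v;
  explicit Const_node(int v_arg) : v(v_arg) {}
  int value() const { return v; }
};

struct Sysvar_node : Node                /* placeholder, never evaluated directly */
{
  int current_setting;
  explicit Sysvar_node(int setting) : current_setting(setting) {}
  int value() const { return 0; }        /* stub; the real code would assert here */
  void fix(Node **ref)
  {
    *ref= new Const_node(current_setting); /* substitute a constant for ourselves */
    delete this;
  }
};

int main()
{
  Node *expr= new Sysvar_node(42);
  expr->fix(&expr);                      /* expr now points at a Const_node */
  printf("%d\n", expr->value());         /* prints 42 */
  delete expr;
  return 0;
}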
-public: - Item_func_bit_xor(Item *a, Item *b) :Item_func_bit(a, b) {} - longlong val_int(); - const char *func_name() const { return "^"; } -}; -class Item_func_is_free_lock :public Item_int_func +class Item_func_found_rows :public Item_int_func { - String value; public: - Item_func_is_free_lock(Item *a) :Item_int_func(a) {} + Item_func_found_rows() :Item_int_func() {} longlong val_int(); - const char *func_name() const { return "check_lock"; } - void fix_length_and_dec() { decimals=0; max_length=1; maybe_null=1;} - unsigned int size_of() { return sizeof(*this);} + const char *func_name() const { return "found_rows"; } + void fix_length_and_dec() { decimals= 0; maybe_null=0; } }; diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc new file mode 100644 index 00000000000..6bd2e65632f --- /dev/null +++ b/sql/item_geofunc.cc @@ -0,0 +1,691 @@ +/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + + +/* This file defines all spatial functions */ + +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation // gcc: Class implementation +#endif + +#include "mysql_priv.h" +#ifdef HAVE_SPATIAL +#include <m_ctype.h> + +void Item_geometry_func::fix_length_and_dec() +{ + collation.set(&my_charset_bin); + decimals=0; + max_length=MAX_BLOB_WIDTH; +} + + +String *Item_func_geometry_from_text::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + Geometry_buffer buffer; + String arg_val; + String *wkt= args[0]->val_str(&arg_val); + + if ((null_value= args[0]->null_value)) + return 0; + + Gis_read_stream trs(wkt->charset(), wkt->ptr(), wkt->length()); + uint32 srid= 0; + + if ((arg_count == 2) && !args[1]->null_value) + srid= (uint32)args[1]->val_int(); + + str->set_charset(&my_charset_bin); + if (str->reserve(SRID_SIZE, 512)) + return 0; + str->length(0); + str->q_append(srid); + if ((null_value= !Geometry::create_from_wkt(&buffer, &trs, str, 0))) + return 0; + return str; +} + + +String *Item_func_geometry_from_wkb::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + String arg_val; + String *wkb= args[0]->val_str(&arg_val); + Geometry_buffer buffer; + uint32 srid= 0; + + if ((arg_count == 2) && !args[1]->null_value) + srid= (uint32)args[1]->val_int(); + + str->set_charset(&my_charset_bin); + if (str->reserve(SRID_SIZE, 512)) + return 0; + str->length(0); + str->q_append(srid); + if ((null_value= + (args[0]->null_value || + !Geometry::create_from_wkb(&buffer, wkb->ptr(), wkb->length(), str)))) + return 0; + return str; +} + + +String *Item_func_as_wkt::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + String arg_val; + String *swkb= args[0]->val_str(&arg_val); + Geometry_buffer buffer; + Geometry *geom= NULL; + const char *dummy; + + if ((null_value= + (args[0]->null_value || + !(geom= Geometry::construct(&buffer, swkb->ptr(), swkb->length()))))) + return 0; + + str->length(0); + if 
((null_value= geom->as_wkt(str, &dummy))) + return 0; + + return str; +} + + +void Item_func_as_wkt::fix_length_and_dec() +{ + max_length=MAX_BLOB_WIDTH; +} + + +String *Item_func_as_wkb::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + String arg_val; + String *swkb= args[0]->val_str(&arg_val); + Geometry_buffer buffer; + + if ((null_value= + (args[0]->null_value || + !(Geometry::construct(&buffer, swkb->ptr(), swkb->length()))))) + return 0; + + str->copy(swkb->ptr() + SRID_SIZE, swkb->length() - SRID_SIZE, + &my_charset_bin); + return str; +} + + +String *Item_func_geometry_type::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + String *swkb= args[0]->val_str(str); + Geometry_buffer buffer; + Geometry *geom= NULL; + + if ((null_value= + (args[0]->null_value || + !(geom= Geometry::construct(&buffer, swkb->ptr(), swkb->length()))))) + return 0; + /* String will not move */ + str->copy(geom->get_class_info()->m_name.str, + geom->get_class_info()->m_name.length, + default_charset()); + return str; +} + + +String *Item_func_envelope::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + String arg_val; + String *swkb= args[0]->val_str(&arg_val); + Geometry_buffer buffer; + Geometry *geom= NULL; + uint32 srid; + + if ((null_value= + args[0]->null_value || + !(geom= Geometry::construct(&buffer, swkb->ptr(), swkb->length())))) + return 0; + + srid= uint4korr(swkb->ptr()); + str->set_charset(&my_charset_bin); + str->length(0); + if (str->reserve(SRID_SIZE, 512)) + return 0; + str->q_append(srid); + return (null_value= geom->envelope(str)) ? 0 : str; +} + + +String *Item_func_centroid::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + String arg_val; + String *swkb= args[0]->val_str(&arg_val); + Geometry_buffer buffer; + Geometry *geom= NULL; + uint32 srid; + + if ((null_value= args[0]->null_value || + !(geom= Geometry::construct(&buffer, swkb->ptr(), swkb->length())))) + return 0; + + str->set_charset(&my_charset_bin); + if (str->reserve(SRID_SIZE, 512)) + return 0; + str->length(0); + srid= uint4korr(swkb->ptr()); + str->q_append(srid); + + return (null_value= test(geom->centroid(str))) ? 
0 : str; +} + + +/* + Spatial decomposition functions +*/ + +String *Item_func_spatial_decomp::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + String arg_val; + String *swkb= args[0]->val_str(&arg_val); + Geometry_buffer buffer; + Geometry *geom= NULL; + uint32 srid; + + if ((null_value= + (args[0]->null_value || + !(geom= Geometry::construct(&buffer, swkb->ptr(), swkb->length()))))) + return 0; + + srid= uint4korr(swkb->ptr()); + str->set_charset(&my_charset_bin); + if (str->reserve(SRID_SIZE, 512)) + goto err; + str->length(0); + str->q_append(srid); + switch (decomp_func) { + case SP_STARTPOINT: + if (geom->start_point(str)) + goto err; + break; + + case SP_ENDPOINT: + if (geom->end_point(str)) + goto err; + break; + + case SP_EXTERIORRING: + if (geom->exterior_ring(str)) + goto err; + break; + + default: + goto err; + } + return str; + +err: + null_value= 1; + return 0; +} + + +String *Item_func_spatial_decomp_n::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + String arg_val; + String *swkb= args[0]->val_str(&arg_val); + long n= (long) args[1]->val_int(); + Geometry_buffer buffer; + Geometry *geom= NULL; + uint32 srid; + + if ((null_value= + (args[0]->null_value || args[1]->null_value || + !(geom= Geometry::construct(&buffer, swkb->ptr(), swkb->length()))))) + return 0; + + str->set_charset(&my_charset_bin); + if (str->reserve(SRID_SIZE, 512)) + goto err; + srid= uint4korr(swkb->ptr()); + str->length(0); + str->q_append(srid); + switch (decomp_func_n) + { + case SP_POINTN: + if (geom->point_n(n,str)) + goto err; + break; + + case SP_GEOMETRYN: + if (geom->geometry_n(n,str)) + goto err; + break; + + case SP_INTERIORRINGN: + if (geom->interior_ring_n(n,str)) + goto err; + break; + + default: + goto err; + } + return str; + +err: + null_value=1; + return 0; +} + + +/* + Functions to concatenate various spatial objects +*/ + + +/* +* Concatenate doubles into Point +*/ + + +String *Item_func_point::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + double x= args[0]->val(); + double y= args[1]->val(); + + if ((null_value= (args[0]->null_value || + args[1]->null_value || + str->realloc(1 + 4 + SIZEOF_STORED_DOUBLE*2)))) + return 0; + + str->set_charset(&my_charset_bin); + str->length(0); + str->q_append((char)Geometry::wkb_ndr); + str->q_append((uint32)Geometry::wkb_point); + str->q_append(x); + str->q_append(y); + return str; +} + + +/* + Concatenates various items into various collections + with checkings for valid wkb type of items. + For example, MultiPoint can be a collection of Points only. + coll_type contains wkb type of target collection. + item_type contains a valid wkb type of items. + In the case when coll_type is wkbGeometryCollection, + we do not check wkb type of items, any is valid. 
+*/ + +String *Item_func_spatial_collection::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + String arg_value; + uint i; + + str->set_charset(&my_charset_bin); + str->length(0); + if (str->reserve(1 + 4 + 4, 512)) + goto err; + + str->q_append((char) Geometry::wkb_ndr); + str->q_append((uint32) coll_type); + str->q_append((uint32) arg_count); + + for (i= 0; i < arg_count; ++i) + { + String *res= args[i]->val_str(&arg_value); + if (args[i]->null_value) + goto err; + + if (coll_type == Geometry::wkb_geometrycollection) + { + /* + In the case of GeometryCollection we don't need any checkings + for item types, so just copy them into target collection + */ + if (str->append(res->ptr(), res->length(), (uint32) 512)) + goto err; + } + else + { + enum Geometry::wkbType wkb_type; + uint32 len=res->length(); + const char *data= res->ptr() + 1; + + /* + In the case of named collection we must check that items + are of specific type, let's do this checking now + */ + + if (len < 5) + goto err; + wkb_type= (Geometry::wkbType) uint4korr(data); + data+= 4; + len-= 5; + if (wkb_type != item_type) + goto err; + + switch (coll_type) { + case Geometry::wkb_multipoint: + case Geometry::wkb_multilinestring: + case Geometry::wkb_multipolygon: + if (len < WKB_HEADER_SIZE || + str->append(data-WKB_HEADER_SIZE, len+WKB_HEADER_SIZE, 512)) + goto err; + break; + + case Geometry::wkb_linestring: + if (str->append(data, POINT_DATA_SIZE, 512)) + goto err; + break; + case Geometry::wkb_polygon: + { + uint32 n_points; + double x1, y1, x2, y2; + const char *org_data= data; + + if (len < 4 + 2 * POINT_DATA_SIZE) + goto err; + + n_points= uint4korr(data); + data+= 4; + float8get(x1, data); + data+= SIZEOF_STORED_DOUBLE; + float8get(y1, data); + data+= SIZEOF_STORED_DOUBLE; + + data+= (n_points - 2) * POINT_DATA_SIZE; + + float8get(x2, data); + float8get(y2, data + SIZEOF_STORED_DOUBLE); + + if ((x1 != x2) || (y1 != y2) || + str->append(org_data, len, 512)) + goto err; + } + break; + + default: + goto err; + } + } + } + if (str->length() > current_thd->variables.max_allowed_packet) + { + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_ALLOWED_PACKET_OVERFLOWED, + ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), + func_name(), current_thd->variables.max_allowed_packet); + goto err; + } + + null_value = 0; + return str; + +err: + null_value= 1; + return 0; +} + + +/* + Functions for spatial relations +*/ + +longlong Item_func_spatial_rel::val_int() +{ + DBUG_ASSERT(fixed == 1); + String *res1= args[0]->val_str(&tmp_value1); + String *res2= args[1]->val_str(&tmp_value2); + Geometry_buffer buffer1, buffer2; + Geometry *g1, *g2; + MBR mbr1, mbr2; + const char *dummy; + + if ((null_value= + (args[0]->null_value || + args[1]->null_value || + !(g1= Geometry::construct(&buffer1, res1->ptr(), res1->length())) || + !(g2= Geometry::construct(&buffer2, res2->ptr(), res2->length())) || + g1->get_mbr(&mbr1, &dummy) || + g2->get_mbr(&mbr2, &dummy)))) + return 0; + + switch (spatial_rel) { + case SP_CONTAINS_FUNC: + return mbr1.contains(&mbr2); + case SP_WITHIN_FUNC: + return mbr1.within(&mbr2); + case SP_EQUALS_FUNC: + return mbr1.equals(&mbr2); + case SP_DISJOINT_FUNC: + return mbr1.disjoint(&mbr2); + case SP_INTERSECTS_FUNC: + return mbr1.intersects(&mbr2); + case SP_TOUCHES_FUNC: + return mbr1.touches(&mbr2); + case SP_OVERLAPS_FUNC: + return mbr1.overlaps(&mbr2); + case SP_CROSSES_FUNC: + return 0; + default: + break; + } + + null_value=1; + return 0; +} + + +longlong Item_func_isempty::val_int() +{ + 
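/*
  Editorial sketch, not part of the patch: Item_func_spatial_rel::val_int()
  above decides the relation purely from the minimum bounding rectangles (MBRs)
  of the two geometries, not from their exact shapes.  The toy MBR below
  illustrates what contains / within / disjoint / intersects mean at that level
  of precision; it is not the server's MBR class.
*/
#include <cstdio>

struct MBR
{
  double xmin, ymin, xmax, ymax;

  bool contains(const MBR &o) const
  {
    return xmin <= o.xmin && ymin <= o.ymin && xmax >= o.xmax && ymax >= o.ymax;
  }
  bool within(const MBR &o) const   { return o.contains(*this); }
  bool disjoint(const MBR &o) const
  {
    return o.xmin > xmax || o.xmax < xmin || o.ymin > ymax || o.ymax < ymin;
  }
  bool intersects(const MBR &o) const { return !disjoint(o); }
};

int main()
{
  MBR big=    {0, 0, 10, 10};
  MBR inner=  {2, 2, 3, 3};
  MBR remote= {20, 20, 30, 30};

  printf("%d %d %d\n",
         big.contains(inner),        /* 1 */
         inner.within(big),          /* 1 */
         big.intersects(remote));    /* 0 */
  return 0;
}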
DBUG_ASSERT(fixed == 1); + String tmp; + null_value=0; + return args[0]->null_value ? 1 : 0; +} + + +longlong Item_func_issimple::val_int() +{ + DBUG_ASSERT(fixed == 1); + String tmp; + String *wkb=args[0]->val_str(&tmp); + + if ((null_value= (!wkb || args[0]->null_value))) + return 0; + /* TODO: Ramil or Holyfoot, add real IsSimple calculation */ + return 0; +} + + +longlong Item_func_isclosed::val_int() +{ + DBUG_ASSERT(fixed == 1); + String tmp; + String *swkb= args[0]->val_str(&tmp); + Geometry_buffer buffer; + Geometry *geom; + int isclosed= 0; // In case of error + + null_value= (!swkb || + args[0]->null_value || + !(geom= + Geometry::construct(&buffer, swkb->ptr(), swkb->length())) || + geom->is_closed(&isclosed)); + + return (longlong) isclosed; +} + +/* + Numerical functions +*/ + + +longlong Item_func_dimension::val_int() +{ + DBUG_ASSERT(fixed == 1); + uint32 dim= 0; // In case of error + String *swkb= args[0]->val_str(&value); + Geometry_buffer buffer; + Geometry *geom; + const char *dummy; + + null_value= (!swkb || + args[0]->null_value || + !(geom= Geometry::construct(&buffer, swkb->ptr(), swkb->length())) || + geom->dimension(&dim, &dummy)); + return (longlong) dim; +} + + +longlong Item_func_numinteriorring::val_int() +{ + DBUG_ASSERT(fixed == 1); + uint32 num= 0; // In case of error + String *swkb= args[0]->val_str(&value); + Geometry_buffer buffer; + Geometry *geom; + + null_value= (!swkb || + !(geom= Geometry::construct(&buffer, + swkb->ptr(), swkb->length())) || + geom->num_interior_ring(&num)); + return (longlong) num; +} + + +longlong Item_func_numgeometries::val_int() +{ + DBUG_ASSERT(fixed == 1); + uint32 num= 0; // In case of errors + String *swkb= args[0]->val_str(&value); + Geometry_buffer buffer; + Geometry *geom; + + null_value= (!swkb || + !(geom= Geometry::construct(&buffer, + swkb->ptr(), swkb->length())) || + geom->num_geometries(&num)); + return (longlong) num; +} + + +longlong Item_func_numpoints::val_int() +{ + DBUG_ASSERT(fixed == 1); + uint32 num= 0; // In case of errors + String *swkb= args[0]->val_str(&value); + Geometry_buffer buffer; + Geometry *geom; + + null_value= (!swkb || + args[0]->null_value || + !(geom= Geometry::construct(&buffer, + swkb->ptr(), swkb->length())) || + geom->num_points(&num)); + return (longlong) num; +} + + +double Item_func_x::val() +{ + DBUG_ASSERT(fixed == 1); + double res= 0.0; // In case of errors + String *swkb= args[0]->val_str(&value); + Geometry_buffer buffer; + Geometry *geom; + + null_value= (!swkb || + !(geom= Geometry::construct(&buffer, + swkb->ptr(), swkb->length())) || + geom->get_x(&res)); + return res; +} + + +double Item_func_y::val() +{ + DBUG_ASSERT(fixed == 1); + double res= 0; // In case of errors + String *swkb= args[0]->val_str(&value); + Geometry_buffer buffer; + Geometry *geom; + + null_value= (!swkb || + !(geom= Geometry::construct(&buffer, + swkb->ptr(), swkb->length())) || + geom->get_y(&res)); + return res; +} + + +double Item_func_area::val() +{ + DBUG_ASSERT(fixed == 1); + double res= 0; // In case of errors + String *swkb= args[0]->val_str(&value); + Geometry_buffer buffer; + Geometry *geom; + const char *dummy; + + null_value= (!swkb || + !(geom= Geometry::construct(&buffer, + swkb->ptr(), swkb->length())) || + geom->area(&res, &dummy)); + return res; +} + +double Item_func_glength::val() +{ + DBUG_ASSERT(fixed == 1); + double res= 0; // In case of errors + String *swkb= args[0]->val_str(&value); + Geometry_buffer buffer; + Geometry *geom; + + null_value= (!swkb || + !(geom= 
Geometry::construct(&buffer, + swkb->ptr(), swkb->length())) || + geom->length(&res)); + return res; +} + +longlong Item_func_srid::val_int() +{ + DBUG_ASSERT(fixed == 1); + String *swkb= args[0]->val_str(&value); + Geometry_buffer buffer; + + null_value= (!swkb || + !Geometry::construct(&buffer, + swkb->ptr(), swkb->length())); + if (null_value) + return 0; + + return (longlong) (uint4korr(swkb->ptr())); +} + +#endif /*HAVE_SPATIAL*/ diff --git a/sql/item_geofunc.h b/sql/item_geofunc.h new file mode 100644 index 00000000000..5f060416ff3 --- /dev/null +++ b/sql/item_geofunc.h @@ -0,0 +1,351 @@ +/* Copyright (C) 2000-2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + + +/* This file defines all spatial functions */ + +#ifdef HAVE_SPATIAL + +#ifdef USE_PRAGMA_INTERFACE +#pragma interface /* gcc class implementation */ +#endif + +class Item_geometry_func: public Item_str_func +{ +public: + Item_geometry_func() :Item_str_func() {} + Item_geometry_func(Item *a) :Item_str_func(a) {} + Item_geometry_func(Item *a,Item *b) :Item_str_func(a,b) {} + Item_geometry_func(Item *a,Item *b,Item *c) :Item_str_func(a,b,c) {} + Item_geometry_func(List<Item> &list) :Item_str_func(list) {} + void fix_length_and_dec(); +}; + +class Item_func_geometry_from_text: public Item_geometry_func +{ +public: + Item_func_geometry_from_text(Item *a) :Item_geometry_func(a) {} + Item_func_geometry_from_text(Item *a, Item *srid) :Item_geometry_func(a, srid) {} + const char *func_name() const { return "geometryfromtext"; } + String *val_str(String *); +}; + +class Item_func_geometry_from_wkb: public Item_geometry_func +{ +public: + Item_func_geometry_from_wkb(Item *a): Item_geometry_func(a) {} + Item_func_geometry_from_wkb(Item *a, Item *srid): Item_geometry_func(a, srid) {} + const char *func_name() const { return "geometryfromwkb"; } + String *val_str(String *); +}; + +class Item_func_as_wkt: public Item_str_func +{ +public: + Item_func_as_wkt(Item *a): Item_str_func(a) {} + const char *func_name() const { return "astext"; } + String *val_str(String *); + void fix_length_and_dec(); +}; + +class Item_func_as_wkb: public Item_geometry_func +{ +public: + Item_func_as_wkb(Item *a): Item_geometry_func(a) {} + const char *func_name() const { return "aswkb"; } + String *val_str(String *); +}; + +class Item_func_geometry_type: public Item_str_func +{ +public: + Item_func_geometry_type(Item *a): Item_str_func(a) {} + String *val_str(String *); + const char *func_name() const { return "geometrytype"; } + void fix_length_and_dec() + { + max_length=20; // "GeometryCollection" is the most long + }; +}; + +class Item_func_centroid: public Item_geometry_func +{ +public: + Item_func_centroid(Item *a): Item_geometry_func(a) {} + const char *func_name() const { return "centroid"; } + String *val_str(String *); +}; + +class Item_func_envelope: public Item_geometry_func +{ +public: + 
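/*
  Editorial sketch, not part of the patch: as Item_func_srid::val_int() above
  suggests, the stored geometry value is a 4-byte SRID prefix followed by the
  WKB bytes, read back with uint4korr().  The helpers below build and read such
  a prefix with plain byte shifts; they are stand-ins for the server's
  int4store/uint4korr macros, and the little-endian byte order is an assumption
  made to match uint4korr.
*/
#include <cstdio>
#include <string>

static void append_srid(std::string &buf, unsigned long srid)
{
  for (int i= 0; i < 4; i++)               /* low byte first, like int4store */
    buf.push_back((char) ((srid >> (8 * i)) & 0xFF));
}

static unsigned long read_srid(const std::string &buf)
{
  unsigned long srid= 0;
  for (int i= 3; i >= 0; i--)              /* like uint4korr */
    srid= (srid << 8) | (unsigned char) buf[i];
  return srid;
}

int main()
{
  std::string geom_value;
  append_srid(geom_value, 4326);           /* SRID prefix */
  geom_value+= "...wkb bytes would follow...";  /* placeholder payload */
  printf("%lu\n", read_srid(geom_value));  /* prints 4326 */
  return 0;
}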
Item_func_envelope(Item *a): Item_geometry_func(a) {} + const char *func_name() const { return "envelope"; } + String *val_str(String *); +}; + +class Item_func_point: public Item_geometry_func +{ +public: + Item_func_point(Item *a, Item *b): Item_geometry_func(a, b) {} + Item_func_point(Item *a, Item *b, Item *srid): Item_geometry_func(a, b, srid) {} + const char *func_name() const { return "point"; } + String *val_str(String *); +}; + +class Item_func_spatial_decomp: public Item_geometry_func +{ + enum Functype decomp_func; +public: + Item_func_spatial_decomp(Item *a, Item_func::Functype ft) : + Item_geometry_func(a) { decomp_func = ft; } + const char *func_name() const + { + switch (decomp_func) + { + case SP_STARTPOINT: + return "startpoint"; + case SP_ENDPOINT: + return "endpoint"; + case SP_EXTERIORRING: + return "exteriorring"; + default: + DBUG_ASSERT(0); // Should never happened + return "spatial_decomp_unknown"; + } + } + String *val_str(String *); +}; + +class Item_func_spatial_decomp_n: public Item_geometry_func +{ + enum Functype decomp_func_n; +public: + Item_func_spatial_decomp_n(Item *a, Item *b, Item_func::Functype ft): + Item_geometry_func(a, b) { decomp_func_n = ft; } + const char *func_name() const + { + switch (decomp_func_n) + { + case SP_POINTN: + return "pointn"; + case SP_GEOMETRYN: + return "geometryn"; + case SP_INTERIORRINGN: + return "interiorringn"; + default: + DBUG_ASSERT(0); // Should never happened + return "spatial_decomp_n_unknown"; + } + } + String *val_str(String *); +}; + +class Item_func_spatial_collection: public Item_geometry_func +{ + String tmp_value; + enum Geometry::wkbType coll_type; + enum Geometry::wkbType item_type; +public: + Item_func_spatial_collection( + List<Item> &list, enum Geometry::wkbType ct, enum Geometry::wkbType it): + Item_geometry_func(list) + { + coll_type=ct; + item_type=it; + } + String *val_str(String *); + const char *func_name() const { return "multipoint"; } +}; + +/* + Spatial relations +*/ + +class Item_func_spatial_rel: public Item_bool_func2 +{ + enum Functype spatial_rel; +public: + Item_func_spatial_rel(Item *a,Item *b, enum Functype sp_rel) : + Item_bool_func2(a,b) { spatial_rel = sp_rel; } + longlong val_int(); + enum Functype functype() const + { + switch (spatial_rel) { + case SP_CONTAINS_FUNC: + return SP_WITHIN_FUNC; + case SP_WITHIN_FUNC: + return SP_CONTAINS_FUNC; + default: + return spatial_rel; + } + } + enum Functype rev_functype() const { return spatial_rel; } + const char *func_name() const + { + switch (spatial_rel) { + case SP_CONTAINS_FUNC: + return "contains"; + case SP_WITHIN_FUNC: + return "within"; + case SP_EQUALS_FUNC: + return "equals"; + case SP_DISJOINT_FUNC: + return "disjoint"; + case SP_INTERSECTS_FUNC: + return "intersects"; + case SP_TOUCHES_FUNC: + return "touches"; + case SP_CROSSES_FUNC: + return "crosses"; + case SP_OVERLAPS_FUNC: + return "overlaps"; + default: + DBUG_ASSERT(0); // Should never happened + return "sp_unknown"; + } + } + void print(String *str) { Item_func::print(str); } +}; + +class Item_func_isempty: public Item_bool_func +{ +public: + Item_func_isempty(Item *a): Item_bool_func(a) {} + longlong val_int(); + optimize_type select_optimize() const { return OPTIMIZE_NONE; } + const char *func_name() const { return "isempty"; } +}; + +class Item_func_issimple: public Item_bool_func +{ +public: + Item_func_issimple(Item *a): Item_bool_func(a) {} + longlong val_int(); + optimize_type select_optimize() const { return OPTIMIZE_NONE; } + const char *func_name() const { 
return "issimple"; } +}; + +class Item_func_isclosed: public Item_bool_func +{ +public: + Item_func_isclosed(Item *a): Item_bool_func(a) {} + longlong val_int(); + optimize_type select_optimize() const { return OPTIMIZE_NONE; } + const char *func_name() const { return "isclosed"; } +}; + +class Item_func_dimension: public Item_int_func +{ + String value; +public: + Item_func_dimension(Item *a): Item_int_func(a) {} + longlong val_int(); + const char *func_name() const { return "dimension"; } + void fix_length_and_dec() { max_length=10; } +}; + +class Item_func_x: public Item_real_func +{ + String value; +public: + Item_func_x(Item *a): Item_real_func(a) {} + double val(); + const char *func_name() const { return "x"; } +}; + + +class Item_func_y: public Item_real_func +{ + String value; +public: + Item_func_y(Item *a): Item_real_func(a) {} + double val(); + const char *func_name() const { return "y"; } +}; + + +class Item_func_numgeometries: public Item_int_func +{ + String value; +public: + Item_func_numgeometries(Item *a): Item_int_func(a) {} + longlong val_int(); + const char *func_name() const { return "numgeometries"; } + void fix_length_and_dec() { max_length=10; } +}; + + +class Item_func_numinteriorring: public Item_int_func +{ + String value; +public: + Item_func_numinteriorring(Item *a): Item_int_func(a) {} + longlong val_int(); + const char *func_name() const { return "numinteriorrings"; } + void fix_length_and_dec() { max_length=10; } +}; + + +class Item_func_numpoints: public Item_int_func +{ + String value; +public: + Item_func_numpoints(Item *a): Item_int_func(a) {} + longlong val_int(); + const char *func_name() const { return "numpoints"; } + void fix_length_and_dec() { max_length=10; } +}; + + +class Item_func_area: public Item_real_func +{ + String value; +public: + Item_func_area(Item *a): Item_real_func(a) {} + double val(); + const char *func_name() const { return "area"; } +}; + + +class Item_func_glength: public Item_real_func +{ + String value; +public: + Item_func_glength(Item *a): Item_real_func(a) {} + double val(); + const char *func_name() const { return "glength"; } +}; + + +class Item_func_srid: public Item_int_func +{ + String value; +public: + Item_func_srid(Item *a): Item_int_func(a) {} + longlong val_int(); + const char *func_name() const { return "srid"; } + void fix_length_and_dec() { max_length= 10; } +}; + +#define GEOM_NEW(obj_constructor) new obj_constructor + +#else /*HAVE_SPATIAL*/ + +#define GEOM_NEW(obj_constructor) NULL + +#endif + diff --git a/sql/item_row.cc b/sql/item_row.cc new file mode 100644 index 00000000000..493eefc9ff0 --- /dev/null +++ b/sql/item_row.cc @@ -0,0 +1,159 @@ +/* Copyright (C) 2000 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include "mysql_priv.h" + +/* + Row items used for comparing rows and IN operations on rows: + + (a, b, c) > (10, 10, 30) + (a, b, c) = (select c, d, e, from t1 where x=12) + (a, b, c) IN ((1,2,2), (3,4,5), (6,7,8) + (a, b, c) IN (select c, d, e, from t1) +*/ + +Item_row::Item_row(List<Item> &arg): + Item(), used_tables_cache(0), const_item_cache(1), with_null(0) +{ + + //TODO: think placing 2-3 component items in item (as it done for function) + if ((arg_count= arg.elements)) + items= (Item**) sql_alloc(sizeof(Item*)*arg_count); + else + items= 0; + List_iterator<Item> li(arg); + uint i= 0; + Item *item; + while ((item= li++)) + { + items[i]= item; + i++; + } +} + +void Item_row::illegal_method_call(const char *method) +{ + DBUG_ENTER("Item_row::illegal_method_call"); + DBUG_PRINT("error", ("!!! %s method was called for row item", method)); + DBUG_ASSERT(0); + my_error(ER_OPERAND_COLUMNS, MYF(0), 1); + DBUG_VOID_RETURN; +} + +bool Item_row::fix_fields(THD *thd, TABLE_LIST *tabl, Item **ref) +{ + DBUG_ASSERT(fixed == 0); + null_value= 0; + maybe_null= 0; + Item **arg, **arg_end; + for (arg= items, arg_end= items+arg_count; arg != arg_end ; arg++) + { + if ((*arg)->fix_fields(thd, tabl, arg)) + return 1; + // we can't assign 'item' before, because fix_fields() can change arg + Item *item= *arg; + used_tables_cache |= item->used_tables(); + const_item_cache&= item->const_item() && !with_null; + if (const_item_cache) + { + if (item->cols() > 1) + with_null|= item->null_inside(); + else + { + item->val_int(); + with_null|= item->null_value; + } + } + maybe_null|= item->maybe_null; + with_sum_func= with_sum_func || item->with_sum_func; + } + fixed= 1; + return 0; +} + + +void Item_row::cleanup() +{ + DBUG_ENTER("Item_row::cleanup"); + + Item::cleanup(); + /* Reset to the original values */ + used_tables_cache= 0; + const_item_cache= 1; + with_null= 0; + + DBUG_VOID_RETURN; +} + + +void Item_row::split_sum_func(THD *thd, Item **ref_pointer_array, + List<Item> &fields) +{ + Item **arg, **arg_end; + for (arg= items, arg_end= items+arg_count; arg != arg_end ; arg++) + (*arg)->split_sum_func2(thd, ref_pointer_array, fields, arg); +} + + +void Item_row::update_used_tables() +{ + used_tables_cache= 0; + const_item_cache= 1; + for (uint i= 0; i < arg_count; i++) + { + items[i]->update_used_tables(); + used_tables_cache|= items[i]->used_tables(); + const_item_cache&= items[i]->const_item(); + } +} + +bool Item_row::check_cols(uint c) +{ + if (c != arg_count) + { + my_error(ER_OPERAND_COLUMNS, MYF(0), c); + return 1; + } + return 0; +} + +void Item_row::print(String *str) +{ + str->append('('); + for (uint i= 0; i < arg_count; i++) + { + if (i) + str->append(','); + items[i]->print(str); + } + str->append(')'); +} + +bool Item_row::walk(Item_processor processor, byte *arg) +{ + for (uint i= 0; i < arg_count; i++) + { + if (items[i]->walk(processor, arg)) + return 1; + } + return (this->*processor)(arg); +} + +void Item_row::bring_value() +{ + for (uint i= 0; i < arg_count; i++) + items[i]->bring_value(); +} diff --git a/sql/item_row.h b/sql/item_row.h new file mode 100644 index 00000000000..28cb47b6815 --- /dev/null +++ b/sql/item_row.h @@ -0,0 +1,74 @@ +/* Copyright (C) 2000 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General 
Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +class Item_row: public Item +{ + Item **items; + table_map used_tables_cache; + uint arg_count; + bool const_item_cache; + bool with_null; +public: + Item_row(List<Item> &); + Item_row(Item_row *item): + Item(), + items(item->items), + used_tables_cache(item->used_tables_cache), + arg_count(item->arg_count), + const_item_cache(item->const_item_cache), + with_null(0) + {} + + enum Type type() const { return ROW_ITEM; }; + void illegal_method_call(const char *); + bool is_null() { return null_value; } + void make_field(Send_field *) + { + illegal_method_call((const char*)"make_field"); + }; + double val() + { + illegal_method_call((const char*)"val"); + return 0; + }; + longlong val_int() + { + illegal_method_call((const char*)"val_int"); + return 0; + }; + String *val_str(String *) + { + illegal_method_call((const char*)"val_str"); + return 0; + }; + bool fix_fields(THD *thd, TABLE_LIST *tables, Item **ref); + void cleanup(); + void split_sum_func(THD *thd, Item **ref_pointer_array, List<Item> &fields); + table_map used_tables() const { return used_tables_cache; }; + bool const_item() const { return const_item_cache; }; + enum Item_result result_type() const { return ROW_RESULT; } + void update_used_tables(); + void print(String *str); + + bool walk(Item_processor processor, byte *arg); + + uint cols() { return arg_count; } + Item* el(uint i) { return items[i]; } + Item** addr(uint i) { return items + i; } + bool check_cols(uint c); + bool null_inside() { return with_null; }; + void bring_value(); +}; diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index f070382e5c1..f0127ed2a5d 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -20,12 +20,11 @@ ** (This shouldn't be needed) */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_IMPLEMENTATION #pragma implementation // gcc: Class implementation #endif #include "mysql_priv.h" -#include "sql_acl.h" #include <m_ctype.h> #ifdef HAVE_OPENSSL #include <openssl/des.h> @@ -33,15 +32,27 @@ #include "md5.h" #include "sha1.h" #include "my_aes.h" +C_MODE_START +#include "../mysys/my_static.h" // For soundex_map +C_MODE_END -String empty_string(""); +String my_empty_string("",default_charset_info); + +static void my_coll_agg_error(DTCollation &c1, DTCollation &c2, + const char *fname) +{ + my_error(ER_CANT_AGGREGATE_2COLLATIONS,MYF(0), + c1.collation->name,c1.derivation_name(), + c2.collation->name,c2.derivation_name(), + fname); +} uint nr_of_decimals(const char *str) { if ((str=strchr(str,'.'))) { const char *start= ++str; - for (; isdigit(*str) ; str++) ; + for (; my_isdigit(system_charset_info,*str) ; str++) ; return (uint) (str-start); } return 0; @@ -49,21 +60,33 @@ uint nr_of_decimals(const char *str) double Item_str_func::val() { - String *res; - res=val_str(&str_value); - return res ? 
atof(res->c_ptr()) : 0.0; + DBUG_ASSERT(fixed == 1); + int err; + char buff[64]; + char *end_not_used; + String *res, tmp(buff,sizeof(buff), &my_charset_bin); + res= val_str(&tmp); + return res ? my_strntod(res->charset(), (char*) res->ptr(),res->length(), + &end_not_used, &err) : 0.0; } longlong Item_str_func::val_int() { - String *res; - res=val_str(&str_value); - return res ? strtoll(res->c_ptr(),NULL,10) : (longlong) 0; + DBUG_ASSERT(fixed == 1); + int err; + char buff[22]; + String *res, tmp(buff,sizeof(buff), &my_charset_bin); + res= val_str(&tmp); + return (res ? + my_strntoll(res->charset(), res->ptr(), res->length(), 10, NULL, + &err) : + (longlong) 0); } String *Item_func_md5::val_str(String *str) { + DBUG_ASSERT(fixed == 1); String * sptr= args[0]->val_str(str); if (sptr) { @@ -101,17 +124,20 @@ void Item_func_md5::fix_length_and_dec() String *Item_func_sha::val_str(String *str) { + DBUG_ASSERT(fixed == 1); String * sptr= args[0]->val_str(str); if (sptr) /* If we got value different from NULL */ { SHA1_CONTEXT context; /* Context used to generate SHA1 hash */ /* Temporary buffer to store 160bit digest */ uint8 digest[SHA1_HASH_SIZE]; - sha1_reset(&context); /* We do not have to check for error here */ + mysql_sha1_reset(&context); /* We do not have to check for error here */ /* No need to check error as the only case would be too long message */ - sha1_input(&context,(const unsigned char *) sptr->ptr(), sptr->length()); + mysql_sha1_input(&context, + (const unsigned char *) sptr->ptr(), sptr->length()); /* Ensure that memory is free and we got result */ - if (!( str->alloc(SHA1_HASH_SIZE*2) || (sha1_result(&context,digest)))) + if (!( str->alloc(SHA1_HASH_SIZE*2) || + (mysql_sha1_result(&context,digest)))) { sprintf((char *) str->ptr(), "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\ @@ -121,12 +147,12 @@ String *Item_func_sha::val_str(String *str) digest[8], digest[9], digest[10], digest[11], digest[12], digest[13], digest[14], digest[15], digest[16], digest[17], digest[18], digest[19]); - + str->length((uint) SHA1_HASH_SIZE*2); null_value=0; return str; } - } + } null_value=1; return 0; } @@ -141,8 +167,9 @@ void Item_func_sha::fix_length_and_dec() String *Item_func_aes_encrypt::val_str(String *str) { + DBUG_ASSERT(fixed == 1); char key_buff[80]; - String tmp_key_value(key_buff, sizeof(key_buff)); + String tmp_key_value(key_buff, sizeof(key_buff), system_charset_info); String *sptr= args[0]->val_str(str); // String to encrypt String *key= args[1]->val_str(&tmp_key_value); // key int aes_length; @@ -150,13 +177,13 @@ String *Item_func_aes_encrypt::val_str(String *str) { null_value=0; aes_length=my_aes_get_size(sptr->length()); // Calculate result length - + if (!str_value.alloc(aes_length)) // Ensure that memory is free { // finally encrypt directly to allocated buffer. 
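+      /*
+        my_aes_get_size() above reports the padded ciphertext length: AES
+        works on 16-byte blocks, so the ciphertext is a whole number of
+        blocks at least as long as the input (a 5-byte plaintext encrypts
+        to 16 bytes, for instance).  my_aes_encrypt() returns the number of
+        bytes it wrote, so any value other than aes_length below is
+        treated as failure.
+      */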
if (my_aes_encrypt(sptr->ptr(),sptr->length(), (char*) str_value.ptr(), key->ptr(), key->length()) == aes_length) - { + { // We got the expected result length str_value.length((uint) aes_length); return &str_value; @@ -176,8 +203,10 @@ void Item_func_aes_encrypt::fix_length_and_dec() String *Item_func_aes_decrypt::val_str(String *str) { + DBUG_ASSERT(fixed == 1); char key_buff[80]; - String tmp_key_value(key_buff, sizeof(key_buff)), *sptr, *key; + String tmp_key_value(key_buff, sizeof(key_buff), system_charset_info); + String *sptr, *key; DBUG_ENTER("Item_func_aes_decrypt::val_str"); sptr= args[0]->val_str(str); // String to decrypt @@ -193,7 +222,7 @@ String *Item_func_aes_decrypt::val_str(String *str) (char*) str_value.ptr(), key->ptr(), key->length()); if (length >= 0) // if we got correct data data - { + { str_value.length((uint) length); DBUG_RETURN(&str_value); } @@ -220,6 +249,7 @@ void Item_func_aes_decrypt::fix_length_and_dec() String *Item_func_concat::val_str(String *str) { + DBUG_ASSERT(fixed == 1); String *res,*res2,*use_as_buff; uint i; @@ -242,7 +272,13 @@ String *Item_func_concat::val_str(String *str) continue; if (res->length()+res2->length() > current_thd->variables.max_allowed_packet) - goto null; // Error check + { + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_ALLOWED_PACKET_OVERFLOWED, + ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), func_name(), + current_thd->variables.max_allowed_packet); + goto null; + } if (res->alloced_length() >= res->length()+res2->length()) { // Use old buffer res->append(*res2); @@ -256,7 +292,8 @@ String *Item_func_concat::val_str(String *str) str->copy(*res); str->append(*res2); } - res=str; + res= str; + use_as_buff= &tmp_value; } else if (res == &tmp_value) { @@ -299,6 +336,7 @@ String *Item_func_concat::val_str(String *str) } } } + res->set_charset(collation.collation); return res; null: @@ -309,17 +347,23 @@ null: void Item_func_concat::fix_length_and_dec() { - max_length=0; + ulonglong max_result_length= 0; + + if (agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV)) + return; + for (uint i=0 ; i < arg_count ; i++) - max_length+=args[i]->max_length; - if (max_length > MAX_BLOB_WIDTH) + max_result_length+= args[i]->max_length; + + if (max_result_length >= MAX_BLOB_WIDTH) { - max_length=MAX_BLOB_WIDTH; - maybe_null=1; + max_result_length= MAX_BLOB_WIDTH; + maybe_null= 1; } + max_length= (ulong) max_result_length; } -/* +/* Function des_encrypt() by tonu@spam.ee & monty Works only if compiled with OpenSSL library support. This returns a binary string where first character is CHAR(128 | key-number). @@ -330,7 +374,9 @@ void Item_func_concat::fix_length_and_dec() String *Item_func_des_encrypt::val_str(String *str) { + DBUG_ASSERT(fixed == 1); #ifdef HAVE_OPENSSL + uint code= ER_WRONG_PARAMETERS_TO_PROCEDURE; DES_cblock ivec; struct st_des_keyblock keyblock; struct st_des_keyschedule keyschedule; @@ -338,10 +384,10 @@ String *Item_func_des_encrypt::val_str(String *str) uint key_number, res_length, tail; String *res= args[0]->val_str(str); - if ((null_value=args[0]->null_value)) - return 0; + if ((null_value= args[0]->null_value)) + return 0; // ENCRYPT(NULL) == NULL if ((res_length=res->length()) == 0) - return &empty_string; + return &my_empty_string; if (arg_count == 1) { @@ -376,10 +422,10 @@ String *Item_func_des_encrypt::val_str(String *str) DES_set_key_unchecked(&keyblock.key3,&keyschedule.ks3); } - /* + /* The problem: DES algorithm requires original data to be in 8-bytes - chunks. 
Missing bytes get filled with '*'s and result of encryption - can be up to 8 bytes longer than original string. When decrypted, + chunks. Missing bytes get filled with '*'s and result of encryption + can be up to 8 bytes longer than original string. When decrypted, we do not know the size of original string :( We add one byte with value 0x1..0x8 as the last byte of the padded string marking change of string length. @@ -387,6 +433,7 @@ String *Item_func_des_encrypt::val_str(String *str) tail= (8-(res_length) % 8); // 1..8 marking extra length res_length+=tail; + code= ER_OUT_OF_RESOURCES; if (tail && res->append(append_str, tail) || tmp_value.alloc(res_length+1)) goto error; (*res)[res_length-1]=tail; // save extra length @@ -404,6 +451,13 @@ String *Item_func_des_encrypt::val_str(String *str) return &tmp_value; error: + push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_ERROR, + code, ER(code), + "des_encrypt"); +#else + push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_FEATURE_DISABLED, ER(ER_FEATURE_DISABLED), + "des_encrypt","--with-openssl"); #endif /* HAVE_OPENSSL */ null_value=1; return 0; @@ -412,17 +466,19 @@ error: String *Item_func_des_decrypt::val_str(String *str) { + DBUG_ASSERT(fixed == 1); #ifdef HAVE_OPENSSL + uint code= ER_WRONG_PARAMETERS_TO_PROCEDURE; DES_key_schedule ks1, ks2, ks3; DES_cblock ivec; struct st_des_keyblock keyblock; struct st_des_keyschedule keyschedule; String *res= args[0]->val_str(str); - uint length=res->length(),tail; + uint length,tail; - if ((null_value=args[0]->null_value)) + if ((null_value= args[0]->null_value)) return 0; - length=res->length(); + length= res->length(); if (length < 9 || (length % 8) != 1 || !((*res)[0] & 128)) return res; // Skip decryption if not encrypted @@ -432,6 +488,7 @@ String *Item_func_des_decrypt::val_str(String *str) // Check if automatic key and that we have privilege to uncompress using it if (!(current_thd->master_access & SUPER_ACL) || key_number > 9) goto error; + VOID(pthread_mutex_lock(&LOCK_des_key_file)); keyschedule= des_keyschedule[key_number]; VOID(pthread_mutex_unlock(&LOCK_des_key_file)); @@ -450,8 +507,9 @@ String *Item_func_des_decrypt::val_str(String *str) // Here we set all 64-bit keys (56 effective) one by one DES_set_key_unchecked(&keyblock.key1,&keyschedule.ks1); DES_set_key_unchecked(&keyblock.key2,&keyschedule.ks2); - DES_set_key_unchecked(&keyblock.key3,&keyschedule.ks3); + DES_set_key_unchecked(&keyblock.key3,&keyschedule.ks3); } + code= ER_OUT_OF_RESOURCES; if (tmp_value.alloc(length-1)) goto error; @@ -465,31 +523,40 @@ String *Item_func_des_decrypt::val_str(String *str) &ivec, FALSE); /* Restore old length of key */ if ((tail=(uint) (uchar) tmp_value[length-2]) > 8) - goto error; // Wrong key + goto wrong_key; // Wrong key tmp_value.length(length-1-tail); return &tmp_value; error: + push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_ERROR, + code, ER(code), + "des_decrypt"); +wrong_key: +#else + push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_FEATURE_DISABLED, ER(ER_FEATURE_DISABLED), + "des_decrypt","--with-openssl"); #endif /* HAVE_OPENSSL */ null_value=1; return 0; } -/* +/* concat with separator. First arg is the separator concat_ws takes at least two arguments. 
*/ String *Item_func_concat_ws::val_str(String *str) { + DBUG_ASSERT(fixed == 1); char tmp_str_buff[10]; - String tmp_sep_str(tmp_str_buff, sizeof(tmp_str_buff)), + String tmp_sep_str(tmp_str_buff, sizeof(tmp_str_buff),default_charset_info), *sep_str, *res, *res2,*use_as_buff; uint i; null_value=0; - if (!(sep_str= separator->val_str(&tmp_sep_str))) + if (!(sep_str= args[0]->val_str(&tmp_sep_str))) goto null; use_as_buff= &tmp_value; @@ -498,11 +565,11 @@ String *Item_func_concat_ws::val_str(String *str) // Skip until non-null argument is found. // If not, return the empty string - for (i=0; i < arg_count; i++) + for (i=1; i < arg_count; i++) if ((res= args[i]->val_str(str))) break; if (i == arg_count) - return &empty_string; + return &my_empty_string; for (i++; i < arg_count ; i++) { @@ -511,7 +578,13 @@ String *Item_func_concat_ws::val_str(String *str) if (res->length() + sep_str->length() + res2->length() > current_thd->variables.max_allowed_packet) - goto null; // Error check + { + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_ALLOWED_PACKET_OVERFLOWED, + ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), func_name(), + current_thd->variables.max_allowed_packet); + goto null; + } if (res->alloced_length() >= res->length() + sep_str->length() + res2->length()) { // Use old buffer @@ -578,6 +651,7 @@ String *Item_func_concat_ws::val_str(String *str) use_as_buff=str; } } + res->set_charset(collation.collation); return res; null: @@ -585,45 +659,35 @@ null: return 0; } -void Item_func_concat_ws::split_sum_func(List<Item> &fields) -{ - if (separator->with_sum_func && separator->type() != SUM_FUNC_ITEM) - separator->split_sum_func(fields); - else if (separator->used_tables() || separator->type() == SUM_FUNC_ITEM) - { - fields.push_front(separator); - separator= new Item_ref((Item**) fields.head_ref(), 0, separator->name); - } - Item_str_func::split_sum_func(fields); -} void Item_func_concat_ws::fix_length_and_dec() { - max_length=separator->max_length*(arg_count-1); - for (uint i=0 ; i < arg_count ; i++) - max_length+=args[i]->max_length; - if (max_length > MAX_BLOB_WIDTH) + ulonglong max_result_length; + + if (agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV)) + return; + + /* + arg_count cannot be less than 2, + it is done on parser level in sql_yacc.yy + so, (arg_count - 2) is safe here. 
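+    For example, CONCAT_WS(sep, a, b, c) has arg_count == 4: the separator
+    appears at most between the three value arguments, i.e. arg_count - 2
+    == 2 times, which is the multiplier used below.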
+ */ + max_result_length= (ulonglong) args[0]->max_length * (arg_count - 2); + for (uint i=1 ; i < arg_count ; i++) + max_result_length+=args[i]->max_length; + + if (max_result_length >= MAX_BLOB_WIDTH) { - max_length=MAX_BLOB_WIDTH; - maybe_null=1; + max_result_length= MAX_BLOB_WIDTH; + maybe_null= 1; } - used_tables_cache|= separator->used_tables(); - not_null_tables_cache&= separator->not_null_tables(); - const_item_cache&= separator->const_item(); - with_sum_func= with_sum_func || separator->with_sum_func; -} - -void Item_func_concat_ws::update_used_tables() -{ - Item_func::update_used_tables(); - separator->update_used_tables(); - used_tables_cache|=separator->used_tables(); - const_item_cache&=separator->const_item(); + max_length= (ulong) max_result_length; } String *Item_func_reverse::val_str(String *str) { + DBUG_ASSERT(fixed == 1); String *res = args[0]->val_str(str); char *ptr,*end; @@ -631,12 +695,12 @@ String *Item_func_reverse::val_str(String *str) return 0; /* An empty string is a special case as the string pointer may be null */ if (!res->length()) - return &empty_string; + return &my_empty_string; res=copy_if_not_alloced(str,res,res->length()); ptr = (char *) res->ptr(); end=ptr+res->length(); #ifdef USE_MB - if (use_mb(default_charset_info) && !binary) + if (use_mb(res->charset())) { String tmpstr; tmpstr.copy(*res); @@ -644,7 +708,7 @@ String *Item_func_reverse::val_str(String *str) register uint32 l; while (ptr < end) { - if ((l=my_ismbchar(default_charset_info, ptr,end))) + if ((l=my_ismbchar(res->charset(), ptr,end))) tmp-=l, memcpy(tmp,ptr,l), ptr+=l; else *--tmp=*ptr++; @@ -668,6 +732,7 @@ String *Item_func_reverse::val_str(String *str) void Item_func_reverse::fix_length_and_dec() { + collation.set(args[0]->collation); max_length = args[0]->max_length; } @@ -680,6 +745,7 @@ void Item_func_reverse::fix_length_and_dec() String *Item_func_replace::val_str(String *str) { + DBUG_ASSERT(fixed == 1); String *res,*res2,*res3; int offset; uint from_length,to_length; @@ -687,8 +753,7 @@ String *Item_func_replace::val_str(String *str) #ifdef USE_MB const char *ptr,*end,*strend,*search,*search_end; register uint32 l; - bool binary_str = (args[0]->binary || args[1]->binary || - !use_mb(default_charset_info)); + bool binary_cmp; #endif null_value=0; @@ -699,6 +764,12 @@ String *Item_func_replace::val_str(String *str) if (args[1]->null_value) goto null; + res->set_charset(collation.collation); + +#ifdef USE_MB + binary_cmp = ((res->charset()->state & MY_CS_BINSORT) || !use_mb(res->charset())); +#endif + if (res2->length() == 0) return res; #ifndef USE_MB @@ -706,7 +777,7 @@ String *Item_func_replace::val_str(String *str) return res; #else offset=0; - if (binary_str && (offset=res->strstr(*res2)) < 0) + if (binary_cmp && (offset=res->strstr(*res2)) < 0) return res; #endif if (!(res3=args[2]->val_str(&tmp_value2))) @@ -715,7 +786,7 @@ String *Item_func_replace::val_str(String *str) to_length= res3->length(); #ifdef USE_MB - if (!binary_str) + if (!binary_cmp) { search=res2->ptr(); search_end=search+from_length; @@ -730,11 +801,19 @@ redo: register char *i,*j; i=(char*) ptr+1; j=(char*) search+1; while (j != search_end) - if (*i++ != *j++) goto skipp; + if (*i++ != *j++) goto skip; offset= (int) (ptr-res->ptr()); if (res->length()-from_length + to_length > current_thd->variables.max_allowed_packet) + { + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_ALLOWED_PACKET_OVERFLOWED, + ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), + func_name(), + 
current_thd->variables.max_allowed_packet); + goto null; + } if (!alloced) { alloced=1; @@ -744,8 +823,8 @@ redo: offset+=(int) to_length; goto redo; } -skipp: - if ((l=my_ismbchar(default_charset_info, ptr,strend))) ptr+=l; +skip: + if ((l=my_ismbchar(res->charset(), ptr,strend))) ptr+=l; else ++ptr; } } @@ -755,7 +834,13 @@ skipp: { if (res->length()-from_length + to_length > current_thd->variables.max_allowed_packet) + { + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_ALLOWED_PACKET_OVERFLOWED, + ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), func_name(), + current_thd->variables.max_allowed_packet); goto null; + } if (!alloced) { alloced=1; @@ -775,23 +860,28 @@ null: void Item_func_replace::fix_length_and_dec() { - max_length=args[0]->max_length; + ulonglong max_result_length= args[0]->max_length; int diff=(int) (args[2]->max_length - args[1]->max_length); if (diff > 0 && args[1]->max_length) { // Calculate of maxreplaces - uint max_substrs= max_length/args[1]->max_length; - max_length+= max_substrs * (uint)diff; + ulonglong max_substrs= max_result_length/args[1]->max_length; + max_result_length+= max_substrs * (uint) diff; } - if (max_length > MAX_BLOB_WIDTH) + if (max_result_length >= MAX_BLOB_WIDTH) { - max_length=MAX_BLOB_WIDTH; - maybe_null=1; + max_result_length= MAX_BLOB_WIDTH; + maybe_null= 1; } + max_length= (ulong) max_result_length; + + if (agg_arg_charsets(collation, args, 3, MY_COLL_CMP_CONV)) + return; } String *Item_func_insert::val_str(String *str) { + DBUG_ASSERT(fixed == 1); String *res,*res2; uint start,length; @@ -803,20 +893,21 @@ String *Item_func_insert::val_str(String *str) if (args[0]->null_value || args[1]->null_value || args[2]->null_value || args[3]->null_value) goto null; /* purecov: inspected */ -#ifdef USE_MB - if (use_mb(default_charset_info) && !args[0]->binary) - { - start=res->charpos(start); - length=res->charpos(length,start); - } -#endif + start=res->charpos(start); + length=res->charpos(length,start); if (start > res->length()+1) return res; // Wrong param; skip insert if (length > res->length()-start) length=res->length()-start; if (res->length() - length + res2->length() > current_thd->variables.max_allowed_packet) - goto null; // OOM check + { + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_ALLOWED_PACKET_OVERFLOWED, + ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), + func_name(), current_thd->variables.max_allowed_packet); + goto null; + } res=copy_if_not_alloced(str,res,res->length()); res->replace(start,length,*res2); return res; @@ -828,17 +919,29 @@ null: void Item_func_insert::fix_length_and_dec() { - max_length=args[0]->max_length+args[3]->max_length; - if (max_length > MAX_BLOB_WIDTH) + Item *cargs[2]; + ulonglong max_result_length; + + cargs[0]= args[0]; + cargs[1]= args[3]; + if (agg_arg_charsets(collation, cargs, 2, MY_COLL_ALLOW_CONV)) + return; + args[0]= cargs[0]; + args[3]= cargs[1]; + max_result_length= ((ulonglong) args[0]->max_length+ + (ulonglong) args[3]->max_length); + if (max_result_length >= MAX_BLOB_WIDTH) { - max_length=MAX_BLOB_WIDTH; - maybe_null=1; + max_result_length= MAX_BLOB_WIDTH; + maybe_null= 1; } + max_length= (ulong) max_result_length; } String *Item_func_lcase::val_str(String *str) { + DBUG_ASSERT(fixed == 1); String *res; if (!(res=args[0]->val_str(str))) { @@ -854,6 +957,7 @@ String *Item_func_lcase::val_str(String *str) String *Item_func_ucase::val_str(String *str) { + DBUG_ASSERT(fixed == 1); String *res; if (!(res=args[0]->val_str(str))) { @@ -869,27 +973,21 @@ 
String *Item_func_ucase::val_str(String *str) String *Item_func_left::val_str(String *str) { + DBUG_ASSERT(fixed == 1); String *res =args[0]->val_str(str); long length =(long) args[1]->val_int(); + uint char_pos; if ((null_value=args[0]->null_value)) return 0; if (length <= 0) - return &empty_string; -#ifdef USE_MB - if (use_mb(default_charset_info) && !binary) - length = res->charpos(length); -#endif - if (res->length() > (ulong) length) - { // Safe even if const arg - if (!res->alloced_length()) - { // Don't change const str - str_value= *res; // Not malloced string - res= &str_value; - } - res->length((uint) length); - } - return res; + return &my_empty_string; + if (res->length() <= (uint) length || + res->length() <= (char_pos= res->charpos(length))) + return res; + + tmp_value.set(*res, 0, char_pos); + return &tmp_value; } @@ -898,7 +996,7 @@ void Item_str_func::left_right_max_length() max_length=args[0]->max_length; if (args[1]->const_item()) { - int length=(int) args[1]->val_int()*default_charset_info->mbmaxlen; + int length=(int) args[1]->val_int()*collation.collation->mbmaxlen; if (length <= 0) max_length=0; else @@ -909,63 +1007,56 @@ void Item_str_func::left_right_max_length() void Item_func_left::fix_length_and_dec() { + collation.set(args[0]->collation); left_right_max_length(); } String *Item_func_right::val_str(String *str) { + DBUG_ASSERT(fixed == 1); String *res =args[0]->val_str(str); long length =(long) args[1]->val_int(); if ((null_value=args[0]->null_value)) return 0; /* purecov: inspected */ if (length <= 0) - return &empty_string; /* purecov: inspected */ + return &my_empty_string; /* purecov: inspected */ if (res->length() <= (uint) length) return res; /* purecov: inspected */ -#ifdef USE_MB - if (use_mb(default_charset_info) && !binary) - { - uint start=res->numchars()-(uint) length; - if (start<=0) return res; - start=res->charpos(start); - tmp_value.set(*res,start,res->length()-start); - } - else -#endif - { - tmp_value.set(*res,(res->length()- (uint) length),(uint) length); - } + + uint start=res->numchars(); + if (start <= (uint) length) + return res; + start=res->charpos(start - (uint) length); + tmp_value.set(*res,start,res->length()-start); return &tmp_value; } void Item_func_right::fix_length_and_dec() { + collation.set(args[0]->collation); left_right_max_length(); } String *Item_func_substr::val_str(String *str) { + DBUG_ASSERT(fixed == 1); String *res = args[0]->val_str(str); - int32 start = (int32) args[1]->val_int()-1; + int32 start = (int32) args[1]->val_int(); int32 length = arg_count == 3 ? (int32) args[2]->val_int() : INT_MAX32; int32 tmp_length; if ((null_value=(args[0]->null_value || args[1]->null_value || (arg_count == 3 && args[2]->null_value)))) return 0; /* purecov: inspected */ -#ifdef USE_MB - if (use_mb(default_charset_info) && !binary) - { - start=res->charpos(start); - length=res->charpos(length,start); - } -#endif + start= (int32)((start < 0) ? res->numchars() + start : start -1); + start=res->charpos(start); + length=res->charpos(length,start); if (start < 0 || (uint) start+1 > res->length() || length <= 0) - return &empty_string; + return &my_empty_string; tmp_length=(int32) res->length()-start; length=min(length,tmp_length); @@ -981,9 +1072,11 @@ void Item_func_substr::fix_length_and_dec() { max_length=args[0]->max_length; + collation.set(args[0]->collation); if (args[1]->const_item()) { - int32 start=(int32) args[1]->val_int()-1; + int32 start= (int32) args[1]->val_int(); + start= (int32)((start < 0) ? 
max_length + start : start - 1); if (start < 0 || start >= (int32) max_length) max_length=0; /* purecov: inspected */ else @@ -991,7 +1084,7 @@ void Item_func_substr::fix_length_and_dec() } if (arg_count == 3 && args[2]->const_item()) { - int32 length= (int32) args[2]->val_int() * default_charset_info->mbmaxlen; + int32 length= (int32) args[2]->val_int() * collation.collation->mbmaxlen; if (length <= 0) max_length=0; /* purecov: inspected */ else @@ -1000,8 +1093,18 @@ void Item_func_substr::fix_length_and_dec() } +void Item_func_substr_index::fix_length_and_dec() +{ + max_length= args[0]->max_length; + + if (agg_arg_charsets(collation, args, 2, MY_COLL_CMP_CONV)) + return; +} + + String *Item_func_substr_index::val_str(String *str) { + DBUG_ASSERT(fixed == 1); String *res =args[0]->val_str(str); String *delimeter =args[1]->val_str(&tmp_value); int32 count = (int32) args[2]->val_int(); @@ -1015,10 +1118,12 @@ String *Item_func_substr_index::val_str(String *str) null_value=0; uint delimeter_length=delimeter->length(); if (!res->length() || !delimeter_length || !count) - return &empty_string; // Wrong parameters + return &my_empty_string; // Wrong parameters + + res->set_charset(collation.collation); #ifdef USE_MB - if (use_mb(default_charset_info) && !binary) + if (use_mb(res->charset())) { const char *ptr=res->ptr(); const char *strend = ptr+res->length(); @@ -1036,14 +1141,14 @@ String *Item_func_substr_index::val_str(String *str) register char *i,*j; i=(char*) ptr+1; j=(char*) search+1; while (j != search_end) - if (*i++ != *j++) goto skipp; + if (*i++ != *j++) goto skip; if (pass==0) ++n; else if (!--c) break; ptr+=delimeter_length; continue; } - skipp: - if ((l=my_ismbchar(default_charset_info, ptr,strend))) ptr+=l; + skip: + if ((l=my_ismbchar(res->charset(), ptr,strend))) ptr+=l; else ++ptr; } /* either not found or got total number when count<0 */ if (pass == 0) /* count<0 */ @@ -1123,12 +1228,13 @@ String *Item_func_substr_index::val_str(String *str) String *Item_func_ltrim::val_str(String *str) { + DBUG_ASSERT(fixed == 1); String *res =args[0]->val_str(str); if ((null_value=args[0]->null_value)) return 0; /* purecov: inspected */ char buff[MAX_FIELD_WIDTH]; - String tmp(buff,sizeof(buff)); - String *remove_str=args[1]->val_str(&tmp); + String tmp(buff,sizeof(buff),res->charset()); + String *remove_str= (arg_count==2) ? args[1]->val_str(&tmp) : &remove; uint remove_length; LINT_INIT(remove_length); @@ -1161,12 +1267,13 @@ String *Item_func_ltrim::val_str(String *str) String *Item_func_rtrim::val_str(String *str) { + DBUG_ASSERT(fixed == 1); String *res =args[0]->val_str(str); if ((null_value=args[0]->null_value)) return 0; /* purecov: inspected */ char buff[MAX_FIELD_WIDTH]; - String tmp(buff,sizeof(buff)); - String *remove_str=args[1]->val_str(&tmp); + String tmp(buff,sizeof(buff),res->charset()); + String *remove_str= (arg_count==2) ? 
args[1]->val_str(&tmp) : &remove; uint remove_length; LINT_INIT(remove_length); @@ -1184,11 +1291,11 @@ String *Item_func_rtrim::val_str(String *str) { char chr=(*remove_str)[0]; #ifdef USE_MB - if (use_mb(default_charset_info) && !binary) + if (use_mb(res->charset())) { while (ptr < end) { - if ((l=my_ismbchar(default_charset_info, ptr,end))) ptr+=l,p=ptr; + if ((l=my_ismbchar(res->charset(), ptr,end))) ptr+=l,p=ptr; else ++ptr; } ptr=p; @@ -1201,12 +1308,12 @@ String *Item_func_rtrim::val_str(String *str) { const char *r_ptr=remove_str->ptr(); #ifdef USE_MB - if (use_mb(default_charset_info) && !binary) + if (use_mb(res->charset())) { loop: while (ptr + remove_length < end) { - if ((l=my_ismbchar(default_charset_info, ptr,end))) ptr+=l; + if ((l=my_ismbchar(res->charset(), ptr,end))) ptr+=l; else ++ptr; } if (ptr + remove_length == end && !memcmp(ptr,r_ptr,remove_length)) @@ -1233,14 +1340,24 @@ String *Item_func_rtrim::val_str(String *str) String *Item_func_trim::val_str(String *str) { + DBUG_ASSERT(fixed == 1); String *res =args[0]->val_str(str); if ((null_value=args[0]->null_value)) return 0; /* purecov: inspected */ char buff[MAX_FIELD_WIDTH]; - String tmp(buff,sizeof(buff)); - String *remove_str=args[1]->val_str(&tmp); + String tmp(buff,sizeof(buff),res->charset()); uint remove_length; LINT_INIT(remove_length); + String *remove_str; /* The string to remove from res. */ + + if (arg_count == 2) + { + remove_str= args[1]->val_str(&tmp); + if ((null_value= args[1]->null_value)) + return 0; + } + else + remove_str= &remove; /* Default value. */ if (!remove_str || (remove_length=remove_str->length()) == 0 || remove_length > res->length()) @@ -1252,14 +1369,14 @@ String *Item_func_trim::val_str(String *str) while (ptr+remove_length <= end && !memcmp(ptr,r_ptr,remove_length)) ptr+=remove_length; #ifdef USE_MB - if (use_mb(default_charset_info) && !binary) + if (use_mb(res->charset())) { char *p=ptr; register uint32 l; loop: while (ptr + remove_length < end) { - if ((l=my_ismbchar(default_charset_info, ptr,end))) ptr+=l; + if ((l=my_ismbchar(res->charset(), ptr,end))) ptr+=l; else ++ptr; } if (ptr + remove_length == end && !memcmp(ptr,r_ptr,remove_length)) @@ -1283,23 +1400,80 @@ String *Item_func_trim::val_str(String *str) return &tmp_value; } +void Item_func_trim::fix_length_and_dec() +{ + max_length= args[0]->max_length; + if (arg_count == 1) + { + collation.set(args[0]->collation); + remove.set_charset(collation.collation); + remove.set_ascii(" ",1); + } + else + { + Item *cargs[2]; + cargs[0]= args[1]; + cargs[1]= args[0]; + if (agg_arg_charsets(collation, cargs, 2, MY_COLL_CMP_CONV)) + return; + args[0]= cargs[1]; + args[1]= cargs[0]; + } +} + + +/* Item_func_password */ String *Item_func_password::val_str(String *str) { - String *res =args[0]->val_str(str); + DBUG_ASSERT(fixed == 1); + String *res= args[0]->val_str(str); + if ((null_value=args[0]->null_value)) + return 0; + if (res->length() == 0) + return &my_empty_string; + make_scrambled_password(tmp_value, res->c_ptr()); + str->set(tmp_value, SCRAMBLED_PASSWORD_CHAR_LENGTH, res->charset()); + return str; +} + +char *Item_func_password::alloc(THD *thd, const char *password) +{ + char *buff= (char *) thd->alloc(SCRAMBLED_PASSWORD_CHAR_LENGTH+1); + if (buff) + make_scrambled_password(buff, password); + return buff; +} + +/* Item_func_old_password */ + +String *Item_func_old_password::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + String *res= args[0]->val_str(str); if ((null_value=args[0]->null_value)) return 0; if 
(res->length() == 0) - return &empty_string; - make_scrambled_password(tmp_value,res->c_ptr()); - str->set(tmp_value,16); + return &my_empty_string; + make_scrambled_password_323(tmp_value, res->c_ptr()); + str->set(tmp_value, SCRAMBLED_PASSWORD_CHAR_LENGTH_323, res->charset()); return str; } +char *Item_func_old_password::alloc(THD *thd, const char *password) +{ + char *buff= (char *) thd->alloc(SCRAMBLED_PASSWORD_CHAR_LENGTH_323+1); + if (buff) + make_scrambled_password_323(buff, password); + return buff; +} + + #define bin_to_ascii(c) ((c)>=38?((c)-38+'a'):(c)>=12?((c)-12+'A'):(c)+'.') String *Item_func_encrypt::val_str(String *str) { + DBUG_ASSERT(fixed == 1); String *res =args[0]->val_str(str); #ifdef HAVE_CRYPT @@ -1307,7 +1481,7 @@ String *Item_func_encrypt::val_str(String *str) if ((null_value=args[0]->null_value)) return 0; if (res->length() == 0) - return &empty_string; + return &my_empty_string; if (arg_count == 1) { // generate random salt @@ -1325,8 +1499,14 @@ String *Item_func_encrypt::val_str(String *str) salt_ptr= salt_str->c_ptr(); } pthread_mutex_lock(&LOCK_crypt); - char *tmp=crypt(res->c_ptr(),salt_ptr); - str->set(tmp,(uint) strlen(tmp)); + char *tmp= crypt(res->c_ptr(),salt_ptr); + if (!tmp) + { + pthread_mutex_unlock(&LOCK_crypt); + null_value= 1; + return 0; + } + str->set(tmp,(uint) strlen(tmp),res->charset()); str->copy(); pthread_mutex_unlock(&LOCK_crypt); return str; @@ -1340,10 +1520,12 @@ void Item_func_encode::fix_length_and_dec() { max_length=args[0]->max_length; maybe_null=args[0]->maybe_null; + collation.set(&my_charset_bin); } String *Item_func_encode::val_str(String *str) { + DBUG_ASSERT(fixed == 1); String *res; if (!(res=args[0]->val_str(str))) { @@ -1354,11 +1536,13 @@ String *Item_func_encode::val_str(String *str) res=copy_if_not_alloced(str,res,res->length()); sql_crypt.init(); sql_crypt.encode((char*) res->ptr(),res->length()); + res->set_charset(&my_charset_bin); return res; } String *Item_func_decode::val_str(String *str) { + DBUG_ASSERT(fixed == 1); String *res; if (!(res=args[0]->val_str(str))) { @@ -1373,47 +1557,87 @@ String *Item_func_decode::val_str(String *str) } +Item *Item_func_sysconst::safe_charset_converter(CHARSET_INFO *tocs) +{ + Item_string *conv; + uint conv_errors; + String tmp, cstr, *ostr= val_str(&tmp); + cstr.copy(ostr->ptr(), ostr->length(), ostr->charset(), tocs, &conv_errors); + if (conv_errors || !(conv= new Item_string(cstr.ptr(), cstr.length(), + cstr.charset(), + collation.derivation))) + { + return NULL; + } + conv->str_value.copy(); + conv->str_value.shrink_to_length(); + return conv; +} + + String *Item_func_database::val_str(String *str) { - if (!current_thd->db) - str->length(0); + DBUG_ASSERT(fixed == 1); + THD *thd= current_thd; + if (!thd->db) + { + null_value= 1; + return 0; + } else - str->set((const char*) current_thd->db,(uint) strlen(current_thd->db)); + str->copy((const char*) thd->db,(uint) strlen(thd->db),system_charset_info); return str; } +// TODO: make USER() replicate properly (currently it is replicated to "") + String *Item_func_user::val_str(String *str) { - // TODO: make USER() replicate properly (currently it is replicated to "") - THD *thd=current_thd; - if (!(thd->user) || // for system threads (e.g. replication SQL thread) - str->copy((const char*) thd->user,(uint) strlen(thd->user)) || - str->append('@') || - str->append(thd->host ? thd->host : thd->ip ? 
thd->ip : "")) - return &empty_string; + DBUG_ASSERT(fixed == 1); + THD *thd=current_thd; + CHARSET_INFO *cs= system_charset_info; + const char *host= thd->host_or_ip; + uint res_length; + + // For system threads (e.g. replication SQL thread) user may be empty + if (!thd->user) + return &my_empty_string; + res_length= (strlen(thd->user)+strlen(host)+2) * cs->mbmaxlen; + + if (str->alloc(res_length)) + { + null_value=1; + return 0; + } + res_length=cs->cset->snprintf(cs, (char*)str->ptr(), res_length, "%s@%s", + thd->user, host); + str->length(res_length); + str->set_charset(cs); return str; } void Item_func_soundex::fix_length_and_dec() { + collation.set(args[0]->collation); max_length=args[0]->max_length; set_if_bigger(max_length,4); } - /* - If alpha, map input letter to soundex code. - If not alpha and remove_garbage is set then skip to next char - else return 0 - */ +/* + If alpha, map input letter to soundex code. + If not alpha and remove_garbage is set then skip to next char + else return 0 +*/ -extern "C" { -extern const char *soundex_map; // In mysys/static.c +static char soundex_toupper(char ch) +{ + return (ch >= 'a' && ch <= 'z') ? ch - 'a' + 'A' : ch; } static char get_scode(char *ptr) { - uchar ch=toupper(*ptr); + uchar ch= soundex_toupper(*ptr); if (ch < 'A' || ch > 'Z') { // Thread extended alfa (country spec) @@ -1425,8 +1649,11 @@ static char get_scode(char *ptr) String *Item_func_soundex::val_str(String *str) { + DBUG_ASSERT(fixed == 1); String *res =args[0]->val_str(str); char last_ch,ch; + CHARSET_INFO *cs= collation.collation; + if ((null_value=args[0]->null_value)) return 0; /* purecov: inspected */ @@ -1434,12 +1661,13 @@ String *Item_func_soundex::val_str(String *str) return str; /* purecov: inspected */ char *to= (char *) tmp_value.ptr(); char *from= (char *) res->ptr(), *end=from+res->length(); - - while (from != end && !isalpha(*from)) // Skip pre-space + tmp_value.set_charset(cs); + + while (from != end && !my_isalpha(cs,*from)) // Skip pre-space from++; /* purecov: inspected */ if (from == end) - return &empty_string; // No alpha characters. - *to++ = toupper(*from); // Copy first letter + return &my_empty_string; // No alpha characters. + *to++ = soundex_toupper(*from); // Copy first letter last_ch = get_scode(from); // code of the first letter // for the first 'double-letter check. // Loop on input letters until @@ -1447,7 +1675,7 @@ String *Item_func_soundex::val_str(String *str) // letter code count = 3 for (from++ ; from < end ; from++) { - if (!isalpha(*from)) + if (!my_isalpha(cs,*from)) continue; ch=get_scode(from); if ((ch != '0') && (ch != last_ch)) // if not skipped or double @@ -1475,20 +1703,25 @@ Item_func_format::Item_func_format(Item *org,int dec) :Item_str_func(org) } +/* + TODO: This needs to be fixed for multi-byte character set where numbers + are stored in more than one byte +*/ + String *Item_func_format::val_str(String *str) { + DBUG_ASSERT(fixed == 1); double nr =args[0]->val(); - uint32 length,str_length; int diff; + uint32 length, str_length; uint dec; if ((null_value=args[0]->null_value)) return 0; /* purecov: inspected */ dec= decimals ? 
decimals+1 : 0; - str->set(nr,decimals); -#ifdef HAVE_ISNAN + /* Here default_charset() is right as this is not an automatic conversion */ + str->set(nr,decimals, default_charset()); if (isnan(nr)) return str; -#endif str_length=str->length(); if (nr < 0) str_length--; // Don't count sign @@ -1505,9 +1738,12 @@ String *Item_func_format::val_str(String *str) pos[0]= pos[-diff]; while (diff) { - pos[0]=pos[-diff]; pos--; - pos[0]=pos[-diff]; pos--; - pos[0]=pos[-diff]; pos--; + *pos= *(pos - diff); + pos--; + *pos= *(pos - diff); + pos--; + *pos= *(pos - diff); + pos--; pos[0]=','; pos--; diff--; @@ -1517,106 +1753,97 @@ String *Item_func_format::val_str(String *str) } +void Item_func_format::print(String *str) +{ + str->append("format(", 7); + args[0]->print(str); + str->append(','); + // my_charset_bin is good enough for numbers + char buffer[20]; + String st(buffer, sizeof(buffer), &my_charset_bin); + st.set((ulonglong)decimals, &my_charset_bin); + str->append(st); + str->append(')'); +} + void Item_func_elt::fix_length_and_dec() { max_length=0; decimals=0; -#if MYSQL_VERSION_ID < 40100 - for (uint i= 0; i < arg_count ; i++) -#else - for (uint i= 1; i < arg_count ; i++) -#endif + + if (agg_arg_charsets(collation, args+1, arg_count-1, MY_COLL_ALLOW_CONV)) + return; + + for (uint i= 1 ; i < arg_count ; i++) { set_if_bigger(max_length,args[i]->max_length); set_if_bigger(decimals,args[i]->decimals); } maybe_null=1; // NULL if wrong first arg - with_sum_func= with_sum_func || item->with_sum_func; - used_tables_cache|= item->used_tables(); - not_null_tables_cache&= item->not_null_tables(); - const_item_cache&= item->const_item(); -} - - -void Item_func_elt::split_sum_func(List<Item> &fields) -{ - if (item->with_sum_func && item->type() != SUM_FUNC_ITEM) - item->split_sum_func(fields); - else if (item->used_tables() || item->type() == SUM_FUNC_ITEM) - { - fields.push_front(item); - item= new Item_ref((Item**) fields.head_ref(), 0, item->name); - } - Item_str_func::split_sum_func(fields); -} - - -void Item_func_elt::update_used_tables() -{ - Item_func::update_used_tables(); - item->update_used_tables(); - used_tables_cache|=item->used_tables(); - const_item_cache&=item->const_item(); } double Item_func_elt::val() { + DBUG_ASSERT(fixed == 1); uint tmp; null_value=1; - if ((tmp=(uint) item->val_int()) == 0 || tmp > arg_count) + if ((tmp=(uint) args[0]->val_int()) == 0 || tmp >= arg_count) return 0.0; - - double result= args[tmp-1]->val(); - null_value= args[tmp-1]->null_value; + double result= args[tmp]->val(); + null_value= args[tmp]->null_value; return result; } longlong Item_func_elt::val_int() { + DBUG_ASSERT(fixed == 1); uint tmp; null_value=1; - if ((tmp=(uint) item->val_int()) == 0 || tmp > arg_count) + if ((tmp=(uint) args[0]->val_int()) == 0 || tmp >= arg_count) return 0; - - longlong result= args[tmp-1]->val_int(); - null_value= args[tmp-1]->null_value; + + longlong result= args[tmp]->val_int(); + null_value= args[tmp]->null_value; return result; } String *Item_func_elt::val_str(String *str) { + DBUG_ASSERT(fixed == 1); uint tmp; null_value=1; - if ((tmp=(uint) item->val_int()) == 0 || tmp > arg_count) + if ((tmp=(uint) args[0]->val_int()) == 0 || tmp >= arg_count) return NULL; - String *result= args[tmp-1]->val_str(str); - null_value= args[tmp-1]->null_value; + String *result= args[tmp]->val_str(str); + if (result) + result->set_charset(collation.collation); + null_value= args[tmp]->null_value; return result; } -void Item_func_make_set::split_sum_func(List<Item> &fields) +void 
Item_func_make_set::split_sum_func(THD *thd, Item **ref_pointer_array, + List<Item> &fields) { - if (item->with_sum_func && item->type() != SUM_FUNC_ITEM) - item->split_sum_func(fields); - else if (item->used_tables() || item->type() == SUM_FUNC_ITEM) - { - fields.push_front(item); - item= new Item_ref((Item**) fields.head_ref(), 0, item->name); - } - Item_str_func::split_sum_func(fields); + item->split_sum_func2(thd, ref_pointer_array, fields, &item); + Item_str_func::split_sum_func(thd, ref_pointer_array, fields); } void Item_func_make_set::fix_length_and_dec() { max_length=arg_count-1; - for (uint i=1 ; i < arg_count ; i++) + + if (agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV)) + return; + + for (uint i=0 ; i < arg_count ; i++) max_length+=args[i]->max_length; + used_tables_cache|= item->used_tables(); not_null_tables_cache&= item->not_null_tables(); const_item_cache&= item->const_item(); @@ -1635,10 +1862,11 @@ void Item_func_make_set::update_used_tables() String *Item_func_make_set::val_str(String *str) { + DBUG_ASSERT(fixed == 1); ulonglong bits; bool first_found=0; Item **ptr=args; - String *result=&empty_string; + String *result=&my_empty_string; bits=item->val_int(); if ((null_value=item->null_value)) @@ -1662,7 +1890,7 @@ String *Item_func_make_set::val_str(String *str) else { if (tmp_str.copy(*res)) // Don't use 'str' - return &empty_string; + return &my_empty_string; result= &tmp_str; } } @@ -1672,11 +1900,11 @@ String *Item_func_make_set::val_str(String *str) { // Copy data to tmp_str if (tmp_str.alloc(result->length()+res->length()+1) || tmp_str.copy(*result)) - return &empty_string; + return &my_empty_string; result= &tmp_str; } if (tmp_str.append(',') || tmp_str.append(*res)) - return &empty_string; + return &my_empty_string; } } } @@ -1685,15 +1913,30 @@ String *Item_func_make_set::val_str(String *str) } +void Item_func_make_set::print(String *str) +{ + str->append("make_set(", 9); + item->print(str); + if (arg_count) + { + str->append(','); + print_args(str, 0); + } + str->append(')'); +} + + String *Item_func_char::val_str(String *str) { + DBUG_ASSERT(fixed == 1); str->length(0); for (uint i=0 ; i < arg_count ; i++) { int32 num=(int32) args[i]->val_int(); if (!args[i]->null_value) + { #ifdef USE_MB - if (use_mb(default_charset_info)) + if (use_mb(collation.collation)) { if (num&0xFF000000L) { str->append((char)(num>>24)); @@ -1701,13 +1944,15 @@ String *Item_func_char::val_str(String *str) } else if (num&0xFF0000L) { b2: str->append((char)(num>>16)); goto b1; - } else if (num&0xFF00L) { + } else if (num&0xFF00L) { b1: str->append((char)(num>>8)); } } #endif str->append((char)num); + } } + str->set_charset(collation.collation); str->realloc(str->length()); // Add end 0 (for Purify) return str; } @@ -1737,19 +1982,22 @@ inline String* alloc_buffer(String *res,String *str,String *tmp_value, void Item_func_repeat::fix_length_and_dec() { + collation.set(args[0]->collation); if (args[1]->const_item()) { - max_length=(long) (args[0]->max_length * args[1]->val_int()); - if (max_length >= MAX_BLOB_WIDTH) + ulonglong max_result_length= ((ulonglong) args[0]->max_length * + args[1]->val_int()); + if (max_result_length >= MAX_BLOB_WIDTH) { - max_length=MAX_BLOB_WIDTH; - maybe_null=1; + max_result_length= MAX_BLOB_WIDTH; + maybe_null= 1; } + max_length= (ulong) max_result_length; } else { - max_length=MAX_BLOB_WIDTH; - maybe_null=1; + max_length= MAX_BLOB_WIDTH; + maybe_null= 1; } } @@ -1760,6 +2008,7 @@ void Item_func_repeat::fix_length_and_dec() String 
*Item_func_repeat::val_str(String *str) { + DBUG_ASSERT(fixed == 1); uint length,tot_length; char *to; long count= (long) args[1]->val_int(); @@ -1769,13 +2018,19 @@ String *Item_func_repeat::val_str(String *str) goto err; // string and/or delim are null null_value=0; if (count <= 0) // For nicer SQL code - return &empty_string; + return &my_empty_string; if (count == 1) // To avoid reallocs return res; length=res->length(); // Safe length check if (length > current_thd->variables.max_allowed_packet/count) - goto err; // Probably an error + { + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_ALLOWED_PACKET_OVERFLOWED, + ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), + func_name(), current_thd->variables.max_allowed_packet); + goto err; + } tot_length= length*(uint) count; if (!(res= alloc_buffer(res,str,&tmp_value,tot_length))) goto err; @@ -1796,56 +2051,83 @@ err: void Item_func_rpad::fix_length_and_dec() { + Item *cargs[2]; + + cargs[0]= args[0]; + cargs[1]= args[2]; + if (agg_arg_charsets(collation, cargs, 2, MY_COLL_ALLOW_CONV)) + return; + args[0]= cargs[0]; + args[2]= cargs[1]; if (args[1]->const_item()) { - uint32 length= (uint32) args[1]->val_int(); - max_length=max(args[0]->max_length,length); - if (max_length >= MAX_BLOB_WIDTH) + ulonglong length= ((ulonglong) args[1]->val_int() * + collation.collation->mbmaxlen); + if (length >= MAX_BLOB_WIDTH) { - max_length=MAX_BLOB_WIDTH; - maybe_null=1; + length= MAX_BLOB_WIDTH; + maybe_null= 1; } + max_length= (ulong) length; } else { - max_length=MAX_BLOB_WIDTH; - maybe_null=1; + max_length= MAX_BLOB_WIDTH; + maybe_null= 1; } } String *Item_func_rpad::val_str(String *str) { - uint32 res_length,length_pad; + DBUG_ASSERT(fixed == 1); + uint32 res_byte_length,res_char_length,pad_char_length,pad_byte_length; char *to; const char *ptr_pad; int32 count= (int32) args[1]->val_int(); + int32 byte_count= count * collation.collation->mbmaxlen; String *res =args[0]->val_str(str); String *rpad = args[2]->val_str(&rpad_str); if (!res || args[1]->null_value || !rpad || count < 0) goto err; null_value=0; - if (count <= (int32) (res_length=res->length())) + if (count <= (int32) (res_char_length=res->numchars())) { // String to pad is big enough - res->length(count); // Shorten result if longer + res->length(res->charpos(count)); // Shorten result if longer return (res); } - length_pad= rpad->length(); - if ((ulong) count > current_thd->variables.max_allowed_packet || - args[2]->null_value || !length_pad) + pad_char_length= rpad->numchars(); + if ((ulong) byte_count > current_thd->variables.max_allowed_packet) + { + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_ALLOWED_PACKET_OVERFLOWED, + ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), + func_name(), current_thd->variables.max_allowed_packet); goto err; - if (!(res= alloc_buffer(res,str,&tmp_value,count))) + } + if(args[2]->null_value || !pad_char_length) + goto err; + res_byte_length= res->length(); /* Must be done before alloc_buffer */ + if (!(res= alloc_buffer(res,str,&tmp_value,byte_count))) goto err; - to= (char*) res->ptr()+res_length; + to= (char*) res->ptr()+res_byte_length; ptr_pad=rpad->ptr(); - for (count-= res_length; (uint32) count > length_pad; count-= length_pad) + pad_byte_length= rpad->length(); + count-= res_char_length; + for ( ; (uint32) count > pad_char_length; count-= pad_char_length) { - memcpy(to,ptr_pad,length_pad); - to+= length_pad; + memcpy(to,ptr_pad,pad_byte_length); + to+= pad_byte_length; } - memcpy(to,ptr_pad,(size_t) count); + if 
(count) + { + pad_byte_length= rpad->charpos(count); + memcpy(to,ptr_pad,(size_t) pad_byte_length); + to+= pad_byte_length; + } + res->length(to- (char*) res->ptr()); return (res); err: @@ -1856,88 +2138,98 @@ String *Item_func_rpad::val_str(String *str) void Item_func_lpad::fix_length_and_dec() { + Item *cargs[2]; + cargs[0]= args[0]; + cargs[1]= args[2]; + if (agg_arg_charsets(collation, cargs, 2, MY_COLL_ALLOW_CONV)) + return; + args[0]= cargs[0]; + args[2]= cargs[1]; + if (args[1]->const_item()) { - uint32 length= (uint32) args[1]->val_int(); - max_length=max(args[0]->max_length,length); - if (max_length >= MAX_BLOB_WIDTH) + ulonglong length= ((ulonglong) args[1]->val_int() * + collation.collation->mbmaxlen); + if (length >= MAX_BLOB_WIDTH) { - max_length=MAX_BLOB_WIDTH; - maybe_null=1; + length= MAX_BLOB_WIDTH; + maybe_null= 1; } + max_length= (ulong) length; } else { - max_length=MAX_BLOB_WIDTH; - maybe_null=1; + max_length= MAX_BLOB_WIDTH; + maybe_null= 1; } } String *Item_func_lpad::val_str(String *str) { - uint32 res_length,length_pad; - char *to; - const char *ptr_pad; - ulong count= (long) args[1]->val_int(); - String *res= args[0]->val_str(str); - String *lpad= args[2]->val_str(&lpad_str); + DBUG_ASSERT(fixed == 1); + uint32 res_char_length,pad_char_length; + ulong count= (long) args[1]->val_int(), byte_count; + String *res= args[0]->val_str(&tmp_value); + String *pad= args[2]->val_str(&lpad_str); - if (!res || args[1]->null_value || !lpad) + if (!res || args[1]->null_value || !pad) goto err; + null_value=0; - if (count <= (res_length=res->length())) - { // String to pad is big enough - res->length(count); // Shorten result if longer - return (res); - } - length_pad= lpad->length(); - if (count > current_thd->variables.max_allowed_packet || - args[2]->null_value || !length_pad) - goto err; + res_char_length= res->numchars(); - if (res->alloced_length() < count) + if (count <= res_char_length) { - if (str->alloced_length() >= count) - { - memcpy((char*) str->ptr()+(count-res_length),res->ptr(),res_length); - res=str; - } - else - { - if (tmp_value.alloc(count)) - goto err; - memcpy((char*) tmp_value.ptr()+(count-res_length),res->ptr(),res_length); - res=&tmp_value; - } + res->length(res->charpos(count)); + return res; + } + + pad_char_length= pad->numchars(); + byte_count= count * collation.collation->mbmaxlen; + + if (byte_count > current_thd->variables.max_allowed_packet) + { + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_ALLOWED_PACKET_OVERFLOWED, + ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), + func_name(), current_thd->variables.max_allowed_packet); + goto err; } - else - bmove_upp((char*) res->ptr()+count,res->ptr()+res_length,res_length); - res->length(count); - to= (char*) res->ptr(); - ptr_pad= lpad->ptr(); - for (count-= res_length; count > length_pad; count-= length_pad) + if (args[2]->null_value || !pad_char_length || str->alloc(byte_count)) + goto err; + + str->length(0); + str->set_charset(collation.collation); + count-= res_char_length; + while (count >= pad_char_length) { - memcpy(to,ptr_pad,length_pad); - to+= length_pad; + str->append(*pad); + count-= pad_char_length; } - memcpy(to,ptr_pad,(size_t) count); - return (res); + if (count > 0) + str->append(pad->ptr(), pad->charpos(count), collation.collation); - err: - null_value=1; + str->append(*res); + null_value= 0; + return str; + +err: + null_value= 1; return 0; } String *Item_func_conv::val_str(String *str) { + DBUG_ASSERT(fixed == 1); String *res= args[0]->val_str(str); char 
*endptr,ans[65],*ptr; longlong dec; int from_base= (int) args[1]->val_int(); int to_base= (int) args[2]->val_int(); + int err; if (args[0]->null_value || args[1]->null_value || args[2]->null_value || abs(to_base) > 36 || abs(to_base) < 2 || @@ -1947,29 +2239,160 @@ String *Item_func_conv::val_str(String *str) return 0; } null_value=0; + unsigned_flag= !(from_base < 0); if (from_base < 0) - dec= strtoll(res->c_ptr(),&endptr,-from_base); + dec= my_strntoll(res->charset(),res->ptr(),res->length(),-from_base,&endptr,&err); else - dec= (longlong) strtoull(res->c_ptr(),&endptr,from_base); + dec= (longlong) my_strntoull(res->charset(),res->ptr(),res->length(),from_base,&endptr,&err); ptr= longlong2str(dec,ans,to_base); - if (str->copy(ans,(uint32) (ptr-ans))) - return &empty_string; + if (str->copy(ans,(uint32) (ptr-ans), default_charset())) + return &my_empty_string; + return str; +} + + +String *Item_func_conv_charset::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + if (use_cached_value) + return null_value ? 0 : &str_value; + String *arg= args[0]->val_str(str); + uint dummy_errors; + if (!arg) + { + null_value=1; + return 0; + } + null_value= str_value.copy(arg->ptr(),arg->length(),arg->charset(), + conv_charset, &dummy_errors); + return null_value ? 0 : &str_value; +} + +void Item_func_conv_charset::fix_length_and_dec() +{ + collation.set(conv_charset, DERIVATION_IMPLICIT); + max_length = args[0]->max_length*conv_charset->mbmaxlen; +} + +void Item_func_conv_charset::print(String *str) +{ + str->append("convert(", 8); + args[0]->print(str); + str->append(" using ", 7); + str->append(conv_charset->csname); + str->append(')'); +} + +String *Item_func_set_collation::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + str=args[0]->val_str(str); + if ((null_value=args[0]->null_value)) + return 0; + str->set_charset(collation.collation); + return str; +} + +void Item_func_set_collation::fix_length_and_dec() +{ + CHARSET_INFO *set_collation; + const char *colname; + String tmp, *str= args[1]->val_str(&tmp); + colname= str->c_ptr(); + if (colname == binary_keyword) + set_collation= get_charset_by_csname(args[0]->collation.collation->csname, + MY_CS_BINSORT,MYF(0)); + else + { + if (!(set_collation= get_charset_by_name(colname,MYF(0)))) + { + my_error(ER_UNKNOWN_COLLATION, MYF(0), colname); + return; + } + } + + if (!set_collation || + !my_charset_same(args[0]->collation.collation,set_collation)) + { + my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0), + colname,args[0]->collation.collation->csname); + return; + } + collation.set(set_collation, DERIVATION_EXPLICIT); + max_length= args[0]->max_length; +} + + +bool Item_func_set_collation::eq(const Item *item, bool binary_cmp) const +{ + /* Assume we don't have rtti */ + if (this == item) + return 1; + if (item->type() != FUNC_ITEM) + return 0; + Item_func *item_func=(Item_func*) item; + if (arg_count != item_func->arg_count || + func_name() != item_func->func_name()) + return 0; + Item_func_set_collation *item_func_sc=(Item_func_set_collation*) item; + if (collation.collation != item_func_sc->collation.collation) + return 0; + for (uint i=0; i < arg_count ; i++) + if (!args[i]->eq(item_func_sc->args[i], binary_cmp)) + return 0; + return 1; +} + +String *Item_func_charset::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + uint dummy_errors; + + CHARSET_INFO *cs= args[0]->collation.collation; + null_value= 0; + str->copy(cs->csname, strlen(cs->csname), + &my_charset_latin1, collation.collation, &dummy_errors); + return str; +} + +String 
*Item_func_collation::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + uint dummy_errors; + CHARSET_INFO *cs= args[0]->collation.collation; + + null_value= 0; + str->copy(cs->name, strlen(cs->name), + &my_charset_latin1, collation.collation, &dummy_errors); return str; } String *Item_func_hex::val_str(String *str) { + DBUG_ASSERT(fixed == 1); if (args[0]->result_type() != STRING_RESULT) { - /* Return hex of unsigned longlong value */ - longlong dec= args[0]->val_int(); + ulonglong dec; char ans[65],*ptr; + /* Return hex of unsigned longlong value */ + if (args[0]->result_type() == REAL_RESULT) + { + double val= args[0]->val(); + if ((val <= (double) LONGLONG_MIN) || + (val >= (double) (ulonglong) ULONGLONG_MAX)) + dec= ~(longlong) 0; + else + dec= (ulonglong) (val + (val > 0 ? 0.5 : -0.5)); + } + else + dec= (ulonglong) args[0]->val_int(); + if ((null_value= args[0]->null_value)) return 0; ptr= longlong2str(dec,ans,16); - if (str->copy(ans,(uint32) (ptr-ans))) - return &empty_string; // End of memory + if (str->copy(ans,(uint32) (ptr-ans),default_charset())) + return &my_empty_string; // End of memory return str; } @@ -1985,30 +2408,90 @@ String *Item_func_hex::val_str(String *str) null_value=0; tmp_value.length(res->length()*2); for (from=res->ptr(), end=from+res->length(), to= (char*) tmp_value.ptr(); - from != end ; + from < end ; from++, to+=2) { uint tmp=(uint) (uchar) *from; - to[0]=_dig_vec[tmp >> 4]; - to[1]=_dig_vec[tmp & 15]; + to[0]=_dig_vec_upper[tmp >> 4]; + to[1]=_dig_vec_upper[tmp & 15]; } return &tmp_value; } + /* Convert given hex string to a binary string */ + +String *Item_func_unhex::val_str(String *str) +{ + const char *from, *end; + char *to; + String *res; + uint length; + DBUG_ASSERT(fixed == 1); + + res= args[0]->val_str(str); + if (!res || tmp_value.alloc(length= (1+res->length())/2)) + { + null_value=1; + return 0; + } + + from= res->ptr(); + null_value= 0; + tmp_value.length(length); + to= (char*) tmp_value.ptr(); + if (res->length() % 2) + { + int hex_char; + *to++= hex_char= hexchar_to_int(*from++); + if ((null_value= (hex_char == -1))) + return 0; + } + for (end=res->ptr()+res->length(); from < end ; from+=2, to++) + { + int hex_char; + *to= (hex_char= hexchar_to_int(from[0])) << 4; + if ((null_value= (hex_char == -1))) + return 0; + *to|= hex_char= hexchar_to_int(from[1]); + if ((null_value= (hex_char == -1))) + return 0; + } + return &tmp_value; +} + + +void Item_func_binary::print(String *str) +{ + str->append("cast(", 5); + args[0]->print(str); + str->append(" as binary)", 11); +} + #include <my_dir.h> // For my_stat String *Item_load_file::val_str(String *str) { + DBUG_ASSERT(fixed == 1); String *file_name; File file; MY_STAT stat_info; + char path[FN_REFLEN]; DBUG_ENTER("load_file"); - if (!(file_name= args[0]->val_str(str)) || - !(current_thd->master_access & FILE_ACL) || - !my_stat(file_name->c_ptr(), &stat_info, MYF(MY_WME))) + if (!(file_name= args[0]->val_str(str)) +#ifndef NO_EMBEDDED_ACCESS_CHECKS + || !(current_thd->master_access & FILE_ACL) +#endif + ) + goto err; + + (void) fn_format(path, file_name->c_ptr(), mysql_real_data_home, "", + MY_RELATIVE_PATH | MY_UNPACK_FILENAME); + + if (!my_stat(path, &stat_info, MYF(MY_WME))) goto err; + if (!(stat_info.st_mode & S_IROTH)) { /* my_error(ER_TEXTFILE_NOT_READABLE, MYF(0), file_name->c_ptr()); */ @@ -2016,7 +2499,10 @@ String *Item_load_file::val_str(String *str) } if (stat_info.st_size > (long) current_thd->variables.max_allowed_packet) { - /* my_error(ER_TOO_LONG_STRING, MYF(0), 
file_name->c_ptr()); */ + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_ALLOWED_PACKET_OVERFLOWED, + ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), + func_name(), current_thd->variables.max_allowed_packet); goto err; } if (tmp_value.alloc(stat_info.st_size)) @@ -2041,16 +2527,18 @@ err: String* Item_func_export_set::val_str(String* str) { + DBUG_ASSERT(fixed == 1); ulonglong the_set = (ulonglong) args[0]->val_int(); - String yes_buf, *yes; + String yes_buf, *yes; yes = args[1]->val_str(&yes_buf); - String no_buf, *no; + String no_buf, *no; no = args[2]->val_str(&no_buf); - String *sep = NULL, sep_buf ; + String *sep = NULL, sep_buf ; uint num_set_values = 64; ulonglong mask = 0x1; str->length(0); + str->set_charset(collation.collation); /* Check if some argument is a NULL value */ if (args[0]->null_value || args[1]->null_value || args[2]->null_value) @@ -2081,8 +2569,11 @@ String* Item_func_export_set::val_str(String* str) } break; case 3: - sep_buf.set(",", 1); + sep_buf.set(",", 1, default_charset()); sep = &sep_buf; + break; + default: + DBUG_ASSERT(0); // cannot happen } null_value=0; @@ -2103,10 +2594,15 @@ void Item_func_export_set::fix_length_and_dec() uint length=max(args[1]->max_length,args[2]->max_length); uint sep_length=(arg_count > 3 ? args[3]->max_length : 1); max_length=length*64+sep_length*63; + + if (agg_arg_charsets(collation, args+1, min(4,arg_count)-1), + MY_COLL_ALLOW_CONV) + return; } String* Item_func_inet_ntoa::val_str(String* str) { + DBUG_ASSERT(fixed == 1); uchar buf[8], *p; ulonglong n = (ulonglong) args[0]->val_int(); char num[4]; @@ -2124,7 +2620,7 @@ String* Item_func_inet_ntoa::val_str(String* str) int4store(buf,n); /* Now we can assume little endian. */ - + num[3]='.'; for (p=buf+4 ; p-- > buf ; ) { @@ -2145,6 +2641,7 @@ String* Item_func_inet_ntoa::val_str(String* str) return str; } + /* QUOTE() function returns argument string in single quotes suitable for using in a SQL statement. @@ -2152,7 +2649,7 @@ String* Item_func_inet_ntoa::val_str(String* str) DESCRIPTION Adds a \ before all characters that needs to be escaped in a SQL string. We also escape '^Z' (END-OF-FILE in windows) to avoid probelms when - running commands from a file in windows. + running commands from a file in windows. 
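A minimal standalone sketch of the escaping rule this comment describes -- wrap the value in single quotes and backslash-escape NUL, backslash, the quote character and Ctrl-Z. The helper name quote_literal and the use of std::string are illustrative assumptions, not part of this patch; the real val_str() below works on MySQL's String type with a precomputed bit mask.

#include <string>

// Quote a value the way the comment above describes: '...' with \0, \', \\
// and \Z substituted for the characters that would break a generated
// SQL statement.
static std::string quote_literal(const std::string &in)
{
  std::string out;
  out.reserve(in.size() * 2 + 2);
  out+= '\'';
  for (std::string::size_type i= 0; i < in.size(); i++)
  {
    switch (in[i])
    {
    case '\0':   out+= "\\0";  break;
    case '\'':   out+= "\\'";  break;
    case '\\':   out+= "\\\\"; break;
    case '\x1A': out+= "\\Z";  break;   // END-OF-FILE marker on Windows
    default:     out+= in[i];  break;
    }
  }
  out+= '\'';
  return out;
}

For example, quote_literal("it's") would yield 'it\'s'.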
This function is very useful when you want to generate SQL statements @@ -2168,10 +2665,11 @@ String* Item_func_inet_ntoa::val_str(String* str) String *Item_func_quote::val_str(String *str) { + DBUG_ASSERT(fixed == 1); /* Bit mask that has 1 for set for the position of the following characters: 0, \, ' and ^Z - */ + */ static uchar escmask[32]= { @@ -2186,7 +2684,7 @@ String *Item_func_quote::val_str(String *str) uint arg_length, new_length; if (!arg) // Null argument { - str->copy("NULL", 4); // Return the string 'NULL' + str->copy("NULL", 4, collation.collation); // Return the string 'NULL' null_value= 0; return str; } @@ -2232,6 +2730,7 @@ String *Item_func_quote::val_str(String *str) } *to= '\''; tmp_value.length(new_length); + tmp_value.set_charset(collation.collation); null_value= 0; return &tmp_value; @@ -2239,3 +2738,254 @@ null: null_value= 1; return 0; } + +longlong Item_func_uncompressed_length::val_int() +{ + DBUG_ASSERT(fixed == 1); + String *res= args[0]->val_str(&value); + if (!res) + { + null_value=1; + return 0; /* purecov: inspected */ + } + null_value=0; + if (res->is_empty()) return 0; + + /* + res->ptr() using is safe because we have tested that string is not empty, + res->c_ptr() is not used because: + - we do not need \0 terminated string to get first 4 bytes + - c_ptr() tests simbol after string end (uninitialiozed memory) which + confuse valgrind + */ + return uint4korr(res->ptr()) & 0x3FFFFFFF; +} + +longlong Item_func_crc32::val_int() +{ + DBUG_ASSERT(fixed == 1); + String *res=args[0]->val_str(&value); + if (!res) + { + null_value=1; + return 0; /* purecov: inspected */ + } + null_value=0; + return (longlong) crc32(0L, (uchar*)res->ptr(), res->length()); +} + +#ifdef HAVE_COMPRESS +#include "zlib.h" + +String *Item_func_compress::val_str(String *str) +{ + int err= Z_OK, code; + ulong new_size; + String *res; + Byte *body; + char *tmp, *last_char; + DBUG_ASSERT(fixed == 1); + + if (!(res= args[0]->val_str(str))) + { + null_value= 1; + return 0; + } + if (res->is_empty()) return res; + + /* + Citation from zlib.h (comment for compress function): + + Compresses the source buffer into the destination buffer. sourceLen is + the byte length of the source buffer. Upon entry, destLen is the total + size of the destination buffer, which must be at least 0.1% larger than + sourceLen plus 12 bytes. + We assume here that the buffer can't grow more than .25 %. + */ + new_size= res->length() + res->length() / 5 + 12; + + // Check new_size overflow: new_size <= res->length() + if (((uint32) (new_size+5) <= res->length()) || + buffer.realloc((uint32) new_size + 4 + 1)) + { + null_value= 1; + return 0; + } + + body= ((Byte*)buffer.ptr()) + 4; + + // As far as we have checked res->is_empty() we can use ptr() + if ((err= compress(body, &new_size, + (const Bytef*)res->ptr(), res->length())) != Z_OK) + { + code= err==Z_MEM_ERROR ? 
ER_ZLIB_Z_MEM_ERROR : ER_ZLIB_Z_BUF_ERROR; + push_warning(current_thd,MYSQL_ERROR::WARN_LEVEL_ERROR,code,ER(code)); + null_value= 1; + return 0; + } + + tmp= (char*)buffer.ptr(); // int4store is a macro; avoid side effects + int4store(tmp, res->length() & 0x3FFFFFFF); + + /* This is to ensure that things works for CHAR fields, which trim ' ': */ + last_char= ((char*)body)+new_size-1; + if (*last_char == ' ') + { + *++last_char= '.'; + new_size++; + } + + buffer.length((uint32)new_size + 4); + return &buffer; +} + + +String *Item_func_uncompress::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + String *res= args[0]->val_str(str); + ulong new_size; + int err; + uint code; + + if (!res) + goto err; + null_value= 0; + if (res->is_empty()) + return res; + + new_size= uint4korr(res->ptr()) & 0x3FFFFFFF; + if (new_size > current_thd->variables.max_allowed_packet) + { + push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_TOO_BIG_FOR_UNCOMPRESS, + ER(ER_TOO_BIG_FOR_UNCOMPRESS), + current_thd->variables.max_allowed_packet); + goto err; + } + if (buffer.realloc((uint32)new_size)) + goto err; + + if ((err= uncompress((Byte*)buffer.ptr(), &new_size, + ((const Bytef*)res->ptr())+4,res->length())) == Z_OK) + { + buffer.length((uint32) new_size); + return &buffer; + } + + code= ((err == Z_BUF_ERROR) ? ER_ZLIB_Z_BUF_ERROR : + ((err == Z_MEM_ERROR) ? ER_ZLIB_Z_MEM_ERROR : ER_ZLIB_Z_DATA_ERROR)); + push_warning(current_thd,MYSQL_ERROR::WARN_LEVEL_ERROR,code,ER(code)); + +err: + null_value= 1; + return 0; +} +#endif + +/* + UUID, as in + DCE 1.1: Remote Procedure Call, + Open Group Technical Standard Document Number C706, October 1997, + (supersedes C309 DCE: Remote Procedure Call 8/1994, + which was basis for ISO/IEC 11578:1996 specification) +*/ + +static struct rand_struct uuid_rand; +static uint nanoseq; +static ulonglong uuid_time=0; +static char clock_seq_and_node_str[]="-0000-000000000000"; + +/* number of 100-nanosecond intervals between + 1582-10-15 00:00:00.00 and 1970-01-01 00:00:00.00 */ +#define UUID_TIME_OFFSET ((ulonglong) 141427 * 24 * 60 * 60 * 1000 * 10 ) + +#define UUID_VERSION 0x1000 +#define UUID_VARIANT 0x8000 + +static void tohex(char *to, uint from, uint len) +{ + to+= len; + while (len--) + { + *--to= _dig_vec_lower[from & 15]; + from >>= 4; + } +} + +static void set_clock_seq_str() +{ + uint16 clock_seq= ((uint)(my_rnd(&uuid_rand)*16383)) | UUID_VARIANT; + tohex(clock_seq_and_node_str+1, clock_seq, 4); + nanoseq= 0; +} + +String *Item_func_uuid::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + char *s; + pthread_mutex_lock(&LOCK_uuid_generator); + if (! uuid_time) /* first UUID() call. 
initializing data */ + { + ulong tmp=sql_rnd_with_mutex(); + uchar mac[6]; + int i; + if (my_gethwaddr(mac)) + { + /* + generating random "hardware addr" + and because specs explicitly specify that it should NOT correlate + with a clock_seq value (initialized random below), we use a separate + randominit() here + */ + randominit(&uuid_rand, tmp + (ulong)current_thd, tmp + query_id); + for (i=0; i < (int)sizeof(mac); i++) + mac[i]=(uchar)(my_rnd(&uuid_rand)*255); + } + s=clock_seq_and_node_str+sizeof(clock_seq_and_node_str)-1; + for (i=sizeof(mac)-1 ; i>=0 ; i--) + { + *--s=_dig_vec_lower[mac[i] & 15]; + *--s=_dig_vec_lower[mac[i] >> 4]; + } + randominit(&uuid_rand, tmp + (ulong)start_time, tmp + bytes_sent); + set_clock_seq_str(); + } + + ulonglong tv=my_getsystime() + UUID_TIME_OFFSET + nanoseq; + if (unlikely(tv < uuid_time)) + set_clock_seq_str(); + else + if (unlikely(tv == uuid_time)) + { /* special protection from low-res system clocks */ + nanoseq++; + tv++; + } + else + { + if (nanoseq) + { + tv-=nanoseq; + nanoseq=0; + } + DBUG_ASSERT(tv > uuid_time); + } + uuid_time=tv; + pthread_mutex_unlock(&LOCK_uuid_generator); + + uint32 time_low= (uint32) (tv & 0xFFFFFFFF); + uint16 time_mid= (uint16) ((tv >> 32) & 0xFFFF); + uint16 time_hi_and_version= (uint16) ((tv >> 48) | UUID_VERSION); + + str->realloc(UUID_LENGTH+1); + str->length(UUID_LENGTH); + str->set_charset(system_charset_info); + s=(char *) str->ptr(); + s[8]=s[13]='-'; + tohex(s, time_low, 8); + tohex(s+9, time_mid, 4); + tohex(s+14, time_hi_and_version, 4); + strmov(s+18, clock_seq_and_node_str); + return str; +} + diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h index ece15484fd9..89bab4a909c 100644 --- a/sql/item_strfunc.h +++ b/sql/item_strfunc.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2003 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -17,7 +17,7 @@ /* This file defines all string functions */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_INTERFACE #pragma interface /* gcc class implementation */ #endif @@ -35,10 +35,8 @@ public: double val(); enum Item_result result_type () const { return STRING_RESULT; } void left_right_max_length(); - unsigned int size_of() { return sizeof(*this);} }; - class Item_func_md5 :public Item_str_func { String tmp_value; @@ -47,7 +45,6 @@ public: String *val_str(String *); void fix_length_and_dec(); const char *func_name() const { return "md5"; } - unsigned int size_of() { return sizeof(*this);} }; @@ -88,28 +85,17 @@ public: String *val_str(String *); void fix_length_and_dec(); const char *func_name() const { return "concat"; } - unsigned int size_of() { return sizeof(*this);} }; class Item_func_concat_ws :public Item_str_func { - Item *separator; String tmp_value; - public: - Item_func_concat_ws(Item *a,List<Item> &list) - :Item_str_func(list),separator(a) {} - ~Item_func_concat_ws() { delete separator; } + Item_func_concat_ws(List<Item> &list) :Item_str_func(list) {} String *val_str(String *); void fix_length_and_dec(); - void update_used_tables(); - bool fix_fields(THD *thd,struct st_table_list *tlist) - { - return (separator->fix_fields(thd,tlist) - || Item_func::fix_fields(thd,tlist)); - } - void split_sum_func(List<Item> &fields); const char *func_name() const { return "concat_ws"; } + table_map not_null_tables() const { return 0; } }; class Item_func_reverse :public Item_str_func @@ -118,6 +104,7 @@ public: 
Item_func_reverse(Item *a) :Item_str_func(a) {} String *val_str(String *); void fix_length_and_dec(); + const char *func_name() const { return "reverse"; } }; @@ -130,7 +117,6 @@ public: String *val_str(String *); void fix_length_and_dec(); const char *func_name() const { return "replace"; } - unsigned int size_of() { return sizeof(*this);} }; @@ -143,7 +129,6 @@ public: String *val_str(String *); void fix_length_and_dec(); const char *func_name() const { return "insert"; } - unsigned int size_of() { return sizeof(*this);} }; @@ -151,7 +136,11 @@ class Item_str_conv :public Item_str_func { public: Item_str_conv(Item *item) :Item_str_func(item) {} - void fix_length_and_dec() { max_length = args[0]->max_length; } + void fix_length_and_dec() + { + collation.set(args[0]->collation); + max_length = args[0]->max_length; + } }; @@ -174,6 +163,7 @@ public: class Item_func_left :public Item_str_func { + String tmp_value; public: Item_func_left(Item *a,Item *b) :Item_str_func(a,b) {} String *val_str(String *); @@ -190,7 +180,6 @@ public: String *val_str(String *); void fix_length_and_dec(); const char *func_name() const { return "right"; } - unsigned int size_of() { return sizeof(*this);} }; @@ -203,7 +192,6 @@ public: String *val_str(String *); void fix_length_and_dec(); const char *func_name() const { return "substr"; } - unsigned int size_of() { return sizeof(*this);} }; @@ -213,58 +201,84 @@ class Item_func_substr_index :public Item_str_func public: Item_func_substr_index(Item *a,Item *b,Item *c) :Item_str_func(a,b,c) {} String *val_str(String *); - void fix_length_and_dec() { max_length= args[0]->max_length; } + void fix_length_and_dec(); const char *func_name() const { return "substr_index"; } - unsigned int size_of() { return sizeof(*this);} }; -class Item_func_ltrim :public Item_str_func +class Item_func_trim :public Item_str_func { +protected: String tmp_value; + String remove; public: - Item_func_ltrim(Item *a,Item *b) :Item_str_func(a,b) {} + Item_func_trim(Item *a,Item *b) :Item_str_func(a,b) {} + Item_func_trim(Item *a) :Item_str_func(a) {} String *val_str(String *); - void fix_length_and_dec() { max_length= args[0]->max_length; } - const char *func_name() const { return "ltrim"; } - unsigned int size_of() { return sizeof(*this);} + void fix_length_and_dec(); + const char *func_name() const { return "trim"; } }; -class Item_func_rtrim :public Item_str_func +class Item_func_ltrim :public Item_func_trim { - String tmp_value; public: - Item_func_rtrim(Item *a,Item *b) :Item_str_func(a,b) {} + Item_func_ltrim(Item *a,Item *b) :Item_func_trim(a,b) {} + Item_func_ltrim(Item *a) :Item_func_trim(a) {} String *val_str(String *); - void fix_length_and_dec() { max_length= args[0]->max_length; } - const char *func_name() const { return "rtrim"; } - unsigned int size_of() { return sizeof(*this);} + const char *func_name() const { return "ltrim"; } }; -class Item_func_trim :public Item_str_func + +class Item_func_rtrim :public Item_func_trim { - String tmp_value; public: - Item_func_trim(Item *a,Item *b) :Item_str_func(a,b) {} + Item_func_rtrim(Item *a,Item *b) :Item_func_trim(a,b) {} + Item_func_rtrim(Item *a) :Item_func_trim(a) {} String *val_str(String *); - void fix_length_and_dec() { max_length= args[0]->max_length; } - const char *func_name() const { return "trim"; } - unsigned int size_of() { return sizeof(*this);} + const char *func_name() const { return "rtrim"; } }; +/* + Item_func_password -- new (4.1.1) PASSWORD() function implementation. 
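Since Item_func_ltrim and Item_func_rtrim above now derive from Item_func_trim and share its remove buffer, the three functions differ only in which end of the string they strip. The sketch below only illustrates that shared shape under simplifying assumptions (std::string, single-byte characters, a made-up helper name trim_ends); the server code has to use charset-aware lengths on its own String class.

#include <string>

// Strip leading and/or trailing copies of 'remove' from 's'.  LTRIM, RTRIM
// and two-argument TRIM then become three flag combinations of one helper.
static std::string trim_ends(std::string s, const std::string &remove,
                             bool left, bool right)
{
  if (remove.empty())
    return s;
  if (left)
    while (s.size() >= remove.size() &&
           s.compare(0, remove.size(), remove) == 0)
      s.erase(0, remove.size());
  if (right)
    while (s.size() >= remove.size() &&
           s.compare(s.size() - remove.size(), remove.size(), remove) == 0)
      s.erase(s.size() - remove.size());
  return s;
}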
+ Returns strcat('*', octet2hex(sha1(sha1(password)))). '*' stands for new + password format, sha1(sha1(password) is so-called hash_stage2 value. + Length of returned string is always 41 byte. To find out how entire + authentification procedure works, see comments in password.c. +*/ + class Item_func_password :public Item_str_func { - char tmp_value[17]; + char tmp_value[SCRAMBLED_PASSWORD_CHAR_LENGTH+1]; public: Item_func_password(Item *a) :Item_str_func(a) {} - String *val_str(String *); - void fix_length_and_dec() { max_length = 16; } + String *val_str(String *str); + void fix_length_and_dec() { max_length= SCRAMBLED_PASSWORD_CHAR_LENGTH; } const char *func_name() const { return "password"; } - unsigned int size_of() { return sizeof(*this);} + static char *alloc(THD *thd, const char *password); }; + +/* + Item_func_old_password -- PASSWORD() implementation used in MySQL 3.21 - 4.0 + compatibility mode. This item is created in sql_yacc.yy when + 'old_passwords' session variable is set, and to handle OLD_PASSWORD() + function. +*/ + +class Item_func_old_password :public Item_str_func +{ + char tmp_value[SCRAMBLED_PASSWORD_CHAR_LENGTH_323+1]; +public: + Item_func_old_password(Item *a) :Item_str_func(a) {} + String *val_str(String *str); + void fix_length_and_dec() { max_length= SCRAMBLED_PASSWORD_CHAR_LENGTH_323; } + const char *func_name() const { return "old_password"; } + static char *alloc(THD *thd, const char *password); +}; + + class Item_func_des_encrypt :public Item_str_func { String tmp_value; @@ -275,7 +289,6 @@ public: void fix_length_and_dec() { maybe_null=1; max_length = args[0]->max_length+8; } const char *func_name() const { return "des_encrypt"; } - unsigned int size_of() { return sizeof(*this);} }; class Item_func_des_decrypt :public Item_str_func @@ -287,7 +300,6 @@ public: String *val_str(String *); void fix_length_and_dec() { maybe_null=1; max_length = args[0]->max_length; } const char *func_name() const { return "des_decrypt"; } - unsigned int size_of() { return sizeof(*this);} }; class Item_func_encrypt :public Item_str_func @@ -298,11 +310,12 @@ public: Item_func_encrypt(Item *a, Item *b): Item_str_func(a,b) {} String *val_str(String *); void fix_length_and_dec() { maybe_null=1; max_length = 13; } - unsigned int size_of() { return sizeof(*this);} + const char *func_name() const { return "ecrypt"; } }; #include "sql_crypt.h" + class Item_func_encode :public Item_str_func { protected: @@ -312,32 +325,49 @@ public: Item_str_func(a),sql_crypt(seed) {} String *val_str(String *); void fix_length_and_dec(); - unsigned int size_of() { return sizeof(*this);} + const char *func_name() const { return "encode"; } }; + class Item_func_decode :public Item_func_encode { public: Item_func_decode(Item *a, char *seed): Item_func_encode(a,seed) {} String *val_str(String *); + const char *func_name() const { return "decode"; } }; -class Item_func_database :public Item_str_func +class Item_func_sysconst :public Item_str_func { public: - Item_func_database() {} + Item_func_sysconst() + { collation.set(system_charset_info,DERIVATION_SYSCONST); } + Item *safe_charset_converter(CHARSET_INFO *tocs); +}; + +class Item_func_database :public Item_func_sysconst +{ +public: + Item_func_database() :Item_func_sysconst() {} String *val_str(String *); - void fix_length_and_dec() { max_length= MAX_FIELD_NAME; } + void fix_length_and_dec() + { + max_length= MAX_FIELD_NAME * system_charset_info->mbmaxlen; + maybe_null=1; + } const char *func_name() const { return "database"; } }; -class Item_func_user 
:public Item_str_func +class Item_func_user :public Item_func_sysconst { public: - Item_func_user() {} + Item_func_user() :Item_func_sysconst() {} String *val_str(String *); - void fix_length_and_dec() { max_length= USERNAME_LENGTH+HOSTNAME_LENGTH+1; } + void fix_length_and_dec() + { + max_length= (USERNAME_LENGTH+HOSTNAME_LENGTH+1)*system_charset_info->mbmaxlen; + } const char *func_name() const { return "user"; } }; @@ -350,29 +380,18 @@ public: String *val_str(String *); void fix_length_and_dec(); const char *func_name() const { return "soundex"; } - unsigned int size_of() { return sizeof(*this);} }; class Item_func_elt :public Item_str_func { - Item *item; - public: - Item_func_elt(Item *a,List<Item> &list) :Item_str_func(list),item(a) {} - ~Item_func_elt() { delete item; } + Item_func_elt(List<Item> &list) :Item_str_func(list) {} double val(); longlong val_int(); String *val_str(String *str); - bool fix_fields(THD *thd,struct st_table_list *tlist) - { - return (item->fix_fields(thd,tlist) || Item_func::fix_fields(thd,tlist)); - } - void split_sum_func(List<Item> &fields); void fix_length_and_dec(); - void update_used_tables(); const char *func_name() const { return "elt"; } - unsigned int size_of() { return sizeof(*this);} }; @@ -383,17 +402,26 @@ class Item_func_make_set :public Item_str_func public: Item_func_make_set(Item *a,List<Item> &list) :Item_str_func(list),item(a) {} - ~Item_func_make_set() { delete item; } String *val_str(String *str); - bool fix_fields(THD *thd,struct st_table_list *tlist) + bool fix_fields(THD *thd, TABLE_LIST *tlist, Item **ref) { - return (item->fix_fields(thd,tlist) || Item_func::fix_fields(thd,tlist)); + DBUG_ASSERT(fixed == 0); + return (!item->fixed && + item->fix_fields(thd, tlist, &item) || + item->check_cols(1) || + Item_func::fix_fields(thd, tlist, ref)); } - void split_sum_func(List<Item> &fields); + void split_sum_func(THD *thd, Item **ref_pointer_array, List<Item> &fields); void fix_length_and_dec(); void update_used_tables(); const char *func_name() const { return "make_set"; } - unsigned int size_of() { return sizeof(*this);} + + bool walk(Item_processor processor, byte *arg) + { + return item->walk(processor, arg) || + Item_str_func::walk(processor, arg); + } + void print(String *str); }; @@ -405,19 +433,27 @@ public: String *val_str(String *); void fix_length_and_dec() { + collation.set(default_charset()); max_length=args[0]->max_length+(args[0]->max_length-args[0]->decimals)/3; } const char *func_name() const { return "format"; } - unsigned int size_of() { return sizeof(*this);} + void print(String *); }; class Item_func_char :public Item_str_func { public: - Item_func_char(List<Item> &list) :Item_str_func(list) {} + Item_func_char(List<Item> &list) :Item_str_func(list) + { collation.set(default_charset()); } + Item_func_char(List<Item> &list, CHARSET_INFO *cs) :Item_str_func(list) + { collation.set(cs); } String *val_str(String *); - void fix_length_and_dec() { maybe_null=0; max_length=arg_count; binary=0;} + void fix_length_and_dec() + { + maybe_null=0; + max_length=arg_count * collation.collation->mbmaxlen; + } const char *func_name() const { return "char"; } }; @@ -430,7 +466,6 @@ public: String *val_str(String *); void fix_length_and_dec(); const char *func_name() const { return "repeat"; } - unsigned int size_of() { return sizeof(*this);} }; @@ -443,7 +478,6 @@ public: String *val_str(String *); void fix_length_and_dec(); const char *func_name() const { return "rpad"; } - unsigned int size_of() { return sizeof(*this);} }; @@ -456,7 
+490,6 @@ public: String *val_str(String *); void fix_length_and_dec(); const char *func_name() const { return "lpad"; } - unsigned int size_of() { return sizeof(*this);} }; @@ -466,7 +499,11 @@ public: Item_func_conv(Item *a,Item *b,Item *c) :Item_str_func(a,b,c) {} const char *func_name() const { return "conv"; } String *val_str(String *); - void fix_length_and_dec() { decimals=0; max_length=64; } + void fix_length_and_dec() + { + collation.set(default_charset()); + decimals=0; max_length=64; + } }; @@ -477,8 +514,27 @@ public: Item_func_hex(Item *a) :Item_str_func(a) {} const char *func_name() const { return "hex"; } String *val_str(String *); - void fix_length_and_dec() { decimals=0; max_length=args[0]->max_length*2; } - unsigned int size_of() { return sizeof(*this);} + void fix_length_and_dec() + { + collation.set(default_charset()); + decimals=0; + max_length=args[0]->max_length*2*collation.collation->mbmaxlen; + } +}; + +class Item_func_unhex :public Item_str_func +{ + String tmp_value; +public: + Item_func_unhex(Item *a) :Item_str_func(a) {} + const char *func_name() const { return "unhex"; } + String *val_str(String *); + void fix_length_and_dec() + { + collation.set(&my_charset_bin); + decimals=0; + max_length=(1+args[0]->max_length)/2; + } }; @@ -486,15 +542,21 @@ class Item_func_binary :public Item_str_func { public: Item_func_binary(Item *a) :Item_str_func(a) {} - const char *func_name() const { return "binary"; } String *val_str(String *a) { + DBUG_ASSERT(fixed == 1); String *tmp=args[0]->val_str(a); null_value=args[0]->null_value; + if (tmp) + tmp->set_charset(&my_charset_bin); return tmp; - } - void fix_length_and_dec() { binary=1; max_length=args[0]->max_length; } - void print(String *str) { print_op(str); } + } + void fix_length_and_dec() + { + collation.set(&my_charset_bin); + max_length=args[0]->max_length; + } + void print(String *str); }; @@ -506,8 +568,11 @@ public: String *val_str(String *); const char *func_name() const { return "load_file"; } void fix_length_and_dec() - { binary=1; maybe_null=1; max_length=MAX_BLOB_WIDTH;} - unsigned int size_of() { return sizeof(*this);} + { + collation.set(&my_charset_bin, DERIVATION_COERCIBLE); + maybe_null=1; + max_length=MAX_BLOB_WIDTH; + } }; @@ -522,7 +587,7 @@ class Item_func_export_set: public Item_str_func const char *func_name() const { return "export_set"; } }; - class Item_func_inet_ntoa : public Item_str_func +class Item_func_inet_ntoa : public Item_str_func { public: Item_func_inet_ntoa(Item *a) :Item_str_func(a) @@ -540,5 +605,157 @@ public: Item_func_quote(Item *a) :Item_str_func(a) {} const char *func_name() const { return "quote"; } String *val_str(String *); - void fix_length_and_dec() { max_length= args[0]->max_length * 2 + 2; } + void fix_length_and_dec() + { + collation.set(args[0]->collation); + max_length= args[0]->max_length * 2 + 2; + } +}; + +class Item_func_conv_charset :public Item_str_func +{ + bool use_cached_value; +public: + bool safe; + CHARSET_INFO *conv_charset; // keep it public + Item_func_conv_charset(Item *a, CHARSET_INFO *cs) :Item_str_func(a) + { conv_charset= cs; use_cached_value= 0; safe= 0; } + Item_func_conv_charset(Item *a, CHARSET_INFO *cs, bool cache_if_const) + :Item_str_func(a) + { + DBUG_ASSERT(args[0]->fixed); + conv_charset= cs; + if (cache_if_const && args[0]->const_item()) + { + uint errors= 0; + String tmp, *str= args[0]->val_str(&tmp); + if (!str || str_value.copy(str->ptr(), str->length(), + str->charset(), conv_charset, &errors)) + null_value= 1; + use_cached_value= 1; + 
safe= (errors == 0); + } + else + { + use_cached_value= 0; + /* + Conversion from and to "binary" is safe. + Conversion to Unicode is safe. + Other kind of conversions are potentially lossy. + */ + safe= (args[0]->collation.collation == &my_charset_bin || + cs == &my_charset_bin || + (cs->state & MY_CS_UNICODE)); + } + } + String *val_str(String *); + void fix_length_and_dec(); + const char *func_name() const { return "convert"; } + void print(String *str); +}; + +class Item_func_set_collation :public Item_str_func +{ +public: + Item_func_set_collation(Item *a, Item *b) :Item_str_func(a,b) {}; + String *val_str(String *); + void fix_length_and_dec(); + bool eq(const Item *item, bool binary_cmp) const; + const char *func_name() const { return "collate"; } + void print(String *str) { print_op(str); } +}; + +class Item_func_charset :public Item_str_func +{ +public: + Item_func_charset(Item *a) :Item_str_func(a) {} + String *val_str(String *); + const char *func_name() const { return "charset"; } + void fix_length_and_dec() + { + collation.set(system_charset_info); + max_length= 64 * collation.collation->mbmaxlen; // should be enough + maybe_null= 0; + }; + table_map not_null_tables() const { return 0; } }; + +class Item_func_collation :public Item_str_func +{ +public: + Item_func_collation(Item *a) :Item_str_func(a) {} + String *val_str(String *); + const char *func_name() const { return "collation"; } + void fix_length_and_dec() + { + collation.set(system_charset_info); + max_length= 64 * collation.collation->mbmaxlen; // should be enough + maybe_null= 0; + }; + table_map not_null_tables() const { return 0; } +}; + +class Item_func_crc32 :public Item_int_func +{ + String value; +public: + Item_func_crc32(Item *a) :Item_int_func(a) {} + const char *func_name() const { return "crc32"; } + void fix_length_and_dec() { max_length=10; } + longlong val_int(); +}; + +class Item_func_uncompressed_length : public Item_int_func +{ + String value; +public: + Item_func_uncompressed_length(Item *a):Item_int_func(a){} + const char *func_name() const{return "uncompressed_length";} + void fix_length_and_dec() { max_length=10; } + longlong val_int(); +}; + +#ifdef HAVE_COMPRESS +#define ZLIB_DEPENDED_FUNCTION ; +#else +#define ZLIB_DEPENDED_FUNCTION { null_value=1; return 0; } +#endif + +class Item_func_compress: public Item_str_func +{ + String buffer; +public: + Item_func_compress(Item *a):Item_str_func(a){} + void fix_length_and_dec(){max_length= (args[0]->max_length*120)/100+12;} + const char *func_name() const{return "compress";} + String *val_str(String *) ZLIB_DEPENDED_FUNCTION +}; + +class Item_func_uncompress: public Item_str_func +{ + String buffer; +public: + Item_func_uncompress(Item *a): Item_str_func(a){} + void fix_length_and_dec(){max_length= MAX_BLOB_WIDTH;} + const char *func_name() const{return "uncompress";} + String *val_str(String *) ZLIB_DEPENDED_FUNCTION +}; + +#define UUID_LENGTH (8+1+4+1+4+1+4+1+12) +class Item_func_uuid: public Item_str_func +{ +public: + Item_func_uuid(): Item_str_func() {} + void fix_length_and_dec() { + collation.set(system_charset_info); + /* + NOTE! 
uuid() should be changed to use 'ascii' + charset when hex(), format(), md5(), etc, and implicit + number-to-string conversion will use 'ascii' + */ + max_length= UUID_LENGTH * system_charset_info->mbmaxlen; + } + const char *func_name() const{ return "uuid"; } + String *val_str(String *); +}; + diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc new file mode 100644 index 00000000000..c8405a9f8f4 --- /dev/null +++ b/sql/item_subselect.cc @@ -0,0 +1,1814 @@ +/* Copyright (C) 2000 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* + subselect Item + +SUBSELECT TODO: + - add function from mysql_select that use JOIN* as parameter to JOIN methods + (sql_select.h/sql_select.cc) +*/ + +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation // gcc: Class implementation +#endif + +#include "mysql_priv.h" +#include "sql_select.h" + +inline Item * and_items(Item* cond, Item *item) +{ + return (cond? (new Item_cond_and(cond, item)) : item); +} + +Item_subselect::Item_subselect(): + Item_result_field(), value_assigned(0), thd(0), substitution(0), + engine(0), old_engine(0), used_tables_cache(0), have_to_be_excluded(0), + const_item_cache(1), engine_changed(0), changed(0) +{ + reset(); + /* + item value is NULL if select_subselect not changed this value + (i.e. some rows will be found returned) + */ + null_value= 1; +} + + +void Item_subselect::init(st_select_lex *select_lex, + select_subselect *result) +{ + + DBUG_ENTER("Item_subselect::init"); + DBUG_PRINT("subs", ("select_lex 0x%xl", (ulong) select_lex)); + unit= select_lex->master_unit(); + + if (unit->item) + { + /* + Item can be changed in JOIN::prepare while engine in JOIN::optimize + => we do not copy old_engine here + */ + engine= unit->item->engine; + parsing_place= unit->item->parsing_place; + unit->item->engine= 0; + unit->item= this; + engine->change_item(this, result); + } + else + { + SELECT_LEX *outer_select= unit->outer_select(); + /* + do not take into account expression inside aggregate functions because + they can access original table fields + */ + parsing_place= (outer_select->in_sum_expr ? 
+ NO_MATTER : + outer_select->parsing_place); + if (select_lex->next_select()) + engine= new subselect_union_engine(unit, result, this); + else + engine= new subselect_single_select_engine(select_lex, result, this); + } + { + SELECT_LEX *upper= unit->outer_select(); + if (upper->parsing_place == IN_HAVING) + upper->subquery_in_having= 1; + } + DBUG_VOID_RETURN; +} + +void Item_subselect::cleanup() +{ + DBUG_ENTER("Item_subselect::cleanup"); + Item_result_field::cleanup(); + if (old_engine) + { + if (engine) + engine->cleanup(); + engine= old_engine; + old_engine= 0; + } + if (engine) + engine->cleanup(); + reset(); + value_assigned= 0; + DBUG_VOID_RETURN; +} + +void Item_singlerow_subselect::cleanup() +{ + DBUG_ENTER("Item_singlerow_subselect::cleanup"); + value= 0; row= 0; + Item_subselect::cleanup(); + DBUG_VOID_RETURN; +} + +Item_subselect::~Item_subselect() +{ + delete engine; +} + +Item_subselect::trans_res +Item_subselect::select_transformer(JOIN *join) +{ + DBUG_ENTER("Item_subselect::select_transformer"); + DBUG_RETURN(RES_OK); +} + + +bool Item_subselect::fix_fields(THD *thd_param, TABLE_LIST *tables, Item **ref) +{ + DBUG_ASSERT(fixed == 0); + engine->set_thd((thd= thd_param)); + + char const *save_where= thd->where; + int res; + + if (check_stack_overrun(thd, (gptr)&res)) + return 1; + + res= engine->prepare(); + + // all transformetion is done (used by prepared statements) + changed= 1; + + if (!res) + { + if (substitution) + { + int ret= 0; + + // did we changed top item of WHERE condition + if (unit->outer_select()->where == (*ref)) + unit->outer_select()->where= substitution; // correct WHERE for PS + else if (unit->outer_select()->having == (*ref)) + unit->outer_select()->having= substitution; // correct HAVING for PS + + (*ref)= substitution; + substitution->name= name; + if (have_to_be_excluded) + engine->exclude(); + substitution= 0; + thd->where= "checking transformed subquery"; + if (!(*ref)->fixed) + ret= (*ref)->fix_fields(thd, tables, ref); + thd->where= save_where; + return ret; + } + // Is it one field subselect? + if (engine->cols() > max_columns) + { + my_error(ER_OPERAND_COLUMNS, MYF(0), 1); + return 1; + } + fix_length_and_dec(); + } + else + return 1; + uint8 uncacheable= engine->uncacheable(); + if (uncacheable) + { + const_item_cache= 0; + if (uncacheable & UNCACHEABLE_RAND) + used_tables_cache|= RAND_TABLE_BIT; + } + fixed= 1; + thd->where= save_where; + return res; +} + +bool Item_subselect::exec() +{ + int res; + MEM_ROOT *old_root= thd->mem_root; + + /* + As this is execution, all objects should be allocated through the main + mem root + */ + thd->mem_root= &thd->main_mem_root; + res= engine->exec(); + thd->mem_root= old_root; + + if (engine_changed) + { + engine_changed= 0; + return exec(); + } + return (res); +} + +Item::Type Item_subselect::type() const +{ + return SUBSELECT_ITEM; +} + + +void Item_subselect::fix_length_and_dec() +{ + engine->fix_length_and_dec(0); +} + + +table_map Item_subselect::used_tables() const +{ + return (table_map) (engine->uncacheable() ? used_tables_cache : 0L); +} + + +bool Item_subselect::const_item() const +{ + return const_item_cache; +} + +Item *Item_subselect::get_tmp_table_item(THD *thd) +{ + if (!with_sum_func && !const_item()) + return new Item_field(result_field); + return copy_or_same(thd); +} + +void Item_subselect::update_used_tables() +{ + if (!engine->uncacheable()) + { + // did all used tables become ststic? 
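The substitution step in Item_subselect::fix_fields() above depends on the caller passing the address of the slot that holds the item (Item **ref), so the subselect can splice a transformed expression into its parent. The following is a minimal standalone sketch of that double-pointer pattern; Node, ReducibleNode and fix() are illustrative names rather than MySQL classes, and ownership of the replaced node is deliberately ignored.

struct Node
{
  virtual ~Node() {}
  virtual void fix(Node **ref) { (void) ref; }    // default: nothing to do
};

struct ReducibleNode : public Node
{
  Node *substitution;                   // prepared replacement, may be 0
  ReducibleNode(Node *s) : substitution(s) {}
  void fix(Node **ref)
  {
    if (substitution)
    {
      *ref= substitution;               // parent slot now holds the new node
      substitution= 0;
      (*ref)->fix(ref);                 // finish resolving the replacement
    }
  }
};

// Usage: Node *slot= new ReducibleNode(new Node()); slot->fix(&slot);
// afterwards 'slot' points at the plain Node, which is the effect the
// (*ref)= substitution assignment has on the enclosing WHERE/HAVING item.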
+ if (!(used_tables_cache & ~engine->upper_select_const_tables())) + const_item_cache= 1; + } +} + + +void Item_subselect::print(String *str) +{ + str->append('('); + engine->print(str); + str->append(')'); +} + + +Item_singlerow_subselect::Item_singlerow_subselect(st_select_lex *select_lex) + :Item_subselect(), value(0) +{ + DBUG_ENTER("Item_singlerow_subselect::Item_singlerow_subselect"); + init(select_lex, new select_singlerow_subselect(this)); + maybe_null= 1; + max_columns= UINT_MAX; + DBUG_VOID_RETURN; +} + +Item_maxmin_subselect::Item_maxmin_subselect(Item_subselect *parent, + st_select_lex *select_lex, + bool max_arg) + :Item_singlerow_subselect(), was_values(TRUE) +{ + DBUG_ENTER("Item_maxmin_subselect::Item_maxmin_subselect"); + max= max_arg; + init(select_lex, new select_max_min_finder_subselect(this, max_arg)); + max_columns= 1; + maybe_null= 1; + max_columns= 1; + + /* + Following information was collected during performing fix_fields() + of Items belonged to subquery, which will be not repeated + */ + used_tables_cache= parent->get_used_tables_cache(); + const_item_cache= parent->get_const_item_cache(); + + DBUG_VOID_RETURN; +} + +void Item_maxmin_subselect::cleanup() +{ + DBUG_ENTER("Item_maxmin_subselect::cleanup"); + Item_singlerow_subselect::cleanup(); + + /* + By default it is TRUE to avoid TRUE reporting by + Item_func_not_all/Item_func_nop_all if this item was never called. + + Engine exec() set it to FALSE by reset_value_registration() call. + select_max_min_finder_subselect::send_data() set it back to TRUE if some + value will be found. + */ + was_values= TRUE; + DBUG_VOID_RETURN; +} + + +void Item_maxmin_subselect::print(String *str) +{ + str->append(max?"<max>":"<min>", 5); + Item_singlerow_subselect::print(str); +} + + +void Item_singlerow_subselect::reset() +{ + null_value= 1; + if (value) + value->null_value= 1; +} + + +Item_subselect::trans_res +Item_singlerow_subselect::select_transformer(JOIN *join) +{ + if (changed) + return RES_OK; + + SELECT_LEX *select_lex= join->select_lex; + + /* Juggle with current arena only if we're in prepared statement prepare */ + Item_arena *arena= join->thd->current_arena; + + if (!select_lex->master_unit()->first_select()->next_select() && + !select_lex->table_list.elements && + select_lex->item_list.elements == 1 && + !select_lex->item_list.head()->with_sum_func && + /* + We cant change name of Item_field or Item_ref, because it will + prevent it's correct resolving, but we should save name of + removed item => we do not make optimization if top item of + list is field or reference. + TODO: solve above problem + */ + !(select_lex->item_list.head()->type() == FIELD_ITEM || + select_lex->item_list.head()->type() == REF_ITEM) && + /* + switch off this optimisation for prepare statement, + because we do not rollback this changes + TODO: make rollback for it, or special name resolving mode in 5.0. 
+ */ + !arena->is_stmt_prepare() + ) + { + + have_to_be_excluded= 1; + if (join->thd->lex->describe) + { + char warn_buff[MYSQL_ERRMSG_SIZE]; + sprintf(warn_buff, ER(ER_SELECT_REDUCED), select_lex->select_number); + push_warning(join->thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_SELECT_REDUCED, warn_buff); + } + substitution= select_lex->item_list.head(); + /* + as far as we moved content to upper level, field which depend of + 'upper' select is not really dependent => we remove this dependence + */ + substitution->walk(&Item::remove_dependence_processor, + (byte *) select_lex->outer_select()); + /* SELECT without FROM clause can't have WHERE or HAVING clause */ + DBUG_ASSERT(join->conds == 0 && join->having == 0); + return RES_REDUCE; + } + return RES_OK; +} + +void Item_singlerow_subselect::store(uint i, Item *item) +{ + row[i]->store(item); +} + +enum Item_result Item_singlerow_subselect::result_type() const +{ + return engine->type(); +} + +void Item_singlerow_subselect::fix_length_and_dec() +{ + if ((max_columns= engine->cols()) == 1) + { + engine->fix_length_and_dec(row= &value); + } + else + { + if (!(row= (Item_cache**) sql_alloc(sizeof(Item_cache*)*max_columns))) + return; + engine->fix_length_and_dec(row); + value= *row; + } + /* + If there are not tables in subquery then ability to have NULL value + depends on SELECT list (if single row subquery have tables then it + always can be NULL if there are not records fetched). + */ + if (engine->no_tables()) + maybe_null= engine->may_be_null(); +} + +uint Item_singlerow_subselect::cols() +{ + return engine->cols(); +} + +bool Item_singlerow_subselect::check_cols(uint c) +{ + if (c != engine->cols()) + { + my_error(ER_OPERAND_COLUMNS, MYF(0), c); + return 1; + } + return 0; +} + +bool Item_singlerow_subselect::null_inside() +{ + for (uint i= 0; i < max_columns ; i++) + { + if (row[i]->null_value) + return 1; + } + return 0; +} + +void Item_singlerow_subselect::bring_value() +{ + exec(); +} + +double Item_singlerow_subselect::val() +{ + DBUG_ASSERT(fixed == 1); + if (!exec() && !value->null_value) + { + null_value= 0; + return value->val(); + } + else + { + reset(); + return 0; + } +} + +longlong Item_singlerow_subselect::val_int() +{ + DBUG_ASSERT(fixed == 1); + if (!exec() && !value->null_value) + { + null_value= 0; + return value->val_int(); + } + else + { + reset(); + return 0; + } +} + +String *Item_singlerow_subselect::val_str (String *str) +{ + if (!exec() && !value->null_value) + { + null_value= 0; + return value->val_str(str); + } + else + { + reset(); + return 0; + } +} + + +Item_exists_subselect::Item_exists_subselect(st_select_lex *select_lex): + Item_subselect() +{ + DBUG_ENTER("Item_exists_subselect::Item_exists_subselect"); + init(select_lex, new select_exists_subselect(this)); + max_columns= UINT_MAX; + null_value= 0; //can't be NULL + maybe_null= 0; //can't be NULL + value= 0; + // We need only 1 row to determinate existence + select_lex->master_unit()->global_parameters->select_limit= 1; + DBUG_VOID_RETURN; +} + + +void Item_exists_subselect::print(String *str) +{ + str->append("exists", 6); + Item_subselect::print(str); +} + + +bool Item_in_subselect::test_limit(SELECT_LEX_UNIT *unit) +{ + if (unit->fake_select_lex && + unit->fake_select_lex->test_limit()) + return(1); + + SELECT_LEX *sl= unit->first_select(); + for (; sl; sl= sl->next_select()) + { + if (sl->test_limit()) + return(1); + } + return(0); +} + +Item_in_subselect::Item_in_subselect(Item * left_exp, + st_select_lex *select_lex): + Item_exists_subselect(), 
optimizer(0), transformed(0), upper_item(0) +{ + DBUG_ENTER("Item_in_subselect::Item_in_subselect"); + left_expr= left_exp; + init(select_lex, new select_exists_subselect(this)); + max_columns= UINT_MAX; + maybe_null= 1; + abort_on_null= 0; + reset(); + //if test_limit will fail then error will be reported to client + test_limit(select_lex->master_unit()); + DBUG_VOID_RETURN; +} + +Item_allany_subselect::Item_allany_subselect(Item * left_exp, + Comp_creator *fn, + st_select_lex *select_lex, + bool all_arg) + :Item_in_subselect(), all(all_arg) +{ + DBUG_ENTER("Item_in_subselect::Item_in_subselect"); + left_expr= left_exp; + func= fn; + init(select_lex, new select_exists_subselect(this)); + max_columns= 1; + abort_on_null= 0; + reset(); + //if test_limit will fail then error will be reported to client + test_limit(select_lex->master_unit()); + DBUG_VOID_RETURN; +} + + +void Item_exists_subselect::fix_length_and_dec() +{ + decimals= 0; + max_length= 1; + max_columns= engine->cols(); +} + +double Item_exists_subselect::val() +{ + DBUG_ASSERT(fixed == 1); + if (exec()) + { + reset(); + return 0; + } + return (double) value; +} + +longlong Item_exists_subselect::val_int() +{ + DBUG_ASSERT(fixed == 1); + if (exec()) + { + reset(); + return 0; + } + return value; +} + +String *Item_exists_subselect::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + if (exec()) + { + reset(); + return 0; + } + str->set(value,&my_charset_bin); + return str; +} + + +double Item_in_subselect::val() +{ + /* + As far as Item_in_subselect called only from Item_in_optimizer this + method should not be used + */ + DBUG_ASSERT(0); + DBUG_ASSERT(fixed == 1); + if (exec()) + { + reset(); + null_value= 1; + return 0; + } + if (was_null && !value) + null_value= 1; + return (double) value; +} + + +longlong Item_in_subselect::val_int() +{ + DBUG_ASSERT(fixed == 1); + if (exec()) + { + reset(); + null_value= 1; + return 0; + } + if (was_null && !value) + null_value= 1; + return value; +} + + +String *Item_in_subselect::val_str(String *str) +{ + /* + As far as Item_in_subselect called only from Item_in_optimizer this + method should not be used + */ + DBUG_ASSERT(0); + DBUG_ASSERT(fixed == 1); + if (exec()) + { + reset(); + null_value= 1; + return 0; + } + if (was_null && !value) + { + null_value= 1; + return 0; + } + str->set(value, &my_charset_bin); + return str; +} + + +/* Rewrite a single-column IN/ALL/ANY subselect. */ + +Item_subselect::trans_res +Item_in_subselect::single_value_transformer(JOIN *join, + Comp_creator *func) +{ + DBUG_ENTER("Item_in_subselect::single_value_transformer"); + + SELECT_LEX *select_lex= join->select_lex; + + /* + Check that the right part of the subselect contains no more than one + column. E.g. in SELECT 1 IN (SELECT * ..) the right part is (SELECT * ...) + */ + if (select_lex->item_list.elements > 1) + { + my_error(ER_OPERAND_COLUMNS, MYF(0), 1); + DBUG_RETURN(RES_ERROR); + } + + /* + If this is an ALL/ANY single-value subselect, try to rewrite it with + a MIN/MAX subselect. We can do that if a possible NULL result of the + subselect can be ignored. + E.g. SELECT * FROM t1 WHERE b > ANY (SELECT a FROM t2) can be rewritten + with SELECT * FROM t1 WHERE b > (SELECT MAX(a) FROM t2). + We can't check that this optimization is safe if it's not a top-level + item of the WHERE clause (e.g. because the WHERE clause can contain IS + NULL/IS NOT NULL functions). If so, we rewrite ALL/ANY with NOT EXISTS + later in this method. 
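To make the rewrite described above concrete, and assuming the subquery returns at least one row and no NULLs (the cases the abort_on_null / top_level() guard lets the optimizer ignore): t1.b > ALL (SELECT a FROM t2) can be answered as t1.b > (SELECT MAX(a) FROM t2), and t1.b < ALL (SELECT a FROM t2) as t1.b < (SELECT MIN(a) FROM t2); the func->l_op() test further down is what chooses between Item_sum_max and Item_sum_min.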
+ */ + if ((abort_on_null || (upper_item && upper_item->top_level())) && + !select_lex->master_unit()->uncacheable && !func->eqne_op()) + { + if (substitution) + { + // It is second (third, ...) SELECT of UNION => All is done + DBUG_RETURN(RES_OK); + } + + Item *subs; + if (!select_lex->group_list.elements && + !select_lex->having && + !select_lex->with_sum_func && + !(select_lex->next_select())) + { + Item_sum_hybrid *item; + if (func->l_op()) + { + /* + (ALL && (> || =>)) || (ANY && (< || =<)) + for ALL condition is inverted + */ + item= new Item_sum_max(*select_lex->ref_pointer_array); + } + else + { + /* + (ALL && (< || =<)) || (ANY && (> || =>)) + for ALL condition is inverted + */ + item= new Item_sum_min(*select_lex->ref_pointer_array); + } + if (upper_item) + upper_item->set_sum_test(item); + *select_lex->ref_pointer_array= item; + { + List_iterator<Item> it(select_lex->item_list); + it++; + it.replace(item); + } + + /* + Item_sum_(max|min) can't substitute other item => we can use 0 as + reference + */ + if (item->fix_fields(thd, join->tables_list, 0)) + DBUG_RETURN(RES_ERROR); + /* we added aggregate function => we have to change statistic */ + count_field_types(&join->tmp_table_param, join->all_fields, 0); + + subs= new Item_singlerow_subselect(select_lex); + } + else + { + Item_maxmin_subselect *item; + // remove LIMIT placed by ALL/ANY subquery + select_lex->master_unit()->global_parameters->select_limit= + HA_POS_ERROR; + subs= item= new Item_maxmin_subselect(this, select_lex, func->l_op()); + if (upper_item) + upper_item->set_sub_test(item); + } + /* fix fields is already called for left expression */ + substitution= func->create(left_expr, subs); + DBUG_RETURN(RES_OK); + } + + if (!substitution) + { + //first call for this unit + SELECT_LEX_UNIT *unit= select_lex->master_unit(); + substitution= optimizer; + + SELECT_LEX *current= thd->lex->current_select, *up; + + thd->lex->current_select= up= current->return_after_parsing(); + //optimizer never use Item **ref => we can pass 0 as parameter + if (!optimizer || optimizer->fix_left(thd, up->get_table_list(), 0)) + { + thd->lex->current_select= current; + DBUG_RETURN(RES_ERROR); + } + thd->lex->current_select= current; + + /* + As far as Item_ref_in_optimizer do not substitude itself on fix_fields + we can use same item for all selects. + */ + expr= new Item_direct_ref((Item**)optimizer->get_cache(), + (char *)"<no matter>", + (char *)in_left_expr_name); + + unit->uncacheable|= UNCACHEABLE_DEPENDENT; + } + + select_lex->uncacheable|= UNCACHEABLE_DEPENDENT; + Item *item; + + item= (Item*) select_lex->item_list.head(); + /* + Add the left part of a subselect to a WHERE or HAVING clause of + the right part, e.g. SELECT 1 IN (SELECT a FROM t1) => + SELECT Item_in_optimizer(1, SELECT a FROM t1 WHERE a=1) + HAVING is used only if the right part contains a SUM function, a GROUP + BY or a HAVING clause. 
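Spelled out for the plain WHERE case handled below, and glossing over the NULL bookkeeping: outer_expr IN (SELECT a FROM t2 WHERE p) is evaluated like a correlated EXISTS (SELECT 1 FROM t2 WHERE p AND outer_expr = a); when column a is nullable and NULL must be reported instead of FALSE, the injected condition gains an OR a IS NULL arm and an Item_is_not_null_test is pushed into HAVING to register seen NULLs, so the subquery can yield NULL rather than FALSE when no real match exists.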
+ */ + if (join->having || select_lex->with_sum_func || + select_lex->group_list.elements) + { + item= func->create(expr, + new Item_ref_null_helper(this, + select_lex->ref_pointer_array, + (char *)"<ref>", + this->full_name())); + /* + AND and comparison functions can't be changed during fix_fields() + we can assign select_lex->having here, and pass 0 as last + argument (reference) to fix_fields() + */ + select_lex->having= join->having= and_items(join->having, item); + select_lex->having_fix_field= 1; + if (join->having->fix_fields(thd, join->tables_list, 0)) + { + select_lex->having_fix_field= 0; + DBUG_RETURN(RES_ERROR); + } + select_lex->having_fix_field= 0; + } + else + { + select_lex->item_list.empty(); + select_lex->item_list.push_back(new Item_int("Not_used", + (longlong) 1, 21)); + select_lex->ref_pointer_array[0]= select_lex->item_list.head(); + if (select_lex->table_list.elements) + { + Item *having= item, *orig_item= item; + item= func->create(expr, item); + if (!abort_on_null && orig_item->maybe_null) + { + having= new Item_is_not_null_test(this, having); + /* + Item_is_not_null_test can't be changed during fix_fields() + we can assign select_lex->having here, and pass 0 as last + argument (reference) to fix_fields() + */ + select_lex->having= + join->having= (join->having ? + new Item_cond_and(having, join->having) : + having); + select_lex->having_fix_field= 1; + if (join->having->fix_fields(thd, join->tables_list, 0)) + { + select_lex->having_fix_field= 0; + DBUG_RETURN(RES_ERROR); + } + select_lex->having_fix_field= 0; + item= new Item_cond_or(item, + new Item_func_isnull(orig_item)); + } + item->name= (char *)in_additional_cond; + /* + AND can't be changed during fix_fields() + we can assign select_lex->having here, and pass 0 as last + argument (reference) to fix_fields() + */ + select_lex->where= join->conds= and_items(join->conds, item); + select_lex->where->top_level_item(); + if (join->conds->fix_fields(thd, join->tables_list, 0)) + DBUG_RETURN(RES_ERROR); + } + else + { + if (select_lex->master_unit()->first_select()->next_select()) + { + /* + comparison functions can't be changed during fix_fields() + we can assign select_lex->having here, and pass 0 as last + argument (reference) to fix_fields() + */ + select_lex->having= + join->having= + func->create(expr, + new Item_null_helper(this, item, + (char *)"<no matter>", + (char *)"<result>")); + select_lex->having_fix_field= 1; + if (join->having->fix_fields(thd, join->tables_list, + 0)) + { + select_lex->having_fix_field= 0; + DBUG_RETURN(RES_ERROR); + } + select_lex->having_fix_field= 0; + } + else + { + // it is single select without tables => possible optimization + item= func->create(left_expr, item); + // fix_field of item will be done in time of substituting + substitution= item; + have_to_be_excluded= 1; + if (thd->lex->describe) + { + char warn_buff[MYSQL_ERRMSG_SIZE]; + sprintf(warn_buff, ER(ER_SELECT_REDUCED), select_lex->select_number); + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_SELECT_REDUCED, warn_buff); + } + DBUG_RETURN(RES_REDUCE); + } + } + } + + DBUG_RETURN(RES_OK); +} + + +Item_subselect::trans_res +Item_in_subselect::row_value_transformer(JOIN *join) +{ + SELECT_LEX *select_lex= join->select_lex; + Item *having_item= 0; + uint cols_num= left_expr->cols(); + bool is_having_used= (join->having || select_lex->with_sum_func || + select_lex->group_list.first || + !select_lex->table_list.elements); + DBUG_ENTER("Item_in_subselect::row_value_transformer"); + + if 
(select_lex->item_list.elements != left_expr->cols()) + { + my_error(ER_OPERAND_COLUMNS, MYF(0), left_expr->cols()); + DBUG_RETURN(RES_ERROR); + } + + if (!substitution) + { + //first call for this unit + SELECT_LEX_UNIT *unit= select_lex->master_unit(); + substitution= optimizer; + + SELECT_LEX *current= thd->lex->current_select, *up; + thd->lex->current_select= up= current->return_after_parsing(); + //optimizer never use Item **ref => we can pass 0 as parameter + if (!optimizer || optimizer->fix_left(thd, up->get_table_list(), 0)) + { + thd->lex->current_select= current; + DBUG_RETURN(RES_ERROR); + } + + // we will refer to apper level cache array => we have to save it in PS + optimizer->keep_top_level_cache(); + + thd->lex->current_select= current; + unit->uncacheable|= UNCACHEABLE_DEPENDENT; + } + + select_lex->uncacheable|= UNCACHEABLE_DEPENDENT; + if (is_having_used) + { + /* + (l1, l2, l3) IN (SELECT v1, v2, v3 ... HAVING having) => + EXISTS (SELECT ... HAVING having and + (l1 = v1 or is null v1) and + (l2 = v2 or is null v2) and + (l3 = v3 or is null v3) and + is_not_null_test(v1) and + is_not_null_test(v2) and + is_not_null_test(v3)) + where is_not_null_test used to register nulls in case if we have + not found matching to return correct NULL value + */ + Item *item_having_part2= 0; + for (uint i= 0; i < cols_num; i++) + { + DBUG_ASSERT(left_expr->fixed && select_lex->ref_pointer_array[i]->fixed); + if (select_lex->ref_pointer_array[i]-> + check_cols(left_expr->el(i)->cols())) + DBUG_RETURN(RES_ERROR); + Item *item_eq= + new Item_func_eq(new + Item_direct_ref((*optimizer->get_cache())-> + addr(i), + (char *)"<no matter>", + (char *)in_left_expr_name), + new + Item_direct_ref(select_lex->ref_pointer_array + i, + (char *)"<no matter>", + (char *)"<list ref>") + ); + Item *item_isnull= + new Item_func_isnull(new + Item_direct_ref( select_lex-> + ref_pointer_array+i, + (char *)"<no matter>", + (char *)"<list ref>") + ); + having_item= + and_items(having_item, + new Item_cond_or(item_eq, item_isnull)); + item_having_part2= + and_items(item_having_part2, + new + Item_is_not_null_test(this, + new + Item_direct_ref(select_lex-> + ref_pointer_array + i, + (char *)"<no matter>", + (char *)"<list ref>") + ) + ); + item_having_part2->top_level_item(); + } + having_item= and_items(having_item, item_having_part2); + having_item->top_level_item(); + } + else + { + /* + (l1, l2, l3) IN (SELECT v1, v2, v3 ... WHERE where) => + EXISTS (SELECT ... WHERE where and + (l1 = v1 or is null v1) and + (l2 = v2 or is null v2) and + (l3 = v3 or is null v3) + HAVING is_not_null_test(v1) and + is_not_null_test(v2) and + is_not_null_test(v3)) + where is_not_null_test register NULLs values but reject rows + + in case when we do not need correct NULL, we have simplier construction: + EXISTS (SELECT ... 
WHERE where and + (l1 = v1) and + (l2 = v2) and + (l3 = v3) + */ + Item *where_item= 0; + for (uint i= 0; i < cols_num; i++) + { + Item *item, *item_isnull; + DBUG_ASSERT(left_expr->fixed && select_lex->ref_pointer_array[i]->fixed); + if (select_lex->ref_pointer_array[i]-> + check_cols(left_expr->el(i)->cols())) + DBUG_RETURN(RES_ERROR); + item= + new Item_func_eq(new + Item_direct_ref((*optimizer->get_cache())-> + addr(i), + (char *)"<no matter>", + (char *)in_left_expr_name), + new + Item_direct_ref( select_lex-> + ref_pointer_array+i, + (char *)"<no matter>", + (char *)"<list ref>") + ); + if (!abort_on_null) + { + having_item= + and_items(having_item, + new + Item_is_not_null_test(this, + new + Item_direct_ref(select_lex-> + ref_pointer_array + i, + (char *)"<no matter>", + (char *)"<list ref>") + ) + ); + item_isnull= new + Item_func_isnull(new + Item_direct_ref( select_lex-> + ref_pointer_array+i, + (char *)"<no matter>", + (char *)"<list ref>") + ); + + item= new Item_cond_or(item, item_isnull); + } + + where_item= and_items(where_item, item); + } + /* + AND can't be changed during fix_fields() + we can assign select_lex->where here, and pass 0 as last + argument (reference) to fix_fields() + */ + select_lex->where= join->conds= and_items(join->conds, where_item); + select_lex->where->top_level_item(); + if (join->conds->fix_fields(thd, join->tables_list, 0)) + DBUG_RETURN(RES_ERROR); + } + if (having_item) + { + bool res; + select_lex->having= join->having= and_items(join->having, having_item); + select_lex->having->top_level_item(); + /* + AND can't be changed during fix_fields() + we can assign select_lex->having here, and pass 0 as last + argument (reference) to fix_fields() + */ + select_lex->having_fix_field= 1; + res= join->having->fix_fields(thd, join->tables_list, 0); + select_lex->having_fix_field= 0; + if (res) + { + DBUG_RETURN(RES_ERROR); + } + } + DBUG_RETURN(RES_OK); +} + + +Item_subselect::trans_res +Item_in_subselect::select_transformer(JOIN *join) +{ + return select_in_like_transformer(join, &eq_creator); +} + + +/* + Prepare IN/ALL/ANY/SOME subquery transformation and call appropriate + transformation function + + SYNOPSIS + Item_in_subselect::select_in_like_transformer() + join JOIN object of transforming subquery + func creator of condition function of subquery + + DESCRIPTION + To decide which transformation procedure (scalar or row) applicable here + we have to call fix_fields() for left expression to be able to call + cols() method on it. Also this method make arena management for + underlying transformation methods. + + RETURN + RES_OK OK + RES_REDUCE OK, and current subquery was reduced during transformation + RES_ERROR Error +*/ + +Item_subselect::trans_res +Item_in_subselect::select_in_like_transformer(JOIN *join, Comp_creator *func) +{ + Item_arena *arena, backup; + SELECT_LEX *current= thd->lex->current_select, *up; + const char *save_where= thd->where; + Item_subselect::trans_res res= RES_ERROR; + bool result; + + DBUG_ENTER("Item_in_subselect::select_in_like_transformer"); + + if (changed) + { + DBUG_RETURN(RES_OK); + } + + thd->where= "IN/ALL/ANY subquery"; + + /* + In some optimisation cases we will not need this Item_in_optimizer + object, but we can't know it here, but here we need address correct + reference on left expresion. 
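The per-column condition that row_value_transformer above pushes into the subquery can be sketched at the SQL-text level as follows; row_in_condition and the string-based approach are invented for the illustration and stand in for the Item_func_eq / Item_cond_or / Item_is_not_null_test trees the real code creates.

    // Illustrative sketch only -- the predicate shape built by the row IN rewrite.
    #include <iostream>
    #include <string>
    #include <vector>

    // (l1,l2,...) IN (SELECT v1,v2,... ...) is checked inside the subquery as
    //   (l1 = v1 [OR v1 IS NULL]) AND (l2 = v2 [OR v2 IS NULL]) AND ...
    // The "OR vi IS NULL" part is only needed when a NULL result has to be
    // distinguished from FALSE (abort_on_null == 0 in the patch).
    std::string row_in_condition(const std::vector<std::string> &left,
                                 const std::vector<std::string> &right,
                                 bool null_aware)
    {
      std::string cond;
      for (size_t i= 0; i < left.size() && i < right.size(); i++)
      {
        std::string c= left[i] + " = " + right[i];
        if (null_aware)
          c= "(" + c + " OR " + right[i] + " IS NULL)";
        cond+= (cond.empty() ? "" : " AND ") + c;
      }
      return cond;
    }

    int main()
    {
      // prints: (l1 = v1 OR v1 IS NULL) AND (l2 = v2 OR v2 IS NULL)
      std::cout << row_in_condition({"l1", "l2"}, {"v1", "v2"}, true) << "\n";
    }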
+  */
+  if (!optimizer)
+  {
+    arena= thd->change_arena_if_needed(&backup);
+    result= (!(optimizer= new Item_in_optimizer(left_expr, this)));
+    if (arena)
+      thd->restore_backup_item_arena(arena, &backup);
+    if (result)
+      goto err;
+  }
+
+  thd->lex->current_select= up= current->return_after_parsing();
+  result= (!left_expr->fixed &&
+           left_expr->fix_fields(thd, up->get_table_list(),
+                                  optimizer->arguments()));
+  /* fix_fields() can change the reference to left_expr; we have to reassign it */
+  left_expr= optimizer->arguments()[0];
+
+  thd->lex->current_select= current;
+  if (result)
+    goto err;
+
+  transformed= 1;
+  arena= thd->change_arena_if_needed(&backup);
+  /*
+    Both transformers call fix_fields() only for Items created inside them,
+    and none of those Items makes permanent changes in the current item arena,
+    which allows us to call them with a changed arena (if we do not know the
+    nature of an Item, we have to call fix_fields() for it only with the
+    original arena to avoid a memory leak)
+  */
+  if (left_expr->cols() == 1)
+    res= single_value_transformer(join, func);
+  else
+  {
+    /* we do not support row operations for ALL/ANY/SOME */
+    if (func != &eq_creator)
+    {
+      if (arena)
+        thd->restore_backup_item_arena(arena, &backup);
+      my_error(ER_OPERAND_COLUMNS, MYF(0), 1);
+      DBUG_RETURN(RES_ERROR);
+    }
+    res= row_value_transformer(join);
+  }
+  if (arena)
+    thd->restore_backup_item_arena(arena, &backup);
+err:
+  thd->where= save_where;
+  DBUG_RETURN(res);
+}
+
+
+void Item_in_subselect::print(String *str)
+{
+  if (transformed)
+    str->append("<exists>", 8);
+  else
+  {
+    left_expr->print(str);
+    str->append(" in ", 4);
+  }
+  Item_subselect::print(str);
+}
+
+
+Item_subselect::trans_res
+Item_allany_subselect::select_transformer(JOIN *join)
+{
+  transformed= 1;
+  if (upper_item)
+    upper_item->show= 1;
+  return select_in_like_transformer(join, func);
+}
+
+
+void Item_allany_subselect::print(String *str)
+{
+  if (transformed)
+    str->append("<exists>", 8);
+  else
+  {
+    left_expr->print(str);
+    str->append(' ');
+    str->append(func->symbol(all));
+    str->append(all ?
" all " : " any ", 5); + } + Item_subselect::print(str); +} + + +subselect_single_select_engine:: +subselect_single_select_engine(st_select_lex *select, + select_subselect *result, + Item_subselect *item) + :subselect_engine(item, result), + prepared(0), optimized(0), executed(0), join(0) +{ + select_lex= select; + SELECT_LEX_UNIT *unit= select_lex->master_unit(); + unit->offset_limit_cnt= unit->global_parameters->offset_limit; + unit->select_limit_cnt= unit->global_parameters->select_limit+ + unit->global_parameters ->offset_limit; + if (unit->select_limit_cnt < unit->global_parameters->select_limit) + unit->select_limit_cnt= HA_POS_ERROR; // no limit + if (unit->select_limit_cnt == HA_POS_ERROR) + select_lex->options&= ~OPTION_FOUND_ROWS; + unit->item= item; + this->select_lex= select_lex; +} + + +void subselect_single_select_engine::cleanup() +{ + DBUG_ENTER("subselect_single_select_engine::cleanup"); + prepared= optimized= executed= 0; + join= 0; + result->cleanup(); + DBUG_VOID_RETURN; +} + + +void subselect_union_engine::cleanup() +{ + DBUG_ENTER("subselect_union_engine::cleanup"); + unit->reinit_exec_mechanism(); + result->cleanup(); + DBUG_VOID_RETURN; +} + + +void subselect_uniquesubquery_engine::cleanup() +{ + DBUG_ENTER("subselect_uniquesubquery_engine::cleanup"); + /* + subselect_uniquesubquery_engine have not 'result' assigbed, so we do not + cleanup() it + */ + DBUG_VOID_RETURN; +} + + +subselect_union_engine::subselect_union_engine(st_select_lex_unit *u, + select_subselect *result_arg, + Item_subselect *item_arg) + :subselect_engine(item_arg, result_arg) +{ + unit= u; + if (!result_arg) //out of memory + current_thd->fatal_error(); + unit->item= item_arg; +} + + +int subselect_single_select_engine::prepare() +{ + if (prepared) + return 0; + join= new JOIN(thd, select_lex->item_list, + select_lex->options | SELECT_NO_UNLOCK, result); + if (!join || !result) + { + thd->fatal_error(); //out of memory + return 1; + } + prepared= 1; + SELECT_LEX *save_select= thd->lex->current_select; + thd->lex->current_select= select_lex; + if (join->prepare(&select_lex->ref_pointer_array, + (TABLE_LIST*) select_lex->table_list.first, + select_lex->with_wild, + select_lex->where, + select_lex->order_list.elements + + select_lex->group_list.elements, + (ORDER*) select_lex->order_list.first, + (ORDER*) select_lex->group_list.first, + select_lex->having, + (ORDER*) 0, select_lex, + select_lex->master_unit())) + return 1; + thd->lex->current_select= save_select; + return 0; +} + +int subselect_union_engine::prepare() +{ + return unit->prepare(thd, result, SELECT_NO_UNLOCK, ""); +} + +int subselect_uniquesubquery_engine::prepare() +{ + //this never should be called + DBUG_ASSERT(0); + return 1; +} + +static Item_result set_row(List<Item> &item_list, Item *item, + Item_cache **row, bool *maybe_null) +{ + Item_result res_type= STRING_RESULT; + Item *sel_item; + List_iterator_fast<Item> li(item_list); + for (uint i= 0; (sel_item= li++); i++) + { + item->max_length= sel_item->max_length; + res_type= sel_item->result_type(); + item->decimals= sel_item->decimals; + *maybe_null= sel_item->maybe_null; + if (!(row[i]= Item_cache::get_cache(res_type))) + return STRING_RESULT; // we should return something + row[i]->setup(sel_item); + } + if (item_list.elements > 1) + res_type= ROW_RESULT; + return res_type; +} + +void subselect_single_select_engine::fix_length_and_dec(Item_cache **row) +{ + DBUG_ASSERT(row || select_lex->item_list.elements==1); + res_type= set_row(select_lex->item_list, item, row, 
&maybe_null); + item->collation.set(row[0]->collation); + if (cols() != 1) + maybe_null= 0; +} + +void subselect_union_engine::fix_length_and_dec(Item_cache **row) +{ + DBUG_ASSERT(row || unit->first_select()->item_list.elements==1); + + if (unit->first_select()->item_list.elements == 1) + { + res_type= set_row(unit->types, item, row, &maybe_null); + item->collation.set(row[0]->collation); + } + else + { + bool fake= 0; + res_type= set_row(unit->types, item, row, &fake); + } +} + +void subselect_uniquesubquery_engine::fix_length_and_dec(Item_cache **row) +{ + //this never should be called + DBUG_ASSERT(0); +} + +int subselect_single_select_engine::exec() +{ + DBUG_ENTER("subselect_single_select_engine::exec"); + char const *save_where= join->thd->where; + SELECT_LEX *save_select= join->thd->lex->current_select; + join->thd->lex->current_select= select_lex; + if (!optimized) + { + optimized=1; + if (join->optimize()) + { + join->thd->where= save_where; + executed= 1; + join->thd->lex->current_select= save_select; + DBUG_RETURN(join->error ? join->error : 1); + } + if (item->engine_changed) + { + DBUG_RETURN(1); + } + } + if (select_lex->uncacheable && executed) + { + if (join->reinit()) + { + join->thd->where= save_where; + join->thd->lex->current_select= save_select; + DBUG_RETURN(1); + } + item->reset(); + item->assigned((executed= 0)); + } + if (!executed) + { + item->reset_value_registration(); + join->exec(); + executed= 1; + join->thd->where= save_where; + join->thd->lex->current_select= save_select; + DBUG_RETURN(join->error||thd->is_fatal_error); + } + join->thd->where= save_where; + join->thd->lex->current_select= save_select; + DBUG_RETURN(0); +} + +int subselect_union_engine::exec() +{ + char const *save_where= unit->thd->where; + int res= unit->exec(); + unit->thd->where= save_where; + return res; +} + + +int subselect_uniquesubquery_engine::exec() +{ + DBUG_ENTER("subselect_uniquesubquery_engine::exec"); + int error; + TABLE *table= tab->table; + for (store_key **copy=tab->ref.key_copy ; *copy ; copy++) + { + if ((tab->ref.key_err= (*copy)->copy()) & 1) + { + table->status= STATUS_NOT_FOUND; + DBUG_RETURN(1); + } + } + + if (!table->file->inited) + table->file->ha_index_init(tab->ref.key); + error= table->file->index_read(table->record[0], + tab->ref.key_buff, + tab->ref.key_length,HA_READ_KEY_EXACT); + if (error && + error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) + error= report_error(table, error); + else + { + error= 0; + table->null_row= 0; + ((Item_in_subselect *) item)->value= (!table->status && + (!cond || cond->val_int()) ? 
1 : + 0); + } + + DBUG_RETURN(error != 0); +} + + +subselect_uniquesubquery_engine::~subselect_uniquesubquery_engine() +{ + /* Tell handler we don't need the index anymore */ + tab->table->file->ha_index_end(); +} + + +int subselect_indexsubquery_engine::exec() +{ + DBUG_ENTER("subselect_indexsubselect_engine::exec"); + int error; + bool null_finding= 0; + TABLE *table= tab->table; + + ((Item_in_subselect *) item)->value= 0; + + if (check_null) + { + /* We need to check for NULL if there wasn't a matching value */ + *tab->ref.null_ref_key= 0; // Search first for not null + ((Item_in_subselect *) item)->was_null= 0; + } + + for (store_key **copy=tab->ref.key_copy ; *copy ; copy++) + { + if ((tab->ref.key_err= (*copy)->copy()) & 1) + { + table->status= STATUS_NOT_FOUND; + DBUG_RETURN(1); + } + } + + if (!table->file->inited) + table->file->ha_index_init(tab->ref.key); + error= table->file->index_read(table->record[0], + tab->ref.key_buff, + tab->ref.key_length,HA_READ_KEY_EXACT); + if (error && + error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) + error= report_error(table, error); + else + { + for (;;) + { + error= 0; + table->null_row= 0; + if (!table->status) + { + if (!cond || cond->val_int()) + { + if (null_finding) + ((Item_in_subselect *) item)->was_null= 1; + else + ((Item_in_subselect *) item)->value= 1; + break; + } + error= table->file->index_next_same(table->record[0], + tab->ref.key_buff, + tab->ref.key_length); + if (error && error != HA_ERR_END_OF_FILE) + { + error= report_error(table, error); + break; + } + } + else + { + if (!check_null || null_finding) + break; /* We don't need to check nulls */ + *tab->ref.null_ref_key= 1; + null_finding= 1; + /* Check if there exists a row with a null value in the index */ + if ((error= (safe_index_read(tab) == 1))) + break; + } + } + } + DBUG_RETURN(error != 0); +} + + +uint subselect_single_select_engine::cols() +{ + DBUG_ASSERT(select_lex->join); // should be called after fix_fields() + return select_lex->join->fields_list.elements; +} + + +uint subselect_union_engine::cols() +{ + DBUG_ASSERT(unit->is_prepared()); // should be called after fix_fields() + return unit->types.elements; +} + + +uint8 subselect_single_select_engine::uncacheable() +{ + return select_lex->uncacheable; +} + + +uint8 subselect_union_engine::uncacheable() +{ + return unit->uncacheable; +} + + +void subselect_single_select_engine::exclude() +{ + select_lex->master_unit()->exclude_level(); +} + +void subselect_union_engine::exclude() +{ + unit->exclude_level(); +} + + +void subselect_uniquesubquery_engine::exclude() +{ + //this never should be called + DBUG_ASSERT(0); +} + + +table_map subselect_engine::calc_const_tables(TABLE_LIST *table) +{ + table_map map= 0; + for (; table; table= table->next) + { + TABLE *tbl= table->table; + if (tbl && tbl->const_table) + map|= tbl->map; + } + return map; +} + + +table_map subselect_single_select_engine::upper_select_const_tables() +{ + return calc_const_tables((TABLE_LIST *) select_lex->outer_select()-> + table_list.first); +} + + +table_map subselect_union_engine::upper_select_const_tables() +{ + return calc_const_tables((TABLE_LIST *) unit->outer_select()-> + table_list.first); +} + + +void subselect_single_select_engine::print(String *str) +{ + select_lex->print(thd, str); +} + + +void subselect_union_engine::print(String *str) +{ + unit->print(str); +} + + +void subselect_uniquesubquery_engine::print(String *str) +{ + str->append("<primary_index_lookup>(", 23); + tab->ref.items[0]->print(str); + 
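The two index-lookup engines above, together with Item_in_optimizer, give IN its three-valued behaviour: probe the index for the value itself, and only if nothing matches (and NULLs matter) probe again for a NULL key so the predicate can come out UNKNOWN rather than FALSE. A compact standalone approximation, with a std::multiset standing in for the index and std::optional<bool> for TRUE/FALSE/UNKNOWN; names are invented, and the left-hand NULL case is folded in here for completeness although the patch handles it in Item_in_optimizer.

    // Illustrative sketch only -- the probe logic, not the storage-engine calls.
    #include <iostream>
    #include <optional>
    #include <set>

    std::optional<bool> in_via_index(const std::multiset<std::optional<int>> &index,
                                     std::optional<int> x, bool check_null)
    {
      if (!x.has_value())                      // NULL IN (...) is never TRUE
      {
        if (index.empty()) return false;       // IN over an empty set is FALSE
        return std::nullopt;                   // otherwise UNKNOWN
      }
      if (index.count(x))                      // first probe: the value itself
        return true;
      if (check_null && index.count(std::nullopt))
        return std::nullopt;                   // second probe: a NULL key -> UNKNOWN
      return false;
    }

    int main()
    {
      std::multiset<std::optional<int>> idx{1, 2, std::nullopt};
      std::cout << in_via_index(idx, 3, true).has_value() << "\n";  // 0: UNKNOWN
      std::cout << *in_via_index(idx, 2, true) << "\n";             // 1: TRUE
    }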
str->append(" in ", 4); + str->append(tab->table->real_name); + KEY *key_info= tab->table->key_info+ tab->ref.key; + str->append(" on ", 4); + str->append(key_info->name); + if (cond) + { + str->append(" where ", 7); + cond->print(str); + } + str->append(')'); +} + + +void subselect_indexsubquery_engine::print(String *str) +{ + str->append("<index_lookup>(", 15); + tab->ref.items[0]->print(str); + str->append(" in ", 4); + str->append(tab->table->real_name); + KEY *key_info= tab->table->key_info+ tab->ref.key; + str->append(" on ", 4); + str->append(key_info->name); + if (check_null) + str->append(" chicking NULL", 14); + if (cond) + { + str->append(" where ", 7); + cond->print(str); + } + str->append(')'); +} + +/* + change select_result object of engine + + SINOPSYS + subselect_single_select_engine::change_result() + si new subselect Item + res new select_result object + + RETURN + 0 OK + -1 error +*/ + +int subselect_single_select_engine::change_item(Item_subselect *si, + select_subselect *res) +{ + item= si; + result= res; + return select_lex->join->change_result(result); +} + + +/* + change select_result object of engine + + SINOPSYS + subselect_single_select_engine::change_result() + si new subselect Item + res new select_result object + + RETURN + 0 OK + -1 error +*/ + +int subselect_union_engine::change_item(Item_subselect *si, + select_subselect *res) +{ + item= si; + int rc= unit->change_result(res, result); + result= res; + return rc; +} + + +/* + change select_result emulation, never should be called + + SINOPSYS + subselect_single_select_engine::change_result() + si new subselect Item + res new select_result object + + RETURN + -1 error +*/ + +int subselect_uniquesubquery_engine::change_item(Item_subselect *si, + select_subselect *res) +{ + DBUG_ASSERT(0); + return -1; +} + + +/* + Report about presence of tables in subquery + + SINOPSYS + subselect_single_select_engine::no_tables() + + RETURN + TRUE there are not tables used in subquery + FALSE there are some tables in subquery +*/ +bool subselect_single_select_engine::no_tables() +{ + return(select_lex->table_list.elements == 0); +} + + +/* + Report about presence of tables in subquery + + SINOPSYS + subselect_union_engine::no_tables() + + RETURN + TRUE there are not tables used in subquery + FALSE there are some tables in subquery +*/ +bool subselect_union_engine::no_tables() +{ + for (SELECT_LEX *sl= unit->first_select(); sl; sl= sl->next_select()) + { + if (sl->table_list.elements) + return FALSE; + } + return TRUE; +} + + +/* + Report about presence of tables in subquery + + SINOPSYS + subselect_uniquesubquery_engine::no_tables() + + RETURN + TRUE there are not tables used in subquery + FALSE there are some tables in subquery +*/ + +bool subselect_uniquesubquery_engine::no_tables() +{ + /* returning value is correct, but this method should never be called */ + return 0; +} diff --git a/sql/item_subselect.h b/sql/item_subselect.h new file mode 100644 index 00000000000..dec32398a80 --- /dev/null +++ b/sql/item_subselect.h @@ -0,0 +1,400 @@ +/* Copyright (C) 2000 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* subselect Item */ + +#ifdef USE_PRAGMA_INTERFACE +#pragma interface /* gcc class implementation */ +#endif + +class st_select_lex; +class st_select_lex_unit; +class JOIN; +class select_subselect; +class subselect_engine; +class Item_bool_func2; +class Statement; + +/* base class for subselects */ + +class Item_subselect :public Item_result_field +{ + my_bool value_assigned; /* value already assigned to subselect */ +protected: + /* thread handler, will be assigned in fix_fields only */ + THD *thd; + /* substitution instead of subselect in case of optimization */ + Item *substitution; + /* unit of subquery */ + st_select_lex_unit *unit; + /* engine that perform execution of subselect (single select or union) */ + subselect_engine *engine; + /* old engine if engine was changed */ + subselect_engine *old_engine; + /* cache of used external tables */ + table_map used_tables_cache; + /* allowed number of columns (1 for single value subqueries) */ + uint max_columns; + /* where subquery is placed */ + enum_parsing_place parsing_place; + /* work with 'substitution' */ + bool have_to_be_excluded; + /* cache of constant state */ + bool const_item_cache; + +public: + /* changed engine indicator */ + bool engine_changed; + /* subquery is transformed */ + bool changed; + + enum trans_res {RES_OK, RES_REDUCE, RES_ERROR}; + enum subs_type {UNKNOWN_SUBS, SINGLEROW_SUBS, + EXISTS_SUBS, IN_SUBS, ALL_SUBS, ANY_SUBS}; + + Item_subselect(); + + virtual subs_type substype() { return UNKNOWN_SUBS; } + + /* + We need this method, because some compilers do not allow 'this' + pointer in constructor initialization list, but we need pass pointer + to subselect Item class to select_subselect classes constructor. + */ + virtual void init (st_select_lex *select_lex, + select_subselect *result); + + ~Item_subselect(); + void cleanup(); + virtual void reset() + { + null_value= 1; + } + virtual trans_res select_transformer(JOIN *join); + bool assigned() { return value_assigned; } + void assigned(bool a) { value_assigned= a; } + enum Type type() const; + bool is_null() + { + val_int(); + return null_value; + } + bool fix_fields(THD *thd, TABLE_LIST *tables, Item **ref); + virtual bool exec(); + virtual void fix_length_and_dec(); + table_map used_tables() const; + bool const_item() const; + inline table_map get_used_tables_cache() { return used_tables_cache; } + inline bool get_const_item_cache() { return const_item_cache; } + Item *get_tmp_table_item(THD *thd); + void update_used_tables(); + void print(String *str); + bool change_engine(subselect_engine *eng) + { + old_engine= engine; + engine= eng; + engine_changed= 1; + return eng == 0; + } + /* + Used by max/min subquery to initialize value presence registration + mechanism. Engine call this method before rexecution query. 
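Stripped of the MySQL specifics, the class above is a strategy pattern: the Item owns an exchangeable engine, every value accessor funnels through exec(), and the optimizer may later swap in a cheaper engine via change_engine(). A generic sketch with invented names and none of the real THD/arena handling:

    // Illustrative sketch only -- the Item/engine split as a plain strategy pattern.
    #include <iostream>
    #include <memory>

    struct Engine                          // cf. subselect_engine
    {
      virtual ~Engine() = default;
      virtual int exec() = 0;              // run the subquery, 0 on success
    };

    struct SingleSelectEngine : Engine { int exec() override { return 0; } };
    struct IndexLookupEngine  : Engine { int exec() override { return 0; } };

    class SubselectItem                    // cf. Item_subselect
    {
      std::unique_ptr<Engine> engine;
      bool engine_changed= false;
    public:
      explicit SubselectItem(std::unique_ptr<Engine> e) : engine(std::move(e)) {}
      // the optimizer may later replace the execution strategy
      void change_engine(std::unique_ptr<Engine> e)
      { engine= std::move(e); engine_changed= true; }
      int exec() { return engine->exec(); }   // val()/val_int()/val_str() call this
    };

    int main()
    {
      SubselectItem item(std::make_unique<SingleSelectEngine>());
      item.change_engine(std::make_unique<IndexLookupEngine>());
      std::cout << item.exec() << "\n";    // 0
    }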
+ */ + virtual void reset_value_registration() {} + + friend class select_subselect; + friend class Item_in_optimizer; + friend bool Item_field::fix_fields(THD *, TABLE_LIST *, Item **); + friend bool Item_ref::fix_fields(THD *, TABLE_LIST *, Item **); +}; + +/* single value subselect */ + +class Item_cache; +class Item_singlerow_subselect :public Item_subselect +{ +protected: + Item_cache *value, **row; +public: + Item_singlerow_subselect(st_select_lex *select_lex); + Item_singlerow_subselect() :Item_subselect(), value(0), row (0) {} + + void cleanup(); + subs_type substype() { return SINGLEROW_SUBS; } + + void reset(); + trans_res select_transformer(JOIN *join); + void store(uint i, Item* item); + double val(); + longlong val_int (); + String *val_str (String *); + enum Item_result result_type() const; + void fix_length_and_dec(); + + uint cols(); + Item* el(uint i) { return my_reinterpret_cast(Item*)(row[i]); } + Item** addr(uint i) { return (Item**)row + i; } + bool check_cols(uint c); + bool null_inside(); + void bring_value(); + + friend class select_singlerow_subselect; +}; + +/* used in static ALL/ANY optimisation */ +class select_max_min_finder_subselect; +class Item_maxmin_subselect :public Item_singlerow_subselect +{ +protected: + bool max; + bool was_values; // Set if we have found at least one row +public: + Item_maxmin_subselect(Item_subselect *parent, + st_select_lex *select_lex, bool max); + void print(String *str); + void cleanup(); + bool any_value() { return was_values; } + void register_value() { was_values= TRUE; } + void reset_value_registration() { was_values= FALSE; } +}; + +/* exists subselect */ + +class Item_exists_subselect :public Item_subselect +{ +protected: + longlong value; /* value of this item (boolean: exists/not-exists) */ + +public: + Item_exists_subselect(st_select_lex *select_lex); + Item_exists_subselect(): Item_subselect() {} + + subs_type substype() { return EXISTS_SUBS; } + void reset() + { + value= 0; + } + + enum Item_result result_type() const { return INT_RESULT;} + longlong val_int(); + double val(); + String *val_str(String*); + void fix_length_and_dec(); + void print(String *str); + + friend class select_exists_subselect; + friend class subselect_uniquesubquery_engine; + friend class subselect_indexsubquery_engine; +}; + +/* IN subselect */ + +class Item_in_subselect :public Item_exists_subselect +{ +protected: + Item *left_expr; + /* + expr & optimizer used in subselect rewriting to store Item for + all JOIN in UNION + */ + Item *expr; + Item_in_optimizer *optimizer; + bool was_null; + bool abort_on_null; + bool transformed; +public: + Item_func_not_all *upper_item; // point on NOT/NOP before ALL/SOME subquery + + Item_in_subselect(Item * left_expr, st_select_lex *select_lex); + Item_in_subselect() + :Item_exists_subselect(), optimizer(0), abort_on_null(0), transformed(0), + upper_item(0) + {} + + subs_type substype() { return IN_SUBS; } + void reset() + { + value= 0; + null_value= 0; + was_null= 0; + } + trans_res select_transformer(JOIN *join); + trans_res select_in_like_transformer(JOIN *join, Comp_creator *func); + trans_res single_value_transformer(JOIN *join, Comp_creator *func); + trans_res row_value_transformer(JOIN * join); + longlong val_int(); + double val(); + String *val_str(String*); + void top_level_item() { abort_on_null=1; } + bool test_limit(st_select_lex_unit *unit); + void print(String *str); + + friend class Item_ref_null_helper; + friend class Item_is_not_null_test; + friend class subselect_indexsubquery_engine; +}; 
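The value/was_null pair kept by Item_in_subselect above encodes the three-valued IN result: a found match gives TRUE, otherwise a NULL seen among the candidates gives UNKNOWN, otherwise FALSE. A small standalone sketch of that rule, with std::optional<bool> modelling the nullable result (names are illustrative, not the MySQL API):

    // Illustrative sketch only -- how value and was_null combine.
    #include <iostream>
    #include <optional>
    #include <vector>

    std::optional<bool> in_result(const std::vector<std::optional<int>> &candidates,
                                  int left)
    {
      bool value= false, was_null= false;    // cf. the members of the same names
      for (const auto &v : candidates)
      {
        if (!v.has_value())
          was_null= true;                    // remember that a NULL was seen
        else if (*v == left)
          value= true;                       // a real match wins
      }
      if (!value && was_null)
        return std::nullopt;                 // no match but a NULL -> UNKNOWN
      return value;                          // TRUE or FALSE
    }

    int main()
    {
      std::vector<std::optional<int>> s{1, std::nullopt, 3};
      std::cout << *in_result(s, 3) << "\n";             // 1 (TRUE)
      std::cout << in_result(s, 7).has_value() << "\n";  // 0 (UNKNOWN)
    }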
+ + +/* ALL/ANY/SOME subselect */ +class Item_allany_subselect :public Item_in_subselect +{ +protected: + Comp_creator *func; + +public: + bool all; + + Item_allany_subselect(Item * left_expr, Comp_creator *f, + st_select_lex *select_lex, bool all); + + // only ALL subquery has upper not + subs_type substype() { return all?ALL_SUBS:ANY_SUBS; } + trans_res select_transformer(JOIN *join); + void print(String *str); +}; + + +class subselect_engine: public Sql_alloc +{ +protected: + select_subselect *result; /* results storage class */ + THD *thd; /* pointer to current THD */ + Item_subselect *item; /* item, that use this engine */ + enum Item_result res_type; /* type of results */ + bool maybe_null; /* may be null (first item in select) */ +public: + + subselect_engine(Item_subselect *si, select_subselect *res) + :thd(0) + { + result= res; + item= si; + res_type= STRING_RESULT; + maybe_null= 0; + } + virtual ~subselect_engine() {}; // to satisfy compiler + virtual void cleanup()= 0; + + // set_thd should be called before prepare() + void set_thd(THD *thd_arg) { thd= thd_arg; } + THD * get_thd() { return thd; } + virtual int prepare()= 0; + virtual void fix_length_and_dec(Item_cache** row)= 0; + virtual int exec()= 0; + virtual uint cols()= 0; /* return number of columnss in select */ + virtual uint8 uncacheable()= 0; /* query is uncacheable */ + enum Item_result type() { return res_type; } + virtual void exclude()= 0; + bool may_be_null() { return maybe_null; }; + virtual table_map upper_select_const_tables()= 0; + static table_map calc_const_tables(TABLE_LIST *); + virtual void print(String *str)= 0; + virtual int change_item(Item_subselect *si, select_subselect *result)= 0; + virtual bool no_tables()= 0; +}; + + +class subselect_single_select_engine: public subselect_engine +{ + my_bool prepared; /* simple subselect is prepared */ + my_bool optimized; /* simple subselect is optimized */ + my_bool executed; /* simple subselect is executed */ + st_select_lex *select_lex; /* corresponding select_lex */ + JOIN * join; /* corresponding JOIN structure */ +public: + subselect_single_select_engine(st_select_lex *select, + select_subselect *result, + Item_subselect *item); + void cleanup(); + int prepare(); + void fix_length_and_dec(Item_cache** row); + int exec(); + uint cols(); + uint8 uncacheable(); + void exclude(); + table_map upper_select_const_tables(); + void print (String *str); + int change_item(Item_subselect *si, select_subselect *result); + bool no_tables(); +}; + + +class subselect_union_engine: public subselect_engine +{ + st_select_lex_unit *unit; /* corresponding unit structure */ +public: + subselect_union_engine(st_select_lex_unit *u, + select_subselect *result, + Item_subselect *item); + void cleanup(); + int prepare(); + void fix_length_and_dec(Item_cache** row); + int exec(); + uint cols(); + uint8 uncacheable(); + void exclude(); + table_map upper_select_const_tables(); + void print (String *str); + int change_item(Item_subselect *si, select_subselect *result); + bool no_tables(); +}; + + +struct st_join_table; +class subselect_uniquesubquery_engine: public subselect_engine +{ +protected: + st_join_table *tab; + Item *cond; +public: + + // constructor can assign THD because it will be called after JOIN::prepare + subselect_uniquesubquery_engine(THD *thd_arg, st_join_table *tab_arg, + Item_subselect *subs, Item *where) + :subselect_engine(subs, 0), tab(tab_arg), cond(where) + { + set_thd(thd_arg); + } + ~subselect_uniquesubquery_engine(); + void cleanup(); + int prepare(); + 
void fix_length_and_dec(Item_cache** row); + int exec(); + uint cols() { return 1; } + uint8 uncacheable() { return UNCACHEABLE_DEPENDENT; } + void exclude(); + table_map upper_select_const_tables() { return 0; } + void print (String *str); + int change_item(Item_subselect *si, select_subselect *result); + bool no_tables(); +}; + + +class subselect_indexsubquery_engine: public subselect_uniquesubquery_engine +{ + bool check_null; +public: + + // constructor can assign THD because it will be called after JOIN::prepare + subselect_indexsubquery_engine(THD *thd, st_join_table *tab_arg, + Item_subselect *subs, Item *where, + bool chk_null) + :subselect_uniquesubquery_engine(thd, tab_arg, subs, where), + check_null(chk_null) + {} + int exec(); + void print (String *str); +}; diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 4121fa65433..4b522cf06fa 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -17,16 +17,15 @@ /* Sum functions (COUNT, MIN...) */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_IMPLEMENTATION #pragma implementation // gcc: Class implementation #endif #include "mysql_priv.h" - Item_sum::Item_sum(List<Item> &list) + :arg_count(list.elements) { - arg_count=list.elements; if ((args=(Item**) sql_alloc(sizeof(Item*)*arg_count))) { uint i=0; @@ -38,36 +37,51 @@ Item_sum::Item_sum(List<Item> &list) args[i++]= item; } } - with_sum_func=1; + mark_as_sum_func(); list.empty(); // Fields are used } +/* + Constructor used in processing select with temporary tebles +*/ + +Item_sum::Item_sum(THD *thd, Item_sum *item): + Item_result_field(thd, item), arg_count(item->arg_count), + quick_group(item->quick_group) +{ + if (arg_count <= 2) + args=tmp_args; + else + if (!(args= (Item**) thd->alloc(sizeof(Item*)*arg_count))) + return; + memcpy(args, item->args, sizeof(Item*)*arg_count); +} + + +void Item_sum::mark_as_sum_func() +{ + current_thd->lex->current_select->with_sum_func= 1; + with_sum_func= 1; +} + + void Item_sum::make_field(Send_field *tmp_field) { if (args[0]->type() == Item::FIELD_ITEM && keep_field_type()) { ((Item_field*) args[0])->field->make_field(tmp_field); + tmp_field->db_name=(char*)""; + tmp_field->org_table_name=tmp_field->table_name=(char*)""; + tmp_field->org_col_name=tmp_field->col_name=name; if (maybe_null) tmp_field->flags&= ~NOT_NULL_FLAG; } else - { - tmp_field->flags=0; - if (!maybe_null) - tmp_field->flags|= NOT_NULL_FLAG; - if (unsigned_flag) - tmp_field->flags |= UNSIGNED_FLAG; - tmp_field->length=max_length; - tmp_field->decimals=decimals; - tmp_field->type=(result_type() == INT_RESULT ? FIELD_TYPE_LONG : - result_type() == REAL_RESULT ? 
FIELD_TYPE_DOUBLE : - FIELD_TYPE_VAR_STRING); - } - tmp_field->table_name=(char*)""; - tmp_field->col_name=name; + init_make_field(tmp_field, field_type()); } + void Item_sum::print(String *str) { str->append(func_name()); @@ -89,14 +103,49 @@ void Item_sum::fix_num_length_and_dec() max_length=float_length(decimals); } +Item *Item_sum::get_tmp_table_item(THD *thd) +{ + Item_sum* sum_item= (Item_sum *) copy_or_same(thd); + if (sum_item && sum_item->result_field) // If not a const sum func + { + Field *result_field_tmp= sum_item->result_field; + for (uint i=0 ; i < sum_item->arg_count ; i++) + { + Item *arg= sum_item->args[i]; + if (!arg->const_item()) + { + if (arg->type() == Item::FIELD_ITEM) + ((Item_field*) arg)->field= result_field_tmp++; + else + sum_item->args[i]= new Item_field(result_field_tmp++); + } + } + } + return sum_item; +} + +bool Item_sum::walk (Item_processor processor, byte *argument) +{ + if (arg_count) + { + Item **arg,**arg_end; + for (arg= args, arg_end= args+arg_count; arg != arg_end; arg++) + { + if ((*arg)->walk(processor, argument)) + return 1; + } + } + return (this->*processor)(argument); +} String * Item_sum_num::val_str(String *str) { + DBUG_ASSERT(fixed == 1); double nr=val(); if (null_value) return 0; - str->set(nr,decimals); + str->set(nr,decimals, &my_charset_bin); return str; } @@ -104,20 +153,23 @@ Item_sum_num::val_str(String *str) String * Item_sum_int::val_str(String *str) { + DBUG_ASSERT(fixed == 1); longlong nr= val_int(); if (null_value) return 0; if (unsigned_flag) - str->set((ulonglong) nr); + str->set((ulonglong) nr, &my_charset_bin); else - str->set(nr); + str->set(nr, &my_charset_bin); return str; } bool -Item_sum_num::fix_fields(THD *thd,TABLE_LIST *tables) +Item_sum_num::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) { + DBUG_ASSERT(fixed == 0); + if (!thd->allow_sum_func) { my_error(ER_INVALID_GROUP_FUNC_USE,MYF(0)); @@ -128,7 +180,7 @@ Item_sum_num::fix_fields(THD *thd,TABLE_LIST *tables) maybe_null=0; for (uint i=0 ; i < arg_count ; i++) { - if (args[i]->fix_fields(thd,tables)) + if (args[i]->fix_fields(thd, tables, args + i) || args[i]->check_cols(1)) return 1; if (decimals < args[i]->decimals) decimals=args[i]->decimals; @@ -139,38 +191,56 @@ Item_sum_num::fix_fields(THD *thd,TABLE_LIST *tables) null_value=1; fix_length_and_dec(); thd->allow_sum_func=1; // Allow group functions + fixed= 1; return 0; } bool -Item_sum_hybrid::fix_fields(THD *thd,TABLE_LIST *tables) +Item_sum_hybrid::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) { - Item *item=args[0]; + DBUG_ASSERT(fixed == 0); + + Item *item= args[0]; if (!thd->allow_sum_func) { my_error(ER_INVALID_GROUP_FUNC_USE,MYF(0)); return 1; } thd->allow_sum_func=0; // No included group funcs - if (item->fix_fields(thd,tables)) + + // 'item' can be changed during fix_fields + if (!item->fixed && + item->fix_fields(thd, tables, args) || + (item= args[0])->check_cols(1)) return 1; - hybrid_type=item->result_type(); + + hybrid_type= item->result_type(); if (hybrid_type == INT_RESULT) + { max_length=20; + } else if (hybrid_type == REAL_RESULT) + { max_length=float_length(decimals); - else + }else + { max_length=item->max_length; + } decimals=item->decimals; /* MIN/MAX can return NULL for empty set indepedent of the used column */ maybe_null= 1; - binary=item->binary; unsigned_flag=item->unsigned_flag; + collation.set(item->collation); result_field=0; null_value=1; fix_length_and_dec(); thd->allow_sum_func=1; // Allow group functions + if (item->type() == Item::FIELD_ITEM) + 
hybrid_field_type= ((Item_field*) item)->field->type(); + else + hybrid_field_type= Item::field_type(); + fixed= 1; return 0; } @@ -179,11 +249,18 @@ Item_sum_hybrid::fix_fields(THD *thd,TABLE_LIST *tables) ** reset and add of sum_func ***********************************************************************/ -void Item_sum_sum::reset() +Item *Item_sum_sum::copy_or_same(THD* thd) +{ + return new (thd->mem_root) Item_sum_sum(thd, this); +} + + +void Item_sum_sum::clear() { - null_value=1; sum=0.0; Item_sum_sum::add(); + null_value=1; sum=0.0; } + bool Item_sum_sum::add() { sum+=args[0]->val(); @@ -192,17 +269,26 @@ bool Item_sum_sum::add() return 0; } + double Item_sum_sum::val() { + DBUG_ASSERT(fixed == 1); return sum; } -void Item_sum_count::reset() +Item *Item_sum_count::copy_or_same(THD* thd) +{ + return new (thd->mem_root) Item_sum_count(thd, this); +} + + +void Item_sum_count::clear() { - count=0; add(); + count= 0; } + bool Item_sum_count::add() { if (!args[0]->maybe_null) @@ -218,18 +304,36 @@ bool Item_sum_count::add() longlong Item_sum_count::val_int() { + DBUG_ASSERT(fixed == 1); return (longlong) count; } + +void Item_sum_count::cleanup() +{ + DBUG_ENTER("Item_sum_count::cleanup"); + Item_sum_int::cleanup(); + used_table_cache= ~(table_map) 0; + DBUG_VOID_RETURN; +} + + /* -** Avgerage + Avgerage */ -void Item_sum_avg::reset() +Item *Item_sum_avg::copy_or_same(THD* thd) { - sum=0.0; count=0; Item_sum_avg::add(); + return new (thd->mem_root) Item_sum_avg(thd, this); } + +void Item_sum_avg::clear() +{ + sum=0.0; count=0; +} + + bool Item_sum_avg::add() { double nr=args[0]->val(); @@ -243,6 +347,7 @@ bool Item_sum_avg::add() double Item_sum_avg::val() { + DBUG_ASSERT(fixed == 1); if (!count) { null_value=1; @@ -254,15 +359,39 @@ double Item_sum_avg::val() /* -** Standard deviation + Standard deviation */ -void Item_sum_std::reset() +double Item_sum_std::val() +{ + DBUG_ASSERT(fixed == 1); + double tmp= Item_sum_variance::val(); + return tmp <= 0.0 ? 0.0 : sqrt(tmp); +} + +Item *Item_sum_std::copy_or_same(THD* thd) +{ + return new (thd->mem_root) Item_sum_std(thd, this); +} + + +/* + Variance +*/ + +Item *Item_sum_variance::copy_or_same(THD* thd) +{ + return new (thd->mem_root) Item_sum_variance(thd, this); +} + + +void Item_sum_variance::clear() { - sum=sum_sqr=0.0; count=0; (void) Item_sum_std::add(); + sum=sum_sqr=0.0; + count=0; } -bool Item_sum_std::add() +bool Item_sum_variance::add() { double nr=args[0]->val(); if (!args[0]->null_value) @@ -274,8 +403,9 @@ bool Item_sum_std::add() return 0; } -double Item_sum_std::val() +double Item_sum_variance::val() { + DBUG_ASSERT(fixed == 1); if (!count) { null_value=1; @@ -285,11 +415,10 @@ double Item_sum_std::val() /* Avoid problems when the precision isn't good enough */ double tmp=ulonglong2double(count); double tmp2=(sum_sqr - sum*sum/tmp)/tmp; - return tmp2 <= 0.0 ? 0.0 : sqrt(tmp2); + return tmp2 <= 0.0 ? 
0.0 : tmp2; } - -void Item_sum_std::reset_field() +void Item_sum_variance::reset_field() { double nr=args[0]->val(); char *res=result_field->ptr; @@ -306,7 +435,7 @@ void Item_sum_std::reset_field() } } -void Item_sum_std::update_field() +void Item_sum_variance::update_field() { double nr,old_nr,old_sqr; longlong field_count; @@ -330,26 +459,44 @@ void Item_sum_std::update_field() /* min & max */ +void Item_sum_hybrid::clear() +{ + sum= 0.0; + sum_int= 0; + value.length(0); + null_value= 1; +} + double Item_sum_hybrid::val() { + DBUG_ASSERT(fixed == 1); + int err; + char *end_not_used; if (null_value) return 0.0; switch (hybrid_type) { case STRING_RESULT: String *res; res=val_str(&str_value); - return res ? atof(res->c_ptr()) : 0.0; + return (res ? my_strntod(res->charset(), (char*) res->ptr(),res->length(), + &end_not_used, &err) : 0.0); case INT_RESULT: if (unsigned_flag) return ulonglong2double(sum_int); return (double) sum_int; case REAL_RESULT: return sum; + case ROW_RESULT: + default: + // This case should never be choosen + DBUG_ASSERT(0); + return 0; } return 0; // Keep compiler happy } longlong Item_sum_hybrid::val_int() { + DBUG_ASSERT(fixed == 1); if (null_value) return 0; if (hybrid_type == INT_RESULT) @@ -361,24 +508,61 @@ longlong Item_sum_hybrid::val_int() String * Item_sum_hybrid::val_str(String *str) { + DBUG_ASSERT(fixed == 1); if (null_value) return 0; switch (hybrid_type) { case STRING_RESULT: return &value; case REAL_RESULT: - str->set(sum,decimals); + str->set(sum,decimals, &my_charset_bin); break; case INT_RESULT: if (unsigned_flag) - str->set((ulonglong) sum_int); + str->set((ulonglong) sum_int, &my_charset_bin); else - str->set((longlong) sum_int); + str->set((longlong) sum_int, &my_charset_bin); + break; + case ROW_RESULT: + default: + // This case should never be choosen + DBUG_ASSERT(0); break; } return str; // Keep compiler happy } + +void Item_sum_hybrid::cleanup() +{ + DBUG_ENTER("Item_sum_hybrid::cleanup"); + Item_sum::cleanup(); + used_table_cache= ~(table_map) 0; + + /* + by default it is TRUE to avoid TRUE reporting by + Item_func_not_all/Item_func_nop_all if this item was never called. + + no_rows_in_result() set it to FALSE if was not results found. + If some results found it will be left unchanged. + */ + was_values= TRUE; + DBUG_VOID_RETURN; +} + +void Item_sum_hybrid::no_rows_in_result() +{ + was_values= FALSE; + clear(); +} + + +Item *Item_sum_min::copy_or_same(THD* thd) +{ + return new (thd->mem_root) Item_sum_min(thd, this); +} + + bool Item_sum_min::add() { switch (hybrid_type) { @@ -386,8 +570,7 @@ bool Item_sum_min::add() { String *result=args[0]->val_str(&tmp_value); if (!args[0]->null_value && - (null_value || - (binary ? stringcmp(&value,result) : sortcmp(&value,result)) > 0)) + (null_value || sortcmp(&value,result,collation.collation) > 0)) { value.copy(*result); null_value=0; @@ -417,11 +600,22 @@ bool Item_sum_min::add() } } break; + case ROW_RESULT: + default: + // This case should never be choosen + DBUG_ASSERT(0); + break; } return 0; } +Item *Item_sum_max::copy_or_same(THD* thd) +{ + return new (thd->mem_root) Item_sum_max(thd, this); +} + + bool Item_sum_max::add() { switch (hybrid_type) { @@ -429,8 +623,7 @@ bool Item_sum_max::add() { String *result=args[0]->val_str(&tmp_value); if (!args[0]->null_value && - (null_value || - (binary ? 
stringcmp(&value,result) : sortcmp(&value,result)) < 0)) + (null_value || sortcmp(&value,result,collation.collation) < 0)) { value.copy(*result); null_value=0; @@ -460,6 +653,11 @@ bool Item_sum_max::add() } } break; + case ROW_RESULT: + default: + // This case should never be choosen + DBUG_ASSERT(0); + break; } return 0; } @@ -469,14 +667,22 @@ bool Item_sum_max::add() longlong Item_sum_bit::val_int() { + DBUG_ASSERT(fixed == 1); return (longlong) bits; } -void Item_sum_bit::reset() + +void Item_sum_bit::clear() +{ + bits= reset_bits; +} + +Item *Item_sum_or::copy_or_same(THD* thd) { - bits=reset_bits; add(); + return new (thd->mem_root) Item_sum_or(thd, this); } + bool Item_sum_or::add() { ulonglong value= (ulonglong) args[0]->val_int(); @@ -485,6 +691,26 @@ bool Item_sum_or::add() return 0; } +Item *Item_sum_xor::copy_or_same(THD* thd) +{ + return new (thd->mem_root) Item_sum_xor(thd, this); +} + + +bool Item_sum_xor::add() +{ + ulonglong value= (ulonglong) args[0]->val_int(); + if (!args[0]->null_value) + bits^=value; + return 0; +} + +Item *Item_sum_and::copy_or_same(THD* thd) +{ + return new (thd->mem_root) Item_sum_and(thd, this); +} + + bool Item_sum_and::add() { ulonglong value= (ulonglong) args[0]->val_int(); @@ -521,7 +747,7 @@ void Item_sum_hybrid::reset_field() if (hybrid_type == STRING_RESULT) { char buff[MAX_FIELD_WIDTH]; - String tmp(buff,sizeof(buff)),*res; + String tmp(buff,sizeof(buff),result_field->charset()),*res; res=args[0]->val_str(&tmp); if (args[0]->null_value) @@ -532,7 +758,7 @@ void Item_sum_hybrid::reset_field() else { result_field->set_notnull(); - result_field->store(res->ptr(),res->length()); + result_field->store(res->ptr(),res->length(),tmp.charset()); } } else if (hybrid_type == INT_RESULT) @@ -705,12 +931,11 @@ Item_sum_hybrid::min_max_update_str_field() if (!args[0]->null_value) { res_str->strip_sp(); - result_field->val_str(&tmp_value,&tmp_value); + result_field->val_str(&tmp_value); if (result_field->is_null() || - (cmp_sign * (binary ? stringcmp(res_str,&tmp_value) : - sortcmp(res_str,&tmp_value)) < 0)) - result_field->store(res_str->ptr(),res_str->length()); + (cmp_sign * sortcmp(res_str,&tmp_value,collation.collation)) < 0) + result_field->store(res_str->ptr(),res_str->length(),res_str->charset()); result_field->set_notnull(); } } @@ -776,6 +1001,7 @@ Item_avg_field::Item_avg_field(Item_sum_avg *item) double Item_avg_field::val() { + // fix_fields() never calls for this Item double nr; longlong count; float8get(nr,field->ptr); @@ -793,14 +1019,27 @@ double Item_avg_field::val() String *Item_avg_field::val_str(String *str) { + // fix_fields() never calls for this Item double nr=Item_avg_field::val(); if (null_value) return 0; - str->set(nr,decimals); + str->set(nr,decimals, &my_charset_bin); return str; } Item_std_field::Item_std_field(Item_sum_std *item) + : Item_variance_field(item) +{ +} + +double Item_std_field::val() +{ + // fix_fields() never calls for this Item + double tmp= Item_variance_field::val(); + return tmp <= 0.0 ? 
0.0 : sqrt(tmp); +} + +Item_variance_field::Item_variance_field(Item_sum_variance *item) { name=item->name; decimals=item->decimals; @@ -809,8 +1048,9 @@ Item_std_field::Item_std_field(Item_sum_std *item) maybe_null=1; } -double Item_std_field::val() +double Item_variance_field::val() { + // fix_fields() never calls for this Item double sum,sum_sqr; longlong count; float8get(sum,field->ptr); @@ -825,15 +1065,16 @@ double Item_std_field::val() null_value=0; double tmp= (double) count; double tmp2=(sum_sqr - sum*sum/tmp)/tmp; - return tmp2 <= 0.0 ? 0.0 : sqrt(tmp2); + return tmp2 <= 0.0 ? 0.0 : tmp2; } -String *Item_std_field::val_str(String *str) +String *Item_variance_field::val_str(String *str) { + // fix_fields() never calls for this Item double nr=val(); if (null_value) return 0; - str->set(nr,decimals); + str->set(nr,decimals, &my_charset_bin); return str; } @@ -843,14 +1084,19 @@ String *Item_std_field::val_str(String *str) #include "sql_select.h" -static int simple_raw_key_cmp(void* arg, byte* key1, byte* key2) +int simple_raw_key_cmp(void* arg, byte* key1, byte* key2) { return memcmp(key1, key2, *(uint*) arg); } -static int simple_str_key_cmp(void* arg, byte* key1, byte* key2) +int simple_str_key_cmp(void* arg, byte* key1, byte* key2) { - return my_sortcmp((char*) key1, (char*) key2, *(uint*) arg); + Item_sum_count_distinct* item = (Item_sum_count_distinct*)arg; + CHARSET_INFO *cs=item->key_charset; + uint len=item->key_length; + return cs->coll->strnncollsp(cs, + (const uchar*) key1, len, + (const uchar*) key2, len); } /* @@ -891,9 +1137,9 @@ int dump_leaf(byte* key, uint32 count __attribute__((unused)), int error; /* The first item->rec_offset bytes are taken care of with - restore_record(table,2) in setup() + restore_record(table,default_values) in setup() */ - memcpy(buf + item->rec_offset, key, item->tree.size_of_element); + memcpy(buf + item->rec_offset, key, item->tree->size_of_element); if ((error = item->table->file->write_row(buf))) { if (error != HA_ERR_FOUND_DUPP_KEY && @@ -904,26 +1150,55 @@ int dump_leaf(byte* key, uint32 count __attribute__((unused)), } -Item_sum_count_distinct::~Item_sum_count_distinct() +void Item_sum_count_distinct::cleanup() { - if (table) - free_tmp_table(current_thd, table); - delete tmp_table_param; - if (use_tree) - delete_tree(&tree); + DBUG_ENTER("Item_sum_count_distinct::cleanup"); + Item_sum_int::cleanup(); + /* + Free table and tree if they belong to this item (if item have not pointer + to original item from which was made copy => it own its objects ) + */ + if (!original) + { + if (table) + { + free_tmp_table(current_thd, table); + table= 0; + } + delete tmp_table_param; + tmp_table_param= 0; + if (use_tree) + { + delete_tree(tree); + use_tree= 0; + } + } + DBUG_VOID_RETURN; } -bool Item_sum_count_distinct::fix_fields(THD *thd,TABLE_LIST *tables) + +/* This is used by rollup to create a separate usable copy of the function */ + +void Item_sum_count_distinct::make_unique() { - if (Item_sum_num::fix_fields(thd,tables) || - !(tmp_table_param= new TMP_TABLE_PARAM)) - return 1; - return 0; + table=0; + original= 0; + use_tree= 0; // to prevent delete_tree call on uninitialized tree + tree= &tree_base; + force_copy_fields= 1; } + bool Item_sum_count_distinct::setup(THD *thd) { List<Item> list; + SELECT_LEX *select_lex= thd->lex->current_select; + if (select_lex->linkage == GLOBAL_OPTIONS_TYPE) + return 1; + + if (!(tmp_table_param= new TMP_TABLE_PARAM)) + return 1; + /* Create a table with an unique key over all parameters */ for (uint i=0; i 
< arg_count ; i++) { @@ -945,9 +1220,11 @@ bool Item_sum_count_distinct::setup(THD *thd) free_tmp_table(thd, table); tmp_table_param->cleanup(); } - if (!(table=create_tmp_table(thd, tmp_table_param, list, (ORDER*) 0, 1, - 0, 0, - current_lex->select->options | thd->options))) + tmp_table_param->force_copy_fields= force_copy_fields; + if (!(table= create_tmp_table(thd, tmp_table_param, list, (ORDER*) 0, 1, + 0, + select_lex->options | thd->options, + HA_POS_ERROR, (char*)""))) return 1; table->file->extra(HA_EXTRA_NO_ROWS); // Don't update rows table->no_rows=1; @@ -960,7 +1237,7 @@ bool Item_sum_count_distinct::setup(THD *thd) void* cmp_arg; // to make things easier for dump_leaf if we ever have to dump to MyISAM - restore_record(table,2); + restore_record(table,default_values); if (table->fields == 1) { @@ -973,14 +1250,22 @@ bool Item_sum_count_distinct::setup(THD *thd) Field* field = table->field[0]; switch(field->type()) { - /* - If we have a string, we must take care of charsets and case - sensitivity - */ case FIELD_TYPE_STRING: case FIELD_TYPE_VAR_STRING: - compare_key = (qsort_cmp2)(field->binary() ? simple_raw_key_cmp: - simple_str_key_cmp); + if (field->binary()) + { + compare_key = (qsort_cmp2)simple_raw_key_cmp; + cmp_arg = (void*) &key_length; + } + else + { + /* + If we have a string, we must take care of charsets and case + sensitivity + */ + compare_key = (qsort_cmp2)simple_str_key_cmp; + cmp_arg = (void*) this; + } break; default: /* @@ -988,11 +1273,12 @@ bool Item_sum_count_distinct::setup(THD *thd) be compared with memcmp */ compare_key = (qsort_cmp2)simple_raw_key_cmp; + cmp_arg = (void*) &key_length; break; } - key_length = field->pack_length(); - cmp_arg = (void*) &key_length; - rec_offset = 1; + key_charset = field->charset(); + key_length = field->pack_length(); + rec_offset = 1; } else // too bad, cannot cheat - there is more than one field { @@ -1025,8 +1311,10 @@ bool Item_sum_count_distinct::setup(THD *thd) } } - init_tree(&tree, min(thd->variables.max_heap_table_size, - thd->variables.sortbuff_size/16), 0, + if (use_tree) + delete_tree(tree); + init_tree(tree, min(thd->variables.max_heap_table_size, + thd->variables.sortbuff_size/16), 0, key_length, compare_key, 0, NULL, cmp_arg); use_tree = 1; @@ -1038,6 +1326,12 @@ bool Item_sum_count_distinct::setup(THD *thd) */ max_elements_in_tree = ((key_length) ? thd->variables.max_heap_table_size/key_length : 1); + + } + if (original) + { + original->table= table; + original->use_tree= use_tree; } return 0; } @@ -1047,25 +1341,31 @@ int Item_sum_count_distinct::tree_to_myisam() { if (create_myisam_from_heap(current_thd, table, tmp_table_param, HA_ERR_RECORD_FILE_FULL, 1) || - tree_walk(&tree, (tree_walk_action)&dump_leaf, (void*)this, + tree_walk(tree, (tree_walk_action)&dump_leaf, (void*)this, left_root_right)) return 1; - delete_tree(&tree); + delete_tree(tree); use_tree = 0; return 0; } -void Item_sum_count_distinct::reset() + +Item *Item_sum_count_distinct::copy_or_same(THD* thd) +{ + return new (thd->mem_root) Item_sum_count_distinct(thd, this); +} + + +void Item_sum_count_distinct::clear() { if (use_tree) - reset_tree(&tree); + reset_tree(tree); else if (table) { table->file->extra(HA_EXTRA_NO_CACHE); table->file->delete_all_rows(); table->file->extra(HA_EXTRA_WRITE_CACHE); } - (void) add(); } bool Item_sum_count_distinct::add() @@ -1086,12 +1386,13 @@ bool Item_sum_count_distinct::add() If the tree got too big, convert to MyISAM, otherwise insert into the tree. 
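The setup()/add() pair of Item_sum_count_distinct follows a "unique tree in memory until a size budget, then spill to disk" pattern: the element budget is derived from max_heap_table_size and the key length, and once it is exceeded the tree is dumped into a MyISAM temporary table with a unique key. A toy sketch of the pattern with invented names, where std::set stands in for the TREE and a bare counter for the on-disk table (no real spill is implemented):

    // Illustrative sketch only -- the threshold logic, not the MyISAM conversion.
    #include <iostream>
    #include <set>
    #include <string>

    class DistinctCounter
    {
      std::set<std::string> tree;        // in-memory unique keys
      size_t spilled= 0;                 // rows handed to the "on-disk" path
      size_t max_elements;               // cf. max_heap_table_size / key_length
      bool use_tree= true;
    public:
      DistinctCounter(size_t mem_budget, size_t key_length)
        : max_elements(key_length ? mem_budget / key_length : 1) {}

      void add(const std::string &key)
      {
        if (use_tree && tree.size() >= max_elements)
          use_tree= false;               // here the real code converts to MyISAM
        if (use_tree)
          tree.insert(key);
        else
          spilled++;                     // a unique key would reject duplicates
      }
      size_t tree_elements() const { return tree.size(); }
    };

    int main()
    {
      DistinctCounter c(/*mem_budget=*/32, /*key_length=*/8);  // budget: 4 keys
      const char *keys[]= {"a", "b", "c", "d", "e"};
      for (const char *k : keys)
        c.add(k);
      std::cout << c.tree_elements() << "\n";   // 4; the fifth key was spilled
    }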
*/ - if (tree.elements_in_tree > max_elements_in_tree) + if (tree->elements_in_tree > max_elements_in_tree) { if (tree_to_myisam()) return 1; } - else if (!tree_insert(&tree, table->record[0] + rec_offset, 0)) + else if (!tree_insert(tree, table->record[0] + rec_offset, 0, + tree->custom_arg)) return 1; } else if ((error=table->file->write_row(table->record[0]))) @@ -1107,16 +1408,26 @@ bool Item_sum_count_distinct::add() return 0; } + longlong Item_sum_count_distinct::val_int() { + DBUG_ASSERT(fixed == 1); if (!table) // Empty query return LL(0); if (use_tree) - return tree.elements_in_tree; + return tree->elements_in_tree; table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK); return table->file->records; } + +void Item_sum_count_distinct::print(String *str) +{ + str->append("count(distinct ", 15); + args[0]->print(str); + str->append(')'); +} + /**************************************************************************** ** Functions to handle dynamic loadable aggregates ** Original source by: Alexis Mikhailov <root@medinf.chuvashia.su> @@ -1126,10 +1437,10 @@ longlong Item_sum_count_distinct::val_int() #ifdef HAVE_DLOPEN -void Item_udf_sum::reset() +void Item_udf_sum::clear() { - DBUG_ENTER("Item_udf_sum::reset"); - udf.reset(&null_value); + DBUG_ENTER("Item_udf_sum::clear"); + udf.clear(); DBUG_VOID_RETURN; } @@ -1140,8 +1451,25 @@ bool Item_udf_sum::add() DBUG_RETURN(0); } +void Item_udf_sum::cleanup() +{ + /* + udf_handler::cleanup() nicely handles case when we have not + original item but one created by copy_or_same() method. + */ + udf.cleanup(); + Item_sum::cleanup(); +} + + +Item *Item_sum_udf_float::copy_or_same(THD* thd) +{ + return new (thd->mem_root) Item_sum_udf_float(thd, this); +} + double Item_sum_udf_float::val() { + DBUG_ASSERT(fixed == 1); DBUG_ENTER("Item_sum_udf_float::val"); DBUG_PRINT("info",("result_type: %d arg_count: %d", args[0]->result_type(), arg_count)); @@ -1150,28 +1478,38 @@ double Item_sum_udf_float::val() String *Item_sum_udf_float::val_str(String *str) { + DBUG_ASSERT(fixed == 1); double nr=val(); if (null_value) return 0; /* purecov: inspected */ - str->set(nr,decimals); + str->set(nr,decimals, &my_charset_bin); return str; } +Item *Item_sum_udf_int::copy_or_same(THD* thd) +{ + return new (thd->mem_root) Item_sum_udf_int(thd, this); +} + + longlong Item_sum_udf_int::val_int() { + DBUG_ASSERT(fixed == 1); DBUG_ENTER("Item_sum_udf_int::val_int"); DBUG_PRINT("info",("result_type: %d arg_count: %d", args[0]->result_type(), arg_count)); DBUG_RETURN(udf.val_int(&null_value)); } + String *Item_sum_udf_int::val_str(String *str) { + DBUG_ASSERT(fixed == 1); longlong nr=val_int(); if (null_value) return 0; - str->set(nr); + str->set(nr, &my_charset_bin); return str; } @@ -1186,8 +1524,16 @@ void Item_sum_udf_str::fix_length_and_dec() DBUG_VOID_RETURN; } + +Item *Item_sum_udf_str::copy_or_same(THD* thd) +{ + return new (thd->mem_root) Item_sum_udf_str(thd, this); +} + + String *Item_sum_udf_str::val_str(String *str) { + DBUG_ASSERT(fixed == 1); DBUG_ENTER("Item_sum_udf_str::str"); String *res=udf.val_str(str,&str_value); null_value = !res; @@ -1195,3 +1541,599 @@ String *Item_sum_udf_str::val_str(String *str) } #endif /* HAVE_DLOPEN */ + + +/***************************************************************************** + GROUP_CONCAT function + + SQL SYNTAX: + GROUP_CONCAT([DISTINCT] expr,... [ORDER BY col [ASC|DESC],...] 
+ [SEPARATOR str_const]) + + concat of values from "group by" operation + + BUGS + DISTINCT and ORDER BY only works if ORDER BY uses all fields and only fields + in expression list + Blobs doesn't work with DISTINCT or ORDER BY +*****************************************************************************/ + +/* + function of sort for syntax: + GROUP_CONCAT(DISTINCT expr,...) +*/ + +int group_concat_key_cmp_with_distinct(void* arg, byte* key1, + byte* key2) +{ + Item_func_group_concat* grp_item= (Item_func_group_concat*)arg; + Item **field_item, **end; + + for (field_item= grp_item->args, end= field_item + grp_item->arg_count_field; + field_item < end; + field_item++) + { + /* + We have to use get_tmp_table_field() instead of + real_item()->get_tmp_table_field() because we want the field in + the temporary table, not the original field + */ + Field *field= (*field_item)->get_tmp_table_field(); + /* + If field_item is a const item then either get_tp_table_field returns 0 + or it is an item over a const table. + */ + if (field && !(*field_item)->const_item()) + { + int res; + uint offset= field->offset(); + if ((res= field->key_cmp(key1 + offset, key2 + offset))) + return res; + } + } + return 0; +} + + +/* + function of sort for syntax: + GROUP_CONCAT(expr,... ORDER BY col,... ) +*/ + +int group_concat_key_cmp_with_order(void* arg, byte* key1, byte* key2) +{ + Item_func_group_concat* grp_item= (Item_func_group_concat*) arg; + ORDER **order_item, **end; + + for (order_item= grp_item->order, end=order_item+ grp_item->arg_count_order; + order_item < end; + order_item++) + { + Item *item= *(*order_item)->item; + /* + We have to use get_tmp_table_field() instead of + real_item()->get_tmp_table_field() because we want the field in + the temporary table, not the original field + */ + Field *field= item->get_tmp_table_field(); + /* + If item is a const item then either get_tp_table_field returns 0 + or it is an item over a const table. + */ + if (field && !item->const_item()) + { + int res; + uint offset= field->offset(); + if ((res= field->key_cmp(key1 + offset, key2 + offset))) + return (*order_item)->asc ? res : -res; + } + } + /* + We can't return 0 because in that case the tree class would remove this + item as double value. This would cause problems for case-changes and + if the the returned values are not the same we do the sort on. + */ + return 1; +} + + +/* + function of sort for syntax: + GROUP_CONCAT(DISTINCT expr,... ORDER BY col,... 
) + + BUG: + This doesn't work in the case when the order by contains data that + is not part of the field list because tree-insert will not notice + the duplicated values when inserting things sorted by ORDER BY +*/ + +int group_concat_key_cmp_with_distinct_and_order(void* arg,byte* key1, + byte* key2) +{ + if (!group_concat_key_cmp_with_distinct(arg,key1,key2)) + return 0; + return(group_concat_key_cmp_with_order(arg,key1,key2)); +} + + +/* + Append data from current leaf to item->result +*/ + +int dump_leaf_key(byte* key, uint32 count __attribute__((unused)), + Item_func_group_concat *item) +{ + char buff[MAX_FIELD_WIDTH]; + String tmp((char *)&buff,sizeof(buff),default_charset_info), tmp2; + + if (item->no_appended) + item->no_appended= FALSE; + else + item->result.append(*item->separator); + + tmp.length(0); + + for (uint i= 0; i < item->arg_count_field; i++) + { + Item *show_item= item->args[i]; + if (!show_item->const_item()) + { + /* + We have to use get_tmp_table_field() instead of + real_item()->get_tmp_table_field() because we want the field in + the temporary table, not the original field + */ + Field *field= show_item->get_tmp_table_field(); + String *res; + char *save_ptr= field->ptr; + DBUG_ASSERT(field->offset() < item->table->reclength); + field->ptr= (char *) key + field->offset(); + res= field->val_str(&tmp,&tmp2); + item->result.append(*res); + field->ptr= save_ptr; + } + else + { + String *res= show_item->val_str(&tmp); + if (res) + item->result.append(*res); + } + } + + /* stop if length of result more than group_concat_max_len */ + if (item->result.length() > item->group_concat_max_len) + { + item->count_cut_values++; + item->result.length(item->group_concat_max_len); + item->warning_for_row= TRUE; + return 1; + } + return 0; +} + + +/* + Constructor of Item_func_group_concat + is_distinct - distinct + is_select - list of expression for show values + is_order - list of sort columns + is_separator - string value of separator +*/ + +Item_func_group_concat::Item_func_group_concat(bool is_distinct, + List<Item> *is_select, + SQL_LIST *is_order, + String *is_separator) + :Item_sum(), tmp_table_param(0), max_elements_in_tree(0), warning(0), + key_length(0), tree_mode(0), distinct(is_distinct), warning_for_row(0), + force_copy_fields(0), + separator(is_separator), tree(&tree_base), table(0), + order(0), tables_list(0), + arg_count_order(0), arg_count_field(0), + count_cut_values(0) +{ + Item *item_select; + Item **arg_ptr; + + original= 0; + quick_group= 0; + mark_as_sum_func(); + order= 0; + group_concat_max_len= current_thd->variables.group_concat_max_len; + + arg_count_field= is_select->elements; + arg_count_order= is_order ? 
is_order->elements : 0; + arg_count= arg_count_field + arg_count_order; + + /* + We need to allocate: + args - arg_count_field+arg_count_order + (for possible order items in temporare tables) + order - arg_count_order + */ + if (!(args= (Item**) sql_alloc(sizeof(Item*) * arg_count + + sizeof(ORDER*)*arg_count_order))) + return; + + order= (ORDER**)(args + arg_count); + + /* fill args items of show and sort */ + List_iterator_fast<Item> li(*is_select); + + for (arg_ptr=args ; (item_select= li++) ; arg_ptr++) + *arg_ptr= item_select; + + if (arg_count_order) + { + ORDER **order_ptr= order; + for (ORDER *order_item= (ORDER*) is_order->first; + order_item != NULL; + order_item= order_item->next) + { + (*order_ptr++)= order_item; + *arg_ptr= *order_item->item; + order_item->item= arg_ptr++; + } + } +} + + +Item_func_group_concat::Item_func_group_concat(THD *thd, + Item_func_group_concat *item) + :Item_sum(thd, item),item_thd(thd), + tmp_table_param(item->tmp_table_param), + max_elements_in_tree(item->max_elements_in_tree), + warning(item->warning), + key_length(item->key_length), + tree_mode(item->tree_mode), + distinct(item->distinct), + warning_for_row(item->warning_for_row), + force_copy_fields(item->force_copy_fields), + separator(item->separator), + tree(item->tree), + table(item->table), + order(item->order), + tables_list(item->tables_list), + group_concat_max_len(item->group_concat_max_len), + arg_count_order(item->arg_count_order), + arg_count_field(item->arg_count_field), + field_list_offset(item->field_list_offset), + count_cut_values(item->count_cut_values), + original(item) +{ + quick_group= item->quick_group; +} + + + +void Item_func_group_concat::cleanup() +{ + THD *thd= current_thd; + + DBUG_ENTER("Item_func_group_concat::cleanup"); + Item_sum::cleanup(); + + /* Adjust warning message to include total number of cut values */ + if (warning) + { + char warn_buff[MYSQL_ERRMSG_SIZE]; + sprintf(warn_buff, ER(ER_CUT_VALUE_GROUP_CONCAT), count_cut_values); + warning->set_msg(thd, warn_buff); + warning= 0; + } + + /* + Free table and tree if they belong to this item (if item have not pointer + to original item from which was made copy => it own its objects ) + */ + if (!original) + { + if (table) + { + free_tmp_table(thd, table); + table= 0; + } + delete tmp_table_param; + tmp_table_param= 0; + if (tree_mode) + { + tree_mode= 0; + delete_tree(tree); + } + } + DBUG_VOID_RETURN; +} + + +Item_func_group_concat::~Item_func_group_concat() +{ +} + + +Item *Item_func_group_concat::copy_or_same(THD* thd) +{ + return new (thd->mem_root) Item_func_group_concat(thd, this); +} + + +void Item_func_group_concat::clear() +{ + result.length(0); + result.copy(); + null_value= TRUE; + warning_for_row= FALSE; + no_appended= TRUE; + if (tree_mode) + reset_tree(tree); +} + + +bool Item_func_group_concat::add() +{ + if (always_null) + return 0; + copy_fields(tmp_table_param); + copy_funcs(tmp_table_param->items_to_copy); + + for (Item **arg= args, **arg_end= args + arg_count_field; + arg < arg_end; arg++) + { + if (!(*arg)->const_item() && + (*arg)->get_tmp_table_field()->is_null_in_record( + (const uchar*) table->record[0])) + return 0; // Skip row if it contains null + } + + null_value= FALSE; + + TREE_ELEMENT *el= 0; // Only for safety + if (tree_mode) + el= tree_insert(tree, table->record[0], 0, tree->custom_arg); + /* + If the row is not a duplicate (el->count == 1) + we can dump the row here in case of GROUP_CONCAT(DISTINCT...) + instead of doing tree traverse later. 
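As a rough, self-contained sketch of what add() and dump_leaf_key() accomplish together: append values with a separator, optionally de-duplicate, and stop growing the result once a configured maximum length is reached. The class below only mimics group_concat_max_len and count_cut_values; it is not the server implementation.

#include <cstdio>
#include <set>
#include <string>

struct GroupConcat {
  std::string result;
  std::set<std::string> seen;   // stands in for the TREE used for DISTINCT
  size_t max_len;
  size_t cut_values= 0;         // analogous to count_cut_values
  bool distinct;
  bool first= true;

  GroupConcat(bool dist, size_t maxlen) : max_len(maxlen), distinct(dist) {}

  void add(const std::string &val)
  {
    if (distinct && !seen.insert(val).second)
      return;                   // duplicate row: skip
    if (result.size() > max_len)
    {
      cut_values++;             // value cut off, remember it for the warning
      return;
    }
    if (!first)
      result+= ",";             // separator
    first= false;
    result+= val;
    if (result.size() > max_len)
      result.resize(max_len);   // truncate like group_concat_max_len
  }
};

int main()
{
  GroupConcat gc(/*distinct=*/true, /*max_len=*/1024);
  const char *vals[]= {"a", "b", "a", "c"};
  for (const char *v : vals)
    gc.add(v);
  std::printf("%s\n", gc.result.c_str());   // prints: a,b,c
  return 0;
}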
+ */ + if (result.length() <= group_concat_max_len && + !warning_for_row && + (!tree_mode || (el->count == 1 && distinct && !arg_count_order))) + dump_leaf_key(table->record[0], 1, this); + + return 0; +} + + +void Item_func_group_concat::reset_field() +{ + DBUG_ASSERT(0); +} + + +bool +Item_func_group_concat::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) +{ + uint i; /* for loop variable */ + DBUG_ASSERT(fixed == 0); + + if (!thd->allow_sum_func) + { + my_error(ER_INVALID_GROUP_FUNC_USE,MYF(0)); + return 1; + } + + thd->allow_sum_func= 0; + maybe_null= 1; + item_thd= thd; + + /* + Fix fields for select list and ORDER clause + */ + + for (i=0 ; i < arg_count ; i++) + { + if ((!args[i]->fixed && + args[i]->fix_fields(thd, tables, args + i)) || + args[i]->check_cols(1)) + return 1; + } + + if (agg_item_charsets(collation, func_name(), + args, arg_count, MY_COLL_ALLOW_CONV)) + return 1; + + result.set_charset(collation.collation); + result_field= 0; + null_value= 1; + max_length= group_concat_max_len; + thd->allow_sum_func= 1; + tables_list= tables; + fixed= 1; + return 0; +} + + +bool Item_func_group_concat::setup(THD *thd) +{ + List<Item> list; + SELECT_LEX *select_lex= thd->lex->current_select; + uint const_fields; + byte *record; + qsort_cmp2 compare_key; + DBUG_ENTER("Item_func_group_concat::setup"); + + if (select_lex->linkage == GLOBAL_OPTIONS_TYPE) + DBUG_RETURN(1); + + if (!(tmp_table_param= new TMP_TABLE_PARAM)) + return 1; + /* We'll convert all blobs to varchar fields in the temporary table */ + tmp_table_param->convert_blob_length= group_concat_max_len; + + /* + push all not constant fields to list and create temp table + */ + const_fields= 0; + always_null= 0; + for (uint i= 0; i < arg_count_field; i++) + { + Item *item= args[i]; + if (list.push_back(item)) + DBUG_RETURN(1); + if (item->const_item()) + { + const_fields++; + (void) item->val_int(); + if (item->null_value) + always_null= 1; + } + } + if (always_null) + DBUG_RETURN(0); + + List<Item> all_fields(list); + if (arg_count_order) + { + bool hidden_group_fields; + setup_group(thd, args, tables_list, list, all_fields, *order, + &hidden_group_fields); + } + + count_field_types(tmp_table_param,all_fields,0); + if (table) + { + /* + We come here when we are getting the result from a temporary table, + not the original tables used in the query + */ + free_tmp_table(thd, table); + tmp_table_param->cleanup(); + } + tmp_table_param->force_copy_fields= force_copy_fields; + /* + We have to create a temporary table to get descriptions of fields + (types, sizes and so on). + + Note that in the table, we first have the ORDER BY fields, then the + field list. + + We need to set set_sum_field in true for storing value of blob in buffer + of a record instead of a pointer of one. 
+ */ + if (!(table=create_tmp_table(thd, tmp_table_param, all_fields, + (ORDER*) 0, 0, TRUE, + select_lex->options | thd->options, + HA_POS_ERROR,(char *) ""))) + DBUG_RETURN(1); + table->file->extra(HA_EXTRA_NO_ROWS); + table->no_rows= 1; + + key_length= table->reclength; + record= table->record[0]; + + /* Offset to first result field in table */ + field_list_offset= table->fields - (list.elements - const_fields); + + if (tree_mode) + delete_tree(tree); + + /* choose function of sort */ + tree_mode= distinct || arg_count_order; + if (tree_mode) + { + if (arg_count_order) + { + if (distinct) + compare_key= (qsort_cmp2) group_concat_key_cmp_with_distinct_and_order; + else + compare_key= (qsort_cmp2) group_concat_key_cmp_with_order; + } + else + { + compare_key= (qsort_cmp2) group_concat_key_cmp_with_distinct; + } + /* + Create a tree of sort. Tree is used for a sort and a remove double + values (according with syntax of the function). If function doesn't + contain DISTINCT and ORDER BY clauses, we don't create this tree. + */ + init_tree(tree, min(thd->variables.max_heap_table_size, + thd->variables.sortbuff_size/16), 0, + key_length, compare_key, 0, NULL, (void*) this); + max_elements_in_tree= (key_length ? + thd->variables.max_heap_table_size/key_length : 1); + }; + + /* + Copy table and tree_mode if they belong to this item (if item have not + pointer to original item from which was made copy => it own its objects) + */ + if (original) + { + original->table= table; + original->tree_mode= tree_mode; + } + DBUG_RETURN(0); +} + + +/* This is used by rollup to create a separate usable copy of the function */ + +void Item_func_group_concat::make_unique() +{ + table=0; + original= 0; + tree_mode= 0; // to prevent delete_tree call on uninitialized tree + tree= &tree_base; + force_copy_fields= 1; +} + + +String* Item_func_group_concat::val_str(String* str) +{ + DBUG_ASSERT(fixed == 1); + if (null_value) + return 0; + if (count_cut_values && !warning) + /* + ER_CUT_VALUE_GROUP_CONCAT needs an argument, but this gets set in + Item_func_group_concat::cleanup(). 
+ */ + warning= push_warning(item_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_CUT_VALUE_GROUP_CONCAT, + ER(ER_CUT_VALUE_GROUP_CONCAT)); + if (result.length()) + return &result; + if (tree_mode) + { + tree_walk(tree, (tree_walk_action)&dump_leaf_key, (void*)this, + left_root_right); + } + return &result; +} + + +void Item_func_group_concat::print(String *str) +{ + str->append("group_concat(", 13); + if (distinct) + str->append("distinct ", 9); + for (uint i= 0; i < arg_count_field; i++) + { + if (i) + str->append(','); + args[i]->print(str); + } + if (arg_count_order) + { + str->append(" order by ", 10); + for (uint i= 0 ; i < arg_count_order ; i++) + { + if (i) + str->append(','); + (*order[i]->item)->print(str); + } + } + str->append(" separator \'", 12); + str->append(*separator); + str->append("\')", 2); +} diff --git a/sql/item_sum.h b/sql/item_sum.h index 802e3f1ba45..0cc2a20faa3 100644 --- a/sql/item_sum.h +++ b/sql/item_sum.h @@ -17,7 +17,7 @@ /* classes for sum functions */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_INTERFACE #pragma interface /* gcc class implementation */ #endif @@ -26,34 +26,40 @@ class Item_sum :public Item_result_field { public: - enum Sumfunctype {COUNT_FUNC,COUNT_DISTINCT_FUNC,SUM_FUNC,AVG_FUNC,MIN_FUNC, - MAX_FUNC, UNIQUE_USERS_FUNC,STD_FUNC,SUM_BIT_FUNC, - UDF_SUM_FUNC }; + enum Sumfunctype + { COUNT_FUNC,COUNT_DISTINCT_FUNC,SUM_FUNC,AVG_FUNC,MIN_FUNC, + MAX_FUNC, UNIQUE_USERS_FUNC,STD_FUNC,VARIANCE_FUNC,SUM_BIT_FUNC, + UDF_SUM_FUNC, GROUP_CONCAT_FUNC + }; - Item **args,*tmp_args[2]; + Item **args, *tmp_args[2]; uint arg_count; bool quick_group; /* If incremental update of fields */ - Item_sum() : arg_count(0),quick_group(1) { with_sum_func=1; } - Item_sum(Item *a) :quick_group(1) + void mark_as_sum_func(); + Item_sum() :arg_count(0), quick_group(1) + { + mark_as_sum_func(); + } + Item_sum(Item *a) + :args(tmp_args), arg_count(1), quick_group(1) { - arg_count=1; - args=tmp_args; args[0]=a; - with_sum_func = 1; + mark_as_sum_func(); } - Item_sum( Item *a, Item *b ) :quick_group(1) + Item_sum( Item *a, Item *b ) + :args(tmp_args), arg_count(2), quick_group(1) { - arg_count=2; - args=tmp_args; args[0]=a; args[1]=b; - with_sum_func=1; + mark_as_sum_func(); } Item_sum(List<Item> &list); - ~Item_sum() { result_field=0; } + //Copy constructor, need to perform subselects with temporary tables + Item_sum(THD *thd, Item_sum *item); enum Type type() const { return SUM_FUNC_ITEM; } virtual enum Sumfunctype sum_func () const=0; - virtual void reset()=0; + inline bool reset() { clear(); return add(); }; + virtual void clear()= 0; virtual bool add()=0; /* Called when new group is started and results are being saved in @@ -72,7 +78,7 @@ public: virtual void fix_length_and_dec() { maybe_null=1; null_value=1; } virtual const char *func_name() const { return "?"; } virtual Item *result_item(Field *field) - { return new Item_field(field);} + { return new Item_field(field);} table_map used_tables() const { return ~(table_map) 0; } /* Not used */ bool const_item() const { return 0; } bool is_null() { return null_value; } @@ -82,7 +88,10 @@ public: void fix_num_length_and_dec(); void no_rows_in_result() { reset(); } virtual bool setup(THD *thd) {return 0;} - unsigned int size_of() { return sizeof(*this);} + virtual void make_unique() {} + Item *get_tmp_table_item(THD *thd); + + bool walk (Item_processor processor, byte *argument); }; @@ -93,11 +102,12 @@ public: Item_sum_num(Item *item_par) :Item_sum(item_par) {} Item_sum_num(Item *a, Item* b) :Item_sum(a,b) {} Item_sum_num(List<Item> 
&list) :Item_sum(list) {} - bool fix_fields(THD *,struct st_table_list *); - longlong val_int() { return (longlong) val(); } /* Real as default */ + Item_sum_num(THD *thd, Item_sum_num *item) :Item_sum(thd, item) {} + bool fix_fields(THD *, TABLE_LIST *, Item **); + longlong val_int() + { DBUG_ASSERT(fixed == 1); return (longlong) val(); } /* Real as default */ String *val_str(String*str); void reset_field(); - unsigned int size_of() { return sizeof(*this);} }; @@ -106,10 +116,10 @@ class Item_sum_int :public Item_sum_num public: Item_sum_int(Item *item_par) :Item_sum_num(item_par) {} Item_sum_int(List<Item> &list) :Item_sum_num(list) {} - double val() { return (double) val_int(); } + Item_sum_int(THD *thd, Item_sum_int *item) :Item_sum_num(thd, item) {} + double val() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } String *val_str(String*str); enum Item_result result_type () const { return INT_RESULT; } - unsigned int size_of() { return sizeof(*this);} void fix_length_and_dec() { decimals=0; max_length=21; maybe_null=null_value=0; } }; @@ -122,15 +132,17 @@ class Item_sum_sum :public Item_sum_num public: Item_sum_sum(Item *item_par) :Item_sum_num(item_par),sum(0.0) {} + Item_sum_sum(THD *thd, Item_sum_sum *item) + :Item_sum_num(thd, item), sum(item->sum) {} enum Sumfunctype sum_func () const {return SUM_FUNC;} - void reset(); + void clear(); bool add(); double val(); void reset_field(); void update_field(); void no_rows_in_result() {} const char *func_name() const { return "sum"; } - unsigned int size_of() { return sizeof(*this);} + Item *copy_or_same(THD* thd); }; @@ -143,18 +155,23 @@ class Item_sum_count :public Item_sum_int Item_sum_count(Item *item_par) :Item_sum_int(item_par),count(0),used_table_cache(~(table_map) 0) {} + Item_sum_count(THD *thd, Item_sum_count *item) + :Item_sum_int(thd, item), count(item->count), + used_table_cache(item->used_table_cache) + {} table_map used_tables() const { return used_table_cache; } bool const_item() const { return !used_table_cache; } enum Sumfunctype sum_func () const { return COUNT_FUNC; } - void reset(); + void clear(); void no_rows_in_result() { count=0; } bool add(); void make_const(longlong count_arg) { count=count_arg; used_table_cache=0; } longlong val_int(); void reset_field(); + void cleanup(); void update_field(); const char *func_name() const { return "count"; } - unsigned int size_of() { return sizeof(*this);} + Item *copy_or_same(THD* thd); }; @@ -164,51 +181,81 @@ class Item_sum_count_distinct :public Item_sum_int { TABLE *table; table_map used_table_cache; - bool fix_fields(THD *thd,TABLE_LIST *tables); uint32 *field_lengths; TMP_TABLE_PARAM *tmp_table_param; - TREE tree; - uint key_length; + TREE tree_base; + TREE *tree; + bool force_copy_fields; + /* + Following is 0 normal object and pointer to original one for copy + (to correctly free resources) + */ + Item_sum_count_distinct *original; - // calculated based on max_heap_table_size. If reached, - // walk the tree and dump it into MyISAM table + uint key_length; + CHARSET_INFO *key_charset; + + /* + Calculated based on max_heap_table_size. If reached, + walk the tree and dump it into MyISAM table + */ uint max_elements_in_tree; - // the first few bytes of record ( at least one) - // are just markers for deleted and NULLs. We want to skip them since - // they will just bloat the tree without providing any valuable info + /* + The first few bytes of record ( at least one) + are just markers for deleted and NULLs. 
We want to skip them since + they will just bloat the tree without providing any valuable info + */ int rec_offset; - // If there are no blobs, we can use a tree, which - // is faster than heap table. In that case, we still use the table - // to help get things set up, but we insert nothing in it + /* + If there are no blobs, we can use a tree, which + is faster than heap table. In that case, we still use the table + to help get things set up, but we insert nothing in it + */ bool use_tree; bool always_null; // Set to 1 if the result is always NULL int tree_to_myisam(); friend int composite_key_cmp(void* arg, byte* key1, byte* key2); + friend int simple_str_key_cmp(void* arg, byte* key1, byte* key2); + friend int simple_raw_key_cmp(void* arg, byte* key1, byte* key2); friend int dump_leaf(byte* key, uint32 count __attribute__((unused)), Item_sum_count_distinct* item); public: Item_sum_count_distinct(List<Item> &list) - :Item_sum_int(list),table(0),used_table_cache(~(table_map) 0), - tmp_table_param(0),use_tree(0),always_null(0) - { quick_group=0; } - ~Item_sum_count_distinct(); + :Item_sum_int(list), table(0), used_table_cache(~(table_map) 0), + tmp_table_param(0), tree(&tree_base), force_copy_fields(0), original(0), + use_tree(0), always_null(0) + { quick_group= 0; } + Item_sum_count_distinct(THD *thd, Item_sum_count_distinct *item) + :Item_sum_int(thd, item), table(item->table), + used_table_cache(item->used_table_cache), + field_lengths(item->field_lengths), + tmp_table_param(item->tmp_table_param), + tree(item->tree), force_copy_fields(item->force_copy_fields), + original(item), key_length(item->key_length), + max_elements_in_tree(item->max_elements_in_tree), + rec_offset(item->rec_offset), use_tree(item->use_tree), + always_null(item->always_null) + {} + void cleanup(); table_map used_tables() const { return used_table_cache; } enum Sumfunctype sum_func () const { return COUNT_DISTINCT_FUNC; } - void reset(); + void clear(); bool add(); longlong val_int(); void reset_field() { return ;} // Never called void update_field() { return ; } // Never called const char *func_name() const { return "count_distinct"; } bool setup(THD *thd); + void make_unique(); + Item *copy_or_same(THD* thd); void no_rows_in_result() {} - unsigned int size_of() { return sizeof(*this);} + void print(String *str); }; @@ -223,12 +270,11 @@ public: Item_avg_field(Item_sum_avg *item); enum Type type() const { return FIELD_AVG_ITEM; } double val(); - longlong val_int() { return (longlong) val(); } + longlong val_int() { /* can't be fix_fields()ed */ return (longlong) val(); } bool is_null() { (void) val_int(); return null_value; } String *val_str(String*); - void make_field(Send_field *field); + enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE; } void fix_length_and_dec() {} - unsigned int size_of() { return sizeof(*this);} }; @@ -244,9 +290,11 @@ class Item_sum_avg :public Item_sum_num ulonglong count; public: - Item_sum_avg(Item *item_par) :Item_sum_num(item_par),count(0) {} + Item_sum_avg(Item *item_par) :Item_sum_num(item_par), sum(0.0), count(0) {} + Item_sum_avg(THD *thd, Item_sum_avg *item) + :Item_sum_num(thd, item), sum(item->sum), count(item->count) {} enum Sumfunctype sum_func () const {return AVG_FUNC;} - void reset(); + void clear(); bool add(); double val(); void reset_field(); @@ -255,30 +303,41 @@ class Item_sum_avg :public Item_sum_num { return new Item_avg_field(this); } void no_rows_in_result() {} const char *func_name() const { return "avg"; } - unsigned int size_of() { return 
sizeof(*this);} + Item *copy_or_same(THD* thd); }; -class Item_sum_std; +class Item_sum_variance; -class Item_std_field :public Item_result_field +class Item_variance_field :public Item_result_field { public: Field *field; - Item_std_field(Item_sum_std *item); - enum Type type() const { return FIELD_STD_ITEM; } + Item_variance_field(Item_sum_variance *item); + enum Type type() const {return FIELD_VARIANCE_ITEM; } double val(); - longlong val_int() { return (longlong) val(); } + longlong val_int() { /* can't be fix_fields()ed */ return (longlong) val(); } String *val_str(String*); bool is_null() { (void) val_int(); return null_value; } - void make_field(Send_field *field); + enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE; } void fix_length_and_dec() {} - unsigned int size_of() { return sizeof(*this);} }; -class Item_sum_std :public Item_sum_num + +/* + variance(a) = + + = sum (ai - avg(a))^2 / count(a) ) + = sum (ai^2 - 2*ai*avg(a) + avg(a)^2) / count(a) + = (sum(ai^2) - sum(2*ai*avg(a)) + sum(avg(a)^2))/count(a) = + = (sum(ai^2) - 2*avg(a)*sum(a) + count(a)*avg(a)^2)/count(a) = + = (sum(ai^2) - 2*sum(a)*sum(a)/count(a) + count(a)*sum(a)^2/count(a)^2 )/count(a) = + = (sum(ai^2) - 2*sum(a)^2/count(a) + sum(a)^2/count(a) )/count(a) = + = (sum(ai^2) - sum(a)^2/count(a))/count(a) +*/ + +class Item_sum_variance : public Item_sum_num { - double sum; - double sum_sqr; + double sum, sum_sqr; ulonglong count; void fix_length_and_dec() { @@ -287,19 +346,51 @@ class Item_sum_std :public Item_sum_num } public: - Item_sum_std(Item *item_par) :Item_sum_num(item_par),count(0) {} - enum Sumfunctype sum_func () const { return STD_FUNC; } - void reset(); + Item_sum_variance(Item *item_par) :Item_sum_num(item_par),count(0) {} + Item_sum_variance(THD *thd, Item_sum_variance *item): + Item_sum_num(thd, item), sum(item->sum), sum_sqr(item->sum_sqr), + count(item->count) {} + enum Sumfunctype sum_func () const { return VARIANCE_FUNC; } + void clear(); bool add(); double val(); void reset_field(); void update_field(); Item *result_item(Field *field) - { return new Item_std_field(this); } - const char *func_name() const { return "std"; } - unsigned int size_of() { return sizeof(*this);} + { return new Item_variance_field(this); } + void no_rows_in_result() {} + const char *func_name() const { return "variance"; } + Item *copy_or_same(THD* thd); +}; + +class Item_sum_std; + +class Item_std_field :public Item_variance_field +{ +public: + Item_std_field(Item_sum_std *item); + enum Type type() const { return FIELD_STD_ITEM; } + double val(); }; +/* + standard_deviation(a) = sqrt(variance(a)) +*/ + +class Item_sum_std :public Item_sum_variance +{ + public: + Item_sum_std(Item *item_par) :Item_sum_variance(item_par) {} + Item_sum_std(THD *thd, Item_sum_std *item) + :Item_sum_variance(thd, item) + {} + enum Sumfunctype sum_func () const { return STD_FUNC; } + double val(); + Item *result_item(Field *field) + { return new Item_std_field(this); } + const char *func_name() const { return "std"; } + Item *copy_or_same(THD* thd); +}; // This class is a string or number function depending on num_func @@ -310,25 +401,29 @@ class Item_sum_hybrid :public Item_sum double sum; longlong sum_int; Item_result hybrid_type; + enum_field_types hybrid_field_type; int cmp_sign; table_map used_table_cache; + bool was_values; // Set if we have found at least one row (for max/min only) public: - Item_sum_hybrid(Item *item_par,int sign) :Item_sum(item_par),cmp_sign(sign), - used_table_cache(~(table_map) 0) - {} - bool 
fix_fields(THD *,struct st_table_list *); + Item_sum_hybrid(Item *item_par,int sign) + :Item_sum(item_par), sum(0.0), sum_int(0), + hybrid_type(INT_RESULT), hybrid_field_type(FIELD_TYPE_LONGLONG), + cmp_sign(sign), used_table_cache(~(table_map) 0), was_values(TRUE) + { collation.set(&my_charset_bin); } + Item_sum_hybrid(THD *thd, Item_sum_hybrid *item): + Item_sum(thd, item), value(item->value), + sum(item->sum), sum_int(item->sum_int), hybrid_type(item->hybrid_type), + hybrid_field_type(item->hybrid_field_type),cmp_sign(item->cmp_sign), + used_table_cache(item->used_table_cache), + was_values(TRUE) + { collation.set(item->collation); } + bool fix_fields(THD *, TABLE_LIST *, Item **); table_map used_tables() const { return used_table_cache; } bool const_item() const { return !used_table_cache; } - void reset() - { - sum=0.0; - sum_int=0; - value.length(0); - null_value=1; - add(); - } + void clear(); double val(); longlong val_int(); void reset_field(); @@ -336,11 +431,14 @@ class Item_sum_hybrid :public Item_sum void make_const() { used_table_cache=0; } bool keep_field_type(void) const { return 1; } enum Item_result result_type () const { return hybrid_type; } + enum enum_field_types field_type() const { return hybrid_field_type; } void update_field(); void min_max_update_str_field(); void min_max_update_real_field(); void min_max_update_int_field(); - unsigned int size_of() { return sizeof(*this);} + void cleanup(); + bool any_value() { return was_values; } + void no_rows_in_result(); }; @@ -348,11 +446,12 @@ class Item_sum_min :public Item_sum_hybrid { public: Item_sum_min(Item *item_par) :Item_sum_hybrid(item_par,1) {} + Item_sum_min(THD *thd, Item_sum_min *item) :Item_sum_hybrid(thd, item) {} enum Sumfunctype sum_func () const {return MIN_FUNC;} bool add(); const char *func_name() const { return "min"; } - unsigned int size_of() { return sizeof(*this);} + Item *copy_or_same(THD* thd); }; @@ -360,11 +459,12 @@ class Item_sum_max :public Item_sum_hybrid { public: Item_sum_max(Item *item_par) :Item_sum_hybrid(item_par,-1) {} + Item_sum_max(THD *thd, Item_sum_max *item) :Item_sum_hybrid(thd, item) {} enum Sumfunctype sum_func () const {return MAX_FUNC;} bool add(); const char *func_name() const { return "max"; } - unsigned int size_of() { return sizeof(*this);} + Item *copy_or_same(THD* thd); }; @@ -376,12 +476,13 @@ protected: public: Item_sum_bit(Item *item_par,ulonglong reset_arg) :Item_sum_int(item_par),reset_bits(reset_arg),bits(reset_arg) {} + Item_sum_bit(THD *thd, Item_sum_bit *item): + Item_sum_int(thd, item), reset_bits(item->reset_bits), bits(item->bits) {} enum Sumfunctype sum_func () const {return SUM_BIT_FUNC;} - void reset(); + void clear(); longlong val_int(); void reset_field(); void update_field(); - unsigned int size_of() { return sizeof(*this);} void fix_length_and_dec() { decimals=0; max_length=21; unsigned_flag=1; maybe_null=null_value=0; } }; @@ -391,24 +492,38 @@ class Item_sum_or :public Item_sum_bit { public: Item_sum_or(Item *item_par) :Item_sum_bit(item_par,LL(0)) {} + Item_sum_or(THD *thd, Item_sum_or *item) :Item_sum_bit(thd, item) {} bool add(); const char *func_name() const { return "bit_or"; } - unsigned int size_of() { return sizeof(*this);} + Item *copy_or_same(THD* thd); }; class Item_sum_and :public Item_sum_bit { -public: + public: Item_sum_and(Item *item_par) :Item_sum_bit(item_par, ULONGLONG_MAX) {} + Item_sum_and(THD *thd, Item_sum_and *item) :Item_sum_bit(thd, item) {} bool add(); const char *func_name() const { return "bit_and"; } - unsigned int 
size_of() { return sizeof(*this);} + Item *copy_or_same(THD* thd); }; +class Item_sum_xor :public Item_sum_bit +{ + public: + Item_sum_xor(Item *item_par) :Item_sum_bit(item_par,LL(0)) {} + Item_sum_xor(THD *thd, Item_sum_xor *item) :Item_sum_bit(thd, item) {} + bool add(); + const char *func_name() const { return "bit_xor"; } + Item *copy_or_same(THD* thd); +}; + + /* -** user defined aggregates + User defined aggregates */ + #ifdef HAVE_DLOPEN class Item_udf_sum : public Item_sum @@ -421,20 +536,23 @@ public: Item_udf_sum( udf_func *udf_arg, List<Item> &list ) :Item_sum( list ), udf(udf_arg) { quick_group=0;} - ~Item_udf_sum() {} + Item_udf_sum(THD *thd, Item_udf_sum *item) + :Item_sum(thd, item), udf(item->udf) { udf.not_original= TRUE; } const char *func_name() const { return udf.name(); } - bool fix_fields(THD *thd,struct st_table_list *tables) + bool fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) { + DBUG_ASSERT(fixed == 0); + fixed= 1; return udf.fix_fields(thd,tables,this,this->arg_count,this->args); } enum Sumfunctype sum_func () const { return UDF_SUM_FUNC; } virtual bool have_field_update(void) const { return 0; } - void reset(); + void clear(); bool add(); void reset_field() {}; void update_field() {}; - unsigned int size_of() { return sizeof(*this);} + void cleanup(); }; @@ -444,11 +562,14 @@ class Item_sum_udf_float :public Item_udf_sum Item_sum_udf_float(udf_func *udf_arg) :Item_udf_sum(udf_arg) {} Item_sum_udf_float(udf_func *udf_arg, List<Item> &list) :Item_udf_sum(udf_arg,list) {} - ~Item_sum_udf_float() {} - longlong val_int() { return (longlong) Item_sum_udf_float::val(); } + Item_sum_udf_float(THD *thd, Item_sum_udf_float *item) + :Item_udf_sum(thd, item) {} + longlong val_int() + { DBUG_ASSERT(fixed == 1); return (longlong) Item_sum_udf_float::val(); } double val(); String *val_str(String*str); void fix_length_and_dec() { fix_num_length_and_dec(); } + Item *copy_or_same(THD* thd); }; @@ -458,12 +579,15 @@ public: Item_sum_udf_int(udf_func *udf_arg) :Item_udf_sum(udf_arg) {} Item_sum_udf_int(udf_func *udf_arg, List<Item> &list) :Item_udf_sum(udf_arg,list) {} - ~Item_sum_udf_int() {} + Item_sum_udf_int(THD *thd, Item_sum_udf_int *item) + :Item_udf_sum(thd, item) {} longlong val_int(); - double val() { return (double) Item_sum_udf_int::val_int(); } + double val() + { DBUG_ASSERT(fixed == 1); return (double) Item_sum_udf_int::val_int(); } String *val_str(String*str); enum Item_result result_type () const { return INT_RESULT; } void fix_length_and_dec() { decimals=0; max_length=21; } + Item *copy_or_same(THD* thd); }; @@ -473,20 +597,27 @@ public: Item_sum_udf_str(udf_func *udf_arg) :Item_udf_sum(udf_arg) {} Item_sum_udf_str(udf_func *udf_arg, List<Item> &list) :Item_udf_sum(udf_arg,list) {} - ~Item_sum_udf_str() {} + Item_sum_udf_str(THD *thd, Item_sum_udf_str *item) + :Item_udf_sum(thd, item) {} String *val_str(String *); double val() { - String *res; res=val_str(&str_value); - return res ? atof(res->c_ptr()) : 0.0; + int err; + char *end_not_used; + String *res; + res=val_str(&str_value); + return res ? my_strntod(res->charset(),(char*) res->ptr(),res->length(), + &end_not_used, &err) : 0.0; } longlong val_int() { + int err; String *res; res=val_str(&str_value); - return res ? strtoll(res->c_ptr(),(char**) 0,10) : (longlong) 0; + return res ? 
my_strntoll(res->charset(),res->ptr(),res->length(),10, (char**) 0, &err) : (longlong) 0; } enum Item_result result_type () const { return STRING_RESULT; } void fix_length_and_dec(); + Item *copy_or_same(THD* thd); }; #else /* Dummy functions to get sql_yacc.cc compiled */ @@ -496,10 +627,11 @@ class Item_sum_udf_float :public Item_sum_num public: Item_sum_udf_float(udf_func *udf_arg) :Item_sum_num() {} Item_sum_udf_float(udf_func *udf_arg, List<Item> &list) :Item_sum_num() {} - ~Item_sum_udf_float() {} + Item_sum_udf_float(THD *thd, Item_sum_udf_float *item) + :Item_sum_num(thd, item) {} enum Sumfunctype sum_func () const { return UDF_SUM_FUNC; } - double val() { return 0.0; } - void reset() {} + double val() { DBUG_ASSERT(fixed == 1); return 0.0; } + void clear() {} bool add() { return 0; } void update_field() {} }; @@ -510,11 +642,12 @@ class Item_sum_udf_int :public Item_sum_num public: Item_sum_udf_int(udf_func *udf_arg) :Item_sum_num() {} Item_sum_udf_int(udf_func *udf_arg, List<Item> &list) :Item_sum_num() {} - ~Item_sum_udf_int() {} + Item_sum_udf_int(THD *thd, Item_sum_udf_int *item) + :Item_sum_num(thd, item) {} enum Sumfunctype sum_func () const { return UDF_SUM_FUNC; } - longlong val_int() { return 0; } - double val() { return 0; } - void reset() {} + longlong val_int() { DBUG_ASSERT(fixed == 1); return 0; } + double val() { DBUG_ASSERT(fixed == 1); return 0; } + void clear() {} bool add() { return 0; } void update_field() {} }; @@ -525,16 +658,108 @@ class Item_sum_udf_str :public Item_sum_num public: Item_sum_udf_str(udf_func *udf_arg) :Item_sum_num() {} Item_sum_udf_str(udf_func *udf_arg, List<Item> &list) :Item_sum_num() {} - ~Item_sum_udf_str() {} - String *val_str(String *) { null_value=1; return 0; } - double val() { null_value=1; return 0.0; } - longlong val_int() { null_value=1; return 0; } + Item_sum_udf_str(THD *thd, Item_sum_udf_str *item) + :Item_sum_num(thd, item) {} + String *val_str(String *) + { DBUG_ASSERT(fixed == 1); null_value=1; return 0; } + double val() { DBUG_ASSERT(fixed == 1); null_value=1; return 0.0; } + longlong val_int() { DBUG_ASSERT(fixed == 1); null_value=1; return 0; } enum Item_result result_type () const { return STRING_RESULT; } void fix_length_and_dec() { maybe_null=1; max_length=0; } enum Sumfunctype sum_func () const { return UDF_SUM_FUNC; } - void reset() {} + void clear() {} bool add() { return 0; } void update_field() {} }; #endif /* HAVE_DLOPEN */ + +class MYSQL_ERROR; + +class Item_func_group_concat : public Item_sum +{ + THD *item_thd; + TMP_TABLE_PARAM *tmp_table_param; + uint max_elements_in_tree; + MYSQL_ERROR *warning; + uint key_length; + bool tree_mode; + bool distinct; + bool warning_for_row; + bool always_null; + bool force_copy_fields; + + friend int group_concat_key_cmp_with_distinct(void* arg, byte* key1, + byte* key2); + friend int group_concat_key_cmp_with_order(void* arg, byte* key1, + byte* key2); + friend int group_concat_key_cmp_with_distinct_and_order(void* arg, + byte* key1, + byte* key2); + friend int dump_leaf_key(byte* key, uint32 count __attribute__((unused)), + Item_func_group_concat *group_concat_item); + + public: + String result; + String *separator; + TREE tree_base; + TREE *tree; + TABLE *table; + ORDER **order; + TABLE_LIST *tables_list; + ulong group_concat_max_len; + uint arg_count_order; + uint arg_count_field; + uint field_list_offset; + uint count_cut_values; + bool no_appended; + /* + Following is 0 normal object and pointer to original one for copy + (to correctly free resources) + */ + 
Item_func_group_concat *original; + + Item_func_group_concat(bool is_distinct,List<Item> *is_select, + SQL_LIST *is_order,String *is_separator); + + Item_func_group_concat(THD *thd, Item_func_group_concat *item); + ~Item_func_group_concat(); + void cleanup(); + + enum Sumfunctype sum_func () const {return GROUP_CONCAT_FUNC;} + const char *func_name() const { return "group_concat"; } + virtual Item_result result_type () const { return STRING_RESULT; } + enum_field_types field_type() const + { + if (max_length/collation.collation->mbmaxlen > CONVERT_IF_BIGGER_TO_BLOB) + return FIELD_TYPE_BLOB; + else + return MYSQL_TYPE_VAR_STRING; + } + void clear(); + bool add(); + void reset_field(); + bool fix_fields(THD *, TABLE_LIST *, Item **); + bool setup(THD *thd); + void make_unique(); + virtual void update_field() {} + double val() + { + String *res; res=val_str(&str_value); + return res ? my_atof(res->c_ptr()) : 0.0; + } + longlong val_int() + { + String *res; + char *end_ptr; + int error; + if (!(res= val_str(&str_value))) + return (longlong) 0; + end_ptr= (char*) res->ptr()+ res->length(); + return my_strtoll10(res->ptr(), &end_ptr, &error); + } + String* val_str(String* str); + Item *copy_or_same(THD* thd); + void no_rows_in_result() {} + void print(String *str); +}; diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index b03fd151383..f3d6858755c 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -1,4 +1,4 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2003 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -17,7 +17,7 @@ /* This file defines all time functions */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_IMPLEMENTATION #pragma implementation // gcc: Class implementation #endif @@ -25,40 +25,727 @@ #include <m_ctype.h> #include <time.h> +/* TODO: Move month and days to language files */ + +#define MAX_DAY_NUMBER 3652424L + +static const char *month_names[]= +{ + "January", "February", "March", "April", "May", "June", "July", "August", + "September", "October", "November", "December", NullS +}; + +TYPELIB month_names_typelib= +{ array_elements(month_names)-1,"", month_names, NULL }; + +static const char *day_names[]= +{ + "Monday", "Tuesday", "Wednesday", + "Thursday", "Friday", "Saturday" ,"Sunday", NullS +}; + +TYPELIB day_names_typelib= +{ array_elements(day_names)-1,"", day_names, NULL}; + + /* -** Todo: Move month and days to language files + OPTIMIZATION TODO: + - Replace the switch with a function that should be called for each + date type. + - Remove sprintf and opencode the conversion, like we do in + Field_datetime. + + The reason for this functions existence is that as we don't have a + way to know if a datetime/time value has microseconds in them + we are now only adding microseconds to the output if the + value has microseconds. + + We can't use a standard make_date_time() for this as we don't know + if someone will use %f in the format specifier in which case we would get + the microseconds twice. 
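A minimal sketch of the formatting decision described above: emit the fractional part only when the value actually carries microseconds. SimpleTime and format_time() are illustrative stand-ins for the server's TIME struct and the String-based make_datetime(), assuming a plain snprintf is acceptable for the example.

#include <cstddef>
#include <cstdio>

struct SimpleTime {               // illustrative stand-in for TIME
  unsigned hour, minute, second;
  unsigned long second_part;      // microseconds
  bool neg;
};

static int format_time(const SimpleTime &t, char *buf, std::size_t buf_len)
{
  if (t.second_part)              // TIME_MICROSECOND-style output
    return std::snprintf(buf, buf_len, "%s%02u:%02u:%02u.%06lu",
                         t.neg ? "-" : "", t.hour, t.minute, t.second,
                         t.second_part);
  return std::snprintf(buf, buf_len, "%s%02u:%02u:%02u",   // TIME_ONLY
                       t.neg ? "-" : "", t.hour, t.minute, t.second);
}

int main()
{
  char buf[32];
  SimpleTime t= {10, 5, 30, 123456, false};
  format_time(t, buf, sizeof(buf));
  std::printf("%s\n", buf);       // prints: 10:05:30.123456
  return 0;
}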
*/ -static String month_names[] = { "January", "February", "March", "April", - "May", "June", "July", "August", - "September", "October", "November", "December" }; -static String day_names[] = { "Monday", "Tuesday", "Wednesday", - "Thursday", "Friday", "Saturday" ,"Sunday" }; +static bool make_datetime(date_time_format_types format, TIME *ltime, + String *str) +{ + char *buff; + CHARSET_INFO *cs= &my_charset_bin; + uint length= 30; + + if (str->alloc(length)) + return 1; + buff= (char*) str->ptr(); + + switch (format) { + case TIME_ONLY: + length= cs->cset->snprintf(cs, buff, length, "%s%02d:%02d:%02d", + ltime->neg ? "-" : "", + ltime->hour, ltime->minute, ltime->second); + break; + case TIME_MICROSECOND: + length= cs->cset->snprintf(cs, buff, length, "%s%02d:%02d:%02d.%06d", + ltime->neg ? "-" : "", + ltime->hour, ltime->minute, ltime->second, + ltime->second_part); + break; + case DATE_ONLY: + length= cs->cset->snprintf(cs, buff, length, "%04d-%02d-%02d", + ltime->year, ltime->month, ltime->day); + break; + case DATE_TIME: + length= cs->cset->snprintf(cs, buff, length, + "%04d-%02d-%02d %02d:%02d:%02d", + ltime->year, ltime->month, ltime->day, + ltime->hour, ltime->minute, ltime->second); + break; + case DATE_TIME_MICROSECOND: + length= cs->cset->snprintf(cs, buff, length, + "%04d-%02d-%02d %02d:%02d:%02d.%06d", + ltime->year, ltime->month, ltime->day, + ltime->hour, ltime->minute, ltime->second, + ltime->second_part); + break; + } + + str->length(length); + str->set_charset(cs); + return 0; +} + + +/* + Date formats corresponding to compound %r and %T conversion specifiers + + Note: We should init at least first element of "positions" array + (first member) or hpux11 compiler will die horribly. +*/ +static DATE_TIME_FORMAT time_ampm_format= {{0}, '\0', 0, + {(char *)"%I:%i:%S %p", 11}}; +static DATE_TIME_FORMAT time_24hrs_format= {{0}, '\0', 0, + {(char *)"%H:%i:%S", 8}}; + +/* + Extract datetime value to TIME struct from string value + according to format string. + + SYNOPSIS + extract_date_time() + format date/time format specification + val String to decode + length Length of string + l_time Store result here + cached_timestamp_type + It uses to get an appropriate warning + in the case when the value is truncated. + sub_pattern_end if non-zero then we are parsing string which + should correspond compound specifier (like %T or + %r) and this parameter is pointer to place where + pointer to end of string matching this specifier + should be stored. + NOTE + Possibility to parse strings matching to patterns equivalent to compound + specifiers is mainly intended for use from inside of this function in + order to understand %T and %r conversion specifiers, so number of + conversion specifiers that can be used in such sub-patterns is limited. + Also most of checks are skipped in this case. + + If one adds new format specifiers to this function he should also + consider adding them to get_date_time_result_type() function. 
+ + RETURN + 0 ok + 1 error +*/ + +static bool extract_date_time(DATE_TIME_FORMAT *format, + const char *val, uint length, TIME *l_time, + timestamp_type cached_timestamp_type, + const char **sub_pattern_end) +{ + int weekday= 0, yearday= 0, daypart= 0; + int week_number= -1; + int error= 0; + int strict_week_number_year= -1; + int frac_part; + bool usa_time= 0; + bool sunday_first_n_first_week_non_iso; + bool strict_week_number; + bool strict_week_number_year_type; + const char *val_begin= val; + const char *val_end= val + length; + const char *ptr= format->format.str; + const char *end= ptr + format->format.length; + CHARSET_INFO *cs= &my_charset_bin; + DBUG_ENTER("extract_date_time"); + + LINT_INIT(strict_week_number); + /* Remove valgrind varnings when using gcc 3.3 and -O1 */ + PURIFY_OR_LINT_INIT(strict_week_number_year_type); + PURIFY_OR_LINT_INIT(sunday_first_n_first_week_non_iso); + + if (!sub_pattern_end) + bzero((char*) l_time, sizeof(*l_time)); + + for (; ptr != end && val != val_end; ptr++) + { + + if (*ptr == '%' && ptr+1 != end) + { + int val_len; + char *tmp; + + /* Skip pre-space between each argument */ + while (val != val_end && my_isspace(cs, *val)) + val++; + + val_len= (uint) (val_end - val); + switch (*++ptr) { + /* Year */ + case 'Y': + tmp= (char*) val + min(4, val_len); + l_time->year= (int) my_strtoll10(val, &tmp, &error); + val= tmp; + break; + case 'y': + tmp= (char*) val + min(2, val_len); + l_time->year= (int) my_strtoll10(val, &tmp, &error); + val= tmp; + l_time->year+= (l_time->year < YY_PART_YEAR ? 2000 : 1900); + break; + + /* Month */ + case 'm': + case 'c': + tmp= (char*) val + min(2, val_len); + l_time->month= (int) my_strtoll10(val, &tmp, &error); + val= tmp; + break; + case 'M': + case 'b': + if ((l_time->month= check_word(&month_names_typelib, + val, val_end, &val)) <= 0) + goto err; + break; + /* Day */ + case 'd': + case 'e': + tmp= (char*) val + min(2, val_len); + l_time->day= (int) my_strtoll10(val, &tmp, &error); + val= tmp; + break; + case 'D': + tmp= (char*) val + min(2, val_len); + l_time->day= (int) my_strtoll10(val, &tmp, &error); + /* Skip 'st, 'nd, 'th .. */ + val= tmp + min((int) (end-tmp), 2); + break; + + /* Hour */ + case 'h': + case 'I': + case 'l': + usa_time= 1; + /* fall through */ + case 'k': + case 'H': + tmp= (char*) val + min(2, val_len); + l_time->hour= (int) my_strtoll10(val, &tmp, &error); + val= tmp; + break; + + /* Minute */ + case 'i': + tmp= (char*) val + min(2, val_len); + l_time->minute= (int) my_strtoll10(val, &tmp, &error); + val= tmp; + break; + + /* Second */ + case 's': + case 'S': + tmp= (char*) val + min(2, val_len); + l_time->second= (int) my_strtoll10(val, &tmp, &error); + val= tmp; + break; + + /* Second part */ + case 'f': + tmp= (char*) val_end; + if (tmp - val > 6) + tmp= (char*) val + 6; + l_time->second_part= (int) my_strtoll10(val, &tmp, &error); + frac_part= 6 - (tmp - val); + if (frac_part > 0) + l_time->second_part*= (ulong) log_10_int[frac_part]; + val= tmp; + break; + + /* AM / PM */ + case 'p': + if (val_len < 2 || ! 
usa_time) + goto err; + if (!my_strnncoll(&my_charset_latin1, + (const uchar *) val, 2, + (const uchar *) "PM", 2)) + daypart= 12; + else if (my_strnncoll(&my_charset_latin1, + (const uchar *) val, 2, + (const uchar *) "AM", 2)) + goto err; + val+= 2; + break; + + /* Exotic things */ + case 'W': + case 'a': + if ((weekday= check_word(&day_names_typelib, val, val_end, &val)) <= 0) + goto err; + break; + case 'w': + tmp= (char*) val + 1; + if ((weekday= (int) my_strtoll10(val, &tmp, &error)) < 0 || + weekday >= 7) + goto err; + /* We should use the same 1 - 7 scale for %w as for %W */ + if (!weekday) + weekday= 7; + val= tmp; + break; + case 'j': + tmp= (char*) val + min(val_len, 3); + yearday= (int) my_strtoll10(val, &tmp, &error); + val= tmp; + break; + + /* Week numbers */ + case 'V': + case 'U': + case 'v': + case 'u': + sunday_first_n_first_week_non_iso= (*ptr=='U' || *ptr== 'V'); + strict_week_number= (*ptr=='V' || *ptr=='v'); + tmp= (char*) val + min(val_len, 2); + if ((week_number= (int) my_strtoll10(val, &tmp, &error)) < 0 || + strict_week_number && !week_number || + week_number > 53) + goto err; + val= tmp; + break; + + /* Year used with 'strict' %V and %v week numbers */ + case 'X': + case 'x': + strict_week_number_year_type= (*ptr=='X'); + tmp= (char*) val + min(4, val_len); + strict_week_number_year= (int) my_strtoll10(val, &tmp, &error); + val= tmp; + break; + + /* Time in AM/PM notation */ + case 'r': + error= extract_date_time(&time_ampm_format, val, + (uint)(val_end - val), l_time, + cached_timestamp_type, &val); + break; + + /* Time in 24-hour notation */ + case 'T': + error= extract_date_time(&time_24hrs_format, val, + (uint)(val_end - val), l_time, + cached_timestamp_type, &val); + break; + + /* Conversion specifiers that match classes of characters */ + case '.': + while (my_ispunct(cs, *val) && val != val_end) + val++; + break; + case '@': + while (my_isalpha(cs, *val) && val != val_end) + val++; + break; + case '#': + while (my_isdigit(cs, *val) && val != val_end) + val++; + break; + default: + goto err; + } + if (error) // Error from my_strtoll10 + goto err; + } + else if (!my_isspace(cs, *ptr)) + { + if (*val != *ptr) + goto err; + val++; + } + } + if (usa_time) + { + if (l_time->hour > 12 || l_time->hour < 1) + goto err; + l_time->hour= l_time->hour%12+daypart; + } + + /* + If we are recursively called for parsing string matching compound + specifiers we are already done. + */ + if (sub_pattern_end) + { + *sub_pattern_end= val; + DBUG_RETURN(0); + } + + if (yearday > 0) + { + uint days= calc_daynr(l_time->year,1,1) + yearday - 1; + if (days <= 0 || days >= MAX_DAY_NUMBER) + goto err; + get_date_from_daynr(days,&l_time->year,&l_time->month,&l_time->day); + } + + if (week_number >= 0 && weekday) + { + int days; + uint weekday_b; + + /* + %V,%v require %X,%x resprectively, + %U,%u should be used with %Y and not %X or %x + */ + if (strict_week_number && + (strict_week_number_year < 0 || + strict_week_number_year_type != sunday_first_n_first_week_non_iso) || + !strict_week_number && strict_week_number_year >= 0) + goto err; + + /* Number of days since year 0 till 1st Jan of this year */ + days= calc_daynr((strict_week_number ? 
strict_week_number_year : + l_time->year), + 1, 1); + /* Which day of week is 1st Jan of this year */ + weekday_b= calc_weekday(days, sunday_first_n_first_week_non_iso); + + /* + Below we are going to sum: + 1) number of days since year 0 till 1st day of 1st week of this year + 2) number of days between 1st week and our week + 3) and position of our day in the week + */ + if (sunday_first_n_first_week_non_iso) + { + days+= ((weekday_b == 0) ? 0 : 7) - weekday_b + + (week_number - 1) * 7 + + weekday % 7; + } + else + { + days+= ((weekday_b <= 3) ? 0 : 7) - weekday_b + + (week_number - 1) * 7 + + (weekday - 1); + } + + if (days <= 0 || days >= MAX_DAY_NUMBER) + goto err; + get_date_from_daynr(days,&l_time->year,&l_time->month,&l_time->day); + } + + if (l_time->month > 12 || l_time->day > 31 || l_time->hour > 23 || + l_time->minute > 59 || l_time->second > 59) + goto err; + + if (val != val_end) + { + do + { + if (!my_isspace(&my_charset_latin1,*val)) + { + make_truncated_value_warning(current_thd, val_begin, length, + cached_timestamp_type); + break; + } + } while (++val != val_end); + } + DBUG_RETURN(0); + +err: + DBUG_RETURN(1); +} + /* -** Get a array of positive numbers from a string object. -** Each number is separated by 1 non digit character -** Return error if there is too many numbers. -** If there is too few numbers, assume that the numbers are left out -** from the high end. This allows one to give: -** DAY_TO_SECOND as "D MM:HH:SS", "MM:HH:SS" "HH:SS" or as seconds. + Create a formated date/time value in a string */ -static bool get_interval_info(const char *str,uint length,uint count, - ulonglong *values) +bool make_date_time(DATE_TIME_FORMAT *format, TIME *l_time, + timestamp_type type, String *str) +{ + char intbuff[15]; + uint days_i; + uint hours_i; + uint weekday; + ulong length; + const char *ptr, *end; + + str->length(0); + str->set_charset(&my_charset_bin); + + if (l_time->neg) + str->append("-", 1); + + end= (ptr= format->format.str) + format->format.length; + for (; ptr != end ; ptr++) + { + if (*ptr != '%' || ptr+1 == end) + str->append(*ptr); + else + { + switch (*++ptr) { + case 'M': + if (!l_time->month) + return 1; + str->append(month_names[l_time->month-1]); + break; + case 'b': + if (!l_time->month) + return 1; + str->append(month_names[l_time->month-1],3); + break; + case 'W': + if (type == MYSQL_TIMESTAMP_TIME) + return 1; + weekday= calc_weekday(calc_daynr(l_time->year,l_time->month, + l_time->day),0); + str->append(day_names[weekday]); + break; + case 'a': + if (type == MYSQL_TIMESTAMP_TIME) + return 1; + weekday=calc_weekday(calc_daynr(l_time->year,l_time->month, + l_time->day),0); + str->append(day_names[weekday],3); + break; + case 'D': + if (type == MYSQL_TIMESTAMP_TIME) + return 1; + length= int10_to_str(l_time->day, intbuff, 10) - intbuff; + str->append_with_prefill(intbuff, length, 1, '0'); + if (l_time->day >= 10 && l_time->day <= 19) + str->append("th", 2); + else + { + switch (l_time->day %10) { + case 1: + str->append("st",2); + break; + case 2: + str->append("nd",2); + break; + case 3: + str->append("rd",2); + break; + default: + str->append("th",2); + break; + } + } + break; + case 'Y': + length= int10_to_str(l_time->year, intbuff, 10) - intbuff; + str->append_with_prefill(intbuff, length, 4, '0'); + break; + case 'y': + length= int10_to_str(l_time->year%100, intbuff, 10) - intbuff; + str->append_with_prefill(intbuff, length, 2, '0'); + break; + case 'm': + length= int10_to_str(l_time->month, intbuff, 10) - intbuff; + 
str->append_with_prefill(intbuff, length, 2, '0'); + break; + case 'c': + length= int10_to_str(l_time->month, intbuff, 10) - intbuff; + str->append_with_prefill(intbuff, length, 1, '0'); + break; + case 'd': + length= int10_to_str(l_time->day, intbuff, 10) - intbuff; + str->append_with_prefill(intbuff, length, 2, '0'); + break; + case 'e': + length= int10_to_str(l_time->day, intbuff, 10) - intbuff; + str->append_with_prefill(intbuff, length, 1, '0'); + break; + case 'f': + length= int10_to_str(l_time->second_part, intbuff, 10) - intbuff; + str->append_with_prefill(intbuff, length, 6, '0'); + break; + case 'H': + length= int10_to_str(l_time->hour, intbuff, 10) - intbuff; + str->append_with_prefill(intbuff, length, 2, '0'); + break; + case 'h': + case 'I': + days_i= l_time->hour/24; + hours_i= (l_time->hour%24 + 11)%12+1 + 24*days_i; + length= int10_to_str(hours_i, intbuff, 10) - intbuff; + str->append_with_prefill(intbuff, length, 2, '0'); + break; + case 'i': /* minutes */ + length= int10_to_str(l_time->minute, intbuff, 10) - intbuff; + str->append_with_prefill(intbuff, length, 2, '0'); + break; + case 'j': + if (type == MYSQL_TIMESTAMP_TIME) + return 1; + length= int10_to_str(calc_daynr(l_time->year,l_time->month, + l_time->day) - + calc_daynr(l_time->year,1,1) + 1, intbuff, 10) - intbuff; + str->append_with_prefill(intbuff, length, 3, '0'); + break; + case 'k': + length= int10_to_str(l_time->hour, intbuff, 10) - intbuff; + str->append_with_prefill(intbuff, length, 1, '0'); + break; + case 'l': + days_i= l_time->hour/24; + hours_i= (l_time->hour%24 + 11)%12+1 + 24*days_i; + length= int10_to_str(hours_i, intbuff, 10) - intbuff; + str->append_with_prefill(intbuff, length, 1, '0'); + break; + case 'p': + hours_i= l_time->hour%24; + str->append(hours_i < 12 ? "AM" : "PM",2); + break; + case 'r': + length= my_sprintf(intbuff, + (intbuff, + (l_time->hour < 12) ? "%02d:%02d:%02d AM" : "%02d:%02d:%02d PM", + (l_time->hour+11)%12+1, + l_time->minute, + l_time->second)); + str->append(intbuff, length); + break; + case 'S': + case 's': + length= int10_to_str(l_time->second, intbuff, 10) - intbuff; + str->append_with_prefill(intbuff, length, 2, '0'); + break; + case 'T': + length= my_sprintf(intbuff, + (intbuff, + "%02d:%02d:%02d", + l_time->hour, + l_time->minute, + l_time->second)); + str->append(intbuff, length); + break; + case 'U': + case 'u': + { + uint year; + if (type == MYSQL_TIMESTAMP_TIME) + return 1; + length= int10_to_str(calc_week(l_time, + (*ptr) == 'U' ? + WEEK_FIRST_WEEKDAY : WEEK_MONDAY_FIRST, + &year), + intbuff, 10) - intbuff; + str->append_with_prefill(intbuff, length, 2, '0'); + } + break; + case 'v': + case 'V': + { + uint year; + if (type == MYSQL_TIMESTAMP_TIME) + return 1; + length= int10_to_str(calc_week(l_time, + ((*ptr) == 'V' ? + (WEEK_YEAR | WEEK_FIRST_WEEKDAY) : + (WEEK_YEAR | WEEK_MONDAY_FIRST)), + &year), + intbuff, 10) - intbuff; + str->append_with_prefill(intbuff, length, 2, '0'); + } + break; + case 'x': + case 'X': + { + uint year; + if (type == MYSQL_TIMESTAMP_TIME) + return 1; + (void) calc_week(l_time, + ((*ptr) == 'X' ? 
+ WEEK_YEAR | WEEK_FIRST_WEEKDAY : + WEEK_YEAR | WEEK_MONDAY_FIRST), + &year); + length= int10_to_str(year, intbuff, 10) - intbuff; + str->append_with_prefill(intbuff, length, 4, '0'); + } + break; + case 'w': + if (type == MYSQL_TIMESTAMP_TIME) + return 1; + weekday=calc_weekday(calc_daynr(l_time->year,l_time->month, + l_time->day),1); + length= int10_to_str(weekday, intbuff, 10) - intbuff; + str->append_with_prefill(intbuff, length, 1, '0'); + break; + + default: + str->append(*ptr); + break; + } + } + } + return 0; +} + + +/* + Get an array of positive numbers from a string object. + Each number is separated by 1 non-digit character. + Return an error if there are too many numbers. + If there are too few numbers, assume that the numbers are left out + from the high end. This allows one to give: + DAY_TO_SECOND as "D HH:MM:SS", "HH:MM:SS", "MM:SS" or as seconds. + + SYNOPSIS + str: string value + length: length of str + cs: charset of str + values: array of results + count: count of elements in result array + transform_msec: if true we assume that the last part of the string + value is microseconds and should be transformed + to a six digit value. + For example, '1.1' -> '1.100000' +*/ + +static bool get_interval_info(const char *str,uint length,CHARSET_INFO *cs, + uint count, ulonglong *values, + bool transform_msec) { const char *end=str+length; uint i; - while (str != end && !isdigit(*str)) + while (str != end && !my_isdigit(cs,*str)) str++; for (i=0 ; i < count ; i++) { longlong value; - for (value=0; str != end && isdigit(*str) ; str++) - value=value*LL(10) + (long) (*str - '0'); + const char *start= str; + for (value=0; str != end && my_isdigit(cs,*str) ; str++) + value= value*LL(10) + (longlong) (*str - '0'); + if (transform_msec && i == count - 1) // microseconds always last + { + long msec_length= 6 - (str - start); + if (msec_length > 0) + value*= (long) log_10_int[msec_length]; + } values[i]= value; - while (str != end && !isdigit(*str)) + while (str != end && !my_isdigit(cs,*str)) str++; if (str == end && i != count-1) { @@ -73,8 +760,85 @@ static bool get_interval_info(const char *str,uint length,uint count, return (str != end); } + +/* + Calculate difference between two datetime values as seconds + microseconds. + + SYNOPSIS + calc_time_diff() + l_time1 - TIME/DATE/DATETIME value + l_time2 - TIME/DATE/DATETIME value + l_sign - 1 absolute values are subtracted, + -1 absolute values are added. + seconds_out - Out parameter where difference between + l_time1 and l_time2 in seconds is stored. + microseconds_out- Out parameter where microsecond part of difference + between l_time1 and l_time2 is stored. + + NOTE + This function calculates difference between l_time1 and l_time2 absolute + values. So one should set l_sign and correct the result if one wants to + take signs into account (i.e. for TIME values). + + RETURN VALUES + Returns sign of difference. + 1 means negative result + 0 means positive result + +*/ + +static bool calc_time_diff(TIME *l_time1, TIME *l_time2, int l_sign, + longlong *seconds_out, long *microseconds_out) +{ + long days; + bool neg; + longlong microseconds; + + /* + We suppose that if the first argument is MYSQL_TIMESTAMP_TIME + the second argument should be MYSQL_TIMESTAMP_TIME also. + We should check it before the calc_time_diff call.
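+ For example, for l_time1= '2004-01-01 00:00:00', l_time2= '2003-12-31 23:59:59' and l_sign= 1 this function stores seconds_out= 1, microseconds_out= 0 and returns 0 (positive difference).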
+ */ + if (l_time1->time_type == MYSQL_TIMESTAMP_TIME) // Time value + days= (long)l_time1->day - l_sign * (long)l_time2->day; + else + { + days= calc_daynr((uint) l_time1->year, + (uint) l_time1->month, + (uint) l_time1->day); + if (l_time2->time_type == MYSQL_TIMESTAMP_TIME) + days-= l_sign * (long)l_time2->day; + else + days-= l_sign*calc_daynr((uint) l_time2->year, + (uint) l_time2->month, + (uint) l_time2->day); + } + + microseconds= ((longlong)days*LL(86400) + + (longlong)(l_time1->hour*3600L + + l_time1->minute*60L + + l_time1->second) - + l_sign*(longlong)(l_time2->hour*3600L + + l_time2->minute*60L + + l_time2->second)) * LL(1000000) + + (longlong)l_time1->second_part - + l_sign*(longlong)l_time2->second_part; + + neg= 0; + if (microseconds < 0) + { + microseconds= -microseconds; + neg= 1; + } + *seconds_out= microseconds/1000000L; + *microseconds_out= (long) (microseconds%1000000L); + return neg; +} + + longlong Item_func_period_add::val_int() { + DBUG_ASSERT(fixed == 1); ulong period=(ulong) args[0]->val_int(); int months=(int) args[1]->val_int(); @@ -89,6 +853,7 @@ longlong Item_func_period_add::val_int() longlong Item_func_period_diff::val_int() { + DBUG_ASSERT(fixed == 1); ulong period1=(ulong) args[0]->val_int(); ulong period2=(ulong) args[1]->val_int(); @@ -102,6 +867,7 @@ longlong Item_func_period_diff::val_int() longlong Item_func_to_days::val_int() { + DBUG_ASSERT(fixed == 1); TIME ltime; if (get_arg0_date(<ime,0)) return 0; @@ -110,6 +876,7 @@ longlong Item_func_to_days::val_int() longlong Item_func_dayofyear::val_int() { + DBUG_ASSERT(fixed == 1); TIME ltime; if (get_arg0_date(<ime,0)) return 0; @@ -119,6 +886,7 @@ longlong Item_func_dayofyear::val_int() longlong Item_func_dayofmonth::val_int() { + DBUG_ASSERT(fixed == 1); TIME ltime; (void) get_arg0_date(<ime,1); return (longlong) ltime.day; @@ -126,27 +894,36 @@ longlong Item_func_dayofmonth::val_int() longlong Item_func_month::val_int() { + DBUG_ASSERT(fixed == 1); TIME ltime; (void) get_arg0_date(<ime,1); return (longlong) ltime.month; } + String* Item_func_monthname::val_str(String* str) { - uint month=(uint) Item_func_month::val_int(); + DBUG_ASSERT(fixed == 1); + const char *month_name; + uint month=(uint) Item_func_month::val_int(); + if (!month) // This is also true for NULL { null_value=1; return (String*) 0; } null_value=0; - return &month_names[month-1]; + month_name= month_names[month-1]; + str->set(month_name, strlen(month_name), system_charset_info); + return str; } + // Returns the quarter of the year longlong Item_func_quarter::val_int() { + DBUG_ASSERT(fixed == 1); TIME ltime; (void) get_arg0_date(<ime,1); return (longlong) ((ltime.month+2)/3); @@ -154,6 +931,7 @@ longlong Item_func_quarter::val_int() longlong Item_func_hour::val_int() { + DBUG_ASSERT(fixed == 1); TIME ltime; (void) get_arg0_time(<ime); return ltime.hour; @@ -161,6 +939,7 @@ longlong Item_func_hour::val_int() longlong Item_func_minute::val_int() { + DBUG_ASSERT(fixed == 1); TIME ltime; (void) get_arg0_time(<ime); return ltime.minute; @@ -169,6 +948,7 @@ longlong Item_func_minute::val_int() longlong Item_func_second::val_int() { + DBUG_ASSERT(fixed == 1); TIME ltime; (void) get_arg0_time(<ime); return ltime.second; @@ -214,6 +994,7 @@ uint week_mode(uint mode) longlong Item_func_week::val_int() { + DBUG_ASSERT(fixed == 1); uint year; TIME ltime; if (get_arg0_date(<ime,0)) @@ -226,6 +1007,7 @@ longlong Item_func_week::val_int() longlong Item_func_yearweek::val_int() { + DBUG_ASSERT(fixed == 1); uint year,week; TIME ltime; if 
(get_arg0_date(<ime,0)) @@ -241,6 +1023,7 @@ longlong Item_func_yearweek::val_int() longlong Item_func_weekday::val_int() { + DBUG_ASSERT(fixed == 1); ulong tmp_value=(ulong) args[0]->val_int(); if ((null_value=(args[0]->null_value || !tmp_value))) return 0; /* purecov: inspected */ @@ -248,17 +1031,25 @@ longlong Item_func_weekday::val_int() return (longlong) calc_weekday(tmp_value,odbc_type)+test(odbc_type); } + String* Item_func_dayname::val_str(String* str) { + DBUG_ASSERT(fixed == 1); uint weekday=(uint) val_int(); // Always Item_func_daynr() + const char *name; + if (null_value) return (String*) 0; - return &day_names[weekday]; + + name= day_names[weekday]; + str->set(name, strlen(name), system_charset_info); + return str; } longlong Item_func_year::val_int() { + DBUG_ASSERT(fixed == 1); TIME ltime; (void) get_arg0_date(<ime,1); return (longlong) ltime.year; @@ -267,25 +1058,37 @@ longlong Item_func_year::val_int() longlong Item_func_unix_timestamp::val_int() { + TIME ltime; + bool not_used; + + DBUG_ASSERT(fixed == 1); if (arg_count == 0) return (longlong) current_thd->query_start(); if (args[0]->type() == FIELD_ITEM) { // Optimize timestamp field Field *field=((Item_field*) args[0])->field; if (field->type() == FIELD_TYPE_TIMESTAMP) - return ((Field_timestamp*) field)->get_timestamp(); + return ((Field_timestamp*) field)->get_timestamp(&null_value); } - String *str=args[0]->val_str(&value); - if ((null_value=args[0]->null_value)) + + if (get_arg0_date(<ime, 0)) { - return 0; /* purecov: inspected */ + /* + We have to set null_value again because get_arg0_date will also set it + to true if we have wrong datetime parameter (and we should return 0 in + this case). + */ + null_value= args[0]->null_value; + return 0; } - return (longlong) str_to_timestamp(str->ptr(),str->length()); + + return (longlong) TIME_to_timestamp(current_thd, <ime, ¬_used); } longlong Item_func_time_to_sec::val_int() { + DBUG_ASSERT(fixed == 1); TIME ltime; longlong seconds; (void) get_arg0_time(<ime); @@ -295,28 +1098,32 @@ longlong Item_func_time_to_sec::val_int() /* -** Convert a string to a interval value -** To make code easy, allow interval objects without separators. + Convert a string to a interval value + To make code easy, allow interval objects without separators. 
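+ For example, for DAY_SECOND the string "1 2:3:4" gives day=1, hour=2, minute=3, second=4, while a plain "4" is taken as 4 seconds, since missing numbers are assumed to be the leading (high end) components.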
*/ static bool get_interval_value(Item *args,interval_type int_type, - String *str_value, INTERVAL *t) + String *str_value, INTERVAL *interval) { - ulonglong array[4]; + ulonglong array[5]; longlong value; const char *str; uint32 length; - LINT_INIT(value); LINT_INIT(str); LINT_INIT(length); + CHARSET_INFO *cs=str_value->charset(); + + LINT_INIT(value); + LINT_INIT(str); + LINT_INIT(length); - bzero((char*) t,sizeof(*t)); - if ((int) int_type <= INTERVAL_SECOND) + bzero((char*) interval,sizeof(*interval)); + if ((int) int_type <= INTERVAL_MICROSECOND) { value= args->val_int(); if (args->null_value) return 1; if (value < 0) { - t->neg=1; + interval->neg=1; value= -value; } } @@ -326,14 +1133,14 @@ static bool get_interval_value(Item *args,interval_type int_type, if (!(res=args->val_str(str_value))) return (1); - /* record negative intervalls in t->neg */ + /* record negative intervalls in interval->neg */ str=res->ptr(); const char *end=str+res->length(); - while (str != end && isspace(*str)) + while (str != end && my_isspace(cs,*str)) str++; if (str != end && *str == '-') { - t->neg=1; + interval->neg=1; str++; } length=(uint32) (end-str); // Set up pointers to new str @@ -341,68 +1148,101 @@ static bool get_interval_value(Item *args,interval_type int_type, switch (int_type) { case INTERVAL_YEAR: - t->year= (ulong) value; + interval->year= (ulong) value; break; case INTERVAL_MONTH: - t->month= (ulong) value; + interval->month= (ulong) value; break; case INTERVAL_DAY: - t->day= (ulong) value; + interval->day= (ulong) value; break; case INTERVAL_HOUR: - t->hour= (ulong) value; + interval->hour= (ulong) value; + break; + case INTERVAL_MICROSECOND: + interval->second_part=value; break; case INTERVAL_MINUTE: - t->minute= value; + interval->minute=value; break; case INTERVAL_SECOND: - t->second= value; + interval->second=value; break; case INTERVAL_YEAR_MONTH: // Allow YEAR-MONTH YYYYYMM - if (get_interval_info(str,length,2,array)) + if (get_interval_info(str,length,cs,2,array,0)) return (1); - t->year= (ulong) array[0]; - t->month= (ulong) array[1]; + interval->year= (ulong) array[0]; + interval->month= (ulong) array[1]; break; case INTERVAL_DAY_HOUR: - if (get_interval_info(str,length,2,array)) + if (get_interval_info(str,length,cs,2,array,0)) + return (1); + interval->day= (ulong) array[0]; + interval->hour= (ulong) array[1]; + break; + case INTERVAL_DAY_MICROSECOND: + if (get_interval_info(str,length,cs,5,array,1)) return (1); - t->day= (ulong) array[0]; - t->hour= (ulong) array[1]; + interval->day= (ulong) array[0]; + interval->hour= (ulong) array[1]; + interval->minute= array[2]; + interval->second= array[3]; + interval->second_part= array[4]; break; case INTERVAL_DAY_MINUTE: - if (get_interval_info(str,length,3,array)) + if (get_interval_info(str,length,cs,3,array,0)) return (1); - t->day= (ulong) array[0]; - t->hour= (ulong) array[1]; - t->minute= array[2]; + interval->day= (ulong) array[0]; + interval->hour= (ulong) array[1]; + interval->minute= array[2]; break; case INTERVAL_DAY_SECOND: - if (get_interval_info(str,length,4,array)) + if (get_interval_info(str,length,cs,4,array,0)) return (1); - t->day= (ulong) array[0]; - t->hour= (ulong) array[1]; - t->minute= array[2]; - t->second= array[3]; + interval->day= (ulong) array[0]; + interval->hour= (ulong) array[1]; + interval->minute= array[2]; + interval->second= array[3]; + break; + case INTERVAL_HOUR_MICROSECOND: + if (get_interval_info(str,length,cs,4,array,1)) + return (1); + interval->hour= (ulong) array[0]; + interval->minute= 
array[1]; + interval->second= array[2]; + interval->second_part= array[3]; break; case INTERVAL_HOUR_MINUTE: - if (get_interval_info(str,length,2,array)) + if (get_interval_info(str,length,cs,2,array,0)) return (1); - t->hour= (ulong) array[0]; - t->minute= array[1]; + interval->hour= (ulong) array[0]; + interval->minute= array[1]; break; case INTERVAL_HOUR_SECOND: - if (get_interval_info(str,length,3,array)) + if (get_interval_info(str,length,cs,3,array,0)) + return (1); + interval->hour= (ulong) array[0]; + interval->minute= array[1]; + interval->second= array[2]; + break; + case INTERVAL_MINUTE_MICROSECOND: + if (get_interval_info(str,length,cs,3,array,1)) return (1); - t->hour= (ulong) array[0]; - t->minute= array[1]; - t->second= array[2]; + interval->minute= array[0]; + interval->second= array[1]; + interval->second_part= array[2]; break; case INTERVAL_MINUTE_SECOND: - if (get_interval_info(str,length,2,array)) + if (get_interval_info(str,length,cs,2,array,0)) return (1); - t->minute= array[0]; - t->second= array[1]; + interval->minute= array[0]; + interval->second= array[1]; + break; + case INTERVAL_SECOND_MICROSECOND: + if (get_interval_info(str,length,cs,2,array,1)) + return (1); + interval->second= array[0]; + interval->second_part= array[1]; break; } return 0; @@ -411,170 +1251,263 @@ static bool get_interval_value(Item *args,interval_type int_type, String *Item_date::val_str(String *str) { - ulong value=(ulong) val_int(); - if (null_value) - return (String*) 0; - if (!value) // zero daynr + DBUG_ASSERT(fixed == 1); + TIME ltime; + if (get_date(<ime, TIME_FUZZY_DATE)) + return (String *) 0; + if (str->alloc(11)) { - str->copy("0000-00-00"); - return str; + null_value= 1; + return (String *) 0; } - if (str->alloc(11)) - return &empty_string; /* purecov: inspected */ - sprintf((char*) str->ptr(),"%04d-%02d-%02d", - (int) (value/10000L) % 10000, - (int) (value/100)%100, - (int) (value%100)); - str->length(10); + make_date((DATE_TIME_FORMAT *) 0, <ime, str); return str; } -bool Item_date::save_in_field(Field *field, bool no_conversions) +int Item_date::save_in_field(Field *field, bool no_conversions) { TIME ltime; - timestamp_type t_type=TIMESTAMP_FULL; - if (get_date(<ime,1)) - { - if (null_value) - return set_field_to_null(field); - t_type=TIMESTAMP_NONE; // Error - } + if (get_date(<ime, TIME_FUZZY_DATE)) + return set_field_to_null(field); field->set_notnull(); - field->store_time(<ime,t_type); + field->store_time(<ime, MYSQL_TIMESTAMP_DATE); return 0; } -longlong Item_func_from_days::val_int() +longlong Item_date::val_int() +{ + DBUG_ASSERT(fixed == 1); + TIME ltime; + if (get_date(<ime, TIME_FUZZY_DATE)) + return 0; + return (longlong) (ltime.year*10000L+ltime.month*100+ltime.day); +} + + +bool Item_func_from_days::get_date(TIME *ltime, uint fuzzy_date) { longlong value=args[0]->val_int(); if ((null_value=args[0]->null_value)) - return 0; /* purecov: inspected */ - - uint year,month,day; - get_date_from_daynr((long) value,&year,&month,&day); - return (longlong) (year*10000L+month*100+day); + return 1; + bzero(ltime, sizeof(TIME)); + get_date_from_daynr((long) value, <ime->year, <ime->month, <ime->day); + ltime->time_type= MYSQL_TIMESTAMP_DATE; + return 0; } void Item_func_curdate::fix_length_and_dec() { - struct tm tm_tmp,*start; - time_t query_start=current_thd->query_start(); - decimals=0; max_length=10; - localtime_r(&query_start,&tm_tmp); - start=&tm_tmp; - value=(longlong) ((ulong) ((uint) start->tm_year+1900)*10000L+ - ((uint) start->tm_mon+1)*100+ - (uint) 
start->tm_mday); - /* For getdate */ - ltime.year= start->tm_year+1900; - ltime.month= start->tm_mon+1; - ltime.day= start->tm_mday; - ltime.hour= 0; - ltime.minute= 0; - ltime.second= 0; - ltime.second_part=0; - ltime.neg=0; - ltime.time_type=TIMESTAMP_DATE; + collation.set(&my_charset_bin); + decimals=0; + max_length=MAX_DATE_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; + + store_now_in_TIME(<ime); + + /* We don't need to set second_part and neg because they already 0 */ + ltime.hour= ltime.minute= ltime.second= 0; + ltime.time_type= MYSQL_TIMESTAMP_DATE; + value= (longlong) TIME_to_ulonglong_date(<ime); } +String *Item_func_curdate::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + if (str->alloc(11)) + { + null_value= 1; + return (String *) 0; + } + make_date((DATE_TIME_FORMAT *) 0, <ime, str); + return str; +} + +/* + Converts current time in my_time_t to TIME represenatation for local + time zone. Defines time zone (local) used for whole CURDATE function. +*/ +void Item_func_curdate_local::store_now_in_TIME(TIME *now_time) +{ + THD *thd= current_thd; + thd->variables.time_zone->gmt_sec_to_TIME(now_time, + (my_time_t)thd->query_start()); + thd->time_zone_used= 1; +} + + +/* + Converts current time in my_time_t to TIME represenatation for UTC + time zone. Defines time zone (UTC) used for whole UTC_DATE function. +*/ +void Item_func_curdate_utc::store_now_in_TIME(TIME *now_time) +{ + my_tz_UTC->gmt_sec_to_TIME(now_time, + (my_time_t)(current_thd->query_start())); + /* + We are not flagging this query as using time zone, since it uses fixed + UTC-SYSTEM time-zone. + */ +} + + bool Item_func_curdate::get_date(TIME *res, - bool fuzzy_date __attribute__((unused))) + uint fuzzy_date __attribute__((unused))) { *res=ltime; return 0; } + +String *Item_func_curtime::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + str_value.set(buff, buff_length, &my_charset_bin); + return &str_value; +} + + void Item_func_curtime::fix_length_and_dec() { - struct tm tm_tmp,*start; - time_t query_start=current_thd->query_start(); - decimals=0; max_length=8; - localtime_r(&query_start,&tm_tmp); - start=&tm_tmp; - value=(longlong) ((ulong) ((uint) start->tm_hour)*10000L+ - (ulong) (((uint) start->tm_min)*100L+ - (uint) start->tm_sec)); - sprintf(buff,"%02d:%02d:%02d", - (int) start->tm_hour, - (int) start->tm_min, - (int) start->tm_sec); - buff_length=(uint) strlen(buff); + TIME ltime; + + decimals=0; + collation.set(&my_charset_bin); + store_now_in_TIME(<ime); + value= TIME_to_ulonglong_time(<ime); + buff_length= (uint) my_time_to_str(<ime, buff); + max_length= buff_length; +} + + +/* + Converts current time in my_time_t to TIME represenatation for local + time zone. Defines time zone (local) used for whole CURTIME function. +*/ +void Item_func_curtime_local::store_now_in_TIME(TIME *now_time) +{ + THD *thd= current_thd; + thd->variables.time_zone->gmt_sec_to_TIME(now_time, + (my_time_t)thd->query_start()); + thd->time_zone_used= 1; +} + + +/* + Converts current time in my_time_t to TIME represenatation for UTC + time zone. Defines time zone (UTC) used for whole UTC_TIME function. +*/ +void Item_func_curtime_utc::store_now_in_TIME(TIME *now_time) +{ + my_tz_UTC->gmt_sec_to_TIME(now_time, + (my_time_t)(current_thd->query_start())); + /* + We are not flagging this query as using time zone, since it uses fixed + UTC-SYSTEM time-zone. 
+ */ } + +String *Item_func_now::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + str_value.set(buff,buff_length, &my_charset_bin); + return &str_value; +} + + void Item_func_now::fix_length_and_dec() { - struct tm tm_tmp,*start; - time_t query_start=current_thd->query_start(); - decimals=0; max_length=19; - localtime_r(&query_start,&tm_tmp); - start=&tm_tmp; - value=((longlong) ((ulong) ((uint) start->tm_year+1900)*10000L+ - (((uint) start->tm_mon+1)*100+ - (uint) start->tm_mday))*(longlong) 1000000L+ - (longlong) ((ulong) ((uint) start->tm_hour)*10000L+ - (ulong) (((uint) start->tm_min)*100L+ - (uint) start->tm_sec))); - sprintf(buff,"%04d-%02d-%02d %02d:%02d:%02d", - ((int) (start->tm_year+1900)) % 10000, - (int) start->tm_mon+1, - (int) start->tm_mday, - (int) start->tm_hour, - (int) start->tm_min, - (int) start->tm_sec); - buff_length=(uint) strlen(buff); - /* For getdate */ - ltime.year= start->tm_year+1900; - ltime.month= start->tm_mon+1; - ltime.day= start->tm_mday; - ltime.hour= start->tm_hour; - ltime.minute= start->tm_min; - ltime.second= start->tm_sec; - ltime.second_part=0; - ltime.neg=0; - ltime.time_type=TIMESTAMP_FULL; + decimals=0; + collation.set(&my_charset_bin); + + store_now_in_TIME(<ime); + value= (longlong) TIME_to_ulonglong_datetime(<ime); + + buff_length= (uint) my_datetime_to_str(<ime, buff); + max_length= buff_length; } + +/* + Converts current time in my_time_t to TIME represenatation for local + time zone. Defines time zone (local) used for whole NOW function. +*/ +void Item_func_now_local::store_now_in_TIME(TIME *now_time) +{ + THD *thd= current_thd; + thd->variables.time_zone->gmt_sec_to_TIME(now_time, + (my_time_t)thd->query_start()); + thd->time_zone_used= 1; +} + + +/* + Converts current time in my_time_t to TIME represenatation for UTC + time zone. Defines time zone (UTC) used for whole UTC_TIMESTAMP function. +*/ +void Item_func_now_utc::store_now_in_TIME(TIME *now_time) +{ + my_tz_UTC->gmt_sec_to_TIME(now_time, + (my_time_t)(current_thd->query_start())); + /* + We are not flagging this query as using time zone, since it uses fixed + UTC-SYSTEM time-zone. 
+ */ +} + + bool Item_func_now::get_date(TIME *res, - bool fuzzy_date __attribute__((unused))) + uint fuzzy_date __attribute__((unused))) { *res=ltime; return 0; } -bool Item_func_now::save_in_field(Field *to, bool no_conversions) +int Item_func_now::save_in_field(Field *to, bool no_conversions) { to->set_notnull(); - to->store_time(&ltime,TIMESTAMP_FULL); + to->store_time(&ltime, MYSQL_TIMESTAMP_DATETIME); return 0; } String *Item_func_sec_to_time::val_str(String *str) { - char buff[23]; - const char *sign=""; + DBUG_ASSERT(fixed == 1); longlong seconds=(longlong) args[0]->val_int(); - if ((null_value=args[0]->null_value)) + uint sec; + TIME ltime; + + if ((null_value=args[0]->null_value) || str->alloc(19)) + { + null_value= 1; return (String*) 0; + } + + ltime.neg= 0; if (seconds < 0) { seconds= -seconds; - sign= "-"; + ltime.neg= 1; } - uint sec= (uint) ((ulonglong) seconds % 3600); - sprintf(buff,"%s%02lu:%02u:%02u",sign,(long) (seconds/3600), - sec/60, sec % 60); - str->copy(buff,(uint) strlen(buff)); + + sec= (uint) ((ulonglong) seconds % 3600); + ltime.day= 0; + ltime.hour= (uint) (seconds/3600); + ltime.minute= sec/60; + ltime.second= sec % 60; + + make_time((DATE_TIME_FORMAT *) 0, &ltime, str); return str; } longlong Item_func_sec_to_time::val_int() { + DBUG_ASSERT(fixed == 1); longlong seconds=args[0]->val_int(); longlong sign=1; if ((null_value=args[0]->null_value)) @@ -591,15 +1524,31 @@ longlong Item_func_sec_to_time::val_int() void Item_func_date_format::fix_length_and_dec() { decimals=0; + collation.set(&my_charset_bin); if (args[1]->type() == STRING_ITEM) { // Optimize the normal case fixed_length=1; - max_length=format_length(((Item_string*) args[1])->const_string()); + + /* + Force case sensitive collation on format string. + This is needed because format modifiers with different case, + for example %m and %M, have different meaning. Thus eq() + will distinguish them.
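+ For example, %m prints the month as a two-digit number while %M prints the month name, so two format strings that differ only in letter case must not compare as equal.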
+ */ + args[1]->collation.set( + get_charset_by_csname(args[1]->collation.collation->csname, + MY_CS_BINSORT,MYF(0)), DERIVATION_COERCIBLE); + /* + The result is a binary string (no reason to use collation->mbmaxlen + This is becasue make_date_time() only returns binary strings + */ + max_length= format_length(((Item_string*) args[1])->const_string()); } else { fixed_length=0; - max_length=args[1]->max_length*10; + /* The result is a binary string (no reason to use collation->mbmaxlen */ + max_length=min(args[1]->max_length,MAX_BLOB_WIDTH) * 10; set_if_smaller(max_length,MAX_BLOB_WIDTH); } maybe_null=1; // If wrong date @@ -660,6 +1609,9 @@ uint Item_func_date_format::format_length(const String *format) case 'T': /* time, 24-hour (hh:mm:ss) */ size += 8; break; + case 'f': /* microseconds */ + size += 6; + break; case 'w': /* day (of the week), numeric */ case '%': default: @@ -676,10 +1628,10 @@ String *Item_func_date_format::val_str(String *str) { String *format; TIME l_time; - char intbuff[15]; - uint size,weekday; + uint size; + DBUG_ASSERT(fixed == 1); - if (!date_or_time) + if (!is_time_format) { if (get_arg0_date(&l_time,1)) return 0; @@ -687,25 +1639,16 @@ String *Item_func_date_format::val_str(String *str) else { String *res; - if (!(res=args[0]->val_str(str))) - { - null_value=1; - return 0; - } - if (str_to_time(res->ptr(),res->length(),&l_time)) - { - null_value=1; - return 0; - } + if (!(res=args[0]->val_str(str)) || + (str_to_time_with_warn(res->ptr(), res->length(), &l_time))) + goto null_date; + l_time.year=l_time.month=l_time.day=0; null_value=0; } if (!(format = args[1]->val_str(str)) || !format->length()) - { - null_value=1; - return 0; - } + goto null_date; if (fixed_length) size=max_length; @@ -714,265 +1657,226 @@ String *Item_func_date_format::val_str(String *str) if (format == str) str= &value; // Save result here if (str->alloc(size)) - { - null_value=1; - return 0; - } - str->length(0); + goto null_date; + + DATE_TIME_FORMAT date_time_format; + date_time_format.format.str= (char*) format->ptr(); + date_time_format.format.length= format->length(); /* Create the result string */ - const char *ptr=format->ptr(); - const char *end=ptr+format->length(); - for (; ptr != end ; ptr++) - { - if (*ptr != '%' || ptr+1 == end) - str->append(*ptr); - else - { - switch (*++ptr) { - case 'M': - if (!l_time.month) - { - null_value=1; - return 0; - } - str->append(month_names[l_time.month-1]); - break; - case 'b': - if (!l_time.month) - { - null_value=1; - return 0; - } - str->append(month_names[l_time.month-1].ptr(),3); - break; - case 'W': - if (date_or_time) - { - null_value=1; - return 0; - } - weekday=calc_weekday(calc_daynr(l_time.year,l_time.month,l_time.day),0); - str->append(day_names[weekday]); - break; - case 'a': - if (date_or_time) - { - null_value=1; - return 0; - } - weekday=calc_weekday(calc_daynr(l_time.year,l_time.month,l_time.day),0); - str->append(day_names[weekday].ptr(),3); - break; - case 'D': - if (date_or_time) - { - null_value=1; - return 0; - } - sprintf(intbuff,"%d",l_time.day); - str->append(intbuff); - if (l_time.day >= 10 && l_time.day <= 19) - str->append("th"); - else - { - switch (l_time.day %10) - { - case 1: - str->append("st"); - break; - case 2: - str->append("nd"); - break; - case 3: - str->append("rd"); - break; - default: - str->append("th"); - break; - } - } - break; - case 'Y': - sprintf(intbuff,"%04d",l_time.year); - str->append(intbuff); - break; - case 'y': - sprintf(intbuff,"%02d",l_time.year%100); - str->append(intbuff); - break; - 
case 'm': - sprintf(intbuff,"%02d",l_time.month); - str->append(intbuff); - break; - case 'c': - sprintf(intbuff,"%d",l_time.month); - str->append(intbuff); - break; - case 'd': - sprintf(intbuff,"%02d",l_time.day); - str->append(intbuff); - break; - case 'e': - sprintf(intbuff,"%d",l_time.day); - str->append(intbuff); - break; - case 'H': - sprintf(intbuff,"%02d",l_time.hour); - str->append(intbuff); - break; - case 'h': - case 'I': - sprintf(intbuff,"%02d", (l_time.hour+11)%12+1); - str->append(intbuff); - break; - case 'i': /* minutes */ - sprintf(intbuff,"%02d",l_time.minute); - str->append(intbuff); - break; - case 'j': - if (date_or_time) - { - null_value=1; - return 0; - } - sprintf(intbuff,"%03d", - (int) (calc_daynr(l_time.year,l_time.month,l_time.day) - - calc_daynr(l_time.year,1,1)) + 1); - str->append(intbuff); - break; - case 'k': - sprintf(intbuff,"%d",l_time.hour); - str->append(intbuff); - break; - case 'l': - sprintf(intbuff,"%d", (l_time.hour+11)%12+1); - str->append(intbuff); - break; - case 'p': - str->append(l_time.hour < 12 ? "AM" : "PM"); - break; - case 'r': - sprintf(intbuff,(l_time.hour < 12) ? "%02d:%02d:%02d AM" : - "%02d:%02d:%02d PM",(l_time.hour+11)%12+1,l_time.minute, - l_time.second); - str->append(intbuff); - break; - case 'S': - case 's': - sprintf(intbuff,"%02d",l_time.second); - str->append(intbuff); - break; - case 'T': - sprintf(intbuff,"%02d:%02d:%02d",l_time.hour,l_time.minute,l_time.second); - str->append(intbuff); - break; - case 'U': - case 'u': - { - uint year; - sprintf(intbuff,"%02d", - calc_week(&l_time, - ((*ptr) == 'U' ? - WEEK_FIRST_WEEKDAY : WEEK_MONDAY_FIRST) , &year)); - str->append(intbuff); - } - break; - case 'v': - case 'V': - { - uint year; - sprintf(intbuff,"%02d", - calc_week(&l_time, - ((*ptr) == 'V' ? WEEK_YEAR | WEEK_FIRST_WEEKDAY : - WEEK_YEAR | WEEK_MONDAY_FIRST), - &year)); - str->append(intbuff); - } - break; - case 'x': - case 'X': - { - uint year; - (void) calc_week(&l_time, - ((*ptr) == 'X' ? WEEK_YEAR | WEEK_FIRST_WEEKDAY : - WEEK_YEAR | WEEK_MONDAY_FIRST), - &year); - sprintf(intbuff,"%04d",year); - str->append(intbuff); - } - break; - case 'w': - weekday=calc_weekday(calc_daynr(l_time.year,l_time.month,l_time.day),1); - sprintf(intbuff,"%01d",weekday); - str->append(intbuff); - break; - default: - str->append(*ptr); - break; - } - } - } - return str; + if (!make_date_time(&date_time_format, &l_time, + is_time_format ? 
MYSQL_TIMESTAMP_TIME : + MYSQL_TIMESTAMP_DATE, + str)) + return str; + +null_date: + null_value=1; + return 0; +} + + +void Item_func_from_unixtime::fix_length_and_dec() +{ + thd= current_thd; + collation.set(&my_charset_bin); + decimals=0; + max_length=MAX_DATETIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; + maybe_null= 1; + thd->time_zone_used= 1; } String *Item_func_from_unixtime::val_str(String *str) { - TIME ltime; - if (get_date(<ime, 0)) + TIME time_tmp; + + DBUG_ASSERT(fixed == 1); + + if (get_date(&time_tmp, 0)) return 0; - if (str->alloc(20)) - return str; /* purecov: inspected */ - sprintf((char*) str->ptr(),"%04d-%02d-%02d %02d:%02d:%02d", - (int) ltime.year, (int) ltime.month, (int) ltime.day, - (int) ltime.hour, (int) ltime.minute, (int) ltime.second); - str->length(19); + + if (str->alloc(20*MY_CHARSET_BIN_MB_MAXLEN)) + { + null_value= 1; + return 0; + } + + make_datetime((DATE_TIME_FORMAT *) 0, &time_tmp, str); return str; } longlong Item_func_from_unixtime::val_int() { - TIME ltime; - if (get_date(<ime, 0)) + TIME time_tmp; + + DBUG_ASSERT(fixed == 1); + + if (get_date(&time_tmp, 0)) return 0; - return ((longlong)(ltime.year*10000L+ltime.month*100+ltime.day)*LL(1000000)+ - (longlong)(ltime.hour*10000L+ltime.minute*100+ltime.second)); + + return (longlong) TIME_to_ulonglong_datetime(&time_tmp); } bool Item_func_from_unixtime::get_date(TIME *ltime, - bool fuzzy_date __attribute__((unused))) + uint fuzzy_date __attribute__((unused))) { - struct tm tm_tmp; - time_t tmp; - ulonglong arg= (ulonglong)(args[0]->val_int()); + ulonglong tmp= (ulonglong)(args[0]->val_int()); /* - "arg > TIMESTAMP_MAX_VALUE" check also covers case of negative - from_unixtime() argument since arg is unsigned. + "tmp > TIMESTAMP_MAX_VALUE" check also covers case of negative + from_unixtime() argument since tmp is unsigned. 
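+ For example, FROM_UNIXTIME(-1) becomes a huge unsigned value here, fails the range check below and thus returns NULL.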
*/ - if ((null_value= (args[0]->null_value || arg > TIMESTAMP_MAX_VALUE))) + if ((null_value= (args[0]->null_value || tmp > TIMESTAMP_MAX_VALUE))) return 1; - tmp= arg; - localtime_r(&tmp,&tm_tmp); - ltime->year= tm_tmp.tm_year+1900; - ltime->month= tm_tmp.tm_mon+1; - ltime->day= tm_tmp.tm_mday; - ltime->hour= tm_tmp.tm_hour; - ltime->minute= tm_tmp.tm_min; - ltime->second= tm_tmp.tm_sec; - ltime->second_part= 0; - ltime->neg= 0; + + thd->variables.time_zone->gmt_sec_to_TIME(ltime, (my_time_t)tmp); + return 0; } - /* Here arg[1] is a Item_interval object */ -bool Item_date_add_interval::get_date(TIME *ltime, bool fuzzy_date) +void Item_func_convert_tz::fix_length_and_dec() +{ + collation.set(&my_charset_bin); + decimals= 0; + max_length= MAX_DATETIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; + maybe_null= 1; +} + + +bool +Item_func_convert_tz::fix_fields(THD *thd_arg, TABLE_LIST *tables_arg, Item **ref) +{ + String str; + if (Item_date_func::fix_fields(thd_arg, tables_arg, ref)) + return 1; + + tz_tables= thd_arg->lex->time_zone_tables_used; + + return 0; +} + + +String *Item_func_convert_tz::val_str(String *str) +{ + TIME time_tmp; + + if (get_date(&time_tmp, 0)) + return 0; + + if (str->alloc(20*MY_CHARSET_BIN_MB_MAXLEN)) + { + null_value= 1; + return 0; + } + + make_datetime((DATE_TIME_FORMAT *) 0, &time_tmp, str); + return str; +} + + +longlong Item_func_convert_tz::val_int() +{ + TIME time_tmp; + + if (get_date(&time_tmp, 0)) + return 0; + + return (longlong)TIME_to_ulonglong_datetime(&time_tmp); +} + + +bool Item_func_convert_tz::get_date(TIME *ltime, + uint fuzzy_date __attribute__((unused))) +{ + my_time_t my_time_tmp; + bool not_used; + String str; + + if (!from_tz_cached) + { + from_tz= my_tz_find(args[1]->val_str(&str), tz_tables); + from_tz_cached= args[1]->const_item(); + } + + if (!to_tz_cached) + { + to_tz= my_tz_find(args[2]->val_str(&str), tz_tables); + to_tz_cached= args[2]->const_item(); + } + + if (from_tz==0 || to_tz==0 || get_arg0_date(ltime, 0)) + { + null_value= 1; + return 1; + } + + /* Check if we in range where we treat datetime values as non-UTC */ + if (ltime->year < TIMESTAMP_MAX_YEAR && ltime->year > TIMESTAMP_MIN_YEAR || + ltime->year==TIMESTAMP_MAX_YEAR && ltime->month==1 && ltime->day==1 || + ltime->year==TIMESTAMP_MIN_YEAR && ltime->month==12 && ltime->day==31) + { + my_time_tmp= from_tz->TIME_to_gmt_sec(ltime, ¬_used); + if (my_time_tmp >= TIMESTAMP_MIN_VALUE && my_time_tmp <= TIMESTAMP_MAX_VALUE) + to_tz->gmt_sec_to_TIME(ltime, my_time_tmp); + } + + null_value= 0; + return 0; +} + + +void Item_func_convert_tz::cleanup() +{ + from_tz_cached= to_tz_cached= 0; + Item_date_func::cleanup(); +} + + +void Item_date_add_interval::fix_length_and_dec() +{ + enum_field_types arg0_field_type; + + collation.set(&my_charset_bin); + maybe_null=1; + max_length=MAX_DATETIME_FULL_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; + value.alloc(max_length); + + /* + The field type for the result of an Item_date function is defined as + follows: + + - If first arg is a MYSQL_TYPE_DATETIME result is MYSQL_TYPE_DATETIME + - If first arg is a MYSQL_TYPE_DATE and the interval type uses hours, + minutes or seconds then type is MYSQL_TYPE_DATETIME. 
+ - Otherwise the result is MYSQL_TYPE_STRING + (This is because you can't know if the string contains a DATE, TIME or + DATETIME argument) + */ + cached_field_type= MYSQL_TYPE_STRING; + arg0_field_type= args[0]->field_type(); + if (arg0_field_type == MYSQL_TYPE_DATETIME || + arg0_field_type == MYSQL_TYPE_TIMESTAMP) + cached_field_type= MYSQL_TYPE_DATETIME; + else if (arg0_field_type == MYSQL_TYPE_DATE) + { + if (int_type <= INTERVAL_DAY || int_type == INTERVAL_YEAR_MONTH) + cached_field_type= arg0_field_type; + else + cached_field_type= MYSQL_TYPE_DATETIME; + } +} + + +/* Here arg[1] is a Item_interval object */ + +bool Item_date_add_interval::get_date(TIME *ltime, uint fuzzy_date) { long period,sign; INTERVAL interval; + ltime->neg= 0; if (args[0]->get_date(ltime,0) || get_interval_value(args[1],int_type,&value,&interval)) goto null_date; @@ -983,47 +1887,66 @@ bool Item_date_add_interval::get_date(TIME *ltime, bool fuzzy_date) null_value=0; switch (int_type) { case INTERVAL_SECOND: + case INTERVAL_SECOND_MICROSECOND: + case INTERVAL_MICROSECOND: case INTERVAL_MINUTE: case INTERVAL_HOUR: + case INTERVAL_MINUTE_MICROSECOND: case INTERVAL_MINUTE_SECOND: + case INTERVAL_HOUR_MICROSECOND: case INTERVAL_HOUR_SECOND: case INTERVAL_HOUR_MINUTE: + case INTERVAL_DAY_MICROSECOND: case INTERVAL_DAY_SECOND: case INTERVAL_DAY_MINUTE: case INTERVAL_DAY_HOUR: - longlong sec, days, daynr; - ltime->time_type=TIMESTAMP_FULL; // Return full date + { + longlong sec, days, daynr, microseconds, extra_sec; + ltime->time_type= MYSQL_TIMESTAMP_DATETIME; // Return full date + microseconds= ltime->second_part + sign*interval.second_part; + extra_sec= microseconds/1000000L; + microseconds= microseconds%1000000L; sec=((ltime->day-1)*3600*24L+ltime->hour*3600+ltime->minute*60+ ltime->second + - sign*(longlong) (interval.day*3600*24L + - interval.hour*LL(3600)+interval.minute*LL(60)+ - interval.second)); + sign* (longlong) (interval.day*3600*24L + + interval.hour*LL(3600)+interval.minute*LL(60)+ + interval.second))+ extra_sec; + if (microseconds < 0) + { + microseconds+= LL(1000000); + sec--; + } days= sec/(3600*LL(24)); sec-= days*3600*LL(24); if (sec < 0) { days--; - sec+=3600*LL(24); + sec+= 3600*LL(24); } - ltime->second= (uint)(sec % 60); - ltime->minute= (uint)(sec/60 % 60); - ltime->hour= (uint)(sec/3600); + ltime->second_part= (uint) microseconds; + ltime->second= (uint) (sec % 60); + ltime->minute= (uint) (sec/60 % 60); + ltime->hour= (uint) (sec/3600); daynr= calc_daynr(ltime->year,ltime->month,1) + days; - if ((ulonglong) daynr >= 3652424) // Day number from year 0 to 9999-12-31 + /* Day number from year 0 to 9999-12-31 */ + if ((ulonglong) daynr >= MAX_DAY_NUMBER) goto null_date; - get_date_from_daynr((long) daynr,<ime->year,<ime->month,<ime->day); + get_date_from_daynr((long) daynr, <ime->year, <ime->month, + <ime->day); break; + } case INTERVAL_DAY: - period= calc_daynr(ltime->year,ltime->month,ltime->day) + - sign* (long) interval.day; - if (period < 0 || period >= 3652424) // Daynumber from year 0 to 9999-12-31 + period= (calc_daynr(ltime->year,ltime->month,ltime->day) + + sign * (long) interval.day); + /* Daynumber from year 0 to 9999-12-31 */ + if ((ulong) period >= MAX_DAY_NUMBER) goto null_date; get_date_from_daynr((long) period,<ime->year,<ime->month,<ime->day); break; case INTERVAL_YEAR: - ltime->year += sign*(long) interval.year; - if ((long) ltime->year < 0 || ltime->year >= 10000L) + ltime->year+= sign * (long) interval.year; + if ((ulong) ltime->year >= 10000L) goto null_date; if 
(ltime->month == 2 && ltime->day == 29 && calc_days_in_year(ltime->year) != 366) @@ -1031,9 +1954,9 @@ bool Item_date_add_interval::get_date(TIME *ltime, bool fuzzy_date) break; case INTERVAL_YEAR_MONTH: case INTERVAL_MONTH: - period= (ltime->year*12 + sign*(long) interval.year*12 + - ltime->month-1 + sign*(long) interval.month); - if (period < 0 || period >= 120000L) + period= (ltime->year*12 + sign * (long) interval.year*12 + + ltime->month-1 + sign * (long) interval.month); + if ((ulong) period >= 120000L) goto null_date; ltime->year= (uint) (period / 12); ltime->month= (uint) (period % 12L)+1; @@ -1057,45 +1980,70 @@ bool Item_date_add_interval::get_date(TIME *ltime, bool fuzzy_date) String *Item_date_add_interval::val_str(String *str) { + DBUG_ASSERT(fixed == 1); TIME ltime; + enum date_time_format_types format; if (Item_date_add_interval::get_date(<ime,0)) return 0; - if (ltime.time_type == TIMESTAMP_DATE) - { - if (str->alloc(11)) - goto null_date; - sprintf((char*) str->ptr(),"%04d-%02d-%02d", - ltime.year,ltime.month,ltime.day); - str->length(10); - } + + if (ltime.time_type == MYSQL_TIMESTAMP_DATE) + format= DATE_ONLY; + else if (ltime.second_part) + format= DATE_TIME_MICROSECOND; else - { - if (str->alloc(20)) - goto null_date; - sprintf((char*) str->ptr(),"%04d-%02d-%02d %02d:%02d:%02d", - ltime.year,ltime.month,ltime.day, - ltime.hour,ltime.minute,ltime.second); - str->length(19); - } - return str; + format= DATE_TIME; + + if (!make_datetime(format, <ime, str)) + return str; - null_date: null_value=1; return 0; } + longlong Item_date_add_interval::val_int() { + DBUG_ASSERT(fixed == 1); TIME ltime; longlong date; if (Item_date_add_interval::get_date(<ime,0)) return (longlong) 0; date = (ltime.year*100L + ltime.month)*100L + ltime.day; - return ltime.time_type == TIMESTAMP_DATE ? date : + return ltime.time_type == MYSQL_TIMESTAMP_DATE ? date : ((date*100L + ltime.hour)*100L+ ltime.minute)*100L + ltime.second; } +static const char *interval_names[]= +{ + "year", "month", "day", "hour", "minute", + "second", "microsecond", "year_month", + "day_hour", "day_minute", "day_second", + "hour_minute", "hour_second", "minute_second", + "day_microsecond", "hour_microsecond", + "minute_microsecond", "second_microsecond" +}; + +void Item_date_add_interval::print(String *str) +{ + str->append('('); + args[0]->print(str); + str->append(date_sub_interval?" 
- interval ":" + interval "); + args[1]->print(str); + str->append(' '); + str->append(interval_names[int_type]); + str->append(')'); +} + +void Item_extract::print(String *str) +{ + str->append("extract(", 8); + str->append(interval_names[int_type]); + str->append(" from ", 6); + args[0]->print(str); + str->append(')'); +} + void Item_extract::fix_length_and_dec() { value.alloc(32); // alloc buffer @@ -1115,12 +2063,18 @@ void Item_extract::fix_length_and_dec() case INTERVAL_MINUTE: max_length=2; date_value=0; break; case INTERVAL_MINUTE_SECOND: max_length=4; date_value=0; break; case INTERVAL_SECOND: max_length=2; date_value=0; break; + case INTERVAL_MICROSECOND: max_length=2; date_value=0; break; + case INTERVAL_DAY_MICROSECOND: max_length=20; date_value=0; break; + case INTERVAL_HOUR_MICROSECOND: max_length=13; date_value=0; break; + case INTERVAL_MINUTE_MICROSECOND: max_length=11; date_value=0; break; + case INTERVAL_SECOND_MICROSECOND: max_length=9; date_value=0; break; } } longlong Item_extract::val_int() { + DBUG_ASSERT(fixed == 1); TIME ltime; long neg; if (date_value) @@ -1132,7 +2086,7 @@ longlong Item_extract::val_int() else { String *res= args[0]->val_str(&value); - if (!res || str_to_time(res->ptr(),res->length(),<ime)) + if (!res || str_to_time_with_warn(res->ptr(), res->length(), <ime)) { null_value=1; return 0; @@ -1140,7 +2094,6 @@ longlong Item_extract::val_int() neg= ltime.neg ? -1 : 1; null_value=0; } - switch (int_type) { case INTERVAL_YEAR: return ltime.year; case INTERVAL_YEAR_MONTH: return ltime.year*100L+ltime.month; @@ -1161,6 +2114,21 @@ longlong Item_extract::val_int() case INTERVAL_MINUTE: return (long) ltime.minute*neg; case INTERVAL_MINUTE_SECOND: return (long) (ltime.minute*100+ltime.second)*neg; case INTERVAL_SECOND: return (long) ltime.second*neg; + case INTERVAL_MICROSECOND: return (long) ltime.second_part*neg; + case INTERVAL_DAY_MICROSECOND: return (((longlong)ltime.day*1000000L + + (longlong)ltime.hour*10000L + + ltime.minute*100 + + ltime.second)*1000000L + + ltime.second_part)*neg; + case INTERVAL_HOUR_MICROSECOND: return (((longlong)ltime.hour*10000L + + ltime.minute*100 + + ltime.second)*1000000L + + ltime.second_part)*neg; + case INTERVAL_MINUTE_MICROSECOND: return (((longlong)(ltime.minute*100+ + ltime.second))*1000000L+ + ltime.second_part)*neg; + case INTERVAL_SECOND_MICROSECOND: return ((longlong)ltime.second*1000000L+ + ltime.second_part)*neg; } return 0; // Impossible } @@ -1182,11 +2150,709 @@ bool Item_extract::eq(const Item *item, bool binary_cmp) const return 1; } + +bool Item_char_typecast::eq(const Item *item, bool binary_cmp) const +{ + if (this == item) + return 1; + if (item->type() != FUNC_ITEM || + func_name() != ((Item_func*)item)->func_name()) + return 0; + + Item_char_typecast *cast= (Item_char_typecast*)item; + if (cast_length != cast->cast_length || + cast_cs != cast->cast_cs) + return 0; + + if (!args[0]->eq(cast->args[0], binary_cmp)) + return 0; + return 1; +} + void Item_typecast::print(String *str) { - str->append("CAST("); + str->append("cast(", 5); + args[0]->print(str); + str->append(" as ", 4); + str->append(cast_type()); + str->append(')'); +} + + +void Item_char_typecast::print(String *str) +{ + str->append("cast(", 5); + args[0]->print(str); + str->append(" as char", 8); + if (cast_length >= 0) + { + str->append('('); + char buffer[20]; + // my_charset_bin is good enough for numbers + String st(buffer, sizeof(buffer), &my_charset_bin); + st.set((ulonglong)cast_length, &my_charset_bin); + str->append(st); + 
str->append(')'); + } + if (cast_cs) + { + str->append(" charset ", 9); + str->append(cast_cs->name); + } + str->append(')'); +} + +String *Item_char_typecast::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + String *res; + uint32 length; + + if (!charset_conversion) + { + if (!(res= args[0]->val_str(str))) + { + null_value= 1; + return 0; + } + } + else + { + // Convert character set if they differ + uint dummy_errors; + if (!(res= args[0]->val_str(&tmp_value)) || + str->copy(res->ptr(), res->length(), from_cs, + cast_cs, &dummy_errors)) + { + null_value= 1; + return 0; + } + res= str; + } + + res->set_charset(cast_cs); + + /* + Cut the tail if cast with length + and the result is longer than cast length, e.g. + CAST('string' AS CHAR(1)) + */ + if (cast_length >= 0 && + (res->length() > (length= (uint32) res->charpos(cast_length)))) + { // Safe even if const arg + if (!res->alloced_length()) + { // Don't change const str + str_value= *res; // Not malloced string + res= &str_value; + } + res->length((uint) length); + } + null_value= 0; + return res; +} + +void Item_char_typecast::fix_length_and_dec() +{ + uint32 char_length; + /* + We always force character set conversion if cast_cs + is a multi-byte character set. It guarantees that the + result of CAST is a well-formed string. + For single-byte character sets we allow just to copy + from the argument. A single-byte character set string + is always well-formed. + + There is a special trick to convert from a number to ucs2. + As numbers have my_charset_bin as their character set, + it wouldn't do conversion to ucs2 without an additional action. + To force conversion, we should pretend to be non-binary. + Let's choose from_cs this way: + - If the argument is a number and cast_cs is ucs2 (i.e. mbminlen > 1), + then from_cs is set to latin1, to perform latin1 -> ucs2 conversion. + - If the argument is a number and cast_cs is ASCII-compatible + (i.e. mbminlen == 1), then from_cs is set to cast_cs, + which allows just to take over the args[0]->val_str() result + and thus avoid unnecessary character set conversion. + - If the argument is not a number, then from_cs is set to + the argument's charset. + */ + from_cs= (args[0]->result_type() == INT_RESULT || + args[0]->result_type() == REAL_RESULT) ? + (cast_cs->mbminlen == 1 ? cast_cs : &my_charset_latin1) : + args[0]->collation.collation; + charset_conversion= (cast_cs->mbmaxlen > 1) || + !my_charset_same(from_cs, cast_cs) && + from_cs != &my_charset_bin && + cast_cs != &my_charset_bin; + collation.set(cast_cs, DERIVATION_IMPLICIT); + char_length= (cast_length >= 0) ? cast_length : + args[0]->max_length/from_cs->mbmaxlen; + max_length= char_length * cast_cs->mbmaxlen; +} + + +String *Item_datetime_typecast::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + TIME ltime; + if (!get_arg0_date(&ltime,1) && + !make_datetime(ltime.second_part ? DATE_TIME_MICROSECOND : DATE_TIME, + &ltime, str)) + return str; + + null_value=1; + return 0; +} + + +bool Item_time_typecast::get_time(TIME *ltime) +{ + bool res= get_arg0_time(ltime); + /* + For MYSQL_TIMESTAMP_TIME value we can have non-zero day part, + which we should not lose. + */ + if (ltime->time_type == MYSQL_TIMESTAMP_DATETIME) + ltime->year= ltime->month= ltime->day= 0; + ltime->time_type= MYSQL_TIMESTAMP_TIME; + return res; +} + + +String *Item_time_typecast::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + TIME ltime; + + if (!get_arg0_time(&ltime) && + !make_datetime(ltime.second_part ?
TIME_MICROSECOND : TIME_ONLY, + <ime, str)) + return str; + + null_value=1; + return 0; +} + + +bool Item_date_typecast::get_date(TIME *ltime, uint fuzzy_date) +{ + bool res= get_arg0_date(ltime,1); + ltime->hour= ltime->minute= ltime->second= ltime->second_part= 0; + ltime->time_type= MYSQL_TIMESTAMP_DATE; + return res; +} + + +String *Item_date_typecast::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + TIME ltime; + + if (!get_arg0_date(<ime,1) && !str->alloc(11)) + { + make_date((DATE_TIME_FORMAT *) 0, <ime, str); + return str; + } + + null_value=1; + return 0; +} + + +/* + MAKEDATE(a,b) is a date function that creates a date value + from a year and day value. +*/ + +String *Item_func_makedate::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + TIME l_time; + long daynr= (long) args[1]->val_int(); + long yearnr= (long) args[0]->val_int(); + long days; + + if (args[0]->null_value || args[1]->null_value || + yearnr < 0 || daynr <= 0) + goto err; + + days= calc_daynr(yearnr,1,1) + daynr - 1; + /* Day number from year 0 to 9999-12-31 */ + if (days >= 0 && days < MAX_DAY_NUMBER) + { + null_value=0; + get_date_from_daynr(days,&l_time.year,&l_time.month,&l_time.day); + if (str->alloc(11)) + goto err; + make_date((DATE_TIME_FORMAT *) 0, &l_time, str); + return str; + } + +err: + null_value=1; + return 0; +} + + +void Item_func_add_time::fix_length_and_dec() +{ + enum_field_types arg0_field_type; + decimals=0; + max_length=MAX_DATETIME_FULL_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; + maybe_null= 1; + + /* + The field type for the result of an Item_func_add_time function is defined + as follows: + + - If first arg is a MYSQL_TYPE_DATETIME or MYSQL_TYPE_TIMESTAMP + result is MYSQL_TYPE_DATETIME + - If first arg is a MYSQL_TYPE_TIME result is MYSQL_TYPE_TIME + - Otherwise the result is MYSQL_TYPE_STRING + */ + + cached_field_type= MYSQL_TYPE_STRING; + arg0_field_type= args[0]->field_type(); + if (arg0_field_type == MYSQL_TYPE_DATE || + arg0_field_type == MYSQL_TYPE_DATETIME || + arg0_field_type == MYSQL_TYPE_TIMESTAMP) + cached_field_type= MYSQL_TYPE_DATETIME; + else if (arg0_field_type == MYSQL_TYPE_TIME) + cached_field_type= MYSQL_TYPE_TIME; +} + +/* + ADDTIME(t,a) and SUBTIME(t,a) are time functions that calculate a + time/datetime value + + t: time_or_datetime_expression + a: time_expression + + Result: Time value or datetime value +*/ + +String *Item_func_add_time::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + TIME l_time1, l_time2, l_time3; + bool is_time= 0; + long days, microseconds; + longlong seconds; + int l_sign= sign; + + null_value=0; + if (is_date) // TIMESTAMP function + { + if (get_arg0_date(&l_time1,1) || + args[1]->get_time(&l_time2) || + l_time1.time_type == MYSQL_TIMESTAMP_TIME || + l_time2.time_type != MYSQL_TIMESTAMP_TIME) + goto null_date; + } + else // ADDTIME function + { + if (args[0]->get_time(&l_time1) || + args[1]->get_time(&l_time2) || + l_time2.time_type == MYSQL_TIMESTAMP_DATETIME) + goto null_date; + is_time= (l_time1.time_type == MYSQL_TIMESTAMP_TIME); + } + if (l_time1.neg != l_time2.neg) + l_sign= -l_sign; + + l_time3.neg= calc_time_diff(&l_time1, &l_time2, -l_sign, + &seconds, µseconds); + + /* + If first argument was negative and diff between arguments + is non-zero we need to swap sign to get proper result. 
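+ For example, for ADDTIME('-01:00:00', '00:30:00') calc_time_diff() returns a positive 00:30:00, and only this sign swap turns it into the correct result '-00:30:00'.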
+ */ + if (l_time1.neg && (seconds || microseconds)) + l_time3.neg= 1-l_time3.neg; // Swap sign of result + + if (!is_time && l_time3.neg) + goto null_date; + + days= (long)(seconds/86400L); + + calc_time_from_sec(&l_time3, (long)(seconds%86400L), microseconds); + if (!is_time) + { + get_date_from_daynr(days,&l_time3.year,&l_time3.month,&l_time3.day); + if (l_time3.day && + !make_datetime(l_time1.second_part || l_time2.second_part ? + DATE_TIME_MICROSECOND : DATE_TIME, + &l_time3, str)) + return str; + goto null_date; + } + + l_time3.hour+= days*24; + if (!make_datetime(l_time1.second_part || l_time2.second_part ? + TIME_MICROSECOND : TIME_ONLY, + &l_time3, str)) + return str; + +null_date: + null_value=1; + return 0; +} + + +void Item_func_add_time::print(String *str) +{ + if (is_date) + { + DBUG_ASSERT(sign > 0); + str->append("timestamp(", 10); + } + else + { + if (sign > 0) + str->append("addtime(", 8); + else + str->append("subtime(", 8); + } + args[0]->print(str); + str->append(','); args[0]->print(str); - str->append(" AS "); + str->append(')'); +} + + +/* + TIMEDIFF(t,s) is a time function that calculates the + time value between a start and end time. + + t and s: time_or_datetime_expression + Result: Time value +*/ + +String *Item_func_timediff::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + longlong seconds; + long microseconds; + int l_sign= 1; + TIME l_time1 ,l_time2, l_time3; + + null_value= 0; + if (args[0]->get_time(&l_time1) || + args[1]->get_time(&l_time2) || + l_time1.time_type != l_time2.time_type) + goto null_date; + + if (l_time1.neg != l_time2.neg) + l_sign= -l_sign; + + l_time3.neg= calc_time_diff(&l_time1, &l_time2, l_sign, + &seconds, µseconds); + + /* + For MYSQL_TIMESTAMP_TIME only: + If first argument was negative and diff between arguments + is non-zero we need to swap sign to get proper result. + */ + if (l_time1.neg && (seconds || microseconds)) + l_time3.neg= 1-l_time3.neg; // Swap sign of result + + calc_time_from_sec(&l_time3, (long) seconds, microseconds); + + if (!make_datetime(l_time1.second_part || l_time2.second_part ? + TIME_MICROSECOND : TIME_ONLY, + &l_time3, str)) + return str; + +null_date: + null_value=1; + return 0; +} + +/* + MAKETIME(h,m,s) is a time function that calculates a time value + from the total number of hours, minutes, and seconds. + Result: Time value +*/ + +String *Item_func_maketime::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + TIME ltime; + + long hour= (long) args[0]->val_int(); + long minute= (long) args[1]->val_int(); + long second= (long) args[2]->val_int(); + + if ((null_value=(args[0]->null_value || + args[1]->null_value || + args[2]->null_value || + minute > 59 || minute < 0 || + second > 59 || second < 0 || + str->alloc(19)))) + return 0; + + ltime.neg= 0; + if (hour < 0) + { + ltime.neg= 1; + hour= -hour; + } + ltime.hour= (ulong) hour; + ltime.minute= (ulong) minute; + ltime.second= (ulong) second; + make_time((DATE_TIME_FORMAT *) 0, <ime, str); + return str; +} + + +/* + MICROSECOND(a) is a function ( extraction) that extracts the microseconds + from a. 
+
+
+/*
+  MICROSECOND(a) is a function ( extraction) that extracts the microseconds
+  from a.
+
+  a: Datetime or time value
+  Result: int value
+*/
+
+longlong Item_func_microsecond::val_int()
+{
+  DBUG_ASSERT(fixed == 1);
+  TIME ltime;
+  if (!get_arg0_time(&ltime))
+    return ltime.second_part;
+  return 0;
+}
+
+
+String *Item_func_get_format::val_str(String *str)
+{
+  DBUG_ASSERT(fixed == 1);
+  const char *format_name;
+  KNOWN_DATE_TIME_FORMAT *format;
+  String *val= args[0]->val_str(str);
+  ulong val_len;
+
+  if ((null_value= args[0]->null_value))
+    return 0;
+
+  val_len= val->length();
+  for (format= &known_date_time_formats[0];
+       (format_name= format->format_name);
+       format++)
+  {
+    uint format_name_len;
+    format_name_len= strlen(format_name);
+    if (val_len == format_name_len &&
+        !my_strnncoll(&my_charset_latin1,
+                      (const uchar *) val->ptr(), val_len,
+                      (const uchar *) format_name, val_len))
+    {
+      const char *format_str= get_date_time_format_str(format, type);
+      str->set(format_str, strlen(format_str), &my_charset_bin);
+      return str;
+    }
+  }
+
+  null_value= 1;
+  return 0;
+}
+
+
+void Item_func_get_format::print(String *str)
+{
   str->append(func_name());
+  str->append('(');
+
+  switch (type) {
+  case MYSQL_TIMESTAMP_DATE:
+    str->append("DATE, ");
+    break;
+  case MYSQL_TIMESTAMP_DATETIME:
+    str->append("DATETIME, ");
+    break;
+  case MYSQL_TIMESTAMP_TIME:
+    str->append("TIME, ");
+    break;
+  default:
+    DBUG_ASSERT(0);
+  }
+  args[0]->print(str);
   str->append(')');
 }
+
+
+/*
+  Get type of datetime value (DATE/TIME/...) which will be produced
+  according to format string.
+
+  SYNOPSIS
+    get_date_time_result_type()
+      format - format string
+      length - length of format string
+
+  NOTE
+    We don't process day format's characters('D', 'd', 'e') because day
+    may be a member of all date/time types.
+
+    Format specifiers supported by this function should be in sync with
+    specifiers supported by extract_date_time() function.
+
+  RETURN VALUE
+    One of date_time_format_types values:
+    DATE_TIME_MICROSECOND, DATE_TIME, DATE_ONLY, TIME_MICROSECOND, TIME_ONLY
+*/
+
+static date_time_format_types
+get_date_time_result_type(const char *format, uint length)
+{
+  const char *time_part_frms= "HISThiklrs";
+  const char *date_part_frms= "MVUXYWabcjmvuxyw";
+  bool date_part_used= 0, time_part_used= 0, frac_second_used= 0;
+
+  const char *val= format;
+  const char *end= format + length;
+
+  for (; val != end && val != end; val++)
+  {
+    if (*val == '%' && val+1 != end)
+    {
+      val++;
+      if (*val == 'f')
+        frac_second_used= time_part_used= 1;
+      else if (!time_part_used && strchr(time_part_frms, *val))
+        time_part_used= 1;
+      else if (!date_part_used && strchr(date_part_frms, *val))
+        date_part_used= 1;
+      if (date_part_used && frac_second_used)
+      {
+        /*
+          frac_second_used implies time_part_used, and thus we already
+          have all types of date-time components and can end our search.
+        */
+        return DATE_TIME_MICROSECOND;
+      }
+    }
+  }
+
+  /* We don't have all three types of date-time components */
+  if (frac_second_used)
+    return TIME_MICROSECOND;
+  if (time_part_used)
+  {
+    if (date_part_used)
+      return DATE_TIME;
+    return TIME_ONLY;
+  }
+  return DATE_ONLY;
+}
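
Aside: get_date_time_result_type() above classifies a format string by which specifier groups it mentions, with '%f' forcing the microsecond variants. The following compilable sketch mirrors that classification with the same specifier sets so it can be exercised in isolation; the enum and classify_format() are local stand-ins, not the server's types:

// Standalone sketch of the format-string classification described above.
#include <cstddef>
#include <cstdio>
#include <cstring>

enum fmt_class { TIME_ONLY, TIME_MICROSECOND, DATE_ONLY, DATE_TIME,
                 DATE_TIME_MICROSECOND };

static fmt_class classify_format(const char *format, std::size_t length)
{
  const char *time_part_frms= "HISThiklrs";
  const char *date_part_frms= "MVUXYWabcjmvuxyw";
  bool date_part= false, time_part= false, frac= false;

  for (const char *val= format, *end= format + length; val != end; val++)
  {
    if (*val != '%' || val + 1 == end)
      continue;
    val++;                                  // look at the specifier character
    if (*val == 'f')
      frac= time_part= true;                // %f implies a time part
    else if (std::strchr(time_part_frms, *val))
      time_part= true;
    else if (std::strchr(date_part_frms, *val))
      date_part= true;
  }
  if (date_part && frac) return DATE_TIME_MICROSECOND;
  if (frac)              return TIME_MICROSECOND;
  if (time_part)         return date_part ? DATE_TIME : TIME_ONLY;
  return DATE_ONLY;
}

int main()
{
  const char *samples[]= { "%Y-%m-%d", "%H:%i:%s", "%Y-%m-%d %H:%i:%s.%f" };
  for (const char *s : samples)
    std::printf("%-24s -> %d\n", s, (int) classify_format(s, std::strlen(s)));
  return 0;   // prints DATE_ONLY(2), TIME_ONLY(0), DATE_TIME_MICROSECOND(4)
}
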
+
+
+Field *Item_func_str_to_date::tmp_table_field(TABLE *t_arg)
+{
+  if (cached_field_type == MYSQL_TYPE_TIME)
+    return (new Field_time(maybe_null, name, t_arg, &my_charset_bin));
+  if (cached_field_type == MYSQL_TYPE_DATE)
+    return (new Field_date(maybe_null, name, t_arg, &my_charset_bin));
+  if (cached_field_type == MYSQL_TYPE_DATETIME)
+    return (new Field_datetime(maybe_null, name, t_arg, &my_charset_bin));
+  return (new Field_string(max_length, maybe_null, name, t_arg, &my_charset_bin));
+}
+
+
+void Item_func_str_to_date::fix_length_and_dec()
+{
+  char format_buff[64];
+  String format_str(format_buff, sizeof(format_buff), &my_charset_bin);
+  String *format;
+  maybe_null= 1;
+  decimals=0;
+  cached_field_type= MYSQL_TYPE_STRING;
+  max_length= MAX_DATETIME_FULL_WIDTH*MY_CHARSET_BIN_MB_MAXLEN;
+  cached_timestamp_type= MYSQL_TIMESTAMP_NONE;
+  format= args[1]->val_str(&format_str);
+  if (!args[1]->null_value && (const_item= args[1]->const_item()))
+  {
+    cached_format_type= get_date_time_result_type(format->ptr(),
+                                                  format->length());
+    switch (cached_format_type) {
+    case DATE_ONLY:
+      cached_timestamp_type= MYSQL_TIMESTAMP_DATE;
+      cached_field_type= MYSQL_TYPE_DATE;
+      max_length= MAX_DATE_WIDTH*MY_CHARSET_BIN_MB_MAXLEN;
+      break;
+    case TIME_ONLY:
+    case TIME_MICROSECOND:
+      cached_timestamp_type= MYSQL_TIMESTAMP_TIME;
+      cached_field_type= MYSQL_TYPE_TIME;
+      max_length= MAX_TIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN;
+      break;
+    default:
+      cached_timestamp_type= MYSQL_TIMESTAMP_DATETIME;
+      cached_field_type= MYSQL_TYPE_DATETIME;
+      break;
+    }
+  }
+}
+
+bool Item_func_str_to_date::get_date(TIME *ltime, uint fuzzy_date)
+{
+  DATE_TIME_FORMAT date_time_format;
+  char val_buff[64], format_buff[64];
+  String val_str(val_buff, sizeof(val_buff), &my_charset_bin), *val;
+  String format_str(format_buff, sizeof(format_buff), &my_charset_bin), *format;
+
+  val= args[0]->val_str(&val_str);
+  format= args[1]->val_str(&format_str);
+  if (args[0]->null_value || args[1]->null_value)
+    goto null_date;
+
+  null_value= 0;
+  bzero((char*) ltime, sizeof(ltime));
+  date_time_format.format.str= (char*) format->ptr();
+  date_time_format.format.length= format->length();
+  if (extract_date_time(&date_time_format, val->ptr(), val->length(),
+                        ltime, cached_timestamp_type, 0))
+    goto null_date;
+  if (cached_timestamp_type == MYSQL_TIMESTAMP_TIME && ltime->day)
+  {
+    /*
+      Day part for time type can be nonzero value and so
+      we should add hours from day part to hour part to
+      keep valid time value.
+    */
+    ltime->hour+= ltime->day*24;
+    ltime->day= 0;
+  }
+  return 0;
+
+null_date:
+  return (null_value=1);
+}
+
+
+String *Item_func_str_to_date::val_str(String *str)
+{
+  DBUG_ASSERT(fixed == 1);
+  TIME ltime;
+
+  if (Item_func_str_to_date::get_date(&ltime, TIME_FUZZY_DATE))
+    return 0;
+
+  if (!make_datetime((const_item ? cached_format_type :
+                      (ltime.second_part ?
DATE_TIME_MICROSECOND : DATE_TIME)),
+                     &ltime, str))
+    return str;
+  return 0;
+}
+
+
+bool Item_func_last_day::get_date(TIME *ltime, uint fuzzy_date)
+{
+  if (get_arg0_date(ltime, fuzzy_date & ~TIME_FUZZY_DATE))
+    return 1;
+  uint month_idx= ltime->month-1;
+  ltime->day= days_in_month[month_idx];
+  if ( month_idx == 1 && calc_days_in_year(ltime->year) == 366)
+    ltime->day= 29;
+  ltime->time_type= MYSQL_TIMESTAMP_DATE;
+  return 0;
+}
diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h
index 8ee2f935a80..163b1591e52 100644
--- a/sql/item_timefunc.h
+++ b/sql/item_timefunc.h
@@ -17,17 +17,25 @@
 /* Function items used by mysql */
-#ifdef __GNUC__
+#ifdef USE_PRAGMA_INTERFACE
 #pragma interface /* gcc class implementation */
 #endif
+enum date_time_format_types
+{
+  TIME_ONLY= 0, TIME_MICROSECOND, DATE_ONLY, DATE_TIME, DATE_TIME_MICROSECOND
+};
+
 class Item_func_period_add :public Item_int_func
 {
 public:
   Item_func_period_add(Item *a,Item *b) :Item_int_func(a,b) {}
   longlong val_int();
   const char *func_name() const { return "period_add"; }
-  void fix_length_and_dec() { max_length=6; }
+  void fix_length_and_dec()
+  {
+    max_length=6*MY_CHARSET_BIN_MB_MAXLEN;
+  }
 };
@@ -37,7 +45,11 @@ public:
   Item_func_period_diff(Item *a,Item *b) :Item_int_func(a,b) {}
   longlong val_int();
   const char *func_name() const { return "period_diff"; }
-  void fix_length_and_dec() { decimals=0; max_length=6; }
+  void fix_length_and_dec()
+  {
+    decimals=0;
+    max_length=6*MY_CHARSET_BIN_MB_MAXLEN;
+  }
 };
@@ -47,7 +59,12 @@ public:
   Item_func_to_days(Item *a) :Item_int_func(a) {}
   longlong val_int();
   const char *func_name() const { return "to_days"; }
-  void fix_length_and_dec() { decimals=0; max_length=6; maybe_null=1; }
+  void fix_length_and_dec()
+  {
+    decimals=0;
+    max_length=6*MY_CHARSET_BIN_MB_MAXLEN;
+    maybe_null=1;
+  }
 };
@@ -57,7 +74,12 @@ public:
   Item_func_dayofmonth(Item *a) :Item_int_func(a) {}
   longlong val_int();
   const char *func_name() const { return "dayofmonth"; }
-  void fix_length_and_dec() { decimals=0; max_length=2; maybe_null=1; }
+  void fix_length_and_dec()
+  {
+    decimals=0;
+    max_length=2*MY_CHARSET_BIN_MB_MAXLEN;
+    maybe_null=1;
+  }
 };
@@ -66,11 +88,22 @@ class Item_func_month :public Item_func
 public:
   Item_func_month(Item *a) :Item_func(a) {}
   longlong val_int();
-  double val() { return (double) Item_func_month::val_int(); }
-  String *val_str(String *str) { str->set(val_int()); return null_value ? 0 : str;}
+  double val()
+  { DBUG_ASSERT(fixed == 1); return (double) Item_func_month::val_int(); }
+  String *val_str(String *str)
+  {
+    str->set(val_int(), &my_charset_bin);
+    return null_value ?
0 : str; + } const char *func_name() const { return "month"; } enum Item_result result_type () const { return INT_RESULT; } - void fix_length_and_dec() { decimals=0; max_length=2; maybe_null=1; } + void fix_length_and_dec() + { + collation.set(&my_charset_bin); + decimals=0; + max_length=2*MY_CHARSET_BIN_MB_MAXLEN; + maybe_null=1; + } }; @@ -81,7 +114,13 @@ public: const char *func_name() const { return "monthname"; } String *val_str(String *str); enum Item_result result_type () const { return STRING_RESULT; } - void fix_length_and_dec() { decimals=0; max_length=10; maybe_null=1; } + void fix_length_and_dec() + { + collation.set(&my_charset_bin); + decimals=0; + max_length=10*my_charset_bin.mbmaxlen; + maybe_null=1; + } }; @@ -91,7 +130,12 @@ public: Item_func_dayofyear(Item *a) :Item_int_func(a) {} longlong val_int(); const char *func_name() const { return "dayofyear"; } - void fix_length_and_dec() { decimals=0; max_length=3; maybe_null=1; } + void fix_length_and_dec() + { + decimals=0; + max_length=3*MY_CHARSET_BIN_MB_MAXLEN; + maybe_null=1; + } }; @@ -101,7 +145,12 @@ public: Item_func_hour(Item *a) :Item_int_func(a) {} longlong val_int(); const char *func_name() const { return "hour"; } - void fix_length_and_dec() { decimals=0; max_length=2; maybe_null=1; } + void fix_length_and_dec() + { + decimals=0; + max_length=2*MY_CHARSET_BIN_MB_MAXLEN; + maybe_null=1; + } }; @@ -111,7 +160,12 @@ public: Item_func_minute(Item *a) :Item_int_func(a) {} longlong val_int(); const char *func_name() const { return "minute"; } - void fix_length_and_dec() { decimals=0; max_length=2; maybe_null=1; } + void fix_length_and_dec() + { + decimals=0; + max_length=2*MY_CHARSET_BIN_MB_MAXLEN; + maybe_null=1; + } }; @@ -121,7 +175,12 @@ public: Item_func_quarter(Item *a) :Item_int_func(a) {} longlong val_int(); const char *func_name() const { return "quarter"; } - void fix_length_and_dec() { decimals=0; max_length=1; maybe_null=1; } + void fix_length_and_dec() + { + decimals=0; + max_length=1*MY_CHARSET_BIN_MB_MAXLEN; + maybe_null=1; + } }; @@ -131,7 +190,12 @@ public: Item_func_second(Item *a) :Item_int_func(a) {} longlong val_int(); const char *func_name() const { return "second"; } - void fix_length_and_dec() { decimals=0; max_length=2; maybe_null=1; } + void fix_length_and_dec() + { + decimals=0; + max_length=2*MY_CHARSET_BIN_MB_MAXLEN; + maybe_null=1; + } }; @@ -141,7 +205,12 @@ public: Item_func_week(Item *a,Item *b) :Item_int_func(a,b) {} longlong val_int(); const char *func_name() const { return "week"; } - void fix_length_and_dec() { decimals=0; max_length=2; maybe_null=1; } + void fix_length_and_dec() + { + decimals=0; + max_length=2*MY_CHARSET_BIN_MB_MAXLEN; + maybe_null=1; + } }; class Item_func_yearweek :public Item_int_func @@ -150,7 +219,12 @@ public: Item_func_yearweek(Item *a,Item *b) :Item_int_func(a,b) {} longlong val_int(); const char *func_name() const { return "yearweek"; } - void fix_length_and_dec() { decimals=0; max_length=6; maybe_null=1; } + void fix_length_and_dec() + { + decimals=0; + max_length=6*MY_CHARSET_BIN_MB_MAXLEN; + maybe_null=1; + } }; @@ -160,7 +234,12 @@ public: Item_func_year(Item *a) :Item_int_func(a) {} longlong val_int(); const char *func_name() const { return "year"; } - void fix_length_and_dec() { decimals=0; max_length=4; maybe_null=1; } + void fix_length_and_dec() + { + decimals=0; + max_length=4*MY_CHARSET_BIN_MB_MAXLEN; + maybe_null=1; + } }; @@ -171,12 +250,22 @@ public: Item_func_weekday(Item *a,bool type_arg) :Item_func(a), odbc_type(type_arg) {} longlong 
val_int(); - double val() { return (double) val_int(); } - String *val_str(String *str) { str->set(val_int()); return null_value ? 0 : str;} + double val() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } + String *val_str(String *str) + { + DBUG_ASSERT(fixed == 1); + str->set(val_int(), &my_charset_bin); + return null_value ? 0 : str; + } const char *func_name() const { return "weekday"; } enum Item_result result_type () const { return INT_RESULT; } - void fix_length_and_dec() { decimals=0; max_length=1; maybe_null=1; } - unsigned int size_of() { return sizeof(*this);} + void fix_length_and_dec() + { + collation.set(&my_charset_bin); + decimals=0; + max_length=1*MY_CHARSET_BIN_MB_MAXLEN; + maybe_null=1; + } }; class Item_func_dayname :public Item_func_weekday @@ -186,7 +275,13 @@ class Item_func_dayname :public Item_func_weekday const char *func_name() const { return "dayname"; } String *val_str(String *str); enum Item_result result_type () const { return STRING_RESULT; } - void fix_length_and_dec() { decimals=0; max_length=9; maybe_null=1; } + void fix_length_and_dec() + { + collation.set(&my_charset_bin); + decimals=0; + max_length=9*MY_CHARSET_BIN_MB_MAXLEN; + maybe_null=1; + } }; @@ -197,12 +292,12 @@ public: Item_func_unix_timestamp() :Item_int_func() {} Item_func_unix_timestamp(Item *a) :Item_int_func(a) {} longlong val_int(); - const char *func_name() const { return "timestamp"; } + const char *func_name() const { return "unix_timestamp"; } void fix_length_and_dec() { - decimals=0; max_length=10; + decimals=0; + max_length=10*MY_CHARSET_BIN_MB_MAXLEN; } - unsigned int size_of() { return sizeof(*this);} }; @@ -214,7 +309,8 @@ public: const char *func_name() const { return "time_to_sec"; } void fix_length_and_dec() { - decimals=0; max_length=10; + decimals=0; + max_length=10*MY_CHARSET_BIN_MB_MAXLEN; } }; @@ -227,20 +323,22 @@ public: Item_date() :Item_func() {} Item_date(Item *a) :Item_func(a) {} enum Item_result result_type () const { return STRING_RESULT; } + enum_field_types field_type() const { return MYSQL_TYPE_DATE; } String *val_str(String *str); - double val() { return (double) val_int(); } + longlong val_int(); + double val() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } const char *func_name() const { return "date"; } - void fix_length_and_dec() { decimals=0; max_length=10; } - bool save_in_field(Field *to, bool no_conversions); - void make_field(Send_field *tmp_field) - { - init_make_field(tmp_field,FIELD_TYPE_DATE); + void fix_length_and_dec() + { + collation.set(&my_charset_bin); + decimals=0; + max_length=MAX_DATE_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; } + int save_in_field(Field *to, bool no_conversions); Field *tmp_table_field(TABLE *t_arg) { - return (!t_arg) ? result_field : new Field_date(maybe_null, name, t_arg); + return (new Field_date(maybe_null, name, t_arg, &my_charset_bin)); } - unsigned int size_of() { return sizeof(*this);} }; @@ -250,79 +348,138 @@ public: Item_date_func() :Item_str_func() {} Item_date_func(Item *a) :Item_str_func(a) {} Item_date_func(Item *a,Item *b) :Item_str_func(a,b) {} - void make_field(Send_field *tmp_field) - { - init_make_field(tmp_field,FIELD_TYPE_DATETIME); - } + Item_date_func(Item *a,Item *b, Item *c) :Item_str_func(a,b,c) {} + enum_field_types field_type() const { return MYSQL_TYPE_DATETIME; } Field *tmp_table_field(TABLE *t_arg) { - return (!t_arg) ? 
result_field : new Field_datetime(maybe_null, name, - t_arg); + return (new Field_datetime(maybe_null, name, t_arg, &my_charset_bin)); } - unsigned int size_of() { return sizeof(*this);} }; +/* Abstract CURTIME function. Children should define what time zone is used */ + class Item_func_curtime :public Item_func { longlong value; - char buff[9]; + char buff[9*2+32]; uint buff_length; public: Item_func_curtime() :Item_func() {} Item_func_curtime(Item *a) :Item_func(a) {} enum Item_result result_type () const { return STRING_RESULT; } - double val() { return (double) value; } - longlong val_int() { return value; } - String *val_str(String *str) - { str_value.set(buff,buff_length); return &str_value; } - const char *func_name() const { return "curtime"; } + enum_field_types field_type() const { return MYSQL_TYPE_TIME; } + double val() { DBUG_ASSERT(fixed == 1); return (double) value; } + longlong val_int() { DBUG_ASSERT(fixed == 1); return value; } + String *val_str(String *str); void fix_length_and_dec(); - void make_field(Send_field *tmp_field) - { - init_make_field(tmp_field,FIELD_TYPE_TIME); - } Field *tmp_table_field(TABLE *t_arg) { - return (!t_arg) ? result_field : new Field_time(maybe_null, name, t_arg); - } - unsigned int size_of() { return sizeof(*this);} + return (new Field_time(maybe_null, name, t_arg, &my_charset_bin)); + } + /* + Abstract method that defines which time zone is used for conversion. + Converts time current time in my_time_t representation to broken-down + TIME representation using UTC-SYSTEM or per-thread time zone. + */ + virtual void store_now_in_TIME(TIME *now_time)=0; }; +class Item_func_curtime_local :public Item_func_curtime +{ +public: + Item_func_curtime_local() :Item_func_curtime() {} + Item_func_curtime_local(Item *a) :Item_func_curtime(a) {} + const char *func_name() const { return "curtime"; } + virtual void store_now_in_TIME(TIME *now_time); +}; + + +class Item_func_curtime_utc :public Item_func_curtime +{ +public: + Item_func_curtime_utc() :Item_func_curtime() {} + Item_func_curtime_utc(Item *a) :Item_func_curtime(a) {} + const char *func_name() const { return "utc_time"; } + virtual void store_now_in_TIME(TIME *now_time); +}; + + +/* Abstract CURDATE function. See also Item_func_curtime. */ + class Item_func_curdate :public Item_date { longlong value; TIME ltime; public: Item_func_curdate() :Item_date() {} - longlong val_int() { return (value) ; } + longlong val_int() { DBUG_ASSERT(fixed == 1); return (value) ; } + String *val_str(String *str); + void fix_length_and_dec(); + bool get_date(TIME *res, uint fuzzy_date); + virtual void store_now_in_TIME(TIME *now_time)=0; +}; + + +class Item_func_curdate_local :public Item_func_curdate +{ +public: + Item_func_curdate_local() :Item_func_curdate() {} const char *func_name() const { return "curdate"; } - void fix_length_and_dec(); /* Retrieves curtime */ - bool get_date(TIME *res,bool fuzzy_date); - unsigned int size_of() { return sizeof(*this);} + void store_now_in_TIME(TIME *now_time); }; +class Item_func_curdate_utc :public Item_func_curdate +{ +public: + Item_func_curdate_utc() :Item_func_curdate() {} + const char *func_name() const { return "utc_date"; } + void store_now_in_TIME(TIME *now_time); +}; + + +/* Abstract CURRENT_TIMESTAMP function. 
See also Item_func_curtime */ + class Item_func_now :public Item_date_func { longlong value; - char buff[20]; + char buff[20*2+32]; // +32 to make my_snprintf_{8bit|ucs2} happy uint buff_length; TIME ltime; public: Item_func_now() :Item_date_func() {} Item_func_now(Item *a) :Item_date_func(a) {} enum Item_result result_type () const { return STRING_RESULT; } - double val() { return (double) value; } - longlong val_int() { return value; } - bool save_in_field(Field *to, bool no_conversions); - String *val_str(String *str) - { str_value.set(buff,buff_length); return &str_value; } - const char *func_name() const { return "now"; } + double val() { DBUG_ASSERT(fixed == 1); return (double) value; } + longlong val_int() { DBUG_ASSERT(fixed == 1); return value; } + int save_in_field(Field *to, bool no_conversions); + String *val_str(String *str); void fix_length_and_dec(); - bool get_date(TIME *res,bool fuzzy_date); - unsigned int size_of() { return sizeof(*this);} + bool get_date(TIME *res, uint fuzzy_date); + virtual void store_now_in_TIME(TIME *now_time)=0; +}; + + +class Item_func_now_local :public Item_func_now +{ +public: + Item_func_now_local() :Item_func_now() {} + Item_func_now_local(Item *a) :Item_func_now(a) {} + const char *func_name() const { return "now"; } + virtual void store_now_in_TIME(TIME *now_time); + virtual enum Functype functype() const { return NOW_FUNC; } +}; + + +class Item_func_now_utc :public Item_func_now +{ +public: + Item_func_now_utc() :Item_func_now() {} + Item_func_now_utc(Item *a) :Item_func_now(a) {} + const char *func_name() const { return "utc_timestamp"; } + virtual void store_now_in_TIME(TIME *now_time); }; @@ -330,37 +487,81 @@ class Item_func_from_days :public Item_date { public: Item_func_from_days(Item *a) :Item_date(a) {} - longlong val_int(); const char *func_name() const { return "from_days"; } + bool get_date(TIME *res, uint fuzzy_date); }; class Item_func_date_format :public Item_str_func { int fixed_length; - const bool date_or_time; + const bool is_time_format; String value; public: - Item_func_date_format(Item *a,Item *b,bool date_or_time_arg) - :Item_str_func(a,b),date_or_time(date_or_time_arg) {} + Item_func_date_format(Item *a,Item *b,bool is_time_format_arg) + :Item_str_func(a,b),is_time_format(is_time_format_arg) {} String *val_str(String *str); const char *func_name() const { return "date_format"; } void fix_length_and_dec(); uint format_length(const String *format); - unsigned int size_of() { return sizeof(*this);} }; class Item_func_from_unixtime :public Item_date_func { + THD *thd; public: Item_func_from_unixtime(Item *a) :Item_date_func(a) {} - double val() { return (double) Item_func_from_unixtime::val_int(); } + double val() + { + DBUG_ASSERT(fixed == 1); + return (double) Item_func_from_unixtime::val_int(); + } longlong val_int(); String *val_str(String *str); const char *func_name() const { return "from_unixtime"; } - void fix_length_and_dec() { decimals=0; max_length=19; maybe_null= 1; } - bool get_date(TIME *res,bool fuzzy_date); + void fix_length_and_dec(); + bool get_date(TIME *res, uint fuzzy_date); +}; + + +/* + We need Time_zone class declaration for storing pointers in + Item_func_convert_tz. +*/ +class Time_zone; + +/* + This class represents CONVERT_TZ() function. + The important fact about this function that it is handled in special way. 
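
Aside: Item_func_now_local and Item_func_now_utc above differ only in which time zone fills in the broken-down value; the shared base class owns the caching and formatting and calls the virtual store_now_in_TIME() hook. Below is a minimal standalone sketch of that shape, using <ctime> instead of the server's Time_zone machinery; the class and method names are illustrative only:

// Standalone sketch: one base class formats "now", subclasses only decide
// which time zone is used to fill in the broken-down value.
#include <cstdio>
#include <ctime>

class NowFunc
{
public:
  virtual ~NowFunc() {}
  // Hook (counterpart of the virtual store_now_in_TIME() above).
  virtual void store_now_in_tm(std::tm *out) const = 0;

  // Shared part: formatting does not care which zone was used.
  void print() const
  {
    std::tm now;
    store_now_in_tm(&now);
    char buf[32];
    std::strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", &now);
    std::printf("%s\n", buf);
  }
};

class NowLocal : public NowFunc            // like NOW()
{
public:
  void store_now_in_tm(std::tm *out) const
  {
    std::time_t t= std::time(0);
    *out= *std::localtime(&t);
  }
};

class NowUtc : public NowFunc              // like UTC_TIMESTAMP()
{
public:
  void store_now_in_tm(std::tm *out) const
  {
    std::time_t t= std::time(0);
    *out= *std::gmtime(&t);
  }
};

int main()
{
  NowLocal local_now;
  NowUtc utc_now;
  local_now.print();
  utc_now.print();
  return 0;
}
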
+ When such function is met in expression time_zone system tables are added + to global list of tables to open, so later those already opened and locked + tables can be used during this function calculation for loading time zone + descriptions. +*/ +class Item_func_convert_tz :public Item_date_func +{ + /* Cached pointer to list of pre-opened time zone tables. */ + TABLE_LIST *tz_tables; + /* + If time zone parameters are constants we are caching objects that + represent them (we use separate from_tz_cached/to_tz_cached members + to indicate this fact, since NULL is legal value for from_tz/to_tz + members. + */ + bool from_tz_cached, to_tz_cached; + Time_zone *from_tz, *to_tz; + public: + Item_func_convert_tz(Item *a, Item *b, Item *c): + Item_date_func(a, b, c), from_tz_cached(0), to_tz_cached(0) {} + longlong val_int(); + double val() { return (double) val_int(); } + String *val_str(String *str); + const char *func_name() const { return "convert_tz"; } + bool fix_fields(THD *, struct st_table_list *, Item **); + void fix_length_and_dec(); + bool get_date(TIME *res, uint fuzzy_date); + void cleanup(); }; @@ -368,61 +569,76 @@ class Item_func_sec_to_time :public Item_str_func { public: Item_func_sec_to_time(Item *item) :Item_str_func(item) {} - double val() { return (double) Item_func_sec_to_time::val_int(); } + double val() + { + DBUG_ASSERT(fixed == 1); + return (double) Item_func_sec_to_time::val_int(); + } longlong val_int(); String *val_str(String *); - void fix_length_and_dec() { maybe_null=1; max_length=13; } - const char *func_name() const { return "sec_to_time"; } - void make_field(Send_field *tmp_field) - { - init_make_field(tmp_field,FIELD_TYPE_TIME); + void fix_length_and_dec() + { + collation.set(&my_charset_bin); + maybe_null=1; + max_length=MAX_TIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; } + enum_field_types field_type() const { return MYSQL_TYPE_TIME; } + const char *func_name() const { return "sec_to_time"; } Field *tmp_table_field(TABLE *t_arg) { - return (!t_arg) ? result_field : new Field_time(maybe_null, name, t_arg); + return (new Field_time(maybe_null, name, t_arg, &my_charset_bin)); } }; +/* + The following must be sorted so that simple intervals comes first. 
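
Aside: the from_tz_cached/to_tz_cached members declared in Item_func_convert_tz above implement a small optimization: when a time zone argument is constant for the whole statement, it is resolved once and reused for every row, and a cached NULL result is itself a valid answer. A hedged standalone sketch of that caching idea follows; TimeZone, find_time_zone() and ConvertTz are invented stand-ins, not the server's Time_zone interface:

// Standalone sketch of constant-argument caching for a CONVERT_TZ-like item.
#include <cstdio>
#include <map>
#include <string>

struct TimeZone { long offset_seconds; };

// Pretend catalogue lookup (the server loads descriptions from tz tables).
static const TimeZone *find_time_zone(const std::string &name)
{
  static std::map<std::string, TimeZone> catalogue=
    { { "UTC", { 0 } }, { "Europe/Moscow", { 3 * 3600 } } };
  std::map<std::string, TimeZone>::const_iterator it= catalogue.find(name);
  return it == catalogue.end() ? 0 : &it->second;
}

class ConvertTz
{
  bool from_is_const;
  bool from_cached;                 // role of from_tz_cached
  const TimeZone *from_tz;          // valid only when from_cached is true
public:
  ConvertTz(bool arg_is_const)
    : from_is_const(arg_is_const), from_cached(false), from_tz(0) {}

  const TimeZone *get_from_tz(const std::string &arg_value)
  {
    if (from_is_const && from_cached)
      return from_tz;               // hit: no second lookup this statement
    const TimeZone *tz= find_time_zone(arg_value);
    if (from_is_const)
    {
      from_tz= tz;                  // caching a null lookup is fine too
      from_cached= true;
    }
    return tz;
  }
};

int main()
{
  ConvertTz item(true);             // e.g. CONVERT_TZ(col, 'UTC', ...)
  for (int row= 0; row < 3; row++)
  {
    const TimeZone *tz= item.get_from_tz("UTC");
    std::printf("row %d: offset %ld\n", row, tz ? tz->offset_seconds : -1L);
  }
  return 0;
}
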
+ (get_interval_value() depends on this) +*/ -enum interval_type { INTERVAL_YEAR, INTERVAL_MONTH, INTERVAL_DAY, - INTERVAL_HOUR, INTERVAL_MINUTE, INTERVAL_SECOND, - INTERVAL_YEAR_MONTH, INTERVAL_DAY_HOUR, - INTERVAL_DAY_MINUTE, INTERVAL_DAY_SECOND, - INTERVAL_HOUR_MINUTE, INTERVAL_HOUR_SECOND, - INTERVAL_MINUTE_SECOND}; +enum interval_type +{ + INTERVAL_YEAR, INTERVAL_MONTH, INTERVAL_DAY, INTERVAL_HOUR, INTERVAL_MINUTE, + INTERVAL_SECOND, INTERVAL_MICROSECOND ,INTERVAL_YEAR_MONTH, + INTERVAL_DAY_HOUR, INTERVAL_DAY_MINUTE, INTERVAL_DAY_SECOND, + INTERVAL_HOUR_MINUTE, INTERVAL_HOUR_SECOND, INTERVAL_MINUTE_SECOND, + INTERVAL_DAY_MICROSECOND, INTERVAL_HOUR_MICROSECOND, + INTERVAL_MINUTE_MICROSECOND, INTERVAL_SECOND_MICROSECOND +}; class Item_date_add_interval :public Item_date_func { - const interval_type int_type; String value; - const bool date_sub_interval; + enum_field_types cached_field_type; public: + const interval_type int_type; // keep it public + const bool date_sub_interval; // keep it public Item_date_add_interval(Item *a,Item *b,interval_type type_arg,bool neg_arg) :Item_date_func(a,b),int_type(type_arg), date_sub_interval(neg_arg) {} String *val_str(String *); const char *func_name() const { return "date_add_interval"; } - void fix_length_and_dec() { maybe_null=1; max_length=19; value.alloc(32);} - double val() { return (double) val_int(); } + void fix_length_and_dec(); + enum_field_types field_type() const { return cached_field_type; } + double val() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } longlong val_int(); - bool get_date(TIME *res,bool fuzzy_date); - unsigned int size_of() { return sizeof(*this);} + bool get_date(TIME *res, uint fuzzy_date); + void print(String *str); }; class Item_extract :public Item_int_func { - const interval_type int_type; String value; bool date_value; public: + const interval_type int_type; // keep it public Item_extract(interval_type type_arg, Item *a) :Item_int_func(a), int_type(type_arg) {} longlong val_int(); const char *func_name() const { return "extract"; } void fix_length_and_dec(); bool eq(const Item *item, bool binary_cmp) const; - unsigned int size_of() { return sizeof(*this);} + void print(String *str); }; @@ -430,66 +646,256 @@ class Item_typecast :public Item_str_func { public: Item_typecast(Item *a) :Item_str_func(a) {} - const char *func_name() const { return "char"; } String *val_str(String *a) - { a=args[0]->val_str(a); null_value=args[0]->null_value; return a; } - void fix_length_and_dec() { max_length=args[0]->max_length; } + { + DBUG_ASSERT(fixed == 1); + String *tmp=args[0]->val_str(a); + null_value=args[0]->null_value; + if (tmp) + tmp->set_charset(collation.collation); + return tmp; + } + void fix_length_and_dec() + { + collation.set(&my_charset_bin); + max_length=args[0]->max_length; + } + virtual const char* cast_type() const= 0; void print(String *str); }; +class Item_typecast_maybe_null :public Item_typecast +{ +public: + Item_typecast_maybe_null(Item *a) :Item_typecast(a) {} + void fix_length_and_dec() + { + collation.set(&my_charset_bin); + max_length=args[0]->max_length; + maybe_null= 1; + } +}; + + class Item_char_typecast :public Item_typecast { + int cast_length; + CHARSET_INFO *cast_cs, *from_cs; + bool charset_conversion; + String tmp_value; public: - Item_char_typecast(Item *a) :Item_typecast(a) {} - void fix_length_and_dec() { binary=0; max_length=args[0]->max_length; } + Item_char_typecast(Item *a, int length_arg, CHARSET_INFO *cs_arg) + :Item_typecast(a), cast_length(length_arg), cast_cs(cs_arg) 
{} + bool eq(const Item *item, bool binary_cmp) const; + const char *func_name() const { return "cast_as_char"; } + const char* cast_type() const { return "char"; }; + String *val_str(String *a); + void fix_length_and_dec(); + void print(String *str); }; -class Item_date_typecast :public Item_typecast +class Item_date_typecast :public Item_typecast_maybe_null { public: - Item_date_typecast(Item *a) :Item_typecast(a) {} - const char *func_name() const { return "date"; } - void make_field(Send_field *tmp_field) + Item_date_typecast(Item *a) :Item_typecast_maybe_null(a) {} + const char *func_name() const { return "cast_as_date"; } + String *val_str(String *str); + bool get_date(TIME *ltime, uint fuzzy_date); + const char *cast_type() const { return "date"; } + enum_field_types field_type() const { return MYSQL_TYPE_DATE; } + Field *tmp_table_field(TABLE *t_arg) { - init_make_field(tmp_field,FIELD_TYPE_DATE); + return (new Field_date(maybe_null, name, t_arg, &my_charset_bin)); + } + void fix_length_and_dec() + { + collation.set(&my_charset_bin); + max_length= 10; + maybe_null= 1; } +}; + + +class Item_time_typecast :public Item_typecast_maybe_null +{ +public: + Item_time_typecast(Item *a) :Item_typecast_maybe_null(a) {} + const char *func_name() const { return "cast_as_time"; } + String *val_str(String *str); + bool get_time(TIME *ltime); + const char *cast_type() const { return "time"; } + enum_field_types field_type() const { return MYSQL_TYPE_TIME; } Field *tmp_table_field(TABLE *t_arg) { - return (!t_arg) ? result_field : new Field_date(maybe_null, name, t_arg); - } + return (new Field_time(maybe_null, name, t_arg, &my_charset_bin)); + } }; -class Item_time_typecast :public Item_typecast +class Item_datetime_typecast :public Item_typecast_maybe_null { public: - Item_time_typecast(Item *a) :Item_typecast(a) {} - const char *func_name() const { return "time"; } - void make_field(Send_field *tmp_field) + Item_datetime_typecast(Item *a) :Item_typecast_maybe_null(a) {} + const char *func_name() const { return "cast_as_datetime"; } + String *val_str(String *str); + const char *cast_type() const { return "datetime"; } + enum_field_types field_type() const { return MYSQL_TYPE_DATETIME; } + Field *tmp_table_field(TABLE *t_arg) { - init_make_field(tmp_field,FIELD_TYPE_TIME); + return (new Field_datetime(maybe_null, name, t_arg, &my_charset_bin)); } +}; + +class Item_func_makedate :public Item_str_func +{ +public: + Item_func_makedate(Item *a,Item *b) :Item_str_func(a,b) {} + String *val_str(String *str); + const char *func_name() const { return "makedate"; } + enum_field_types field_type() const { return MYSQL_TYPE_DATE; } + void fix_length_and_dec() + { + decimals=0; + max_length=MAX_DATE_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; + } + Field *tmp_table_field(TABLE *t_arg) + { + return (new Field_date(maybe_null, name, t_arg, &my_charset_bin)); + } +}; + + +class Item_func_add_time :public Item_str_func +{ + const bool is_date; + int sign; + enum_field_types cached_field_type; + +public: + Item_func_add_time(Item *a, Item *b, bool type_arg, bool neg_arg) + :Item_str_func(a, b), is_date(type_arg) { sign= neg_arg ? -1 : 1; } + String *val_str(String *str); + enum_field_types field_type() const { return cached_field_type; } + void fix_length_and_dec(); + +/* + TODO: + Change this when we support + microseconds in TIME/DATETIME +*/ Field *tmp_table_field(TABLE *t_arg) { - return (!t_arg) ? 
result_field : new Field_time(maybe_null, name, t_arg); + if (cached_field_type == MYSQL_TYPE_TIME) + return (new Field_time(maybe_null, name, t_arg, &my_charset_bin)); + else if (cached_field_type == MYSQL_TYPE_DATETIME) + return (new Field_datetime(maybe_null, name, t_arg, &my_charset_bin)); + return (new Field_string(max_length, maybe_null, name, t_arg, &my_charset_bin)); } + void print(String *str); }; +class Item_func_timediff :public Item_str_func +{ +public: + Item_func_timediff(Item *a, Item *b) + :Item_str_func(a, b) {} + String *val_str(String *str); + const char *func_name() const { return "timediff"; } + enum_field_types field_type() const { return MYSQL_TYPE_TIME; } + void fix_length_and_dec() + { + decimals=0; + max_length=MAX_TIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; + maybe_null= 1; + } + Field *tmp_table_field(TABLE *t_arg) + { + return (new Field_time(maybe_null, name, t_arg, &my_charset_bin)); + } +}; -class Item_datetime_typecast :public Item_typecast +class Item_func_maketime :public Item_str_func { public: - Item_datetime_typecast(Item *a) :Item_typecast(a) {} - const char *func_name() const { return "datetime"; } - void make_field(Send_field *tmp_field) + Item_func_maketime(Item *a, Item *b, Item *c) + :Item_str_func(a, b ,c) {} + String *val_str(String *str); + const char *func_name() const { return "maketime"; } + enum_field_types field_type() const { return MYSQL_TYPE_TIME; } + void fix_length_and_dec() { - init_make_field(tmp_field,FIELD_TYPE_DATETIME); + decimals=0; + max_length=MAX_TIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; } Field *tmp_table_field(TABLE *t_arg) { - return (!t_arg) ? result_field : new Field_datetime(maybe_null, name, - t_arg); + return (new Field_time(maybe_null, name, t_arg, &my_charset_bin)); + } +}; + +class Item_func_microsecond :public Item_int_func +{ +public: + Item_func_microsecond(Item *a) :Item_int_func(a) {} + longlong val_int(); + const char *func_name() const { return "microsecond"; } + void fix_length_and_dec() + { + decimals=0; + maybe_null=1; } }; + + +enum date_time_format +{ + USA_FORMAT, JIS_FORMAT, ISO_FORMAT, EUR_FORMAT, INTERNAL_FORMAT +}; + +class Item_func_get_format :public Item_str_func +{ +public: + const timestamp_type type; // keep it public + Item_func_get_format(timestamp_type type_arg, Item *a) + :Item_str_func(a), type(type_arg) + {} + String *val_str(String *str); + const char *func_name() const { return "get_format"; } + void fix_length_and_dec() + { + maybe_null= 1; + decimals=0; + max_length=17*MY_CHARSET_BIN_MB_MAXLEN; + } + void print(String *str); +}; + + +class Item_func_str_to_date :public Item_str_func +{ + enum_field_types cached_field_type; + date_time_format_types cached_format_type; + timestamp_type cached_timestamp_type; + bool const_item; +public: + Item_func_str_to_date(Item *a, Item *b) + :Item_str_func(a, b) + {} + String *val_str(String *str); + bool get_date(TIME *ltime, uint fuzzy_date); + const char *func_name() const { return "str_to_date"; } + enum_field_types field_type() const { return cached_field_type; } + void fix_length_and_dec(); + Field *tmp_table_field(TABLE *t_arg); +}; + + +class Item_func_last_day :public Item_date +{ +public: + Item_func_last_day(Item *a) :Item_date(a) {} + const char *func_name() const { return "last_day"; } + bool get_date(TIME *res, uint fuzzy_date); +}; diff --git a/sql/item_uniq.cc b/sql/item_uniq.cc index 88e0cbbc0e6..7701bbbb63e 100644 --- a/sql/item_uniq.cc +++ b/sql/item_uniq.cc @@ -15,8 +15,9 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 
02111-1307 USA */ /* Compability file */ -#ifdef __GNUC__ -#pragma implementation + +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation // gcc: Class implementation #endif #include "mysql_priv.h" diff --git a/sql/item_uniq.h b/sql/item_uniq.h index de239d3a8ec..b7e00f9f080 100644 --- a/sql/item_uniq.h +++ b/sql/item_uniq.h @@ -16,7 +16,7 @@ /* Compability file ; This file only contains dummy functions */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_INTERFACE #pragma interface #endif @@ -27,22 +27,34 @@ class Item_func_unique_users :public Item_real_func public: Item_func_unique_users(Item *name_arg,int start,int end,List<Item> &list) :Item_real_func(list) {} - double val() { return 0.0; } + double val() { DBUG_ASSERT(fixed == 1); return 0.0; } void fix_length_and_dec() { decimals=0; max_length=6; } - unsigned int size_of() { return sizeof(*this);} + void print(String *str) { str->append("0.0", 3); } }; + class Item_sum_unique_users :public Item_sum_num { public: Item_sum_unique_users(Item *name_arg,int start,int end,Item *item_arg) :Item_sum_num(item_arg) {} - double val() { return 0.0; } + Item_sum_unique_users(THD *thd, Item_sum_unique_users *item) + :Item_sum_num(thd, item) {} + double val() { DBUG_ASSERT(fixed == 1); return 0.0; } enum Sumfunctype sum_func () const {return UNIQUE_USERS_FUNC;} - void reset() {} + void clear() {} bool add() { return 0; } void reset_field() {} void update_field() {} - bool fix_fields(THD *thd,struct st_table_list *tlist) { return 0;} - unsigned int size_of() { return sizeof(*this);} + bool fix_fields(THD *thd, TABLE_LIST *tlist, Item **ref) + { + DBUG_ASSERT(fixed == 0); + fixed= 1; + return 0; + } + Item *copy_or_same(THD* thd) + { + return new Item_sum_unique_users(thd, this); + } + void print(String *str) { str->append("0.0", 3); } }; diff --git a/sql/key.cc b/sql/key.cc index 52eb108a5df..7ddd40de2c9 100644 --- a/sql/key.cc +++ b/sql/key.cc @@ -67,7 +67,7 @@ int find_ref_key(TABLE *table,Field *field, uint *key_length) /* Copy a key from record to some buffer */ - /* if length == 0 then copy hole key */ + /* if length == 0 then copy whole key */ void key_copy(byte *key,TABLE *table,uint idx,uint key_length) { @@ -156,9 +156,28 @@ void key_restore(TABLE *table,byte *key,uint idx,uint key_length) } /* key_restore */ - /* Compare if a key has changed */ +/* + Compare if a key has changed + + SYNOPSIS + key_cmp_if_same() + table TABLE + key key to compare to row + idx Index used + key_length Length of key + + NOTES + In theory we could just call field->cmp() for all field types, + but as we are only interested if a key has changed (not if the key is + larger or smaller than the previous value) we can do things a bit + faster by using memcmp() instead. 
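
Aside: the NOTES above boil down to this: when the only question is "did the key change?", a byte comparison of the packed key images is enough, and the ordering-aware collation work can be reserved for the character-set parts handled further down. A minimal standalone sketch of the equality-only check (PackedKey is a made-up fixed-width key image, not the server's KEY_PART_INFO layout):

// Standalone sketch: equality-only key comparison via memcmp, as the NOTES
// above suggest for key_cmp_if_same(). No ordering information is produced.
#include <cstddef>
#include <cstdio>
#include <cstring>

struct PackedKey
{
  unsigned char image[16];   // hypothetical fixed-width key image
  std::size_t length;
};

static bool key_changed(const PackedKey &before, const PackedKey &after)
{
  return before.length != after.length ||
         std::memcmp(before.image, after.image, before.length) != 0;
}

int main()
{
  PackedKey a= { { 0x00, 0x01, 0x02, 0x03 }, 4 };
  PackedKey b= a;
  b.image[3]= 0x04;                                  // flip one key byte
  std::printf("unchanged copy detected: %s\n",
              key_changed(a, a) ? "no" : "yes");     // yes
  std::printf("modified copy detected: %s\n",
              key_changed(a, b) ? "yes" : "no");     // yes
  return 0;
}
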
+ + RETURN + 0 If key is equal + 1 Key has changed +*/ -int key_cmp(TABLE *table,const byte *key,uint idx,uint key_length) +bool key_cmp_if_same(TABLE *table,const byte *key,uint idx,uint key_length) { uint length; KEY_PART_INFO *key_part; @@ -192,8 +211,17 @@ int key_cmp(TABLE *table,const byte *key,uint idx,uint key_length) if (!(key_part->key_type & (FIELDFLAG_NUMBER+FIELDFLAG_BINARY+ FIELDFLAG_PACK))) { - if (my_sortcmp((char*) key,(char*) table->record[0]+key_part->offset, - length)) + CHARSET_INFO *cs= key_part->field->charset(); + uint char_length= key_part->length / cs->mbmaxlen; + const byte *pos= table->record[0] + key_part->offset; + if (length > char_length) + { + char_length= my_charpos(cs, pos, pos + length, char_length); + set_if_smaller(char_length, length); + } + if (cs->coll->strnncollsp(cs, + (const uchar*) key, length, + (const uchar*) pos, char_length)) return 1; } else if (memcmp(key,table->record[0]+key_part->offset,length)) @@ -225,19 +253,19 @@ void key_unpack(String *to,TABLE *table,uint idx) { if (table->record[0][key_part->null_offset] & key_part->null_bit) { - to->append("NULL"); + to->append("NULL", 4); continue; } } if ((field=key_part->field)) { - field->val_str(&tmp,&tmp); + field->val_str(&tmp); if (key_part->length < field->pack_length()) tmp.length(min(tmp.length(),key_part->length)); to->append(tmp); } else - to->append("???"); + to->append("???", 3); } DBUG_VOID_RETURN; } @@ -279,3 +307,56 @@ bool check_if_key_used(TABLE *table, uint idx, List<Item> &fields) return check_if_key_used(table, table->primary_key, fields); return 0; } + + +/* + Compare key in row to a given key + + SYNOPSIS + key_cmp() + key_part Key part handler + key Key to compare to value in table->record[0] + key_length length of 'key' + + RETURN + The return value is SIGN(key_in_row - range_key): + + 0 Key is equal to range or 'range' == 0 (no range) + -1 Key is less than range + 1 Key is larger than range +*/ + +int key_cmp(KEY_PART_INFO *key_part, const byte *key, uint key_length) +{ + uint store_length; + + for (const byte *end=key + key_length; + key < end; + key+= store_length, key_part++) + { + int cmp; + store_length= key_part->store_length; + if (key_part->null_bit) + { + /* This key part allows null values; NULL is lower than everything */ + register bool field_is_null= key_part->field->is_null(); + if (*key) // If range key is null + { + /* the range is expecting a null value */ + if (!field_is_null) + return 1; // Found key is > range + /* null -- exact match, go to next key part */ + continue; + } + else if (field_is_null) + return -1; // NULL is less than any value + key++; // Skip null byte + store_length--; + } + if ((cmp=key_part->field->key_cmp((byte*) key, key_part->length)) < 0) + return -1; + if (cmp > 0) + return 1; + } + return 0; // Keys are equal +} diff --git a/sql/lex.h b/sql/lex.h index 10ba11f2169..325d052de90 100644 --- a/sql/lex.h +++ b/sql/lex.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2002 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -20,13 +20,25 @@ #include "lex_symbol.h" /* We don't want to include sql_yacc.h into gen_lex_hash */ +SYM_GROUP sym_group_common= {"", ""}; +SYM_GROUP sym_group_geom= {"Spatial extentions", "HAVE_SPATIAL"}; +SYM_GROUP sym_group_rtree= {"RTree keys", "HAVE_RTREE_KEYS"}; #ifdef NO_YACC_SYMBOLS -#define SYM(A) 0 -#define CREATE_FUNC(A) 0 +#define 
SYM_OR_NULL(A) 0 #else -#define SYM(A) A -#define CREATE_FUNC(A) (void*) (A) +#define SYM_OR_NULL(A) A +#endif + +#define SYM(A) SYM_OR_NULL(A),0,0,&sym_group_common +#define F_SYM(A) SYM_OR_NULL(A) + +#define CREATE_FUNC(A) (void *)(SYM_OR_NULL(A)), &sym_group_common + +#ifdef HAVE_SPATIAL +#define CREATE_FUNC_GEOM(A) (void *)(SYM_OR_NULL(A)), &sym_group_geom +#else +#define CREATE_FUNC_GEOM(A) 0, &sym_group_geom #endif /* @@ -36,495 +48,649 @@ */ static SYMBOL symbols[] = { - { "&&", SYM(AND),0,0}, - { "<", SYM(LT),0,0}, - { "<=", SYM(LE),0,0}, - { "<>", SYM(NE),0,0}, - { "!=", SYM(NE),0,0}, - { "=", SYM(EQ),0,0}, - { ">", SYM(GT_SYM),0,0}, - { ">=", SYM(GE),0,0}, - { "<<", SYM(SHIFT_LEFT),0,0}, - { ">>", SYM(SHIFT_RIGHT),0,0}, - { "<=>", SYM(EQUAL_SYM),0,0}, - { "ACTION", SYM(ACTION),0,0}, - { "ADD", SYM(ADD),0,0}, - { "AGGREGATE", SYM(AGGREGATE_SYM),0,0}, - { "ALL", SYM(ALL),0,0}, - { "ALTER", SYM(ALTER),0,0}, - { "AFTER", SYM(AFTER_SYM),0,0}, - { "AGAINST", SYM(AGAINST),0,0}, - { "ANALYZE", SYM(ANALYZE_SYM),0,0}, - { "AND", SYM(AND),0,0}, - { "AS", SYM(AS),0,0}, - { "ASC", SYM(ASC),0,0}, - { "AVG", SYM(AVG_SYM),0,0}, - { "AVG_ROW_LENGTH", SYM(AVG_ROW_LENGTH),0,0}, - { "AUTO_INCREMENT", SYM(AUTO_INC),0,0}, - { "BACKUP", SYM(BACKUP_SYM),0,0}, - { "BEGIN", SYM(BEGIN_SYM),0,0}, - { "BERKELEYDB", SYM(BERKELEY_DB_SYM),0,0}, - { "BDB", SYM(BERKELEY_DB_SYM),0,0}, - { "BETWEEN", SYM(BETWEEN_SYM),0,0}, - { "BIGINT", SYM(BIGINT),0,0}, - { "BIT", SYM(BIT_SYM),0,0}, - { "BINARY", SYM(BINARY),0,0}, - { "BINLOG", SYM(BINLOG_SYM),0,0}, - { "BLOB", SYM(BLOB_SYM),0,0}, - { "BOOL", SYM(BOOL_SYM),0,0}, - { "BOOLEAN", SYM(BOOLEAN_SYM),0,0}, - { "BOTH", SYM(BOTH),0,0}, - { "BY", SYM(BY),0,0}, - { "CACHE", SYM(CACHE_SYM),0,0}, - { "CASCADE", SYM(CASCADE),0,0}, - { "CASE", SYM(CASE_SYM),0,0}, - { "CHAR", SYM(CHAR_SYM),0,0}, - { "CHARACTER", SYM(CHAR_SYM),0,0}, - { "CHARSET", SYM(CHARSET),0,0}, - { "CHANGE", SYM(CHANGE),0,0}, - { "CHANGED", SYM(CHANGED),0,0}, - { "CHECK", SYM(CHECK_SYM),0,0}, - { "CHECKSUM", SYM(CHECKSUM_SYM),0,0}, - { "CIPHER", SYM(CIPHER_SYM),0,0}, - { "CLIENT", SYM(CLIENT_SYM),0,0}, - { "CLOSE", SYM(CLOSE_SYM),0,0}, - { "COLUMN", SYM(COLUMN_SYM),0,0}, - { "COLUMNS", SYM(COLUMNS),0,0}, - { "COMMENT", SYM(COMMENT_SYM),0,0}, - { "COMMIT", SYM(COMMIT_SYM),0,0}, - { "COMMITTED", SYM(COMMITTED_SYM),0,0}, - { "COMPRESSED", SYM(COMPRESSED_SYM),0,0}, - { "CONCURRENT", SYM(CONCURRENT),0,0}, - { "CONSTRAINT", SYM(CONSTRAINT),0,0}, - { "CREATE", SYM(CREATE),0,0}, - { "CROSS", SYM(CROSS),0,0}, - { "CUBE", SYM(CUBE_SYM),0,0}, - { "CURRENT_DATE", SYM(CURDATE),0,0}, - { "CURRENT_TIME", SYM(CURTIME),0,0}, - { "CURRENT_TIMESTAMP", SYM(NOW_SYM),0,0}, - { "DATA", SYM(DATA_SYM),0,0}, - { "DATABASE", SYM(DATABASE),0,0}, - { "DATABASES", SYM(DATABASES),0,0}, - { "DATE", SYM(DATE_SYM),0,0}, - { "DATETIME", SYM(DATETIME),0,0}, - { "DAY", SYM(DAY_SYM),0,0}, - { "DAY_HOUR", SYM(DAY_HOUR_SYM),0,0}, - { "DAY_MINUTE", SYM(DAY_MINUTE_SYM),0,0}, - { "DAY_SECOND", SYM(DAY_SECOND_SYM),0,0}, - { "DEC", SYM(DECIMAL_SYM),0,0}, - { "DECIMAL", SYM(DECIMAL_SYM),0,0}, - { "DES_KEY_FILE", SYM(DES_KEY_FILE),0,0}, - { "DEFAULT", SYM(DEFAULT),0,0}, - { "DELAYED", SYM(DELAYED_SYM),0,0}, - { "DELAY_KEY_WRITE", SYM(DELAY_KEY_WRITE_SYM),0,0}, - { "DELETE", SYM(DELETE_SYM),0,0}, - { "DESC", SYM(DESC),0,0}, - { "DESCRIBE", SYM(DESCRIBE),0,0}, - { "DIRECTORY", SYM(DIRECTORY_SYM),0,0}, - { "DISABLE", SYM(DISABLE_SYM),0,0}, - { "DISTINCT", SYM(DISTINCT),0,0}, - { "DISTINCTROW", SYM(DISTINCT),0,0}, /* Access likes this */ - { "DO", 
SYM(DO_SYM),0,0}, - { "DOUBLE", SYM(DOUBLE_SYM),0,0}, - { "DROP", SYM(DROP),0,0}, - { "DUMPFILE", SYM(DUMPFILE),0,0}, - { "DYNAMIC", SYM(DYNAMIC_SYM),0,0}, - { "END", SYM(END),0,0}, - { "ELSE", SYM(ELSE),0,0}, - { "ENGINE", SYM(TYPE_SYM),0,0}, /* Alias for TYPE= */ - { "ESCAPE", SYM(ESCAPE_SYM),0,0}, - { "ESCAPED", SYM(ESCAPED),0,0}, - { "ENABLE", SYM(ENABLE_SYM),0,0}, - { "ENCLOSED", SYM(ENCLOSED),0,0}, - { "ENUM", SYM(ENUM),0,0}, - { "EVENTS", SYM(EVENTS_SYM),0,0}, - { "EXECUTE", SYM(EXECUTE_SYM),0,0}, - { "EXPLAIN", SYM(DESCRIBE),0,0}, - { "EXISTS", SYM(EXISTS),0,0}, - { "EXTENDED", SYM(EXTENDED_SYM),0,0}, - { "FAST", SYM(FAST_SYM),0,0}, - { "FIELDS", SYM(COLUMNS),0,0}, - { "FILE", SYM(FILE_SYM),0,0}, - { "FIRST", SYM(FIRST_SYM),0,0}, - { "FIXED", SYM(FIXED_SYM),0,0}, - { "FLOAT", SYM(FLOAT_SYM),0,0}, - { "FLOAT4", SYM(FLOAT_SYM),0,0}, - { "FLOAT8", SYM(DOUBLE_SYM),0,0}, - { "FLUSH", SYM(FLUSH_SYM),0,0}, - { "FOREIGN", SYM(FOREIGN),0,0}, - { "FORCE", SYM(FORCE_SYM),0,0}, - { "RAID_TYPE", SYM(RAID_TYPE),0,0}, - { "RAID_CHUNKS", SYM(RAID_CHUNKS),0,0}, - { "RAID_CHUNKSIZE", SYM(RAID_CHUNKSIZE),0,0}, - { "ROW_FORMAT", SYM(ROW_FORMAT_SYM),0,0}, - { "FROM", SYM(FROM),0,0}, - { "FOR", SYM(FOR_SYM),0,0}, - { "FULL", SYM(FULL),0,0}, - { "FULLTEXT", SYM(FULLTEXT_SYM),0,0}, - { "FUNCTION", SYM(UDF_SYM),0,0}, - { "GLOBAL", SYM(GLOBAL_SYM),0,0}, - { "GRANT", SYM(GRANT),0,0}, - { "GRANTS", SYM(GRANTS),0,0}, - { "GROUP", SYM(GROUP),0,0}, - { "HAVING", SYM(HAVING),0,0}, - { "HANDLER", SYM(HANDLER_SYM),0,0}, - { "HEAP", SYM(HEAP_SYM),0,0}, - { "HIGH_PRIORITY", SYM(HIGH_PRIORITY),0,0}, - { "HOUR", SYM(HOUR_SYM),0,0}, - { "HOUR_MINUTE", SYM(HOUR_MINUTE_SYM),0,0}, - { "HOUR_SECOND", SYM(HOUR_SECOND_SYM),0,0}, - { "HOSTS", SYM(HOSTS_SYM),0,0}, - { "IDENTIFIED", SYM(IDENTIFIED_SYM),0,0}, - { "IGNORE", SYM(IGNORE_SYM),0,0}, - { "IN", SYM(IN_SYM),0,0}, - { "INDEX", SYM(INDEX),0,0}, - { "INDEXES", SYM(INDEXES),0,0}, - { "INFILE", SYM(INFILE),0,0}, - { "INNER", SYM(INNER_SYM),0,0}, - { "INNOBASE", SYM(INNOBASE_SYM),0,0}, - { "INNODB", SYM(INNOBASE_SYM),0,0}, - { "INSERT", SYM(INSERT),0,0}, - { "INSERT_METHOD", SYM(INSERT_METHOD),0,0}, - { "INT", SYM(INT_SYM),0,0}, - { "INTEGER", SYM(INT_SYM),0,0}, - { "INTERVAL", SYM(INTERVAL_SYM),0,0}, - { "INT1", SYM(TINYINT),0,0}, - { "INT2", SYM(SMALLINT),0,0}, - { "INT3", SYM(MEDIUMINT),0,0}, - { "INT4", SYM(INT_SYM),0,0}, - { "INT8", SYM(BIGINT),0,0}, - { "INTO", SYM(INTO),0,0}, - { "IO_THREAD", SYM(IO_THREAD),0,0}, - { "IF", SYM(IF),0,0}, - { "IS", SYM(IS),0,0}, - { "ISOLATION", SYM(ISOLATION),0,0}, - { "ISAM", SYM(ISAM_SYM),0,0}, - { "ISSUER", SYM(ISSUER_SYM),0,0}, - { "JOIN", SYM(JOIN_SYM),0,0}, - { "KEY", SYM(KEY_SYM),0,0}, - { "KEYS", SYM(KEYS),0,0}, - { "KILL", SYM(KILL_SYM),0,0}, - { "LAST", SYM(LAST_SYM),0,0}, - { "LEADING", SYM(LEADING),0,0}, - { "LEFT", SYM(LEFT),0,0}, - { "LEVEL", SYM(LEVEL_SYM),0,0}, - { "LIKE", SYM(LIKE),0,0}, - { "LINES", SYM(LINES),0,0}, - { "LIMIT", SYM(LIMIT),0,0}, - { "LOAD", SYM(LOAD),0,0}, - { "LOCAL", SYM(LOCAL_SYM),0,0}, - { "LOCALTIME", SYM(NOW_SYM),0,0}, - { "LOCALTIMESTAMP", SYM(NOW_SYM),0,0}, - { "LOCK", SYM(LOCK_SYM),0,0}, - { "LOCKS", SYM(LOCKS_SYM),0,0}, - { "LOGS", SYM(LOGS_SYM),0,0}, - { "LONG", SYM(LONG_SYM),0,0}, - { "LONGBLOB", SYM(LONGBLOB),0,0}, - { "LONGTEXT", SYM(LONGTEXT),0,0}, - { "LOW_PRIORITY", SYM(LOW_PRIORITY),0,0}, - { "MASTER", SYM(MASTER_SYM),0,0}, - { "MASTER_CONNECT_RETRY", SYM(MASTER_CONNECT_RETRY_SYM),0,0}, - { "MASTER_HOST", SYM(MASTER_HOST_SYM),0,0}, - { "MASTER_LOG_FILE", SYM(MASTER_LOG_FILE_SYM),0,0}, 
- { "MASTER_LOG_POS", SYM(MASTER_LOG_POS_SYM),0,0}, - { "MASTER_PASSWORD", SYM(MASTER_PASSWORD_SYM),0,0}, - { "MASTER_PORT", SYM(MASTER_PORT_SYM),0,0}, - { "MASTER_SERVER_ID", SYM(MASTER_SERVER_ID_SYM),0,0}, - { "MASTER_USER", SYM(MASTER_USER_SYM),0,0}, - { "MAX_ROWS", SYM(MAX_ROWS),0,0}, - { "MAX_QUERIES_PER_HOUR", SYM(MAX_QUERIES_PER_HOUR), 0,0}, - { "MAX_UPDATES_PER_HOUR", SYM(MAX_UPDATES_PER_HOUR), 0,0}, - { "MAX_CONNECTIONS_PER_HOUR", SYM(MAX_CONNECTIONS_PER_HOUR), 0,0}, - { "MATCH", SYM(MATCH),0,0}, - { "MEDIUMBLOB", SYM(MEDIUMBLOB),0,0}, - { "MEDIUMTEXT", SYM(MEDIUMTEXT),0,0}, - { "MEDIUMINT", SYM(MEDIUMINT),0,0}, - { "MERGE", SYM(MERGE_SYM),0,0}, - { "MEDIUM", SYM(MEDIUM_SYM),0,0}, - { "MEMORY", SYM(MEMORY_SYM),0,0}, - { "MIDDLEINT", SYM(MEDIUMINT),0,0}, /* For powerbuilder */ - { "MIN_ROWS", SYM(MIN_ROWS),0,0}, - { "MINUTE", SYM(MINUTE_SYM),0,0}, - { "MINUTE_SECOND", SYM(MINUTE_SECOND_SYM),0,0}, - { "MODE", SYM(MODE_SYM),0,0}, - { "MODIFY", SYM(MODIFY_SYM),0,0}, - { "MONTH", SYM(MONTH_SYM),0,0}, - { "MRG_MYISAM", SYM(MERGE_SYM),0,0}, - { "MYISAM", SYM(MYISAM_SYM),0,0}, - { "NATURAL", SYM(NATURAL),0,0}, - { "NATIONAL", SYM(NATIONAL_SYM),0,0}, - { "NEXT", SYM(NEXT_SYM),0,0}, - { "NEW", SYM(NEW_SYM),0,0}, - { "NCHAR", SYM(NCHAR_SYM),0,0}, - { "NO", SYM(NO_SYM),0,0}, - { "NONE", SYM(NONE_SYM),0,0}, - { "NOT", SYM(NOT),0,0}, - { "NULL", SYM(NULL_SYM),0,0}, - { "NUMERIC", SYM(NUMERIC_SYM),0,0}, - { "OFFSET", SYM(OFFSET_SYM),0,0}, - { "ON", SYM(ON),0,0}, - { "OPEN", SYM(OPEN_SYM),0,0}, - { "OPTIMIZE", SYM(OPTIMIZE),0,0}, - { "OPTION", SYM(OPTION),0,0}, - { "OPTIONALLY", SYM(OPTIONALLY),0,0}, - { "OR", SYM(OR),0,0}, - { "ORDER", SYM(ORDER_SYM),0,0}, - { "OUTER", SYM(OUTER),0,0}, - { "OUTFILE", SYM(OUTFILE),0,0}, - { "PACK_KEYS", SYM(PACK_KEYS_SYM),0,0}, - { "PARTIAL", SYM(PARTIAL),0,0}, - { "PASSWORD", SYM(PASSWORD),0,0}, - { "PURGE", SYM(PURGE),0,0}, - { "PRECISION", SYM(PRECISION),0,0}, - { "PREV", SYM(PREV_SYM),0,0}, - { "PRIMARY", SYM(PRIMARY_SYM),0,0}, - { "PROCEDURE", SYM(PROCEDURE),0,0}, - { "PROCESS" , SYM(PROCESS),0,0}, - { "PROCESSLIST", SYM(PROCESSLIST_SYM),0,0}, - { "PRIVILEGES", SYM(PRIVILEGES),0,0}, - { "QUERY", SYM(QUERY_SYM),0,0}, - { "QUICK", SYM(QUICK),0,0}, - { "RAID0", SYM(RAID_0_SYM),0,0}, - { "READ", SYM(READ_SYM),0,0}, - { "REAL", SYM(REAL),0,0}, - { "REFERENCES", SYM(REFERENCES),0,0}, - { "RELAY_LOG_FILE", SYM(RELAY_LOG_FILE_SYM),0,0}, - { "RELAY_LOG_POS", SYM(RELAY_LOG_POS_SYM),0,0}, - { "RELOAD", SYM(RELOAD),0,0}, - { "REGEXP", SYM(REGEXP),0,0}, - { "RENAME", SYM(RENAME),0,0}, - { "REPAIR", SYM(REPAIR),0,0}, - { "REPLACE", SYM(REPLACE),0,0}, - { "REPLICATION", SYM(REPLICATION),0,0}, - { "REPEATABLE", SYM(REPEATABLE_SYM),0,0}, - { "REQUIRE", SYM(REQUIRE_SYM),0,0}, - { "RESET", SYM(RESET_SYM),0,0}, - { "USER_RESOURCES", SYM(RESOURCES),0,0}, - { "RESTORE", SYM(RESTORE_SYM),0,0}, - { "RESTRICT", SYM(RESTRICT),0,0}, - { "RETURNS", SYM(UDF_RETURNS_SYM),0,0}, - { "REVOKE", SYM(REVOKE),0,0}, - { "RIGHT", SYM(RIGHT),0,0}, - { "RLIKE", SYM(REGEXP),0,0}, /* Like in mSQL2 */ - { "ROLLBACK", SYM(ROLLBACK_SYM),0,0}, - { "ROLLUP", SYM(ROLLUP_SYM),0,0}, - { "ROW", SYM(ROW_SYM),0,0}, - { "ROWS", SYM(ROWS_SYM),0,0}, - { "SAVEPOINT", SYM(SAVEPOINT_SYM),0,0}, - { "SECOND", SYM(SECOND_SYM),0,0}, - { "SELECT", SYM(SELECT_SYM),0,0}, - { "SERIALIZABLE", SYM(SERIALIZABLE_SYM),0,0}, - { "SESSION", SYM(SESSION_SYM),0,0}, - { "SET", SYM(SET),0,0}, - { "SIGNED", SYM(SIGNED_SYM),0,0}, - { "SHARE", SYM(SHARE_SYM),0,0}, - { "SHOW", SYM(SHOW),0,0}, - { "SHUTDOWN", SYM(SHUTDOWN),0,0}, - { 
"SLAVE", SYM(SLAVE),0,0}, - { "SMALLINT", SYM(SMALLINT),0,0}, - { "SONAME", SYM(UDF_SONAME_SYM),0,0}, - { "SQL_BIG_RESULT", SYM(SQL_BIG_RESULT),0,0}, - { "SQL_BUFFER_RESULT", SYM(SQL_BUFFER_RESULT),0,0}, - { "SQL_CACHE", SYM(SQL_CACHE_SYM), 0, 0}, - { "SQL_CALC_FOUND_ROWS", SYM(SQL_CALC_FOUND_ROWS),0,0}, - { "SQL_NO_CACHE", SYM(SQL_NO_CACHE_SYM), 0, 0}, - { "SQL_SMALL_RESULT", SYM(SQL_SMALL_RESULT),0,0}, - { "SQL_THREAD", SYM(SQL_THREAD),0,0}, - { "SSL", SYM(SSL_SYM),0,0}, - { "STRAIGHT_JOIN", SYM(STRAIGHT_JOIN),0,0}, - { "START", SYM(START_SYM),0,0}, - { "STARTING", SYM(STARTING),0,0}, - { "STATUS", SYM(STATUS_SYM),0,0}, - { "STRING", SYM(STRING_SYM),0,0}, - { "STOP", SYM(STOP_SYM),0,0}, - { "STRIPED", SYM(RAID_STRIPED_SYM),0,0}, - { "SUBJECT", SYM(SUBJECT_SYM),0,0}, - { "SUPER", SYM(SUPER_SYM),0,0}, - { "TABLE", SYM(TABLE_SYM),0,0}, - { "TABLES", SYM(TABLES),0,0}, - { "TEMPORARY", SYM(TEMPORARY),0,0}, - { "TERMINATED", SYM(TERMINATED),0,0}, - { "TEXT", SYM(TEXT_SYM),0,0}, - { "THEN", SYM(THEN_SYM),0,0}, - { "TIME", SYM(TIME_SYM),0,0}, - { "TIMESTAMP", SYM(TIMESTAMP),0,0}, - { "TINYBLOB", SYM(TINYBLOB),0,0}, - { "TINYTEXT", SYM(TINYTEXT),0,0}, - { "TINYINT", SYM(TINYINT),0,0}, - { "TRAILING", SYM(TRAILING),0,0}, - { "TRANSACTION", SYM(TRANSACTION_SYM),0,0}, - { "TRUNCATE", SYM(TRUNCATE_SYM),0,0}, - { "TO", SYM(TO_SYM),0,0}, - { "TYPE", SYM(TYPE_SYM),0,0}, - { "UNCOMMITTED", SYM(UNCOMMITTED_SYM),0,0}, - { "UNION", SYM(UNION_SYM),0,0}, - { "UNIQUE", SYM(UNIQUE_SYM),0,0}, - { "UNLOCK", SYM(UNLOCK_SYM),0,0}, - { "UNSIGNED", SYM(UNSIGNED),0,0}, - { "USE", SYM(USE_SYM),0,0}, - { "USE_FRM", SYM(USE_FRM),0,0}, - { "USING", SYM(USING),0,0}, - { "UPDATE", SYM(UPDATE_SYM),0,0}, - { "USAGE", SYM(USAGE),0,0}, - { "VALUES", SYM(VALUES),0,0}, - { "VARCHAR", SYM(VARCHAR),0,0}, - { "VARIABLES", SYM(VARIABLES),0,0}, - { "VARYING", SYM(VARYING),0,0}, - { "VARBINARY", SYM(VARBINARY),0,0}, - { "WITH", SYM(WITH),0,0}, - { "WORK", SYM(WORK_SYM),0,0}, - { "WRITE", SYM(WRITE_SYM),0,0}, - { "WHEN", SYM(WHEN_SYM),0,0}, - { "WHERE", SYM(WHERE),0,0}, - { "XOR", SYM(XOR),0,0}, - { "X509", SYM(X509_SYM),0,0}, - { "YEAR", SYM(YEAR_SYM),0,0}, - { "YEAR_MONTH", SYM(YEAR_MONTH_SYM),0,0}, - { "ZEROFILL", SYM(ZEROFILL),0,0}, - { "||", SYM(OR_OR_CONCAT),0,0} + { "&&", SYM(AND_SYM)}, + { "<", SYM(LT)}, + { "<=", SYM(LE)}, + { "<>", SYM(NE)}, + { "!=", SYM(NE)}, + { "=", SYM(EQ)}, + { ">", SYM(GT_SYM)}, + { ">=", SYM(GE)}, + { "<<", SYM(SHIFT_LEFT)}, + { ">>", SYM(SHIFT_RIGHT)}, + { "<=>", SYM(EQUAL_SYM)}, + { "ACTION", SYM(ACTION)}, + { "ADD", SYM(ADD)}, + { "AFTER", SYM(AFTER_SYM)}, + { "AGAINST", SYM(AGAINST)}, + { "AGGREGATE", SYM(AGGREGATE_SYM)}, + { "ALL", SYM(ALL)}, + { "ALTER", SYM(ALTER)}, + { "ANALYZE", SYM(ANALYZE_SYM)}, + { "AND", SYM(AND_SYM)}, + { "ANY", SYM(ANY_SYM)}, + { "AS", SYM(AS)}, + { "ASC", SYM(ASC)}, + { "ASCII", SYM(ASCII_SYM)}, + { "AUTO_INCREMENT", SYM(AUTO_INC)}, + { "AVG", SYM(AVG_SYM)}, + { "AVG_ROW_LENGTH", SYM(AVG_ROW_LENGTH)}, + { "BACKUP", SYM(BACKUP_SYM)}, + { "BDB", SYM(BERKELEY_DB_SYM)}, + { "BEFORE", SYM(BEFORE_SYM)}, + { "BEGIN", SYM(BEGIN_SYM)}, + { "BERKELEYDB", SYM(BERKELEY_DB_SYM)}, + { "BETWEEN", SYM(BETWEEN_SYM)}, + { "BIGINT", SYM(BIGINT)}, + { "BINARY", SYM(BINARY)}, + { "BINLOG", SYM(BINLOG_SYM)}, + { "BIT", SYM(BIT_SYM)}, + { "BLOB", SYM(BLOB_SYM)}, + { "BOOL", SYM(BOOL_SYM)}, + { "BOOLEAN", SYM(BOOLEAN_SYM)}, + { "BOTH", SYM(BOTH)}, + { "BTREE", SYM(BTREE_SYM)}, + { "BY", SYM(BY)}, + { "BYTE", SYM(BYTE_SYM)}, + { "CACHE", SYM(CACHE_SYM)}, + { "CASCADE", SYM(CASCADE)}, + { 
"CASE", SYM(CASE_SYM)}, + { "CHANGE", SYM(CHANGE)}, + { "CHANGED", SYM(CHANGED)}, + { "CHAR", SYM(CHAR_SYM)}, + { "CHARACTER", SYM(CHAR_SYM)}, + { "CHARSET", SYM(CHARSET)}, + { "CHECK", SYM(CHECK_SYM)}, + { "CHECKSUM", SYM(CHECKSUM_SYM)}, + { "CIPHER", SYM(CIPHER_SYM)}, + { "CLIENT", SYM(CLIENT_SYM)}, + { "CLOSE", SYM(CLOSE_SYM)}, + { "COLLATE", SYM(COLLATE_SYM)}, + { "COLLATION", SYM(COLLATION_SYM)}, + { "COLUMN", SYM(COLUMN_SYM)}, + { "COLUMNS", SYM(COLUMNS)}, + { "COMMENT", SYM(COMMENT_SYM)}, + { "COMMIT", SYM(COMMIT_SYM)}, + { "COMMITTED", SYM(COMMITTED_SYM)}, + { "COMPRESSED", SYM(COMPRESSED_SYM)}, + { "CONCURRENT", SYM(CONCURRENT)}, + { "CONSISTENT", SYM(CONSISTENT_SYM)}, + { "CONSTRAINT", SYM(CONSTRAINT)}, + { "CONVERT", SYM(CONVERT_SYM)}, + { "CREATE", SYM(CREATE)}, + { "CROSS", SYM(CROSS)}, + { "CUBE", SYM(CUBE_SYM)}, + { "CURRENT_DATE", SYM(CURDATE)}, + { "CURRENT_TIME", SYM(CURTIME)}, + { "CURRENT_TIMESTAMP", SYM(NOW_SYM)}, + { "CURRENT_USER", SYM(CURRENT_USER)}, + { "DATA", SYM(DATA_SYM)}, + { "DATABASE", SYM(DATABASE)}, + { "DATABASES", SYM(DATABASES)}, + { "DATE", SYM(DATE_SYM)}, + { "DATETIME", SYM(DATETIME)}, + { "DAY", SYM(DAY_SYM)}, + { "DAY_HOUR", SYM(DAY_HOUR_SYM)}, + { "DAY_MICROSECOND", SYM(DAY_MICROSECOND_SYM)}, + { "DAY_MINUTE", SYM(DAY_MINUTE_SYM)}, + { "DAY_SECOND", SYM(DAY_SECOND_SYM)}, + { "DEALLOCATE", SYM(DEALLOCATE_SYM)}, + { "DEC", SYM(DECIMAL_SYM)}, + { "DECIMAL", SYM(DECIMAL_SYM)}, + { "DEFAULT", SYM(DEFAULT)}, + { "DELAYED", SYM(DELAYED_SYM)}, + { "DELAY_KEY_WRITE", SYM(DELAY_KEY_WRITE_SYM)}, + { "DELETE", SYM(DELETE_SYM)}, + { "DESC", SYM(DESC)}, + { "DESCRIBE", SYM(DESCRIBE)}, + { "DES_KEY_FILE", SYM(DES_KEY_FILE)}, + { "DIRECTORY", SYM(DIRECTORY_SYM)}, + { "DISABLE", SYM(DISABLE_SYM)}, + { "DISCARD", SYM(DISCARD)}, + { "DISTINCT", SYM(DISTINCT)}, + { "DISTINCTROW", SYM(DISTINCT)}, /* Access likes this */ + { "DIV", SYM(DIV_SYM)}, + { "DO", SYM(DO_SYM)}, + { "DOUBLE", SYM(DOUBLE_SYM)}, + { "DROP", SYM(DROP)}, + { "DUAL", SYM(DUAL_SYM)}, + { "DUMPFILE", SYM(DUMPFILE)}, + { "DUPLICATE", SYM(DUPLICATE_SYM)}, + { "DYNAMIC", SYM(DYNAMIC_SYM)}, + { "ELSE", SYM(ELSE)}, + { "ENABLE", SYM(ENABLE_SYM)}, + { "ENCLOSED", SYM(ENCLOSED)}, + { "END", SYM(END)}, + { "ENGINE", SYM(ENGINE_SYM)}, + { "ENGINES", SYM(ENGINES_SYM)}, + { "ENUM", SYM(ENUM)}, + { "ERRORS", SYM(ERRORS)}, + { "ESCAPE", SYM(ESCAPE_SYM)}, + { "ESCAPED", SYM(ESCAPED)}, + { "EVENTS", SYM(EVENTS_SYM)}, + { "EXECUTE", SYM(EXECUTE_SYM)}, + { "EXISTS", SYM(EXISTS)}, + { "EXPANSION", SYM(EXPANSION_SYM)}, + { "EXPLAIN", SYM(DESCRIBE)}, + { "EXTENDED", SYM(EXTENDED_SYM)}, + { "FALSE", SYM(FALSE_SYM)}, + { "FAST", SYM(FAST_SYM)}, + { "FIELDS", SYM(COLUMNS)}, + { "FILE", SYM(FILE_SYM)}, + { "FIRST", SYM(FIRST_SYM)}, + { "FIXED", SYM(FIXED_SYM)}, + { "FLOAT", SYM(FLOAT_SYM)}, + { "FLOAT4", SYM(FLOAT_SYM)}, + { "FLOAT8", SYM(DOUBLE_SYM)}, + { "FLUSH", SYM(FLUSH_SYM)}, + { "FOR", SYM(FOR_SYM)}, + { "FORCE", SYM(FORCE_SYM)}, + { "FOREIGN", SYM(FOREIGN)}, + { "FROM", SYM(FROM)}, + { "FULL", SYM(FULL)}, + { "FULLTEXT", SYM(FULLTEXT_SYM)}, + { "FUNCTION", SYM(UDF_SYM)}, + { "GEOMETRY", SYM(GEOMETRY_SYM)}, + { "GEOMETRYCOLLECTION",SYM(GEOMETRYCOLLECTION)}, + { "GET_FORMAT", SYM(GET_FORMAT)}, + { "GLOBAL", SYM(GLOBAL_SYM)}, + { "GRANT", SYM(GRANT)}, + { "GRANTS", SYM(GRANTS)}, + { "GROUP", SYM(GROUP)}, + { "HANDLER", SYM(HANDLER_SYM)}, + { "HASH", SYM(HASH_SYM)}, + { "HAVING", SYM(HAVING)}, + { "HELP", SYM(HELP_SYM)}, + { "HIGH_PRIORITY", SYM(HIGH_PRIORITY)}, + { "HOSTS", SYM(HOSTS_SYM)}, + { "HOUR", SYM(HOUR_SYM)}, + 
{ "HOUR_MICROSECOND", SYM(HOUR_MICROSECOND_SYM)}, + { "HOUR_MINUTE", SYM(HOUR_MINUTE_SYM)}, + { "HOUR_SECOND", SYM(HOUR_SECOND_SYM)}, + { "IDENTIFIED", SYM(IDENTIFIED_SYM)}, + { "IF", SYM(IF)}, + { "IGNORE", SYM(IGNORE_SYM)}, + { "IMPORT", SYM(IMPORT)}, + { "IN", SYM(IN_SYM)}, + { "INDEX", SYM(INDEX_SYM)}, + { "INDEXES", SYM(INDEXES)}, + { "INFILE", SYM(INFILE)}, + { "INNER", SYM(INNER_SYM)}, + { "INNOBASE", SYM(INNOBASE_SYM)}, + { "INNODB", SYM(INNOBASE_SYM)}, + { "INSERT", SYM(INSERT)}, + { "INSERT_METHOD", SYM(INSERT_METHOD)}, + { "INT", SYM(INT_SYM)}, + { "INT1", SYM(TINYINT)}, + { "INT2", SYM(SMALLINT)}, + { "INT3", SYM(MEDIUMINT)}, + { "INT4", SYM(INT_SYM)}, + { "INT8", SYM(BIGINT)}, + { "INTEGER", SYM(INT_SYM)}, + { "INTERVAL", SYM(INTERVAL_SYM)}, + { "INTO", SYM(INTO)}, + { "IO_THREAD", SYM(RELAY_THREAD)}, + { "IS", SYM(IS)}, + { "ISOLATION", SYM(ISOLATION)}, + { "ISSUER", SYM(ISSUER_SYM)}, + { "JOIN", SYM(JOIN_SYM)}, + { "KEY", SYM(KEY_SYM)}, + { "KEYS", SYM(KEYS)}, + { "KILL", SYM(KILL_SYM)}, + { "LAST", SYM(LAST_SYM)}, + { "LEADING", SYM(LEADING)}, + { "LEAVES", SYM(LEAVES)}, + { "LEFT", SYM(LEFT)}, + { "LEVEL", SYM(LEVEL_SYM)}, + { "LIKE", SYM(LIKE)}, + { "LIMIT", SYM(LIMIT)}, + { "LINES", SYM(LINES)}, + { "LINESTRING", SYM(LINESTRING)}, + { "LOAD", SYM(LOAD)}, + { "LOCAL", SYM(LOCAL_SYM)}, + { "LOCALTIME", SYM(NOW_SYM)}, + { "LOCALTIMESTAMP", SYM(NOW_SYM)}, + { "LOCK", SYM(LOCK_SYM)}, + { "LOCKS", SYM(LOCKS_SYM)}, + { "LOGS", SYM(LOGS_SYM)}, + { "LONG", SYM(LONG_SYM)}, + { "LONGBLOB", SYM(LONGBLOB)}, + { "LONGTEXT", SYM(LONGTEXT)}, + { "LOW_PRIORITY", SYM(LOW_PRIORITY)}, + { "MASTER", SYM(MASTER_SYM)}, + { "MASTER_CONNECT_RETRY", SYM(MASTER_CONNECT_RETRY_SYM)}, + { "MASTER_HOST", SYM(MASTER_HOST_SYM)}, + { "MASTER_LOG_FILE", SYM(MASTER_LOG_FILE_SYM)}, + { "MASTER_LOG_POS", SYM(MASTER_LOG_POS_SYM)}, + { "MASTER_PASSWORD", SYM(MASTER_PASSWORD_SYM)}, + { "MASTER_PORT", SYM(MASTER_PORT_SYM)}, + { "MASTER_SERVER_ID", SYM(MASTER_SERVER_ID_SYM)}, + { "MASTER_SSL", SYM(MASTER_SSL_SYM)}, + { "MASTER_SSL_CA", SYM(MASTER_SSL_CA_SYM)}, + { "MASTER_SSL_CAPATH",SYM(MASTER_SSL_CAPATH_SYM)}, + { "MASTER_SSL_CERT", SYM(MASTER_SSL_CERT_SYM)}, + { "MASTER_SSL_CIPHER",SYM(MASTER_SSL_CIPHER_SYM)}, + { "MASTER_SSL_KEY", SYM(MASTER_SSL_KEY_SYM)}, + { "MASTER_USER", SYM(MASTER_USER_SYM)}, + { "MATCH", SYM(MATCH)}, + { "MAX_CONNECTIONS_PER_HOUR", SYM(MAX_CONNECTIONS_PER_HOUR)}, + { "MAX_QUERIES_PER_HOUR", SYM(MAX_QUERIES_PER_HOUR)}, + { "MAX_ROWS", SYM(MAX_ROWS)}, + { "MAX_UPDATES_PER_HOUR", SYM(MAX_UPDATES_PER_HOUR)}, + { "MEDIUM", SYM(MEDIUM_SYM)}, + { "MEDIUMBLOB", SYM(MEDIUMBLOB)}, + { "MEDIUMINT", SYM(MEDIUMINT)}, + { "MEDIUMTEXT", SYM(MEDIUMTEXT)}, + { "MICROSECOND", SYM(MICROSECOND_SYM)}, + { "MIDDLEINT", SYM(MEDIUMINT)}, /* For powerbuilder */ + { "MINUTE", SYM(MINUTE_SYM)}, + { "MINUTE_MICROSECOND", SYM(MINUTE_MICROSECOND_SYM)}, + { "MINUTE_SECOND", SYM(MINUTE_SECOND_SYM)}, + { "MIN_ROWS", SYM(MIN_ROWS)}, + { "MOD", SYM(MOD_SYM)}, + { "MODE", SYM(MODE_SYM)}, + { "MODIFY", SYM(MODIFY_SYM)}, + { "MONTH", SYM(MONTH_SYM)}, + { "MULTILINESTRING", SYM(MULTILINESTRING)}, + { "MULTIPOINT", SYM(MULTIPOINT)}, + { "MULTIPOLYGON", SYM(MULTIPOLYGON)}, + { "NAMES", SYM(NAMES_SYM)}, + { "NATIONAL", SYM(NATIONAL_SYM)}, + { "NATURAL", SYM(NATURAL)}, + { "NDB", SYM(NDBCLUSTER_SYM)}, + { "NDBCLUSTER", SYM(NDBCLUSTER_SYM)}, + { "NCHAR", SYM(NCHAR_SYM)}, + { "NEW", SYM(NEW_SYM)}, + { "NEXT", SYM(NEXT_SYM)}, + { "NO", SYM(NO_SYM)}, + { "NONE", SYM(NONE_SYM)}, + { "NOT", SYM(NOT)}, + { "NO_WRITE_TO_BINLOG", 
SYM(NO_WRITE_TO_BINLOG)}, + { "NULL", SYM(NULL_SYM)}, + { "NUMERIC", SYM(NUMERIC_SYM)}, + { "NVARCHAR", SYM(NVARCHAR_SYM)}, + { "OFFSET", SYM(OFFSET_SYM)}, + { "OLD_PASSWORD", SYM(OLD_PASSWORD)}, + { "ON", SYM(ON)}, + { "ONE_SHOT", SYM(ONE_SHOT_SYM)}, + { "OPEN", SYM(OPEN_SYM)}, + { "OPTIMIZE", SYM(OPTIMIZE)}, + { "OPTION", SYM(OPTION)}, + { "OPTIONALLY", SYM(OPTIONALLY)}, + { "OR", SYM(OR_SYM)}, + { "ORDER", SYM(ORDER_SYM)}, + { "OUTER", SYM(OUTER)}, + { "OUTFILE", SYM(OUTFILE)}, + { "PACK_KEYS", SYM(PACK_KEYS_SYM)}, + { "PARTIAL", SYM(PARTIAL)}, + { "PASSWORD", SYM(PASSWORD)}, + { "POINT", SYM(POINT_SYM)}, + { "POLYGON", SYM(POLYGON)}, + { "PRECISION", SYM(PRECISION)}, + { "PREPARE", SYM(PREPARE_SYM)}, + { "PREV", SYM(PREV_SYM)}, + { "PRIMARY", SYM(PRIMARY_SYM)}, + { "PRIVILEGES", SYM(PRIVILEGES)}, + { "PROCEDURE", SYM(PROCEDURE)}, + { "PROCESS" , SYM(PROCESS)}, + { "PROCESSLIST", SYM(PROCESSLIST_SYM)}, + { "PURGE", SYM(PURGE)}, + { "QUERY", SYM(QUERY_SYM)}, + { "QUICK", SYM(QUICK)}, + { "RAID0", SYM(RAID_0_SYM)}, + { "RAID_CHUNKS", SYM(RAID_CHUNKS)}, + { "RAID_CHUNKSIZE", SYM(RAID_CHUNKSIZE)}, + { "RAID_TYPE", SYM(RAID_TYPE)}, + { "READ", SYM(READ_SYM)}, + { "REAL", SYM(REAL)}, + { "REFERENCES", SYM(REFERENCES)}, + { "REGEXP", SYM(REGEXP)}, + { "RELAY_LOG_FILE", SYM(RELAY_LOG_FILE_SYM)}, + { "RELAY_LOG_POS", SYM(RELAY_LOG_POS_SYM)}, + { "RELAY_THREAD", SYM(RELAY_THREAD)}, + { "RELOAD", SYM(RELOAD)}, + { "RENAME", SYM(RENAME)}, + { "REPAIR", SYM(REPAIR)}, + { "REPEATABLE", SYM(REPEATABLE_SYM)}, + { "REPLACE", SYM(REPLACE)}, + { "REPLICATION", SYM(REPLICATION)}, + { "REQUIRE", SYM(REQUIRE_SYM)}, + { "RESET", SYM(RESET_SYM)}, + { "RESTORE", SYM(RESTORE_SYM)}, + { "RESTRICT", SYM(RESTRICT)}, + { "RETURNS", SYM(UDF_RETURNS_SYM)}, + { "REVOKE", SYM(REVOKE)}, + { "RIGHT", SYM(RIGHT)}, + { "RLIKE", SYM(REGEXP)}, /* Like in mSQL2 */ + { "ROLLBACK", SYM(ROLLBACK_SYM)}, + { "ROLLUP", SYM(ROLLUP_SYM)}, + { "ROW", SYM(ROW_SYM)}, + { "ROWS", SYM(ROWS_SYM)}, + { "ROW_FORMAT", SYM(ROW_FORMAT_SYM)}, + { "RTREE", SYM(RTREE_SYM)}, + { "SAVEPOINT", SYM(SAVEPOINT_SYM)}, + { "SECOND", SYM(SECOND_SYM)}, + { "SECOND_MICROSECOND", SYM(SECOND_MICROSECOND_SYM)}, + { "SELECT", SYM(SELECT_SYM)}, + { "SEPARATOR", SYM(SEPARATOR_SYM)}, + { "SERIAL", SYM(SERIAL_SYM)}, + { "SERIALIZABLE", SYM(SERIALIZABLE_SYM)}, + { "SESSION", SYM(SESSION_SYM)}, + { "SET", SYM(SET)}, + { "SHARE", SYM(SHARE_SYM)}, + { "SHOW", SYM(SHOW)}, + { "SHUTDOWN", SYM(SHUTDOWN)}, + { "SIGNED", SYM(SIGNED_SYM)}, + { "SIMPLE", SYM(SIMPLE_SYM)}, + { "SLAVE", SYM(SLAVE)}, + { "SNAPSHOT", SYM(SNAPSHOT_SYM)}, + { "SMALLINT", SYM(SMALLINT)}, + { "SOME", SYM(ANY_SYM)}, + { "SONAME", SYM(UDF_SONAME_SYM)}, + { "SOUNDS", SYM(SOUNDS_SYM)}, + { "SPATIAL", SYM(SPATIAL_SYM)}, + { "SQL_BIG_RESULT", SYM(SQL_BIG_RESULT)}, + { "SQL_BUFFER_RESULT", SYM(SQL_BUFFER_RESULT)}, + { "SQL_CACHE", SYM(SQL_CACHE_SYM)}, + { "SQL_CALC_FOUND_ROWS", SYM(SQL_CALC_FOUND_ROWS)}, + { "SQL_NO_CACHE", SYM(SQL_NO_CACHE_SYM)}, + { "SQL_SMALL_RESULT", SYM(SQL_SMALL_RESULT)}, + { "SQL_THREAD", SYM(SQL_THREAD)}, + { "SSL", SYM(SSL_SYM)}, + { "START", SYM(START_SYM)}, + { "STARTING", SYM(STARTING)}, + { "STATUS", SYM(STATUS_SYM)}, + { "STOP", SYM(STOP_SYM)}, + { "STORAGE", SYM(STORAGE_SYM)}, + { "STRAIGHT_JOIN", SYM(STRAIGHT_JOIN)}, + { "STRING", SYM(STRING_SYM)}, + { "STRIPED", SYM(RAID_STRIPED_SYM)}, + { "SUBJECT", SYM(SUBJECT_SYM)}, + { "SUPER", SYM(SUPER_SYM)}, + { "TABLE", SYM(TABLE_SYM)}, + { "TABLES", SYM(TABLES)}, + { "TABLESPACE", SYM(TABLESPACE)}, + { "TEMPORARY", SYM(TEMPORARY)}, + 
{ "TERMINATED", SYM(TERMINATED)}, + { "TEXT", SYM(TEXT_SYM)}, + { "THEN", SYM(THEN_SYM)}, + { "TIME", SYM(TIME_SYM)}, + { "TIMESTAMP", SYM(TIMESTAMP)}, + { "TINYBLOB", SYM(TINYBLOB)}, + { "TINYINT", SYM(TINYINT)}, + { "TINYTEXT", SYM(TINYTEXT)}, + { "TO", SYM(TO_SYM)}, + { "TRAILING", SYM(TRAILING)}, + { "TRANSACTION", SYM(TRANSACTION_SYM)}, + { "TRUE", SYM(TRUE_SYM)}, + { "TRUNCATE", SYM(TRUNCATE_SYM)}, + { "TYPE", SYM(TYPE_SYM)}, + { "TYPES", SYM(TYPES_SYM)}, + { "UNCOMMITTED", SYM(UNCOMMITTED_SYM)}, + { "UNICODE", SYM(UNICODE_SYM)}, + { "UNION", SYM(UNION_SYM)}, + { "UNIQUE", SYM(UNIQUE_SYM)}, + { "UNLOCK", SYM(UNLOCK_SYM)}, + { "UNSIGNED", SYM(UNSIGNED)}, + { "UNTIL", SYM(UNTIL_SYM)}, + { "UPDATE", SYM(UPDATE_SYM)}, + { "USAGE", SYM(USAGE)}, + { "USE", SYM(USE_SYM)}, + { "USER", SYM(USER)}, + { "USER_RESOURCES", SYM(RESOURCES)}, + { "USE_FRM", SYM(USE_FRM)}, + { "USING", SYM(USING)}, + { "UTC_DATE", SYM(UTC_DATE_SYM)}, + { "UTC_TIME", SYM(UTC_TIME_SYM)}, + { "UTC_TIMESTAMP", SYM(UTC_TIMESTAMP_SYM)}, + { "VALUE", SYM(VALUE_SYM)}, + { "VALUES", SYM(VALUES)}, + { "VARBINARY", SYM(VARBINARY)}, + { "VARCHAR", SYM(VARCHAR)}, + { "VARCHARACTER", SYM(VARCHAR)}, + { "VARIABLES", SYM(VARIABLES)}, + { "VARYING", SYM(VARYING)}, + { "WARNINGS", SYM(WARNINGS)}, + { "WHEN", SYM(WHEN_SYM)}, + { "WHERE", SYM(WHERE)}, + { "WITH", SYM(WITH)}, + { "WORK", SYM(WORK_SYM)}, + { "WRITE", SYM(WRITE_SYM)}, + { "X509", SYM(X509_SYM)}, + { "XOR", SYM(XOR)}, + { "YEAR", SYM(YEAR_SYM)}, + { "YEAR_MONTH", SYM(YEAR_MONTH_SYM)}, + { "ZEROFILL", SYM(ZEROFILL)}, + { "||", SYM(OR_OR_CONCAT)} }; static SYMBOL sql_functions[] = { - { "ABS", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_abs)}, - { "ACOS", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_acos)}, - { "ADDDATE", SYM(DATE_ADD_INTERVAL),0,0}, - { "AES_ENCRYPT", SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_aes_encrypt)}, - { "AES_DECRYPT", SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_aes_decrypt)}, - { "ASCII", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_ascii)}, - { "ASIN", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_asin)}, - { "ATAN", SYM(ATAN),0,0}, - { "ATAN2", SYM(ATAN),0,0}, - { "BENCHMARK", SYM(BENCHMARK_SYM),0,0}, - { "BIN", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_bin)}, - { "BIT_COUNT", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_bit_count)}, - { "BIT_OR", SYM(BIT_OR),0,0}, - { "BIT_AND", SYM(BIT_AND),0,0}, - { "CAST", SYM(CAST_SYM),0,0}, - { "CEIL", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_ceiling)}, - { "CEILING", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_ceiling)}, - { "CURRENT_USER", SYM(FUNC_ARG0),0,CREATE_FUNC(create_func_current_user)}, - { "BIT_LENGTH", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_bit_length)}, - { "CHAR_LENGTH", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_char_length)}, - { "CHARACTER_LENGTH", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_char_length)}, - { "COALESCE", SYM(COALESCE),0,0}, - { "CONCAT", SYM(CONCAT),0,0}, - { "CONCAT_WS", SYM(CONCAT_WS),0,0}, - { "CONNECTION_ID", SYM(FUNC_ARG0),0,CREATE_FUNC(create_func_connection_id)}, - { "CONV", SYM(FUNC_ARG3),0,CREATE_FUNC(create_func_conv)}, - { "CONVERT", SYM(CONVERT_SYM),0,0}, - { "COUNT", SYM(COUNT_SYM),0,0}, - { "COS", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_cos)}, - { "COT", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_cot)}, - { "CURDATE", SYM(CURDATE),0,0}, - { "CURTIME", SYM(CURTIME),0,0}, - { "DATE_ADD", SYM(DATE_ADD_INTERVAL),0,0}, - { "DATE_FORMAT", SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_date_format)}, - { "DATE_SUB", SYM(DATE_SUB_INTERVAL),0,0}, - { "DAYNAME", 
SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_dayname)}, - { "DAYOFMONTH", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_dayofmonth)}, - { "DAYOFWEEK", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_dayofweek)}, - { "DAYOFYEAR", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_dayofyear)}, - { "DECODE", SYM(DECODE_SYM),0,0}, - { "DEGREES", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_degrees)}, - { "DES_ENCRYPT", SYM(DES_ENCRYPT_SYM),0,0}, - { "DES_DECRYPT", SYM(DES_DECRYPT_SYM),0,0}, - { "ELT", SYM(ELT_FUNC),0,0}, - { "ENCODE", SYM(ENCODE_SYM),0,0}, - { "ENCRYPT", SYM(ENCRYPT),0,0}, - { "EXTRACT", SYM(EXTRACT_SYM),0,0}, - { "EXP", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_exp)}, - { "EXPORT_SET", SYM(EXPORT_SET),0,0}, - { "FIELD", SYM(FIELD_FUNC),0,0}, /* For compability */ - { "FIND_IN_SET", SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_find_in_set)}, - { "FLOOR", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_floor)}, - { "FORMAT", SYM(FORMAT_SYM),0,0}, - { "FOUND_ROWS", SYM(FUNC_ARG0),0,CREATE_FUNC(create_func_found_rows)}, - { "FROM_DAYS", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_from_days)}, - { "FROM_UNIXTIME", SYM(FROM_UNIXTIME),0,0}, - { "GET_LOCK", SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_get_lock)}, - { "GREATEST", SYM(GREATEST_SYM),0,0}, - { "GROUP_UNIQUE_USERS", SYM(GROUP_UNIQUE_USERS),0,0}, - { "HEX", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_hex)}, - { "IFNULL", SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_ifnull)}, - { "INET_ATON", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_inet_aton)}, - { "INET_NTOA", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_inet_ntoa)}, - { "INSTR", SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_instr)}, - { "ISNULL", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_isnull)}, - { "IS_FREE_LOCK", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_is_free_lock)}, - { "LAST_INSERT_ID", SYM(LAST_INSERT_ID),0,0}, - { "LCASE", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_lcase)}, - { "LEAST", SYM(LEAST_SYM),0,0}, - { "LENGTH", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_length)}, - { "LN", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_ln)}, - { "LOAD_FILE", SYM(FUNC_ARG1),0,CREATE_FUNC(create_load_file)}, - { "LOCATE", SYM(LOCATE),0,0}, - { "LOG", SYM(LOG_SYM),0,0}, - { "LOG2", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_log2)}, - { "LOG10", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_log10)}, - { "LOWER", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_lcase)}, - { "LPAD", SYM(FUNC_ARG3),0,CREATE_FUNC(create_func_lpad)}, - { "LTRIM", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_ltrim)}, - { "MAKE_SET", SYM(MAKE_SET_SYM),0,0}, - { "MASTER_POS_WAIT", SYM(MASTER_POS_WAIT),0,0}, - { "MAX", SYM(MAX_SYM),0,0}, - { "MD5", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_md5)}, - { "MID", SYM(SUBSTRING),0,0}, /* unireg function */ - { "MIN", SYM(MIN_SYM),0,0}, - { "MOD", SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_mod)}, - { "MONTHNAME", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_monthname)}, - { "NOW", SYM(NOW_SYM),0,0}, - { "NULLIF", SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_nullif)}, - { "OCTET_LENGTH", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_length)}, - { "OCT", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_oct)}, - { "OLD_PASSWORD", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_password)}, - { "ORD", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_ord)}, - { "PERIOD_ADD", SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_period_add)}, - { "PERIOD_DIFF", SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_period_diff)}, - { "PI", SYM(FUNC_ARG0),0,CREATE_FUNC(create_func_pi)}, - { "POSITION", SYM(POSITION_SYM),0,0}, - { "POW", SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_pow)}, - { "POWER", 
SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_pow)}, - { "QUARTER", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_quarter)}, - { "QUOTE", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_quote)}, - { "RADIANS", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_radians)}, - { "RAND", SYM(RAND),0,0}, - { "RELEASE_LOCK", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_release_lock)}, - { "REPEAT", SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_repeat)}, - { "REVERSE", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_reverse)}, - { "ROUND", SYM(ROUND),0,0}, - { "RPAD", SYM(FUNC_ARG3),0,CREATE_FUNC(create_func_rpad)}, - { "RTRIM", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_rtrim)}, - { "SEC_TO_TIME", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_sec_to_time)}, - { "SESSION_USER", SYM(USER),0,0}, - { "SUBDATE", SYM(DATE_SUB_INTERVAL),0,0}, - { "SIGN", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_sign)}, - { "SIN", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_sin)}, - { "SHA", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_sha)}, - { "SHA1", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_sha)}, - { "SOUNDEX", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_soundex)}, - { "SPACE", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_space)}, - { "SQRT", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_sqrt)}, - { "STD", SYM(STD_SYM),0,0}, - { "STDDEV", SYM(STD_SYM),0,0}, - { "STRCMP", SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_strcmp)}, - { "SUBSTRING", SYM(SUBSTRING),0,0}, - { "SUBSTRING_INDEX", SYM(SUBSTRING_INDEX),0,0}, - { "SUM", SYM(SUM_SYM),0,0}, - { "SYSDATE", SYM(NOW_SYM),0,0}, - { "SYSTEM_USER", SYM(USER),0,0}, - { "TAN", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_tan)}, - { "TIME_FORMAT", SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_time_format)}, - { "TIME_TO_SEC", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_time_to_sec)}, - { "TO_DAYS", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_to_days)}, - { "TRIM", SYM(TRIM),0,0}, - { "UCASE", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_ucase)}, - { "UNIQUE_USERS", SYM(UNIQUE_USERS),0,0}, - { "UNIX_TIMESTAMP", SYM(UNIX_TIMESTAMP),0,0}, - { "UPPER", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_ucase)}, - { "USER", SYM(USER),0,0}, - { "VERSION", SYM(FUNC_ARG0),0,CREATE_FUNC(create_func_version)}, - { "WEEK", SYM(WEEK_SYM),0,0}, - { "WEEKDAY", SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_weekday)}, - { "YEARWEEK", SYM(YEARWEEK),0,0} + { "ABS", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_abs)}, + { "ACOS", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_acos)}, + { "ADDDATE", SYM(ADDDATE_SYM)}, + { "ADDTIME", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_addtime)}, + { "AES_ENCRYPT", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_aes_encrypt)}, + { "AES_DECRYPT", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_aes_decrypt)}, + { "AREA", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_area)}, + { "ASIN", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_asin)}, + { "ASBINARY", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_as_wkb)}, + { "ASTEXT", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_as_wkt)}, + { "ASWKB", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_as_wkb)}, + { "ASWKT", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_as_wkt)}, + { "ATAN", SYM(ATAN)}, + { "ATAN2", SYM(ATAN)}, + { "BENCHMARK", SYM(BENCHMARK_SYM)}, + { "BIN", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_bin)}, + { "BIT_COUNT", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_bit_count)}, + { "BIT_OR", SYM(BIT_OR)}, + { "BIT_AND", SYM(BIT_AND)}, + { "BIT_XOR", SYM(BIT_XOR)}, + { "CAST", SYM(CAST_SYM)}, + { "CEIL", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_ceiling)}, + { "CEILING", 
F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_ceiling)}, + { "BIT_LENGTH", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_bit_length)}, + { "CENTROID", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_centroid)}, + { "CHAR_LENGTH", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_char_length)}, + { "CHARACTER_LENGTH", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_char_length)}, + { "COALESCE", SYM(COALESCE)}, + { "COERCIBILITY", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_coercibility)}, + { "COMPRESS", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_compress)}, + { "CONCAT", SYM(CONCAT)}, + { "CONCAT_WS", SYM(CONCAT_WS)}, + { "CONNECTION_ID", F_SYM(FUNC_ARG0),0,CREATE_FUNC(create_func_connection_id)}, + { "CONTAINS", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_contains)}, + { "CONV", F_SYM(FUNC_ARG3),0,CREATE_FUNC(create_func_conv)}, + { "CONVERT_TZ", SYM(CONVERT_TZ_SYM)}, + { "COUNT", SYM(COUNT_SYM)}, + { "COS", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_cos)}, + { "COT", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_cot)}, + { "CRC32", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_crc32)}, + { "CROSSES", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_crosses)}, + { "CURDATE", SYM(CURDATE)}, + { "CURTIME", SYM(CURTIME)}, + { "DATE_ADD", SYM(DATE_ADD_INTERVAL)}, + { "DATEDIFF", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_datediff)}, + { "DATE_FORMAT", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_date_format)}, + { "DATE_SUB", SYM(DATE_SUB_INTERVAL)}, + { "DAYNAME", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_dayname)}, + { "DAYOFMONTH", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_dayofmonth)}, + { "DAYOFWEEK", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_dayofweek)}, + { "DAYOFYEAR", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_dayofyear)}, + { "DECODE", SYM(DECODE_SYM)}, + { "DEGREES", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_degrees)}, + { "DES_ENCRYPT", SYM(DES_ENCRYPT_SYM)}, + { "DES_DECRYPT", SYM(DES_DECRYPT_SYM)}, + { "DIMENSION", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_dimension)}, + { "DISJOINT", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_disjoint)}, + { "ELT", SYM(ELT_FUNC)}, + { "ENCODE", SYM(ENCODE_SYM)}, + { "ENCRYPT", SYM(ENCRYPT)}, + { "ENDPOINT", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_endpoint)}, + { "ENVELOPE", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_envelope)}, + { "EQUALS", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_equals)}, + { "EXTERIORRING", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_exteriorring)}, + { "EXTRACT", SYM(EXTRACT_SYM)}, + { "EXP", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_exp)}, + { "EXPORT_SET", SYM(EXPORT_SET)}, + { "FIELD", SYM(FIELD_FUNC)}, /* For compability */ + { "FIND_IN_SET", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_find_in_set)}, + { "FLOOR", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_floor)}, + { "FORMAT", SYM(FORMAT_SYM)}, + { "FOUND_ROWS", F_SYM(FUNC_ARG0),0,CREATE_FUNC(create_func_found_rows)}, + { "FROM_DAYS", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_from_days)}, + { "FROM_UNIXTIME", SYM(FROM_UNIXTIME)}, + { "GET_LOCK", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_get_lock)}, + { "GEOMETRYN", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_geometryn)}, + { "GEOMETRYTYPE", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_geometry_type)}, + { "GEOMCOLLFROMTEXT", SYM(GEOMCOLLFROMTEXT)}, + { "GEOMCOLLFROMWKB", SYM(GEOMFROMWKB)}, + { "GEOMETRYCOLLECTIONFROMTEXT",SYM(GEOMCOLLFROMTEXT)}, + { "GEOMETRYCOLLECTIONFROMWKB",SYM(GEOMFROMWKB)}, + { "GEOMETRYFROMTEXT", SYM(GEOMFROMTEXT)}, + { "GEOMETRYFROMWKB", SYM(GEOMFROMWKB)}, + { 
"GEOMFROMTEXT", SYM(GEOMFROMTEXT)}, + { "GEOMFROMWKB", SYM(GEOMFROMWKB)}, + { "GLENGTH", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_glength)}, + { "GREATEST", SYM(GREATEST_SYM)}, + { "GROUP_CONCAT", SYM(GROUP_CONCAT_SYM)}, + { "GROUP_UNIQUE_USERS", SYM(GROUP_UNIQUE_USERS)}, + { "HEX", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_hex)}, + { "IFNULL", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_ifnull)}, + { "INET_ATON", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_inet_aton)}, + { "INET_NTOA", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_inet_ntoa)}, + { "INSTR", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_instr)}, + { "INTERIORRINGN", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_interiorringn)}, + { "INTERSECTS", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_intersects)}, + { "ISCLOSED", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_isclosed)}, + { "ISEMPTY", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_isempty)}, + { "ISNULL", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_isnull)}, + { "IS_FREE_LOCK", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_is_free_lock)}, + { "IS_USED_LOCK", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_is_used_lock)}, + { "LAST_INSERT_ID", SYM(LAST_INSERT_ID)}, + { "ISSIMPLE", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_issimple)}, + { "LAST_DAY", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_last_day)}, + { "LCASE", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_lcase)}, + { "LEAST", SYM(LEAST_SYM)}, + { "LENGTH", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_length)}, + { "LN", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_ln)}, + { "LINEFROMTEXT", SYM(LINEFROMTEXT)}, + { "LINEFROMWKB", SYM(GEOMFROMWKB)}, + { "LINESTRINGFROMTEXT",SYM(LINEFROMTEXT)}, + { "LINESTRINGFROMWKB",SYM(GEOMFROMWKB)}, + { "LOAD_FILE", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_load_file)}, + { "LOCATE", SYM(LOCATE)}, + { "LOG", SYM(LOG_SYM)}, + { "LOG2", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_log2)}, + { "LOG10", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_log10)}, + { "LOWER", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_lcase)}, + { "LPAD", F_SYM(FUNC_ARG3),0,CREATE_FUNC(create_func_lpad)}, + { "LTRIM", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_ltrim)}, + { "MAKE_SET", SYM(MAKE_SET_SYM)}, + { "MAKEDATE", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_makedate)}, + { "MAKETIME", F_SYM(FUNC_ARG3),0,CREATE_FUNC(create_func_maketime)}, + { "MASTER_POS_WAIT", SYM(MASTER_POS_WAIT)}, + { "MAX", SYM(MAX_SYM)}, + { "MBRCONTAINS", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_contains)}, + { "MBRDISJOINT", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_disjoint)}, + { "MBREQUAL", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_equals)}, + { "MBRINTERSECTS", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_intersects)}, + { "MBROVERLAPS", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_overlaps)}, + { "MBRTOUCHES", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_touches)}, + { "MBRWITHIN", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_within)}, + { "MD5", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_md5)}, + { "MID", SYM(SUBSTRING)}, /* unireg function */ + { "MIN", SYM(MIN_SYM)}, + { "MLINEFROMTEXT", SYM(MLINEFROMTEXT)}, + { "MLINEFROMWKB", SYM(GEOMFROMWKB)}, + { "MPOINTFROMTEXT", SYM(MPOINTFROMTEXT)}, + { "MPOINTFROMWKB", SYM(GEOMFROMWKB)}, + { "MPOLYFROMTEXT", SYM(MPOLYFROMTEXT)}, + { "MPOLYFROMWKB", SYM(GEOMFROMWKB)}, + { "MONTHNAME", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_monthname)}, + { "MULTILINESTRINGFROMTEXT",SYM(MLINEFROMTEXT)}, + { "MULTILINESTRINGFROMWKB",SYM(GEOMFROMWKB)}, + { 
"MULTIPOINTFROMTEXT",SYM(MPOINTFROMTEXT)}, + { "MULTIPOINTFROMWKB",SYM(GEOMFROMWKB)}, + { "MULTIPOLYGONFROMTEXT",SYM(MPOLYFROMTEXT)}, + { "MULTIPOLYGONFROMWKB",SYM(GEOMFROMWKB)}, + { "NOW", SYM(NOW_SYM)}, + { "NULLIF", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_nullif)}, + { "NUMGEOMETRIES", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_numgeometries)}, + { "NUMINTERIORRINGS", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_numinteriorring)}, + { "NUMPOINTS", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_numpoints)}, + { "OCTET_LENGTH", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_length)}, + { "OCT", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_oct)}, + { "ORD", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_ord)}, + { "OVERLAPS", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_overlaps)}, + { "PERIOD_ADD", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_period_add)}, + { "PERIOD_DIFF", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_period_diff)}, + { "PI", F_SYM(FUNC_ARG0),0,CREATE_FUNC(create_func_pi)}, + { "POINTFROMTEXT", SYM(POINTFROMTEXT)}, + { "POINTFROMWKB", SYM(GEOMFROMWKB)}, + { "POINTN", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_pointn)}, + { "POLYFROMTEXT", SYM(POLYFROMTEXT)}, + { "POLYFROMWKB", SYM(GEOMFROMWKB)}, + { "POLYGONFROMTEXT", SYM(POLYFROMTEXT)}, + { "POLYGONFROMWKB", SYM(GEOMFROMWKB)}, + { "POSITION", SYM(POSITION_SYM)}, + { "POW", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_pow)}, + { "POWER", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_pow)}, + { "QUARTER", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_quarter)}, + { "QUOTE", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_quote)}, + { "RADIANS", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_radians)}, + { "RAND", SYM(RAND)}, + { "RELEASE_LOCK", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_release_lock)}, + { "REPEAT", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_repeat)}, + { "REVERSE", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_reverse)}, + { "ROUND", SYM(ROUND)}, + { "RPAD", F_SYM(FUNC_ARG3),0,CREATE_FUNC(create_func_rpad)}, + { "RTRIM", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_rtrim)}, + { "SEC_TO_TIME", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_sec_to_time)}, + { "SESSION_USER", SYM(USER)}, + { "SUBDATE", SYM(SUBDATE_SYM)}, + { "SIGN", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_sign)}, + { "SIN", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_sin)}, + { "SHA", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_sha)}, + { "SHA1", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_sha)}, + { "SOUNDEX", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_soundex)}, + { "SPACE", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_space)}, + { "SQRT", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_sqrt)}, + { "SRID", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_srid)}, + { "STARTPOINT", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_startpoint)}, + { "STD", SYM(STD_SYM)}, + { "STDDEV", SYM(STD_SYM)}, + { "STR_TO_DATE", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_str_to_date)}, + { "STRCMP", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_strcmp)}, + { "SUBSTR", SYM(SUBSTRING)}, + { "SUBSTRING", SYM(SUBSTRING)}, + { "SUBSTRING_INDEX", SYM(SUBSTRING_INDEX)}, + { "SUBTIME", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_subtime)}, + { "SUM", SYM(SUM_SYM)}, + { "SYSDATE", SYM(NOW_SYM)}, + { "SYSTEM_USER", SYM(USER)}, + { "TAN", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_tan)}, + { "TIME_FORMAT", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_time_format)}, + { "TIME_TO_SEC", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_time_to_sec)}, + { "TIMEDIFF", 
F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_timediff)}, + { "TO_DAYS", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_to_days)}, + { "TOUCHES", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_touches)}, + { "TRIM", SYM(TRIM)}, + { "UCASE", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_ucase)}, + { "UNCOMPRESS", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_uncompress)}, + { "UNCOMPRESSED_LENGTH", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_uncompressed_length)}, + { "UNHEX", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_unhex)}, + { "UNIQUE_USERS", SYM(UNIQUE_USERS)}, + { "UNIX_TIMESTAMP", SYM(UNIX_TIMESTAMP)}, + { "UPPER", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_ucase)}, + { "UUID", F_SYM(FUNC_ARG0),0,CREATE_FUNC(create_func_uuid)}, + { "VARIANCE", SYM(VARIANCE_SYM)}, + { "VERSION", F_SYM(FUNC_ARG0),0,CREATE_FUNC(create_func_version)}, + { "WEEK", SYM(WEEK_SYM)}, + { "WEEKDAY", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_weekday)}, + { "WEEKOFYEAR", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_weekofyear)}, + { "WITHIN", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_within)}, + { "X", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_x)}, + { "Y", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_y)}, + { "YEARWEEK", SYM(YEARWEEK)} }; diff --git a/sql/lex_symbol.h b/sql/lex_symbol.h index 9fff1751b1b..3074a489b6a 100644 --- a/sql/lex_symbol.h +++ b/sql/lex_symbol.h @@ -20,11 +20,14 @@ #ifndef _lex_symbol_h #define _lex_symbol_h +struct st_sym_group; + typedef struct st_symbol { const char *name; uint tok; uint length; void *create_func; + struct st_sym_group *group; } SYMBOL; typedef struct st_lex_symbol @@ -34,4 +37,13 @@ typedef struct st_lex_symbol uint length; } LEX_SYMBOL; +typedef struct st_sym_group { + const char *name; + const char *needed_define; +} SYM_GROUP; + +extern SYM_GROUP sym_group_common; +extern SYM_GROUP sym_group_geom; +extern SYM_GROUP sym_group_rtree; + #endif /* _lex_symbol_h */ diff --git a/sql/lock.cc b/sql/lock.cc index 713eb02d8ec..ab4a81034ba 100644 --- a/sql/lock.cc +++ b/sql/lock.cc @@ -78,6 +78,7 @@ extern HASH open_cache; static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table,uint count, uint flags, TABLE **write_locked); +static void reset_lock_data(MYSQL_LOCK *sql_lock); static int lock_external(THD *thd, TABLE **table,uint count); static int unlock_external(THD *thd, TABLE **table,uint count); static void print_lock_error(int error); @@ -120,12 +121,16 @@ MYSQL_LOCK *mysql_lock_tables(THD *thd, TABLE **tables, uint count, uint flags) */ if (wait_if_global_read_lock(thd, 1, 1)) { + /* Clear the lock type of all lock data to avoid reusage. */ + reset_lock_data(sql_lock); my_free((gptr) sql_lock,MYF(0)); sql_lock=0; break; - } + } if (thd->version != refresh_version) { + /* Clear the lock type of all lock data to avoid reusage. */ + reset_lock_data(sql_lock); my_free((gptr) sql_lock,MYF(0)); goto retry; } @@ -134,6 +139,8 @@ MYSQL_LOCK *mysql_lock_tables(THD *thd, TABLE **tables, uint count, uint flags) thd->proc_info="System lock"; if (lock_external(thd, tables, count)) { + /* Clear the lock type of all lock data to avoid reusage. 
*/ + reset_lock_data(sql_lock); my_free((gptr) sql_lock,MYF(0)); sql_lock=0; thd->proc_info=0; @@ -183,6 +190,7 @@ retry: sql_lock=0; } } + thd->lock_time(); DBUG_RETURN (sql_lock); } @@ -264,7 +272,7 @@ void mysql_unlock_read_tables(THD *thd, MYSQL_LOCK *sql_lock) { if (sql_lock->locks[i]->type >= TL_WRITE_ALLOW_READ) { - swap(THR_LOCK_DATA *,*lock,sql_lock->locks[i]); + swap_variables(THR_LOCK_DATA *, *lock, sql_lock->locks[i]); lock++; found++; } @@ -284,7 +292,7 @@ void mysql_unlock_read_tables(THD *thd, MYSQL_LOCK *sql_lock) DBUG_ASSERT(sql_lock->table[i]->lock_position == i); if ((uint) sql_lock->table[i]->reginfo.lock_type >= TL_WRITE_ALLOW_READ) { - swap(TABLE *,*table,sql_lock->table[i]); + swap_variables(TABLE *, *table, sql_lock->table[i]); table++; found++; } @@ -638,10 +646,14 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, if (table->db_stat & HA_READ_ONLY) { my_error(ER_OPEN_AS_READONLY,MYF(0),table->table_name); + /* Clear the lock type of the lock data that are stored already. */ + sql_lock->lock_count= locks - sql_lock->locks; + reset_lock_data(sql_lock); my_free((gptr) sql_lock,MYF(0)); DBUG_RETURN(0); } } + THR_LOCK_DATA **org_locks = locks; locks_start= locks; locks= table->file->store_lock(thd, locks, (flags & GET_LOCK_UNLOCK) ? TL_IGNORE : @@ -653,19 +665,76 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, table->lock_count= (uint) (locks - locks_start); } *to++= table; + if (locks) + for ( ; org_locks != locks ; org_locks++) + (*org_locks)->debug_print_param= (void *) table; } DBUG_RETURN(sql_lock); } +/* + Reset lock type in lock data. + + SYNOPSIS + reset_lock_data() + sql_lock The MySQL lock. + + DESCRIPTION + + After a locking error we want to quit the locking of the table(s). + The test case in the bug report for Bug #18544 has the following + cases: 1. Locking error in lock_external() due to InnoDB timeout. + 2. Locking error in get_lock_data() due to missing write permission. + 3. Locking error in wait_if_global_read_lock() due to lock conflict. + + In all these cases we have already set the lock type into the lock + data of the open table(s). If the table(s) are in the open table + cache, they could be reused with the non-zero lock type set. This + could lead to ignoring a different lock type with the next lock. + + Clear the lock type of all lock data. This ensures that the next + lock request will set its lock type properly. + + RETURN + void +*/ + +static void reset_lock_data(MYSQL_LOCK *sql_lock) +{ + THR_LOCK_DATA **ldata; + THR_LOCK_DATA **ldata_end; + + for (ldata= sql_lock->locks, ldata_end= ldata + sql_lock->lock_count; + ldata < ldata_end; + ldata++) + { + /* Reset lock type. */ + (*ldata)->type= TL_UNLOCK; + } +} + + /***************************************************************************** -** Lock table based on the name. -** This is used when we need total access to a closed, not open table + Lock table based on the name. + This is used when we need total access to a closed, not open table *****************************************************************************/ /* Lock and wait for the named lock. - Returns 0 on ok + + SYNOPSIS + lock_and_wait_for_table_name() + thd Thread handler + table_list Lock first table in this list + + + NOTES + Works together with global read lock. + + RETURN + 0 ok + 1 error */ int lock_and_wait_for_table_name(THD *thd, TABLE_LIST *table_list) @@ -695,21 +764,37 @@ end: /* Put a not open table with an old refresh version in the table cache. 
- This will force any other threads that uses the table to release it - as soon as possible. - One must have a lock on LOCK_open ! - Return values: - < 0 error - == 0 table locked - > 0 table locked, but someone is using it + + SYNPOSIS + lock_table_name() + thd Thread handler + table_list Lock first table in this list + + WARNING + If you are going to update the table, you should use + lock_and_wait_for_table_name instead of this function as this works + together with 'FLUSH TABLES WITH READ LOCK' + + NOTES + This will force any other threads that uses the table to release it + as soon as possible. + + REQUIREMENTS + One must have a lock on LOCK_open ! + + RETURN: + < 0 error + == 0 table locked + > 0 table locked, but someone is using it */ int lock_table_name(THD *thd, TABLE_LIST *table_list) { TABLE *table; char key[MAX_DBKEY_LENGTH]; - char *db= table_list->db ? table_list->db : (thd->db ? thd->db : (char*) ""); + char *db= table_list->db; uint key_length; + HASH_SEARCH_STATE state; DBUG_ENTER("lock_table_name"); DBUG_PRINT("enter",("db: %s name: %s", db, table_list->real_name)); @@ -720,9 +805,9 @@ int lock_table_name(THD *thd, TABLE_LIST *table_list) /* Only insert the table if we haven't insert it already */ - for (table=(TABLE*) hash_search(&open_cache,(byte*) key,key_length) ; + for (table=(TABLE*) hash_first(&open_cache, (byte*)key, key_length, &state); table ; - table = (TABLE*) hash_next(&open_cache,(byte*) key,key_length)) + table = (TABLE*) hash_next(&open_cache, (byte*)key, key_length, &state)) if (table->in_use == thd) DBUG_RETURN(0); @@ -740,18 +825,15 @@ int lock_table_name(THD *thd, TABLE_LIST *table_list) table->locked_by_name=1; table_list->table=table; - if (hash_insert(&open_cache, (byte*) table)) + if (my_hash_insert(&open_cache, (byte*) table)) { my_free((gptr) table,MYF(0)); DBUG_RETURN(-1); } + if (remove_table_from_cache(thd, db, table_list->real_name, RTFC_NO_FLAG)) { - if (remove_table_from_cache(thd, db, - table_list->real_name, RTFC_NO_FLAG)) - { - DBUG_RETURN(1); // Table is in use - } + DBUG_RETURN(1); // Table is in use } DBUG_RETURN(0); } @@ -807,6 +889,10 @@ bool wait_for_locked_table_names(THD *thd, TABLE_LIST *table_list) table_list Names of tables to lock NOTES + If you are just locking one table, you should use + lock_and_wait_for_table_name(). + + REQUIREMENTS One must have a lock on LOCK_open when calling this RETURN @@ -902,7 +988,7 @@ static void print_lock_error(int error) least the first step above) global_read_lock_blocks_commit count of threads which have the global read lock and block - commits (i.e. have completed the second step above) + commits (i.e. 
are in or have completed the second step above) waiting_for_read_lock count of threads which want to take a global read lock but cannot protect_against_global_read_lock @@ -1021,7 +1107,8 @@ void unlock_global_read_lock(THD *thd) (is_not_commit || \ global_read_lock_blocks_commit)) -bool wait_if_global_read_lock(THD *thd, bool abort_on_refresh, bool is_not_commit) +bool wait_if_global_read_lock(THD *thd, bool abort_on_refresh, + bool is_not_commit) { const char *old_message; bool result= 0, need_exit_cond; @@ -1029,7 +1116,7 @@ bool wait_if_global_read_lock(THD *thd, bool abort_on_refresh, bool is_not_commi LINT_INIT(old_message); (void) pthread_mutex_lock(&LOCK_open); - if (need_exit_cond= must_wait) + if ((need_exit_cond= must_wait)) { if (thd->global_read_lock) // This thread had the read locks { @@ -1053,7 +1140,11 @@ bool wait_if_global_read_lock(THD *thd, bool abort_on_refresh, bool is_not_commi } if (!abort_on_refresh && !result) protect_against_global_read_lock++; - if (unlikely(need_exit_cond)) // global read locks are rare + /* + The following is only true in case of a global read locks (which is rare) + and if old_message is set + */ + if (unlikely(need_exit_cond)) thd->exit_cond(old_message); else pthread_mutex_unlock(&LOCK_open); @@ -1068,7 +1159,8 @@ void start_waiting_global_read_lock(THD *thd) if (unlikely(thd->global_read_lock)) DBUG_VOID_RETURN; (void) pthread_mutex_lock(&LOCK_open); - tmp= (!--protect_against_global_read_lock && waiting_for_read_lock); + tmp= (!--protect_against_global_read_lock && + (waiting_for_read_lock || global_read_lock_blocks_commit)); (void) pthread_mutex_unlock(&LOCK_open); if (tmp) pthread_cond_broadcast(&COND_refresh); diff --git a/sql/log.cc b/sql/log.cc index 18c644473f1..c530f15a84f 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -1,15 +1,15 @@ /* Copyright (C) 2000-2003 MySQL AB - + This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. - + This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
- + You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ @@ -23,20 +23,19 @@ #endif #include "mysql_priv.h" -#include "sql_acl.h" #include "sql_repl.h" +#include "ha_innodb.h" // necessary to cut the binlog when crash recovery #include <my_dir.h> #include <stdarg.h> #include <m_ctype.h> // For test_if_number -#include <assert.h> #ifdef __NT__ #include "message.h" #endif MYSQL_LOG mysql_log,mysql_update_log,mysql_slow_log,mysql_bin_log; -extern I_List<i_string> binlog_do_db, binlog_ignore_db; +ulong sync_binlog_counter= 0; static bool test_if_number(const char *str, long *res, bool allow_wildcards); @@ -88,16 +87,19 @@ void setup_windows_event_source() static int find_uniq_filename(char *name) { - long number; - uint i,length; - char buff[FN_REFLEN]; - struct st_my_dir *dir_info; + long number; + uint i; + char buff[FN_REFLEN]; + struct st_my_dir *dir_info; reg1 struct fileinfo *file_info; - ulong max_found=0; + ulong max_found=0; + DBUG_ENTER("find_uniq_filename"); - length=dirname_part(buff,name); - char *start=name+length,*end=strend(start); + uint length = dirname_part(buff,name); + char *start = name + length; + char *end = strend(start); + *end='.'; length= (uint) (end-start+1); @@ -118,7 +120,7 @@ static int find_uniq_filename(char *name) my_dirend(dir_info); *end++='.'; - sprintf(end,"%03ld",max_found+1); + sprintf(end,"%06ld",max_found+1); DBUG_RETURN(0); } @@ -149,6 +151,7 @@ MYSQL_LOG::~MYSQL_LOG() void MYSQL_LOG::cleanup() { + DBUG_ENTER("cleanup"); if (inited) { inited= 0; @@ -157,6 +160,7 @@ void MYSQL_LOG::cleanup() (void) pthread_mutex_destroy(&LOCK_index); (void) pthread_cond_destroy(&update_cond); } + DBUG_VOID_RETURN; } @@ -221,7 +225,7 @@ bool MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg, const char *new_name, const char *index_file_name_arg, enum cache_type io_cache_type_arg, bool no_auto_events_arg, - ulong max_size) + ulong max_size_arg) { char buff[512]; File file= -1, index_file_nr= -1; @@ -232,7 +236,7 @@ bool MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg, last_time=query_start=0; write_error=0; - init(log_type_arg,io_cache_type_arg,no_auto_events_arg,max_size); + init(log_type_arg,io_cache_type_arg,no_auto_events_arg,max_size_arg); if (!(name=my_strdup(log_name,MYF(MY_WME)))) goto err; @@ -251,20 +255,26 @@ bool MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg, if ((file=my_open(log_file_name,open_flags, MYF(MY_WME | ME_WAITTANG))) < 0 || init_io_cache(&log_file, file, IO_SIZE, io_cache_type, - my_tell(file,MYF(MY_WME)), 0, MYF(MY_WME | MY_NABP))) + my_tell(file,MYF(MY_WME)), 0, + MYF(MY_WME | MY_NABP | + ((log_type == LOG_BIN) ? MY_WAIT_IF_FULL : 0)))) goto err; switch (log_type) { case LOG_NORMAL: { char *end; - int len=my_snprintf(buff, sizeof(buff), -#ifdef __NT__ - "%s, Version: %s, started with:\nTCP Port: %d, Named Pipe: %s\n", + int len=my_snprintf(buff, sizeof(buff), "%s, Version: %s. 
" +#ifdef EMBEDDED_LIBRARY + "embedded library\n", my_progname, server_version +#elif __NT__ + "started with:\nTCP Port: %d, Named Pipe: %s\n", + my_progname, server_version, mysqld_port, mysqld_unix_port #else - "%s, Version: %s, started with:\nTcp port: %d Unix socket: %s\n", + "started with:\nTcp port: %d Unix socket: %s\n", + my_progname,server_version,mysqld_port,mysqld_unix_port #endif - my_progname, server_version, mysql_port, mysql_unix_port); + ); end=strnmov(buff+len,"Time Id Command Argument\n", sizeof(buff)-len); if (my_b_write(&log_file, (byte*) buff,(uint) (end-buff)) || @@ -325,16 +335,19 @@ bool MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg, First open of this class instance Create an index file that will hold all file names uses for logging. Add new entries to the end of it. + Index file (and binlog) are so critical for recovery/replication + that we create them with MY_WAIT_IF_FULL. */ fn_format(index_file_name, index_file_name_arg, mysql_data_home, ".index", opt); if ((index_file_nr= my_open(index_file_name, O_RDWR | O_CREAT | O_BINARY , MYF(MY_WME))) < 0 || + my_sync(index_file_nr, MYF(MY_WME)) || init_io_cache(&index_file, index_file_nr, IO_SIZE, WRITE_CACHE, my_seek(index_file_nr,0L,MY_SEEK_END,MYF(0)), - 0, MYF(MY_WME))) + 0, MYF(MY_WME | MY_WAIT_IF_FULL))) goto err; } else @@ -350,16 +363,21 @@ bool MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg, s.set_log_pos(this); s.write(&log_file); } - if (flush_io_cache(&log_file)) + if (flush_io_cache(&log_file) || + my_sync(log_file.file, MYF(MY_WME))) goto err; if (write_file_name_to_index_file) { - /* As this is a new log file, we write the file name to the index file */ + /* + As this is a new log file, we write the file name to the index + file. As every time we write to the index file, we sync it. + */ if (my_b_write(&index_file, (byte*) log_file_name, strlen(log_file_name)) || my_b_write(&index_file, (byte*) "\n", 1) || - flush_io_cache(&index_file)) + flush_io_cache(&index_file) || + my_sync(index_file.file, MYF(MY_WME))) goto err; } break; @@ -440,7 +458,8 @@ static bool copy_up_file_and_fill(IO_CACHE *index_file, my_off_t offset) goto err; } /* The following will either truncate the file or fill the end with \n' */ - if (my_chsize(file, offset - init_offset, '\n', MYF(MY_WME))) + if (my_chsize(file, offset - init_offset, '\n', MYF(MY_WME)) || + my_sync(file, MYF(MY_WME))) goto err; /* Reset data in old index cache */ @@ -545,7 +564,6 @@ int MYSQL_LOG::find_log_pos(LOG_INFO *linfo, const char *log_name, RETURN VALUES 0 ok LOG_INFO_EOF End of log-index-file found - LOG_INFO_SEEK Could not allocate IO cache LOG_INFO_IO Got IO error while reading file */ @@ -650,24 +668,32 @@ err: /* - Delete the current log file, remove it from index file and start on next + Delete relay log files prior to rli->group_relay_log_name + (i.e. all logs which are not involved in a non-finished group + (transaction)), remove them from the index file and start on next relay log. SYNOPSIS purge_first_log() - rli Relay log information - + rli Relay log information + included If false, all relay logs that are strictly before + rli->group_relay_log_name are deleted ; if true, the latter is + deleted too (i.e. all relay logs + read by the SQL slave thread are deleted). + NOTE - This is only called from the slave-execute thread when it has read - all commands from a log and want to switch to a new log. 
- - When this happens, we should never be in an active transaction as - a transaction is always written as a single block to the binary log. + all commands from a relay log and want to switch to a new relay log. + - When this happens, we can be in an active transaction as + a transaction can span over two relay logs + (although it is always written as a single block to the master's binary + log, hence cannot span over two master's binary logs). IMPLEMENTATION - Protects index file with LOCK_index - - Delete first log file, - - Copy all file names after this one to the front of the index file + - Delete relevant relay log files + - Copy all file names after these ones to the front of the index file - If the OS has truncate, truncate the file, else fill it with \n' - - Read the first file name from the index file and store in rli->linfo + - Read the next file name from the index file and store in rli->linfo RETURN VALUES 0 ok @@ -676,66 +702,71 @@ err: LOG_INFO_IO Got IO error while reading file */ -int MYSQL_LOG::purge_first_log(struct st_relay_log_info* rli) +#ifdef HAVE_REPLICATION + +int MYSQL_LOG::purge_first_log(struct st_relay_log_info* rli, bool included) { int error; DBUG_ENTER("purge_first_log"); - /* - Test pre-conditions. - - Assume that we have previously read the first log and - stored it in rli->relay_log_name - */ DBUG_ASSERT(is_open()); DBUG_ASSERT(rli->slave_running == 1); - DBUG_ASSERT(!strcmp(rli->linfo.log_file_name,rli->relay_log_name)); - DBUG_ASSERT(rli->linfo.index_file_offset == - strlen(rli->relay_log_name) + 1); + DBUG_ASSERT(!strcmp(rli->linfo.log_file_name,rli->event_relay_log_name)); - /* We have already processed the relay log, so it's safe to delete it */ - my_delete(rli->relay_log_name, MYF(0)); pthread_mutex_lock(&LOCK_index); - if (copy_up_file_and_fill(&index_file, rli->linfo.index_file_offset)) - { - error= LOG_INFO_IO; - goto err; - } + pthread_mutex_lock(&rli->log_space_lock); + rli->relay_log.purge_logs(rli->group_relay_log_name, included, + 0, 0, &rli->log_space_total); + // Tell the I/O thread to take the relay_log_space_limit into account + rli->ignore_log_space_limit= 0; + pthread_mutex_unlock(&rli->log_space_lock); /* - Update the space counter used by all relay logs Ok to broadcast after the critical region as there is no risk of the mutex being destroyed by this thread later - this helps save context switches */ - pthread_mutex_lock(&rli->log_space_lock); - rli->log_space_total -= rli->relay_log_pos; - //tell the I/O thread to take the relay_log_space_limit into account - rli->ignore_log_space_limit= 0; - pthread_mutex_unlock(&rli->log_space_lock); pthread_cond_broadcast(&rli->log_space_cond); /* Read the next log file name from the index file and pass it back to the caller + If included is true, we want the first relay log; + otherwise we want the one after event_relay_log_name. */ - if ((error=find_log_pos(&rli->linfo, NullS, 0 /*no mutex*/))) + if ((included && (error=find_log_pos(&rli->linfo, NullS, 0))) || + (!included && + ((error=find_log_pos(&rli->linfo, rli->event_relay_log_name, 0)) || + (error=find_next_log(&rli->linfo, 0))))) { char buff[22]; - sql_print_error("next log error: %d offset: %s log: %s", - error, - llstr(rli->linfo.index_file_offset,buff), - rli->linfo.log_file_name); + sql_print_error("next log error: %d offset: %s log: %s included: %d", + error, + llstr(rli->linfo.index_file_offset,buff), + rli->group_relay_log_name, + included); goto err; } + /* - Reset position to current log. 
This involves setting both of the - position variables: + Reset rli's coordinates to the current log. */ - rli->relay_log_pos = BIN_LOG_HEADER_SIZE; - rli->pending = 0; - strmake(rli->relay_log_name,rli->linfo.log_file_name, - sizeof(rli->relay_log_name)-1); + rli->event_relay_log_pos= BIN_LOG_HEADER_SIZE; + strmake(rli->event_relay_log_name,rli->linfo.log_file_name, + sizeof(rli->event_relay_log_name)-1); + + /* + If we removed the rli->group_relay_log_name file, + we must update the rli->group* coordinates, otherwise do not touch it as the + group's execution is not finished (e.g. COMMIT not executed) + */ + if (included) + { + rli->group_relay_log_pos = BIN_LOG_HEADER_SIZE; + strmake(rli->group_relay_log_name,rli->linfo.log_file_name, + sizeof(rli->group_relay_log_name)-1); + rli->notify_group_relay_log_name_update(); + } /* Store where we are in the new file for the execution thread */ flush_relay_log_info(rli); @@ -745,15 +776,33 @@ err: DBUG_RETURN(error); } +/* + Update log index_file +*/ + +int MYSQL_LOG::update_log_index(LOG_INFO* log_info, bool need_update_threads) +{ + if (copy_up_file_and_fill(&index_file, log_info->index_file_start_offset)) + return LOG_INFO_IO; + + // now update offsets in index file for running threads + if (need_update_threads) + adjust_linfo_offsets(log_info->index_file_start_offset); + return 0; +} /* Remove all logs before the given log from disk and from the index file. SYNOPSIS purge_logs() - thd Thread pointer - to_log Delete all log file name before this file. This file is not - deleted + to_log Delete all log file name before this file. + included If true, to_log is deleted too. + need_mutex + need_update_threads If we want to update the log coordinates of + all threads. False for relay logs, true otherwise. + freed_log_space If not null, decrement this variable of + the amount of log space freed NOTES If any of the logs before the deleted one is in use, @@ -764,28 +813,59 @@ err: LOG_INFO_EOF to_log not found */ -int MYSQL_LOG::purge_logs(THD* thd, const char* to_log) +int MYSQL_LOG::purge_logs(const char *to_log, + bool included, + bool need_mutex, + bool need_update_threads, + ulonglong *decrease_log_space) { int error; + bool exit_loop= 0; LOG_INFO log_info; DBUG_ENTER("purge_logs"); + DBUG_PRINT("info",("to_log= %s",to_log)); - pthread_mutex_lock(&LOCK_index); + if (need_mutex) + pthread_mutex_lock(&LOCK_index); if ((error=find_log_pos(&log_info, to_log, 0 /*no mutex*/))) goto err; /* - File name exists in index file; Delete until we find this file + File name exists in index file; delete until we find this file or a file that is used. */ if ((error=find_log_pos(&log_info, NullS, 0 /*no mutex*/))) goto err; - while (strcmp(to_log,log_info.log_file_name) && - !log_in_use(log_info.log_file_name)) + while ((strcmp(to_log,log_info.log_file_name) || (exit_loop=included)) && + !log_in_use(log_info.log_file_name)) { - /* It's not fatal even if we can't delete a log file */ - my_delete(log_info.log_file_name, MYF(0)); - if (find_next_log(&log_info, 0)) + ulong tmp; + LINT_INIT(tmp); + if (decrease_log_space) //stat the file we want to delete + { + MY_STAT s; + if (my_stat(log_info.log_file_name,&s,MYF(0))) + tmp= s.st_size; + else + { + /* + If we could not stat, we can't know the amount + of space that deletion will free. In most cases, + deletion won't work either, so it's not a problem. 
+ */ + sql_print_information("Failed to execute my_stat on file '%s'", + log_info.log_file_name); + tmp= 0; + } + } + /* + It's not fatal if we can't delete a log file ; + if we could delete it, take its size into account + */ + DBUG_PRINT("info",("purging %s",log_info.log_file_name)); + if (!my_delete(log_info.log_file_name, MYF(0)) && decrease_log_space) + *decrease_log_space-= tmp; + if (find_next_log(&log_info, 0) || exit_loop) break; } @@ -793,15 +873,67 @@ int MYSQL_LOG::purge_logs(THD* thd, const char* to_log) If we get killed -9 here, the sysadmin would have to edit the log index file after restart - otherwise, this should be safe */ + error= update_log_index(&log_info, need_update_threads); - if (copy_up_file_and_fill(&index_file, log_info.index_file_start_offset)) - { - error= LOG_INFO_IO; +err: + if (need_mutex) + pthread_mutex_unlock(&LOCK_index); + DBUG_RETURN(error); +} + +/* + Remove all logs before the given file date from disk and from the + index file. + + SYNOPSIS + purge_logs_before_date() + thd Thread pointer + before_date Delete all log files before given date. + + NOTES + If any of the logs before the deleted one is in use, + only purge logs up to this one. + + RETURN VALUES + 0 ok + LOG_INFO_PURGE_NO_ROTATE Binary file that can't be rotated +*/ + +int MYSQL_LOG::purge_logs_before_date(time_t purge_time) +{ + int error; + LOG_INFO log_info; + MY_STAT stat_area; + + DBUG_ENTER("purge_logs_before_date"); + + pthread_mutex_lock(&LOCK_index); + + /* + Delete until we find curren file + or a file that is used or a file + that is older than purge_time. + */ + if ((error=find_log_pos(&log_info, NullS, 0 /*no mutex*/))) goto err; + + while (strcmp(log_file_name, log_info.log_file_name) && + !log_in_use(log_info.log_file_name)) + { + /* It's not fatal even if we can't delete a log file */ + if (!my_stat(log_info.log_file_name, &stat_area, MYF(0)) || + stat_area.st_mtime >= purge_time) + break; + my_delete(log_info.log_file_name, MYF(0)); + if (find_next_log(&log_info, 0)) + break; } - // now update offsets in index file for running threads - adjust_linfo_offsets(log_info.index_file_start_offset); + /* + If we get killed -9 here, the sysadmin would have to edit + the log index file after restart - otherwise, this should be safe + */ + error= update_log_index(&log_info, 1); err: pthread_mutex_unlock(&LOCK_index); @@ -809,6 +941,9 @@ err: } +#endif /* HAVE_REPLICATION */ + + /* Create a new log file name @@ -892,17 +1027,10 @@ void MYSQL_LOG::new_file(bool need_lock) We log the whole file name for log file as the user may decide to change base names at some point. 
*/ - THD* thd = current_thd; /* may be 0 if we are reacting to SIGHUP */ - Rotate_log_event r(thd,new_name+dirname_length(new_name)); + THD *thd = current_thd; /* may be 0 if we are reacting to SIGHUP */ + Rotate_log_event r(thd,new_name+dirname_length(new_name), + 0, LOG_EVENT_OFFSET, 0); r.set_log_pos(this); - - /* - Because this log rotation could have been initiated by a master of - the slave running with log-bin, we set the flag on rotate - event to prevent infinite log rotation loop - */ - if (thd && thd->slave_thread) - r.flags|= LOG_EVENT_FORCED_ROTATE_F; r.write(&log_file); bytes_written += r.get_event_len(); } @@ -924,6 +1052,8 @@ void MYSQL_LOG::new_file(bool need_lock) open(old_name, save_log_type, new_name_ptr, index_file_name, io_cache_type, no_auto_events, max_size); + if (this == &mysql_bin_log) + report_pos_in_innodb(); my_free(old_name,MYF(0)); end: @@ -977,7 +1107,7 @@ bool MYSQL_LOG::appendv(const char* buf, uint len,...) DBUG_ASSERT(log_file.type == SEQ_READ_APPEND); - pthread_mutex_lock(&LOCK_log); + safe_mutex_assert_owner(&LOCK_log); do { if (my_b_append(&log_file,(byte*) buf,len)) @@ -996,7 +1126,6 @@ bool MYSQL_LOG::appendv(const char* buf, uint len,...) } err: - pthread_mutex_unlock(&LOCK_log); if (!error) signal_update(); DBUG_RETURN(error); @@ -1013,7 +1142,8 @@ bool MYSQL_LOG::write(THD *thd,enum enum_server_command command, { if (is_open() && (what_to_log & (1L << (uint) command))) { - int error=0; + uint length; + int error= 0; VOID(pthread_mutex_lock(&LOCK_log)); /* Test if someone closed between the is_open test and lock */ @@ -1027,8 +1157,11 @@ bool MYSQL_LOG::write(THD *thd,enum enum_server_command command, if (thd) { // Normal thread - if ((thd->options & OPTION_LOG_OFF) && - (thd->master_access & SUPER_ACL)) + if ((thd->options & OPTION_LOG_OFF) +#ifndef NO_EMBEDDED_ACCESS_CHECKS + && (thd->master_access & SUPER_ACL) +#endif +) { VOID(pthread_mutex_unlock(&LOCK_log)); return 0; // No logging @@ -1062,8 +1195,10 @@ bool MYSQL_LOG::write(THD *thd,enum enum_server_command command, } else if (my_b_write(&log_file, (byte*) "\t\t",2) < 0) error=errno; - sprintf(buff,"%7ld %-11.11s", id,command_name[(uint) command]); - if (my_b_write(&log_file, (byte*) buff,strlen(buff))) + length=my_sprintf(buff, + (buff, "%7ld %-11.11s", id, + command_name[(uint) command])); + if (my_b_write(&log_file, (byte*) buff,length)) error=errno; if (format) { @@ -1088,6 +1223,13 @@ bool MYSQL_LOG::write(THD *thd,enum enum_server_command command, } +inline bool sync_binlog(IO_CACHE *cache) +{ + return (sync_binlog_period && + (sync_binlog_period == ++sync_binlog_counter) && + (sync_binlog_counter= 0, my_sync(cache->file, MYF(MY_WME)))); +} + /* Write an event to the binary log */ @@ -1097,6 +1239,7 @@ bool MYSQL_LOG::write(Log_event* event_info) THD *thd=event_info->thd; bool called_handler_commit=0; bool error=0; + bool should_rotate = 0; DBUG_ENTER("MYSQL_LOG::write(event)"); pthread_mutex_lock(&LOCK_log); @@ -1108,7 +1251,6 @@ bool MYSQL_LOG::write(Log_event* event_info) */ if (is_open()) { - bool should_rotate= 0; const char *local_db= event_info->get_db(); IO_CACHE *file= &log_file; #ifdef USING_TRANSACTIONS @@ -1127,19 +1269,20 @@ bool MYSQL_LOG::write(Log_event* event_info) file= &thd->transaction.trans_log; #endif DBUG_PRINT("info",("event type=%d",event_info->get_type_code())); +#ifdef HAVE_REPLICATION /* In the future we need to add to the following if tests like "do the involved tables match (to be implemented) binlog_[wild_]{do|ignore}_table?" 
(WL#1049)" */ - if ((thd && !(thd->options & OPTION_BIN_LOG) && - (thd->master_access & SUPER_ACL)) || - (local_db && !db_ok(local_db, binlog_do_db, binlog_ignore_db))) + if ((thd && !(thd->options & OPTION_BIN_LOG)) || + (!db_ok(local_db, binlog_do_db, binlog_ignore_db))) { VOID(pthread_mutex_unlock(&LOCK_log)); - DBUG_PRINT("error",("!db_ok")); + DBUG_PRINT("error",("!db_ok('%s')", local_db)); DBUG_RETURN(0); } +#endif /* HAVE_REPLICATION */ error=1; /* @@ -1154,6 +1297,56 @@ bool MYSQL_LOG::write(Log_event* event_info) if (thd) { +#if MYSQL_VERSION_ID < 50000 + /* + To make replication of charsets working in 4.1 we are writing values + of charset related variables before every statement in the binlog, + if values of those variables differ from global server-wide defaults. + We are using SET ONE_SHOT command so that the charset vars get reset + to default after the first non-SET statement. + In the next 5.0 this won't be needed as we will use the new binlog + format to store charset info. + */ + if ((thd->variables.character_set_client->number != + global_system_variables.collation_server->number) || + (thd->variables.character_set_client->number != + thd->variables.collation_connection->number) || + (thd->variables.collation_server->number != + thd->variables.collation_connection->number)) + { + char buf[200]; + int written= my_snprintf(buf, sizeof(buf)-1, + "SET ONE_SHOT CHARACTER_SET_CLIENT=%u,\ +COLLATION_CONNECTION=%u,COLLATION_DATABASE=%u,COLLATION_SERVER=%u", + (uint) thd->variables.character_set_client->number, + (uint) thd->variables.collation_connection->number, + (uint) thd->variables.collation_database->number, + (uint) thd->variables.collation_server->number); + Query_log_event e(thd, buf, written, 0, FALSE); + e.set_log_pos(this); + e.error_code = 0; // This statement cannot fail (see [1]). + if (e.write(file)) + goto err; + } + /* + We use the same ONE_SHOT trick for making replication of time zones + working in 4.1. Again in 5.0 we have better means for doing this. + */ + if (thd->time_zone_used && + thd->variables.time_zone != global_system_variables.time_zone) + { + char buf[MAX_TIME_ZONE_NAME_LENGTH + 26]; + char *buf_end= strxmov(buf, "SET ONE_SHOT TIME_ZONE='", + thd->variables.time_zone->get_name()->ptr(), + "'", NullS); + Query_log_event e(thd, buf, buf_end - buf, 0, FALSE); + e.set_log_pos(this); + e.error_code = 0; // This statement cannot fail (see [1]). + if (e.write(file)) + goto err; + } +#endif + if (thd->last_insert_id_used) { Intvar_log_event e(thd,(uchar) LAST_INSERT_ID_EVENT, @@ -1176,17 +1369,36 @@ bool MYSQL_LOG::write(Log_event* event_info) if (e.write(file)) goto err; } + if (thd->user_var_events.elements) + { + for (uint i= 0; i < thd->user_var_events.elements; i++) + { + BINLOG_USER_VAR_EVENT *user_var_event; + get_dynamic(&thd->user_var_events,(gptr) &user_var_event, i); + User_var_log_event e(thd, user_var_event->user_var_event->name.str, + user_var_event->user_var_event->name.length, + user_var_event->value, + user_var_event->length, + user_var_event->type, + user_var_event->charset_number); + e.set_log_pos(this); + if (e.write(file)) + goto err; + } + } +#ifdef TO_BE_REMOVED if (thd->variables.convert_set) { char buf[256], *p; p= strmov(strmov(buf, "SET CHARACTER SET "), thd->variables.convert_set->name); Query_log_event e(thd, buf, (ulong) (p - buf), 0); - e.error_code = 0; // This statement cannot fail (see [1]). e.set_log_pos(this); + e.error_code = 0; // This statement cannot fail (see [1]). 
if (e.write(file)) goto err; } +#endif /* If the user has set FOREIGN_KEY_CHECKS=0 we wrap every SQL @@ -1198,7 +1410,7 @@ bool MYSQL_LOG::write(Log_event* event_info) if (thd->options & OPTION_NO_FOREIGN_KEY_CHECKS) { - Query_log_event e(thd, "SET FOREIGN_KEY_CHECKS=0", 24, 0); + Query_log_event e(thd, "SET FOREIGN_KEY_CHECKS=0", 24, 0, FALSE); e.set_log_pos(this); e.error_code = 0; // This statement cannot fail (see [1]). if (e.write(file)) @@ -1227,22 +1439,12 @@ bool MYSQL_LOG::write(Log_event* event_info) { if (thd->options & OPTION_NO_FOREIGN_KEY_CHECKS) { - Query_log_event e(thd, "SET FOREIGN_KEY_CHECKS=1", 24, 0); + Query_log_event e(thd, "SET FOREIGN_KEY_CHECKS=1", 24, 0, FALSE); e.set_log_pos(this); e.error_code = 0; // This statement cannot fail (see [1]). if (e.write(file)) goto err; } -#if MYSQL_VERSION_ID < 40100 - if (thd->variables.convert_set) - { - Query_log_event e(thd, "SET CHARACTER SET DEFAULT", 25, 0); - e.set_log_pos(this); - e.error_code = 0; // This statement cannot fail (see [1]). - if (e.write(file)) - goto err; - } -#endif } /* @@ -1264,9 +1466,9 @@ bool MYSQL_LOG::write(Log_event* event_info) if (file == &log_file) // we are writing to the real log (disk) { - if (flush_io_cache(file)) + if (flush_io_cache(file) || sync_binlog(file)) goto err; - + if (opt_using_transactions && !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) { @@ -1280,6 +1482,30 @@ bool MYSQL_LOG::write(Log_event* event_info) if (event_info->get_type_code() == QUERY_EVENT || event_info->get_type_code() == EXEC_LOAD_EVENT) { +#ifndef DBUG_OFF + if (unlikely(opt_crash_binlog_innodb)) + { + /* + This option is for use in rpl_crash_binlog_innodb.test. + 1st we want to verify that Binlog_dump thread cannot send the + event now (because of LOCK_log): we here tell the Binlog_dump + thread to wake up, sleep for the slave to have time to possibly + receive data from the master (it should not), and then crash. + 2nd we want to verify that at crash recovery the rolled back + event is cut from the binlog. + */ + if (!(--opt_crash_binlog_innodb)) + { + signal_update(); + sleep(2); + fprintf(stderr,"This is a normal crash because of" + " --crash-binlog-innodb\n"); + assert(0); + } + DBUG_PRINT("info",("opt_crash_binlog_innodb: %d", + opt_crash_binlog_innodb)); + } +#endif error = ha_report_binlog_offset_and_commit(thd, log_file_name, file->pos_in_file); called_handler_commit=1; @@ -1321,6 +1547,14 @@ err: if (called_handler_commit) ha_commit_complete(thd); +#ifdef HAVE_REPLICATION + if (should_rotate && expire_logs_days) + { + long purge_time= time(0) - expire_logs_days*24*60*60; + if (purge_time >= 0) + error= purge_logs_before_date(purge_time); + } +#endif DBUG_RETURN(error); } @@ -1360,6 +1594,7 @@ uint MYSQL_LOG::next_file_id() bool MYSQL_LOG::write(THD *thd, IO_CACHE *cache, bool commit_or_rollback) { + bool should_rotate= 0, error= 0; VOID(pthread_mutex_lock(&LOCK_log)); DBUG_ENTER("MYSQL_LOG::write(cache"); @@ -1382,7 +1617,7 @@ bool MYSQL_LOG::write(THD *thd, IO_CACHE *cache, bool commit_or_rollback) we will add the "COMMIT mark and write the buffer to the binlog. */ { - Query_log_event qinfo(thd, "BEGIN", 5, TRUE); + Query_log_event qinfo(thd, "BEGIN", 5, TRUE, FALSE); /* Imagine this is rollback due to net timeout, after all statements of the transaction succeeded. Then we want a zero-error code in BEGIN. @@ -1423,10 +1658,11 @@ bool MYSQL_LOG::write(THD *thd, IO_CACHE *cache, bool commit_or_rollback) Query_log_event qinfo(thd, commit_or_rollback ? 
"COMMIT" : "ROLLBACK", commit_or_rollback ? 6 : 8, - TRUE); + TRUE, FALSE); qinfo.error_code= 0; qinfo.set_log_pos(this); - if (qinfo.write(&log_file) || flush_io_cache(&log_file)) + if (qinfo.write(&log_file) || flush_io_cache(&log_file) || + sync_binlog(&log_file)) goto err; } if (cache->error) // Error on read @@ -1435,12 +1671,28 @@ bool MYSQL_LOG::write(THD *thd, IO_CACHE *cache, bool commit_or_rollback) write_error=1; // Don't give more errors goto err; } +#ifndef DBUG_OFF + if (unlikely(opt_crash_binlog_innodb)) + { + /* see the previous MYSQL_LOG::write() method for a comment */ + if (!(--opt_crash_binlog_innodb)) + { + signal_update(); + sleep(2); + fprintf(stderr, "This is a normal crash because of" + " --crash-binlog-innodb\n"); + assert(0); + } + DBUG_PRINT("info",("opt_crash_binlog_innodb: %d", + opt_crash_binlog_innodb)); + } +#endif if ((ha_report_binlog_offset_and_commit(thd, log_file_name, log_file.pos_in_file))) goto err; signal_update(); DBUG_PRINT("info",("max_size: %lu",max_size)); - if (my_b_tell(&log_file) >= (my_off_t) max_size) + if (should_rotate= (my_b_tell(&log_file) >= (my_off_t) max_size)) { pthread_mutex_lock(&LOCK_index); new_file(0); // inside mutex @@ -1456,7 +1708,16 @@ bool MYSQL_LOG::write(THD *thd, IO_CACHE *cache, bool commit_or_rollback) ha_commit_complete(thd); - DBUG_RETURN(0); +#ifdef HAVE_REPLICATION + if (should_rotate && expire_logs_days) + { + long purge_time= time(0) - expire_logs_days*24*60*60; + if (purge_time >= 0) + error= purge_logs_before_date(purge_time); + } +#endif + + DBUG_RETURN(error); err: if (!write_error) @@ -1471,11 +1732,7 @@ err: /* Write update log in a format suitable for incremental backup - - NOTE - - This code should be deleted in MySQL 5,0 as the binary log - is a full replacement for the update log. - + This is also used by the slow query log. */ bool MYSQL_LOG::write(THD *thd,const char *query, uint query_length, @@ -1485,19 +1742,20 @@ bool MYSQL_LOG::write(THD *thd,const char *query, uint query_length, time_t current_time; if (!is_open()) return 0; + DBUG_ENTER("MYSQL_LOG::write"); + VOID(pthread_mutex_lock(&LOCK_log)); if (is_open()) { // Safety agains reopen int tmp_errno=0; char buff[80],*end; end=buff; - if (!(thd->options & OPTION_UPDATE_LOG) && - (thd->master_access & SUPER_ACL)) + if (!(thd->options & OPTION_UPDATE_LOG)) { VOID(pthread_mutex_unlock(&LOCK_log)); - return 0; + DBUG_RETURN(0); } - if ((specialflag & SPECIAL_LONG_LOG_FORMAT) || query_start_arg) + if (!(specialflag & SPECIAL_SHORT_LOG_FORMAT) || query_start_arg) { current_time=time(NULL); if (current_time != last_time) @@ -1519,8 +1777,8 @@ bool MYSQL_LOG::write(THD *thd,const char *query, uint query_length, tmp_errno=errno; } if (my_b_printf(&log_file, "# User@Host: %s[%s] @ %s [%s]\n", - thd->priv_user, - thd->user, + thd->priv_user ? thd->priv_user : "", + thd->user ? thd->user : "", thd->host ? thd->host : "", thd->ip ? thd->ip : "") == (uint) -1) tmp_errno=errno; @@ -1550,7 +1808,7 @@ bool MYSQL_LOG::write(THD *thd,const char *query, uint query_length, // Save value if we do an insert. 
if (thd->insert_id_used) { - if (specialflag & SPECIAL_LONG_LOG_FORMAT) + if (!(specialflag & SPECIAL_SHORT_LOG_FORMAT)) { end=strmov(end,",insert_id="); end=longlong10_to_str((longlong) thd->last_insert_id,end,-10); @@ -1595,7 +1853,7 @@ bool MYSQL_LOG::write(THD *thd,const char *query, uint query_length, } } VOID(pthread_mutex_unlock(&LOCK_log)); - return error; + DBUG_RETURN(error); } @@ -1615,17 +1873,19 @@ bool MYSQL_LOG::write(THD *thd,const char *query, uint query_length, THD::enter_cond() (see NOTES in sql_class.h). */ - -void MYSQL_LOG:: wait_for_update(THD* thd, bool master_or_slave) +void MYSQL_LOG::wait_for_update(THD* thd, bool master_or_slave) { - const char* old_msg = thd->enter_cond(&update_cond, &LOCK_log, - master_or_slave ? - "Has read all relay log; waiting for \ -the I/O slave thread to update it" : - "Has sent all binlog to slave; \ -waiting for binlog to be updated"); + const char *old_msg; + DBUG_ENTER("wait_for_update"); + old_msg= thd->enter_cond(&update_cond, &LOCK_log, + master_or_slave ? + "Has read all relay log; waiting for the slave I/O " + "thread to update it" : + "Has sent all binlog to slave; waiting for binlog " + "to be updated"); pthread_cond_wait(&update_cond, &LOCK_log); thd->exit_cond(old_msg); + DBUG_VOID_RETURN; } @@ -1651,6 +1911,7 @@ void MYSQL_LOG::close(uint exiting) DBUG_PRINT("enter",("exiting: %d", (int) exiting)); if (log_type != LOG_CLOSED && log_type != LOG_TO_BE_OPENED) { +#ifdef HAVE_REPLICATION if (log_type == LOG_BIN && !no_auto_events && (exiting & LOG_CLOSE_STOP_EVENT)) { @@ -1659,6 +1920,7 @@ void MYSQL_LOG::close(uint exiting) s.write(&log_file); signal_update(); } +#endif /* HAVE_REPLICATION */ end_io_cache(&log_file); if (my_close(log_file.file,MYF(0)) < 0 && ! write_error) { @@ -1734,8 +1996,8 @@ static bool test_if_number(register const char *str, while (*str++ == ' ') ; if (*--str == '-' || *str == '+') str++; - while (isdigit(*str) || (allow_wildcards && - (*str == wild_many || *str == wild_one))) + while (my_isdigit(files_charset_info,*str) || + (allow_wildcards && (*str == wild_many || *str == wild_one))) { flag=1; str++; @@ -1743,7 +2005,7 @@ static bool test_if_number(register const char *str, if (*str == '.') { for (str++ ; - isdigit(*str) || + my_isdigit(files_charset_info,*str) || (allow_wildcards && (*str == wild_many || *str == wild_one)) ; str++, flag=1) ; } @@ -1768,21 +2030,15 @@ void print_buffer_to_file(enum loglevel level, const char *buffer) skr=time(NULL); localtime_r(&skr, &tm_tmp); start=&tm_tmp; -#if MYSQL_VERSION_ID > 40100 - fprintf(stderr, "%02d%02d%02d %2d:%02d:%02d [%s] %s\n", -#else - fprintf(stderr, "%02d%02d%02d %2d:%02d:%02d %s\n", -#endif + fprintf(stderr, "%02d%02d%02d %2d:%02d:%02d [%s] %s\n", start->tm_year % 100, start->tm_mon+1, start->tm_mday, start->tm_hour, start->tm_min, start->tm_sec, -#if MYSQL_VERSION_ID > 40100 (level == ERROR_LEVEL ? "ERROR" : level == WARNING_LEVEL ? - "WARNING" : "INFORMATION"), -#endif + "Warning" : "Note"), buffer); fflush(stderr); @@ -1818,7 +2074,7 @@ bool flush_error_log() the current error file. 
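
The reworked print_buffer_to_file() above now always prints the severity tag, so every error-log line has the shape 'YYMMDD hh:mm:ss [ERROR|Warning|Note] message'. A minimal sketch of that formatting, with its own enum names so as not to be confused with the server's loglevel values.

// Illustrative sketch of the error-log line format used above.
#include <cstdio>
#include <ctime>

enum loglevel_ill { ERROR_LEVEL_ILL, WARNING_LEVEL_ILL, INFORMATION_LEVEL_ILL };

static void print_line(loglevel_ill level, const char *msg)
{
  time_t skr= time(NULL);
  struct tm tm_tmp;
  localtime_r(&skr, &tm_tmp);
  fprintf(stderr, "%02d%02d%02d %2d:%02d:%02d [%s] %s\n",
          tm_tmp.tm_year % 100, tm_tmp.tm_mon + 1, tm_tmp.tm_mday,
          tm_tmp.tm_hour, tm_tmp.tm_min, tm_tmp.tm_sec,
          level == ERROR_LEVEL_ILL   ? "ERROR" :
          level == WARNING_LEVEL_ILL ? "Warning" : "Note",
          msg);
  fflush(stderr);
}

int main() { print_line(WARNING_LEVEL_ILL, "example message"); return 0; }
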
*/ strmov(strmov(err_temp, err_renamed),"-tmp"); - (void) my_delete(err_temp, MYF(0)); + (void) my_delete(err_temp, MYF(0)); if (freopen(err_temp,"a+",stdout)) { freopen(err_temp,"a+",stderr); @@ -1834,16 +2090,16 @@ bool flush_error_log() my_fwrite(stderr, (byte*) buf, bytes, MYF(0)); my_close(fd, MYF(0)); } - (void) my_delete(err_temp, MYF(0)); + (void) my_delete(err_temp, MYF(0)); } else result= 1; #else - my_rename(log_error_file,err_renamed,MYF(0)); - if (freopen(log_error_file,"a+",stdout)) - freopen(log_error_file,"a+",stderr); - else - result= 1; + my_rename(log_error_file,err_renamed,MYF(0)); + if (freopen(log_error_file,"a+",stdout)) + freopen(log_error_file,"a+",stderr); + else + result= 1; #endif VOID(pthread_mutex_unlock(&LOCK_error_log)); } @@ -1851,6 +2107,152 @@ bool flush_error_log() } +/* + If the server has InnoDB on, and InnoDB has published the position of the + last committed transaction (which happens only if a crash recovery occured at + this startup) then truncate the previous binary log at the position given by + InnoDB. If binlog is shorter than the position, print a message to the error + log. + + SYNOPSIS + cut_spurious_tail() + + RETURN VALUES + 1 Error + 0 Ok +*/ + +bool MYSQL_LOG::cut_spurious_tail() +{ + int error= 0; + DBUG_ENTER("cut_spurious_tail"); + +#ifdef HAVE_INNOBASE_DB + if (have_innodb != SHOW_OPTION_YES) + DBUG_RETURN(0); + /* + This is the place where we use information from InnoDB to cut the + binlog. + */ + char *name= ha_innobase::get_mysql_bin_log_name(); + ulonglong pos= ha_innobase::get_mysql_bin_log_pos(); + ulonglong actual_size; + char llbuf1[22], llbuf2[22]; + + if (name[0] == 0 || pos == ULONGLONG_MAX) + { + DBUG_PRINT("info", ("InnoDB has not set binlog info")); + DBUG_RETURN(0); + } + /* The binlog given by InnoDB normally is never an active binlog */ + if (is_open() && is_active(name)) + { + sql_print_error("Warning: after InnoDB crash recovery, InnoDB says that " + "the binary log of the previous run has the same name " + "'%s' as the current one; this is likely to be abnormal.", + name); + DBUG_RETURN(1); + } + sql_print_error("After InnoDB crash recovery, checking if the binary log " + "'%s' contains rolled back transactions which must be " + "removed from it...", name); + /* If we have a too long binlog, cut. If too short, print error */ + int fd= my_open(name, O_EXCL | O_APPEND | O_BINARY | O_WRONLY, MYF(MY_WME)); + if (fd < 0) + { + int save_errno= my_errno; + sql_print_error("Could not open the binary log '%s' for truncation.", + name); + if (save_errno != ENOENT) + sql_print_error("The binary log '%s' should not be used for " + "replication.", name); + DBUG_RETURN(1); + } + + if (pos > (actual_size= my_seek(fd, 0L, MY_SEEK_END, MYF(MY_WME)))) + { + /* + Note that when we have MyISAM rollback this error message should be + reconsidered. + */ + sql_print_error("The binary log '%s' is shorter than its expected size " + "(actual: %s, expected: %s) so it misses at least one " + "committed transaction; so it should not be used for " + "replication or point-in-time recovery. 
You would need " + "to restart slaves from a fresh master's data " + "snapshot ", + name, llstr(actual_size, llbuf1), + llstr(pos, llbuf2)); + error= 1; + goto err; + } + if (pos < actual_size) + { + sql_print_error("The binary log '%s' is bigger than its expected size " + "(actual: %s, expected: %s) so it contains a rolled back " + "transaction; now truncating that.", name, + llstr(actual_size, llbuf1), llstr(pos, llbuf2)); + /* + As on some OS, my_chsize() can only pad with 0s instead of really + truncating. Then mysqlbinlog (and Binlog_dump thread) will error on + these zeroes. This is annoying, but not more (you just need to manually + switch replication to the next binlog). Fortunately, in my_chsize.c, it + says that all modern machines support real ftruncate(). + + */ + if ((error= my_chsize(fd, pos, 0, MYF(MY_WME)))) + goto err; + } +err: + if (my_close(fd, MYF(MY_WME))) + error= 1; +#endif + DBUG_RETURN(error); +} + + +/* + If the server has InnoDB on, store the binlog name and position into + InnoDB. This function is used every time we create a new binlog. + + SYNOPSIS + report_pos_in_innodb() + + NOTES + This cannot simply be done in MYSQL_LOG::open(), because when we create + the first binlog at startup, we have not called ha_init() yet so we cannot + write into InnoDB yet. + + RETURN VALUES + 1 Error + 0 Ok +*/ + +void MYSQL_LOG::report_pos_in_innodb() +{ + DBUG_ENTER("report_pos_in_innodb"); +#ifdef HAVE_INNOBASE_DB + if (is_open() && have_innodb == SHOW_OPTION_YES) + { + DBUG_PRINT("info", ("Reporting binlog info into InnoDB - " + "name: '%s' position: %d", + log_file_name, my_b_tell(&log_file))); + innobase_store_binlog_offset_and_flush_log(log_file_name, + my_b_tell(&log_file)); + } +#endif + DBUG_VOID_RETURN; +} + + +void MYSQL_LOG::signal_update() +{ + DBUG_ENTER("MYSQL_LOG::signal_update"); + pthread_cond_broadcast(&update_cond); + DBUG_VOID_RETURN; +} + + #ifdef __NT__ void print_buffer_to_nt_eventlog(enum loglevel level, char *buff, uint length, int buffLen) @@ -1861,9 +2263,9 @@ void print_buffer_to_nt_eventlog(enum loglevel level, char *buff, DBUG_ENTER("print_buffer_to_nt_eventlog"); buffptr= buff; - if (length > (uint)(buffLen-4)) + if (length > (uint)(buffLen-5)) { - char *newBuff= new char[length + 4]; + char *newBuff= new char[length + 5]; strcpy(newBuff, buff); buffptr= newBuff; } diff --git a/sql/log_event.cc b/sql/log_event.cc index d1321b878bf..19c32b2d28e 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -1,4 +1,4 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2004 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -16,15 +16,21 @@ #ifndef MYSQL_CLIENT -#ifdef __GNUC__ + +#ifdef USE_PRAGMA_IMPLEMENTATION #pragma implementation // gcc: Class implementation #endif + #include "mysql_priv.h" #include "slave.h" #include <my_dir.h> #endif /* MYSQL_CLIENT */ -#include <assert.h> +#define log_cs &my_charset_latin1 + +/* + pretty_print_str() +*/ #ifdef MYSQL_CLIENT static void pretty_print_str(FILE* file, char* str, int len) @@ -49,9 +55,10 @@ static void pretty_print_str(FILE* file, char* str, int len) } fputc('\'', file); } -#endif +#endif /* MYSQL_CLIENT */ -#ifndef MYSQL_CLIENT + +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) static void clear_all_errors(THD *thd, struct st_relay_log_info *rli) { @@ -61,37 +68,56 @@ static void clear_all_errors(THD *thd, struct st_relay_log_info *rli) 
rli->last_slave_errno = 0; } + +/* + Ignore error code specified on command line +*/ + inline int ignored_error_code(int err_code) { return ((err_code == ER_SLAVE_IGNORED_TABLE) || (use_slave_mask && bitmap_is_set(&slave_error_mask, err_code))); } +#endif -static void pretty_print_str(String* packet, char* str, int len) +/* + pretty_print_str() +*/ + +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) +static char *pretty_print_str(char *packet, char *str, int len) { - char* end = str + len; - packet->append('\''); + char *end= str + len; + char *pos= packet; + *pos++= '\''; while (str < end) { char c; switch ((c=*str++)) { - case '\n': packet->append( "\\n"); break; - case '\r': packet->append( "\\r"); break; - case '\\': packet->append( "\\\\"); break; - case '\b': packet->append( "\\b"); break; - case '\t': packet->append( "\\t"); break; - case '\'': packet->append( "\\'"); break; - case 0 : packet->append( "\\0"); break; + case '\n': *pos++= '\\'; *pos++= 'n'; break; + case '\r': *pos++= '\\'; *pos++= 'r'; break; + case '\\': *pos++= '\\'; *pos++= '\\'; break; + case '\b': *pos++= '\\'; *pos++= 'b'; break; + case '\t': *pos++= '\\'; *pos++= 't'; break; + case '\'': *pos++= '\\'; *pos++= '\''; break; + case 0 : *pos++= '\\'; *pos++= '0'; break; default: - packet->append((char)c); + *pos++= c; break; } } - packet->append('\''); + *pos++= '\''; + return pos; } +#endif /* !MYSQL_CLIENT */ +/* + slave_load_file_stem() +*/ + +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) static inline char* slave_load_file_stem(char*buf, uint file_id, int event_server_id) { @@ -103,70 +129,27 @@ static inline char* slave_load_file_stem(char*buf, uint file_id, *buf++ = '-'; return int10_to_str(file_id, buf, 10); } - #endif -const char* Log_event::get_type_str() -{ - switch(get_type_code()) { - case START_EVENT: return "Start"; - case STOP_EVENT: return "Stop"; - case QUERY_EVENT: return "Query"; - case ROTATE_EVENT: return "Rotate"; - case INTVAR_EVENT: return "Intvar"; - case LOAD_EVENT: return "Load"; - case NEW_LOAD_EVENT: return "New_load"; - case SLAVE_EVENT: return "Slave"; - case CREATE_FILE_EVENT: return "Create_file"; - case APPEND_BLOCK_EVENT: return "Append_block"; - case DELETE_FILE_EVENT: return "Delete_file"; - case EXEC_LOAD_EVENT: return "Exec_load"; - default: /* impossible */ return "Unknown"; - } -} - -#ifndef MYSQL_CLIENT -Log_event::Log_event(THD* thd_arg, uint16 flags_arg, bool using_trans) - :log_pos(0), temp_buf(0), exec_time(0), cached_event_len(0), - flags(flags_arg), thd(thd_arg) -{ - server_id = thd->server_id; - when = thd->start_time; - cache_stmt= (using_trans && - (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))); -} - -/* - This minimal constructor is for when you are not even sure that there is a - valid THD. For example in the server when we are shutting down or flushing - logs after receiving a SIGHUP (then we must write a Rotate to the binlog but - we have no THD, so we need this minimal constructor). -*/ -Log_event::Log_event() - :temp_buf(0), exec_time(0), cached_event_len(0), flags(0), cache_stmt(0), - thd(0) -{ - server_id = ::server_id; - when = time(NULL); - log_pos=0; -} /* Delete all temporary files used for SQL_LOAD. 
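
The char*-based pretty_print_str() above performs the same escaping as the old String-based one, just writing into a caller-supplied buffer: wrap the value in single quotes and escape \n, \r, \\, \b, \t, ' and NUL bytes. A standalone sketch of that routine (this version additionally NUL-terminates, which the patch's version leaves to the caller).

// Illustrative sketch of the escaping done by the new char*-based
// pretty_print_str().
#include <cstdio>

static char *escape_into(char *out, const char *str, int len)
{
  const char *end= str + len;
  char *pos= out;
  *pos++= '\'';
  while (str < end)
  {
    char c= *str++;
    switch (c) {
    case '\n': *pos++= '\\'; *pos++= 'n'; break;
    case '\r': *pos++= '\\'; *pos++= 'r'; break;
    case '\\': *pos++= '\\'; *pos++= '\\'; break;
    case '\b': *pos++= '\\'; *pos++= 'b'; break;
    case '\t': *pos++= '\\'; *pos++= 't'; break;
    case '\'': *pos++= '\\'; *pos++= '\''; break;
    case 0:    *pos++= '\\'; *pos++= '0'; break;
    default:   *pos++= c; break;
    }
  }
  *pos++= '\'';
  *pos= 0;                  // caller must provide at least 2*len + 3 bytes
  return pos;               // points at the terminating NUL
}

int main()
{
  char buf[64];
  escape_into(buf, "a\tb\n", 4);
  puts(buf);                // prints 'a\tb\n'
  return 0;
}
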
+ + SYNOPSIS + cleanup_load_tmpdir() */ +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) static void cleanup_load_tmpdir() { MY_DIR *dirp; FILEINFO *file; uint i; - char fname[FN_REFLEN]; - char prefbuf[31]; - char *p; - + char fname[FN_REFLEN], prefbuf[31], *p; + if (!(dirp=my_dir(slave_load_tmpdir,MYF(MY_WME)))) return; - + /* When we are deleting temporary files, we should only remove the files associated with the server id of our server. @@ -195,6 +178,124 @@ static void cleanup_load_tmpdir() #endif +/* + write_str() +*/ + +static bool write_str(IO_CACHE *file, char *str, byte length) +{ + return (my_b_safe_write(file, &length, 1) || + my_b_safe_write(file, (byte*) str, (int) length)); +} + + +/* + read_str() +*/ + +static inline int read_str(char * &buf, char *buf_end, char * &str, + uint8 &len) +{ + if (buf + (uint) (uchar) *buf >= buf_end) + return 1; + len = (uint8) *buf; + str= buf+1; + buf+= (uint) len+1; + return 0; +} + +/* + Transforms a string into "" or its expression in 0x... form. +*/ +char *str_to_hex(char *to, const char *from, uint len) +{ + char *p= to; + if (len) + { + p= strmov(p, "0x"); + for (uint i= 0; i < len; i++, p+= 2) + { + /* val[i] is char. Casting to uchar helps greatly if val[i] < 0 */ + uint tmp= (uint) (uchar) from[i]; + p[0]= _dig_vec_upper[tmp >> 4]; + p[1]= _dig_vec_upper[tmp & 15]; + } + *p= 0; + } + else + p= strmov(p, "\"\""); + return p; // pointer to end 0 of 'to' +} + + +/************************************************************************** + Log_event methods +**************************************************************************/ + +/* + Log_event::get_type_str() +*/ + +const char* Log_event::get_type_str() +{ + switch(get_type_code()) { + case START_EVENT: return "Start"; + case STOP_EVENT: return "Stop"; + case QUERY_EVENT: return "Query"; + case ROTATE_EVENT: return "Rotate"; + case INTVAR_EVENT: return "Intvar"; + case LOAD_EVENT: return "Load"; + case NEW_LOAD_EVENT: return "New_load"; + case SLAVE_EVENT: return "Slave"; + case CREATE_FILE_EVENT: return "Create_file"; + case APPEND_BLOCK_EVENT: return "Append_block"; + case DELETE_FILE_EVENT: return "Delete_file"; + case EXEC_LOAD_EVENT: return "Exec_load"; + case RAND_EVENT: return "RAND"; + case USER_VAR_EVENT: return "User var"; + default: return "Unknown"; /* impossible */ + } +} + + +/* + Log_event::Log_event() +*/ + +#ifndef MYSQL_CLIENT +Log_event::Log_event(THD* thd_arg, uint16 flags_arg, bool using_trans) + :log_pos(0), temp_buf(0), exec_time(0), cached_event_len(0), + flags(flags_arg), thd(thd_arg) +{ + server_id= thd->server_id; + when= thd->start_time; + cache_stmt= (using_trans && + (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))); +} + + +/* + This minimal constructor is for when you are not even sure that there is a + valid THD. For example in the server when we are shutting down or flushing + logs after receiving a SIGHUP (then we must write a Rotate to the binlog but + we have no THD, so we need this minimal constructor). 
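
str_to_hex() above renders a byte string as an 0x... literal, or as "" when it is empty, using the shared _dig_vec_upper digit table. A self-contained sketch of the same transformation with a local digit table.

// Illustrative sketch of str_to_hex(): render bytes as an 0x... literal,
// or as "" when the value is empty.
#include <cstdio>

static char *to_hex_literal(char *to, const char *from, unsigned len)
{
  static const char dig[]= "0123456789ABCDEF";
  char *p= to;
  if (len)
  {
    p+= sprintf(p, "0x");
    for (unsigned i= 0; i < len; i++, p+= 2)
    {
      unsigned tmp= (unsigned char) from[i];   // avoid sign extension
      p[0]= dig[tmp >> 4];
      p[1]= dig[tmp & 15];
    }
    *p= 0;
  }
  else
    p= to + sprintf(to, "\"\"");
  return p;                                    // end of the written string
}

int main()
{
  char buf[32];
  to_hex_literal(buf, "\x01\xff", 2);
  puts(buf);                                   // prints 0x01FF
  return 0;
}
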
+*/ + +Log_event::Log_event() + :temp_buf(0), exec_time(0), cached_event_len(0), flags(0), cache_stmt(0), + thd(0) +{ + server_id= ::server_id; + when= time(NULL); + log_pos= 0; +} +#endif /* !MYSQL_CLIENT */ + + +/* + Log_event::Log_event() +*/ + Log_event::Log_event(const char* buf, bool old_format) :temp_buf(0), cached_event_len(0), cache_stmt(0) { @@ -215,11 +316,17 @@ Log_event::Log_event(const char* buf, bool old_format) #endif } - #ifndef MYSQL_CLIENT +#ifdef HAVE_REPLICATION + +/* + Log_event::exec_event() +*/ int Log_event::exec_event(struct st_relay_log_info* rli) { + DBUG_ENTER("Log_event::exec_event"); + /* rli is null when (as far as I (Guilhem) know) the caller is @@ -234,212 +341,109 @@ int Log_event::exec_event(struct st_relay_log_info* rli) */ if (rli) { - if (rli->inside_transaction) - rli->inc_pending(get_event_len()); + /* + If in a transaction, and if the slave supports transactions, + just inc_event_relay_log_pos(). We only have to check for OPTION_BEGIN + (not OPTION_NOT_AUTOCOMMIT) as transactions are logged + with BEGIN/COMMIT, not with SET AUTOCOMMIT= . + + CAUTION: opt_using_transactions means + innodb || bdb ; suppose the master supports InnoDB and BDB, + but the slave supports only BDB, problems + will arise: + - suppose an InnoDB table is created on the master, + - then it will be MyISAM on the slave + - but as opt_using_transactions is true, the slave will believe he is + transactional with the MyISAM table. And problems will come when one + does START SLAVE; STOP SLAVE; START SLAVE; (the slave will resume at + BEGIN whereas there has not been any rollback). This is the problem of + using opt_using_transactions instead of a finer + "does the slave support _the_transactional_handler_used_on_the_master_". + + More generally, we'll have problems when a query mixes a transactional + handler and MyISAM and STOP SLAVE is issued in the middle of the + "transaction". START SLAVE will resume at BEGIN while the MyISAM table + has already been updated. + */ + if ((thd->options & OPTION_BEGIN) && opt_using_transactions) + rli->inc_event_relay_log_pos(get_event_len()); else { - rli->inc_pos(get_event_len(),log_pos); + rli->inc_group_relay_log_pos(get_event_len(),log_pos); flush_relay_log_info(rli); + /* + Note that Rotate_log_event::exec_event() does not call this function, + so there is no chance that a fake rotate event resets + last_master_timestamp. + Note that we update without mutex (probably ok - except in some very + rare cases, only consequence is that value may take some time to + display in Seconds_Behind_Master - not critical). 
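
The long comment above boils down to one rule: while a transaction is open on a transaction-capable slave, only the event (read) position may advance; the group position, the flush of relay-log.info and last_master_timestamp are deferred until the group completes. A schematic sketch of that rule with stand-in types, not the server's st_relay_log_info.

// Illustrative sketch of the position-advance rule described above.
#include <ctime>

struct RelayInfoSketch {
  unsigned long long event_pos;   // how far we have read in the relay log
  unsigned long long group_pos;   // start of the last fully executed group
  time_t last_master_timestamp;   // feeds Seconds_Behind_Master
};

void advance_positions(RelayInfoSketch *rli, unsigned event_len,
                       bool inside_transaction, time_t event_when)
{
  if (inside_transaction)
  {
    // Mid-transaction: move only the read position; the group is not done,
    // so a restart must replay from the BEGIN.
    rli->event_pos+= event_len;
  }
  else
  {
    // Group finished: both positions advance, the info file would be
    // flushed, and the master timestamp SHOW SLAVE STATUS reports is set.
    rli->event_pos+= event_len;
    rli->group_pos= rli->event_pos;
    rli->last_master_timestamp= event_when;
  }
}
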
+ */ + rli->last_master_timestamp= when; } } - return 0; -} - -void Log_event::pack_info(String* packet) -{ - net_store_data(packet, "", 0); + DBUG_RETURN(0); } -void Query_log_event::pack_info(String* packet) -{ - char buf[256]; - String tmp(buf, sizeof(buf)); - tmp.length(0); - if (db && db_len) - { - tmp.append("use `",5); - tmp.append(db, db_len); - tmp.append("`; ", 3); - } - if (query && q_len) - tmp.append(query, q_len); - net_store_data(packet, (char*)tmp.ptr(), tmp.length()); -} +/* + Log_event::pack_info() +*/ -void Start_log_event::pack_info(String* packet) +void Log_event::pack_info(Protocol *protocol) { - char buf1[256]; - String tmp(buf1, sizeof(buf1)); - tmp.length(0); - char buf[22]; - - tmp.append("Server ver: "); - tmp.append(server_version); - tmp.append(", Binlog ver: "); - tmp.append(llstr(binlog_version, buf)); - net_store_data(packet, tmp.ptr(), tmp.length()); + protocol->store("", &my_charset_bin); } -void Load_log_event::pack_info(String* packet) -{ - char buf[256]; - String tmp(buf, sizeof(buf)); - tmp.length(0); - if (db && db_len) - { - tmp.append("use "); - tmp.append(db, db_len); - tmp.append("; ", 2); - } - - tmp.append("LOAD DATA INFILE '"); - tmp.append(fname, fname_len); - tmp.append("' ", 2); - if (sql_ex.opt_flags & REPLACE_FLAG) - tmp.append(" REPLACE "); - else if (sql_ex.opt_flags & IGNORE_FLAG) - tmp.append(" IGNORE "); - - tmp.append("INTO TABLE `"); - tmp.append(table_name); - tmp.append("` FIELDS TERMINATED BY "); - pretty_print_str(&tmp, sql_ex.field_term, sql_ex.field_term_len); - if (sql_ex.opt_flags & OPT_ENCLOSED_FLAG ) - tmp.append(" OPTIONALLY "); - tmp.append( " ENCLOSED BY "); - pretty_print_str(&tmp, sql_ex.enclosed, sql_ex.enclosed_len); - tmp.append( " ESCAPED BY "); - pretty_print_str(&tmp, sql_ex.escaped, sql_ex.escaped_len); - - tmp.append(" LINES TERMINATED BY "); - pretty_print_str(&tmp, sql_ex.line_term, sql_ex.line_term_len); - if (sql_ex.line_start_len) - { - tmp.append(" STARTING BY "); - pretty_print_str(&tmp, sql_ex.line_start, sql_ex.line_start_len); - } - - if ((long) skip_lines > 0) - { - char nr_buff[32], *end; - tmp.append( " IGNORE "); - end= longlong10_to_str((longlong) skip_lines, nr_buff, 10); - tmp.append(nr_buff, (uint) (end-nr_buff)); - tmp.append( " LINES"); - } - - if (num_fields) - { - uint i; - const char* field = fields; - tmp.append(" ("); - for (i = 0; i < num_fields; i++) - { - if (i) - tmp.append(" ,"); - tmp.append( field); - - field += field_lens[i] + 1; - } - tmp.append(')'); - } - - net_store_data(packet, tmp.ptr(), tmp.length()); -} -void Rotate_log_event::pack_info(String* packet) -{ - char buf1[256], buf[22]; - String tmp(buf1, sizeof(buf1)); - tmp.length(0); - tmp.append(new_log_ident, ident_len); - tmp.append(";pos="); - tmp.append(llstr(pos,buf)); - if (flags & LOG_EVENT_FORCED_ROTATE_F) - tmp.append("; forced by master"); - net_store_data(packet, tmp.ptr(), tmp.length()); -} +/* + Log_event::net_send() -void Intvar_log_event::pack_info(String* packet) -{ - char buf1[256], buf[22]; - String tmp(buf1, sizeof(buf1)); - tmp.length(0); - tmp.append(get_var_type_name()); - tmp.append('='); - tmp.append(llstr(val, buf)); - net_store_data(packet, tmp.ptr(), tmp.length()); -} + Only called by SHOW BINLOG EVENTS +*/ -void Rand_log_event::pack_info(String* packet) +int Log_event::net_send(Protocol *protocol, const char* log_name, my_off_t pos) { - char buf1[256], *pos; - pos=strmov(buf1,"rand_seed1="); - pos=int10_to_str((long) seed1, pos, 10); - pos=strmov(pos, ",rand_seed2="); - pos=int10_to_str((long) 
seed2, pos, 10); - net_store_data(packet, buf1, (uint) (pos-buf1)); + const char *p= strrchr(log_name, FN_LIBCHAR); + const char *event_type; + if (p) + log_name = p + 1; + + protocol->prepare_for_resend(); + protocol->store(log_name, &my_charset_bin); + protocol->store((ulonglong) pos); + event_type = get_type_str(); + protocol->store(event_type, strlen(event_type), &my_charset_bin); + protocol->store((uint32) server_id); + protocol->store((ulonglong) log_pos); + pack_info(protocol); + return protocol->write(); } +#endif /* HAVE_REPLICATION */ -void Slave_log_event::pack_info(String* packet) -{ - char buf1[256], buf[22], *end; - String tmp(buf1, sizeof(buf1)); - tmp.length(0); - tmp.append("host="); - tmp.append(master_host); - tmp.append(",port="); - end= int10_to_str((long) master_port, buf, 10); - tmp.append(buf, (uint32) (end-buf)); - tmp.append(",log="); - tmp.append(master_log); - tmp.append(",pos="); - tmp.append(llstr(master_pos,buf)); - net_store_data(packet, tmp.ptr(), tmp.length()); -} +/* + Log_event::init_show_field_list() +*/ void Log_event::init_show_field_list(List<Item>* field_list) { field_list->push_back(new Item_empty_string("Log_name", 20)); - field_list->push_back(new Item_empty_string("Pos", 20)); + field_list->push_back(new Item_return_int("Pos", 11, + MYSQL_TYPE_LONGLONG)); field_list->push_back(new Item_empty_string("Event_type", 20)); - field_list->push_back(new Item_empty_string("Server_id", 20)); - field_list->push_back(new Item_empty_string("Orig_log_pos", 20)); + field_list->push_back(new Item_return_int("Server_id", 10, + MYSQL_TYPE_LONG)); + field_list->push_back(new Item_return_int("Orig_log_pos", 11, + MYSQL_TYPE_LONGLONG)); field_list->push_back(new Item_empty_string("Info", 20)); } -/* - * only called by SHOW BINLOG EVENTS - */ -int Log_event::net_send(THD* thd_arg, const char* log_name, my_off_t pos) -{ - String* packet = &thd_arg->packet; - const char* p = strrchr(log_name, FN_LIBCHAR); - const char* event_type; - if (p) - log_name = p + 1; - - packet->length(0); - net_store_data(packet, log_name, strlen(log_name)); - net_store_data(packet, (longlong) pos); - event_type = get_type_str(); - net_store_data(packet, event_type, strlen(event_type)); - net_store_data(packet, server_id); - net_store_data(packet, (longlong) log_pos); - pack_info(packet); - return my_net_write(&thd_arg->net, (char*) packet->ptr(), packet->length()); -} - -#endif /* MYSQL_CLIENT */ - - -int Query_log_event::write(IO_CACHE* file) -{ - return query ? 
Log_event::write(file) : -1; -} +#endif /* !MYSQL_CLIENT */ +/* + Log_event::write() +*/ int Log_event::write(IO_CACHE* file) { @@ -447,6 +451,10 @@ int Log_event::write(IO_CACHE* file) } +/* + Log_event::write_header() +*/ + int Log_event::write_header(IO_CACHE* file) { char buf[LOG_EVENT_HEADER_LEN]; @@ -466,8 +474,12 @@ int Log_event::write_header(IO_CACHE* file) return (my_b_safe_write(file, (byte*) buf, (uint) (pos - buf))); } -#ifndef MYSQL_CLIENT +/* + Log_event::read_log_event() +*/ + +#ifndef MYSQL_CLIENT int Log_event::read_log_event(IO_CACHE* file, String* packet, pthread_mutex_t* log_lock) { @@ -522,8 +534,7 @@ end: pthread_mutex_unlock(log_lock); DBUG_RETURN(result); } - -#endif // MYSQL_CLIENT +#endif /* !MYSQL_CLIENT */ #ifndef MYSQL_CLIENT #define UNLOCK_MUTEX if (log_lock) pthread_mutex_unlock(log_lock); @@ -535,7 +546,13 @@ end: #define max_allowed_packet (*mysql_get_parameters()->p_max_allowed_packet) #endif -// allocates memory - the caller is responsible for clean-up +/* + Log_event::read_log_event() + + NOTE: + Allocates memory; The caller is responsible for clean-up +*/ + #ifndef MYSQL_CLIENT Log_event* Log_event::read_log_event(IO_CACHE* file, pthread_mutex_t* log_lock, @@ -609,14 +626,20 @@ Error in Log_event::read_log_event(): '%s', data_len: %d, event_type: %d", } +/* + Log_event::read_log_event() +*/ + Log_event* Log_event::read_log_event(const char* buf, int event_len, const char **error, bool old_format) { + DBUG_ENTER("Log_event::read_log_event"); + if (event_len < EVENT_LEN_OFFSET || (uint) event_len != uint4korr(buf+EVENT_LEN_OFFSET)) { *error="Sanity check failed"; // Needed to free buffer - return NULL; // general sanity check - will fail on a partial read + DBUG_RETURN(NULL); // general sanity check - will fail on a partial read } Log_event* ev = NULL; @@ -634,9 +657,11 @@ Log_event* Log_event::read_log_event(const char* buf, int event_len, case ROTATE_EVENT: ev = new Rotate_log_event(buf, event_len, old_format); break; +#ifdef HAVE_REPLICATION case SLAVE_EVENT: ev = new Slave_log_event(buf, event_len); break; +#endif /* HAVE_REPLICATION */ case CREATE_FILE_EVENT: ev = new Create_file_log_event(buf, event_len, old_format); break; @@ -652,15 +677,20 @@ Log_event* Log_event::read_log_event(const char* buf, int event_len, case START_EVENT: ev = new Start_log_event(buf, old_format); break; +#ifdef HAVE_REPLICATION case STOP_EVENT: ev = new Stop_log_event(buf, old_format); break; +#endif /* HAVE_REPLICATION */ case INTVAR_EVENT: ev = new Intvar_log_event(buf, old_format); break; case RAND_EVENT: ev = new Rand_log_event(buf, old_format); break; + case USER_VAR_EVENT: + ev = new User_var_log_event(buf, old_format); + break; default: break; } @@ -671,20 +701,24 @@ Log_event* Log_event::read_log_event(const char* buf, int event_len, if (!force_opt) { *error= "Found invalid event in binary log"; - return 0; + DBUG_RETURN(0); } ev= new Unknown_log_event(buf, old_format); #else *error= "Found invalid event in binary log"; - return 0; + DBUG_RETURN(0); #endif } ev->cached_event_len = event_len; - return ev; + DBUG_RETURN(ev); } - #ifdef MYSQL_CLIENT + +/* + Log_event::print_header() +*/ + void Log_event::print_header(FILE* file) { char llbuff[22]; @@ -694,6 +728,10 @@ void Log_event::print_header(FILE* file) llstr(log_pos,llbuff)); } +/* + Log_event::print_timestamp() +*/ + void Log_event::print_timestamp(FILE* file, time_t* ts) { struct tm *res; @@ -715,134 +753,157 @@ void Log_event::print_timestamp(FILE* file, time_t* ts) res->tm_sec); } +#endif /* MYSQL_CLIENT */ 
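
Before read_log_event() constructs anything from a raw buffer it checks that the 4-byte little-endian length stored in the event header matches the number of bytes actually read, and only then dispatches on the type code. A minimal sketch of that header check; the offsets here are illustrative stand-ins for the constants in log_event.h.

// Illustrative sketch of the header sanity check done before constructing
// an event object from a raw buffer.
#include <cstdint>
#include <cstddef>

static uint32_t read_le32(const unsigned char *p)   // uint4korr equivalent
{
  return (uint32_t) p[0] | ((uint32_t) p[1] << 8) |
         ((uint32_t) p[2] << 16) | ((uint32_t) p[3] << 24);
}

// Illustrative offsets; the real values come from log_event.h.
static const std::size_t EVENT_TYPE_OFFSET_ILL= 4;
static const std::size_t EVENT_LEN_OFFSET_ILL= 9;

bool header_looks_sane(const unsigned char *buf, std::size_t bytes_read)
{
  if (bytes_read < EVENT_LEN_OFFSET_ILL + 4)
    return false;                       // partial read
  // The length recorded inside the event must match what we actually read.
  return read_le32(buf + EVENT_LEN_OFFSET_ILL) == bytes_read;
}
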
-void Start_log_event::print(FILE* file, bool short_form, char* last_db) -{ - if (short_form) - return; - - print_header(file); - fprintf(file, "\tStart: binlog v %d, server v %s created ", binlog_version, - server_version); - print_timestamp(file); - if (created) - fprintf(file," at startup"); - fputc('\n', file); - fflush(file); -} - -void Stop_log_event::print(FILE* file, bool short_form, char* last_db) -{ - if (short_form) - return; - print_header(file); - fprintf(file, "\tStop\n"); - fflush(file); -} +/* + Log_event::set_log_pos() +*/ -void Rotate_log_event::print(FILE* file, bool short_form, char* last_db) +#ifndef MYSQL_CLIENT +void Log_event::set_log_pos(MYSQL_LOG* log) { - char buf[22]; - if (short_form) - return; - - print_header(file); - fprintf(file, "\tRotate to "); - if (new_log_ident) - my_fwrite(file, (byte*) new_log_ident, (uint)ident_len, - MYF(MY_NABP | MY_WME)); - fprintf(file, " pos: %s", llstr(pos, buf)); - if (flags & LOG_EVENT_FORCED_ROTATE_F) - fprintf(file," forced by master"); - fputc('\n', file); - fflush(file); + if (!log_pos) + log_pos = my_b_tell(&log->log_file); } +#endif /* !MYSQL_CLIENT */ -#endif /* #ifdef MYSQL_CLIENT */ +/************************************************************************** + Query_log_event methods +**************************************************************************/ -Start_log_event::Start_log_event(const char* buf, - bool old_format) - :Log_event(buf, old_format) -{ - buf += (old_format) ? OLD_HEADER_LEN : LOG_EVENT_HEADER_LEN; - binlog_version = uint2korr(buf+ST_BINLOG_VER_OFFSET); - memcpy(server_version, buf+ST_SERVER_VER_OFFSET, - ST_SERVER_VER_LEN); - created = uint4korr(buf+ST_CREATED_OFFSET); -} - -int Start_log_event::write_data(IO_CACHE* file) -{ - char buff[START_HEADER_LEN]; - int2store(buff + ST_BINLOG_VER_OFFSET,binlog_version); - memcpy(buff + ST_SERVER_VER_OFFSET,server_version,ST_SERVER_VER_LEN); - int4store(buff + ST_CREATED_OFFSET,created); - return (my_b_safe_write(file, (byte*) buff, sizeof(buff)) ? -1 : 0); -} +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) +/* + Query_log_event::pack_info() +*/ -Rotate_log_event::Rotate_log_event(const char* buf, int event_len, - bool old_format) - :Log_event(buf, old_format),new_log_ident(NULL),alloced(0) +void Query_log_event::pack_info(Protocol *protocol) { - // The caller will ensure that event_len is what we have at EVENT_LEN_OFFSET - int header_size = (old_format) ? 
OLD_HEADER_LEN : LOG_EVENT_HEADER_LEN; - uint ident_offset; - if (event_len < header_size) + char *buf, *pos; + if (!(buf= my_malloc(9 + db_len + q_len, MYF(MY_WME)))) return; - buf += header_size; - if (old_format) + pos= buf; + if (!(flags & LOG_EVENT_SUPPRESS_USE_F) + && db && db_len) { - ident_len = (uint)(event_len - OLD_HEADER_LEN); - pos = 4; - ident_offset = 0; + pos= strmov(buf, "use `"); + memcpy(pos, db, db_len); + pos= strmov(pos+db_len, "`; "); } - else + if (query && q_len) { - ident_len = (uint)(event_len - ROTATE_EVENT_OVERHEAD); - pos = uint8korr(buf + R_POS_OFFSET); - ident_offset = ROTATE_HEADER_LEN; + memcpy(pos, query, q_len); + pos+= q_len; } - set_if_smaller(ident_len,FN_REFLEN-1); - if (!(new_log_ident= my_strdup_with_length((byte*) buf + - ident_offset, - (uint) ident_len, - MYF(MY_WME)))) - return; - alloced = 1; + protocol->store(buf, pos-buf, &my_charset_bin); + my_free(buf, MYF(MY_ALLOW_ZERO_PTR)); } +#endif -int Rotate_log_event::write_data(IO_CACHE* file) +/* + Query_log_event::write() +*/ + +int Query_log_event::write(IO_CACHE* file) { - char buf[ROTATE_HEADER_LEN]; - int8store(buf + R_POS_OFFSET, pos); - return (my_b_safe_write(file, (byte*)buf, ROTATE_HEADER_LEN) || - my_b_safe_write(file, (byte*)new_log_ident, (uint) ident_len)); + return query ? Log_event::write(file) : -1; } +/* + Query_log_event::write_data() +*/ + +int Query_log_event::write_data(IO_CACHE* file) +{ + char buf[QUERY_HEADER_LEN]; + + if (!query) + return -1; + + /* + We want to store the thread id: + (- as an information for the user when he reads the binlog) + - if the query uses temporary table: for the slave SQL thread to know to + which master connection the temp table belongs. + Now imagine we (write_data()) are called by the slave SQL thread (we are + logging a query executed by this thread; the slave runs with + --log-slave-updates). Then this query will be logged with + thread_id=the_thread_id_of_the_SQL_thread. Imagine that 2 temp tables of + the same name were created simultaneously on the master (in the master + binlog you have + CREATE TEMPORARY TABLE t; (thread 1) + CREATE TEMPORARY TABLE t; (thread 2) + ...) + then in the slave's binlog there will be + CREATE TEMPORARY TABLE t; (thread_id_of_the_slave_SQL_thread) + CREATE TEMPORARY TABLE t; (thread_id_of_the_slave_SQL_thread) + which is bad (same thread id!). + + To avoid this, we log the thread's thread id EXCEPT for the SQL + slave thread for which we log the original (master's) thread id. + Now this moves the bug: what happens if the thread id on the + master was 10 and when the slave replicates the query, a + connection number 10 is opened by a normal client on the slave, + and updates a temp table of the same name? We get a problem + again. To avoid this, in the handling of temp tables (sql_base.cc) + we use thread_id AND server_id. TODO when this is merged into + 4.1: in 4.1, slave_proxy_id has been renamed to pseudo_thread_id + and is a session variable: that's to make mysqlbinlog work with + temp tables. We probably need to introduce + + SET PSEUDO_SERVER_ID + for mysqlbinlog in 4.1. mysqlbinlog would print: + SET PSEUDO_SERVER_ID= + SET PSEUDO_THREAD_ID= + for each query using temp tables. + */ + int4store(buf + Q_THREAD_ID_OFFSET, slave_proxy_id); + int4store(buf + Q_EXEC_TIME_OFFSET, exec_time); + buf[Q_DB_LEN_OFFSET] = (char) db_len; + int2store(buf + Q_ERR_CODE_OFFSET, error_code); + + return (my_b_safe_write(file, (byte*) buf, QUERY_HEADER_LEN) || + my_b_safe_write(file, (db) ? 
(byte*) db : (byte*)"", db_len + 1) || + my_b_safe_write(file, (byte*) query, q_len)) ? -1 : 0; +} + + +/* + Query_log_event::Query_log_event() +*/ + #ifndef MYSQL_CLIENT Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg, - ulong query_length, bool using_trans) - :Log_event(thd_arg, 0, using_trans), data_buf(0), query(query_arg), + ulong query_length, bool using_trans, + bool suppress_use) + :Log_event(thd_arg, + ((thd_arg->tmp_table_used ? LOG_EVENT_THREAD_SPECIFIC_F : 0) + | (suppress_use ? LOG_EVENT_SUPPRESS_USE_F : 0)), + using_trans), + data_buf(0), query(query_arg), db(thd_arg->db), q_len((uint32) query_length), error_code(thd_arg->killed ? ((thd_arg->system_thread & SYSTEM_THREAD_DELAYED_INSERT) ? 0 : ER_SERVER_SHUTDOWN) : thd_arg->net.last_errno), thread_id(thd_arg->thread_id), /* save the original thread id; we already know the server id */ - slave_proxy_id(thd_arg->slave_proxy_id) - + slave_proxy_id(thd_arg->variables.pseudo_thread_id) { time_t end_time; time(&end_time); exec_time = (ulong) (end_time - thd->start_time); db_len = (db) ? (uint32) strlen(db) : 0; } -#endif +#endif /* MYSQL_CLIENT */ + + +/* + Query_log_event::Query_log_event() +*/ Query_log_event::Query_log_event(const char* buf, int event_len, bool old_format) @@ -880,11 +941,15 @@ Query_log_event::Query_log_event(const char* buf, int event_len, } -#ifdef MYSQL_CLIENT +/* + Query_log_event::print() +*/ +#ifdef MYSQL_CLIENT void Query_log_event::print(FILE* file, bool short_form, char* last_db) { char buff[40],*end; // Enough for SET TIMESTAMP + const uint set_len= sizeof("SET ONE_SHOT CHARACTER_SET_CLIENT=") - 1; if (!short_form) { print_header(file); @@ -894,161 +959,449 @@ void Query_log_event::print(FILE* file, bool short_form, char* last_db) bool different_db= 1; - if (db && last_db) + if (!(flags & LOG_EVENT_SUPPRESS_USE_F)) { - if (different_db= memcmp(last_db, db, db_len + 1)) - memcpy(last_db, db, db_len + 1); + if (db && last_db) + { + if (different_db= memcmp(last_db, db, db_len + 1)) + memcpy(last_db, db, db_len + 1); + } + + if (db && db[0] && different_db) + { + fprintf(file, "use %s;\n", db); + } } - - if (db && db[0] && different_db) - fprintf(file, "use %s;\n", db); + end=int10_to_str((long) when, strmov(buff,"SET TIMESTAMP="),10); *end++=';'; *end++='\n'; my_fwrite(file, (byte*) buff, (uint) (end-buff),MYF(MY_NABP | MY_WME)); + if (flags & LOG_EVENT_THREAD_SPECIFIC_F) + fprintf(file,"SET @@session.pseudo_thread_id=%lu;\n",(ulong)thread_id); + /* charset_name command for mysql client */ + if (!strncmp(query, "SET ONE_SHOT CHARACTER_SET_CLIENT=", set_len)) + { + char * endptr; + int cs_number= strtoul(query + set_len, &endptr, 10); + DBUG_ASSERT(*endptr == ','); + CHARSET_INFO *cs_info= get_charset(cs_number, MYF(MY_WME)); + if (cs_info) { + fprintf(file, "/*!\\C %s */;\n", cs_info->csname); + } + } my_fwrite(file, (byte*) query, q_len, MYF(MY_NABP | MY_WME)); fprintf(file, ";\n"); } -#endif +#endif /* MYSQL_CLIENT */ -int Query_log_event::write_data(IO_CACHE* file) +/* + Query_log_event::exec_event() +*/ + +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) +int Query_log_event::exec_event(struct st_relay_log_info* rli) { - if (!query) - return -1; - - char buf[QUERY_HEADER_LEN]; + int expected_error,actual_error= 0; + thd->db_length= db_len; + thd->db= (char*) rewrite_db(db, &thd->db_length); + /* - We want to store the thread id: - (- as an information for the user when he reads the binlog) - - if the query uses temporary table: for the slave SQL thread to know to - which 
master connection the temp table belongs. - Now imagine we (write_data()) are called by the slave SQL thread (we are - logging a query executed by this thread; the slave runs with - --log-slave-updates). Then this query will be logged with - thread_id=the_thread_id_of_the_SQL_thread. Imagine that 2 temp tables of the - same name were created simultaneously on the master (in the master binlog - you have - CREATE TEMPORARY TABLE t; (thread 1) - CREATE TEMPORARY TABLE t; (thread 2) - ...) - then in the slave's binlog there will be - CREATE TEMPORARY TABLE t; (thread_id_of_the_slave_SQL_thread) - CREATE TEMPORARY TABLE t; (thread_id_of_the_slave_SQL_thread) - which is bad (same thread id!). - To avoid this, we log the thread's thread id EXCEPT for the SQL slave thread - for which we log the original (master's) thread id. - Now this moves the bug: what happens if the thread id on the master was 10 - and when the slave replicates the query, a connection number 10 is opened by - a normal client on the slave, and updates a temp table of the same name? We - get a problem again. To avoid this, in the handling of temp tables - (sql_base.cc) we use thread_id AND server_id. - TODO when this is merged into 4.1: in 4.1, slave_proxy_id has been renamed - to pseudo_thread_id and is a session variable: that's to make mysqlbinlog - work with temp tables. We probably need to introduce - SET PSEUDO_SERVER_ID - for mysqlbinlog in 4.1. mysqlbinlog would print: - SET PSEUDO_SERVER_ID= - SET PSEUDO_THREAD_ID= - for each query using temp tables. + InnoDB internally stores the master log position it has processed so far; + position to store is of the END of the current log event. */ - int4store(buf + Q_THREAD_ID_OFFSET, slave_proxy_id); - int4store(buf + Q_EXEC_TIME_OFFSET, exec_time); - buf[Q_DB_LEN_OFFSET] = (char) db_len; - int2store(buf + Q_ERR_CODE_OFFSET, error_code); +#if MYSQL_VERSION_ID < 50000 + rli->future_group_master_log_pos= log_pos + get_event_len() - + (rli->mi->old_format ? (LOG_EVENT_HEADER_LEN - OLD_HEADER_LEN) : 0); +#else + /* In 5.0 we store the end_log_pos in the relay log so no problem */ + rli->future_group_master_log_pos= log_pos; +#endif + clear_all_errors(thd, rli); - return (my_b_safe_write(file, (byte*) buf, QUERY_HEADER_LEN) || - my_b_safe_write(file, (db) ? (byte*) db : (byte*)"", db_len + 1) || - my_b_safe_write(file, (byte*) query, q_len)) ? -1 : 0; -} + /* + Note: We do not need to execute reset_one_shot_variables() if this + db_ok() test fails. + Reason: The db stored in binlog events is the same for SET and for + its companion query. If the SET is ignored because of + db_ok(), the companion query will also be ignored, and if + the companion query is ignored in the db_ok() test of + ::exec_event(), then the companion SET also have so we + don't need to reset_one_shot_variables(). + */ + if (db_ok(thd->db, replicate_do_db, replicate_ignore_db)) + { + thd->set_time((time_t)when); + thd->query_length= q_len; + thd->query = (char*)query; + VOID(pthread_mutex_lock(&LOCK_thread_count)); + thd->query_id = query_id++; + VOID(pthread_mutex_unlock(&LOCK_thread_count)); + thd->variables.pseudo_thread_id= thread_id; // for temp tables -Intvar_log_event::Intvar_log_event(const char* buf, bool old_format) - :Log_event(buf, old_format) -{ - buf += (old_format) ? 
OLD_HEADER_LEN : LOG_EVENT_HEADER_LEN; - type = buf[I_TYPE_OFFSET]; - val = uint8korr(buf+I_VAL_OFFSET); -} + DBUG_PRINT("query",("%s",thd->query)); + if (ignored_error_code((expected_error= error_code)) || + !check_expected_error(thd,rli,expected_error)) + mysql_parse(thd, thd->query, q_len); + else + { + /* + The query got a really bad error on the master (thread killed etc), + which could be inconsistent. Parse it to test the table names: if the + replicate-*-do|ignore-table rules say "this query must be ignored" then + we exit gracefully; otherwise we warn about the bad error and tell DBA + to check/fix it. + */ + if (mysql_test_parse_for_slave(thd, thd->query, q_len)) + clear_all_errors(thd, rli); /* Can ignore query */ + else + { + slave_print_error(rli,expected_error, + "\ +Query partially completed on the master (error on master: %d) \ +and was aborted. There is a chance that your master is inconsistent at this \ +point. If you are sure that your master is ok, run this query manually on the \ +slave and then restart the slave with SET GLOBAL SQL_SLAVE_SKIP_COUNTER=1; \ +START SLAVE; . Query: '%s'", expected_error, thd->query); + thd->query_error= 1; + } + goto end; + } -const char* Intvar_log_event::get_var_type_name() -{ - switch(type) { - case LAST_INSERT_ID_EVENT: return "LAST_INSERT_ID"; - case INSERT_ID_EVENT: return "INSERT_ID"; - default: /* impossible */ return "UNKNOWN"; - } + /* If the query was not ignored, it is printed to the general log */ + if (thd->net.last_errno != ER_SLAVE_IGNORED_TABLE) + mysql_log.write(thd,COM_QUERY,"%s",thd->query); + + /* + If we expected a non-zero error code, and we don't get the same error + code, and none of them should be ignored. + */ + DBUG_PRINT("info",("expected_error: %d last_errno: %d", + expected_error, thd->net.last_errno)); + if ((expected_error != (actual_error= thd->net.last_errno)) && + expected_error && + !ignored_error_code(actual_error) && + !ignored_error_code(expected_error)) + { + slave_print_error(rli, 0, + "\ +Query caused different errors on master and slave. \ +Error on master: '%s' (%d), Error on slave: '%s' (%d). \ +Default database: '%s'. Query: '%s'", + ER_SAFE(expected_error), + expected_error, + actual_error ? thd->net.last_error: "no error", + actual_error, + print_slave_db_safe(thd->db), query); + thd->query_error= 1; + } + /* + If we get the same error code as expected, or they should be ignored. + */ + else if (expected_error == actual_error || + ignored_error_code(actual_error)) + { + DBUG_PRINT("info",("error ignored")); + clear_all_errors(thd, rli); + } + /* + Other cases: mostly we expected no error and get one. + */ + else if (thd->query_error || thd->is_fatal_error) + { + slave_print_error(rli,actual_error, + "Error '%s' on query. Default database: '%s'. Query: '%s'", + (actual_error ? thd->net.last_error : + "unexpected success or fatal error"), + print_slave_db_safe(thd->db), query); + thd->query_error= 1; + } + } /* End of if (db_ok(... */ + +end: + VOID(pthread_mutex_lock(&LOCK_thread_count)); + thd->db= 0; // prevent db from being freed + thd->query= 0; // just to be sure + thd->query_length= thd->db_length =0; + VOID(pthread_mutex_unlock(&LOCK_thread_count)); + close_thread_tables(thd); + free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC)); + /* + If there was an error we stop. Otherwise we increment positions. Note that + we will not increment group* positions if we are just after a SET + ONE_SHOT, because SET ONE_SHOT should not be separated from its following + updating query. 
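
The error handling above reduces to a small decision: stop only when the master recorded an error, the slave produced a different one, and neither code is configured as ignorable; otherwise clear the errors and continue. A compact sketch of that decision, with a local ignorable() standing in for ignored_error_code() and --slave-skip-errors.

// Illustrative sketch of the error-reconciliation rule in
// Query_log_event::exec_event().
#include <set>

static const std::set<int> ignorable_codes= {1062 /* illustrative entry */};
static bool ignorable(int code) { return ignorable_codes.count(code) != 0; }

enum SlaveOutcome { OK_CONTINUE, STOP_WITH_ERROR };

SlaveOutcome reconcile(int expected_error, int actual_error)
{
  // Master recorded an error, slave produced a different one, and neither
  // is configured as ignorable: stop, the data may have diverged.
  if (expected_error && expected_error != actual_error &&
      !ignorable(expected_error) && !ignorable(actual_error))
    return STOP_WITH_ERROR;
  // Same error as on the master, or an ignorable one: clear and continue.
  if (expected_error == actual_error || ignorable(actual_error))
    return OK_CONTINUE;
  // Otherwise: an unexpected error where the master had none stops us.
  return actual_error ? STOP_WITH_ERROR : OK_CONTINUE;
}
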
+ */ + return (thd->query_error ? thd->query_error : + (thd->one_shot_set ? (rli->inc_event_relay_log_pos(get_event_len()),0) : + Log_event::exec_event(rli))); } +#endif -int Intvar_log_event::write_data(IO_CACHE* file) + +/************************************************************************** + Start_log_event methods +**************************************************************************/ + +/* + Start_log_event::pack_info() +*/ + +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) +void Start_log_event::pack_info(Protocol *protocol) { - char buf[9]; - buf[I_TYPE_OFFSET] = type; - int8store(buf + I_VAL_OFFSET, val); - return my_b_safe_write(file, (byte*) buf, sizeof(buf)); + char buf[12 + ST_SERVER_VER_LEN + 14 + 22], *pos; + pos= strmov(buf, "Server ver: "); + pos= strmov(pos, server_version); + pos= strmov(pos, ", Binlog ver: "); + pos= int10_to_str(binlog_version, pos, 10); + protocol->store(buf, (uint) (pos-buf), &my_charset_bin); } +#endif + + +/* + Start_log_event::print() +*/ #ifdef MYSQL_CLIENT -void Intvar_log_event::print(FILE* file, bool short_form, char* last_db) +void Start_log_event::print(FILE* file, bool short_form, char* last_db) { - char llbuff[22]; - const char *msg; - LINT_INIT(msg); - - if (!short_form) - { - print_header(file); - fprintf(file, "\tIntvar\n"); - } + if (short_form) + return; - fprintf(file, "SET "); - switch (type) { - case LAST_INSERT_ID_EVENT: - msg="LAST_INSERT_ID"; - break; - case INSERT_ID_EVENT: - msg="INSERT_ID"; - break; - } - fprintf(file, "%s=%s;\n", msg, llstr(val,llbuff)); + print_header(file); + fprintf(file, "\tStart: binlog v %d, server v %s created ", binlog_version, + server_version); + print_timestamp(file); + if (created) + fprintf(file," at startup"); + fputc('\n', file); fflush(file); } -#endif +#endif /* MYSQL_CLIENT */ -/***************************************************************************** - * - * Rand log event - * - ****************************************************************************/ -Rand_log_event::Rand_log_event(const char* buf, bool old_format) +/* + Start_log_event::Start_log_event() +*/ + +Start_log_event::Start_log_event(const char* buf, + bool old_format) :Log_event(buf, old_format) { buf += (old_format) ? OLD_HEADER_LEN : LOG_EVENT_HEADER_LEN; - seed1 = uint8korr(buf+RAND_SEED1_OFFSET); - seed2 = uint8korr(buf+RAND_SEED2_OFFSET); + binlog_version = uint2korr(buf+ST_BINLOG_VER_OFFSET); + memcpy(server_version, buf+ST_SERVER_VER_OFFSET, + ST_SERVER_VER_LEN); + created = uint4korr(buf+ST_CREATED_OFFSET); } -int Rand_log_event::write_data(IO_CACHE* file) + +/* + Start_log_event::write_data() +*/ + +int Start_log_event::write_data(IO_CACHE* file) { - char buf[16]; - int8store(buf + RAND_SEED1_OFFSET, seed1); - int8store(buf + RAND_SEED2_OFFSET, seed2); - return my_b_safe_write(file, (byte*) buf, sizeof(buf)); + char buff[START_HEADER_LEN]; + int2store(buff + ST_BINLOG_VER_OFFSET,binlog_version); + memcpy(buff + ST_SERVER_VER_OFFSET,server_version,ST_SERVER_VER_LEN); + int4store(buff + ST_CREATED_OFFSET,created); + return (my_b_safe_write(file, (byte*) buff, sizeof(buff)) ? -1 : 0); } -#ifdef MYSQL_CLIENT -void Rand_log_event::print(FILE* file, bool short_form, char* last_db) +/* + Start_log_event::exec_event() + + The master started + + IMPLEMENTATION + - To handle the case where the master died without having time to write + DROP TEMPORARY TABLE, DO RELEASE_LOCK (prepared statements' deletion is + TODO), we clean up all temporary tables that we got, if we are sure we + can (see below). 
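
The Start_log_event constructor and write_data() above read and write a small fixed layout: a 2-byte binlog version, a fixed-width server version string and a 4-byte 'created' timestamp, all little-endian via int2store/uint2korr and friends. A self-contained sketch of that layout; the field width and ordering here are illustrative stand-ins for the ST_* constants.

// Illustrative sketch of the Start event body layout used above.
#include <cstdint>
#include <cstring>

enum { ST_SERVER_VER_LEN_ILL = 50 };      // illustrative width

struct StartBodySketch {
  uint16_t binlog_version;
  char     server_version[ST_SERVER_VER_LEN_ILL];
  uint32_t created;
};

void write_body(unsigned char *out, const StartBodySketch &s)
{
  out[0]= (unsigned char) (s.binlog_version & 0xff);        // int2store
  out[1]= (unsigned char) (s.binlog_version >> 8);
  memcpy(out + 2, s.server_version, ST_SERVER_VER_LEN_ILL);
  for (int i= 0; i < 4; i++)                                // int4store
    out[2 + ST_SERVER_VER_LEN_ILL + i]= (unsigned char) (s.created >> (8 * i));
}

void read_body(const unsigned char *in, StartBodySketch *s)
{
  s->binlog_version= (uint16_t) (in[0] | (in[1] << 8));     // uint2korr
  memcpy(s->server_version, in + 2, ST_SERVER_VER_LEN_ILL);
  s->created= 0;                                            // uint4korr
  for (int i= 3; i >= 0; i--)
    s->created= (s->created << 8) | in[2 + ST_SERVER_VER_LEN_ILL + i];
}
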
+ + TODO + - Remove all active user locks. + Guilhem 2003-06: this is true but not urgent: the worst it can cause is + the use of a bit of memory for a user lock which will not be used + anymore. If the user lock is later used, the old one will be released. In + other words, no deadlock problem. +*/ + +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) +int Start_log_event::exec_event(struct st_relay_log_info* rli) { - char llbuff[22],llbuff2[22]; - if (!short_form) + DBUG_ENTER("Start_log_event::exec_event"); + + /* + If the I/O thread has not started, mi->old_format is BINLOG_FORMAT_CURRENT + (that's what the MASTER_INFO constructor does), so the test below is not + perfect at all. + */ + switch (rli->mi->old_format) { + case BINLOG_FORMAT_CURRENT: + /* + This is 4.x, so a Start_log_event is only at master startup, + so we are sure the master has restarted and cleared his temp tables. + */ + close_temporary_tables(thd); + cleanup_load_tmpdir(); + /* + As a transaction NEVER spans on 2 or more binlogs: + if we have an active transaction at this point, the master died while + writing the transaction to the binary log, i.e. while flushing the binlog + cache to the binlog. As the write was started, the transaction had been + committed on the master, so we lack of information to replay this + transaction on the slave; all we can do is stop with error. + */ + if (thd->options & OPTION_BEGIN) + { + slave_print_error(rli, 0, "\ +Rolling back unfinished transaction (no COMMIT or ROLLBACK) from relay log. \ +A probable cause is that the master died while writing the transaction to its \ +binary log."); + return(1); + } + break; + + /* + Now the older formats; in that case load_tmpdir is cleaned up by the I/O + thread. + */ + case BINLOG_FORMAT_323_LESS_57: + /* + Cannot distinguish a Start_log_event generated at master startup and + one generated by master FLUSH LOGS, so cannot be sure temp tables + have to be dropped. So do nothing. + */ + break; + case BINLOG_FORMAT_323_GEQ_57: + /* + Can distinguish, based on the value of 'created', + which was generated at master startup. 
+ */ + if (created) + close_temporary_tables(thd); + break; + default: + /* this case is impossible */ + return 1; + } + + DBUG_RETURN(Log_event::exec_event(rli)); +} +#endif /* defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) */ + +/************************************************************************** + Load_log_event methods +**************************************************************************/ + +/* + Load_log_event::pack_info() +*/ + +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) +void Load_log_event::pack_info(Protocol *protocol) +{ + char *buf, *pos; + uint buf_len; + + buf_len= + 5 + db_len + 3 + // "use DB; " + 18 + fname_len + 2 + // "LOAD DATA INFILE 'file''" + 7 + // LOCAL + 9 + // " REPLACE or IGNORE " + 13 + table_name_len*2 + // "INTO TABLE `table`" + 21 + sql_ex.field_term_len*4 + 2 + // " FIELDS TERMINATED BY 'str'" + 23 + sql_ex.enclosed_len*4 + 2 + // " OPTIONALLY ENCLOSED BY 'str'" + 12 + sql_ex.escaped_len*4 + 2 + // " ESCAPED BY 'str'" + 21 + sql_ex.line_term_len*4 + 2 + // " FIELDS TERMINATED BY 'str'" + 19 + sql_ex.line_start_len*4 + 2 + // " LINES STARTING BY 'str'" + 15 + 22 + // " IGNORE xxx LINES" + 3 + (num_fields-1)*2 + field_block_len; // " (field1, field2, ...)" + + if (!(buf= my_malloc(buf_len, MYF(MY_WME)))) + return; + pos= buf; + if (db && db_len) { - print_header(file); - fprintf(file, "\tRand\n"); + pos= strmov(pos, "use `"); + memcpy(pos, db, db_len); + pos= strmov(pos+db_len, "`; "); } - fprintf(file, "SET @@RAND_SEED1=%s, @@RAND_SEED2=%s;\n", - llstr(seed1, llbuff),llstr(seed2, llbuff2)); - fflush(file); + + pos= strmov(pos, "LOAD DATA "); + if (check_fname_outside_temp_buf()) + pos= strmov(pos, "LOCAL "); + pos= strmov(pos, "INFILE '"); + memcpy(pos, fname, fname_len); + pos= strmov(pos+fname_len, "' "); + + if (sql_ex.opt_flags & REPLACE_FLAG) + pos= strmov(pos, " REPLACE "); + else if (sql_ex.opt_flags & IGNORE_FLAG) + pos= strmov(pos, " IGNORE "); + + pos= strmov(pos ,"INTO TABLE `"); + memcpy(pos, table_name, table_name_len); + pos+= table_name_len; + + /* We have to create all optinal fields as the default is not empty */ + pos= strmov(pos, "` FIELDS TERMINATED BY "); + pos= pretty_print_str(pos, sql_ex.field_term, sql_ex.field_term_len); + if (sql_ex.opt_flags & OPT_ENCLOSED_FLAG) + pos= strmov(pos, " OPTIONALLY "); + pos= strmov(pos, " ENCLOSED BY "); + pos= pretty_print_str(pos, sql_ex.enclosed, sql_ex.enclosed_len); + + pos= strmov(pos, " ESCAPED BY "); + pos= pretty_print_str(pos, sql_ex.escaped, sql_ex.escaped_len); + + pos= strmov(pos, " LINES TERMINATED BY "); + pos= pretty_print_str(pos, sql_ex.line_term, sql_ex.line_term_len); + if (sql_ex.line_start_len) + { + pos= strmov(pos, " STARTING BY "); + pos= pretty_print_str(pos, sql_ex.line_start, sql_ex.line_start_len); + } + + if ((long) skip_lines > 0) + { + pos= strmov(pos, " IGNORE "); + pos= longlong10_to_str((longlong) skip_lines, pos, 10); + pos= strmov(pos," LINES "); + } + + if (num_fields) + { + uint i; + const char *field= fields; + pos= strmov(pos, " ("); + for (i = 0; i < num_fields; i++) + { + if (i) + { + *pos++= ' '; + *pos++= ','; + } + memcpy(pos, field, field_lens[i]); + pos+= field_lens[i]; + field+= field_lens[i] + 1; + } + *pos++= ')'; + } + + protocol->store(buf, pos-buf, &my_charset_bin); + my_free(buf, MYF(0)); } -#endif +#endif /* defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) */ + + +/* + Load_log_event::write_data_header() +*/ int Load_log_event::write_data_header(IO_CACHE* file) { @@ -1062,6 +1415,11 @@ int 
Load_log_event::write_data_header(IO_CACHE* file) return my_b_safe_write(file, (byte*)buf, LOAD_HEADER_LEN); } + +/* + Load_log_event::write_data_body() +*/ + int Load_log_event::write_data_body(IO_CACHE* file) { if (sql_ex.write_data(file)) @@ -1078,106 +1436,20 @@ int Load_log_event::write_data_body(IO_CACHE* file) } - -static bool write_str(IO_CACHE *file, char *str, byte length) -{ - return (my_b_safe_write(file, &length, 1) || - my_b_safe_write(file, (byte*) str, (int) length)); -} - - -int sql_ex_info::write_data(IO_CACHE* file) -{ - if (new_format()) - { - return (write_str(file, field_term, field_term_len) || - write_str(file, enclosed, enclosed_len) || - write_str(file, line_term, line_term_len) || - write_str(file, line_start, line_start_len) || - write_str(file, escaped, escaped_len) || - my_b_safe_write(file,(byte*) &opt_flags,1)); - } - else - { - old_sql_ex old_ex; - old_ex.field_term= *field_term; - old_ex.enclosed= *enclosed; - old_ex.line_term= *line_term; - old_ex.line_start= *line_start; - old_ex.escaped= *escaped; - old_ex.opt_flags= opt_flags; - old_ex.empty_flags=empty_flags; - return my_b_safe_write(file, (byte*) &old_ex, sizeof(old_ex)); - } -} - - -static inline int read_str(char * &buf, char *buf_end, char * &str, - uint8 &len) -{ - if (buf + (uint) (uchar) *buf >= buf_end) - return 1; - len = (uint8) *buf; - str= buf+1; - buf+= (uint) len+1; - return 0; -} - - -char* sql_ex_info::init(char* buf,char* buf_end,bool use_new_format) -{ - cached_new_format = use_new_format; - if (use_new_format) - { - empty_flags=0; - /* - The code below assumes that buf will not disappear from - under our feet during the lifetime of the event. This assumption - holds true in the slave thread if the log is in new format, but is not - the case when we have old format because we will be reusing net buffer - to read the actual file before we write out the Create_file event. - */ - if (read_str(buf, buf_end, field_term, field_term_len) || - read_str(buf, buf_end, enclosed, enclosed_len) || - read_str(buf, buf_end, line_term, line_term_len) || - read_str(buf, buf_end, line_start, line_start_len) || - read_str(buf, buf_end, escaped, escaped_len)) - return 0; - opt_flags = *buf++; - } - else - { - field_term_len= enclosed_len= line_term_len= line_start_len= escaped_len=1; - field_term = buf++; // Use first byte in string - enclosed= buf++; - line_term= buf++; - line_start= buf++; - escaped= buf++; - opt_flags = *buf++; - empty_flags= *buf++; - if (empty_flags & FIELD_TERM_EMPTY) - field_term_len=0; - if (empty_flags & ENCLOSED_EMPTY) - enclosed_len=0; - if (empty_flags & LINE_TERM_EMPTY) - line_term_len=0; - if (empty_flags & LINE_START_EMPTY) - line_start_len=0; - if (empty_flags & ESCAPED_EMPTY) - escaped_len=0; - } - return buf; -} - +/* + Load_log_event::Load_log_event() +*/ #ifndef MYSQL_CLIENT -Load_log_event::Load_log_event(THD* thd_arg, sql_exchange* ex, - const char* db_arg, const char* table_name_arg, - List<Item>& fields_arg, +Load_log_event::Load_log_event(THD *thd_arg, sql_exchange *ex, + const char *db_arg, const char *table_name_arg, + List<Item> &fields_arg, enum enum_duplicates handle_dup, - bool using_trans) - :Log_event(thd_arg, 0, using_trans),thread_id(thd_arg->thread_id), - slave_proxy_id(thd_arg->slave_proxy_id), + bool ignore, bool using_trans) + :Log_event(thd_arg, !thd_arg->tmp_table_used ? 
+ 0 : LOG_EVENT_THREAD_SPECIFIC_F, using_trans), + thread_id(thd_arg->thread_id), + slave_proxy_id(thd_arg->variables.pseudo_thread_id), num_fields(0),fields(0), field_lens(0),field_block_len(0), table_name(table_name_arg ? table_name_arg : ""), @@ -1208,24 +1480,29 @@ Load_log_event::Load_log_event(THD* thd_arg, sql_exchange* ex, if (ex->opt_enclosed) sql_ex.opt_flags|= OPT_ENCLOSED_FLAG; - sql_ex.empty_flags = 0; + sql_ex.empty_flags= 0; switch (handle_dup) { - case DUP_IGNORE: sql_ex.opt_flags|= IGNORE_FLAG; break; - case DUP_REPLACE: sql_ex.opt_flags|= REPLACE_FLAG; break; - case DUP_ERROR: break; + case DUP_REPLACE: + sql_ex.opt_flags|= REPLACE_FLAG; + break; + case DUP_UPDATE: // Impossible here + case DUP_ERROR: + break; } + if (ignore) + sql_ex.opt_flags|= IGNORE_FLAG; if (!ex->field_term->length()) - sql_ex.empty_flags|= FIELD_TERM_EMPTY; + sql_ex.empty_flags |= FIELD_TERM_EMPTY; if (!ex->enclosed->length()) - sql_ex.empty_flags|= ENCLOSED_EMPTY; + sql_ex.empty_flags |= ENCLOSED_EMPTY; if (!ex->line_term->length()) - sql_ex.empty_flags|= LINE_TERM_EMPTY; + sql_ex.empty_flags |= LINE_TERM_EMPTY; if (!ex->line_start->length()) - sql_ex.empty_flags|= LINE_START_EMPTY; + sql_ex.empty_flags |= LINE_START_EMPTY; if (!ex->escaped->length()) - sql_ex.empty_flags|= ESCAPED_EMPTY; + sql_ex.empty_flags |= ESCAPED_EMPTY; skip_lines = ex->skip_lines; @@ -1245,25 +1522,33 @@ Load_log_event::Load_log_event(THD* thd_arg, sql_exchange* ex, field_lens = (const uchar*)field_lens_buf.ptr(); fields = fields_buf.ptr(); } - -#endif +#endif /* !MYSQL_CLIENT */ /* - The caller must do buf[event_len] = 0 before he starts using the - constructed event. + Load_log_event::Load_log_event() + + NOTE + The caller must do buf[event_len] = 0 before he starts using the + constructed event. */ -Load_log_event::Load_log_event(const char* buf, int event_len, +Load_log_event::Load_log_event(const char *buf, int event_len, bool old_format) - :Log_event(buf, old_format),num_fields(0),fields(0), - field_lens(0),field_block_len(0), - table_name(0),db(0),fname(0),local_fname(FALSE) + :Log_event(buf, old_format), num_fields(0), fields(0), + field_lens(0), field_block_len(0), + table_name(0), db(0), fname(0), local_fname(FALSE) { - if (!event_len) // derived class, will call copy_log_event() itself - return; - copy_log_event(buf, event_len, old_format); + DBUG_ENTER("Load_log_event"); + if (event_len) // derived class, will call copy_log_event() itself + copy_log_event(buf, event_len, old_format); + DBUG_VOID_RETURN; } + +/* + Load_log_event::copy_log_event() +*/ + int Load_log_event::copy_log_event(const char *buf, ulong event_len, bool old_format) { @@ -1271,6 +1556,8 @@ int Load_log_event::copy_log_event(const char *buf, ulong event_len, char* buf_end = (char*)buf + event_len; uint header_len= old_format ? OLD_HEADER_LEN : LOG_EVENT_HEADER_LEN; const char* data_head = buf + header_len; + DBUG_ENTER("Load_log_event::copy_log_event"); + slave_proxy_id= thread_id= uint4korr(data_head + L_THREAD_ID_OFFSET); exec_time = uint4korr(data_head + L_EXEC_TIME_OFFSET); skip_lines = uint4korr(data_head + L_SKIP_LINES_OFFSET); @@ -1283,19 +1570,19 @@ int Load_log_event::copy_log_event(const char *buf, ulong event_len, get_data_body_offset()); if ((int) event_len < body_offset) - return 1; + DBUG_RETURN(1); /* Sql_ex.init() on success returns the pointer to the first byte after the sql_ex structure, which is the start of field lengths array. 
*/ if (!(field_lens=(uchar*)sql_ex.init((char*)buf + body_offset, - buf_end, - buf[EVENT_TYPE_OFFSET] != LOAD_EVENT))) - return 1; - + buf_end, + buf[EVENT_TYPE_OFFSET] != LOAD_EVENT))) + DBUG_RETURN(1); + data_len = event_len - body_offset; if (num_fields > data_len) // simple sanity check against corruption - return 1; + DBUG_RETURN(1); for (uint i = 0; i < num_fields; i++) field_block_len += (uint)field_lens[i] + 1; @@ -1305,19 +1592,25 @@ int Load_log_event::copy_log_event(const char *buf, ulong event_len, fname = db + db_len + 1; fname_len = strlen(fname); // null termination is accomplished by the caller doing buf[event_len]=0 - return 0; + DBUG_RETURN(0); } -#ifdef MYSQL_CLIENT +/* + Load_log_event::print() +*/ + +#ifdef MYSQL_CLIENT void Load_log_event::print(FILE* file, bool short_form, char* last_db) { print(file, short_form, last_db, 0); } + void Load_log_event::print(FILE* file, bool short_form, char* last_db, bool commented) { + DBUG_ENTER("Load_log_event::print"); if (!short_form) { print_header(file); @@ -1344,36 +1637,41 @@ void Load_log_event::print(FILE* file, bool short_form, char* last_db, commented ? "# " : "", db); + if (flags & LOG_EVENT_THREAD_SPECIFIC_F) + fprintf(file,"%sSET @@session.pseudo_thread_id=%lu;\n", + commented ? "# " : "", (ulong)thread_id); fprintf(file, "%sLOAD DATA ", commented ? "# " : ""); if (check_fname_outside_temp_buf()) fprintf(file, "LOCAL "); fprintf(file, "INFILE '%-*s' ", fname_len, fname); - if (sql_ex.opt_flags & REPLACE_FLAG ) + if (sql_ex.opt_flags & REPLACE_FLAG) fprintf(file," REPLACE "); - else if (sql_ex.opt_flags & IGNORE_FLAG ) + else if (sql_ex.opt_flags & IGNORE_FLAG) fprintf(file," IGNORE "); - + fprintf(file, "INTO TABLE `%s`", table_name); fprintf(file, " FIELDS TERMINATED BY "); pretty_print_str(file, sql_ex.field_term, sql_ex.field_term_len); - if (sql_ex.opt_flags & OPT_ENCLOSED_FLAG ) + if (sql_ex.opt_flags & OPT_ENCLOSED_FLAG) fprintf(file," OPTIONALLY "); fprintf(file, " ENCLOSED BY "); pretty_print_str(file, sql_ex.enclosed, sql_ex.enclosed_len); + fprintf(file, " ESCAPED BY "); pretty_print_str(file, sql_ex.escaped, sql_ex.escaped_len); + fprintf(file," LINES TERMINATED BY "); pretty_print_str(file, sql_ex.line_term, sql_ex.line_term_len); + if (sql_ex.line_start) { fprintf(file," STARTING BY "); pretty_print_str(file, sql_ex.line_start, sql_ex.line_start_len); } - if ((long) skip_lines > 0) fprintf(file, " IGNORE %ld LINES", (long) skip_lines); @@ -1394,554 +1692,36 @@ void Load_log_event::print(FILE* file, bool short_form, char* last_db, } fprintf(file, ";\n"); -} - -#endif /* #ifdef MYSQL_CLIENT */ - -#ifndef MYSQL_CLIENT - -void Log_event::set_log_pos(MYSQL_LOG* log) -{ - if (!log_pos) - log_pos = my_b_tell(&log->log_file); -} - - -void Load_log_event::set_fields(List<Item> &field_list) -{ - uint i; - const char *field= fields; - for (i= 0; i < num_fields; i++) - { - field_list.push_back(new Item_field(db, table_name, field)); - field+= field_lens[i] + 1; - } -} - - -Slave_log_event::Slave_log_event(THD* thd_arg, - struct st_relay_log_info* rli): - Log_event(thd_arg,0,0),mem_pool(0),master_host(0) -{ - DBUG_ENTER("Slave_log_event"); - if (!rli->inited) // QQ When can this happen ? 
- DBUG_VOID_RETURN; - - MASTER_INFO* mi = rli->mi; - // TODO: re-write this better without holding both locks at the same time - pthread_mutex_lock(&mi->data_lock); - pthread_mutex_lock(&rli->data_lock); - master_host_len = strlen(mi->host); - master_log_len = strlen(rli->master_log_name); - // on OOM, just do not initialize the structure and print the error - if ((mem_pool = (char*)my_malloc(get_data_size() + 1, - MYF(MY_WME)))) - { - master_host = mem_pool + SL_MASTER_HOST_OFFSET ; - memcpy(master_host, mi->host, master_host_len + 1); - master_log = master_host + master_host_len + 1; - memcpy(master_log, rli->master_log_name, master_log_len + 1); - master_port = mi->port; - master_pos = rli->master_log_pos; - DBUG_PRINT("info", ("master_log: %s pos: %d", master_log, - (ulong) master_pos)); - } - else - sql_print_error("Out of memory while recording slave event"); - pthread_mutex_unlock(&rli->data_lock); - pthread_mutex_unlock(&mi->data_lock); DBUG_VOID_RETURN; } - -#endif /* ! MYSQL_CLIENT */ - - -Slave_log_event::~Slave_log_event() -{ - my_free(mem_pool, MYF(MY_ALLOW_ZERO_PTR)); -} - -#ifdef MYSQL_CLIENT - -void Slave_log_event::print(FILE* file, bool short_form, char* last_db) -{ - char llbuff[22]; - if (short_form) - return; - print_header(file); - fputc('\n', file); - fprintf(file, "\ -Slave: master_host: '%s' master_port: %d master_log: '%s' master_pos: %s\n", - master_host, master_port, master_log, llstr(master_pos, llbuff)); -} - #endif /* MYSQL_CLIENT */ -int Slave_log_event::get_data_size() -{ - return master_host_len + master_log_len + 1 + SL_MASTER_HOST_OFFSET; -} -int Slave_log_event::write_data(IO_CACHE* file) -{ - int8store(mem_pool + SL_MASTER_POS_OFFSET, master_pos); - int2store(mem_pool + SL_MASTER_PORT_OFFSET, master_port); - // log and host are already there - return my_b_safe_write(file, (byte*)mem_pool, get_data_size()); -} - - -void Slave_log_event::init_from_mem_pool(int data_size) -{ - master_pos = uint8korr(mem_pool + SL_MASTER_POS_OFFSET); - master_port = uint2korr(mem_pool + SL_MASTER_PORT_OFFSET); - master_host = mem_pool + SL_MASTER_HOST_OFFSET; - master_host_len = strlen(master_host); - // safety - master_log = master_host + master_host_len + 1; - if (master_log > mem_pool + data_size) - { - master_host = 0; - return; - } - master_log_len = strlen(master_log); -} +/* + Load_log_event::set_fields() -Slave_log_event::Slave_log_event(const char* buf, int event_len) - :Log_event(buf,0),mem_pool(0),master_host(0) -{ - event_len -= LOG_EVENT_HEADER_LEN; - if (event_len < 0) - return; - if (!(mem_pool = (char*) my_malloc(event_len + 1, MYF(MY_WME)))) - return; - memcpy(mem_pool, buf + LOG_EVENT_HEADER_LEN, event_len); - mem_pool[event_len] = 0; - init_from_mem_pool(event_len); -} + Note that this function can not use the member variable + for the database, since LOAD DATA INFILE on the slave + can be for a different database than the current one. + This is the reason for the affected_db argument to this method. 
+*/ #ifndef MYSQL_CLIENT -Create_file_log_event:: -Create_file_log_event(THD* thd_arg, sql_exchange* ex, - const char* db_arg, const char* table_name_arg, - List<Item>& fields_arg, enum enum_duplicates handle_dup, - char* block_arg, uint block_len_arg, bool using_trans) - :Load_log_event(thd_arg,ex,db_arg,table_name_arg,fields_arg,handle_dup, - using_trans), - fake_base(0),block(block_arg), event_buf(0), block_len(block_len_arg), - file_id(thd_arg->file_id = mysql_bin_log.next_file_id()) -{ - sql_ex.force_new_format(); -} -#endif - -int Create_file_log_event::write_data_body(IO_CACHE* file) +void Load_log_event::set_fields(const char* affected_db, + List<Item> &field_list) { - int res; - if ((res = Load_log_event::write_data_body(file)) || fake_base) - return res; - return (my_b_safe_write(file, (byte*) "", 1) || - my_b_safe_write(file, (byte*) block, block_len)); -} - -int Create_file_log_event::write_data_header(IO_CACHE* file) -{ - int res; - if ((res = Load_log_event::write_data_header(file)) || fake_base) - return res; - byte buf[CREATE_FILE_HEADER_LEN]; - int4store(buf + CF_FILE_ID_OFFSET, file_id); - return my_b_safe_write(file, buf, CREATE_FILE_HEADER_LEN); -} - -int Create_file_log_event::write_base(IO_CACHE* file) -{ - int res; - fake_base = 1; // pretend we are Load event - res = write(file); - fake_base = 0; - return res; -} - -Create_file_log_event::Create_file_log_event(const char* buf, int len, - bool old_format) - :Load_log_event(buf,0,old_format),fake_base(0),block(0),inited_from_old(0) -{ - int block_offset; - DBUG_ENTER("Create_file_log_event"); - - /* - We must make copy of 'buf' as this event may have to live over a - rotate log entry when used in mysqlbinlog - */ - if (!(event_buf= my_memdup((byte*) buf, len, MYF(MY_WME))) || - (copy_log_event(event_buf, len, old_format))) - DBUG_VOID_RETURN; - - if (!old_format) - { - file_id = uint4korr(buf + LOG_EVENT_HEADER_LEN + - + LOAD_HEADER_LEN + CF_FILE_ID_OFFSET); - // + 1 for \0 terminating fname - block_offset = (LOG_EVENT_HEADER_LEN + Load_log_event::get_data_size() + - CREATE_FILE_HEADER_LEN + 1); - if (len < block_offset) - return; - block = (char*)buf + block_offset; - block_len = len - block_offset; - } - else - { - sql_ex.force_new_format(); - inited_from_old = 1; - } - DBUG_VOID_RETURN; -} - - -#ifdef MYSQL_CLIENT -void Create_file_log_event::print(FILE* file, bool short_form, - char* last_db, bool enable_local) -{ - if (short_form) - { - if (enable_local && check_fname_outside_temp_buf()) - Load_log_event::print(file, 1, last_db); - return; - } - - if (enable_local) + uint i; + const char* field = fields; + for (i= 0; i < num_fields; i++) { - Load_log_event::print(file, short_form, last_db, !check_fname_outside_temp_buf()); - /* - That one is for "file_id: etc" below: in mysqlbinlog we want the #, in - SHOW BINLOG EVENTS we don't. 
- */ - fprintf(file, "#"); + field_list.push_back(new Item_field(affected_db, table_name, field)); + field+= field_lens[i] + 1; } - - fprintf(file, " file_id: %d block_len: %d\n", file_id, block_len); -} - -void Create_file_log_event::print(FILE* file, bool short_form, - char* last_db) -{ - print(file,short_form,last_db,0); -} -#endif - -#ifndef MYSQL_CLIENT -void Create_file_log_event::pack_info(String* packet) -{ - char buf1[256],buf[22], *end; - String tmp(buf1, sizeof(buf1)); - tmp.length(0); - tmp.append("db="); - tmp.append(db, db_len); - tmp.append(";table="); - tmp.append(table_name, table_name_len); - tmp.append(";file_id="); - end= int10_to_str((long) file_id, buf, 10); - tmp.append(buf, (uint32) (end-buf)); - tmp.append(";block_len="); - end= int10_to_str((long) block_len, buf, 10); - tmp.append(buf, (uint32) (end-buf)); - net_store_data(packet, (char*) tmp.ptr(), tmp.length()); -} -#endif - -#ifndef MYSQL_CLIENT -Append_block_log_event::Append_block_log_event(THD* thd_arg, const char* db_arg, - char* block_arg, - uint block_len_arg, - bool using_trans) - :Log_event(thd_arg,0, using_trans), block(block_arg), - block_len(block_len_arg), file_id(thd_arg->file_id), db(db_arg) -{ -} -#endif - -Append_block_log_event::Append_block_log_event(const char* buf, int len) - :Log_event(buf, 0),block(0) -{ - if ((uint)len < APPEND_BLOCK_EVENT_OVERHEAD) - return; - file_id = uint4korr(buf + LOG_EVENT_HEADER_LEN + AB_FILE_ID_OFFSET); - block = (char*)buf + APPEND_BLOCK_EVENT_OVERHEAD; - block_len = len - APPEND_BLOCK_EVENT_OVERHEAD; -} - -int Append_block_log_event::write_data(IO_CACHE* file) -{ - byte buf[APPEND_BLOCK_HEADER_LEN]; - int4store(buf + AB_FILE_ID_OFFSET, file_id); - return (my_b_safe_write(file, buf, APPEND_BLOCK_HEADER_LEN) || - my_b_safe_write(file, (byte*) block, block_len)); -} - -#ifdef MYSQL_CLIENT -void Append_block_log_event::print(FILE* file, bool short_form, - char* last_db) -{ - if (short_form) - return; - print_header(file); - fputc('\n', file); - fprintf(file, "#Append_block: file_id: %d block_len: %d\n", - file_id, block_len); -} -#endif - -#ifndef MYSQL_CLIENT -void Append_block_log_event::pack_info(String* packet) -{ - char buf1[256]; - sprintf(buf1, ";file_id=%u;block_len=%u", file_id, block_len); - net_store_data(packet, buf1); -} - -Delete_file_log_event::Delete_file_log_event(THD* thd_arg, const char* db_arg, - bool using_trans) - :Log_event(thd_arg, 0, using_trans), file_id(thd_arg->file_id), db(db_arg) -{ } -#endif - - -Delete_file_log_event::Delete_file_log_event(const char* buf, int len) - :Log_event(buf, 0),file_id(0) -{ - if ((uint)len < DELETE_FILE_EVENT_OVERHEAD) - return; - file_id = uint4korr(buf + LOG_EVENT_HEADER_LEN + AB_FILE_ID_OFFSET); -} - - -int Delete_file_log_event::write_data(IO_CACHE* file) -{ - byte buf[DELETE_FILE_HEADER_LEN]; - int4store(buf + DF_FILE_ID_OFFSET, file_id); - return my_b_safe_write(file, buf, DELETE_FILE_HEADER_LEN); -} - -#ifdef MYSQL_CLIENT -void Delete_file_log_event::print(FILE* file, bool short_form, - char* last_db) -{ - if (short_form) - return; - print_header(file); - fputc('\n', file); - fprintf(file, "#Delete_file: file_id=%u\n", file_id); -} -#endif - -#ifndef MYSQL_CLIENT -void Delete_file_log_event::pack_info(String* packet) -{ - char buf1[64]; - sprintf(buf1, ";file_id=%u", (uint) file_id); - net_store_data(packet, buf1); -} -#endif - - -#ifndef MYSQL_CLIENT -Execute_load_log_event::Execute_load_log_event(THD* thd_arg, const char* db_arg, - bool using_trans) - :Log_event(thd_arg, 0, using_trans), 
file_id(thd_arg->file_id), db(db_arg) -{ -} -#endif - -Execute_load_log_event::Execute_load_log_event(const char* buf, int len) - :Log_event(buf, 0), file_id(0) -{ - if ((uint)len < EXEC_LOAD_EVENT_OVERHEAD) - return; - file_id = uint4korr(buf + LOG_EVENT_HEADER_LEN + EL_FILE_ID_OFFSET); -} - -int Execute_load_log_event::write_data(IO_CACHE* file) -{ - byte buf[EXEC_LOAD_HEADER_LEN]; - int4store(buf + EL_FILE_ID_OFFSET, file_id); - return my_b_safe_write(file, buf, EXEC_LOAD_HEADER_LEN); -} - -#ifdef MYSQL_CLIENT -void Execute_load_log_event::print(FILE* file, bool short_form, - char* last_db) -{ - if (short_form) - return; - print_header(file); - fputc('\n', file); - fprintf(file, "#Exec_load: file_id=%d\n", - file_id); -} -#endif -#ifndef MYSQL_CLIENT -void Execute_load_log_event::pack_info(String* packet) -{ - char buf[64]; - sprintf(buf, ";file_id=%u", (uint) file_id); - net_store_data(packet, buf); -} -#endif - -#ifdef MYSQL_CLIENT -void Unknown_log_event::print(FILE* file, bool short_form, char* last_db) -{ - if (short_form) - return; - print_header(file); - fputc('\n', file); - fprintf(file, "# %s", "Unknown event\n"); -} -#endif - -#ifndef MYSQL_CLIENT -int Query_log_event::exec_event(struct st_relay_log_info* rli) -{ - int expected_error, actual_error= 0; - init_sql_alloc(&thd->mem_root, thd->variables.query_alloc_block_size,0); - thd->db= (char*) rewrite_db(db); // thd->db_length is set later if needed - - /* - InnoDB internally stores the master log position it has processed so far; - position to store is of the END of the current log event. - */ -#if MYSQL_VERSION_ID < 40100 - /* - If the event was converted from a 3.23 format, get_event_len() has grown by - 6 bytes (at least for most events, except LOAD DATA INFILE which is already - a big problem for 3.23->4.0 replication); 6 bytes is the difference between - the header's size in 4.0 (LOG_EVENT_HEADER_LEN) and the header's size in - 3.23 (OLD_HEADER_LEN). Note that using mi->old_format will not help if the - I/O thread has not started yet. - */ - rli->future_master_log_pos= log_pos + get_event_len() - - (rli->mi->old_format ? (LOG_EVENT_HEADER_LEN - OLD_HEADER_LEN) : 0); -#elif MYSQL_VERSION_ID < 50000 - rli->future_group_master_log_pos= log_pos + get_event_len() - - (rli->mi->old_format ? (LOG_EVENT_HEADER_LEN - OLD_HEADER_LEN) : 0); -#else - /* In 5.0 we store the end_log_pos in the relay log so no problem */ - rli->future_group_master_log_pos= log_pos; -#endif - clear_all_errors(thd, rli); - - if (db_ok(thd->db, replicate_do_db, replicate_ignore_db)) - { - thd->set_time((time_t)when); - thd->current_tablenr = 0; - /* - We cannot use db_len from event to fill thd->db_length, because - rewrite_db() may have changed db. - */ - thd->db_length= thd->db ? strlen(thd->db) : 0; - thd->query_length= q_len; - thd->query= (char *) query; - VOID(pthread_mutex_lock(&LOCK_thread_count)); - thd->query_id = query_id++; - VOID(pthread_mutex_unlock(&LOCK_thread_count)); - thd->slave_proxy_id = thread_id; // for temp tables - - mysql_log.write(thd,COM_QUERY,"%s",thd->query); - DBUG_PRINT("query",("%s",thd->query)); - if (ignored_error_code(expected_error = error_code) || - !check_expected_error(thd,rli,expected_error)) - mysql_parse(thd, thd->query, q_len); - else - { - /* - The query got a really bad error on the master (thread killed etc), - which could be inconsistent. 
Parse it to test the table names: if the - replicate-*-do|ignore-table rules say "this query must be ignored" then - we exit gracefully; otherwise we warn about the bad error and tell DBA - to check/fix it. - */ - if (mysql_test_parse_for_slave(thd, thd->query, q_len)) - /* Can ignore query */ - clear_all_errors(thd, rli); - else - { - slave_print_error(rli,expected_error, - "query partially completed on the master \ -(error on master: %d) \ -and was aborted. There is a chance that your master is inconsistent at this \ -point. If you are sure that your master is ok, run this query manually on the\ - slave and then restart the slave with SET GLOBAL SQL_SLAVE_SKIP_COUNTER=1;\ - START SLAVE; . Query: '%s'", expected_error, thd->query); - thd->query_error= 1; - } - goto end; - } - - /* - Set a flag if we are inside an transaction so that we can restart - the transaction from the start if we are killed - - This will only be done if we are supporting transactional tables - in the slave. - */ - if (!strcmp(thd->query,"BEGIN")) - rli->inside_transaction= opt_using_transactions; - else if (!(strcmp(thd->query,"COMMIT") && strcmp(thd->query,"ROLLBACK"))) - rli->inside_transaction=0; - - /* - If we expected a non-zero error code, and we don't get the same error - code, and none of them should be ignored. - */ - if ((expected_error != (actual_error = thd->net.last_errno)) && - expected_error && - !ignored_error_code(actual_error) && - !ignored_error_code(expected_error)) - { - slave_print_error(rli, 0, - "\ -Query caused different errors on master and slave. \ -Error on master: '%s' (%d), Error on slave: '%s' (%d). \ -Default database: '%s'. Query: '%s'", - ER_SAFE(expected_error), - expected_error, - actual_error ? thd->net.last_error: "no error", - actual_error, print_slave_db_safe(db), query); - thd->query_error= 1; - } - /* - If we get the same error code as expected, or they should be ignored. - */ - else if (expected_error == actual_error || - ignored_error_code(actual_error)) - clear_all_errors(thd, rli); - /* - Other cases: mostly we expected no error and get one. - */ - else if (thd->query_error || thd->fatal_error) - { - slave_print_error(rli,actual_error, - "Error '%s' on query. Default database: '%s'. Query: '%s'", - (actual_error ? thd->net.last_error : - "unexpected success or fatal error"), - print_slave_db_safe(db), query); - thd->query_error= 1; - } - } /* End of if (db_ok(... */ +#endif /* !MYSQL_CLIENT */ -end: - VOID(pthread_mutex_lock(&LOCK_thread_count)); - thd->db= 0; // prevent db from being freed - thd->query= 0; // just to be sure - thd->query_length= thd->db_length =0; - VOID(pthread_mutex_unlock(&LOCK_thread_count)); - close_thread_tables(thd); - free_root(&thd->mem_root,MYF(MY_KEEP_PREALLOC)); - return (thd->query_error ? 
thd->query_error : Log_event::exec_event(rli)); -} +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) /* Does the data loading job when executing a LOAD DATA on the slave @@ -1972,17 +1752,20 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, bool use_rli_only_for_errors) { char *load_data_query= 0; - init_sql_alloc(&thd->mem_root, thd->variables.query_alloc_block_size, 0); - thd->db= (char*) rewrite_db(db); // thd->db_length is set later if needed + thd->db_length= db_len; + thd->db= (char*) rewrite_db(db, &thd->db_length); DBUG_ASSERT(thd->query == 0); + thd->query_length= 0; // Should not be needed + thd->query_error= 0; clear_all_errors(thd, rli); - + /* + Usually mysql_init_query() is called by mysql_parse(), but we need it here + as the present method does not call mysql_parse(). + */ + mysql_init_query(thd, 0, 0); if (!use_rli_only_for_errors) { -#if MYSQL_VERSION_ID < 40100 - rli->future_master_log_pos= log_pos + get_event_len() - - (rli->mi->old_format ? (LOG_EVENT_HEADER_LEN - OLD_HEADER_LEN) : 0); -#elif MYSQL_VERSION_ID < 50000 +#if MYSQL_VERSION_ID < 50000 rli->future_group_master_log_pos= log_pos + get_event_len() - (rli->mi->old_format ? (LOG_EVENT_HEADER_LEN - OLD_HEADER_LEN) : 0); #else @@ -1991,25 +1774,40 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, } /* - We test replicate_*_db rules. Note that we have already prepared the file to - load, even if we are going to ignore and delete it now. So it is possible - that we did a lot of disk writes for nothing. In other words, a big LOAD - DATA INFILE on the master will still consume a lot of space on the slave - (space in the relay log + space of temp files: twice the space of the file - to load...) even if it will finally be ignored. + We test replicate_*_db rules. Note that we have already prepared the file + to load, even if we are going to ignore and delete it now. So it is + possible that we did a lot of disk writes for nothing. In other words, a + big LOAD DATA INFILE on the master will still consume a lot of space on + the slave (space in the relay log + space of temp files: twice the space + of the file to load...) even if it will finally be ignored. TODO: fix this; this can be done by testing rules in Create_file_log_event::exec_event() and then discarding Append_block and al. Another way is do the filtering in the I/O thread (more efficient: no disk writes at all). + + + Note: We do not need to execute reset_one_shot_variables() if this + db_ok() test fails. + Reason: The db stored in binlog events is the same for SET and for + its companion query. If the SET is ignored because of + db_ok(), the companion query will also be ignored, and if + the companion query is ignored in the db_ok() test of + ::exec_event(), then the companion SET also have so we + don't need to reset_one_shot_variables(). */ if (db_ok(thd->db, replicate_do_db, replicate_ignore_db)) { thd->set_time((time_t)when); - thd->current_tablenr = 0; - thd->db_length= thd->db ? strlen(thd->db) : 0; VOID(pthread_mutex_lock(&LOCK_thread_count)); thd->query_id = query_id++; VOID(pthread_mutex_unlock(&LOCK_thread_count)); + /* + Initing thd->row_count is not necessary in theory as this variable has no + influence in the case of the slave SQL thread (it is used to generate a + "data truncated" warning but which is absorbed and never gets to the + error log); still we init it to avoid a Valgrind message. 
+ */ + mysql_reset_errors(thd); TABLE_LIST tables; bzero((char*) &tables,sizeof(tables)); @@ -2017,6 +1815,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, tables.alias = tables.real_name = (char*)table_name; tables.lock_type = TL_WRITE; tables.updating= 1; + // the table will be opened in mysql_load if (table_rules_on && !tables_ok(thd, &tables)) { @@ -2028,6 +1827,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, { char llbuff[22]; enum enum_duplicates handle_dup; + bool ignore= 0; /* Make a simplified LOAD DATA INFILE query, for the information of the user in SHOW PROCESSLIST. Note that db is known in the 'db' column. @@ -2041,48 +1841,64 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, "` <...>", NullS) - load_data_query); thd->query= load_data_query; } + if (sql_ex.opt_flags & REPLACE_FLAG) + { handle_dup= DUP_REPLACE; + } else if (sql_ex.opt_flags & IGNORE_FLAG) - handle_dup= DUP_IGNORE; + { + ignore= 1; + handle_dup= DUP_ERROR; + } else { /* - When replication is running fine, if it was DUP_ERROR on the - master then we could choose DUP_IGNORE here, because if DUP_ERROR + When replication is running fine, if it was DUP_ERROR on the + master then we could choose IGNORE here, because if DUP_ERROR suceeded on master, and data is identical on the master and slave, - then there should be no uniqueness errors on slave, so DUP_IGNORE is + then there should be no uniqueness errors on slave, so IGNORE is the same as DUP_ERROR. But in the unlikely case of uniqueness errors (because the data on the master and slave happen to be different (user error or bug), we want LOAD DATA to print an error message on the slave to discover the problem. If reading from net (a 3.23 master), mysql_load() will change this - to DUP_IGNORE. + to IGNORE. */ handle_dup= DUP_ERROR; } + /* + We need to set thd->lex->sql_command and thd->lex->duplicates + since InnoDB tests these variables to decide if this is a LOAD + DATA ... REPLACE INTO ... statement even though mysql_parse() + is not called. This is not needed in 5.0 since there the LOAD + DATA ... statement is replicated using mysql_parse(), which + sets the thd->lex fields correctly. 
+ */ + thd->lex->sql_command= SQLCOM_LOAD; + thd->lex->duplicates= handle_dup; sql_exchange ex((char*)fname, sql_ex.opt_flags & DUMPFILE_FLAG); - String field_term(sql_ex.field_term,sql_ex.field_term_len); - String enclosed(sql_ex.enclosed,sql_ex.enclosed_len); - String line_term(sql_ex.line_term,sql_ex.line_term_len); - String line_start(sql_ex.line_start,sql_ex.line_start_len); - String escaped(sql_ex.escaped,sql_ex.escaped_len); + String field_term(sql_ex.field_term,sql_ex.field_term_len,log_cs); + String enclosed(sql_ex.enclosed,sql_ex.enclosed_len,log_cs); + String line_term(sql_ex.line_term,sql_ex.line_term_len,log_cs); + String line_start(sql_ex.line_start,sql_ex.line_start_len,log_cs); + String escaped(sql_ex.escaped,sql_ex.escaped_len, log_cs); ex.field_term= &field_term; ex.enclosed= &enclosed; ex.line_term= &line_term; ex.line_start= &line_start; ex.escaped= &escaped; - + ex.opt_enclosed = (sql_ex.opt_flags & OPT_ENCLOSED_FLAG); if (sql_ex.empty_flags & FIELD_TERM_EMPTY) ex.field_term->length(0); ex.skip_lines = skip_lines; List<Item> field_list; - set_fields(field_list); - thd->slave_proxy_id = thread_id; + set_fields(thd->db,field_list); + thd->variables.pseudo_thread_id= thread_id; if (net) { // mysql_load will use thd->net to read the file @@ -2092,18 +1908,20 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, */ thd->net.pkt_nr = net->pkt_nr; } - if (mysql_load(thd, &ex, &tables, field_list, handle_dup, net != 0, + if (mysql_load(thd, &ex, &tables, field_list, handle_dup, ignore, net != 0, TL_WRITE)) thd->query_error = 1; - /* log_pos is the position of the LOAD event in the master log */ if (thd->cuted_fields) - sql_print_error("\ -Slave: load data infile on table '%s' at log position %s in log \ -'%s' produced %ld warning(s). Default database: '%s'", - (char*) table_name, - llstr(log_pos,llbuff), RPL_LOG_NAME, - (ulong) thd->cuted_fields, - print_slave_db_safe(db)); + { + /* log_pos is the position of the LOAD event in the master log */ + sql_print_warning("Slave: load data infile on table '%s' at " + "log position %s in log '%s' produced %ld " + "warning(s). Default database: '%s'", + (char*) table_name, + llstr(log_pos,llbuff), RPL_LOG_NAME, + (ulong) thd->cuted_fields, + print_slave_db_safe(thd->db)); + } if (net) net->pkt_nr= thd->net.pkt_nr; } @@ -2120,6 +1938,7 @@ Slave: load data infile on table '%s' at log position %s in log \ } thd->net.vio = 0; + char *save_db= thd->db; VOID(pthread_mutex_lock(&LOCK_thread_count)); thd->db= 0; thd->query= 0; @@ -2142,135 +1961,150 @@ Slave: load data infile on table '%s' at log position %s in log \ } slave_print_error(rli,sql_errno,"\ Error '%s' running LOAD DATA INFILE on table '%s'. Default database: '%s'", - err, (char*)table_name, print_slave_db_safe(db)); - free_root(&thd->mem_root,MYF(MY_KEEP_PREALLOC)); + err, (char*)table_name, print_slave_db_safe(save_db)); + free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC)); return 1; } - free_root(&thd->mem_root,MYF(MY_KEEP_PREALLOC)); + free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC)); - if (thd->fatal_error) + if (thd->is_fatal_error) { slave_print_error(rli,ER_UNKNOWN_ERROR, "\ Fatal error running LOAD DATA INFILE on table '%s'. Default database: '%s'", - (char*)table_name, print_slave_db_safe(db)); + (char*)table_name, print_slave_db_safe(save_db)); return 1; } return ( use_rli_only_for_errors ? 
0 : Log_event::exec_event(rli) ); } +#endif + +/************************************************************************** + Rotate_log_event methods +**************************************************************************/ /* - The master started + Rotate_log_event::pack_info() +*/ - IMPLEMENTATION - - To handle the case where the master died without a stop event, - we clean up all temporary tables that we got, if we are sure we - can (see below). +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) +void Rotate_log_event::pack_info(Protocol *protocol) +{ + char buf1[256], buf[22]; + String tmp(buf1, sizeof(buf1), log_cs); + tmp.length(0); + tmp.append(new_log_ident, ident_len); + tmp.append(";pos="); + tmp.append(llstr(pos,buf)); + protocol->store(tmp.ptr(), tmp.length(), &my_charset_bin); +} +#endif - TODO - - Remove all active user locks + +/* + Rotate_log_event::print() */ -int Start_log_event::exec_event(struct st_relay_log_info* rli) +#ifdef MYSQL_CLIENT +void Rotate_log_event::print(FILE* file, bool short_form, char* last_db) { + char buf[22]; + if (short_form) + return; - /* - If the I/O thread has not started, mi->old_format is BINLOG_FORMAT_CURRENT - (that's what the MASTER_INFO constructor does), so the test below is not - perfect at all. - */ - switch (rli->mi->old_format) { - case BINLOG_FORMAT_CURRENT : - /* - This is 4.x, so a Start_log_event is only at master startup, - so we are sure the master has restarted and cleared his temp tables. - */ - close_temporary_tables(thd); - cleanup_load_tmpdir(); - /* - As a transaction NEVER spans on 2 or more binlogs: - if we have an active transaction at this point, the master died while - writing the transaction to the binary log, i.e. while flushing the binlog - cache to the binlog. As the write was started, the transaction had been - committed on the master, so we lack of information to replay this - transaction on the slave; all we can do is stop with error. - */ - if (rli->inside_transaction) - { - slave_print_error(rli, 0, - "\ -Rolling back unfinished transaction (no COMMIT or ROLLBACK) from relay log. \ -A probable cause is that the master died while writing the transaction to its \ -binary log."); - return(1); - } - break; - /* - Now the older formats; in that case load_tmpdir is cleaned up by the I/O - thread. - */ - case BINLOG_FORMAT_323_LESS_57 : - /* - Cannot distinguish a Start_log_event generated at master startup and - one generated by master FLUSH LOGS, so cannot be sure temp tables - have to be dropped. So do nothing. - */ - break; - case BINLOG_FORMAT_323_GEQ_57 : - /* - Can distinguish, based on the value of 'created', - which was generated at master startup. - */ - if (created) - close_temporary_tables(thd); - break; - default : - /* this case is impossible */ - return 1; - } - - return Log_event::exec_event(rli); + print_header(file); + fprintf(file, "\tRotate to "); + if (new_log_ident) + my_fwrite(file, (byte*) new_log_ident, (uint)ident_len, + MYF(MY_NABP | MY_WME)); + fprintf(file, " pos: %s", llstr(pos, buf)); + fputc('\n', file); + fflush(file); } +#endif /* MYSQL_CLIENT */ -/* - The master stopped. Clean up all temporary tables + locks that the - master may have set. 
- TODO - - Remove all active user locks +/* + Rotate_log_event::Rotate_log_event() (2 constructors) */ -int Stop_log_event::exec_event(struct st_relay_log_info* rli) + +#ifndef MYSQL_CLIENT +Rotate_log_event::Rotate_log_event(THD* thd_arg, + const char* new_log_ident_arg, + uint ident_len_arg, ulonglong pos_arg, + uint flags_arg) + :Log_event(), new_log_ident(new_log_ident_arg), + pos(pos_arg),ident_len(ident_len_arg ? ident_len_arg : + (uint) strlen(new_log_ident_arg)), flags(flags_arg) +{ +#ifndef DBUG_OFF + char buff[22]; + DBUG_ENTER("Rotate_log_event::Rotate_log_event(THD*,...)"); + DBUG_PRINT("enter",("new_log_ident %s pos %s flags %lu", new_log_ident_arg, + llstr(pos_arg, buff), flags)); +#endif + if (flags & DUP_NAME) + new_log_ident= my_strdup_with_length((byte*) new_log_ident_arg, + ident_len, + MYF(MY_WME)); + DBUG_VOID_RETURN; +} +#endif + + +Rotate_log_event::Rotate_log_event(const char* buf, int event_len, + bool old_format) + :Log_event(buf, old_format), new_log_ident(0), flags(DUP_NAME) { - /* - do not clean up immediately after rotate event; - QQ: this should be a useless test: the only case when it is false is when - shutdown occured just after FLUSH LOGS. It has nothing to do with Rotate? - By the way, immediately after a Rotate - the I/O thread does not write the Stop to the relay log, - so we won't come here in that case. - */ - if (rli->master_log_pos > BIN_LOG_HEADER_SIZE) + // The caller will ensure that event_len is what we have at EVENT_LEN_OFFSET + int header_size = (old_format) ? OLD_HEADER_LEN : LOG_EVENT_HEADER_LEN; + uint ident_offset; + DBUG_ENTER("Rotate_log_event::Rotate_log_event(char*,...)"); + + if (event_len < header_size) + DBUG_VOID_RETURN; + + buf += header_size; + if (old_format) { - close_temporary_tables(thd); - cleanup_load_tmpdir(); + ident_len = (uint)(event_len - OLD_HEADER_LEN); + pos = 4; + ident_offset = 0; } - /* - We do not want to update master_log pos because we get a rotate event - before stop, so by now master_log_name is set to the next log. - If we updated it, we will have incorrect master coordinates and this - could give false triggers in MASTER_POS_WAIT() that we have reached - the target position when in fact we have not. 
- */ - rli->inc_pos(get_event_len(), 0); - flush_relay_log_info(rli); - return 0; + else + { + ident_len = (uint)(event_len - ROTATE_EVENT_OVERHEAD); + pos = uint8korr(buf + R_POS_OFFSET); + ident_offset = ROTATE_HEADER_LEN; + } + set_if_smaller(ident_len,FN_REFLEN-1); + new_log_ident= my_strdup_with_length((byte*) buf + ident_offset, + (uint) ident_len, + MYF(MY_WME)); + DBUG_VOID_RETURN; } /* + Rotate_log_event::write_data() +*/ + +int Rotate_log_event::write_data(IO_CACHE* file) +{ + char buf[ROTATE_HEADER_LEN]; + DBUG_ASSERT(!(flags & ZERO_LEN)); // such an event cannot be written + int8store(buf + R_POS_OFFSET, pos); + return (my_b_safe_write(file, (byte*)buf, ROTATE_HEADER_LEN) || + my_b_safe_write(file, (byte*)new_log_ident, (uint) ident_len)); +} + + +/* + Rotate_log_event::exec_event() + Got a rotate log even from the master IMPLEMENTATION @@ -2284,11 +2118,13 @@ int Stop_log_event::exec_event(struct st_relay_log_info* rli) 0 ok */ +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) int Rotate_log_event::exec_event(struct st_relay_log_info* rli) { DBUG_ENTER("Rotate_log_event::exec_event"); pthread_mutex_lock(&rli->data_lock); + rli->event_relay_log_pos += get_event_len(); /* If we are in a transaction: the only normal case is when the I/O thread was copying a big transaction, then it was stopped and restarted: we have this @@ -2298,25 +2134,123 @@ int Rotate_log_event::exec_event(struct st_relay_log_info* rli) ROTATE (a fake one) ... COMMIT or ROLLBACK - In that case, we don't want to touch the coordinates which correspond to the - beginning of the transaction. + In that case, we don't want to touch the coordinates which correspond to + the beginning of the transaction. */ - if (rli->inside_transaction) - rli->inc_pending(get_event_len()); - else + if (!(thd->options & OPTION_BEGIN)) { - memcpy(rli->master_log_name, new_log_ident, ident_len+1); - rli->master_log_pos= pos; - rli->relay_log_pos += get_event_len(); - DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) rli->master_log_pos)); + memcpy(rli->group_master_log_name, new_log_ident, ident_len+1); + rli->notify_group_master_log_name_update(); + rli->group_master_log_pos = pos; + rli->group_relay_log_pos = rli->event_relay_log_pos; + DBUG_PRINT("info", ("group_master_log_pos: %lu", + (ulong) rli->group_master_log_pos)); } pthread_mutex_unlock(&rli->data_lock); pthread_cond_broadcast(&rli->data_cond); flush_relay_log_info(rli); DBUG_RETURN(0); } +#endif + + +/************************************************************************** + Intvar_log_event methods +**************************************************************************/ + +/* + Intvar_log_event::pack_info() +*/ + +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) +void Intvar_log_event::pack_info(Protocol *protocol) +{ + char buf[256], *pos; + pos= strmake(buf, get_var_type_name(), sizeof(buf)-23); + *pos++= '='; + pos= longlong10_to_str(val, pos, -10); + protocol->store(buf, (uint) (pos-buf), &my_charset_bin); +} +#endif + + +/* + Intvar_log_event::Intvar_log_event() +*/ + +Intvar_log_event::Intvar_log_event(const char* buf, bool old_format) + :Log_event(buf, old_format) +{ + buf += (old_format) ? 
OLD_HEADER_LEN : LOG_EVENT_HEADER_LEN; + type = buf[I_TYPE_OFFSET]; + val = uint8korr(buf+I_VAL_OFFSET); +} + + +/* + Intvar_log_event::get_var_type_name() +*/ + +const char* Intvar_log_event::get_var_type_name() +{ + switch(type) { + case LAST_INSERT_ID_EVENT: return "LAST_INSERT_ID"; + case INSERT_ID_EVENT: return "INSERT_ID"; + default: /* impossible */ return "UNKNOWN"; + } +} + +/* + Intvar_log_event::write_data() +*/ + +int Intvar_log_event::write_data(IO_CACHE* file) +{ + char buf[9]; + buf[I_TYPE_OFFSET] = type; + int8store(buf + I_VAL_OFFSET, val); + return my_b_safe_write(file, (byte*) buf, sizeof(buf)); +} + + +/* + Intvar_log_event::print() +*/ +#ifdef MYSQL_CLIENT +void Intvar_log_event::print(FILE* file, bool short_form, char* last_db) +{ + char llbuff[22]; + const char *msg; + LINT_INIT(msg); + + if (!short_form) + { + print_header(file); + fprintf(file, "\tIntvar\n"); + } + + fprintf(file, "SET "); + switch (type) { + case LAST_INSERT_ID_EVENT: + msg="LAST_INSERT_ID"; + break; + case INSERT_ID_EVENT: + msg="INSERT_ID"; + break; + } + fprintf(file, "%s=%s;\n", msg, llstr(val,llbuff)); + fflush(file); +} +#endif + + +/* + Intvar_log_event::exec_event() +*/ + +#if defined(HAVE_REPLICATION)&& !defined(MYSQL_CLIENT) int Intvar_log_event::exec_event(struct st_relay_log_info* rli) { switch (type) { @@ -2328,25 +2262,733 @@ int Intvar_log_event::exec_event(struct st_relay_log_info* rli) thd->next_insert_id = val; break; } - rli->inc_pending(get_event_len()); + rli->inc_event_relay_log_pos(get_event_len()); return 0; } +#endif + + +/************************************************************************** + Rand_log_event methods +**************************************************************************/ + +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) +void Rand_log_event::pack_info(Protocol *protocol) +{ + char buf1[256], *pos; + pos= strmov(buf1,"rand_seed1="); + pos= int10_to_str((long) seed1, pos, 10); + pos= strmov(pos, ",rand_seed2="); + pos= int10_to_str((long) seed2, pos, 10); + protocol->store(buf1, (uint) (pos-buf1), &my_charset_bin); +} +#endif + + +Rand_log_event::Rand_log_event(const char* buf, bool old_format) + :Log_event(buf, old_format) +{ + buf += (old_format) ? 
OLD_HEADER_LEN : LOG_EVENT_HEADER_LEN; + seed1 = uint8korr(buf+RAND_SEED1_OFFSET); + seed2 = uint8korr(buf+RAND_SEED2_OFFSET); +} + +int Rand_log_event::write_data(IO_CACHE* file) +{ + char buf[16]; + int8store(buf + RAND_SEED1_OFFSET, seed1); + int8store(buf + RAND_SEED2_OFFSET, seed2); + return my_b_safe_write(file, (byte*) buf, sizeof(buf)); +} + + +#ifdef MYSQL_CLIENT +void Rand_log_event::print(FILE* file, bool short_form, char* last_db) +{ + char llbuff[22],llbuff2[22]; + if (!short_form) + { + print_header(file); + fprintf(file, "\tRand\n"); + } + fprintf(file, "SET @@RAND_SEED1=%s, @@RAND_SEED2=%s;\n", + llstr(seed1, llbuff),llstr(seed2, llbuff2)); + fflush(file); +} +#endif /* MYSQL_CLIENT */ + + +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) int Rand_log_event::exec_event(struct st_relay_log_info* rli) { - thd->rand.seed1 = (ulong) seed1; - thd->rand.seed2 = (ulong) seed2; - rli->inc_pending(get_event_len()); + thd->rand.seed1= (ulong) seed1; + thd->rand.seed2= (ulong) seed2; + rli->inc_event_relay_log_pos(get_event_len()); return 0; } +#endif /* !MYSQL_CLIENT */ + + +/************************************************************************** + User_var_log_event methods +**************************************************************************/ + +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) +void User_var_log_event::pack_info(Protocol* protocol) +{ + char *buf= 0; + uint val_offset= 4 + name_len; + uint event_len= val_offset; + + if (is_null) + { + buf= my_malloc(val_offset + 5, MYF(MY_WME)); + strmov(buf + val_offset, "NULL"); + event_len= val_offset + 4; + } + else + { + switch (type) { + case REAL_RESULT: + double real_val; + float8get(real_val, val); + buf= my_malloc(val_offset + FLOATING_POINT_BUFFER, MYF(MY_WME)); + event_len+= my_sprintf(buf + val_offset, + (buf + val_offset, "%.14g", real_val)); + break; + case INT_RESULT: + buf= my_malloc(val_offset + 22, MYF(MY_WME)); + event_len= longlong10_to_str(uint8korr(val), buf + val_offset,-10)-buf; + break; + case STRING_RESULT: + /* 15 is for 'COLLATE' and other chars */ + buf= my_malloc(event_len+val_len*2+1+2*MY_CS_NAME_SIZE+15, MYF(MY_WME)); + CHARSET_INFO *cs; + if (!(cs= get_charset(charset_number, MYF(0)))) + { + strmov(buf+val_offset, "???"); + event_len+= 3; + } + else + { + char *p= strxmov(buf + val_offset, "_", cs->csname, " ", NullS); + p= str_to_hex(p, val, val_len); + p= strxmov(p, " COLLATE ", cs->name, NullS); + event_len= p-buf; + } + break; + case ROW_RESULT: + default: + DBUG_ASSERT(1); + return; + } + } + buf[0]= '@'; + buf[1]= '`'; + buf[2+name_len]= '`'; + buf[3+name_len]= '='; + memcpy(buf+2, name, name_len); + protocol->store(buf, event_len, &my_charset_bin); + my_free(buf, MYF(MY_ALLOW_ZERO_PTR)); +} +#endif /* !MYSQL_CLIENT */ + + +User_var_log_event::User_var_log_event(const char* buf, bool old_format) + :Log_event(buf, old_format) +{ + buf+= (old_format) ? 
OLD_HEADER_LEN : LOG_EVENT_HEADER_LEN; + name_len= uint4korr(buf); + name= (char *) buf + UV_NAME_LEN_SIZE; + buf+= UV_NAME_LEN_SIZE + name_len; + is_null= (bool) *buf; + if (is_null) + { + type= STRING_RESULT; + charset_number= my_charset_bin.number; + val_len= 0; + val= 0; + } + else + { + type= (Item_result) buf[UV_VAL_IS_NULL]; + charset_number= uint4korr(buf + UV_VAL_IS_NULL + UV_VAL_TYPE_SIZE); + val_len= uint4korr(buf + UV_VAL_IS_NULL + UV_VAL_TYPE_SIZE + + UV_CHARSET_NUMBER_SIZE); + val= (char *) (buf + UV_VAL_IS_NULL + UV_VAL_TYPE_SIZE + + UV_CHARSET_NUMBER_SIZE + UV_VAL_LEN_SIZE); + } +} + + +int User_var_log_event::write_data(IO_CACHE* file) +{ + char buf[UV_NAME_LEN_SIZE]; + char buf1[UV_VAL_IS_NULL + UV_VAL_TYPE_SIZE + + UV_CHARSET_NUMBER_SIZE + UV_VAL_LEN_SIZE]; + char buf2[8], *pos= buf2; + uint buf1_length; + + int4store(buf, name_len); + + if ((buf1[0]= is_null)) + { + buf1_length= 1; + val_len= 0; + } + else + { + buf1[1]= type; + int4store(buf1 + 2, charset_number); + int4store(buf1 + 2 + UV_CHARSET_NUMBER_SIZE, val_len); + buf1_length= 10; + + switch (type) { + case REAL_RESULT: + float8store(buf2, *(double*) val); + break; + case INT_RESULT: + int8store(buf2, *(longlong*) val); + break; + case STRING_RESULT: + pos= val; + break; + case ROW_RESULT: + default: + DBUG_ASSERT(1); + return 0; + } + } + return (my_b_safe_write(file, (byte*) buf, sizeof(buf)) || + my_b_safe_write(file, (byte*) name, name_len) || + my_b_safe_write(file, (byte*) buf1, buf1_length) || + my_b_safe_write(file, (byte*) pos, val_len)); +} + +/* + User_var_log_event::print() +*/ + +#ifdef MYSQL_CLIENT +void User_var_log_event::print(FILE* file, bool short_form, char* last_db) +{ + if (!short_form) + { + print_header(file); + fprintf(file, "\tUser_var\n"); + } + + fprintf(file, "SET @`"); + my_fwrite(file, (byte*) name, (uint) (name_len), MYF(MY_NABP | MY_WME)); + fprintf(file, "`"); + + if (is_null) + { + fprintf(file, ":=NULL;\n"); + } + else + { + switch (type) { + case REAL_RESULT: + double real_val; + float8get(real_val, val); + fprintf(file, ":=%.14g;\n", real_val); + break; + case INT_RESULT: + char int_buf[22]; + longlong10_to_str(uint8korr(val), int_buf, -10); + fprintf(file, ":=%s;\n", int_buf); + break; + case STRING_RESULT: + { + /* + Let's express the string in hex. That's the most robust way. If we + print it in character form instead, we need to escape it with + character_set_client which we don't know (we will know it in 5.0, but + in 4.1 we don't know it easily when we are printing + User_var_log_event). Explanation why we would need to bother with + character_set_client (quoting Bar): + > Note, the parser doesn't switch to another unescaping mode after + > it has met a character set introducer. + > For example, if an SJIS client says something like: + > SET @a= _ucs2 \0a\0b' + > the string constant is still unescaped according to SJIS, not + > according to UCS2. + */ + char *hex_str; + CHARSET_INFO *cs; + + if (!(hex_str= (char *)my_alloca(2*val_len+1+2))) // 2 hex digits / byte + break; // no error, as we are 'void' + str_to_hex(hex_str, val, val_len); + /* + For proper behaviour when mysqlbinlog|mysql, we need to explicitely + specify the variable's collation. It will however cause problems when + people want to mysqlbinlog|mysql into another server not supporting the + character set. But there's not much to do about this and it's unlikely. 
+ */ + if (!(cs= get_charset(charset_number, MYF(0)))) + /* + Generate an unusable command (=> syntax error) is probably the best + thing we can do here. + */ + fprintf(file, ":=???;\n"); + else + fprintf(file, ":=_%s %s COLLATE `%s`;\n", cs->csname, hex_str, cs->name); + my_afree(hex_str); + } + break; + case ROW_RESULT: + default: + DBUG_ASSERT(1); + return; + } + } + fflush(file); +} +#endif + + +/* + User_var_log_event::exec_event() +*/ + +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) +int User_var_log_event::exec_event(struct st_relay_log_info* rli) +{ + Item *it= 0; + CHARSET_INFO *charset; + if (!(charset= get_charset(charset_number, MYF(MY_WME)))) + return 1; + LEX_STRING user_var_name; + user_var_name.str= name; + user_var_name.length= name_len; + double real_val; + longlong int_val; + + if (is_null) + { + it= new Item_null(); + } + else + { + switch (type) { + case REAL_RESULT: + float8get(real_val, val); + it= new Item_real(real_val); + val= (char*) &real_val; // Pointer to value in native format + val_len= 8; + break; + case INT_RESULT: + int_val= (longlong) uint8korr(val); + it= new Item_int(int_val); + val= (char*) &int_val; // Pointer to value in native format + val_len= 8; + break; + case STRING_RESULT: + it= new Item_string(val, val_len, charset); + break; + case ROW_RESULT: + default: + DBUG_ASSERT(1); + return 0; + } + } + Item_func_set_user_var e(user_var_name, it); + /* + Item_func_set_user_var can't substitute something else on its place => + 0 can be passed as last argument (reference on item) + */ + e.fix_fields(thd, 0, 0); + /* + A variable can just be considered as a table with + a single record and with a single column. Thus, like + a column value, it could always have IMPLICIT derivation. + */ + e.update_hash(val, val_len, type, charset, DERIVATION_IMPLICIT); + free_root(thd->mem_root,0); + + rli->inc_event_relay_log_pos(get_event_len()); + return 0; +} +#endif /* !MYSQL_CLIENT */ + + +/************************************************************************** + Slave_log_event methods +**************************************************************************/ + +#ifdef HAVE_REPLICATION +#ifdef MYSQL_CLIENT +void Unknown_log_event::print(FILE* file, bool short_form, char* last_db) +{ + if (short_form) + return; + print_header(file); + fputc('\n', file); + fprintf(file, "# %s", "Unknown event\n"); +} +#endif + +#ifndef MYSQL_CLIENT +void Slave_log_event::pack_info(Protocol *protocol) +{ + char buf[256+HOSTNAME_LENGTH], *pos; + pos= strmov(buf, "host="); + pos= strnmov(pos, master_host, HOSTNAME_LENGTH); + pos= strmov(pos, ",port="); + pos= int10_to_str((long) master_port, pos, 10); + pos= strmov(pos, ",log="); + pos= strmov(pos, master_log); + pos= strmov(pos, ",pos="); + pos= longlong10_to_str(master_pos, pos, 10); + protocol->store(buf, pos-buf, &my_charset_bin); +} +#endif /* !MYSQL_CLIENT */ + + +#ifndef MYSQL_CLIENT +Slave_log_event::Slave_log_event(THD* thd_arg, + struct st_relay_log_info* rli) + :Log_event(thd_arg, 0, 0), mem_pool(0), master_host(0) +{ + DBUG_ENTER("Slave_log_event"); + if (!rli->inited) // QQ When can this happen ? 
+ DBUG_VOID_RETURN; + + MASTER_INFO* mi = rli->mi; + // TODO: re-write this better without holding both locks at the same time + pthread_mutex_lock(&mi->data_lock); + pthread_mutex_lock(&rli->data_lock); + master_host_len = strlen(mi->host); + master_log_len = strlen(rli->group_master_log_name); + // on OOM, just do not initialize the structure and print the error + if ((mem_pool = (char*)my_malloc(get_data_size() + 1, + MYF(MY_WME)))) + { + master_host = mem_pool + SL_MASTER_HOST_OFFSET ; + memcpy(master_host, mi->host, master_host_len + 1); + master_log = master_host + master_host_len + 1; + memcpy(master_log, rli->group_master_log_name, master_log_len + 1); + master_port = mi->port; + master_pos = rli->group_master_log_pos; + DBUG_PRINT("info", ("master_log: %s pos: %d", master_log, + (ulong) master_pos)); + } + else + sql_print_error("Out of memory while recording slave event"); + pthread_mutex_unlock(&rli->data_lock); + pthread_mutex_unlock(&mi->data_lock); + DBUG_VOID_RETURN; +} +#endif /* !MYSQL_CLIENT */ + + +Slave_log_event::~Slave_log_event() +{ + my_free(mem_pool, MYF(MY_ALLOW_ZERO_PTR)); +} + + +#ifdef MYSQL_CLIENT +void Slave_log_event::print(FILE* file, bool short_form, char* last_db) +{ + char llbuff[22]; + if (short_form) + return; + print_header(file); + fputc('\n', file); + fprintf(file, "\ +Slave: master_host: '%s' master_port: %d master_log: '%s' master_pos: %s\n", + master_host, master_port, master_log, llstr(master_pos, llbuff)); +} +#endif /* MYSQL_CLIENT */ + + +int Slave_log_event::get_data_size() +{ + return master_host_len + master_log_len + 1 + SL_MASTER_HOST_OFFSET; +} + + +int Slave_log_event::write_data(IO_CACHE* file) +{ + int8store(mem_pool + SL_MASTER_POS_OFFSET, master_pos); + int2store(mem_pool + SL_MASTER_PORT_OFFSET, master_port); + // log and host are already there + return my_b_safe_write(file, (byte*)mem_pool, get_data_size()); +} + + +void Slave_log_event::init_from_mem_pool(int data_size) +{ + master_pos = uint8korr(mem_pool + SL_MASTER_POS_OFFSET); + master_port = uint2korr(mem_pool + SL_MASTER_PORT_OFFSET); + master_host = mem_pool + SL_MASTER_HOST_OFFSET; + master_host_len = strlen(master_host); + // safety + master_log = master_host + master_host_len + 1; + if (master_log > mem_pool + data_size) + { + master_host = 0; + return; + } + master_log_len = strlen(master_log); +} + + +Slave_log_event::Slave_log_event(const char* buf, int event_len) + :Log_event(buf,0),mem_pool(0),master_host(0) +{ + event_len -= LOG_EVENT_HEADER_LEN; + if (event_len < 0) + return; + if (!(mem_pool = (char*) my_malloc(event_len + 1, MYF(MY_WME)))) + return; + memcpy(mem_pool, buf + LOG_EVENT_HEADER_LEN, event_len); + mem_pool[event_len] = 0; + init_from_mem_pool(event_len); +} + + +#ifndef MYSQL_CLIENT int Slave_log_event::exec_event(struct st_relay_log_info* rli) { if (mysql_bin_log.is_open()) mysql_bin_log.write(this); return Log_event::exec_event(rli); } +#endif /* !MYSQL_CLIENT */ + + +/************************************************************************** + Stop_log_event methods +**************************************************************************/ +/* + Stop_log_event::print() +*/ + +#ifdef MYSQL_CLIENT +void Stop_log_event::print(FILE* file, bool short_form, char* last_db) +{ + if (short_form) + return; + + print_header(file); + fprintf(file, "\tStop\n"); + fflush(file); +} +#endif /* MYSQL_CLIENT */ + + +/* + Stop_log_event::exec_event() + + The master stopped. 
+ We used to clean up all temporary tables but this is useless as, as the + master has shut down properly, it has written all DROP TEMPORARY TABLE and DO + RELEASE_LOCK (prepared statements' deletion is TODO). + We used to clean up slave_load_tmpdir, but this is useless as it has been + cleared at the end of LOAD DATA INFILE. + So we have nothing to do here. + The place were we must do this cleaning is in Start_log_event::exec_event(), + not here. Because if we come here, the master was sane. +*/ + +#ifndef MYSQL_CLIENT +int Stop_log_event::exec_event(struct st_relay_log_info* rli) +{ + /* + We do not want to update master_log pos because we get a rotate event + before stop, so by now group_master_log_name is set to the next log. + If we updated it, we will have incorrect master coordinates and this + could give false triggers in MASTER_POS_WAIT() that we have reached + the target position when in fact we have not. + */ + rli->inc_group_relay_log_pos(get_event_len(), 0); + flush_relay_log_info(rli); + return 0; +} +#endif /* !MYSQL_CLIENT */ +#endif /* HAVE_REPLICATION */ + + +/************************************************************************** + Create_file_log_event methods +**************************************************************************/ + +/* + Create_file_log_event ctor +*/ + +#ifndef MYSQL_CLIENT +Create_file_log_event:: +Create_file_log_event(THD* thd_arg, sql_exchange* ex, + const char* db_arg, const char* table_name_arg, + List<Item>& fields_arg, enum enum_duplicates handle_dup, + bool ignore, + char* block_arg, uint block_len_arg, bool using_trans) + :Load_log_event(thd_arg,ex,db_arg,table_name_arg,fields_arg,handle_dup, ignore, + using_trans), + fake_base(0), block(block_arg), event_buf(0), block_len(block_len_arg), + file_id(thd_arg->file_id = mysql_bin_log.next_file_id()) +{ + DBUG_ENTER("Create_file_log_event"); + sql_ex.force_new_format(); + DBUG_VOID_RETURN; +} +#endif /* !MYSQL_CLIENT */ + + +/* + Create_file_log_event::write_data_body() +*/ + +int Create_file_log_event::write_data_body(IO_CACHE* file) +{ + int res; + if ((res = Load_log_event::write_data_body(file)) || fake_base) + return res; + return (my_b_safe_write(file, (byte*) "", 1) || + my_b_safe_write(file, (byte*) block, block_len)); +} + + +/* + Create_file_log_event::write_data_header() +*/ + +int Create_file_log_event::write_data_header(IO_CACHE* file) +{ + int res; + if ((res = Load_log_event::write_data_header(file)) || fake_base) + return res; + byte buf[CREATE_FILE_HEADER_LEN]; + int4store(buf + CF_FILE_ID_OFFSET, file_id); + return my_b_safe_write(file, buf, CREATE_FILE_HEADER_LEN); +} + + +/* + Create_file_log_event::write_base() +*/ + +int Create_file_log_event::write_base(IO_CACHE* file) +{ + int res; + fake_base = 1; // pretend we are Load event + res = write(file); + fake_base = 0; + return res; +} + + +/* + Create_file_log_event ctor +*/ + +Create_file_log_event::Create_file_log_event(const char* buf, int len, + bool old_format) + :Load_log_event(buf,0,old_format),fake_base(0),block(0),inited_from_old(0) +{ + int block_offset; + DBUG_ENTER("Create_file_log_event"); + + /* + We must make copy of 'buf' as this event may have to live over a + rotate log entry when used in mysqlbinlog + */ + if (!(event_buf= my_memdup((byte*) buf, len, MYF(MY_WME))) || + (copy_log_event(event_buf, len, old_format))) + DBUG_VOID_RETURN; + + if (!old_format) + { + file_id = uint4korr(buf + LOG_EVENT_HEADER_LEN + + + LOAD_HEADER_LEN + CF_FILE_ID_OFFSET); + // + 1 for \0 terminating fname + block_offset = 
(LOG_EVENT_HEADER_LEN + Load_log_event::get_data_size() + + CREATE_FILE_HEADER_LEN + 1); + if (len < block_offset) + return; + block = (char*)buf + block_offset; + block_len = len - block_offset; + } + else + { + sql_ex.force_new_format(); + inited_from_old = 1; + } + DBUG_VOID_RETURN; +} + + +/* + Create_file_log_event::print() +*/ + +#ifdef MYSQL_CLIENT +void Create_file_log_event::print(FILE* file, bool short_form, + char* last_db, bool enable_local) +{ + if (short_form) + { + if (enable_local && check_fname_outside_temp_buf()) + Load_log_event::print(file, 1, last_db); + return; + } + + if (enable_local) + { + Load_log_event::print(file, short_form, last_db, !check_fname_outside_temp_buf()); + /* + That one is for "file_id: etc" below: in mysqlbinlog we want the #, in + SHOW BINLOG EVENTS we don't. + */ + fprintf(file, "#"); + } + + fprintf(file, " file_id: %d block_len: %d\n", file_id, block_len); +} + + +void Create_file_log_event::print(FILE* file, bool short_form, + char* last_db) +{ + print(file,short_form,last_db,0); +} +#endif /* MYSQL_CLIENT */ + + +/* + Create_file_log_event::pack_info() +*/ + +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) +void Create_file_log_event::pack_info(Protocol *protocol) +{ + char buf[NAME_LEN*2 + 30 + 21*2], *pos; + pos= strmov(buf, "db="); + memcpy(pos, db, db_len); + pos= strmov(pos + db_len, ";table="); + memcpy(pos, table_name, table_name_len); + pos= strmov(pos + table_name_len, ";file_id="); + pos= int10_to_str((long) file_id, pos, 10); + pos= strmov(pos, ";block_len="); + pos= int10_to_str((long) block_len, pos, 10); + protocol->store(buf, (uint) (pos-buf), &my_charset_bin); +} +#endif /* defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) */ + + +/* + Create_file_log_event::exec_event() +*/ + +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) int Create_file_log_event::exec_event(struct st_relay_log_info* rli) { char proc_info[17+FN_REFLEN+10], *fname_buf= proc_info+17; @@ -2378,7 +3020,9 @@ int Create_file_log_event::exec_event(struct st_relay_log_info* rli) if (write_base(&file)) { strmov(p, ".info"); // to have it right in the error message - slave_print_error(rli,my_errno, "Error in Create_file event: could not write to file '%s'", fname_buf); + slave_print_error(rli,my_errno, + "Error in Create_file event: could not write to file '%s'", + fname_buf); goto err; } end_io_cache(&file); @@ -2408,24 +3052,106 @@ err: thd->proc_info= 0; return error ? 
1 : Log_event::exec_event(rli); } +#endif /* defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) */ -int Delete_file_log_event::exec_event(struct st_relay_log_info* rli) + +/************************************************************************** + Append_block_log_event methods +**************************************************************************/ + +/* + Append_block_log_event ctor +*/ + +#ifndef MYSQL_CLIENT +Append_block_log_event::Append_block_log_event(THD* thd_arg, const char* db_arg, + char* block_arg, + uint block_len_arg, + bool using_trans) + :Log_event(thd_arg,0, using_trans), block(block_arg), + block_len(block_len_arg), file_id(thd_arg->file_id), db(db_arg) { - char fname[FN_REFLEN+10]; - char *p= slave_load_file_stem(fname, file_id, server_id); - memcpy(p, ".data", 6); - (void) my_delete(fname, MYF(MY_WME)); - memcpy(p, ".info", 6); - (void) my_delete(fname, MYF(MY_WME)); - return Log_event::exec_event(rli); } +#endif + + +/* + Append_block_log_event ctor +*/ + +Append_block_log_event::Append_block_log_event(const char* buf, int len) + :Log_event(buf, 0),block(0) +{ + DBUG_ENTER("Append_block_log_event"); + if ((uint)len < APPEND_BLOCK_EVENT_OVERHEAD) + DBUG_VOID_RETURN; + file_id = uint4korr(buf + LOG_EVENT_HEADER_LEN + AB_FILE_ID_OFFSET); + block = (char*)buf + APPEND_BLOCK_EVENT_OVERHEAD; + block_len = len - APPEND_BLOCK_EVENT_OVERHEAD; + DBUG_VOID_RETURN; +} + + +/* + Append_block_log_event::write_data() +*/ +int Append_block_log_event::write_data(IO_CACHE* file) +{ + byte buf[APPEND_BLOCK_HEADER_LEN]; + int4store(buf + AB_FILE_ID_OFFSET, file_id); + return (my_b_safe_write(file, buf, APPEND_BLOCK_HEADER_LEN) || + my_b_safe_write(file, (byte*) block, block_len)); +} + + +/* + Append_block_log_event::print() +*/ + +#ifdef MYSQL_CLIENT +void Append_block_log_event::print(FILE* file, bool short_form, + char* last_db) +{ + if (short_form) + return; + print_header(file); + fputc('\n', file); + fprintf(file, "#Append_block: file_id: %d block_len: %d\n", + file_id, block_len); +} +#endif /* MYSQL_CLIENT */ + + +/* + Append_block_log_event::pack_info() +*/ + +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) +void Append_block_log_event::pack_info(Protocol *protocol) +{ + char buf[256]; + uint length; + length= (uint) my_sprintf(buf, + (buf, ";file_id=%u;block_len=%u", file_id, + block_len)); + protocol->store(buf, length, &my_charset_bin); +} +#endif /* defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) */ + + +/* + Append_block_log_event::exec_event() +*/ + +#if defined( HAVE_REPLICATION) && !defined(MYSQL_CLIENT) int Append_block_log_event::exec_event(struct st_relay_log_info* rli) { char proc_info[17+FN_REFLEN+10], *fname= proc_info+17; char *p= slave_load_file_stem(fname, file_id, server_id); int fd; int error = 1; + DBUG_ENTER("Append_block_log_event::exec_event"); memcpy(p, ".data", 6); strnmov(proc_info, "Making temp file ", 17); // no end 0 @@ -2446,9 +3172,177 @@ err: if (fd >= 0) my_close(fd, MYF(0)); thd->proc_info= 0; - return error ? error : Log_event::exec_event(rli); + DBUG_RETURN(error ? 
error : Log_event::exec_event(rli)); +} +#endif + + +/************************************************************************** + Delete_file_log_event methods +**************************************************************************/ + +/* + Delete_file_log_event ctor +*/ + +#ifndef MYSQL_CLIENT +Delete_file_log_event::Delete_file_log_event(THD *thd_arg, const char* db_arg, + bool using_trans) + :Log_event(thd_arg, 0, using_trans), file_id(thd_arg->file_id), db(db_arg) +{ +} +#endif + +/* + Delete_file_log_event ctor +*/ + +Delete_file_log_event::Delete_file_log_event(const char* buf, int len) + :Log_event(buf, 0),file_id(0) +{ + if ((uint)len < DELETE_FILE_EVENT_OVERHEAD) + return; + file_id = uint4korr(buf + LOG_EVENT_HEADER_LEN + AB_FILE_ID_OFFSET); +} + + +/* + Delete_file_log_event::write_data() +*/ + +int Delete_file_log_event::write_data(IO_CACHE* file) +{ + byte buf[DELETE_FILE_HEADER_LEN]; + int4store(buf + DF_FILE_ID_OFFSET, file_id); + return my_b_safe_write(file, buf, DELETE_FILE_HEADER_LEN); +} + + +/* + Delete_file_log_event::print() +*/ + +#ifdef MYSQL_CLIENT +void Delete_file_log_event::print(FILE* file, bool short_form, + char* last_db) +{ + if (short_form) + return; + print_header(file); + fputc('\n', file); + fprintf(file, "#Delete_file: file_id=%u\n", file_id); +} +#endif /* MYSQL_CLIENT */ + +/* + Delete_file_log_event::pack_info() +*/ + +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) +void Delete_file_log_event::pack_info(Protocol *protocol) +{ + char buf[64]; + uint length; + length= (uint) my_sprintf(buf, (buf, ";file_id=%u", (uint) file_id)); + protocol->store(buf, (int32) length, &my_charset_bin); +} +#endif + +/* + Delete_file_log_event::exec_event() +*/ + +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) +int Delete_file_log_event::exec_event(struct st_relay_log_info* rli) +{ + char fname[FN_REFLEN+10]; + char *p= slave_load_file_stem(fname, file_id, server_id); + memcpy(p, ".data", 6); + (void) my_delete(fname, MYF(MY_WME)); + memcpy(p, ".info", 6); + (void) my_delete(fname, MYF(MY_WME)); + return Log_event::exec_event(rli); +} +#endif /* defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) */ + + +/************************************************************************** + Execute_load_log_event methods +**************************************************************************/ + +/* + Execute_load_log_event ctor +*/ + +#ifndef MYSQL_CLIENT +Execute_load_log_event::Execute_load_log_event(THD *thd_arg, const char* db_arg, + bool using_trans) + :Log_event(thd_arg, 0, using_trans), file_id(thd_arg->file_id), db(db_arg) +{ +} +#endif + + +/* + Execute_load_log_event ctor +*/ + +Execute_load_log_event::Execute_load_log_event(const char* buf, int len) + :Log_event(buf, 0), file_id(0) +{ + if ((uint)len < EXEC_LOAD_EVENT_OVERHEAD) + return; + file_id = uint4korr(buf + LOG_EVENT_HEADER_LEN + EL_FILE_ID_OFFSET); +} + + +/* + Execute_load_log_event::write_data() +*/ + +int Execute_load_log_event::write_data(IO_CACHE* file) +{ + byte buf[EXEC_LOAD_HEADER_LEN]; + int4store(buf + EL_FILE_ID_OFFSET, file_id); + return my_b_safe_write(file, buf, EXEC_LOAD_HEADER_LEN); } + +/* + Execute_load_log_event::print() +*/ + +#ifdef MYSQL_CLIENT +void Execute_load_log_event::print(FILE* file, bool short_form, + char* last_db) +{ + if (short_form) + return; + print_header(file); + fputc('\n', file); + fprintf(file, "#Exec_load: file_id=%d\n", + file_id); +} +#endif + +/* + Execute_load_log_event::pack_info() +*/ + +#if defined(HAVE_REPLICATION) && 
!defined(MYSQL_CLIENT) +void Execute_load_log_event::pack_info(Protocol *protocol) +{ + char buf[64]; + uint length; + length= (uint) my_sprintf(buf, (buf, ";file_id=%u", (uint) file_id)); + protocol->store(buf, (int32) length, &my_charset_bin); +} + + +/* + Execute_load_log_event::exec_event() +*/ + int Execute_load_log_event::exec_event(struct st_relay_log_info* rli) { char fname[FN_REFLEN+10]; @@ -2538,4 +3432,88 @@ err: return error ? error : Log_event::exec_event(rli); } -#endif /* !MYSQL_CLIENT */ +#endif /* defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) */ + + +/************************************************************************** + sql_ex_info methods +**************************************************************************/ + +/* + sql_ex_info::write_data() +*/ + +int sql_ex_info::write_data(IO_CACHE* file) +{ + if (new_format()) + { + return (write_str(file, field_term, field_term_len) || + write_str(file, enclosed, enclosed_len) || + write_str(file, line_term, line_term_len) || + write_str(file, line_start, line_start_len) || + write_str(file, escaped, escaped_len) || + my_b_safe_write(file,(byte*) &opt_flags,1)); + } + else + { + old_sql_ex old_ex; + old_ex.field_term= *field_term; + old_ex.enclosed= *enclosed; + old_ex.line_term= *line_term; + old_ex.line_start= *line_start; + old_ex.escaped= *escaped; + old_ex.opt_flags= opt_flags; + old_ex.empty_flags=empty_flags; + return my_b_safe_write(file, (byte*) &old_ex, sizeof(old_ex)); + } +} + + +/* + sql_ex_info::init() +*/ + +char* sql_ex_info::init(char* buf,char* buf_end,bool use_new_format) +{ + cached_new_format = use_new_format; + if (use_new_format) + { + empty_flags=0; + /* + The code below assumes that buf will not disappear from + under our feet during the lifetime of the event. This assumption + holds true in the slave thread if the log is in new format, but is not + the case when we have old format because we will be reusing net buffer + to read the actual file before we write out the Create_file event. 
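/*
  A sketch for illustration: write_str() and read_str(), used by
  sql_ex_info::write_data() above and sql_ex_info::init() below, are not part
  of this hunk. The sketch assumes the simplest encoding that fits the
  new-format needs -- one length byte followed by the bytes of the LOAD DATA
  delimiter string -- and uses illustrative signatures, not the real ones.
*/
static int write_str_sketch(IO_CACHE *file, const char *str, uchar length)
{
  return (my_b_safe_write(file, (byte*) &length, 1) ||
          my_b_safe_write(file, (byte*) str, length));
}

/*
  Returns 1 if the string would run past buf_end, else sets str/length and
  advances *buf past the string.
*/
static int read_str_sketch(const char **buf, const char *buf_end,
                           const char **str, uint *length)
{
  if (*buf >= buf_end)
    return 1;                                   /* no room for the length byte */
  *length= (uchar) **buf;
  if (*buf + 1 + *length > buf_end)
    return 1;                                   /* truncated event             */
  *str= *buf + 1;
  *buf+= 1 + *length;
  return 0;
}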
+ */ + if (read_str(buf, buf_end, field_term, field_term_len) || + read_str(buf, buf_end, enclosed, enclosed_len) || + read_str(buf, buf_end, line_term, line_term_len) || + read_str(buf, buf_end, line_start, line_start_len) || + read_str(buf, buf_end, escaped, escaped_len)) + return 0; + opt_flags = *buf++; + } + else + { + field_term_len= enclosed_len= line_term_len= line_start_len= escaped_len=1; + field_term = buf++; // Use first byte in string + enclosed= buf++; + line_term= buf++; + line_start= buf++; + escaped= buf++; + opt_flags = *buf++; + empty_flags= *buf++; + if (empty_flags & FIELD_TERM_EMPTY) + field_term_len=0; + if (empty_flags & ENCLOSED_EMPTY) + enclosed_len=0; + if (empty_flags & LINE_TERM_EMPTY) + line_term_len=0; + if (empty_flags & LINE_START_EMPTY) + line_start_len=0; + if (empty_flags & ESCAPED_EMPTY) + escaped_len=0; + } + return buf; +} diff --git a/sql/log_event.h b/sql/log_event.h index 2eaaab260fc..6c4e65f7460 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -22,7 +22,7 @@ #undef write // remove pthread.h macro definition, conflict with write() class member #endif -#if defined(__GNUC__) && !defined(MYSQL_CLIENT) +#if defined(USE_PRAGMA_INTERFACE) && !defined(MYSQL_CLIENT) #pragma interface /* gcc class implementation */ #endif @@ -34,15 +34,21 @@ #define LOG_READ_TOO_LARGE -7 #define LOG_EVENT_OFFSET 4 + #define BINLOG_VERSION 3 /* We could have used SERVER_VERSION_LENGTH, but this introduces an obscure dependency - if somebody decided to change SERVER_VERSION_LENGTH - this would have broke the replication protocol + this would have broken the replication protocol */ #define ST_SERVER_VER_LEN 50 +/* + These are flags and structs to handle all the LOAD DATA INFILE options (LINES + TERMINATED etc). +*/ + #define DUMPFILE_FLAG 0x1 #define OPT_ENCLOSED_FLAG 0x2 #define REPLACE_FLAG 0x4 @@ -54,6 +60,11 @@ #define LINE_START_EMPTY 0x8 #define ESCAPED_EMPTY 0x10 +/***************************************************************************** + + old_sql_ex struct + + ****************************************************************************/ struct old_sql_ex { char field_term; @@ -67,6 +78,11 @@ struct old_sql_ex #define NUM_LOAD_DELIM_STRS 5 +/***************************************************************************** + + sql_ex_info struct + + ****************************************************************************/ struct sql_ex_info { char* field_term; @@ -99,17 +115,29 @@ struct sql_ex_info } }; -/* - Binary log consists of events. Each event has a fixed length header, - followed by possibly variable ( depending on the type of event) length - data body. The data body consists of an optional fixed length segment - (post-header), and an optional variable length segment. See #defines and - comments below for the format specifics -*/ +/***************************************************************************** + + MySQL Binary Log + + This log consists of events. Each event has a fixed-length header, + possibly followed by a variable length data body. + + The data body consists of an optional fixed length segment (post-header) + and an optional variable length segment. + + See the #defines below for the format specifics. + + The events which really update data are Query_log_event and + Load_log_event/Create_file_log_event/Execute_load_log_event (these 3 act + together to replicate LOAD DATA INFILE, with the help of + Append_block_log_event which prepares temporary files to load into the table). 
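/*
  To make the LOAD DATA choreography described above concrete, here is the
  slave-side file handling reduced to a sketch. The real code (see the
  exec_event() methods earlier in log_event.cc) names the temporary files
  through slave_load_file_stem(), which is not shown here; "stem" below is a
  stand-in for whatever that helper produces.

    master binlog:  Create_file -> Append_block (0..n times) -> Execute_load
                    (or Delete_file instead, when the LOAD failed on the master)
*/
#include <cstdio>
#include <string>

void replay_load_sketch(const std::string &stem)
{
  const std::string info_name= stem + ".info";  /* serialized Load event    */
  const std::string data_name= stem + ".data";  /* reassembled file content */

  /* Create_file: create both files; .info receives write_base()'s output,
     .data receives the first block of the original file. */
  if (std::FILE *f= std::fopen(info_name.c_str(), "wb"))
    std::fclose(f);
  if (std::FILE *f= std::fopen(data_name.c_str(), "wb"))
    std::fclose(f);

  /* Append_block (repeated as needed): append the next block to .data. */
  if (std::FILE *f= std::fopen(data_name.c_str(), "ab"))
    std::fclose(f);

  /* Execute_load: re-read the Load event from .info, run it against .data,
     then remove both files. Delete_file only performs the removal. */
  std::remove(info_name.c_str());
  std::remove(data_name.c_str());
}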
+ + ****************************************************************************/ + +#define LOG_EVENT_HEADER_LEN 19 /* the fixed header length */ +#define OLD_HEADER_LEN 13 /* the fixed header length in 3.23 */ /* event-specific post-header sizes */ -#define LOG_EVENT_HEADER_LEN 19 -#define OLD_HEADER_LEN 13 #define QUERY_HEADER_LEN (4 + 4 + 1 + 2) #define LOAD_HEADER_LEN (4 + 4 + 4 + 1 +1 + 4) #define START_HEADER_LEN (2 + ST_SERVER_VER_LEN + 4) @@ -119,7 +147,10 @@ struct sql_ex_info #define EXEC_LOAD_HEADER_LEN 4 #define DELETE_FILE_HEADER_LEN 4 -/* event header offsets */ +/* + Event header offsets; + these point to places inside the fixed header. +*/ #define EVENT_TYPE_OFFSET 4 #define SERVER_ID_OFFSET 5 @@ -133,7 +164,7 @@ struct sql_ex_info #define ST_SERVER_VER_OFFSET 2 #define ST_CREATED_OFFSET (ST_SERVER_VER_OFFSET + ST_SERVER_VER_LEN) -/* slave event post-header */ +/* slave event post-header (this event is never written) */ #define SL_MASTER_PORT_OFFSET 8 #define SL_MASTER_POS_OFFSET 0 @@ -157,6 +188,14 @@ struct sql_ex_info #define RAND_SEED1_OFFSET 0 #define RAND_SEED2_OFFSET 8 +/* User_var event post-header */ + +#define UV_VAL_LEN_SIZE 4 +#define UV_VAL_IS_NULL 1 +#define UV_VAL_TYPE_SIZE 1 +#define UV_NAME_LEN_SIZE 4 +#define UV_CHARSET_NUMBER_SIZE 4 + /* Load event post-header */ #define L_THREAD_ID_OFFSET 0 @@ -173,14 +212,20 @@ struct sql_ex_info #define R_POS_OFFSET 0 #define R_IDENT_OFFSET 8 +/* CF to DF handle LOAD DATA INFILE */ + +/* CF = "Create File" */ #define CF_FILE_ID_OFFSET 0 #define CF_DATA_OFFSET CREATE_FILE_HEADER_LEN +/* AB = "Append Block" */ #define AB_FILE_ID_OFFSET 0 #define AB_DATA_OFFSET APPEND_BLOCK_HEADER_LEN +/* EL = "Execute Load" */ #define EL_FILE_ID_OFFSET 0 +/* DF = "Delete File" */ #define DF_FILE_ID_OFFSET 0 #define QUERY_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+QUERY_HEADER_LEN) @@ -193,18 +238,52 @@ struct sql_ex_info #define EXEC_LOAD_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+EXEC_LOAD_HEADER_LEN) #define APPEND_BLOCK_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+APPEND_BLOCK_HEADER_LEN) - +/* 4 bytes which all binlogs should begin with */ #define BINLOG_MAGIC "\xfe\x62\x69\x6e" -#define LOG_EVENT_TIME_F 0x1 -#define LOG_EVENT_FORCED_ROTATE_F 0x2 +/* + The 2 flags below were useless : + - the first one was never set + - the second one was set in all Rotate events on the master, but not used for + anything useful. + So they are now removed and their place may later be reused for other + flags. Then one must remember that Rotate events in 4.x have + LOG_EVENT_FORCED_ROTATE_F set, so one should not rely on the value of the + replacing flag when reading a Rotate event. + I keep the defines here just to remember what they were. +*/ +#ifdef TO_BE_REMOVED +#define LOG_EVENT_TIME_F 0x1 +#define LOG_EVENT_FORCED_ROTATE_F 0x2 +#endif +/* + If the query depends on the thread (for example: TEMPORARY TABLE). + Currently this is used by mysqlbinlog to know it must print + SET @@PSEUDO_THREAD_ID=xx; before the query (it would not hurt to print it + for every query but this would be slow). +*/ +#define LOG_EVENT_THREAD_SPECIFIC_F 0x4 + +/* + Suppress the generation of 'USE' statements before the actual + statement. This flag should be set for any events that does not need + the current database set to function correctly. Most notable cases + are 'CREATE DATABASE' and 'DROP DATABASE'. 
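/*
  For illustration, how the LOG_EVENT_HEADER_LEN (19) bytes of fixed header
  defined above break down. EVENT_TYPE_OFFSET and SERVER_ID_OFFSET are taken
  from the defines above; the remaining offsets follow from the field widths.
  The 13-byte OLD_HEADER_LEN of 3.23 is the same layout without the last two
  fields (log_pos and flags).
*/
struct Fixed_header_sketch
{
  ulong  when;            /* bytes  0..3   creation time on the master    */
  uchar  type;            /* byte   4      Log_event_type                 */
  uint32 server_id;       /* bytes  5..8   originating server             */
  ulong  event_len;       /* bytes  9..12  total length, header included  */
  ulong  log_pos;         /* bytes 13..16  offset in the binlog           */
  uint16 flags;           /* bytes 17..18  LOG_EVENT_*_F bits             */
};

void decode_fixed_header_sketch(const char *buf, Fixed_header_sketch *h)
{
  h->when=      uint4korr(buf);
  h->type=      (uchar) buf[EVENT_TYPE_OFFSET];      /* buf[4] */
  h->server_id= uint4korr(buf + SERVER_ID_OFFSET);   /* buf+5  */
  h->event_len= uint4korr(buf + 9);
  h->log_pos=   uint4korr(buf + 13);                 /* absent in 3.23 events */
  h->flags=     uint2korr(buf + 17);                 /* absent in 3.23 events */
}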
+ + This flags should only be used in exceptional circumstances, since + it introduce a significant change in behaviour regarding the + replication logic together with the flags --binlog-do-db and + --replicated-do-db. + */ +#define LOG_EVENT_SUPPRESS_USE_F 0x8 enum Log_event_type { - UNKNOWN_EVENT = 0, START_EVENT = 1, QUERY_EVENT =2, STOP_EVENT=3, - ROTATE_EVENT = 4, INTVAR_EVENT=5, LOAD_EVENT=6, SLAVE_EVENT=7, - CREATE_FILE_EVENT=8, APPEND_BLOCK_EVENT=9, EXEC_LOAD_EVENT=10, - DELETE_FILE_EVENT=11, NEW_LOAD_EVENT=12, RAND_EVENT=13 + UNKNOWN_EVENT= 0, START_EVENT= 1, QUERY_EVENT= 2, STOP_EVENT= 3, + ROTATE_EVENT= 4, INTVAR_EVENT= 5, LOAD_EVENT=6, SLAVE_EVENT= 7, + CREATE_FILE_EVENT= 8, APPEND_BLOCK_EVENT= 9, EXEC_LOAD_EVENT= 10, + DELETE_FILE_EVENT= 11, NEW_LOAD_EVENT= 12, RAND_EVENT= 13, + USER_VAR_EVENT= 14 }; enum Int_event_type @@ -221,33 +300,94 @@ class THD; struct st_relay_log_info; +/***************************************************************************** + + Log_event class + + This is the abstract base class for binary log events. + + ****************************************************************************/ class Log_event { public: + /* + The offset in the log where this event originally appeared (it is preserved + in relay logs, making SHOW SLAVE STATUS able to print coordinates of the + event in the master's binlog). Note: when a transaction is written by the + master to its binlog (wrapped in BEGIN/COMMIT) the log_pos of all the + queries it contains is the one of the BEGIN (this way, when one does SHOW + SLAVE STATUS it sees the offset of the BEGIN, which is logical as rollback + may occur), except the COMMIT query which has its real offset. + */ my_off_t log_pos; - char *temp_buf; + /* + A temp buffer for read_log_event; it is later analysed according to the + event's type, and its content is distributed in the event-specific fields. + */ + char *temp_buf; + /* + Timestamp on the master(for debugging and replication of NOW()/TIMESTAMP). + It is important for queries and LOAD DATA INFILE. This is set at the event's + creation time, except for Query and Load (et al.) events where this is set + at the query's execution time, which guarantees good replication (otherwise, + we could have a query and its event with different timestamps). + */ time_t when; + /* The number of seconds the query took to run on the master. */ ulong exec_time; + /* + The master's server id (is preserved in the relay log; used to prevent from + infinite loops in circular replication). + */ uint32 server_id; uint cached_event_len; + + /* + Some 16 flags. Only one is really used now; look above for + LOG_EVENT_TIME_F, LOG_EVENT_FORCED_ROTATE_F, + LOG_EVENT_THREAD_SPECIFIC_F, and LOG_EVENT_SUPPRESS_USE_F for + notes. + */ uint16 flags; + bool cache_stmt; #ifndef MYSQL_CLIENT THD* thd; Log_event(THD* thd_arg, uint16 flags_arg, bool cache_stmt); Log_event(); + /* + read_log_event() functions read an event from a binlog or relay log; used by + SHOW BINLOG EVENTS, the binlog_dump thread on the master (reads master's + binlog), the slave IO thread (reads the event sent by binlog_dump), the + slave SQL thread (reads the event from the relay log). + */ // if mutex is 0, the read will proceed without mutex static Log_event* read_log_event(IO_CACHE* file, pthread_mutex_t* log_lock, bool old_format); static int read_log_event(IO_CACHE* file, String* packet, pthread_mutex_t* log_lock); + /* set_log_pos() is used to fill log_pos with tell(log). 
*/ void set_log_pos(MYSQL_LOG* log); - virtual void pack_info(String* packet); - int net_send(THD* thd, const char* log_name, my_off_t pos); + /* + init_show_field_list() prepares the column names and types for the output of + SHOW BINLOG EVENTS; it is used only by SHOW BINLOG EVENTS. + */ static void init_show_field_list(List<Item>* field_list); +#ifdef HAVE_REPLICATION + int net_send(Protocol *protocol, const char* log_name, my_off_t pos); + /* + pack_info() is used by SHOW BINLOG EVENTS; as print() it prepares and sends + a string to display to the user, so it resembles print(). + */ + virtual void pack_info(Protocol *protocol); + /* + The SQL slave thread calls exec_event() to execute the event; this is where + the slave's data is modified. + */ virtual int exec_event(struct st_relay_log_info* rli); +#endif /* HAVE_REPLICATION */ virtual const char* get_db() { return thd ? thd->db : 0; @@ -255,6 +395,7 @@ public: #else // avoid having to link mysqlbinlog against libpthread static Log_event* read_log_event(IO_CACHE* file, bool old_format); + /* print*() functions are used by mysqlbinlog */ virtual void print(FILE* file, bool short_form = 0, char* last_db = 0) = 0; void print_timestamp(FILE* file, time_t *ts = 0); void print_header(FILE* file); @@ -293,17 +434,25 @@ public: } virtual int get_data_size() { return 0;} virtual int get_data_body_offset() { return 0; } - int get_event_len() + virtual int get_event_len() { return (cached_event_len ? cached_event_len : (cached_event_len = LOG_EVENT_HEADER_LEN + get_data_size())); } static Log_event* read_log_event(const char* buf, int event_len, const char **error, bool old_format); + /* returns the human readable name of the event's type */ const char* get_type_str(); }; +/***************************************************************************** + + Query Log Event class + + Logs SQL queries + + ****************************************************************************/ class Query_log_event: public Log_event { protected: @@ -330,10 +479,12 @@ public: #ifndef MYSQL_CLIENT Query_log_event(THD* thd_arg, const char* query_arg, ulong query_length, - bool using_trans); + bool using_trans, bool suppress_use); const char* get_db() { return db; } - void pack_info(String* packet); +#ifdef HAVE_REPLICATION + void pack_info(Protocol* protocol); int exec_event(struct st_relay_log_info* rli); +#endif /* HAVE_REPLICATION */ #else void print(FILE* file, bool short_form = 0, char* last_db = 0); #endif @@ -360,7 +511,15 @@ public: } }; +#ifdef HAVE_REPLICATION + +/***************************************************************************** + + Slave Log Event class + Note that this class is currently not used at all; no code writes a + Slave_log_event (though some code in repl_failsafe.cc reads Slave_log_event). 
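/*
  For illustration, a sketch of how a client-side consumer such as
  mysqlbinlog can drive the entry points declared above: read events one by
  one from an IO_CACHE opened on a binlog file and print each of them.
  read_log_event() returns 0 both at end of file and on an unreadable event;
  the real tool tells the two cases apart, this sketch does not.
*/
#ifdef MYSQL_CLIENT
void dump_binlog_sketch(IO_CACHE *cache, bool old_format)
{
  Log_event *ev;
  while ((ev= Log_event::read_log_event(cache, old_format)))
  {
    ev->print(stdout, 0 /* short_form */, 0 /* last_db */);
    delete ev;                    /* print*() does not take ownership */
  }
}
#endif /* MYSQL_CLIENT */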
+ ****************************************************************************/ class Slave_log_event: public Log_event { protected: @@ -376,7 +535,7 @@ public: #ifndef MYSQL_CLIENT Slave_log_event(THD* thd_arg, struct st_relay_log_info* rli); - void pack_info(String* packet); + void pack_info(Protocol* protocol); int exec_event(struct st_relay_log_info* rli); #else void print(FILE* file, bool short_form = 0, char* last_db = 0); @@ -390,6 +549,14 @@ public: int write_data(IO_CACHE* file ); }; +#endif /* HAVE_REPLICATION */ + + +/***************************************************************************** + + Load Log Event class + + ****************************************************************************/ class Load_log_event: public Log_event { protected: @@ -418,7 +585,7 @@ public: { fname= afname; fname_len= alen; - local_fname= true; + local_fname= TRUE; } /* fname doesn't point to memory inside Log_event::temp_buf */ int check_fname_outside_temp_buf() @@ -432,17 +599,19 @@ public: Load_log_event(THD* thd, sql_exchange* ex, const char* db_arg, const char* table_name_arg, - List<Item>& fields_arg, enum enum_duplicates handle_dup, + List<Item>& fields_arg, enum enum_duplicates handle_dup, bool ignore, bool using_trans); - void set_fields(List<Item> &fields_arg); - void pack_info(String* packet); + void set_fields(const char* db, List<Item> &fields_arg); const char* get_db() { return db; } +#ifdef HAVE_REPLICATION + void pack_info(Protocol* protocol); int exec_event(struct st_relay_log_info* rli) { return exec_event(thd->slave_net,rli,0); } int exec_event(NET* net, struct st_relay_log_info* rli, bool use_rli_only_for_errors); +#endif /* HAVE_REPLICATION */ #else void print(FILE* file, bool short_form = 0, char* last_db = 0); void print(FILE* file, bool short_form, char* last_db, bool commented); @@ -472,6 +641,11 @@ public: extern char server_version[SERVER_VERSION_LENGTH]; +/***************************************************************************** + + Start Log Event class + + ****************************************************************************/ class Start_log_event: public Log_event { public: @@ -505,8 +679,10 @@ public: created = (time_t) when; memcpy(server_version, ::server_version, ST_SERVER_VER_LEN); } - void pack_info(String* packet); +#ifdef HAVE_REPLICATION + void pack_info(Protocol* protocol); int exec_event(struct st_relay_log_info* rli); +#endif /* HAVE_REPLICATION */ #else void print(FILE* file, bool short_form = 0, char* last_db = 0); #endif @@ -523,6 +699,13 @@ public: }; +/***************************************************************************** + + Intvar Log Event class + + Logs special variables such as auto_increment values + + ****************************************************************************/ class Intvar_log_event: public Log_event { public: @@ -533,8 +716,10 @@ public: Intvar_log_event(THD* thd_arg,uchar type_arg, ulonglong val_arg) :Log_event(thd_arg,0,0),val(val_arg),type(type_arg) {} - void pack_info(String* packet); +#ifdef HAVE_REPLICATION + void pack_info(Protocol* protocol); int exec_event(struct st_relay_log_info* rli); +#endif /* HAVE_REPLICATION */ #else void print(FILE* file, bool short_form = 0, char* last_db = 0); #endif @@ -543,15 +728,17 @@ public: ~Intvar_log_event() {} Log_event_type get_type_code() { return INTVAR_EVENT;} const char* get_var_type_name(); - int get_data_size() { return sizeof(type) + sizeof(val);} + int get_data_size() { return 9; /* sizeof(type) + sizeof(val) */;} int write_data(IO_CACHE* file); 
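/*
  For illustration, the two simplest post-headers declared above spelled out
  byte by byte: Start_log_event carries START_HEADER_LEN = 2 +
  ST_SERVER_VER_LEN + 4 bytes, Intvar_log_event carries the 9 bytes its
  get_data_size() returns. "buf" points just past the fixed event header.
*/
void decode_start_body_sketch(const char *buf, uint16 *binlog_version,
                              char *server_version /* ST_SERVER_VER_LEN+1 */,
                              time_t *created)
{
  *binlog_version= uint2korr(buf);                        /* bytes 0..1   */
  memcpy(server_version, buf + ST_SERVER_VER_OFFSET, ST_SERVER_VER_LEN);
  server_version[ST_SERVER_VER_LEN]= 0;                   /* bytes 2..51  */
  *created= (time_t) uint4korr(buf + ST_CREATED_OFFSET);  /* bytes 52..55 */
}

void decode_intvar_body_sketch(const char *buf, uchar *sub_type, ulonglong *val)
{
  *sub_type= (uchar) buf[0];     /* LAST_INSERT_ID or INSERT_ID sub-type */
  *val= uint8korr(buf + 1);      /* the value to force on the slave      */
}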
bool is_valid() { return 1; } }; /***************************************************************************** - * - * Rand log event class - * + + Rand Log Event class + + Logs random seed used by the next RAND(), and by PASSWORD() in 4.1. + ****************************************************************************/ class Rand_log_event: public Log_event { @@ -563,8 +750,10 @@ class Rand_log_event: public Log_event Rand_log_event(THD* thd_arg, ulonglong seed1_arg, ulonglong seed2_arg) :Log_event(thd_arg,0,0),seed1(seed1_arg),seed2(seed2_arg) {} - void pack_info(String* packet); +#ifdef HAVE_REPLICATION + void pack_info(Protocol* protocol); int exec_event(struct st_relay_log_info* rli); +#endif /* HAVE_REPLICATION */ #else void print(FILE* file, bool short_form = 0, char* last_db = 0); #endif @@ -572,16 +761,66 @@ class Rand_log_event: public Log_event Rand_log_event(const char* buf, bool old_format); ~Rand_log_event() {} Log_event_type get_type_code() { return RAND_EVENT;} - int get_data_size() { return sizeof(ulonglong) * 2; } + int get_data_size() { return 16; /* sizeof(ulonglong) * 2*/ } int write_data(IO_CACHE* file); bool is_valid() { return 1; } }; +/***************************************************************************** + + User var Log Event class + + Every time a query uses the value of a user variable, a User_var_log_event is + written before the Query_log_event, to set the user variable. + + ****************************************************************************/ +class User_var_log_event: public Log_event +{ +public: + char *name; + uint name_len; + char *val; + ulong val_len; + Item_result type; + uint charset_number; + bool is_null; +#ifndef MYSQL_CLIENT + User_var_log_event(THD* thd_arg, char *name_arg, uint name_len_arg, + char *val_arg, ulong val_len_arg, Item_result type_arg, + uint charset_number_arg) + :Log_event(), name(name_arg), name_len(name_len_arg), val(val_arg), + val_len(val_len_arg), type(type_arg), charset_number(charset_number_arg) + { is_null= !val; } + void pack_info(Protocol* protocol); + int exec_event(struct st_relay_log_info* rli); +#else + void print(FILE* file, bool short_form = 0, char* last_db = 0); +#endif + + User_var_log_event(const char* buf, bool old_format); + ~User_var_log_event() {} + Log_event_type get_type_code() { return USER_VAR_EVENT;} + int get_data_size() + { + return (is_null ? UV_NAME_LEN_SIZE + name_len + UV_VAL_IS_NULL : + UV_NAME_LEN_SIZE + name_len + UV_VAL_IS_NULL + UV_VAL_TYPE_SIZE + + UV_CHARSET_NUMBER_SIZE + UV_VAL_LEN_SIZE + val_len); + } + int write_data(IO_CACHE* file); + bool is_valid() { return 1; } +}; + +/***************************************************************************** + + Stop Log Event class + + ****************************************************************************/ +#ifdef HAVE_REPLICATION class Stop_log_event: public Log_event { public: -#ifndef MYSQL_CLIENT +#ifndef MYSQL_CLIENT Stop_log_event() :Log_event() {} int exec_event(struct st_relay_log_info* rli); @@ -597,24 +836,35 @@ public: bool is_valid() { return 1; } }; +#endif /* HAVE_REPLICATION */ + +/***************************************************************************** + + Rotate Log Event class + + This will be depricated when we move to using sequence ids. 
+ + ****************************************************************************/ class Rotate_log_event: public Log_event { public: + enum { + ZERO_LEN= 1, // if event should report 0 as its length + DUP_NAME= 2 // if constructor should dup the string argument + }; const char* new_log_ident; ulonglong pos; uint ident_len; - bool alloced; + uint flags; #ifndef MYSQL_CLIENT Rotate_log_event(THD* thd_arg, const char* new_log_ident_arg, - uint ident_len_arg = 0, - ulonglong pos_arg = LOG_EVENT_OFFSET) - :Log_event(), new_log_ident(new_log_ident_arg), - pos(pos_arg),ident_len(ident_len_arg ? ident_len_arg : - (uint) strlen(new_log_ident_arg)), alloced(0) - {} - void pack_info(String* packet); + uint ident_len_arg, + ulonglong pos_arg, uint flags); +#ifdef HAVE_REPLICATION + void pack_info(Protocol* protocol); int exec_event(struct st_relay_log_info* rli); +#endif /* HAVE_REPLICATION */ #else void print(FILE* file, bool short_form = 0, char* last_db = 0); #endif @@ -622,10 +872,18 @@ public: Rotate_log_event(const char* buf, int event_len, bool old_format); ~Rotate_log_event() { - if (alloced) - my_free((gptr) new_log_ident, MYF(0)); + if (flags & DUP_NAME) + my_free((gptr) new_log_ident, MYF(MY_ALLOW_ZERO_PTR)); } Log_event_type get_type_code() { return ROTATE_EVENT;} + virtual int get_event_len() + { + if (flags & ZERO_LEN) + return 0; + if (cached_event_len == 0) + cached_event_len= LOG_EVENT_HEADER_LEN + get_data_size(); + return cached_event_len; + } int get_data_size() { return ident_len + ROTATE_HEADER_LEN;} bool is_valid() { return new_log_ident != 0; } int write_data(IO_CACHE* file); @@ -633,6 +891,11 @@ public: /* the classes below are for the new LOAD DATA INFILE logging */ +/***************************************************************************** + + Create File Log Event class + + ****************************************************************************/ class Create_file_log_event: public Load_log_event { protected: @@ -653,11 +916,13 @@ public: Create_file_log_event(THD* thd, sql_exchange* ex, const char* db_arg, const char* table_name_arg, List<Item>& fields_arg, - enum enum_duplicates handle_dup, + enum enum_duplicates handle_dup, bool ignore, char* block_arg, uint block_len_arg, bool using_trans); - void pack_info(String* packet); +#ifdef HAVE_REPLICATION + void pack_info(Protocol* protocol); int exec_event(struct st_relay_log_info* rli); +#endif /* HAVE_REPLICATION */ #else void print(FILE* file, bool short_form = 0, char* last_db = 0); void print(FILE* file, bool short_form, char* last_db, bool enable_local); @@ -695,6 +960,11 @@ public: }; +/***************************************************************************** + + Append Block Log Event class + + ****************************************************************************/ class Append_block_log_event: public Log_event { public: @@ -716,8 +986,10 @@ public: #ifndef MYSQL_CLIENT Append_block_log_event(THD* thd, const char* db_arg, char* block_arg, uint block_len_arg, bool using_trans); +#ifdef HAVE_REPLICATION int exec_event(struct st_relay_log_info* rli); - void pack_info(String* packet); + void pack_info(Protocol* protocol); +#endif /* HAVE_REPLICATION */ #else void print(FILE* file, bool short_form = 0, char* last_db = 0); #endif @@ -731,7 +1003,11 @@ public: const char* get_db() { return db; } }; +/***************************************************************************** + Delete File Log Event class + + ****************************************************************************/ class 
Delete_file_log_event: public Log_event { public: @@ -740,10 +1016,13 @@ public: #ifndef MYSQL_CLIENT Delete_file_log_event(THD* thd, const char* db_arg, bool using_trans); - void pack_info(String* packet); +#ifdef HAVE_REPLICATION + void pack_info(Protocol* protocol); int exec_event(struct st_relay_log_info* rli); +#endif /* HAVE_REPLICATION */ #else void print(FILE* file, bool short_form = 0, char* last_db = 0); + void print(FILE* file, bool short_form, char* last_db, bool enable_local); #endif Delete_file_log_event(const char* buf, int event_len); @@ -755,6 +1034,11 @@ public: const char* get_db() { return db; } }; +/***************************************************************************** + + Execute Load Log Event class + + ****************************************************************************/ class Execute_load_log_event: public Log_event { public: @@ -763,8 +1047,10 @@ public: #ifndef MYSQL_CLIENT Execute_load_log_event(THD* thd, const char* db_arg, bool using_trans); - void pack_info(String* packet); +#ifdef HAVE_REPLICATION + void pack_info(Protocol* protocol); int exec_event(struct st_relay_log_info* rli); +#endif /* HAVE_REPLICATION */ #else void print(FILE* file, bool short_form = 0, char* last_db = 0); #endif @@ -791,5 +1077,5 @@ public: bool is_valid() { return 1; } }; #endif - +char *str_to_hex(char *to, const char *from, uint len); #endif /* _log_event_h */ diff --git a/sql/mf_iocache.cc b/sql/mf_iocache.cc index 5c63820a6ae..71c8d588de7 100644 --- a/sql/mf_iocache.cc +++ b/sql/mf_iocache.cc @@ -31,6 +31,7 @@ */ #include "mysql_priv.h" +#ifdef HAVE_REPLICATION extern "C" { @@ -78,3 +79,6 @@ int _my_b_net_read(register IO_CACHE *info, byte *Buffer, } } /* extern "C" */ +#endif /* HAVE_REPLICATION */ + + diff --git a/sql/mini_client.cc b/sql/mini_client.cc deleted file mode 100644 index cd00db5c7df..00000000000 --- a/sql/mini_client.cc +++ /dev/null @@ -1,1459 +0,0 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - mini MySQL client to be included into the server to do server to server - commincation by Sasha Pachev - - Note: all file-global symbols must begin with mc_ , even the static ones, - just in case we decide to make them external at some point -*/ - -#include <my_global.h> -/* my_pthread must be included early to be able to fix things */ -#if defined(THREAD) -#include <my_pthread.h> /* because of signal() */ -#endif -#include <thr_alarm.h> -#include <mysql_embed.h> -#include <mysql_com.h> -#include <my_sys.h> -#include <violite.h> -#include <mysys_err.h> -#include <m_string.h> -#include <m_ctype.h> -#include "mysql.h" -#include "mini_client.h" -#include "mysql_version.h" -#include "mysqld_error.h" -#include "errmsg.h" - -#if defined( OS2) && defined(MYSQL_SERVER) -#undef ER -#define ER CER -#endif - -extern "C" { // Because of SCO 3.2V4.2 -#include <sys/stat.h> -#include <signal.h> -#ifdef HAVE_PWD_H -#include <pwd.h> -#endif -#if !defined(MSDOS) && !defined(__WIN__) -#include <sys/socket.h> -#include <netinet/in.h> -#include <arpa/inet.h> -#include <netdb.h> -#ifdef HAVE_SELECT_H -# include <select.h> -#endif -#ifdef HAVE_SYS_SELECT_H -#include <sys/select.h> -#endif -#endif /*!defined(MSDOS) && !defined(__WIN__) */ -#ifdef HAVE_SYS_UN_H -# include <sys/un.h> -#endif -#ifndef INADDR_NONE -#define INADDR_NONE -1 -#endif -} - -static void mc_free_rows(MYSQL_DATA *cur); -void mc_end_server(MYSQL *mysql); -static int mc_sock_connect(File s, const struct sockaddr *name, uint namelen, uint to); -static void mc_free_old_query(MYSQL *mysql); -static int mc_send_file_to_server(MYSQL *mysql, const char *filename); -static my_ulonglong mc_net_field_length_ll(uchar **packet); -static ulong mc_net_field_length(uchar **packet); -static int mc_read_one_row(MYSQL *mysql,uint fields,MYSQL_ROW row, - ulong *lengths); -static MYSQL_DATA *mc_read_rows(MYSQL *mysql,MYSQL_FIELD *mysql_fields, - uint fields); -#if !(defined(__WIN__) || defined(OS2) || defined(__NETWARE__)) -static int wait_for_data(my_socket fd, uint timeout); -#endif - - -#define CLIENT_CAPABILITIES (CLIENT_LONG_PASSWORD | CLIENT_LONG_FLAG | CLIENT_LOCAL_FILES) - -#if defined(MSDOS) || defined(__WIN__) -#define perror(A) -#else -#include <errno.h> -#define SOCKET_ERROR -1 -#endif - -#ifdef __WIN__ -static my_bool is_NT(void) -{ - char *os=getenv("OS"); - return (os && !strcmp(os, "Windows_NT")) ? 1 : 0; -} -#endif - -extern ulong slave_net_timeout; - -/* -** Create a named pipe connection -*/ - -#ifdef __WIN__ - -HANDLE create_named_pipe(NET *net, uint connect_timeout, char **arg_host, - char **arg_unix_socket) -{ - HANDLE hPipe=INVALID_HANDLE_VALUE; - char pipe_name[512]; - DWORD dwMode; - int i; - my_bool testing_named_pipes=0; - char *host= *arg_host, *unix_socket= *arg_unix_socket; - - if (!host || !strcmp(host,LOCAL_HOST)) - host=LOCAL_HOST_NAMEDPIPE; - - pipe_name[sizeof(pipe_name)-1]= 0; /* Safety if too long string */ - strxnmov(pipe_name, sizeof(pipe_name)-1, "\\\\", host, "\\pipe\\", - unix_socket, NullS); - DBUG_PRINT("info",("Server name: '%s'. 
Named Pipe: %s", - host, unix_socket)); - - for (i=0 ; i < 100 ; i++) /* Don't retry forever */ - { - if ((hPipe = CreateFile(pipe_name, - GENERIC_READ | GENERIC_WRITE, - 0, - NULL, - OPEN_EXISTING, - 0, - NULL )) != INVALID_HANDLE_VALUE) - break; - if (GetLastError() != ERROR_PIPE_BUSY) - { - net->last_errno=CR_NAMEDPIPEOPEN_ERROR; - sprintf(net->last_error,ER(net->last_errno),host, unix_socket, - (ulong) GetLastError()); - return INVALID_HANDLE_VALUE; - } - /* wait for for an other instance */ - if (! WaitNamedPipe(pipe_name, connect_timeout*1000) ) - { - net->last_errno=CR_NAMEDPIPEWAIT_ERROR; - sprintf(net->last_error,ER(net->last_errno),host, unix_socket, - (ulong) GetLastError()); - return INVALID_HANDLE_VALUE; - } - } - if (hPipe == INVALID_HANDLE_VALUE) - { - net->last_errno=CR_NAMEDPIPEOPEN_ERROR; - sprintf(net->last_error,ER(net->last_errno),host, unix_socket, - (ulong) GetLastError()); - return INVALID_HANDLE_VALUE; - } - dwMode = PIPE_READMODE_BYTE | PIPE_WAIT; - if ( !SetNamedPipeHandleState(hPipe, &dwMode, NULL, NULL) ) - { - CloseHandle( hPipe ); - net->last_errno=CR_NAMEDPIPESETSTATE_ERROR; - sprintf(net->last_error,ER(net->last_errno),host, unix_socket, - (ulong) GetLastError()); - return INVALID_HANDLE_VALUE; - } - *arg_host=host ; *arg_unix_socket=unix_socket; /* connect arg */ - return (hPipe); -} -#endif - - -/**************************************************************************** -** Init MySQL structure or allocate one -****************************************************************************/ - -MYSQL *mc_mysql_init(MYSQL *mysql) -{ - init_client_errs(); - if (!mysql) - { - if (!(mysql=(MYSQL*) my_malloc(sizeof(*mysql),MYF(MY_WME | MY_ZEROFILL)))) - return 0; - mysql->free_me=1; - mysql->net.vio = 0; - } - else - bzero((char*) (mysql),sizeof(*(mysql))); -#ifdef __WIN__ - mysql->options.connect_timeout=20; -#endif - mysql->net.read_timeout = slave_net_timeout; - return mysql; -} - -/************************************************************************** -** Shut down connection -**************************************************************************/ - -void -mc_end_server(MYSQL *mysql) -{ - DBUG_ENTER("mc_end_server"); - if (mysql->net.vio != 0) - { - DBUG_PRINT("info",("Net: %s", vio_description(mysql->net.vio))); - vio_delete(mysql->net.vio); - mysql->net.vio= 0; /* Marker */ - } - net_end(&mysql->net); - mc_free_old_query(mysql); - DBUG_VOID_RETURN; -} - -static void mc_free_old_query(MYSQL *mysql) -{ - DBUG_ENTER("mc_free_old_query"); - if (mysql->fields) - free_root(&mysql->field_alloc,MYF(0)); - else - init_alloc_root(&mysql->field_alloc,8192,0); /* Assume rowlength < 8192 */ - mysql->fields=0; - mysql->field_count=0; /* For API */ - DBUG_VOID_RETURN; -} - - -/**************************************************************************** - A modified version of connect(). my_connect() allows you to specify - a timeout value, in seconds, that we should wait until we - derermine we can't connect to a particular host. If timeout is 0, - my_connect() will behave exactly like connect(). - - Base version coded by Steve Bernacki, Jr. 
<steve@navinet.net> -*****************************************************************************/ - -static int mc_sock_connect(my_socket fd, const struct sockaddr *name, - uint namelen, uint timeout) -{ -#if defined(__WIN__) || defined(OS2) || defined(__NETWARE__) - return connect(fd, (struct sockaddr*) name, namelen); -#else - int flags, res, s_err; - - /* - If they passed us a timeout of zero, we should behave - exactly like the normal connect() call does. - */ - - if (timeout == 0) - return connect(fd, (struct sockaddr*) name, namelen); - - flags = fcntl(fd, F_GETFL, 0); /* Set socket to not block */ -#ifdef O_NONBLOCK - fcntl(fd, F_SETFL, flags | O_NONBLOCK); /* and save the flags.. */ -#endif - - res= connect(fd, (struct sockaddr*) name, namelen); - s_err= errno; /* Save the error... */ - fcntl(fd, F_SETFL, flags); - if ((res != 0) && (s_err != EINPROGRESS)) - { - errno= s_err; /* Restore it */ - return(-1); - } - if (res == 0) /* Connected quickly! */ - return(0); - return wait_for_data(fd, timeout); -#endif -} - - -/* - Wait up to timeout seconds for a connection to be established. - - We prefer to do this with poll() as there is no limitations with this. - If not, we will use select() -*/ - -#if !(defined(__WIN__) || defined(OS2) || defined(__NETWARE__)) - -static int wait_for_data(my_socket fd, uint timeout) -{ -#ifdef HAVE_POLL - struct pollfd ufds; - int res; - - ufds.fd= fd; - ufds.events= POLLIN | POLLPRI; - if (!(res= poll(&ufds, 1, (int) timeout*1000))) - { - errno= EINTR; - return -1; - } - if (res < 0 || !(ufds.revents & (POLLIN | POLLPRI))) - return -1; - return 0; -#else - SOCKOPT_OPTLEN_TYPE s_err_size = sizeof(uint); - fd_set sfds; - struct timeval tv; - time_t start_time, now_time; - int res, s_err; - - if (fd >= FD_SETSIZE) /* Check if wrong error */ - return 0; /* Can't use timeout */ - - /* - Our connection is "in progress." We can use the select() call to wait - up to a specified period of time for the connection to suceed. - If select() returns 0 (after waiting howevermany seconds), our socket - never became writable (host is probably unreachable.) Otherwise, if - select() returns 1, then one of two conditions exist: - - 1. An error occured. We use getsockopt() to check for this. - 2. The connection was set up sucessfully: getsockopt() will - return 0 as an error. - - Thanks goes to Andrew Gierth <andrew@erlenstar.demon.co.uk> - who posted this method of timing out a connect() in - comp.unix.programmer on August 15th, 1997. - */ - - FD_ZERO(&sfds); - FD_SET(fd, &sfds); - /* - select could be interrupted by a signal, and if it is, - the timeout should be adjusted and the select restarted - to work around OSes that don't restart select and - implementations of select that don't adjust tv upon - failure to reflect the time remaining - */ - start_time = time(NULL); - for (;;) - { - tv.tv_sec = (long) timeout; - tv.tv_usec = 0; -#if defined(HPUX10) && defined(THREAD) - if ((res = select(fd+1, NULL, (int*) &sfds, NULL, &tv)) > 0) - break; -#else - if ((res = select(fd+1, NULL, &sfds, NULL, &tv)) > 0) - break; -#endif - if (res == 0) /* timeout */ - return -1; - now_time=time(NULL); - timeout-= (uint) (now_time - start_time); - if (errno != EINTR || (int) timeout <= 0) - return -1; - } - - /* - select() returned something more interesting than zero, let's - see if we have any errors. If the next two statements pass, - we've got an open socket! 
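/*
  For illustration, the technique the removed mc_sock_connect() and
  wait_for_data() pair implements, condensed into one function: connect() on
  a non-blocking socket, wait for the connection with a timeout, then fetch
  the deferred result with getsockopt(SO_ERROR). Sketched with poll(), which
  the comment above already prefers over select(); POSIX only, error handling
  kept minimal.
*/
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <sys/socket.h>

int connect_with_timeout_sketch(int fd, const struct sockaddr *addr,
                                socklen_t addrlen, int timeout_sec)
{
  int saved_flags= fcntl(fd, F_GETFL, 0);
  int result= -1;

  fcntl(fd, F_SETFL, saved_flags | O_NONBLOCK); /* don't block in connect()  */

  if (connect(fd, addr, addrlen) == 0)
    result= 0;                                  /* connected immediately     */
  else if (errno == EINPROGRESS)
  {
    struct pollfd pfd;
    pfd.fd= fd;
    pfd.events= POLLOUT;                        /* writable == connect done  */
    if (poll(&pfd, 1, timeout_sec * 1000) > 0)
    {
      int err= 0;
      socklen_t len= sizeof(err);
      if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) == 0 && err == 0)
        result= 0;                              /* handshake succeeded       */
      else if (err)
        errno= err;                             /* report the deferred error */
    }
  }

  fcntl(fd, F_SETFL, saved_flags);              /* restore blocking mode     */
  return result;
}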
- */ - - s_err=0; - if (getsockopt(fd, SOL_SOCKET, SO_ERROR, (char*) &s_err, &s_err_size) != 0) - return(-1); - - if (s_err) - { /* getsockopt could succeed */ - errno = s_err; - return(-1); /* but return an error... */ - } - return (0); /* ok */ -#endif /* HAVE_POLL */ -} -#endif /* defined(__WIN__) || defined(OS2) || defined(__NETWARE__) */ - - -/***************************************************************************** -** read a packet from server. Give error message if socket was down -** or packet is an error message -*****************************************************************************/ - -ulong -mc_net_safe_read(MYSQL *mysql) -{ - NET *net= &mysql->net; - ulong len=0; - - if (net->vio != 0) - len=my_net_read(net); - - if (len == packet_error || len == 0) - { - DBUG_PRINT("error",("Wrong connection or packet. fd: %s len: %d", - vio_description(net->vio),len)); - if (!vio_was_interrupted(net->vio)) - { - mc_end_server(mysql); - if (net->last_errno != ER_NET_PACKET_TOO_LARGE) - { - net->last_errno=CR_SERVER_LOST; - strmov(net->last_error,ER(net->last_errno)); - } - else - strmov(net->last_error, "Packet too large - increase \ -max_allowed_packet on this server"); - } - return(packet_error); - } - if (net->read_pos[0] == 255) - { - if (len > 3) - { - char *pos=(char*) net->read_pos+1; - if (mysql->protocol_version > 9) - { /* New client protocol */ - net->last_errno=uint2korr(pos); - pos+=2; - len-=2; - if (!net->last_errno) - net->last_errno = CR_UNKNOWN_ERROR; - } - else - { - net->last_errno=CR_UNKNOWN_ERROR; - len--; - } - (void) strmake(net->last_error,(char*) pos, - min(len,sizeof(net->last_error)-1)); - } - else - { - net->last_errno=CR_UNKNOWN_ERROR; - (void) strmov(net->last_error,ER(net->last_errno)); - } - DBUG_PRINT("error",("Got error: %d (%s)", net->last_errno, - net->last_error)); - return(packet_error); - } - return len; -} - - -char *mc_mysql_error(MYSQL *mysql) -{ - return (mysql)->net.last_error; -} - -int mc_mysql_errno(MYSQL *mysql) -{ - return (mysql)->net.last_errno; -} - - -my_bool mc_mysql_reconnect(MYSQL *mysql) -{ - MYSQL tmp_mysql; - DBUG_ENTER("mc_mysql_reconnect"); - - if (!mysql->reconnect) - { - mysql->net.last_errno=CR_SERVER_GONE_ERROR; - strmov(mysql->net.last_error, ER(mysql->net.last_errno)); - DBUG_RETURN(1); - } - mc_mysql_init(&tmp_mysql); - tmp_mysql.options=mysql->options; - if (!mc_mysql_connect(&tmp_mysql,mysql->host,mysql->user,mysql->passwd, - mysql->db, mysql->port, mysql->unix_socket, - mysql->client_flag, mysql->net.read_timeout)) - { - mysql->net.last_errno= tmp_mysql.net.last_errno; - strmov(mysql->net.last_error, tmp_mysql.net.last_error); - DBUG_RETURN(1); - } - tmp_mysql.free_me=mysql->free_me; - mysql->free_me=0; - bzero((char*) &mysql->options,sizeof(&mysql->options)); - mc_mysql_close(mysql); - *mysql=tmp_mysql; - net_clear(&mysql->net); - mysql->affected_rows= ~(my_ulonglong) 0; - DBUG_RETURN(0); -} - - - -int -mc_simple_command(MYSQL *mysql,enum enum_server_command command, - const char *arg, uint length, my_bool skipp_check) -{ - NET *net= &mysql->net; - int result= -1; - - if (mysql->net.vio == 0) - { /* Do reconnect if possible */ - if (mc_mysql_reconnect(mysql)) - goto end; - } - if (mysql->status != MYSQL_STATUS_READY) - { - strmov(net->last_error,ER(mysql->net.last_errno=CR_COMMANDS_OUT_OF_SYNC)); - goto end; - } - - mysql->net.last_error[0]=0; - mysql->net.last_errno=0; - mysql->info=0; - mysql->affected_rows= ~(my_ulonglong) 0; - net_clear(net); /* Clear receive buffer */ - if (!arg) - arg=""; - - if 
(net_write_command(net,(uchar) command,arg, - length ? length :(uint) strlen(arg))) - { - DBUG_PRINT("error",("Can't send command to server. Error: %d",socket_errno)); - mc_end_server(mysql); - if (mc_mysql_reconnect(mysql)) - goto end; - if (net_write_command(net,(uchar) command,arg, - length ? length :(uint) strlen(arg))) - { - net->last_errno=CR_SERVER_GONE_ERROR; - strmov(net->last_error,ER(net->last_errno)); - goto end; - } - } - result=0; - if (!skipp_check) - result= ((mysql->packet_length=mc_net_safe_read(mysql)) == packet_error ? - -1 : 0); - end: - return result; -} - - -MYSQL * -mc_mysql_connect(MYSQL *mysql,const char *host, const char *user, - const char *passwd, const char *db, - uint port, const char *unix_socket,uint client_flag, - uint net_read_timeout) -{ - char buff[NAME_LEN+USERNAME_LENGTH+100],*end,*host_info; - my_socket sock; - in_addr_t ip_addr; - struct sockaddr_in sock_addr; - ulong pkt_length; - NET *net= &mysql->net; - thr_alarm_t alarmed; - ALARM alarm_buff; - ulong max_allowed_packet; - -#ifdef __WIN__ - HANDLE hPipe=INVALID_HANDLE_VALUE; -#endif -#ifdef HAVE_SYS_UN_H - struct sockaddr_un UNIXaddr; -#endif - DBUG_ENTER("mc_mysql_connect"); - DBUG_PRINT("enter",("host: %s db: %s user: %s connect_time_out: %u read_timeout: %u", - host ? host : "(Null)", - db ? db : "(Null)", - user ? user : "(Null)", - net_read_timeout, - (uint) slave_net_timeout)); - - net->vio = 0; /* If something goes wrong */ - mysql->charset=default_charset_info; /* Set character set */ - if (!port) - port = MYSQL_PORT; /* Should always be set by mysqld */ - if (!unix_socket) - unix_socket=MYSQL_UNIX_ADDR; - - mysql->reconnect=1; /* Reconnect as default */ - mysql->server_status=SERVER_STATUS_AUTOCOMMIT; - if (!mysql->options.connect_timeout) - mysql->options.connect_timeout= net_read_timeout; - - /* - ** Grab a socket and connect it to the server - */ - -#if defined(HAVE_SYS_UN_H) - if ((!host || !strcmp(host,LOCAL_HOST)) && unix_socket) - { - host=LOCAL_HOST; - host_info=(char*) ER(CR_LOCALHOST_CONNECTION); - DBUG_PRINT("info",("Using UNIX sock '%s'",unix_socket)); - if ((sock = socket(AF_UNIX,SOCK_STREAM,0)) == SOCKET_ERROR) - { - net->last_errno=CR_SOCKET_CREATE_ERROR; - sprintf(net->last_error,ER(net->last_errno),socket_errno); - goto error; - } - net->vio = vio_new(sock, VIO_TYPE_SOCKET, TRUE); - bzero((char*) &UNIXaddr,sizeof(UNIXaddr)); - UNIXaddr.sun_family = AF_UNIX; - strmov(UNIXaddr.sun_path, unix_socket); - if (mc_sock_connect(sock, - my_reinterpret_cast(struct sockaddr *) (&UNIXaddr), - sizeof(UNIXaddr), - mysql->options.connect_timeout) <0) - { - DBUG_PRINT("error",("Got error %d on connect to local server", - socket_errno)); - net->last_errno=CR_CONNECTION_ERROR; - sprintf(net->last_error,ER(net->last_errno),unix_socket,socket_errno); - goto error; - } - } - else -#elif defined(__WIN__) - { - if ((unix_socket || - !host && is_NT() || - host && !strcmp(host,LOCAL_HOST_NAMEDPIPE) || - mysql->options.named_pipe || !have_tcpip)) - { - sock=0; - if ((hPipe=create_named_pipe(net, mysql->options.connect_timeout, - (char**) &host, (char**) &unix_socket)) == - INVALID_HANDLE_VALUE) - { - DBUG_PRINT("error", - ("host: '%s' socket: '%s' named_pipe: %d have_tcpip: %d", - host ? host : "<null>", - unix_socket ? 
unix_socket : "<null>", - (int) mysql->options.named_pipe, - (int) have_tcpip)); - if (mysql->options.named_pipe || - (host && !strcmp(host,LOCAL_HOST_NAMEDPIPE)) || - (unix_socket && !strcmp(unix_socket,MYSQL_NAMEDPIPE))) - goto error; /* User only requested named pipes */ - /* Try also with TCP/IP */ - } - else - { - net->vio=vio_new_win32pipe(hPipe); - sprintf(host_info=buff, ER(CR_NAMEDPIPE_CONNECTION), host, - unix_socket); - } - } - } - if (hPipe == INVALID_HANDLE_VALUE) -#endif - { - unix_socket=0; /* This is not used */ - if (!host) - host=LOCAL_HOST; - sprintf(host_info=buff,ER(CR_TCP_CONNECTION),host); - DBUG_PRINT("info",("Server name: '%s'. TCP sock: %d", host,port)); - thr_alarm_init(&alarmed); - /* - We don't have to check status for thr_alarm as it's not fatal if - we didn't manage to set an alarm. (In this case the socket call - will just block for a while). - */ - thr_alarm(&alarmed, net_read_timeout, &alarm_buff); - sock = (my_socket) socket(AF_INET,SOCK_STREAM,0); - thr_end_alarm(&alarmed); - if (sock == SOCKET_ERROR) - { - net->last_errno=CR_IPSOCK_ERROR; - sprintf(net->last_error,ER(net->last_errno),socket_errno); - goto error; - } - net->vio = vio_new(sock,VIO_TYPE_TCPIP,FALSE); - bzero((char*) &sock_addr,sizeof(sock_addr)); - sock_addr.sin_family = AF_INET; - - /* - ** The server name may be a host name or IP address - */ - - if ((int) (ip_addr = inet_addr(host)) != (int) INADDR_NONE) - { - memcpy_fixed(&sock_addr.sin_addr,&ip_addr,sizeof(ip_addr)); - } - else - { - int tmp_errno; - struct hostent tmp_hostent,*hp; - char buff2[GETHOSTBYNAME_BUFF_SIZE]; - hp = my_gethostbyname_r(host,&tmp_hostent,buff2,sizeof(buff2), - &tmp_errno); - if (!hp) - { - net->last_errno=CR_UNKNOWN_HOST; - sprintf(net->last_error, ER(CR_UNKNOWN_HOST), host, tmp_errno); - my_gethostbyname_r_free(); - goto error; - } - memcpy(&sock_addr.sin_addr,hp->h_addr, (size_t) hp->h_length); - my_gethostbyname_r_free(); - } - sock_addr.sin_port = (ushort) htons((ushort) port); - if (mc_sock_connect(sock, - my_reinterpret_cast(struct sockaddr *) (&sock_addr), - sizeof(sock_addr), - mysql->options.connect_timeout) <0) - { - DBUG_PRINT("error",("Got error %d on connect to '%s'", - socket_errno,host)); - net->last_errno= CR_CONN_HOST_ERROR; - sprintf(net->last_error ,ER(CR_CONN_HOST_ERROR), host, socket_errno); - goto error; - } - } - - if (!net->vio || my_net_init(net, net->vio)) - { - vio_delete(net->vio); - net->vio = 0; - net->last_errno=CR_OUT_OF_MEMORY; - strmov(net->last_error,ER(net->last_errno)); - goto error; - } - vio_keepalive(net->vio,TRUE); - net->read_timeout=slave_net_timeout; - /* Get version info */ - mysql->protocol_version= PROTOCOL_VERSION; /* Assume this */ - if (mysql->options.connect_timeout && - vio_poll_read(net->vio, mysql->options.connect_timeout)) - { - net->last_errno= CR_SERVER_LOST; - strmov(net->last_error,ER(net->last_errno)); - goto error; - } - if ((pkt_length=mc_net_safe_read(mysql)) == packet_error) - goto error; - - /* Check if version of protocol matches current one */ - - mysql->protocol_version= net->read_pos[0]; - DBUG_DUMP("packet",(char*) net->read_pos,10); - DBUG_PRINT("info",("mysql protocol version %d, server=%d", - PROTOCOL_VERSION, mysql->protocol_version)); - if (mysql->protocol_version != PROTOCOL_VERSION && - mysql->protocol_version != PROTOCOL_VERSION-1) - { - net->last_errno= CR_VERSION_ERROR; - sprintf(net->last_error, ER(CR_VERSION_ERROR), mysql->protocol_version, - PROTOCOL_VERSION); - goto error; - } - end=strend((char*) net->read_pos+1); - 
mysql->thread_id=uint4korr(end+1); - end+=5; - strmake(mysql->scramble_buff,end,8); - end+=9; - if (pkt_length >= (uint) (end+1 - (char*) net->read_pos)) - mysql->server_capabilities=uint2korr(end); - if (pkt_length >= (uint) (end+18 - (char*) net->read_pos)) - { - /* New protocol with 16 bytes to describe server characteristics */ - mysql->server_language=end[2]; - mysql->server_status=uint2korr(end+3); - } - - /* Save connection information */ - if (!user) user=""; - if (!passwd) passwd=""; - if (!my_multi_malloc(MYF(0), - &mysql->host_info, (uint) strlen(host_info)+1, - &mysql->host, (uint) strlen(host)+1, - &mysql->unix_socket, - unix_socket ? (uint) strlen(unix_socket)+1 : (uint) 1, - &mysql->server_version, - (uint) (end - (char*) net->read_pos), - NullS) || - !(mysql->user=my_strdup(user,MYF(0))) || - !(mysql->passwd=my_strdup(passwd,MYF(0)))) - { - strmov(net->last_error, ER(net->last_errno=CR_OUT_OF_MEMORY)); - goto error; - } - strmov(mysql->host_info,host_info); - strmov(mysql->host,host); - if (unix_socket) - strmov(mysql->unix_socket,unix_socket); - else - mysql->unix_socket=0; - strmov(mysql->server_version,(char*) net->read_pos+1); - mysql->port=port; - client_flag|=mysql->options.client_flag; - DBUG_PRINT("info",("Server version = '%s' capabilites: %ld", - mysql->server_version,mysql->server_capabilities)); - - /* Send client information for access check */ - client_flag|=CLIENT_CAPABILITIES; - -#ifdef HAVE_OPENSSL - if (mysql->options.ssl_key || mysql->options.ssl_cert || - mysql->options.ssl_ca || mysql->options.ssl_capath || - mysql->options.ssl_cipher) - mysql->options.use_ssl= 1; - if (mysql->options.use_ssl) - client_flag|=CLIENT_SSL; -#endif /* HAVE_OPENSSL */ - - if (db) - client_flag|=CLIENT_CONNECT_WITH_DB; -#ifdef HAVE_COMPRESS - if ((mysql->server_capabilities & CLIENT_COMPRESS) && - (mysql->options.compress || (client_flag & CLIENT_COMPRESS))) - client_flag|=CLIENT_COMPRESS; /* We will use compression */ - else -#endif - client_flag&= ~CLIENT_COMPRESS; - -#ifdef HAVE_OPENSSL - if ((mysql->server_capabilities & CLIENT_SSL) && - (mysql->options.use_ssl || (client_flag & CLIENT_SSL))) - { - DBUG_PRINT("info", ("Changing IO layer to SSL")); - client_flag |= CLIENT_SSL; - } - else - { - if (client_flag & CLIENT_SSL) - { - DBUG_PRINT("info", ("Leaving IO layer intact because server doesn't support SSL")); - } - client_flag &= ~CLIENT_SSL; - } -#endif /* HAVE_OPENSSL */ - - int2store(buff,client_flag); - mysql->client_flag=client_flag; - -#ifdef HAVE_OPENSSL - /* - Oops.. are we careful enough to not send ANY information without - encryption? - */ - if (client_flag & CLIENT_SSL) - { - if (my_net_write(net,buff,(uint) (2)) || net_flush(net)) - { - net->last_errno= CR_SERVER_LOST; - strmov(net->last_error,ER(net->last_errno)); - goto error; - } - /* Do the SSL layering. 
*/ - DBUG_PRINT("info", ("IO layer change in progress...")); - DBUG_PRINT("info", ("IO context %p",((struct st_VioSSLConnectorFd*)mysql->connector_fd)->ssl_context)); - sslconnect((struct st_VioSSLConnectorFd*)(mysql->connector_fd),mysql->net.vio, (long)(mysql->options.connect_timeout)); - DBUG_PRINT("info", ("IO layer change done!")); - } -#endif /* HAVE_OPENSSL */ - max_allowed_packet=mysql->net.max_packet_size; - int3store(buff+2,max_allowed_packet); - - - if (user && user[0]) - strmake(buff+5,user,32); - else - { - user = getenv("USER"); - if (!user) user = "mysql"; - strmov((char*) buff+5, user ); - } - - DBUG_PRINT("info",("user: %s",buff+5)); - end=scramble(strend(buff+5)+1, mysql->scramble_buff, passwd, - (my_bool) (mysql->protocol_version == 9)); - if (db) - { - end=strmake(end+1,db,NAME_LEN); - mysql->db=my_strdup(db,MYF(MY_WME)); - db=0; - } - if (my_net_write(net,buff,(ulong) (end-buff)) || net_flush(net)) - { - net->last_errno= CR_SERVER_LOST; - strmov(net->last_error,ER(net->last_errno)); - goto error; - } - if (mc_net_safe_read(mysql) == packet_error) - goto error; - if (client_flag & CLIENT_COMPRESS) /* We will use compression */ - net->compress=1; - DBUG_PRINT("exit",("Mysql handler: %lx",mysql)); - DBUG_RETURN(mysql); - -error: - DBUG_PRINT("error",("message: %u (%s)",net->last_errno,net->last_error)); - { - /* Free alloced memory */ - my_bool free_me=mysql->free_me; - mc_end_server(mysql); - mysql->free_me=0; - mc_mysql_close(mysql); - mysql->free_me=free_me; - } - DBUG_RETURN(0); -} - - -#ifdef HAVE_OPENSSL -/* -************************************************************************** -** Free strings in the SSL structure and clear 'use_ssl' flag. -** NB! Errors are not reported until you do mysql_real_connect. -************************************************************************** -*/ -int -mysql_ssl_clear(MYSQL *mysql) -{ - my_free(mysql->options.ssl_key, MYF(MY_ALLOW_ZERO_PTR)); - my_free(mysql->options.ssl_cert, MYF(MY_ALLOW_ZERO_PTR)); - my_free(mysql->options.ssl_ca, MYF(MY_ALLOW_ZERO_PTR)); - my_free(mysql->options.ssl_capath, MYF(MY_ALLOW_ZERO_PTR)); - my_free(mysql->options.ssl_cipher, MYF(MY_ALLOW_ZERO_PTR)); - mysql->options.ssl_key = 0; - mysql->options.ssl_cert = 0; - mysql->options.ssl_ca = 0; - mysql->options.ssl_capath = 0; - mysql->options.ssl_cipher= 0; - mysql->options.use_ssl = FALSE; - my_free(mysql->connector_fd,MYF(MY_ALLOW_ZERO_PTR)); - mysql->connector_fd = 0; - return 0; -} -#endif /* HAVE_OPENSSL */ - -/************************************************************************* -** Send a QUIT to the server and close the connection -** If handle is alloced by mysql connect free it. 
-*************************************************************************/ - -void -mc_mysql_close(MYSQL *mysql) -{ - DBUG_ENTER("mysql_close"); - if (mysql) /* Some simple safety */ - { - if (mysql->net.vio != 0) - { - mc_free_old_query(mysql); - mysql->status=MYSQL_STATUS_READY; /* Force command */ - mysql->reconnect=0; - mc_simple_command(mysql,COM_QUIT,NullS,0,1); - mc_end_server(mysql); - } - my_free((gptr) mysql->host_info,MYF(MY_ALLOW_ZERO_PTR)); - my_free(mysql->user,MYF(MY_ALLOW_ZERO_PTR)); - my_free(mysql->passwd,MYF(MY_ALLOW_ZERO_PTR)); - my_free(mysql->db,MYF(MY_ALLOW_ZERO_PTR)); - /* Clear pointers for better safety */ - mysql->host_info=mysql->user=mysql->passwd=mysql->db=0; - bzero((char*) &mysql->options,sizeof(mysql->options)); -#ifdef HAVE_OPENSSL - mysql_ssl_clear(mysql); -#endif /* HAVE_OPENSSL */ - if (mysql->free_me) - my_free((gptr) mysql,MYF(0)); - } - DBUG_VOID_RETURN; -} - -void mc_mysql_free_result(MYSQL_RES *result) -{ - DBUG_ENTER("mc_mysql_free_result"); - DBUG_PRINT("enter",("mysql_res: %lx",result)); - if (result) - { - if (result->handle && result->handle->status == MYSQL_STATUS_USE_RESULT) - { - DBUG_PRINT("warning",("Not all rows in set were read; Ignoring rows")); - for (;;) - { - ulong pkt_len; - if ((pkt_len=mc_net_safe_read(result->handle)) == packet_error) - break; - if (pkt_len == 1 && result->handle->net.read_pos[0] == 254) - break; /* End of data */ - } - result->handle->status=MYSQL_STATUS_READY; - } - mc_free_rows(result->data); - if (result->fields) - free_root(&result->field_alloc,MYF(0)); - if (result->row) - my_free((gptr) result->row,MYF(0)); - my_free((gptr) result,MYF(0)); - } - DBUG_VOID_RETURN; -} - -static void mc_free_rows(MYSQL_DATA *cur) -{ - if (cur) - { - free_root(&cur->alloc,MYF(0)); - my_free((gptr) cur,MYF(0)); - } -} - -static MYSQL_FIELD * -mc_unpack_fields(MYSQL_DATA *data,MEM_ROOT *alloc,uint fields, - my_bool default_value, my_bool long_flag_protocol) -{ - MYSQL_ROWS *row; - MYSQL_FIELD *field,*result; - DBUG_ENTER("unpack_fields"); - - field=result=(MYSQL_FIELD*) alloc_root(alloc,sizeof(MYSQL_FIELD)*fields); - if (!result) - DBUG_RETURN(0); - - for (row=data->data; row ; row = row->next,field++) - { - field->table= strdup_root(alloc,(char*) row->data[0]); - field->name= strdup_root(alloc,(char*) row->data[1]); - field->length= (uint) uint3korr(row->data[2]); - field->type= (enum enum_field_types) (uchar) row->data[3][0]; - if (long_flag_protocol) - { - field->flags= uint2korr(row->data[4]); - field->decimals=(uint) (uchar) row->data[4][2]; - } - else - { - field->flags= (uint) (uchar) row->data[4][0]; - field->decimals=(uint) (uchar) row->data[4][1]; - } - if (INTERNAL_NUM_FIELD(field)) - field->flags|= NUM_FLAG; - if (default_value && row->data[5]) - field->def=strdup_root(alloc,(char*) row->data[5]); - else - field->def=0; - field->max_length= 0; - } - mc_free_rows(data); /* Free old data */ - DBUG_RETURN(result); -} - -int mc_mysql_send_query(MYSQL* mysql, const char* query, uint length) -{ - return mc_simple_command(mysql, COM_QUERY, query, length, 1); -} - - -int mc_mysql_read_query_result(MYSQL *mysql) -{ - uchar *pos; - ulong field_count; - MYSQL_DATA *fields; - ulong length; - DBUG_ENTER("mc_mysql_read_query_result"); - - if ((length = mc_net_safe_read(mysql)) == packet_error) - DBUG_RETURN(-1); - mc_free_old_query(mysql); /* Free old result */ -get_info: - pos=(uchar*) mysql->net.read_pos; - if ((field_count= mc_net_field_length(&pos)) == 0) - { - mysql->affected_rows= mc_net_field_length_ll(&pos); - 
mysql->insert_id= mc_net_field_length_ll(&pos); - if (mysql->server_capabilities & CLIENT_TRANSACTIONS) - { - mysql->server_status=uint2korr(pos); pos+=2; - } - if (pos < mysql->net.read_pos+length && mc_net_field_length(&pos)) - mysql->info=(char*) pos; - DBUG_RETURN(0); - } - if (field_count == NULL_LENGTH) /* LOAD DATA LOCAL INFILE */ - { - int error=mc_send_file_to_server(mysql,(char*) pos); - if ((length=mc_net_safe_read(mysql)) == packet_error || error) - DBUG_RETURN(-1); - goto get_info; /* Get info packet */ - } - if (!(mysql->server_status & SERVER_STATUS_AUTOCOMMIT)) - mysql->server_status|= SERVER_STATUS_IN_TRANS; - - mysql->extra_info= mc_net_field_length_ll(&pos); /* Maybe number of rec */ - if (!(fields=mc_read_rows(mysql,(MYSQL_FIELD*) 0,5))) - DBUG_RETURN(-1); - if (!(mysql->fields=mc_unpack_fields(fields,&mysql->field_alloc, - (uint) field_count,0, - (my_bool) test(mysql->server_capabilities & - CLIENT_LONG_FLAG)))) - DBUG_RETURN(-1); - mysql->status=MYSQL_STATUS_GET_RESULT; - mysql->field_count=field_count; - DBUG_RETURN(0); -} - -int mc_mysql_query(MYSQL *mysql, const char *query, uint length) -{ - DBUG_ENTER("mysql_real_query"); - DBUG_PRINT("enter",("handle: %lx",mysql)); - DBUG_PRINT("query",("Query = \"%s\"",query)); - if (!length) - length = strlen(query); - if (mc_simple_command(mysql,COM_QUERY,query,length,1)) - DBUG_RETURN(-1); - DBUG_RETURN(mc_mysql_read_query_result(mysql)); -} - -static int mc_send_file_to_server(MYSQL *mysql, const char *filename) -{ - int fd, readcount, result= -1; - uint packet_length=MY_ALIGN(mysql->net.max_packet-16,IO_SIZE); - char *buf, tmp_name[FN_REFLEN]; - DBUG_ENTER("send_file_to_server"); - - if (!(buf=my_malloc(packet_length,MYF(0)))) - { - strmov(mysql->net.last_error, ER(mysql->net.last_errno=CR_OUT_OF_MEMORY)); - DBUG_RETURN(-1); - } - - fn_format(tmp_name,filename,"","",4); /* Convert to client format */ - if ((fd = my_open(tmp_name,O_RDONLY, MYF(0))) < 0) - { - my_net_write(&mysql->net,"",0); // Server needs one packet - net_flush(&mysql->net); - mysql->net.last_errno=EE_FILENOTFOUND; - my_snprintf(mysql->net.last_error,sizeof(mysql->net.last_error)-1, - EE(mysql->net.last_errno),tmp_name, errno); - goto err; - } - - while ((readcount = (int) my_read(fd,(byte*) buf,packet_length,MYF(0))) > 0) - { - if (my_net_write(&mysql->net,buf,readcount)) - { - DBUG_PRINT("error",("Lost connection to MySQL server during LOAD DATA of local file")); - mysql->net.last_errno=CR_SERVER_LOST; - strmov(mysql->net.last_error,ER(mysql->net.last_errno)); - goto err; - } - } - /* Send empty packet to mark end of file */ - if (my_net_write(&mysql->net,"",0) || net_flush(&mysql->net)) - { - mysql->net.last_errno=CR_SERVER_LOST; - sprintf(mysql->net.last_error,ER(mysql->net.last_errno),errno); - goto err; - } - if (readcount < 0) - { - mysql->net.last_errno=EE_READ; /* the errmsg for not entire file read */ - my_snprintf(mysql->net.last_error,sizeof(mysql->net.last_error)-1, - tmp_name,errno); - goto err; - } - result=0; // Ok - -err: - if (fd >= 0) - (void) my_close(fd,MYF(0)); - my_free(buf,MYF(0)); - DBUG_RETURN(result); -} - - -/* Get the length of next field. 
Change parameter to point at fieldstart */ -static ulong mc_net_field_length(uchar **packet) -{ - reg1 uchar *pos= *packet; - if (*pos < 251) - { - (*packet)++; - return (ulong) *pos; - } - if (*pos == 251) - { - (*packet)++; - return NULL_LENGTH; - } - if (*pos == 252) - { - (*packet)+=3; - return (ulong) uint2korr(pos+1); - } - if (*pos == 253) - { - (*packet)+=4; - return (ulong) uint3korr(pos+1); - } - (*packet)+=9; /* Must be 254 when here */ - return (ulong) uint4korr(pos+1); -} - -/* Same as above, but returns ulonglong values */ - -static my_ulonglong mc_net_field_length_ll(uchar **packet) -{ - reg1 uchar *pos= *packet; - if (*pos < 251) - { - (*packet)++; - return (my_ulonglong) *pos; - } - if (*pos == 251) - { - (*packet)++; - return (my_ulonglong) NULL_LENGTH; - } - if (*pos == 252) - { - (*packet)+=3; - return (my_ulonglong) uint2korr(pos+1); - } - if (*pos == 253) - { - (*packet)+=4; - return (my_ulonglong) uint3korr(pos+1); - } - (*packet)+=9; /* Must be 254 when here */ -#ifdef NO_CLIENT_LONGLONG - return (my_ulonglong) uint4korr(pos+1); -#else - return (my_ulonglong) uint8korr(pos+1); -#endif -} - -/* Read all rows (fields or data) from server */ - -static MYSQL_DATA *mc_read_rows(MYSQL *mysql,MYSQL_FIELD *mysql_fields, - uint fields) -{ - uint field; - ulong pkt_len; - ulong len; - uchar *cp; - char *to; - MYSQL_DATA *result; - MYSQL_ROWS **prev_ptr,*cur; - NET *net = &mysql->net; - DBUG_ENTER("mc_read_rows"); - - if ((pkt_len=mc_net_safe_read(mysql)) == packet_error) - DBUG_RETURN(0); - if (!(result=(MYSQL_DATA*) my_malloc(sizeof(MYSQL_DATA), - MYF(MY_ZEROFILL)))) - { - net->last_errno=CR_OUT_OF_MEMORY; - strmov(net->last_error,ER(net->last_errno)); - DBUG_RETURN(0); - } - init_alloc_root(&result->alloc,8192,0); /* Assume rowlength < 8192 */ - result->alloc.min_malloc=sizeof(MYSQL_ROWS); - prev_ptr= &result->data; - result->rows=0; - result->fields=fields; - - while (*(cp=net->read_pos) != 254 || pkt_len != 1) - { - result->rows++; - if (!(cur= (MYSQL_ROWS*) alloc_root(&result->alloc, - sizeof(MYSQL_ROWS))) || - !(cur->data= ((MYSQL_ROW) - alloc_root(&result->alloc, - (fields+1)*sizeof(char *)+pkt_len)))) - { - mc_free_rows(result); - net->last_errno=CR_OUT_OF_MEMORY; - strmov(net->last_error,ER(net->last_errno)); - DBUG_RETURN(0); - } - *prev_ptr=cur; - prev_ptr= &cur->next; - to= (char*) (cur->data+fields+1); - for (field=0 ; field < fields ; field++) - { - if ((len=(ulong) mc_net_field_length(&cp)) == NULL_LENGTH) - { /* null field */ - cur->data[field] = 0; - } - else - { - cur->data[field] = to; - memcpy(to,(char*) cp,len); to[len]=0; - to+=len+1; - cp+=len; - if (mysql_fields) - { - if (mysql_fields[field].max_length < len) - mysql_fields[field].max_length=len; - } - } - } - cur->data[field]=to; /* End of last field */ - if ((pkt_len=mc_net_safe_read(mysql)) == packet_error) - { - mc_free_rows(result); - DBUG_RETURN(0); - } - } - *prev_ptr=0; /* last pointer is null */ - DBUG_PRINT("exit",("Got %d rows",result->rows)); - DBUG_RETURN(result); -} - - -/* -** Read one row. Uses packet buffer as storage for fields. 
-** When next packet is read, the previous field values are destroyed -*/ - - -static int mc_read_one_row(MYSQL *mysql,uint fields,MYSQL_ROW row, - ulong *lengths) -{ - uint field; - ulong pkt_len,len; - uchar *pos,*prev_pos; - - if ((pkt_len=mc_net_safe_read(mysql)) == packet_error) - return -1; - if (pkt_len == 1 && mysql->net.read_pos[0] == 254) - return 1; /* End of data */ - prev_pos= 0; /* allowed to write at packet[-1] */ - pos=mysql->net.read_pos; - for (field=0 ; field < fields ; field++) - { - if ((len=(ulong) mc_net_field_length(&pos)) == NULL_LENGTH) - { /* null field */ - row[field] = 0; - *lengths++=0; - } - else - { - row[field] = (char*) pos; - pos+=len; - *lengths++=len; - } - if (prev_pos) - *prev_pos=0; /* Terminate prev field */ - prev_pos=pos; - } - row[field]=(char*) prev_pos+1; /* End of last field */ - *prev_pos=0; /* Terminate last field */ - return 0; -} - -my_ulonglong mc_mysql_num_rows(MYSQL_RES *res) -{ - return res->row_count; -} - -unsigned int mc_mysql_num_fields(MYSQL_RES *res) -{ - return res->field_count; -} - -void mc_mysql_data_seek(MYSQL_RES *result, my_ulonglong row) -{ - MYSQL_ROWS *tmp=0; - DBUG_PRINT("info",("mysql_data_seek(%ld)",(long) row)); - if (result->data) - for (tmp=result->data->data; row-- && tmp ; tmp = tmp->next) ; - result->current_row=0; - result->data_cursor = tmp; -} - -MYSQL_ROW STDCALL mc_mysql_fetch_row(MYSQL_RES *res) -{ - DBUG_ENTER("mc_mysql_fetch_row"); - if (!res->data) - { /* Unbufferred fetch */ - if (!res->eof) - { - if (!(mc_read_one_row(res->handle,res->field_count,res->row, - res->lengths))) - { - res->row_count++; - DBUG_RETURN(res->current_row=res->row); - } - else - { - DBUG_PRINT("info",("end of data")); - res->eof=1; - res->handle->status=MYSQL_STATUS_READY; - } - } - DBUG_RETURN((MYSQL_ROW) NULL); - } - { - MYSQL_ROW tmp; - if (!res->data_cursor) - { - DBUG_PRINT("info",("end of data")); - DBUG_RETURN(res->current_row=(MYSQL_ROW) NULL); - } - tmp = res->data_cursor->data; - res->data_cursor = res->data_cursor->next; - DBUG_RETURN(res->current_row=tmp); - } -} - -int mc_mysql_select_db(MYSQL *mysql, const char *db) -{ - int error; - DBUG_ENTER("mysql_select_db"); - DBUG_PRINT("enter",("db: '%s'",db)); - - if ((error=mc_simple_command(mysql,COM_INIT_DB,db,(uint) strlen(db),0))) - DBUG_RETURN(error); - my_free(mysql->db,MYF(MY_ALLOW_ZERO_PTR)); - mysql->db=my_strdup(db,MYF(MY_WME)); - DBUG_RETURN(0); -} - - -MYSQL_RES *mc_mysql_store_result(MYSQL *mysql) -{ - MYSQL_RES *result; - DBUG_ENTER("mysql_store_result"); - - if (!mysql->fields) - DBUG_RETURN(0); - if (mysql->status != MYSQL_STATUS_GET_RESULT) - { - strmov(mysql->net.last_error, - ER(mysql->net.last_errno=CR_COMMANDS_OUT_OF_SYNC)); - DBUG_RETURN(0); - } - mysql->status=MYSQL_STATUS_READY; /* server is ready */ - if (!(result=(MYSQL_RES*) my_malloc(sizeof(MYSQL_RES)+ - sizeof(ulong)*mysql->field_count, - MYF(MY_ZEROFILL)))) - { - mysql->net.last_errno=CR_OUT_OF_MEMORY; - strmov(mysql->net.last_error, ER(mysql->net.last_errno)); - DBUG_RETURN(0); - } - result->eof=1; /* Marker for buffered */ - result->lengths=(ulong*) (result+1); - if (!(result->data=mc_read_rows(mysql,mysql->fields,mysql->field_count))) - { - my_free((gptr) result,MYF(0)); - DBUG_RETURN(0); - } - mysql->affected_rows= result->row_count= result->data->rows; - result->data_cursor= result->data->data; - result->fields= mysql->fields; - result->field_alloc= mysql->field_alloc; - result->field_count= mysql->field_count; - result->current_field=0; - result->current_row=0; /* Must do a fetch first 
*/ - mysql->fields=0; /* fields is now in result */ - DBUG_RETURN(result); /* Data fetched */ -} diff --git a/sql/mini_client.h b/sql/mini_client.h deleted file mode 100644 index 24c13646170..00000000000 --- a/sql/mini_client.h +++ /dev/null @@ -1,46 +0,0 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -#ifndef _MINI_CLIENT_H -#define _MINI_CLIENT_H - - -MYSQL* mc_mysql_connect(MYSQL *mysql,const char *host, const char *user, - const char *passwd, const char *db, - uint port, const char *unix_socket,uint client_flag, - uint net_read_timeout); -int mc_simple_command(MYSQL *mysql,enum enum_server_command command, - const char *arg, uint length, my_bool skipp_check); -void mc_mysql_close(MYSQL *mysql); -MYSQL *mc_mysql_init(MYSQL *mysql); -void mc_mysql_debug(const char *debug); -ulong mc_net_safe_read(MYSQL *mysql); -char *mc_mysql_error(MYSQL *mysql); -int mc_mysql_errno(MYSQL *mysql); -my_bool mc_mysql_reconnect(MYSQL* mysql); -int mc_mysql_send_query(MYSQL* mysql, const char* query, uint length); -int mc_mysql_read_query_result(MYSQL *mysql); -int mc_mysql_query(MYSQL *mysql, const char *query, uint length); -MYSQL_RES * mc_mysql_store_result(MYSQL *mysql); -void mc_mysql_free_result(MYSQL_RES *result); -void mc_mysql_data_seek(MYSQL_RES *result, my_ulonglong row); -my_ulonglong mc_mysql_num_rows(MYSQL_RES *res); -unsigned int mc_mysql_num_fields(MYSQL_RES *res); -MYSQL_ROW STDCALL mc_mysql_fetch_row(MYSQL_RES *res); -int mc_mysql_select_db(MYSQL *mysql, const char *db); -void mc_end_server(MYSQL *mysql); - -#endif diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 3972a01f7ba..6676d994cfa 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2003 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -18,26 +18,27 @@ #include <mysql_version.h> #include <mysql_embed.h> #include <my_sys.h> +#include <my_time.h> #include <m_string.h> #include <hash.h> #include <signal.h> #include <thr_lock.h> #include <my_base.h> /* Needed by field.h */ -#include <my_bitmap.h> -#include <my_getopt.h> +#include "sql_bitmap.h" #ifdef __EMX__ #undef write /* remove pthread.h macro definition for EMX */ #endif -#ifdef BIG_JOINS -typedef ulonglong table_map; /* Used for table bits in join */ -#else -typedef ulong table_map; /* Used for table bits in join */ -#endif /* BIG_JOINS */ +/* TODO convert all these three maps to Bitmap classes */ +typedef ulonglong table_map; /* Used for table bits in join */ +typedef Bitmap<64> key_map; /* Used for finding keys */ +typedef ulong key_part_map; /* Used for finding key parts */ -typedef ulong key_map; /* Used for finding keys */ -typedef ulong 
key_part_map; /* Used for finding key parts */ +/* useful constants */ +extern const key_map key_map_empty; +extern key_map key_map_full; /* Should be treated as const */ +extern const char *primary_key_name; #include "mysql_com.h" #include <violite.h> @@ -50,21 +51,28 @@ char *sql_strdup(const char *str); char *sql_strmake(const char *str,uint len); gptr sql_memdup(const void * ptr,unsigned size); void sql_element_free(void *ptr); +char *sql_strmake_with_convert(const char *str, uint32 arg_length, + CHARSET_INFO *from_cs, + uint32 max_res_length, + CHARSET_INFO *to_cs, uint32 *result_length); void kill_one_thread(THD *thd, ulong id); bool net_request_file(NET* net, const char* fname); char* query_table_status(THD *thd,const char *db,const char *table_name); + #define x_free(A) { my_free((gptr) (A),MYF(MY_WME | MY_FAE | MY_ALLOW_ZERO_PTR)); } #define safeFree(x) { if(x) { my_free((gptr) x,MYF(0)); x = NULL; } } #define PREV_BITS(type,A) ((type) (((type) 1 << (A)) -1)) #define all_bits_set(A,B) ((A) & (B) != (B)) +extern CHARSET_INFO *system_charset_info, *files_charset_info ; +extern CHARSET_INFO *national_charset_info, *table_alias_charset; + /*************************************************************************** Configuration parameters ****************************************************************************/ #define ACL_CACHE_SIZE 256 -#define HASH_PASSWORD_LENGTH 16 #define MAX_PASSWORD_LENGTH 32 #define HOST_CACHE_SIZE 128 #define MAX_ACCEPT_RETRY 10 // Test accept this many times @@ -85,6 +93,9 @@ char* query_table_status(THD *thd,const char *db,const char *table_name); #define ACL_ALLOC_BLOCK_SIZE 1024 #define UDF_ALLOC_BLOCK_SIZE 1024 #define TABLE_ALLOC_BLOCK_SIZE 1024 +#define BDB_LOG_ALLOC_BLOCK_SIZE 1024 +#define WARN_ALLOC_BLOCK_SIZE 2048 +#define WARN_ALLOC_PREALLOC_SIZE 1024 /* The following parameters are used to decide when to use an extra cache to @@ -161,62 +172,95 @@ char* query_table_status(THD *thd,const char *db,const char *table_name); #define TEST_CORE_ON_SIGNAL 256 /* Give core if signal */ #define TEST_NO_STACKTRACE 512 #define TEST_SIGINT 1024 /* Allow sigint on threads */ +#define TEST_SYNCHRONIZATION 2048 /* get server to do sleep in some + places */ /* options for select set by the yacc parser (stored in lex->options) */ -#define SELECT_DISTINCT 1 -#define SELECT_STRAIGHT_JOIN 2 -#define SELECT_DESCRIBE 4 -#define SELECT_SMALL_RESULT 8 -#define SELECT_BIG_RESULT 16 -#define OPTION_FOUND_ROWS 32 -#define OPTION_TO_QUERY_CACHE 64 -#define SELECT_NO_JOIN_CACHE 256 /* Intern */ - -#define OPTION_BIG_TABLES 512 /* for SQL OPTION */ -#define OPTION_BIG_SELECTS 1024 /* for SQL OPTION */ -#define OPTION_LOG_OFF 2048 -#define OPTION_UPDATE_LOG 4096 /* update log flag */ -#define TMP_TABLE_ALL_COLUMNS 8192 -#define OPTION_WARNINGS 16384 -#define OPTION_AUTO_IS_NULL 32768 -#define OPTION_FOUND_COMMENT 65536L -#define OPTION_SAFE_UPDATES OPTION_FOUND_COMMENT*2 -#define OPTION_BUFFER_RESULT OPTION_SAFE_UPDATES*2 -#define OPTION_BIN_LOG OPTION_BUFFER_RESULT*2 -#define OPTION_NOT_AUTOCOMMIT OPTION_BIN_LOG*2 -#define OPTION_BEGIN OPTION_NOT_AUTOCOMMIT*2 -#define OPTION_TABLE_LOCK OPTION_BEGIN*2 -#define OPTION_QUICK OPTION_TABLE_LOCK*2 -#define OPTION_QUOTE_SHOW_CREATE OPTION_QUICK*2 -#define OPTION_INTERNAL_SUBTRANSACTIONS OPTION_QUOTE_SHOW_CREATE*2 +#define SELECT_DISTINCT (1L << 0) +#define SELECT_STRAIGHT_JOIN (1L << 1) +#define SELECT_DESCRIBE (1L << 2) +#define SELECT_SMALL_RESULT (1L << 3) +#define SELECT_BIG_RESULT (1L << 4) +#define OPTION_FOUND_ROWS (1L << 5)
+#define OPTION_TO_QUERY_CACHE (1L << 6) +#define SELECT_NO_JOIN_CACHE (1L << 7) /* Intern */ +#define OPTION_BIG_TABLES (1L << 8) /* for SQL OPTION */ +#define OPTION_BIG_SELECTS (1L << 9) /* for SQL OPTION */ +#define OPTION_LOG_OFF (1L << 10) +#define OPTION_UPDATE_LOG (1L << 11) /* update log flag */ +#define TMP_TABLE_ALL_COLUMNS (1L << 12) +#define OPTION_WARNINGS (1L << 13) +#define OPTION_AUTO_IS_NULL (1L << 14) +#define OPTION_FOUND_COMMENT (1L << 15) +#define OPTION_SAFE_UPDATES (1L << 16) +#define OPTION_BUFFER_RESULT (1L << 17) +#define OPTION_BIN_LOG (1L << 18) +#define OPTION_NOT_AUTOCOMMIT (1L << 19) +#define OPTION_BEGIN (1L << 20) +#define OPTION_TABLE_LOCK (1L << 21) +#define OPTION_QUICK (1L << 22) +#define OPTION_QUOTE_SHOW_CREATE (1L << 23) +#define OPTION_INTERNAL_SUBTRANSACTIONS (1L << 24) /* Set if we are updating a non-transaction safe table */ -#define OPTION_STATUS_NO_TRANS_UPDATE OPTION_INTERNAL_SUBTRANSACTIONS*2 +#define OPTION_STATUS_NO_TRANS_UPDATE (1L << 25) -/* The following is set when parsing the query */ -#define QUERY_NO_INDEX_USED OPTION_STATUS_NO_TRANS_UPDATE*2 -#define QUERY_NO_GOOD_INDEX_USED QUERY_NO_INDEX_USED*2 /* The following can be set when importing tables in a 'wrong order' to suppress foreign key checks */ -#define OPTION_NO_FOREIGN_KEY_CHECKS QUERY_NO_GOOD_INDEX_USED*2 +#define OPTION_NO_FOREIGN_KEY_CHECKS (1L << 26) /* The following speeds up inserts to InnoDB tables by suppressing unique key checks in some cases */ -#define OPTION_RELAXED_UNIQUE_CHECKS OPTION_NO_FOREIGN_KEY_CHECKS*2 -#define SELECT_NO_UNLOCK ((ulong) OPTION_RELAXED_UNIQUE_CHECKS*2) -/* NOTE: we have now used up all 32 bits of the OPTION flag! */ +#define OPTION_RELAXED_UNIQUE_CHECKS (1L << 27) +#define SELECT_NO_UNLOCK (1L << 28) +/* The following is used to detect a conflict with DISTINCT + that the user query has requested */ +#define SELECT_ALL (1L << 29) + +/* + Force the used temporary table to be a MyISAM table (because we will use + fulltext functions when reading from it). +*/ +#define TMP_TABLE_FORCE_MYISAM (1L << 30) + +/* If set to 0, then the thread will ignore all warnings with level notes.
+ Set by executing SET SQL_NOTES=1 */ +#define OPTION_SQL_NOTES (1L << 31) /* Bits for different SQL modes modes (including ANSI mode) */ #define MODE_REAL_AS_FLOAT 1 #define MODE_PIPES_AS_CONCAT 2 #define MODE_ANSI_QUOTES 4 #define MODE_IGNORE_SPACE 8 -#define MODE_SERIALIZABLE 16 +#define MODE_NOT_USED 16 #define MODE_ONLY_FULL_GROUP_BY 32 #define MODE_NO_UNSIGNED_SUBTRACTION 64 #define MODE_NO_DIR_IN_CREATE 128 +#define MODE_POSTGRESQL 256 +#define MODE_ORACLE 512 +#define MODE_MSSQL 1024 +#define MODE_DB2 2048 +#define MODE_MAXDB 4096 +#define MODE_NO_KEY_OPTIONS 8192 +#define MODE_NO_TABLE_OPTIONS 16384 +#define MODE_NO_FIELD_OPTIONS 32768 +#define MODE_MYSQL323 65536 +#define MODE_MYSQL40 (MODE_MYSQL323*2) +#define MODE_ANSI (MODE_MYSQL40*2) +#define MODE_NO_AUTO_VALUE_ON_ZERO (MODE_ANSI*2) #define RAID_BLOCK_SIZE 1024 +#define MY_CHARSET_BIN_MB_MAXLEN 1 + +// uncachable cause +#define UNCACHEABLE_DEPENDENT 1 +#define UNCACHEABLE_RAND 2 +#define UNCACHEABLE_SIDEEFFECT 4 +// forcing to save JOIN for explain +#define UNCACHEABLE_EXPLAIN 8 +/* Don't evaluate subqueries in prepare even if they're not correlated */ +#define UNCACHEABLE_PREPARE 16 + #ifdef EXTRA_DEBUG /* Sync points allow us to force the server to reach a certain line of code @@ -243,6 +287,7 @@ void debug_sync_point(const char* lock_name, uint lock_timeout); /* Options to add_table_to_list() */ #define TL_OPTION_UPDATING 1 #define TL_OPTION_FORCE_INDEX 2 +#define TL_OPTION_IGNORE_LEAVES 4 /* Some portable defines */ @@ -256,8 +301,17 @@ void debug_sync_point(const char* lock_name, uint lock_timeout); #define WEEK_YEAR 2 #define WEEK_FIRST_WEEKDAY 4 +enum enum_parsing_place +{ + NO_MATTER, + IN_HAVING, + SELECT_LIST, + IN_WHERE +}; + struct st_table; class THD; +class Item_arena; /* Struct to handle simple linked lists */ @@ -308,18 +362,60 @@ inline THD *_current_thd(void) #include "handler.h" #include "table.h" #include "field.h" /* Field definitions */ +#include "protocol.h" #include "sql_udf.h" +class user_var_entry; +enum enum_var_type +{ + OPT_DEFAULT, OPT_SESSION, OPT_GLOBAL +}; +class sys_var; #include "item.h" +typedef Comp_creator* (*chooser_compare_func_creator)(bool invert); +/* sql_parse.cc */ +void free_items(Item *item); +void cleanup_items(Item *item); +class THD; +void close_thread_tables(THD *thd, bool locked=0, bool skip_derived=0); +int check_one_table_access(THD *thd, ulong privilege, + TABLE_LIST *tables); +bool check_merge_table_access(THD *thd, char *db, + TABLE_LIST *table_list); +int multi_update_precheck(THD *thd, TABLE_LIST *tables); +int multi_delete_precheck(THD *thd, TABLE_LIST *tables, uint *table_count); +int update_precheck(THD *thd, TABLE_LIST *tables); +int delete_precheck(THD *thd, TABLE_LIST *tables); +int insert_precheck(THD *thd, TABLE_LIST *tables); +int create_table_precheck(THD *thd, TABLE_LIST *tables, + TABLE_LIST *create_table); +Item *negate_expression(THD *thd, Item *expr); #include "sql_class.h" +#include "sql_acl.h" +#include "tztime.h" #include "opt_range.h" #ifdef HAVE_QUERY_CACHE +struct Query_cache_query_flags +{ + unsigned int client_long_flag:1; + unsigned int client_protocol_41:1; + uint character_set_client_num; + uint character_set_results_num; + uint collation_connection_num; + ha_rows limit; + Time_zone *time_zone; + ulong sql_mode; + ulong max_sort_length; + ulong group_concat_max_len; +}; +#define QUERY_CACHE_FLAGS_SIZE sizeof(Query_cache_query_flags) #include "sql_cache.h" #define query_cache_store_query(A, B) query_cache.store_query(A, B) #define 
query_cache_destroy() query_cache.destroy() #define query_cache_result_size_limit(A) query_cache.result_size_limit(A) #define query_cache_init() query_cache.init() #define query_cache_resize(A) query_cache.resize(A) +#define query_cache_set_min_res_unit(A) query_cache.set_min_res_unit(A) #define query_cache_invalidate3(A, B, C) query_cache.invalidate(A, B, C) #define query_cache_invalidate1(A) query_cache.invalidate(A) #define query_cache_send_result_to_client(A, B, C) \ @@ -327,11 +423,13 @@ inline THD *_current_thd(void) #define query_cache_invalidate_by_MyISAM_filename_ref \ &query_cache_invalidate_by_MyISAM_filename #else +#define QUERY_CACHE_FLAGS_SIZE 0 #define query_cache_store_query(A, B) #define query_cache_destroy() #define query_cache_result_size_limit(A) #define query_cache_init() #define query_cache_resize(A) +#define query_cache_set_min_res_unit(A) #define query_cache_invalidate3(A, B, C) #define query_cache_invalidate1(A) #define query_cache_send_result_to_client(A, B, C) 0 @@ -342,14 +440,16 @@ inline THD *_current_thd(void) #define query_cache_invalidate_by_MyISAM_filename_ref NULL #endif /*HAVE_QUERY_CACHE*/ -int mysql_create_db(THD *thd, char *db, uint create_info, bool silent); +int mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create, bool silent); +int mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create); int mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent); void mysql_binlog_send(THD* thd, char* log_ident, my_off_t pos, ushort flags); -int mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists); +int mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists, + my_bool drop_temporary); int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, - bool log_query); + bool drop_temporary, bool log_query); int mysql_rm_table_part2_with_lock(THD *thd, TABLE_LIST *tables, - bool if_exists, + bool if_exists, bool drop_temporary, bool log_query); int quick_rm_table(enum db_type base,const char *db, const char *table_name); @@ -357,9 +457,15 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list); bool mysql_change_db(THD *thd,const char *name); void mysql_parse(THD *thd,char *inBuf,uint length); bool mysql_test_parse_for_slave(THD *thd,char *inBuf,uint length); +bool is_update_query(enum enum_sql_command command); +bool alloc_query(THD *thd, char *packet, ulong packet_length); void mysql_init_select(LEX *lex); -bool mysql_new_select(LEX *lex); +void mysql_init_query(THD *thd, uchar *buf, uint length); +void mysql_reset_thd_for_next_command(THD *thd); +bool mysql_new_select(LEX *lex, bool move_down); +void create_select_for_variable(const char *var_name); void mysql_init_multi_delete(LEX *lex); +void fix_multi_delete_lex(LEX* lex); void init_max_user_conn(void); void init_update_queries(void); void free_max_user_conn(void); @@ -367,32 +473,31 @@ extern "C" pthread_handler_decl(handle_one_connection,arg); extern "C" pthread_handler_decl(handle_bootstrap,arg); void end_thread(THD *thd,bool put_in_cache); void flush_thread_cache(); -void mysql_execute_command(void); +void mysql_execute_command(THD *thd); bool do_command(THD *thd); bool dispatch_command(enum enum_server_command command, THD *thd, char* packet, uint packet_length); +void log_slow_statement(THD *thd); bool check_dup(const char *db, const char *name, TABLE_LIST *tables); -#ifndef EMBEDDED_LIBRARY -bool check_stack_overrun(THD *thd,char *dummy); -#else -#define check_stack_overrun(A, B) 0 -#endif -bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST 
*tables); bool table_cache_init(void); void table_cache_free(void); uint cached_tables(void); void kill_mysql(void); -void close_connection(NET *net,uint errcode=0,bool lock=1); -bool check_access(THD *thd, ulong access, const char *db=0, ulong *save_priv=0, - bool no_grant=0, bool no_errors=0); +void close_connection(THD *thd, uint errcode, bool lock); +bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, + bool *write_to_binlog); +bool check_access(THD *thd, ulong access, const char *db, ulong *save_priv, + bool no_grant, bool no_errors); bool check_table_access(THD *thd, ulong want_access, TABLE_LIST *tables, - bool no_errors=0); + bool no_errors); bool check_global_access(THD *thd, ulong want_access); int mysql_backup_table(THD* thd, TABLE_LIST* table_list); int mysql_restore_table(THD* thd, TABLE_LIST* table_list); +int mysql_checksum_table(THD* thd, TABLE_LIST* table_list, + HA_CHECK_OPT* check_opt); int mysql_check_table(THD* thd, TABLE_LIST* table_list, HA_CHECK_OPT* check_opt); int mysql_repair_table(THD* thd, TABLE_LIST* table_list, @@ -401,47 +506,50 @@ int mysql_analyze_table(THD* thd, TABLE_LIST* table_list, HA_CHECK_OPT* check_opt); int mysql_optimize_table(THD* thd, TABLE_LIST* table_list, HA_CHECK_OPT* check_opt); -bool check_simple_select(); +int mysql_assign_to_keycache(THD* thd, TABLE_LIST* table_list, + LEX_STRING *key_cache_name); +int mysql_preload_keys(THD* thd, TABLE_LIST* table_list); +int reassign_keycache_tables(THD* thd, KEY_CACHE *src_cache, + KEY_CACHE *dst_cache); -/* net_pkg.c */ -void send_warning(NET *net, uint sql_errno, const char *err=0); -void net_printf(NET *net,uint sql_errno, ...); -void send_ok(NET *net,ha_rows affected_rows=0L,ulonglong id=0L, - const char *info=0); -void send_eof(NET *net,bool no_flush=0); -char *net_store_length(char *packet,ulonglong length); -char *net_store_length(char *packet,uint length); -char *net_store_data(char *to,const char *from); -char *net_store_data(char *to,int32 from); -char *net_store_data(char *to,longlong from); - -bool net_store_null(String *packet); -bool net_store_data(String *packet,uint32 from); -bool net_store_data(String *packet,longlong from); -bool net_store_data(String *packet,const char *from); -bool net_store_data(String *packet,const char *from,uint length); -bool net_store_data(String *packet,struct tm *tmp); -bool net_store_data(String* packet, I_List<i_string>* str_list); -bool net_store_data(String *packet,CONVERT *convert, const char *from, - uint length); -bool net_store_data(String *packet, CONVERT *convert, const char *from); +bool check_simple_select(); SORT_FIELD * make_unireg_sortorder(ORDER *order, uint *length); -int setup_order(THD *thd,TABLE_LIST *tables, List<Item> &fields, - List <Item> &all_fields, ORDER *order); +int setup_order(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, + List<Item> &fields, List <Item> &all_fields, ORDER *order); +int setup_group(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, + List<Item> &fields, List<Item> &all_fields, ORDER *order, + bool *hidden_group_fields); int handle_select(THD *thd, LEX *lex, select_result *result); -int mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &list,COND *conds, - ORDER *order, ORDER *group,Item *having,ORDER *proc_param, - ulong select_type,select_result *result); -int mysql_union(THD *thd,LEX *lex,select_result *result); +int mysql_select(THD *thd, Item ***rref_pointer_array, + TABLE_LIST *tables, uint wild_num, List<Item> &list, + COND *conds, uint og_num, ORDER *order, ORDER 
*group, + Item *having, ORDER *proc_param, ulong select_type, + select_result *result, SELECT_LEX_UNIT *unit, + SELECT_LEX *select_lex); +void free_underlaid_joins(THD *thd, SELECT_LEX *select); +int mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, + select_result *result); +int mysql_explain_select(THD *thd, SELECT_LEX *sl, char const *type, + select_result *result); +int mysql_union(THD *thd, LEX *lex, select_result *result, + SELECT_LEX_UNIT *unit); +int mysql_handle_derived(LEX *lex); Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type, - Item ***copy_func, Field **from_field, - bool group,bool modify_item); + Item ***copy_func, Field **from_field, + bool group, bool modify_item, uint convert_blob_length, + bool make_copy_field); +int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, + List<create_field> &fields, + List<Key> &keys, uint &db_options, + handler *file, KEY *&key_info_buffer, + uint &key_count, int select_field_count); int mysql_create_table(THD *thd,const char *db, const char *table_name, HA_CREATE_INFO *create_info, List<create_field> &fields, List<Key> &keys, - bool tmp_table); + bool tmp_table, uint select_field_count); + TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info, const char *db, const char *name, List<create_field> *extra_fields, @@ -452,13 +560,15 @@ int mysql_alter_table(THD *thd, char *new_db, char *new_name, HA_CREATE_INFO *create_info, TABLE_LIST *table_list, List<create_field> &fields, - List<Key> &keys,List<Alter_drop> &drop_list, - List<Alter_column> &alter_list, - ORDER *order, - bool drop_primary, + List<Key> &keys, + uint order_num, ORDER *order, enum enum_duplicates handle_duplicates, - enum enum_enable_or_disable keys_onoff=LEAVE_AS_IS, - bool simple_alter=0); + bool ignore, + ALTER_INFO *alter_info, bool do_send_ok=1); +int mysql_recreate_table(THD *thd, TABLE_LIST *table_list, bool do_send_ok); +int mysql_create_like_table(THD *thd, TABLE_LIST *table, + HA_CREATE_INFO *create_info, + Table_ident *src_table); bool mysql_rename_table(enum db_type base, const char *old_db, const char * old_name, @@ -466,22 +576,34 @@ bool mysql_rename_table(enum db_type base, const char * new_name); int mysql_create_index(THD *thd, TABLE_LIST *table_list, List<Key> &keys); int mysql_drop_index(THD *thd, TABLE_LIST *table_list, - List<Alter_drop> &drop_list); + ALTER_INFO *alter_info); +int mysql_prepare_update(THD *thd, TABLE_LIST *table_list, + TABLE_LIST *update_table_list, + Item **conds, uint order_num, ORDER *order); int mysql_update(THD *thd,TABLE_LIST *tables,List<Item> &fields, - List<Item> &values,COND *conds, - ORDER *order, ha_rows limit, - enum enum_duplicates handle_duplicates); -int mysql_multi_update_lock(THD *thd, - TABLE_LIST *table_list, - List<Item> *fields); + List<Item> &values,COND *conds, + uint order_num, ORDER *order, ha_rows limit, + enum enum_duplicates handle_duplicates, bool ignore); int mysql_multi_update(THD *thd, TABLE_LIST *table_list, List<Item> *fields, List<Item> *values, COND *conds, ulong options, - enum enum_duplicates handle_duplicates); + enum enum_duplicates handle_duplicates, bool ignore, + SELECT_LEX_UNIT *unit, SELECT_LEX *select_lex); +int mysql_multi_update_lock(THD *thd, + TABLE_LIST *table_list, + List<Item> *fields, + SELECT_LEX *select_lex); +int mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, + TABLE_LIST *insert_table_list, + TABLE_LIST *dup_table_list, TABLE *table, + List<Item> &fields, List_item *values, + List<Item> &update_fields, + List<Item> 
&update_values, enum_duplicates duplic); int mysql_insert(THD *thd,TABLE_LIST *table,List<Item> &fields, - List<List_item> &values, enum_duplicates flag); -void kill_delayed_threads(void); -int mysql_delete(THD *thd, TABLE_LIST *table, COND *conds, ORDER *order, + List<List_item> &values, List<Item> &update_fields, + List<Item> &update_values, enum_duplicates flag, bool ignore); +int mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds); +int mysql_delete(THD *thd, TABLE_LIST *table, COND *conds, SQL_LIST *order, ha_rows rows, ulong options); int mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok); TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type update); @@ -498,14 +620,19 @@ bool wait_for_tables(THD *thd); bool table_is_used(TABLE *table, bool wait_for_name_lock); bool drop_locked_tables(THD *thd,const char *db, const char *table_name); void abort_locked_tables(THD *thd,const char *db, const char *table_name); -Field *find_field_in_tables(THD *thd,Item_field *item,TABLE_LIST *tables); +void execute_init_command(THD *thd, sys_var_str *init_command_var, + rw_lock_t *var_mutex); +extern const Field *not_found_field; +Field *find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, + TABLE_LIST **where, bool report_error); Field *find_field_in_table(THD *thd,TABLE *table,const char *name,uint length, - bool check_grant,bool allow_rowid); + bool check_grant,bool allow_rowid, + uint *cached_field_index_ptr); #ifdef HAVE_OPENSSL #include <openssl/des.h> -struct st_des_keyblock -{ - DES_cblock key1, key2, key3; +struct st_des_keyblock +{ + DES_cblock key1, key2, key3; }; struct st_des_keyschedule { @@ -516,13 +643,12 @@ extern struct st_des_keyschedule des_keyschedule[10]; extern uint des_default_key; extern pthread_mutex_t LOCK_des_key_file; bool load_des_key_file(const char *file_name); -void free_des_key_file(); #endif /* HAVE_OPENSSL */ /* sql_do.cc */ int mysql_do(THD *thd, List<Item> &values); -/* sql_list.c */ +/* sql_show.cc */ int mysqld_show_dbs(THD *thd,const char *wild); int mysqld_show_open_tables(THD *thd,const char *wild); int mysqld_show_tables(THD *thd,const char *db,const char *wild); @@ -531,9 +657,13 @@ int mysqld_show_fields(THD *thd,TABLE_LIST *table, const char *wild, bool verbose); int mysqld_show_keys(THD *thd, TABLE_LIST *table); int mysqld_show_logs(THD *thd); +void append_identifier(THD *thd, String *packet, const char *name, + uint length); +int get_quote_char_for_identifier(THD *thd, const char *name, uint length); void mysqld_list_fields(THD *thd,TABLE_LIST *table, const char *wild); int mysqld_dump_create_info(THD *thd, TABLE *table, int fd = -1); int mysqld_show_create(THD *thd, TABLE_LIST *table_list); +int mysqld_show_create_db(THD *thd, char *dbname, HA_CREATE_INFO *create); void mysqld_list_processes(THD *thd,const char *user,bool verbose); int mysqld_show_status(THD *thd); @@ -543,6 +673,29 @@ int mysqld_show(THD *thd, const char *wild, show_var_st *variables, pthread_mutex_t *mutex); int mysql_find_files(THD *thd,List<char> *files, const char *db, const char *path, const char *wild, bool dir); +int mysqld_show_charsets(THD *thd,const char *wild); +int mysqld_show_collations(THD *thd,const char *wild); +int mysqld_show_storage_engines(THD *thd); +int mysqld_show_privileges(THD *thd); +int mysqld_show_column_types(THD *thd); +int mysqld_help (THD *thd, const char *text); + +/* sql_prepare.cc */ +int mysql_stmt_prepare(THD *thd, char *packet, uint packet_length, + LEX_STRING *name=NULL); +void 
mysql_stmt_execute(THD *thd, char *packet, uint packet_length); +void mysql_sql_stmt_execute(THD *thd, LEX_STRING *stmt_name); +void mysql_stmt_free(THD *thd, char *packet); +void mysql_stmt_reset(THD *thd, char *packet); +void mysql_stmt_get_longdata(THD *thd, char *pos, ulong packet_length); + +/* sql_error.cc */ +MYSQL_ERROR *push_warning(THD *thd, MYSQL_ERROR::enum_warning_level level, uint code, + const char *msg); +void push_warning_printf(THD *thd, MYSQL_ERROR::enum_warning_level level, + uint code, const char *format, ...); +void mysql_reset_errors(THD *thd); +my_bool mysqld_show_warnings(THD *thd, ulong levels_to_show); /* sql_handler.cc */ int mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen= 0); @@ -559,18 +712,16 @@ int mysql_ha_flush(THD *thd, TABLE_LIST *tables, uint mode_flags, /* sql_base.cc */ #define TMP_TABLE_KEY_EXTRA 8 void set_item_name(Item *item,char *pos,uint length); -bool add_field_to_list(char *field_name, enum enum_field_types type, +bool add_field_to_list(THD *thd, char *field_name, enum enum_field_types type, char *length, char *decimal, - uint type_modifier, Item *default_value,char *change, - TYPELIB *interval); + uint type_modifier, + Item *default_value, Item *on_update_value, + LEX_STRING *comment, + char *change, List<String> *interval_list, + CHARSET_INFO *cs, + uint uint_geom_type); void store_position_for_column(const char *name); -bool add_to_list(SQL_LIST &list,Item *group,bool asc=0); -TABLE_LIST *add_table_to_list(Table_ident *table,LEX_STRING *alias, - ulong table_option, - thr_lock_type flags=TL_UNLOCK, - List<String> *use_index=0, - List<String> *ignore_index=0); -void set_lock_for_tables(thr_lock_type lock_type); +bool add_to_list(THD *thd, SQL_LIST &list,Item *group,bool asc=0); void add_join_on(TABLE_LIST *b,Item *expr); void add_join_natural(TABLE_LIST *a,TABLE_LIST *b); bool add_proc_to_list(THD *thd, Item *item); @@ -578,36 +729,51 @@ TABLE *unlink_open_table(THD *thd,TABLE *list,TABLE *find); SQL_SELECT *make_select(TABLE *head, table_map const_tables, table_map read_tables, COND *conds, int *error); -Item ** find_item_in_list(Item *item,List<Item> &items); -bool insert_fields(THD *thd,TABLE_LIST *tables, +enum find_item_error_report_type {REPORT_ALL_ERRORS, REPORT_EXCEPT_NOT_FOUND, + IGNORE_ERRORS}; +extern const Item **not_found_item; +Item ** find_item_in_list(Item *item, List<Item> &items, uint *counter, + find_item_error_report_type report_error, + bool *unaliased); +bool get_key_map_from_key_list(key_map *map, TABLE *table, + List<String> *index_list); +bool insert_fields(THD *thd,TABLE_LIST *tables, const char *db_name, const char *table_name, List_iterator<Item> *it); bool setup_tables(TABLE_LIST *tables); -int setup_fields(THD *thd,TABLE_LIST *tables,List<Item> &item, - bool set_query_id,List<Item> *sum_func_list, - bool allow_sum_func); +int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields, + List<Item> *sum_func_list, uint wild_num); +int setup_fields(THD *thd, Item** ref_pointer_array, TABLE_LIST *tables, + List<Item> &item, bool set_query_id, + List<Item> *sum_func_list, bool allow_sum_func); int setup_conds(THD *thd,TABLE_LIST *tables,COND **conds); -int setup_ftfuncs(THD *thd); -int init_ftfuncs(THD *thd, bool no_order); +int setup_ftfuncs(SELECT_LEX* select); +int init_ftfuncs(THD *thd, SELECT_LEX* select, bool no_order); void wait_for_refresh(THD *thd); -int open_tables(THD *thd,TABLE_LIST *tables); +int open_tables(THD *thd, TABLE_LIST *tables, uint *counter); +int simple_open_n_lock_tables(THD 
*thd,TABLE_LIST *tables); int open_and_lock_tables(THD *thd,TABLE_LIST *tables); -int lock_tables(THD *thd,TABLE_LIST *tables); +int open_normal_and_derived_tables(THD *thd, TABLE_LIST *tables); +void relink_tables_for_derived(THD *thd); +int lock_tables(THD *thd, TABLE_LIST *tables, uint counter); TABLE *open_temporary_table(THD *thd, const char *path, const char *db, const char *table_name, bool link_in_list); bool rm_temporary_table(enum db_type base, char *path); -bool send_fields(THD *thd,List<Item> &item,uint send_field_count); void free_io_cache(TABLE *entry); void intern_close_table(TABLE *entry); bool close_thread_table(THD *thd, TABLE **table_ptr); -void close_thread_tables(THD *thd,bool locked=0); void close_temporary_tables(THD *thd); +TABLE_LIST * find_table_in_list(TABLE_LIST *table, + const char *db_name, const char *table_name); +TABLE_LIST * find_real_table_in_list(TABLE_LIST *table, + const char *db_name, + const char *table_name); TABLE **find_temporary_table(THD *thd, const char *db, const char *table_name); bool close_temporary_table(THD *thd, const char *db, const char *table_name); void close_temporary(TABLE *table, bool delete_table=1); bool rename_temporary_table(THD* thd, TABLE *table, const char *new_db, const char *table_name); -void remove_db_from_cache(const my_string db); +void remove_db_from_cache(const char *db); void flush_tables(); /* bits for last argument to remove_table_from_cache() */ @@ -630,6 +796,7 @@ bool eval_const_cond(COND *cond); /* sql_load.cc */ int mysql_load(THD *thd,sql_exchange *ex, TABLE_LIST *table_list, List<Item> &fields, enum enum_duplicates handle_duplicates, + bool ignore, bool local_file,thr_lock_type lock_type); int write_record(TABLE *table,COPY_INFO *info); @@ -645,24 +812,25 @@ extern "C" pthread_handler_decl(handle_manager, arg); #ifndef DBUG_OFF void print_where(COND *cond,const char *info); void print_cached_tables(void); -void TEST_filesort(SORT_FIELD *sortorder,uint s_length, ha_rows special); +void TEST_filesort(SORT_FIELD *sortorder,uint s_length); #endif void mysql_print_status(THD *thd); /* key.cc */ int find_ref_key(TABLE *form,Field *field, uint *offset); void key_copy(byte *key,TABLE *form,uint index,uint key_length); void key_restore(TABLE *form,byte *key,uint index,uint key_length); -int key_cmp(TABLE *form,const byte *key,uint index,uint key_length); +bool key_cmp_if_same(TABLE *form,const byte *key,uint index,uint key_length); void key_unpack(String *to,TABLE *form,uint index); bool check_if_key_used(TABLE *table, uint idx, List<Item> &fields); -void init_errmessage(void); +int key_cmp(KEY_PART_INFO *key_part, const byte *key, uint key_length); +bool init_errmessage(void); void sql_perror(const char *message); -void vprint_msg_to_log( enum loglevel level, const char *format, va_list args ); -void sql_print_error( const char *format, ... 
); -void sql_print_warning( const char *format, ...); -void sql_print_information( const char *format, ...); +void vprint_msg_to_log(enum loglevel level, const char *format, va_list args); +void sql_print_error(const char *format, ...); +void sql_print_warning(const char *format, ...); +void sql_print_information(const char *format, ...); @@ -673,8 +841,30 @@ bool open_log(MYSQL_LOG *log, const char *hostname, const char *index_file_name, enum_log_type type, bool read_append, bool no_auto_events, ulong max_size); + /* mysqld.cc */ -void clear_error_message(THD *thd); +extern void yyerror(const char*); + +/* item_func.cc */ +extern bool check_reserved_words(LEX_STRING *name); + +/* strfunc.cc */ +ulonglong find_set(TYPELIB *lib, const char *x, uint length, CHARSET_INFO *cs, + char **err_pos, uint *err_len, bool *set_warning); +uint find_type(TYPELIB *lib, const char *find, uint length, bool part_match); +uint find_type2(TYPELIB *lib, const char *find, uint length, CHARSET_INFO *cs); +void unhex_type2(TYPELIB *lib); +uint check_word(TYPELIB *lib, const char *val, const char *end, + const char **end_of_word); + +bool is_keyword(const char *name, uint len); + + +#define MY_DB_OPT_FILE "db.opt" +bool load_db_opt(THD *thd, const char *path, HA_CREATE_INFO *create); +bool my_dbopt_init(void); +void my_dbopt_cleanup(void); +void my_dbopt_free(void); /* External variables @@ -682,21 +872,31 @@ void clear_error_message(THD *thd); extern time_t start_time; extern char *mysql_data_home,server_version[SERVER_VERSION_LENGTH], - max_sort_char, mysql_real_data_home[], *charsets_list; -extern my_string mysql_tmpdir; + mysql_real_data_home[], *opt_mysql_tmpdir, mysql_charsets_dir[], + def_ft_boolean_syntax[sizeof(ft_boolean_syntax)]; +#define mysql_tmpdir (my_tmpdir(&mysql_tmpdir_list)) +extern MY_TMPDIR mysql_tmpdir_list; extern const char *command_name[]; -extern const char *first_keyword, *localhost, *delayed_user; +extern const char *first_keyword, *my_localhost, *delayed_user, *binary_keyword; extern const char **errmesg; /* Error messages */ extern const char *myisam_recover_options_str; -extern uchar *days_in_month; -extern char language[LIBLEN],reg_ext[FN_EXTLEN]; +extern const char *in_left_expr_name, *in_additional_cond; +extern Eq_creator eq_creator; +extern Ne_creator ne_creator; +extern Gt_creator gt_creator; +extern Lt_creator lt_creator; +extern Ge_creator ge_creator; +extern Le_creator le_creator; +extern char language[FN_REFLEN], reg_ext[FN_EXTLEN]; extern char glob_hostname[FN_REFLEN], mysql_home[FN_REFLEN]; -extern char pidfile_name[FN_REFLEN], time_zone[30], *opt_init_file; +extern char pidfile_name[FN_REFLEN], system_time_zone[30], *opt_init_file; extern char log_error_file[FN_REFLEN]; extern double log_10[32]; +extern ulonglong log_10_int[20]; extern ulonglong keybuff_size; extern ulong refresh_version,flush_version, thread_id,query_id,opened_tables; -extern ulong created_tmp_tables, created_tmp_disk_tables; +extern ulong created_tmp_tables, created_tmp_disk_tables, bytes_sent; +extern ulong binlog_cache_use, binlog_cache_disk_use; extern ulong aborted_threads,aborted_connects; extern ulong delayed_insert_timeout; extern ulong delayed_insert_limit, delayed_queue_size; @@ -706,32 +906,39 @@ extern ulong filesort_rows, filesort_range_count, filesort_scan_count; extern ulong filesort_merge_passes; extern ulong select_range_check_count, select_range_count, select_scan_count; extern ulong select_full_range_join_count,select_full_join_count; -extern ulong slave_open_temp_tables, 
query_cache_size; +extern ulong slave_open_temp_tables; +extern ulong query_cache_size, query_cache_min_res_unit; extern ulong thd_startup_options, slow_launch_threads, slow_launch_time; extern ulong server_id, concurrency; extern ulong ha_read_count, ha_write_count, ha_delete_count, ha_update_count; extern ulong ha_read_key_count, ha_read_next_count, ha_read_prev_count; extern ulong ha_read_first_count, ha_read_last_count; -extern ulong ha_read_rnd_count, ha_read_rnd_next_count; +extern ulong ha_read_rnd_count, ha_read_rnd_next_count, ha_discover_count; extern ulong ha_commit_count, ha_rollback_count,table_cache_size; extern ulong max_connections,max_connect_errors, connect_timeout; +extern ulong slave_net_timeout, slave_trans_retries; extern ulong max_user_connections; -extern ulong long_query_count, what_to_log,flush_time,opt_sql_mode; +extern ulong max_prepared_stmt_count, prepared_stmt_count; +extern ulong long_query_count, what_to_log,flush_time; extern ulong query_buff_size, thread_stack,thread_stack_min; extern ulong binlog_cache_size, max_binlog_cache_size, open_files_limit; extern ulong max_binlog_size, max_relay_log_size; extern ulong rpl_recovery_rank, thread_cache_size; extern ulong com_stat[(uint) SQLCOM_END], com_other, back_log; +extern ulong com_stmt_prepare, com_stmt_execute, com_stmt_send_long_data; +extern ulong com_stmt_reset, com_stmt_close; extern ulong specialflag, current_pid; - +extern ulong expire_logs_days, sync_binlog_period, sync_binlog_counter; +extern my_bool relay_log_purge, opt_innodb_safe_binlog; extern uint test_flags,select_errors,ha_open_options; -extern uint protocol_version,dropping_tables; +extern uint protocol_version, mysqld_port, dropping_tables; extern uint delay_key_write_options, lower_case_table_names; extern bool opt_endinfo, using_udf_functions, locked_in_memory; -extern bool opt_using_transactions, mysql_embedded; +extern bool opt_using_transactions, mysqld_embedded; extern bool using_update_log, opt_large_files, server_id_supplied; extern bool opt_log, opt_update_log, opt_bin_log, opt_slow_log, opt_error_log; extern bool opt_disable_networking, opt_skip_show_db; +extern bool opt_character_set_client_handshake; extern bool volatile abort_loop, shutdown_in_progress, grant_option; extern uint volatile thread_count, thread_running, global_read_lock; extern my_bool opt_sql_bin_update, opt_safe_user_create, opt_no_mix_types; @@ -739,34 +946,64 @@ extern my_bool opt_safe_show_db, opt_local_infile; extern my_bool opt_slave_compressed_protocol, use_temp_pool; extern my_bool opt_readonly, lower_case_file_system; extern my_bool opt_enable_named_pipe, opt_sync_frm, opt_allow_suspicious_udfs; +extern my_bool opt_secure_auth; +extern my_bool opt_log_slow_admin_statements; +extern uint opt_crash_binlog_innodb; +extern char *shared_memory_base_name, *mysqld_unix_port; +extern bool opt_enable_shared_memory; +extern char *default_tz_name; extern MYSQL_LOG mysql_log,mysql_update_log,mysql_slow_log,mysql_bin_log; extern FILE *bootstrap_file; -extern pthread_key(MEM_ROOT*,THR_MALLOC); -extern pthread_key(NET*, THR_NET); +extern FILE *stderror_file; +extern pthread_key(MEM_ROOT**,THR_MALLOC); extern pthread_mutex_t LOCK_mysql_create_db,LOCK_Acl,LOCK_open, LOCK_thread_count,LOCK_mapped_file,LOCK_user_locks, LOCK_status, - LOCK_grant, LOCK_error_log, LOCK_delayed_insert, + LOCK_error_log, LOCK_delayed_insert, LOCK_uuid_generator, LOCK_delayed_status, LOCK_delayed_create, LOCK_crypt, LOCK_timezone, LOCK_slave_list, LOCK_active_mi, LOCK_manager, - 
LOCK_global_system_variables; + LOCK_global_system_variables, LOCK_user_conn, + LOCK_prepared_stmt_count; +#ifdef HAVE_OPENSSL +extern pthread_mutex_t LOCK_des_key_file; +#endif +extern rw_lock_t LOCK_grant, LOCK_sys_init_connect, LOCK_sys_init_slave; extern pthread_cond_t COND_refresh, COND_thread_count, COND_manager; extern pthread_attr_t connection_attrib; extern I_List<THD> threads; +extern I_List<NAMED_LIST> key_caches; extern MY_BITMAP temp_pool; -extern DATE_FORMAT dayord; -extern String empty_string; +extern String my_empty_string; +extern const String my_null_string; extern SHOW_VAR init_vars[],status_vars[], internal_vars[]; +extern SHOW_COMP_OPTION have_isam; +extern SHOW_COMP_OPTION have_innodb; +extern SHOW_COMP_OPTION have_berkeley_db; +extern SHOW_COMP_OPTION have_ndbcluster; extern struct system_variables global_system_variables; extern struct system_variables max_system_variables; extern struct rand_struct sql_rand; +extern const char *opt_date_time_formats[]; +extern KNOWN_DATE_TIME_FORMAT known_date_time_formats[]; + +extern String null_string; +extern HASH open_cache; +extern TABLE *unused_tables; +extern I_List<i_string> binlog_do_db, binlog_ignore_db; +extern const char* any_db; +extern struct my_option my_long_options[]; + /* optional things, have_* variables */ extern SHOW_COMP_OPTION have_isam, have_innodb, have_berkeley_db; +extern SHOW_COMP_OPTION have_example_db, have_archive_db, have_csv_db; extern SHOW_COMP_OPTION have_raid, have_openssl, have_symlink; extern SHOW_COMP_OPTION have_query_cache, have_berkeley_db, have_innodb; +extern SHOW_COMP_OPTION have_geometry, have_rtree_keys; extern SHOW_COMP_OPTION have_crypt; +extern SHOW_COMP_OPTION have_compress; +extern SHOW_COMP_OPTION have_blackhole_db; #ifndef __WIN__ extern pthread_t signal_thread; @@ -811,13 +1048,22 @@ void unlock_table_names(THD *thd, TABLE_LIST *table_list, void unireg_init(ulong options); void unireg_end(void); -int rea_create_table(my_string file_name,HA_CREATE_INFO *create_info, +bool mysql_create_frm(THD *thd, my_string file_name, + const char *db, const char *table, + HA_CREATE_INFO *create_info, + List<create_field> &create_field, + uint key_count,KEY *key_info,handler *db_type); +int rea_create_table(THD *thd, my_string file_name, + const char *db, const char *table, + HA_CREATE_INFO *create_info, List<create_field> &create_field, uint key_count,KEY *key_info); int format_number(uint inputflag,uint max_length,my_string pos,uint length, my_string *errpos); int openfrm(const char *name,const char *alias,uint filestat,uint prgflag, uint ha_open_flags, TABLE *outparam); +int readfrm(const char *name, const void** data, uint* length); +int writefrm(const char* name, const void* data, uint len); int closefrm(TABLE *table); db_type get_table_type(const char *name); int read_string(File file, gptr *to, uint length); @@ -825,53 +1071,75 @@ void free_blobs(TABLE *table); int set_zone(int nr,int min_zone,int max_zone); ulong convert_period_to_month(ulong period); ulong convert_month_to_period(ulong month); -long calc_daynr(uint year,uint month,uint day); uint calc_days_in_year(uint year); void get_date_from_daynr(long daynr,uint *year, uint *month, uint *day); -void init_time(void); -long my_gmt_sec(TIME *, long *current_timezone); -time_t str_to_timestamp(const char *str,uint length); -bool str_to_time(const char *str,uint length,TIME *l_time); -longlong str_to_datetime(const char *str,uint length,bool fuzzy_date); -timestamp_type str_to_TIME(const char *str, uint length, TIME *l_time, - bool 
fuzzy_date); +my_time_t TIME_to_timestamp(THD *thd, const TIME *t, bool *not_exist); +bool str_to_time_with_warn(const char *str,uint length,TIME *l_time); +timestamp_type str_to_datetime_with_warn(const char *str, uint length, + TIME *l_time, uint flags); +longlong number_to_TIME(longlong nr, TIME *time_res, bool fuzzy_date, + int *was_cut); +void localtime_to_TIME(TIME *to, struct tm *from); +void calc_time_from_sec(TIME *to, long seconds, long microseconds); + +void make_truncated_value_warning(THD *thd, const char *str_val, + uint str_length, timestamp_type time_type); +extern DATE_TIME_FORMAT *date_time_format_make(timestamp_type format_type, + const char *format_str, + uint format_length); +extern DATE_TIME_FORMAT *date_time_format_copy(THD *thd, + DATE_TIME_FORMAT *format); +const char *get_date_time_format_str(KNOWN_DATE_TIME_FORMAT *format, + timestamp_type type); +extern bool make_date_time(DATE_TIME_FORMAT *format, TIME *l_time, + timestamp_type type, String *str); +void make_datetime(const DATE_TIME_FORMAT *format, const TIME *l_time, + String *str); +void make_date(const DATE_TIME_FORMAT *format, const TIME *l_time, + String *str); +void make_time(const DATE_TIME_FORMAT *format, const TIME *l_time, + String *str); +ulonglong TIME_to_ulonglong_datetime(const TIME *time); +ulonglong TIME_to_ulonglong_date(const TIME *time); +ulonglong TIME_to_ulonglong_time(const TIME *time); +ulonglong TIME_to_ulonglong(const TIME *time); int test_if_number(char *str,int *res,bool allow_wildcards); void change_byte(byte *,uint,char,char); -extern "C" void unireg_abort(int exit_code); void init_read_record(READ_RECORD *info, THD *thd, TABLE *reg_form, SQL_SELECT *select, int use_record_cache, bool print_errors); +void init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table, + bool print_error, uint idx); void end_read_record(READ_RECORD *info); -ha_rows filesort(TABLE *form,struct st_sort_field *sortorder, uint s_length, - SQL_SELECT *select, ha_rows special,ha_rows max_rows, - ha_rows *examined_rows); +ha_rows filesort(THD *thd, TABLE *form,struct st_sort_field *sortorder, + uint s_length, SQL_SELECT *select, + ha_rows max_rows, ha_rows *examined_rows); +void filesort_free_buffers(TABLE *table); void change_double_for_sort(double nr,byte *to); int get_quick_record(SQL_SELECT *select); int calc_weekday(long daynr,bool sunday_first_day_of_week); uint calc_week(TIME *l_time, uint week_behaviour, uint *year); void find_date(char *pos,uint *vek,uint flag); TYPELIB *convert_strings_to_array_type(my_string *typelibs, my_string *end); -TYPELIB *typelib(List<String> &strings); +TYPELIB *typelib(MEM_ROOT *mem_root, List<String> &strings); ulong get_form_pos(File file, uchar *head, TYPELIB *save_names); ulong make_new_entry(File file,uchar *fileinfo,TYPELIB *formnames, const char *newname); ulong next_io_size(ulong pos); -void append_unescaped(String *res,const char *pos); -int create_frm(char *name,uint reclength,uchar *fileinfo, +void append_unescaped(String *res, const char *pos, uint length); +int create_frm(char *name, const char *db, const char *table, + uint reclength,uchar *fileinfo, HA_CREATE_INFO *create_info, uint keys); void update_create_info_from_table(HA_CREATE_INFO *info, TABLE *form); int rename_file_ext(const char * from,const char * to,const char * ext); bool check_db_name(char *db); bool check_column_name(const char *name); bool check_table_name(const char *name, uint length); -char *get_field(MEM_ROOT *mem,TABLE *table,uint fieldnr); -int wild_case_compare(const char *str,const 
char *wildstr); -int wild_compare(const char *str,const char *str_end, - const char *wildstr,const char *wildend,char escape); -int wild_case_compare(const char *str,const char *str_end, - const char *wildstr,const char *wildend,char escape); +char *get_field(MEM_ROOT *mem, Field *field); +bool get_field(MEM_ROOT *mem, Field *field, class String *res); +int wild_case_compare(CHARSET_INFO *cs, const char *str,const char *wildstr); /* from hostname.cc */ struct in_addr; @@ -887,9 +1155,11 @@ extern bool sql_cache_init(); extern void sql_cache_free(); extern int sql_cache_hit(THD *thd, char *inBuf, uint length); -/* item.cc */ -Item *get_system_var(enum_var_type var_type, LEX_STRING name); - +/* item_func.cc */ +Item *get_system_var(THD *thd, enum_var_type var_type, LEX_STRING name, + LEX_STRING component); +int get_var_with_binlog(THD *thd, LEX_STRING &name, + user_var_entry **out_entry); /* log.cc */ bool flush_error_log(void); @@ -897,26 +1167,34 @@ bool flush_error_log(void); void free_list(I_List <i_string_pair> *list); void free_list(I_List <i_string> *list); +/* sql_yacc.cc */ +extern int yyparse(void *thd); + +/* frm_crypt.cc */ +#ifdef HAVE_CRYPTED_FRM +SQL_CRYPT *get_crypt_for_frm(void); +#endif + /* Some inline functions for more speed */ -inline bool add_item_to_list(Item *item) +inline bool add_item_to_list(THD *thd, Item *item) { - return current_lex->select->item_list.push_back(item); + return thd->lex->current_select->add_item_to_list(thd, item); } -inline bool add_value_to_list(Item *value) +inline bool add_value_to_list(THD *thd, Item *value) { - return current_lex->value_list.push_back(value); + return thd->lex->value_list.push_back(value); } -inline bool add_order_to_list(Item *item,bool asc) +inline bool add_order_to_list(THD *thd, Item *item, bool asc) { - return add_to_list(current_lex->select->order_list,item,asc); + return thd->lex->current_select->add_order_to_list(thd, item, asc); } -inline bool add_group_to_list(Item *item,bool asc) +inline bool add_group_to_list(THD *thd, Item *item, bool asc) { - return add_to_list(current_lex->select->group_list,item,asc); + return thd->lex->current_select->add_group_to_list(thd, item, asc); } inline void mark_as_null_row(TABLE *table) @@ -929,10 +1207,86 @@ inline void mark_as_null_row(TABLE *table) inline void table_case_convert(char * name, uint length) { if (lower_case_table_names) - casedn(name, length); + my_casedn(files_charset_info, name, length); } inline const char *table_case_name(HA_CREATE_INFO *info, const char *name) { return ((lower_case_table_names == 2 && info->alias) ? 
info->alias : name); } + +inline ulong sql_rnd_with_mutex() +{ + pthread_mutex_lock(&LOCK_thread_count); + ulong tmp=(ulong) (my_rnd(&sql_rand) * 0xffffffff); /* make all bits random */ + pthread_mutex_unlock(&LOCK_thread_count); + return tmp; +} + +Comp_creator *comp_eq_creator(bool invert); +Comp_creator *comp_ge_creator(bool invert); +Comp_creator *comp_gt_creator(bool invert); +Comp_creator *comp_le_creator(bool invert); +Comp_creator *comp_lt_creator(bool invert); +Comp_creator *comp_ne_creator(bool invert); + +Item * all_any_subquery_creator(Item *left_expr, + chooser_compare_func_creator cmp, + bool all, + SELECT_LEX *select_lex); + +/* + clean/setup table fields and map + + SYNOPSYS + setup_table_map() + table - TABLE structure pointer (which should be setup) + table_list TABLE_LIST structure pointer (owner of TABLE) + tablenr - table number +*/ + +inline void setup_table_map(TABLE *table, TABLE_LIST *table_list, uint tablenr) +{ + table->used_fields= 0; + table->const_table= 0; + table->null_row= 0; + table->status= STATUS_NO_RECORD; + table->keys_in_use_for_query= table->keys_in_use; + table->maybe_null= test(table->outer_join= table_list->outer_join); + table->tablenr= tablenr; + table->map= (table_map) 1 << tablenr; + table->force_index= table_list->force_index; +} + + +/* + SYNOPSYS + hexchar_to_int() + convert a hex digit into number +*/ + +inline int hexchar_to_int(char c) +{ + if (c <= '9' && c >= '0') + return c-'0'; + c|=32; + if (c <= 'f' && c >= 'a') + return c-'a'+10; + return -1; +} + + +/* + Some functions that are different in the embedded library and the normal + server +*/ + +#ifndef EMBEDDED_LIBRARY +extern "C" void unireg_abort(int exit_code); +void kill_delayed_threads(void); +bool check_stack_overrun(THD *thd,char *dummy); +#else +#define unireg_abort(exit_code) DBUG_RETURN(exit_code) +inline void kill_delayed_threads(void) {} +#define check_stack_overrun(A, B) 0 +#endif diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 00bcdbf7132..740e1a419c7 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -17,7 +17,6 @@ #include "mysql_priv.h" #include <m_ctype.h> #include <my_dir.h> -#include "sql_acl.h" #include "slave.h" #include "sql_repl.h" #include "repl_failsafe.h" @@ -30,25 +29,60 @@ #include "ha_innodb.h" #endif #include "ha_myisam.h" +#ifdef HAVE_ISAM +#include "ha_isam.h" +#endif +#ifdef HAVE_NDBCLUSTER_DB +#include "ha_ndbcluster.h" +#endif + +#ifdef HAVE_INNOBASE_DB +#define OPT_INNODB_DEFAULT 1 +#else +#define OPT_INNODB_DEFAULT 0 +#endif +#ifdef HAVE_BERKLEY_DB +#define OPT_BDB_DEFAULT 1 +#else +#define OPT_BDB_DEFAULT 0 +#endif +#ifdef HAVE_ISAM_DB +#define OPT_ISAM_DEFAULT 1 +#else +#define OPT_ISAM_DEFAULT 0 +#endif +#ifdef HAVE_NDBCLUSTER_DB +#define OPT_NDBCLUSTER_DEFAULT 0 +#if defined(NDB_SHM_TRANSPORTER) && MYSQL_VERSION_ID >= 50000 +#define OPT_NDB_SHM_DEFAULT 1 +#else +#define OPT_NDB_SHM_DEFAULT 0 +#endif +#else +#define OPT_NDBCLUSTER_DEFAULT 0 +#endif + #include <nisam.h> #include <thr_alarm.h> #include <ft_global.h> -#include <assert.h> +#include <errmsg.h> + +#define mysqld_charset &my_charset_latin1 #ifndef DBUG_OFF #define ONE_THREAD #endif -#define SHUTDOWN_THD -#define MAIN_THD -#define SIGNAL_THD - #ifdef HAVE_purify #define IF_PURIFY(A,B) (A) #else #define IF_PURIFY(A,B) (B) #endif +#ifndef INADDR_NONE +#define INADDR_NONE -1 // Error value from inet_addr +#endif + /* stack traces are only supported on linux intel */ #if defined(__linux__) && defined(__i386__) && defined(USE_PSTACK) #define HAVE_STACK_TRACE_ON_SEGV @@ -60,7 +94,7 @@ char 
pstack_file_name[80]; #if defined(HAVE_DEC_3_2_THREADS) || defined(SIGNALS_DONT_BREAK_READ) || defined(HAVE_purify) && defined(__linux__) #define HAVE_CLOSE_SERVER_SOCK 1 -#endif +#endif extern "C" { // Because of SCO 3.2V4.2 #include <errno.h> @@ -128,22 +162,22 @@ int deny_severity = LOG_WARNING; #include <zEvent.h> //For NSS event structures #include <zPublics.h> -void *neb_consumer_id=NULL; //For storing NEB consumer id -char datavolname[256]={0}; -VolumeID_t datavolid; -event_handle_t eh; -Report_t ref; -void *refneb=NULL; -bool event_flag=FALSE; -int volumeid=-1; +static void *neb_consumer_id= NULL; //For storing NEB consumer id +static char datavolname[256]= {0}; +static VolumeID_t datavolid; +static event_handle_t eh; +static Report_t ref; +static void *refneb= NULL; +my_bool event_flag= FALSE; +static int volumeid= -1; /* NEB event callback */ unsigned long neb_event_callback(struct EventBlock *eblock); -void registerwithneb(); -void getvolumename(); -void getvolumeID(BYTE *volumeName); +static void registerwithneb(); +static void getvolumename(); +static void getvolumeID(BYTE *volumeName); #endif /* __NETWARE__ */ - + #ifdef _AIX41 int initgroups(const char *,unsigned int); @@ -182,310 +216,355 @@ inline void reset_floating_point_exceptions() #else #define THR_KILL_SIGNAL SIGUSR2 // Can't use this with LinuxThreads #endif +#define MYSQL_KILL_SIGNAL SIGTERM #ifdef HAVE_GLIBC2_STYLE_GETHOSTBYNAME_R #include <sys/types.h> #else #include <my_pthread.h> // For thr_setconcurency() #endif -#if defined(HAVE_GETRLIMIT) && defined(RLIMIT_NOFILE) && !defined(HAVE_mit_thread) -#define SET_RLIMIT_NOFILE -#endif #ifdef SOLARIS extern "C" int gethostname(char *name, int namelen); #endif -#define MYSQL_KILL_SIGNAL SIGTERM - -#ifndef DBUG_OFF -static const char* default_dbug_option=IF_WIN("d:t:i:O,\\mysqld.trace", - "d:t:i:o,/tmp/mysqld.trace"); -#endif -#ifdef __NT__ -static char pipe_name[512]; -static SECURITY_ATTRIBUTES saPipeSecurity; -static SECURITY_DESCRIPTOR sdPipeDescriptor; -static HANDLE hPipe = INVALID_HANDLE_VALUE; -static pthread_cond_t COND_handler_count; -static uint handler_count; -#endif -#ifdef __WIN__ -static bool start_mode=0, use_opt_args; -static int opt_argc; -static char **opt_argv; -#endif - -#ifdef HAVE_BERKELEY_DB -SHOW_COMP_OPTION have_berkeley_db=SHOW_OPTION_YES; -#else -SHOW_COMP_OPTION have_berkeley_db=SHOW_OPTION_NO; -#endif -#ifdef HAVE_INNOBASE_DB -SHOW_COMP_OPTION have_innodb=SHOW_OPTION_YES; -#else -SHOW_COMP_OPTION have_innodb=SHOW_OPTION_NO; -#endif -#ifdef HAVE_ISAM -SHOW_COMP_OPTION have_isam=SHOW_OPTION_YES; -#else -SHOW_COMP_OPTION have_isam=SHOW_OPTION_NO; -#endif -#ifdef USE_RAID -SHOW_COMP_OPTION have_raid=SHOW_OPTION_YES; -#else -SHOW_COMP_OPTION have_raid=SHOW_OPTION_NO; -#endif -#ifdef HAVE_OPENSSL -SHOW_COMP_OPTION have_openssl=SHOW_OPTION_YES; -#else -SHOW_COMP_OPTION have_openssl=SHOW_OPTION_NO; -#endif -#ifdef HAVE_BROKEN_REALPATH -SHOW_COMP_OPTION have_symlink=SHOW_OPTION_NO; -#else -SHOW_COMP_OPTION have_symlink=SHOW_OPTION_YES; -#endif -#ifdef HAVE_QUERY_CACHE -SHOW_COMP_OPTION have_query_cache=SHOW_OPTION_YES; -#else -SHOW_COMP_OPTION have_query_cache=SHOW_OPTION_NO; -#endif -#ifdef HAVE_CRYPT -SHOW_COMP_OPTION have_crypt=SHOW_OPTION_YES; -#else -SHOW_COMP_OPTION have_crypt=SHOW_OPTION_NO; -#endif +/* Constants */ -bool opt_large_files= sizeof(my_off_t) > 4; +const char *show_comp_option_name[]= {"YES", "NO", "DISABLED"}; +const char *sql_mode_names[] = +{ + "REAL_AS_FLOAT", "PIPES_AS_CONCAT", "ANSI_QUOTES", "IGNORE_SPACE", + "?", 
"ONLY_FULL_GROUP_BY", "NO_UNSIGNED_SUBTRACTION", + "NO_DIR_IN_CREATE", + "POSTGRESQL", "ORACLE", "MSSQL", "DB2", "MAXDB", "NO_KEY_OPTIONS", + "NO_TABLE_OPTIONS", "NO_FIELD_OPTIONS", "MYSQL323", "MYSQL40", "ANSI", + "NO_AUTO_VALUE_ON_ZERO", NullS +}; +TYPELIB sql_mode_typelib= { array_elements(sql_mode_names)-1,"", + sql_mode_names, NULL }; +const char *first_keyword= "first", *binary_keyword= "BINARY"; +const char *my_localhost= "localhost", *delayed_user= "DELAYED"; #if SIZEOF_OFF_T > 4 && defined(BIG_TABLES) #define GET_HA_ROWS GET_ULL #else #define GET_HA_ROWS GET_ULONG #endif -#ifdef HAVE_LIBWRAP -char *libwrapName= NULL; -#endif +bool opt_large_files= sizeof(my_off_t) > 4; /* - Variables to store startup options + Used with --help for detailed option */ +bool opt_help= 0; +bool opt_verbose= 0; -my_bool opt_skip_slave_start = 0; // If set, slave is not autostarted -/* - If set, some standard measures to enforce slave data integrity will not - be performed -*/ -my_bool opt_reckless_slave = 0; +arg_cmp_func Arg_comparator::comparator_matrix[4][2] = +{{&Arg_comparator::compare_string, &Arg_comparator::compare_e_string}, + {&Arg_comparator::compare_real, &Arg_comparator::compare_e_real}, + {&Arg_comparator::compare_int_signed, &Arg_comparator::compare_e_int}, + {&Arg_comparator::compare_row, &Arg_comparator::compare_e_row}}; + + +/* Global variables */ -ulong back_log, connect_timeout, concurrency; -char mysql_home[FN_REFLEN], pidfile_name[FN_REFLEN], time_zone[30]; -char log_error_file[FN_REFLEN]; bool opt_log, opt_update_log, opt_bin_log, opt_slow_log; bool opt_error_log= IF_WIN(1,0); bool opt_disable_networking=0, opt_skip_show_db=0; +bool opt_character_set_client_handshake= 1; bool lower_case_table_names_used= 0; +bool server_id_supplied = 0; +bool opt_endinfo,using_udf_functions, locked_in_memory; +bool opt_using_transactions, using_update_log; +bool volatile abort_loop, select_thread_in_use, signal_thread_in_use; +bool volatile ready_to_exit, shutdown_in_progress, grant_option; + +my_bool opt_skip_slave_start = 0; // If set, slave is not autostarted +my_bool opt_reckless_slave = 0; my_bool opt_enable_named_pipe= 0, opt_debugging= 0; my_bool opt_local_infile, opt_external_locking, opt_slave_compressed_protocol; -my_bool lower_case_file_system= 0; -uint delay_key_write_options= (uint) DELAY_KEY_WRITE_ON; -uint lower_case_table_names; - -static my_bool opt_do_pstack = 0; -static ulong opt_specialflag=SPECIAL_ENGLISH; - -static ulong opt_myisam_block_size; -static my_socket unix_sock= INVALID_SOCKET,ip_sock= INVALID_SOCKET; -static my_string opt_logname=0,opt_update_logname=0, - opt_binlog_index_name = 0,opt_slow_logname=0; - -static char* mysql_home_ptr= mysql_home; -static char* pidfile_name_ptr= pidfile_name; -char* log_error_file_ptr= log_error_file; -static pthread_t select_thread; -static my_bool opt_noacl=0, opt_bootstrap=0, opt_myisam_log=0; my_bool opt_safe_user_create = 0, opt_no_mix_types = 0; my_bool opt_show_slave_auth_info, opt_sql_bin_update = 0; -my_bool opt_log_slave_updates= 0, opt_console= 0, opt_allow_suspicious_udfs; -my_bool opt_readonly = 0, opt_sync_bdb_logs, opt_sync_frm; - -volatile bool mqh_used = 0; -FILE *bootstrap_file=0; -int segfaulted = 0; // ensure we do not enter SIGSEGV handler twice - -/* - If sql_bin_update is true, SQL_LOG_UPDATE and SQL_LOG_BIN are kept in sync, - and are treated as aliases for each other -*/ - -static bool kill_in_progress=FALSE; -struct rand_struct sql_rand; // used by sql_class.cc:THD::THD() -static int cleanup_done; -static 
char **defaults_argv; -char glob_hostname[FN_REFLEN]; - -#include "sslopt-vars.h" -#ifdef HAVE_OPENSSL -char *des_key_file = 0; -struct st_VioSSLAcceptorFd *ssl_acceptor_fd= 0; -#endif /* HAVE_OPENSSL */ - -I_List <i_string_pair> replicate_rewrite_db; -I_List<i_string> replicate_do_db, replicate_ignore_db; -// allow the user to tell us which db to replicate and which to ignore -I_List<i_string> binlog_do_db, binlog_ignore_db; - -/* if we guessed server_id , we need to know about it */ -ulong server_id= 0; // Must be long becasue of set_var.cc -bool server_id_supplied = 0; +my_bool opt_log_slave_updates= 0; +my_bool opt_console= 0, opt_bdb, opt_innodb, opt_isam, opt_ndbcluster; +#ifdef HAVE_NDBCLUSTER_DB +const char *opt_ndbcluster_connectstring= 0; +my_bool opt_ndb_shm, opt_ndb_optimized_node_selection; +#endif +my_bool opt_readonly, use_temp_pool, relay_log_purge; +my_bool opt_sync_bdb_logs, opt_sync_frm, opt_allow_suspicious_udfs; +my_bool opt_secure_auth= 0; +my_bool opt_short_log_format= 0; +my_bool opt_log_queries_not_using_indexes= 0; +my_bool opt_log_slow_admin_statements= 0; +my_bool lower_case_file_system= 0; +my_bool opt_innodb_safe_binlog= 0; +volatile bool mqh_used = 0; -uint mysql_port; -uint test_flags = 0, select_errors=0, dropping_tables=0,ha_open_options=0; -uint volatile thread_count=0, thread_running=0, kill_cached_threads=0, - wake_thread=0; -ulong thd_startup_options=(OPTION_UPDATE_LOG | OPTION_AUTO_IS_NULL | - OPTION_BIN_LOG | OPTION_QUOTE_SHOW_CREATE ); -uint protocol_version=PROTOCOL_VERSION; -struct system_variables global_system_variables; -struct system_variables max_system_variables; -ulonglong keybuff_size; -ulong table_cache_size, - thread_stack, - thread_stack_min,what_to_log= ~ (1L << (uint) COM_TIME), - query_buff_size, - slow_launch_time = 2L, - slave_open_temp_tables=0, - open_files_limit=0, max_binlog_size, max_relay_log_size; -ulong com_stat[(uint) SQLCOM_END], com_other; -ulong slave_net_timeout; +#ifdef HAVE_INITGROUPS +static bool calling_initgroups= FALSE; /* Used in SIGSEGV handler. 
*/ +#endif +uint mysqld_port, test_flags, select_errors, dropping_tables, ha_open_options; +uint delay_key_write_options, protocol_version; +uint lower_case_table_names; +uint opt_crash_binlog_innodb; +uint volatile thread_count, thread_running, kill_cached_threads, wake_thread; +ulong back_log, connect_timeout, concurrency; +ulong server_id, thd_startup_options; +ulong table_cache_size, thread_stack, thread_stack_min, what_to_log; +ulong query_buff_size, slow_launch_time, slave_open_temp_tables; +ulong open_files_limit, max_binlog_size, max_relay_log_size; +ulong slave_net_timeout, slave_trans_retries; ulong thread_cache_size=0, binlog_cache_size=0, max_binlog_cache_size=0; ulong query_cache_size=0; -#ifdef HAVE_QUERY_CACHE -ulong query_cache_limit=0; -Query_cache query_cache; -#endif - -volatile ulong cached_thread_count=0; - -// replication parameters, if master_host is not NULL, we are a slave -my_string master_user = (char*) "test", master_password = 0, master_host=0, - master_info_file = (char*) "master.info", - relay_log_info_file = (char*) "relay-log.info", - master_ssl_key=0, master_ssl_cert=0, master_ssl_capath=0, master_ssl_cipher=0; -my_string report_user = 0, report_password = 0, report_host=0; - -const char *localhost=LOCAL_HOST; -const char *delayed_user="DELAYED"; -uint master_port = MYSQL_PORT, master_connect_retry = 60; -uint report_port = MYSQL_PORT; -my_bool master_ssl = 0; - -ulong master_retry_count=0; -ulong bytes_sent= 0L, bytes_received= 0L, net_big_packet_count= 0L; - -bool opt_endinfo,using_udf_functions, locked_in_memory; -bool opt_using_transactions, using_update_log; -bool volatile abort_loop, select_thread_in_use, signal_thread_in_use; -bool volatile ready_to_exit, shutdown_in_progress, grant_option; -ulong refresh_version=1L,flush_version=1L; /* Increments on each reload */ -ulong query_id=1L,long_query_count,aborted_threads, killed_threads, - aborted_connects,delayed_insert_timeout,delayed_insert_limit, - delayed_queue_size,delayed_insert_threads,delayed_insert_writes, - delayed_rows_in_use,delayed_insert_errors,flush_time, thread_created; +ulong com_stat[(uint) SQLCOM_END], com_other; +ulong com_stmt_prepare, com_stmt_execute, com_stmt_send_long_data; +ulong com_stmt_close, com_stmt_reset; +ulong bytes_sent, bytes_received, net_big_packet_count; +ulong refresh_version, flush_version; /* Increments on each reload */ +ulong query_id, long_query_count; +ulong aborted_threads, killed_threads, aborted_connects; +ulong delayed_insert_timeout, delayed_insert_limit, delayed_queue_size; +ulong delayed_insert_threads, delayed_insert_writes, delayed_rows_in_use; +ulong delayed_insert_errors,flush_time, thread_created; ulong filesort_rows, filesort_range_count, filesort_scan_count; ulong filesort_merge_passes; ulong select_range_check_count, select_range_count, select_scan_count; ulong select_full_range_join_count,select_full_join_count; ulong specialflag=0,opened_tables=0,created_tmp_tables=0, created_tmp_disk_tables=0; -ulong max_connections, max_used_connections, +ulong binlog_cache_use= 0, binlog_cache_disk_use= 0; +ulong max_connections,max_used_connections, max_connect_errors, max_user_connections = 0; +/* + Limit of the total number of prepared statements in the server. + Is necessary to protect the server against out-of-memory attacks. +*/ +ulong max_prepared_stmt_count; +/* + Current total number of prepared statements in the server. 
This number + is exact, and therefore may not be equal to the difference between + `com_stmt_prepare' and `com_stmt_close' (global status variables), as + the latter ones account for all registered attempts to prepare + a statement (including unsuccessful ones). Prepared statements are + currently connection-local: if the same SQL query text is prepared in + two different connections, this counts as two distinct prepared + statements. +*/ +ulong prepared_stmt_count=0; ulong thread_id=1L,current_pid; -ulong slow_launch_threads = 0; - +ulong slow_launch_threads = 0, sync_binlog_period; +ulong expire_logs_days = 0; +ulong rpl_recovery_rank=0; +ulong my_bind_addr; /* the address we bind to */ +volatile ulong cached_thread_count= 0; + +double log_10[32]; /* 10 potences */ +time_t start_time; + +char mysql_home[FN_REFLEN], pidfile_name[FN_REFLEN], system_time_zone[30]; +char *default_tz_name; +char log_error_file[FN_REFLEN], glob_hostname[FN_REFLEN]; +char* log_error_file_ptr= log_error_file; char mysql_real_data_home[FN_REFLEN], - language[LIBLEN],reg_ext[FN_EXTLEN], - mysql_charsets_dir[FN_REFLEN], *charsets_list, - max_sort_char,*mysqld_user,*mysqld_chroot, *opt_init_file; -char *language_ptr= language; + language[FN_REFLEN], reg_ext[FN_EXTLEN], mysql_charsets_dir[FN_REFLEN], + *mysqld_user,*mysqld_chroot, *opt_init_file, + *opt_init_connect, *opt_init_slave, + def_ft_boolean_syntax[sizeof(ft_boolean_syntax)]; + +const key_map key_map_empty(0); +key_map key_map_full(0); // Will be initialized later + +const char *opt_date_time_formats[3]; + +char *language_ptr, *default_collation_name, *default_character_set_name; char mysql_data_home_buff[2], *mysql_data_home=mysql_real_data_home; struct passwd *user_info; -#ifndef EMBEDDED_LIBRARY -bool mysql_embedded=0; -#else -bool mysql_embedded=1; -#endif - -static char *opt_bin_logname = 0; -char *opt_relay_logname = 0, *opt_relaylog_index_name=0; char server_version[SERVER_VERSION_LENGTH]; -const char *first_keyword="first"; +char *mysqld_unix_port, *opt_mysql_tmpdir; +char *my_bind_addr_str; const char **errmesg; /* Error messages */ const char *myisam_recover_options_str="OFF"; +const char *myisam_stats_method_str="nulls_unequal"; const char *sql_mode_str="OFF"; -ulong rpl_recovery_rank=0; - -my_string mysql_unix_port=NULL, opt_mysql_tmpdir=NULL, mysql_tmpdir=NULL; -ulong my_bind_addr; /* the address we bind to */ -char *my_bind_addr_str; -DATE_FORMAT dayord; -double log_10[32]; /* 10 potences */ +/* name of reference on left espression in rewritten IN subquery */ +const char *in_left_expr_name= "<left expr>"; +/* name of additional condition */ +const char *in_additional_cond= "<IN COND>"; +/* classes for comparation parsing/processing */ +Eq_creator eq_creator; +Ne_creator ne_creator; +Gt_creator gt_creator; +Lt_creator lt_creator; +Ge_creator ge_creator; +Le_creator le_creator; + + +FILE *bootstrap_file; +FILE *stderror_file=0; + +I_List<i_string_pair> replicate_rewrite_db; +I_List<i_string> replicate_do_db, replicate_ignore_db; +// allow the user to tell us which db to replicate and which to ignore +I_List<i_string> binlog_do_db, binlog_ignore_db; I_List<THD> threads,thread_cache; -time_t start_time; +I_List<NAMED_LIST> key_caches; -ulong opt_sql_mode = 0L; -const char *sql_mode_names[] = -{ - "REAL_AS_FLOAT", "PIPES_AS_CONCAT", "ANSI_QUOTES", "IGNORE_SPACE", - "SERIALIZE","ONLY_FULL_GROUP_BY", "NO_UNSIGNED_SUBTRACTION", - "NO_DIR_IN_CREATE", - NullS -}; -TYPELIB sql_mode_typelib= {array_elements(sql_mode_names)-1,"", - sql_mode_names}; +struct 
system_variables global_system_variables; +struct system_variables max_system_variables; +MY_TMPDIR mysql_tmpdir_list; MY_BITMAP temp_pool; -my_bool use_temp_pool=0; -pthread_key(MEM_ROOT*,THR_MALLOC); +CHARSET_INFO *system_charset_info, *files_charset_info ; +CHARSET_INFO *national_charset_info, *table_alias_charset; + +SHOW_COMP_OPTION have_berkeley_db, have_innodb, have_isam, have_ndbcluster, + have_example_db, have_archive_db, have_csv_db; +SHOW_COMP_OPTION have_raid, have_openssl, have_symlink, have_query_cache; +SHOW_COMP_OPTION have_geometry, have_rtree_keys; +SHOW_COMP_OPTION have_crypt, have_compress; +SHOW_COMP_OPTION have_blackhole_db; + +/* Thread specific variables */ + +pthread_key(MEM_ROOT**,THR_MALLOC); pthread_key(THD*, THR_THD); -pthread_key(NET*, THR_NET); pthread_mutex_t LOCK_mysql_create_db, LOCK_Acl, LOCK_open, LOCK_thread_count, - LOCK_mapped_file, LOCK_status, LOCK_grant, - LOCK_error_log, + LOCK_mapped_file, LOCK_status, + LOCK_error_log, LOCK_uuid_generator, LOCK_delayed_insert, LOCK_delayed_status, LOCK_delayed_create, LOCK_crypt, LOCK_bytes_sent, LOCK_bytes_received, LOCK_global_system_variables, LOCK_user_conn, LOCK_slave_list, LOCK_active_mi; - +/* + The below lock protects access to two global server variables: + max_prepared_stmt_count and prepared_stmt_count. These variables + set the limit and hold the current total number of prepared statements + in the server, respectively. As PREPARE/DEALLOCATE rate in a loaded + server may be fairly high, we need a dedicated lock. +*/ +pthread_mutex_t LOCK_prepared_stmt_count; +#ifdef HAVE_OPENSSL +pthread_mutex_t LOCK_des_key_file; +#endif +rw_lock_t LOCK_grant, LOCK_sys_init_connect, LOCK_sys_init_slave; pthread_cond_t COND_refresh,COND_thread_count, COND_slave_stopped, COND_slave_start; pthread_cond_t COND_thread_cache,COND_flush_thread_cache; pthread_t signal_thread; pthread_attr_t connection_attrib; +/* replication parameters, if master_host is not NULL, we are a slave */ +uint master_port= MYSQL_PORT, master_connect_retry = 60; +uint report_port= MYSQL_PORT; +ulong master_retry_count=0; +char *master_user, *master_password, *master_host, *master_info_file; +char *relay_log_info_file, *report_user, *report_password, *report_host; +char *opt_relay_logname = 0, *opt_relaylog_index_name=0; +my_bool master_ssl; +char *master_ssl_key, *master_ssl_cert; +char *master_ssl_ca, *master_ssl_capath, *master_ssl_cipher; + +/* Static variables */ + +static bool kill_in_progress, segfaulted; +static my_bool opt_do_pstack, opt_noacl, opt_bootstrap, opt_myisam_log; +static int cleanup_done; +static ulong opt_specialflag, opt_myisam_block_size; +static char *opt_logname, *opt_update_logname, *opt_binlog_index_name; +static char *opt_slow_logname; +static char *mysql_home_ptr, *pidfile_name_ptr; +static char **defaults_argv; +static char *opt_bin_logname; + +static my_socket unix_sock,ip_sock; +static pthread_t select_thread; +struct rand_struct sql_rand; // used by sql_class.cc:THD::THD() + +/* OS specific variables */ + #ifdef __WIN__ #undef getpid #include <process.h> + +static pthread_cond_t COND_handler_count; +static uint handler_count; +static bool start_mode=0, use_opt_args; +static int opt_argc; +static char **opt_argv; + #if !defined(EMBEDDED_LIBRARY) -HANDLE hEventShutdown; +static HANDLE hEventShutdown; static char shutdown_event_name[40]; #include "nt_servc.h" static NTService Service; // Service object for WinNT -#endif +#endif /* EMBEDDED_LIBRARY */ +#endif /* __WIN__ */ + +#ifdef __NT__ +static char pipe_name[512]; 
+static SECURITY_ATTRIBUTES saPipeSecurity; +static SECURITY_DESCRIPTOR sdPipeDescriptor; +static HANDLE hPipe = INVALID_HANDLE_VALUE; #endif #ifdef OS2 pthread_cond_t eventShutdown; #endif +#ifndef EMBEDDED_LIBRARY +bool mysqld_embedded=0; +#else +bool mysqld_embedded=1; +#endif + +#ifndef DBUG_OFF +static const char* default_dbug_option; +#endif +#ifdef HAVE_LIBWRAP +char *libwrapName= NULL; +#endif +#ifdef HAVE_QUERY_CACHE +ulong query_cache_limit= 0; +ulong query_cache_min_res_unit= QUERY_CACHE_MIN_RESULT_DATA_SIZE; +Query_cache query_cache; +#endif +#ifdef HAVE_SMEM +char *shared_memory_base_name= default_shared_memory_base_name; +bool opt_enable_shared_memory; +HANDLE smem_event_connect_request= 0; +#endif + +#include "sslopt-vars.h" +#ifdef HAVE_OPENSSL +#include <openssl/crypto.h> + +typedef struct CRYPTO_dynlock_value +{ + rw_lock_t lock; +} openssl_lock_t; + +char *des_key_file; +struct st_VioSSLAcceptorFd *ssl_acceptor_fd; +static openssl_lock_t *openssl_stdlocks; + +static openssl_lock_t *openssl_dynlock_create(const char *, int); +static void openssl_dynlock_destroy(openssl_lock_t *, const char *, int); +static void openssl_lock_function(int, int, const char *, int); +static void openssl_lock(int, openssl_lock_t *, const char *, int); +static unsigned long openssl_id_function(); +#endif /* HAVE_OPENSSL */ + + +/* Function declarations */ + static void start_signal_handler(void); extern "C" pthread_handler_decl(signal_hand, arg); -static void set_options(void); +static void mysql_init_variables(void); static void get_options(int argc,char **argv); static void set_server_version(void); +static int init_thread_environment(); static char *get_relative_path(const char *path); static void fix_paths(void); extern "C" pthread_handler_decl(handle_connections_sockets,arg); @@ -496,16 +575,18 @@ static bool read_init_file(char *file_name); #ifdef __NT__ extern "C" pthread_handler_decl(handle_connections_namedpipes,arg); #endif -extern "C" pthread_handler_decl(handle_slave,arg); -#ifdef SET_RLIMIT_NOFILE -static uint set_maximum_open_files(uint max_file_limit); +#ifdef HAVE_SMEM +static pthread_handler_decl(handle_connections_shared_memory,arg); #endif +extern "C" pthread_handler_decl(handle_slave,arg); static ulong find_bit_type(const char *x, TYPELIB *bit_lib); static void clean_up(bool print_message); static void clean_up_mutexes(void); +static void wait_for_signal_thread_to_end(void); static int test_if_case_insensitive(const char *dir_name); static void create_pid_file(); +#ifndef EMBEDDED_LIBRARY /**************************************************************************** ** Code to end mysqld ****************************************************************************/ @@ -515,7 +596,6 @@ static void close_connections(void) #ifdef EXTRA_DEBUG int count=0; #endif - NET net; DBUG_ENTER("close_connections"); /* Clear thread cache */ @@ -541,7 +621,7 @@ static void close_connections(void) struct timespec abstime; int error; LINT_INIT(error); - DBUG_PRINT("info",("Waiting for select_thread")); + DBUG_PRINT("info",("Waiting for select thread")); #ifndef DONT_USE_THR_ALARM if (pthread_kill(select_thread,THR_CLIENT_ALARM)) @@ -581,7 +661,7 @@ static void close_connections(void) { HANDLE temp; DBUG_PRINT( "quit", ("Closing named pipes") ); - + /* Create connection to the handle named pipe handler to break the loop */ if ((temp = CreateFile(pipe_name, GENERIC_READ | GENERIC_WRITE, @@ -605,14 +685,17 @@ static void close_connections(void) { (void) shutdown(unix_sock,2); (void) 
closesocket(unix_sock); - (void) unlink(mysql_unix_port); + (void) unlink(mysqld_unix_port); unix_sock= INVALID_SOCKET; } #endif end_thr_alarm(0); // Abort old alarms. - end_slave(); - /* First signal all threads that it's time to die */ + /* + First signal all threads that it's time to die + This will give the threads some time to gracefully abort their + statements and inform their clients that the server is about to die. + */ THD *tmp; (void) pthread_mutex_lock(&LOCK_thread_count); // For unlink from list @@ -622,7 +705,10 @@ static void close_connections(void) { DBUG_PRINT("quit",("Informing thread %ld that it's time to die", tmp->thread_id)); - tmp->killed=1; + /* We skip slave threads on this first loop through. */ + if (tmp->slave_thread) continue; + + tmp->killed= 1; if (tmp->mysys_var) { tmp->mysys_var->abort=1; @@ -638,12 +724,17 @@ static void close_connections(void) } (void) pthread_mutex_unlock(&LOCK_thread_count); // For unlink from list + end_slave(); + if (thread_count) - sleep(1); // Give threads time to die + sleep(2); // Give threads time to die - /* Force remaining threads to die by closing the connection to the client */ + /* + Force remaining threads to die by closing the connection to the client + This will ensure that threads that are waiting for a command from the + client on a blocking read call are aborted. + */ - (void) my_net_init(&net, (st_vio*) 0); for (;;) { DBUG_PRINT("quit",("Locking LOCK_thread_count")); @@ -655,17 +746,17 @@ static void close_connections(void) break; } #ifndef __bsdi__ // Bug in BSDI kernel - if ((net.vio=tmp->net.vio) != 0) + if (tmp->vio_ok()) { - sql_print_error(ER(ER_FORCING_CLOSE),my_progname, - tmp->thread_id,tmp->user ? tmp->user : ""); - close_connection(&net,0,0); + if (global_system_variables.log_warnings) + sql_print_warning(ER(ER_FORCING_CLOSE),my_progname, + tmp->thread_id,tmp->user ? tmp->user : ""); + close_connection(tmp,0,0); } #endif DBUG_PRINT("quit",("Unlocking LOCK_thread_count")); (void) pthread_mutex_unlock(&LOCK_thread_count); } - net_end(&net); /* All threads has now been aborted */ DBUG_PRINT("quit",("Waiting for threads to die (count=%u)",thread_count)); (void) pthread_mutex_lock(&LOCK_thread_count); @@ -679,6 +770,7 @@ static void close_connections(void) DBUG_PRINT("quit",("close_connections thread")); DBUG_VOID_RETURN; } +#endif /*EMBEDDED_LIBRARY*/ static void close_server_sock() @@ -697,7 +789,7 @@ static void close_server_sock() The following code is disabled for normal systems as it causes MySQL to hang on AIX 4.3 during shutdown */ - DBUG_PRINT("info",("calling closesocket on TCP/IP socket")); + DBUG_PRINT("info",("calling closesocket on TCP/IP socket")); VOID(closesocket(tmp_sock)); #endif } @@ -715,7 +807,7 @@ static void close_server_sock() DBUG_PRINT("info",("calling closesocket on unix/IP socket")); VOID(closesocket(tmp_sock)); #endif - VOID(unlink(mysql_unix_port)); + VOID(unlink(mysqld_unix_port)); } DBUG_VOID_RETURN; #endif @@ -729,7 +821,7 @@ void kill_mysql(void) #ifdef SIGNALS_DONT_BREAK_READ abort_loop=1; // Break connection loops close_server_sock(); // Force accept to wake up -#endif +#endif #if defined(__WIN__) #if !defined(EMBEDDED_LIBRARY) @@ -765,13 +857,12 @@ void kill_mysql(void) abort_loop=1; if (pthread_create(&tmp,&connection_attrib, kill_server_thread, (void*) 0)) - sql_print_error("Error: Can't create thread to kill server"); + sql_print_error("Can't create thread to kill server"); } -#endif +#endif DBUG_VOID_RETURN; } - /* Force server down. 
kill all connections and threads and exit */ #if defined(OS2) || defined(__NETWARE__) @@ -787,18 +878,30 @@ static void __cdecl kill_server(int sig_ptr) { int sig=(int) (long) sig_ptr; // This is passed a int DBUG_ENTER("kill_server"); - +#ifndef EMBEDDED_LIBRARY // if there is a signal during the kill in progress, ignore the other if (kill_in_progress) // Safety RETURN_FROM_KILL_SERVER; kill_in_progress=TRUE; abort_loop=1; // This should be set - signal(sig,SIG_IGN); + my_sigset(sig,SIG_IGN); if (sig == MYSQL_KILL_SIGNAL || sig == 0) - sql_print_error(ER(ER_NORMAL_SHUTDOWN),my_progname); + sql_print_information(ER(ER_NORMAL_SHUTDOWN),my_progname); else sql_print_error(ER(ER_GOT_SIGNAL),my_progname,sig); /* purecov: inspected */ +#if defined(HAVE_SMEM) && defined(__WIN__) + /* + Send event to smem_event_connect_request for aborting + */ + if (!SetEvent(smem_event_connect_request)) + { + DBUG_PRINT("error", + ("Got error: %ld from SetEvent of smem_event_connect_request", + GetLastError())); + } +#endif + #if defined(__NETWARE__) || (defined(USE_ONE_SIGNAL_HAND) && !defined(__WIN__) && !defined(OS2)) my_thread_init(); // If this is a new thread #endif @@ -807,14 +910,14 @@ static void __cdecl kill_server(int sig_ptr) unireg_abort(1); /* purecov: inspected */ else unireg_end(); - #ifdef __NETWARE__ - if(!event_flag) - pthread_join(select_thread, NULL); // wait for main thread + if (!event_flag) + pthread_join(select_thread, NULL); // wait for main thread #endif /* __NETWARE__ */ - + pthread_exit(0); /* purecov: deadcode */ +#endif /* EMBEDDED_LIBRARY */ RETURN_FROM_KILL_SERVER; } @@ -822,7 +925,6 @@ static void __cdecl kill_server(int sig_ptr) #if defined(USE_ONE_SIGNAL_HAND) || (defined(__NETWARE__) && defined(SIGNALS_DONT_BREAK_READ)) extern "C" pthread_handler_decl(kill_server_thread,arg __attribute__((unused))) { - SHUTDOWN_THD; my_thread_init(); // Initialize new thread kill_server(0); my_thread_end(); // Normally never reached @@ -830,21 +932,16 @@ extern "C" pthread_handler_decl(kill_server_thread,arg __attribute__((unused))) } #endif -#if defined(__amiga__) -#undef sigset -#define sigset signal -#endif - extern "C" sig_handler print_signal_warning(int sig) { if (!DBUG_IN_USE) { if (global_system_variables.log_warnings) - sql_print_error("Warning: Got signal %d from thread %d", + sql_print_warning("Got signal %d from thread %d", sig,my_thread_id()); } #ifdef DONT_REMEMBER_SIGNAL - sigset(sig,print_signal_warning); /* int. thread system calls */ + my_sigset(sig,print_signal_warning); /* int. thread system calls */ #endif #if !defined(__WIN__) && !defined(OS2) && !defined(__NETWARE__) if (sig == SIGALRM) @@ -866,6 +963,7 @@ extern "C" sig_handler print_signal_warning(int sig) (Mac OS X) we have to call exit() instead if pthread_exit(). */ +#ifndef EMBEDDED_LIBRARY void unireg_end(void) { clean_up(1); @@ -877,18 +975,19 @@ void unireg_end(void) #endif } - extern "C" void unireg_abort(int exit_code) { DBUG_ENTER("unireg_abort"); if (exit_code) sql_print_error("Aborting\n"); - clean_up(1); /* purecov: inspected */ + clean_up(exit_code || !opt_bootstrap); /* purecov: inspected */ DBUG_PRINT("quit",("done with cleanup in unireg_abort")); + wait_for_signal_thread_to_end(); clean_up_mutexes(); my_end(opt_endinfo ? 
MY_CHECK_ERROR | MY_GIVE_INFO : 0); exit(exit_code); /* purecov: inspected */ } +#endif void clean_up(bool print_message) @@ -902,54 +1001,72 @@ void clean_up(bool print_message) mysql_update_log.cleanup(); mysql_bin_log.cleanup(); +#ifdef HAVE_REPLICATION if (use_slave_mask) bitmap_free(&slave_error_mask); +#endif + my_tz_free(); + my_dbopt_free(); +#ifndef NO_EMBEDDED_ACCESS_CHECKS acl_free(1); grant_free(); +#endif query_cache_destroy(); table_cache_free(); hostname_cache_free(); item_user_lock_free(); lex_free(); /* Free some memory */ set_var_free(); + free_charsets(); #ifdef HAVE_DLOPEN if (!opt_noacl) udf_free(); #endif (void) ha_panic(HA_PANIC_CLOSE); /* close all tables and logs */ - end_key_cache(); + delete_elements(&key_caches, (void (*)(const char*, gptr)) free_key_cache); + multi_keycache_free(); end_thr_alarm(1); /* Free allocated memory */ #ifdef USE_RAID end_raid(); #endif + my_free_open_file_info(); + my_free((char*) global_system_variables.date_format, + MYF(MY_ALLOW_ZERO_PTR)); + my_free((char*) global_system_variables.time_format, + MYF(MY_ALLOW_ZERO_PTR)); + my_free((char*) global_system_variables.datetime_format, + MYF(MY_ALLOW_ZERO_PTR)); if (defaults_argv) free_defaults(defaults_argv); - my_free(charsets_list, MYF(MY_ALLOW_ZERO_PTR)); - my_free(mysql_tmpdir,MYF(MY_ALLOW_ZERO_PTR)); + my_free(sys_init_connect.value, MYF(MY_ALLOW_ZERO_PTR)); + my_free(sys_init_slave.value, MYF(MY_ALLOW_ZERO_PTR)); + free_tmpdir(&mysql_tmpdir_list); +#ifdef HAVE_REPLICATION my_free(slave_load_tmpdir,MYF(MY_ALLOW_ZERO_PTR)); +#endif x_free(opt_bin_logname); x_free(opt_relay_logname); bitmap_free(&temp_pool); free_max_user_conn(); +#ifdef HAVE_REPLICATION end_slave_list(); free_list(&replicate_do_db); free_list(&replicate_ignore_db); free_list(&binlog_do_db); free_list(&binlog_ignore_db); free_list(&replicate_rewrite_db); - +#endif #ifdef HAVE_OPENSSL if (ssl_acceptor_fd) my_free((gptr) ssl_acceptor_fd, MYF(MY_ALLOW_ZERO_PTR)); - free_des_key_file(); #endif /* HAVE_OPENSSL */ #ifdef USE_REGEX - regex_end(); + my_regex_end(); #endif if (print_message && errmesg) - sql_print_error(ER(ER_SHUTDOWN_COMPLETE),my_progname); -#if !defined(__WIN__) && !defined(EMBEDDED_LIBRARY) + sql_print_information(ER(ER_SHUTDOWN_COMPLETE),my_progname); +#if !defined(EMBEDDED_LIBRARY) if (!opt_bootstrap) (void) my_delete(pidfile_name,MYF(0)); // This may not always exist #endif @@ -970,11 +1087,34 @@ void clean_up(bool print_message) } /* clean_up */ +/* + This is mainly needed when running with purify, but it's still nice to + know that all child threads have died when mysqld exits +*/ + +static void wait_for_signal_thread_to_end() +{ +#ifndef __NETWARE__ + uint i; + /* + Wait up to 10 seconds for signal thread to die. 
We use this mainly to + avoid getting warnings that my_thread_end has not been called + */ + for (i= 0 ; i < 100 && signal_thread_in_use; i++) + { + if (pthread_kill(signal_thread, MYSQL_KILL_SIGNAL)) + break; + my_sleep(100); // Give it time to die + } +#endif +} + + static void clean_up_mutexes() { (void) pthread_mutex_destroy(&LOCK_mysql_create_db); (void) pthread_mutex_destroy(&LOCK_Acl); - (void) pthread_mutex_destroy(&LOCK_grant); + (void) rwlock_destroy(&LOCK_grant); (void) pthread_mutex_destroy(&LOCK_open); (void) pthread_mutex_destroy(&LOCK_thread_count); (void) pthread_mutex_destroy(&LOCK_mapped_file); @@ -988,15 +1128,26 @@ static void clean_up_mutexes() (void) pthread_mutex_destroy(&LOCK_bytes_sent); (void) pthread_mutex_destroy(&LOCK_bytes_received); (void) pthread_mutex_destroy(&LOCK_user_conn); +#ifdef HAVE_OPENSSL + (void) pthread_mutex_destroy(&LOCK_des_key_file); + for (int i= 0; i < CRYPTO_num_locks(); ++i) + (void) rwlock_destroy(&openssl_stdlocks[i].lock); + OPENSSL_free(openssl_stdlocks); +#endif +#ifdef HAVE_REPLICATION (void) pthread_mutex_destroy(&LOCK_rpl_status); + (void) pthread_cond_destroy(&COND_rpl_status); +#endif (void) pthread_mutex_destroy(&LOCK_active_mi); + (void) rwlock_destroy(&LOCK_sys_init_connect); + (void) rwlock_destroy(&LOCK_sys_init_slave); (void) pthread_mutex_destroy(&LOCK_global_system_variables); + (void) pthread_mutex_destroy(&LOCK_prepared_stmt_count); (void) pthread_cond_destroy(&COND_thread_count); (void) pthread_cond_destroy(&COND_refresh); (void) pthread_cond_destroy(&COND_thread_cache); (void) pthread_cond_destroy(&COND_flush_thread_cache); (void) pthread_cond_destroy(&COND_manager); - (void) pthread_cond_destroy(&COND_rpl_status); } /**************************************************************************** @@ -1006,34 +1157,36 @@ static void clean_up_mutexes() static void set_ports() { char *env; - if (!mysql_port && !opt_disable_networking) + if (!mysqld_port && !opt_disable_networking) { // Get port if not from commandline struct servent *serv_ptr; - mysql_port = MYSQL_PORT; - if ((serv_ptr = getservbyname("mysql", "tcp"))) - mysql_port = ntohs((u_short) serv_ptr->s_port); /* purecov: inspected */ + mysqld_port= MYSQL_PORT; + if ((serv_ptr= getservbyname("mysql", "tcp"))) + mysqld_port= ntohs((u_short) serv_ptr->s_port); /* purecov: inspected */ if ((env = getenv("MYSQL_TCP_PORT"))) - mysql_port = (uint) atoi(env); /* purecov: inspected */ + mysqld_port= (uint) atoi(env); /* purecov: inspected */ } - if (!mysql_unix_port) + if (!mysqld_unix_port) { #ifdef __WIN__ - mysql_unix_port = (char*) MYSQL_NAMEDPIPE; + mysqld_unix_port= (char*) MYSQL_NAMEDPIPE; #else - mysql_unix_port = (char*) MYSQL_UNIX_ADDR; + mysqld_unix_port= (char*) MYSQL_UNIX_ADDR; #endif if ((env = getenv("MYSQL_UNIX_PORT"))) - mysql_unix_port = env; /* purecov: inspected */ + mysqld_unix_port= env; /* purecov: inspected */ } } +#ifndef EMBEDDED_LIBRARY +/* Change to run as another user if started with --user */ static struct passwd *check_user(const char *user) { -#if !defined(__WIN__) && !defined(OS2) && !defined(__NETWARE__) +#if !defined(__WIN__) && !defined(OS2) && !defined(__NETWARE__) struct passwd *user_info; uid_t user_id= geteuid(); - + // Don't bother if we aren't superuser if (user_id) { @@ -1042,9 +1195,9 @@ static struct passwd *check_user(const char *user) // Don't give a warning, if real user is same as given with --user user_info= getpwnam(user); if ((!user_info || user_id != user_info->pw_uid) && - global_system_variables.log_warnings) - 
fprintf(stderr, - "Warning: One can only use the --user switch if running as root\n"); + global_system_variables.log_warnings) + sql_print_warning( + "One can only use the --user switch if running as root\n"); } return NULL; } @@ -1052,19 +1205,20 @@ static struct passwd *check_user(const char *user) { if (!opt_bootstrap) { - fprintf(stderr,"Fatal error: Please read \"Security\" section of the manual to find out how to run mysqld as root!\n"); + sql_print_error("Fatal error: Please read \"Security\" section of the manual to find out how to run mysqld as root!\n"); unireg_abort(1); } return NULL; } if (!strcmp(user,"root")) - return NULL; // Avoid problem with dynamic libraries + return NULL; // Avoid problem with dynamic libraries + if (!(user_info= getpwnam(user))) { // Allow a numeric uid to be used const char *pos; - for (pos= user; isdigit(*pos); pos++); - if (*pos) // Not numeric id + for (pos= user; my_isdigit(mysqld_charset,*pos); pos++) ; + if (*pos) // Not numeric id goto err; if (!(user_info= getpwuid(atoi(user)))) goto err; @@ -1072,28 +1226,29 @@ static struct passwd *check_user(const char *user) return user_info; } else - { return user_info; - } err: - fprintf(stderr, - "Fatal error: Can't change to run as user '%s'. Please check that the user exists!\n", - user); + sql_print_error("Fatal error: Can't change to run as user '%s' ; Please check that the user exists!\n",user); unireg_abort(1); - return NULL; -#else - return NULL; -#endif +#endif + return NULL; } - static void set_user(const char *user, struct passwd *user_info) { #if !defined(__WIN__) && !defined(OS2) && !defined(__NETWARE__) DBUG_ASSERT(user_info); #ifdef HAVE_INITGROUPS + /* + We can get a SIGSEGV when calling initgroups() on some systems when NSS + is configured to use LDAP and the server is statically linked. We set + calling_initgroups as a flag to the SIGSEGV handler that is then used to + output a specific message to help the user resolve this problem. + */ + calling_initgroups= TRUE; initgroups((char*) user, user_info->pw_gid); + calling_initgroups= FALSE; #endif if (setgid(user_info->pw_gid) == -1) { @@ -1108,16 +1263,17 @@ static void set_user(const char *user, struct passwd *user_info) #endif } + static void set_effective_user(struct passwd *user_info) { #if !defined(__WIN__) && !defined(OS2) && !defined(__NETWARE__) DBUG_ASSERT(user_info); - if (setregid((gid_t)-1,user_info->pw_gid) == -1) + if (setregid((gid_t)-1, user_info->pw_gid) == -1) { sql_perror("setregid"); unireg_abort(1); - } - if (setreuid((uid_t)-1,user_info->pw_uid) == -1) + } + if (setreuid((uid_t)-1, user_info->pw_uid) == -1) { sql_perror("setreuid"); unireg_abort(1); @@ -1163,9 +1319,9 @@ static void server_init(void) set_ports(); - if (mysql_port != 0 && !opt_disable_networking && !opt_bootstrap) + if (mysqld_port != 0 && !opt_disable_networking && !opt_bootstrap) { - DBUG_PRINT("general",("IP Socket is %d",mysql_port)); + DBUG_PRINT("general",("IP Socket is %d",mysqld_port)); ip_sock = socket(AF_INET, SOCK_STREAM, 0); if (ip_sock == INVALID_SOCKET) { @@ -1176,7 +1332,7 @@ static void server_init(void) bzero((char*) &IPaddr, sizeof(IPaddr)); IPaddr.sin_family = AF_INET; IPaddr.sin_addr.s_addr = my_bind_addr; - IPaddr.sin_port = (unsigned short) htons((unsigned short) mysql_port); + IPaddr.sin_port = (unsigned short) htons((unsigned short) mysqld_port); #ifndef __WIN__ /* @@ -1184,19 +1340,19 @@ static void server_init(void) user to open two mysqld servers with the same TCP/IP port. 
*/ (void) setsockopt(ip_sock,SOL_SOCKET,SO_REUSEADDR,(char*)&arg,sizeof(arg)); -#endif +#endif /* __WIN__ */ if (bind(ip_sock, my_reinterpret_cast(struct sockaddr *) (&IPaddr), sizeof(IPaddr)) < 0) { DBUG_PRINT("error",("Got error: %d from bind",socket_errno)); sql_perror("Can't start server: Bind on TCP/IP port"); - sql_print_error("Do you already have another mysqld server running on port: %d ?",mysql_port); + sql_print_error("Do you already have another mysqld server running on port: %d ?",mysqld_port); unireg_abort(1); } if (listen(ip_sock,(int) back_log) < 0) { sql_perror("Can't start server: listen() on TCP/IP port"); - sql_print_error("Error: listen() on TCP/IP failed with error %d", + sql_print_error("listen() on TCP/IP failed with error %d", socket_errno); unireg_abort(1); } @@ -1205,28 +1361,26 @@ static void server_init(void) if ((user_info= check_user(mysqld_user))) { #if defined(HAVE_MLOCKALL) && defined(MCL_CURRENT) - if (locked_in_memory && !getuid()) + if (locked_in_memory) // getuid() == 0 here set_effective_user(user_info); else - set_user(mysqld_user, user_info); -#else - set_user(mysqld_user, user_info); #endif + set_user(mysqld_user, user_info); } #ifdef __NT__ /* create named pipe */ - if (Service.IsNT() && mysql_unix_port[0] && !opt_bootstrap && + if (Service.IsNT() && mysqld_unix_port[0] && !opt_bootstrap && opt_enable_named_pipe) { pipe_name[sizeof(pipe_name)-1]= 0; /* Safety if too long string */ strxnmov(pipe_name, sizeof(pipe_name)-1, "\\\\.\\pipe\\", - mysql_unix_port, NullS); - bzero((char*) &saPipeSecurity, sizeof(saPipeSecurity) ); - bzero((char*) &sdPipeDescriptor, sizeof(sdPipeDescriptor) ); + mysqld_unix_port, NullS); + bzero((char*) &saPipeSecurity, sizeof(saPipeSecurity)); + bzero((char*) &sdPipeDescriptor, sizeof(sdPipeDescriptor)); if (!InitializeSecurityDescriptor(&sdPipeDescriptor, - SECURITY_DESCRIPTOR_REVISION) ) + SECURITY_DESCRIPTOR_REVISION)) { sql_perror("Can't start server : Initialize security descriptor"); unireg_abort(1); @@ -1268,19 +1422,25 @@ static void server_init(void) /* ** Create the UNIX socket */ - if (mysql_unix_port[0] && !opt_bootstrap) + if (mysqld_unix_port[0] && !opt_bootstrap) { - DBUG_PRINT("general",("UNIX Socket is %s",mysql_unix_port)); + DBUG_PRINT("general",("UNIX Socket is %s",mysqld_unix_port)); - if ((unix_sock = socket(AF_UNIX, SOCK_STREAM, 0)) < 0) + if (strlen(mysqld_unix_port) > (sizeof(UNIXaddr.sun_path) - 1)) + { + sql_print_error("The socket file path is too long (> %d): %s", + sizeof(UNIXaddr.sun_path) - 1, mysqld_unix_port); + unireg_abort(1); + } + if ((unix_sock= socket(AF_UNIX, SOCK_STREAM, 0)) < 0) { sql_perror("Can't start server : UNIX Socket "); /* purecov: inspected */ unireg_abort(1); /* purecov: inspected */ } bzero((char*) &UNIXaddr, sizeof(UNIXaddr)); UNIXaddr.sun_family = AF_UNIX; - strmov(UNIXaddr.sun_path, mysql_unix_port); - (void) unlink(mysql_unix_port); + strmov(UNIXaddr.sun_path, mysqld_unix_port); + (void) unlink(mysqld_unix_port); (void) setsockopt(unix_sock,SOL_SOCKET,SO_REUSEADDR,(char*)&arg, sizeof(arg)); umask(0); @@ -1288,15 +1448,15 @@ static void server_init(void) sizeof(UNIXaddr)) < 0) { sql_perror("Can't start server : Bind on unix socket"); /* purecov: tested */ - sql_print_error("Do you already have another mysqld server running on socket: %s ?",mysql_unix_port); + sql_print_error("Do you already have another mysqld server running on socket: %s ?",mysqld_unix_port); unireg_abort(1); /* purecov: tested */ } umask(((~my_umask) & 0666)); #if defined(S_IFSOCK) && 
defined(SECURE_SOCKETS) - (void) chmod(mysql_unix_port,S_IFSOCK); /* Fix solaris 2.6 bug */ + (void) chmod(mysqld_unix_port,S_IFSOCK); /* Fix solaris 2.6 bug */ #endif if (listen(unix_sock,(int) back_log) < 0) - sql_print_error("Warning: listen() on Unix socket failed with error %d", + sql_print_warning("listen() on Unix socket failed with error %d", socket_errno); } #endif @@ -1304,37 +1464,57 @@ static void server_init(void) DBUG_VOID_RETURN; } +#endif /*!EMBEDDED_LIBRARY*/ void yyerror(const char *s) { - NET *net=my_pthread_getspecific_ptr(NET*,THR_NET); - char *yytext=(char*) current_lex->tok_start; - if (!strcmp(s,"parse error") || !strcmp(s,"syntax error")) + THD *thd=current_thd; + char *yytext= (char*) thd->lex->tok_start; + /* "parse error" changed into "syntax error" between bison 1.75 and 1.875 */ + if (strcmp(s,"parse error") == 0 || strcmp(s,"syntax error") == 0) s=ER(ER_SYNTAX_ERROR); - net_printf(net,ER_PARSE_ERROR, s, yytext ? (char*) yytext : "", - current_lex->yylineno); + net_printf(thd,ER_PARSE_ERROR, s, yytext ? (char*) yytext : "", + thd->lex->yylineno); } -void close_connection(NET *net,uint errcode,bool lock) +#ifndef EMBEDDED_LIBRARY +/* + Close a connection + + SYNOPSIS + close_connection() + thd Thread handle + errcode Error code to print to console + lock 1 if we have have to lock LOCK_thread_count + + NOTES + For the connection that is doing shutdown, this is called twice +*/ + +void close_connection(THD *thd, uint errcode, bool lock) { - st_vio* vio; + st_vio *vio; DBUG_ENTER("close_connection"); DBUG_PRINT("enter",("fd: %s error: '%s'", - net->vio? vio_description(net->vio):"(not connected)", - errcode ? ER(errcode) : "")); + thd->net.vio ? vio_description(thd->net.vio) : + "(not connected)", + errcode ? ER(errcode) : "")); if (lock) (void) pthread_mutex_lock(&LOCK_thread_count); - if ((vio=net->vio) != 0) + thd->killed=1; + if ((vio=thd->net.vio) != 0) { if (errcode) - send_error(net,errcode,ER(errcode)); /* purecov: inspected */ + send_error(thd, errcode, ER(errcode)); /* purecov: inspected */ vio_close(vio); /* vio is freed in delete thd */ } if (lock) (void) pthread_mutex_unlock(&LOCK_thread_count); DBUG_VOID_RETURN; } +#endif /* EMBEDDED_LIBRARY */ + /* Called when a thread is aborted */ /* ARGSUSED */ @@ -1377,6 +1557,7 @@ void end_thread(THD *thd, bool put_in_cache) thd=thread_cache.get(); thd->real_id=pthread_self(); (void) thd->store_globals(); + thd->thr_create_time= time(NULL); threads.append(thd); pthread_mutex_unlock(&LOCK_thread_count); DBUG_VOID_RETURN; @@ -1426,23 +1607,6 @@ void flush_thread_cache() } -/* - Aborts a thread nicely. Commes here on SIGPIPE - TODO: One should have to fix that thr_alarm know about this - thread too. -*/ - -#ifdef THREAD_SPECIFIC_SIGPIPE -extern "C" sig_handler abort_thread(int sig __attribute__((unused))) -{ - THD *thd=current_thd; - DBUG_ENTER("abort_thread"); - if (thd) - thd->killed=1; - DBUG_VOID_RETURN; -} -#endif - /****************************************************************************** Setup a signal thread with handles all signals. 
Because Linux doesn't support schemas use a mutex to check that @@ -1463,7 +1627,11 @@ static void init_signals(void) } static void start_signal_handler(void) -{} +{ + // Save vm id of this process + if (!opt_bootstrap) + create_pid_file(); +} static void check_data_home(const char *path) {} @@ -1474,21 +1642,20 @@ static void check_data_home(const char *path) // down server event callback void mysql_down_server_cb(void *, void *) { - event_flag = TRUE; + event_flag = TRUE; kill_server(0); } // destroy callback resources void mysql_cb_destroy(void *) -{ - UnRegisterEventNotification(eh); // cleanup down event notification +{ + UnRegisterEventNotification(eh); // cleanup down event notification NX_UNWRAP_INTERFACE(ref); - - /* Deregister NSS volume deactivation event */ - NX_UNWRAP_INTERFACE(refneb); + /* Deregister NSS volume deactivation event */ + NX_UNWRAP_INTERFACE(refneb); if (neb_consumer_id) - UnRegisterConsumer(neb_consumer_id, NULL); + UnRegisterConsumer(neb_consumer_id, NULL); } @@ -1508,7 +1675,7 @@ void mysql_cb_init() Register for volume deactivation event Wrap the callback function, as it is called by non-LibC thread */ - (void *)NX_WRAP_INTERFACE(neb_event_callback, 1, &refneb); + (void *) NX_WRAP_INTERFACE(neb_event_callback, 1, &refneb); registerwithneb(); NXVmRegisterExitHandler(mysql_cb_destroy, NULL); // clean-up @@ -1517,7 +1684,7 @@ void mysql_cb_init() /* To get the name of the NetWare volume having MySQL data folder */ -void getvolumename() +static void getvolumename() { char *p; /* @@ -1534,7 +1701,7 @@ void getvolumename() Registering with NEB for NSS Volume Deactivation event */ -void registerwithneb() +static void registerwithneb() { ConsumerRegistrationInfo reg_info; @@ -1586,6 +1753,7 @@ void registerwithneb() /* Callback for NSS Volume Deactivation event */ + ulong neb_event_callback(struct EventBlock *eblock) { EventChangeVolStateEnter_s *voldata; @@ -1607,7 +1775,9 @@ ulong neb_event_callback(struct EventBlock *eblock) consoleprintf("MySQL data volume is deactivated, shutting down MySQL Server \n"); event_flag= TRUE; nw_panic = TRUE; + event_flag= TRUE; kill_server(0); + } } return 0; @@ -1620,7 +1790,7 @@ ulong neb_event_callback(struct EventBlock *eblock) #define ADMIN_VOL_PATH "_ADMIN:/Volumes/" -void getvolumeID(BYTE *volumeName) +static void getvolumeID(BYTE *volumeName) { char path[zMAX_FULL_NAME]; Key_t rootKey= 0, fileKey= 0; @@ -1628,7 +1798,7 @@ void getvolumeID(BYTE *volumeName) zInfo_s info; STATUS status; - /* Get the root key */ + /* Get the root key */ if ((status= zRootKey(0, &rootKey)) != zOK) { consoleprintf("\nGetNSSVolumeProperties - Failed to get root key, status: %d\n.", (int) status); @@ -1681,8 +1851,8 @@ static void init_signals(void) for (uint i=0 ; i < sizeof(signals)/sizeof(int) ; i++) signal(signals[i], kill_server); mysql_cb_init(); // initialize callbacks -} +} static void start_signal_handler(void) { @@ -1708,7 +1878,7 @@ static void check_data_home(const char *path) static void sig_reload(int signo) { // Flush everything - reload_acl_and_cache((THD*) 0,REFRESH_LOG, (TABLE_LIST*) 0); + reload_acl_and_cache((THD*) 0,REFRESH_LOG, (TABLE_LIST*) 0, NULL); signal(signo, SIG_ACK); } @@ -1732,7 +1902,6 @@ static void init_signals(void) signal(SIGALRM, SIG_IGN); signal(SIGBREAK,SIG_IGN); signal_thread = pthread_self(); - SIGNAL_THD; } static void start_signal_handler(void) @@ -1761,7 +1930,7 @@ extern "C" sig_handler handle_segfault(int sig) fprintf(stderr, "Fatal signal %d while backtracing\n", sig); exit(1); } - + segfaulted = 1; 
fprintf(stderr,"\ mysqld got signal %d;\n\ @@ -1773,19 +1942,20 @@ or misconfigured. This error can also be caused by malfunctioning hardware.\n", We will try our best to scrape up some info that will hopefully help diagnose\n\ the problem, but since we have already crashed, something is definitely wrong\n\ and this may fail.\n\n"); - fprintf(stderr, "key_buffer_size=%lu\n", (ulong) keybuff_size); + fprintf(stderr, "key_buffer_size=%lu\n", + (ulong) dflt_key_cache->key_cache_mem_size); fprintf(stderr, "read_buffer_size=%ld\n", global_system_variables.read_buff_size); fprintf(stderr, "max_used_connections=%ld\n", max_used_connections); fprintf(stderr, "max_connections=%ld\n", max_connections); fprintf(stderr, "threads_connected=%d\n", thread_count); fprintf(stderr, "It is possible that mysqld could use up to \n\ key_buffer_size + (read_buffer_size + sort_buffer_size)*max_connections = %ld K\n\ -bytes of memory\n", ((ulong) keybuff_size + +bytes of memory\n", ((ulong) dflt_key_cache->key_cache_mem_size + (global_system_variables.read_buff_size + global_system_variables.sortbuff_size) * max_connections)/ 1024); fprintf(stderr, "Hope that's ok; if not, decrease some variables in the equation.\n\n"); - + #if defined(HAVE_LINUXTHREADS) if (sizeof(char*) == 4 && thread_count > UNSAFE_DEFAULT_LINUX_THREADS) { @@ -1793,7 +1963,7 @@ bytes of memory\n", ((ulong) keybuff_size + You seem to be running 32-bit Linux and have %d concurrent connections.\n\ If you have not changed STACK_SIZE in LinuxThreads and built the binary \n\ yourself, LinuxThreads is quite likely to steal a part of the global heap for\n\ -the thread stack. Please read http://www.mysql.com/doc/L/i/Linux.html\n\n", +the thread stack. Please read http://www.mysql.com/doc/en/Linux.html\n\n", thread_count); } #endif /* HAVE_LINUXTHREADS */ @@ -1818,6 +1988,17 @@ information that should help you find out what is causing the crash.\n"); fflush(stderr); #endif /* HAVE_STACKTRACE */ +#ifdef HAVE_INITGROUPS + if (calling_initgroups) + fprintf(stderr, "\n\ +This crash occured while the server was calling initgroups(). This is\n\ +often due to the use of a mysqld that is statically linked against glibc\n\ +and configured to use LDAP in /etc/nsswitch.conf. You will need to either\n\ +upgrade to a version of glibc that does not have this problem (2.3.4 or\n\ +later when used with nscd), disable LDAP in your nsswitch.conf, or use a\n\ +mysqld that is not statically linked.\n"); +#endif + if (test_flags & TEST_CORE_ON_SIGNAL) { fprintf(stderr, "Writing a core file\n"); @@ -1841,8 +2022,8 @@ static void init_signals(void) DBUG_ENTER("init_signals"); if (test_flags & TEST_SIGINT) - sigset(THR_KILL_SIGNAL,end_thread_signal); - sigset(THR_SERVER_ALARM,print_signal_warning); // Should never be called! + my_sigset(THR_KILL_SIGNAL,end_thread_signal); + my_sigset(THR_SERVER_ALARM,print_signal_warning); // Should never be called! 
if (!(test_flags & TEST_NO_STACKTRACE) || (test_flags & TEST_CORE_ON_SIGNAL)) { @@ -1869,20 +2050,15 @@ static void init_signals(void) if (test_flags & TEST_CORE_ON_SIGNAL) { /* Change limits so that we will get a core file */ - struct rlimit rl; + STRUCT_RLIMIT rl; rl.rlim_cur = rl.rlim_max = RLIM_INFINITY; if (setrlimit(RLIMIT_CORE, &rl) && global_system_variables.log_warnings) - sql_print_error("Warning: setrlimit could not change the size of core files to 'infinity'; We may not be able to generate a core file on signals"); + sql_print_warning("setrlimit could not change the size of core files to 'infinity'; We may not be able to generate a core file on signals"); } #endif (void) sigemptyset(&set); -#ifdef THREAD_SPECIFIC_SIGPIPE - sigset(SIGPIPE,abort_thread); + my_sigset(SIGPIPE,SIG_IGN); sigaddset(&set,SIGPIPE); -#else - (void) signal(SIGPIPE,SIG_IGN); // Can't know which thread - sigaddset(&set,SIGPIPE); -#endif sigaddset(&set,SIGINT); #ifndef IGNORE_SIGHUP_SIGQUIT sigaddset(&set,SIGQUIT); @@ -1911,6 +2087,7 @@ static void init_signals(void) } +#ifndef EMBEDDED_LIBRARY static void start_signal_handler(void) { int error; @@ -2016,6 +2193,7 @@ extern "C" void *signal_hand(void *arg __attribute__((unused))) while ((error=my_sigwait(&set,&sig)) == EINTR) ; if (cleanup_done) { + DBUG_PRINT("quit",("signal_handler: calling my_thread_end()")); my_thread_end(); signal_thread_in_use= 0; pthread_exit(0); // Safety @@ -2025,7 +2203,7 @@ extern "C" void *signal_hand(void *arg __attribute__((unused))) case SIGQUIT: case SIGKILL: #ifdef EXTRA_DEBUG - sql_print_error("Got signal %d to shutdown mysqld",sig); + sql_print_information("Got signal %d to shutdown mysqld",sig); #endif DBUG_PRINT("info",("Got signal: %d abort_loop: %d",sig,abort_loop)); if (!abort_loop) @@ -2037,7 +2215,7 @@ extern "C" void *signal_hand(void *arg __attribute__((unused))) my_pthread_attr_setprio(&connection_attrib,INTERRUPT_PRIOR); if (pthread_create(&tmp,&connection_attrib, kill_server_thread, (void*) sig)) - sql_print_error("Error: Can't create thread to kill server"); + sql_print_error("Can't create thread to kill server"); #else kill_server((void*) sig); // MIT THREAD has a alarm thread #endif @@ -2046,12 +2224,12 @@ extern "C" void *signal_hand(void *arg __attribute__((unused))) case SIGHUP: if (!abort_loop) { + mysql_print_status((THD*) 0); // Print some debug info reload_acl_and_cache((THD*) 0, (REFRESH_LOG | REFRESH_TABLES | REFRESH_FAST | REFRESH_STATUS | REFRESH_GRANT | REFRESH_THREADS | REFRESH_HOSTS), - (TABLE_LIST*) 0); // Flush logs - mysql_print_status((THD*) 0); // Send debug some info + (TABLE_LIST*) 0, NULL); // Flush logs } break; #ifdef USE_ONE_SIGNAL_HAND @@ -2061,13 +2239,14 @@ extern "C" void *signal_hand(void *arg __attribute__((unused))) #endif default: #ifdef EXTRA_DEBUG - sql_print_error("Warning: Got signal: %d error: %d",sig,error); /* purecov: tested */ + sql_print_warning("Got signal: %d error: %d",sig,error); /* purecov: tested */ #endif break; /* purecov: tested */ } } return(0); /* purecov: deadcode */ } +#endif /*!EMBEDDED_LIBRARY*/ static void check_data_home(const char *path) {} @@ -2084,33 +2263,41 @@ static void check_data_home(const char *path) /* ARGSUSED */ extern "C" int my_message_sql(uint error, const char *str, myf MyFlags) { - NET *net; + THD *thd; DBUG_ENTER("my_message_sql"); - DBUG_PRINT("error",("Message: '%s'",str)); - if ((net=my_pthread_getspecific_ptr(NET*,THR_NET))) + DBUG_PRINT("error", ("Message: '%s'", str)); + if ((thd= current_thd)) { - if (!net->last_error[0]) 
// Return only first message + /* + thd->lex->current_select == 0 if lex structure is not inited + (not query command (COM_QUERY)) + */ + if (thd->lex->current_select && + thd->lex->current_select->no_error && !thd->is_fatal_error) { - strmake(net->last_error,str,sizeof(net->last_error)-1); - net->last_errno=error ? error : ER_UNKNOWN_ERROR; + DBUG_PRINT("error", ("Error converted to warning: current_select: no_error %d fatal_error: %d", + (thd->lex->current_select ? + thd->lex->current_select->no_error : 0), + (int) thd->is_fatal_error)); + + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, error, str); + } + else + { + NET *net= &thd->net; + net->report_error= 1; + if (!net->last_error[0]) // Return only first message + { + strmake(net->last_error, str, sizeof(net->last_error)-1); + net->last_errno= error ? error : ER_UNKNOWN_ERROR; + } } } - if (!net || MyFlags & ME_NOREFRESH) + if (!thd || MyFlags & ME_NOREFRESH) sql_print_error("%s: %s",my_progname,str); /* purecov: inspected */ DBUG_RETURN(0); } - -/* - Forget last error message (if we got one) -*/ - -void clear_error_message(THD *thd) -{ - thd->net.last_error[0]= 0; -} - - #ifdef __WIN__ struct utsname @@ -2128,14 +2315,13 @@ int uname(struct utsname *a) extern "C" pthread_handler_decl(handle_shutdown,arg) { MSG msg; - SHUTDOWN_THD; my_thread_init(); /* this call should create the message queue for this thread */ PeekMessage(&msg, NULL, 1, 65534,PM_NOREMOVE); #if !defined(EMBEDDED_LIBRARY) if (WaitForSingleObject(hEventShutdown,INFINITE)==WAIT_OBJECT_0) -#endif +#endif /* EMBEDDED_LIBRARY */ kill_server(MYSQL_KILL_SIGNAL); return 0; } @@ -2157,7 +2343,6 @@ int STDCALL handle_kill(ulong ctrl_type) #ifdef OS2 extern "C" pthread_handler_decl(handle_shutdown,arg) { - SHUTDOWN_THD; my_thread_init(); // wait semaphore @@ -2181,7 +2366,13 @@ extern "C" pthread_handler_decl(handle_shutdown,arg) #endif -const char *load_default_groups[]= { "mysqld","server",MYSQL_BASE_VERSION,0,0}; +const char *load_default_groups[]= { +#ifdef HAVE_NDBCLUSTER_DB +"mysql_cluster", +#endif +"mysqld","server",MYSQL_BASE_VERSION,0,0}; +static const int load_default_groups_sz= +sizeof(load_default_groups)/sizeof(load_default_groups[0]); bool open_log(MYSQL_LOG *log, const char *hostname, const char *opt_name, const char *extension, @@ -2214,29 +2405,59 @@ bool open_log(MYSQL_LOG *log, const char *hostname, } +/* + Initialize one of the global date/time format variables -#ifdef __WIN__ -int win_main(int argc, char **argv) -#else -int main(int argc, char **argv) -#endif + SYNOPSIS + init_global_datetime_format() + format_type What kind of format should be supported + var_ptr Pointer to variable that should be updated + + NOTES + The default value is taken from either opt_date_time_formats[] or + the ISO format (ANSI SQL) + + RETURN + 0 ok + 1 error +*/ + +bool init_global_datetime_format(timestamp_type format_type, + DATE_TIME_FORMAT **var_ptr) { - DEBUGGER_OFF; + /* Get command line option */ + const char *str= opt_date_time_formats[format_type]; - my_umask=0660; // Default umask for new files - my_umask_dir=0700; // Default umask for new directories - MAIN_THD; - /* - Initialize signal_th and shutdown_th to main_th for default value - as we need to initialize them to something safe. They are used - when compiled with safemalloc. 
- */ - SIGNAL_THD; - SHUTDOWN_THD; - MY_INIT(argv[0]); // init my_sys library & pthreads + if (!str) // No specified format + { + str= get_date_time_format_str(&known_date_time_formats[ISO_FORMAT], + format_type); + /* + Set the "command line" option to point to the generated string so + that we can set global formats back to default + */ + opt_date_time_formats[format_type]= str; + } + if (!(*var_ptr= date_time_format_make(format_type, str, strlen(str)))) + { + fprintf(stderr, "Wrong date/time format specifier: %s\n", str); + return 1; + } + return 0; +} + + +static int init_common_variables(const char *conf_file_name, int argc, + char **argv, const char **groups) +{ + umask(((~my_umask) & 0666)); tzset(); // Set tzname + max_system_variables.pseudo_thread_id= (ulong)~0; start_time=time((time_t*) 0); + if (init_thread_environment()) + return 1; + mysql_init_variables(); #ifdef OS2 { @@ -2250,10 +2471,19 @@ int main(int argc, char **argv) { struct tm tm_tmp; localtime_r(&start_time,&tm_tmp); - strmov(time_zone,tzname[tm_tmp.tm_isdst != 0 ? 1 : 0]); - } -#endif + strmake(system_time_zone, tzname[tm_tmp.tm_isdst != 0 ? 1 : 0], + sizeof(system_time_zone)-1); + } +#endif + /* + We set SYSTEM time zone as reasonable default and + also for failure of my_tz_init() and bootstrap mode. + If user explicitly set time zone with --default-time-zone + option we will change this value in my_tz_init(). + */ + global_system_variables.time_zone= my_tz_SYSTEM; + /* Init mutexes for the global MYSQL_LOG objects. As safe_mutex depends on what MY_INIT() does, we can't init the mutexes of @@ -2270,69 +2500,98 @@ int main(int argc, char **argv) strmake(pidfile_name, glob_hostname, sizeof(pidfile_name)-5); strmov(fn_ext(pidfile_name),".pid"); // Add proper extension -#ifdef _CUSTOMSTARTUPCONFIG_ - if (_cust_check_startup()) - { - /* _cust_check_startup will report startup failure error */ - exit( 1 ); - } -#endif - load_defaults(MYSQL_CONFIG_NAME,load_default_groups,&argc,&argv); + load_defaults(conf_file_name, groups, &argc, &argv); defaults_argv=argv; - - /* Get default temporary directory */ - opt_mysql_tmpdir=getenv("TMPDIR"); /* Use this if possible */ -#if defined( __WIN__) || defined(OS2) - if (!opt_mysql_tmpdir) - opt_mysql_tmpdir=getenv("TEMP"); - if (!opt_mysql_tmpdir) - opt_mysql_tmpdir=getenv("TMP"); -#endif - if (!opt_mysql_tmpdir || !opt_mysql_tmpdir[0]) - opt_mysql_tmpdir=(char*) P_tmpdir; /* purecov: inspected */ - - /* needed by get_options */ - - (void) pthread_mutex_init(&LOCK_error_log,MY_MUTEX_INIT_FAST); - - set_options(); get_options(argc,argv); set_server_version(); DBUG_PRINT("info",("%s Ver %s for %s on %s\n",my_progname, server_version, SYSTEM_TYPE,MACHINE_TYPE)); - /* These must be set early */ + /* connections and databases needs lots of files */ + { + uint files, wanted_files; - (void) pthread_mutex_init(&LOCK_mysql_create_db,MY_MUTEX_INIT_SLOW); - (void) pthread_mutex_init(&LOCK_Acl,MY_MUTEX_INIT_SLOW); - (void) pthread_mutex_init(&LOCK_grant,MY_MUTEX_INIT_FAST); - (void) pthread_mutex_init(&LOCK_open,MY_MUTEX_INIT_FAST); - (void) pthread_mutex_init(&LOCK_thread_count,MY_MUTEX_INIT_FAST); - (void) pthread_mutex_init(&LOCK_mapped_file,MY_MUTEX_INIT_SLOW); - (void) pthread_mutex_init(&LOCK_status,MY_MUTEX_INIT_FAST); - (void) pthread_mutex_init(&LOCK_delayed_insert,MY_MUTEX_INIT_FAST); - (void) pthread_mutex_init(&LOCK_delayed_status,MY_MUTEX_INIT_FAST); - (void) pthread_mutex_init(&LOCK_delayed_create,MY_MUTEX_INIT_SLOW); - (void) pthread_mutex_init(&LOCK_manager,MY_MUTEX_INIT_FAST); 
- (void) pthread_mutex_init(&LOCK_crypt,MY_MUTEX_INIT_FAST); - (void) pthread_mutex_init(&LOCK_bytes_sent,MY_MUTEX_INIT_FAST); - (void) pthread_mutex_init(&LOCK_bytes_received,MY_MUTEX_INIT_FAST); - (void) pthread_mutex_init(&LOCK_user_conn, MY_MUTEX_INIT_FAST); - (void) pthread_mutex_init(&LOCK_rpl_status, MY_MUTEX_INIT_FAST); - (void) pthread_mutex_init(&LOCK_active_mi, MY_MUTEX_INIT_FAST); - (void) pthread_mutex_init(&LOCK_global_system_variables, MY_MUTEX_INIT_FAST); - (void) pthread_cond_init(&COND_thread_count,NULL); - (void) pthread_cond_init(&COND_refresh,NULL); - (void) pthread_cond_init(&COND_thread_cache,NULL); - (void) pthread_cond_init(&COND_flush_thread_cache,NULL); - (void) pthread_cond_init(&COND_manager,NULL); - (void) pthread_cond_init(&COND_rpl_status, NULL); - init_signals(); + wanted_files= 10+(uint) max(max_connections*5, + max_connections+table_cache_size*2); + set_if_bigger(wanted_files, open_files_limit); + files= my_set_max_open_files(wanted_files); - if (set_default_charset_by_name(sys_charset.value, MYF(MY_WME))) - exit(1); - charsets_list = list_charsets(MYF(MY_COMPILED_SETS|MY_CONFIG_SETS)); + if (files < wanted_files) + { + if (!open_files_limit) + { + max_connections= (ulong) min((files-10),max_connections); + table_cache_size= (ulong) max((files-10-max_connections)/2,64); + DBUG_PRINT("warning", + ("Changed limits: max_open_files: %u max_connections: %ld table_cache: %ld", + files, max_connections, table_cache_size)); + if (global_system_variables.log_warnings) + sql_print_warning("Changed limits: max_open_files: %u max_connections: %ld table_cache: %ld", + files, max_connections, table_cache_size); + } + else if (global_system_variables.log_warnings) + sql_print_warning("Could not increase number of max_open_files to more than %u (request: %u)", files, wanted_files); + } + open_files_limit= files; + } + unireg_init(opt_specialflag); /* Set up extern variabels */ + if (init_errmessage()) /* Read error messages from file */ + return 1; + init_client_errs(); + lex_init(); + item_init(); + set_var_init(); + mysys_uses_curses=0; +#ifdef USE_REGEX + my_regex_init(&my_charset_latin1); +#endif + if (!(default_charset_info= get_charset_by_csname(default_character_set_name, + MY_CS_PRIMARY, + MYF(MY_WME)))) + return 1; + if (default_collation_name) + { + CHARSET_INFO *default_collation; + default_collation= get_charset_by_name(default_collation_name, MYF(0)); + if (!default_collation) + { + sql_print_error(ER(ER_UNKNOWN_COLLATION), default_collation_name); + return 1; + } + if (!my_charset_same(default_charset_info, default_collation)) + { + sql_print_error(ER(ER_COLLATION_CHARSET_MISMATCH), + default_collation_name, + default_charset_info->csname); + return 1; + } + default_charset_info= default_collation; + } + /* Set collactions that depends on the default collation */ + global_system_variables.collation_server= default_charset_info; + global_system_variables.collation_database= default_charset_info; + global_system_variables.collation_connection= default_charset_info; + global_system_variables.character_set_results= default_charset_info; + global_system_variables.character_set_client= default_charset_info; + global_system_variables.collation_connection= default_charset_info; + + sys_init_connect.value_length= 0; + if ((sys_init_connect.value= opt_init_connect)) + sys_init_connect.value_length= strlen(opt_init_connect); + else + sys_init_connect.value=my_strdup("",MYF(0)); + + sys_init_slave.value_length= 0; + if ((sys_init_slave.value= opt_init_slave)) + 
sys_init_slave.value_length= strlen(opt_init_slave); + else + sys_init_slave.value=my_strdup("",MYF(0)); + + if (use_temp_pool && bitmap_init(&temp_pool,0,1024,1)) + return 1; + if (my_dbopt_init()) + return 1; /* Ensure that lower_case_table_names is set on system where we have case @@ -2347,8 +2606,8 @@ int main(int argc, char **argv) if (lower_case_table_names_used) { if (global_system_variables.log_warnings) - sql_print_error("\ -Warning: You have forced lower_case_table_names to 0 through a command-line \ + sql_print_warning("\ +You have forced lower_case_table_names to 0 through a command-line \ option, even though your file system '%s' is case insensitive. This means \ that you can corrupt a MyISAM table by accessing it with different cases. \ You should consider changing lower_case_table_names to 1 or 2", @@ -2357,126 +2616,200 @@ You should consider changing lower_case_table_names to 1 or 2", else { if (global_system_variables.log_warnings) - sql_print_error("Warning: Setting lower_case_table_names=2 because file system for %s is case insensitive", mysql_real_data_home); + sql_print_warning("Setting lower_case_table_names=2 because file system for %s is case insensitive", mysql_real_data_home); lower_case_table_names= 2; } } - -#ifdef HAVE_OPENSSL - if (opt_use_ssl) + else if (lower_case_table_names == 2 && + !(lower_case_file_system= + (test_if_case_insensitive(mysql_real_data_home) == 1))) { - /* having ssl_acceptor_fd != 0 signals the use of SSL */ - ssl_acceptor_fd= new_VioSSLAcceptorFd(opt_ssl_key, opt_ssl_cert, - opt_ssl_ca, opt_ssl_capath, - opt_ssl_cipher); - DBUG_PRINT("info",("ssl_acceptor_fd: %lx", (long) ssl_acceptor_fd)); - if (!ssl_acceptor_fd) - opt_use_ssl = 0; + if (global_system_variables.log_warnings) + sql_print_warning("lower_case_table_names was set to 2, even though your " + "the file system '%s' is case sensitive. Now setting " + "lower_case_table_names to 0 to avoid future problems.", + mysql_real_data_home); + lower_case_table_names= 0; } -#endif /* HAVE_OPENSSL */ -#ifdef HAVE_LIBWRAP - libwrapName= my_progname+dirname_length(my_progname); - openlog(libwrapName, LOG_PID, LOG_AUTH); -#endif + /* Reset table_alias_charset, now that lower_case_table_names is set. */ + table_alias_charset= (lower_case_table_names ? 
+ files_charset_info : + &my_charset_bin); - if (!(opt_specialflag & SPECIAL_NO_PRIOR)) - my_pthread_setprio(pthread_self(),CONNECT_PRIOR); + return 0; +} + + +static int init_thread_environment() +{ + (void) pthread_mutex_init(&LOCK_mysql_create_db,MY_MUTEX_INIT_SLOW); + (void) pthread_mutex_init(&LOCK_Acl,MY_MUTEX_INIT_SLOW); + (void) pthread_mutex_init(&LOCK_open,MY_MUTEX_INIT_FAST); + (void) pthread_mutex_init(&LOCK_thread_count,MY_MUTEX_INIT_FAST); + (void) pthread_mutex_init(&LOCK_mapped_file,MY_MUTEX_INIT_SLOW); + (void) pthread_mutex_init(&LOCK_status,MY_MUTEX_INIT_FAST); + (void) pthread_mutex_init(&LOCK_error_log,MY_MUTEX_INIT_FAST); + (void) pthread_mutex_init(&LOCK_delayed_insert,MY_MUTEX_INIT_FAST); + (void) pthread_mutex_init(&LOCK_delayed_status,MY_MUTEX_INIT_FAST); + (void) pthread_mutex_init(&LOCK_delayed_create,MY_MUTEX_INIT_SLOW); + (void) pthread_mutex_init(&LOCK_manager,MY_MUTEX_INIT_FAST); + (void) pthread_mutex_init(&LOCK_crypt,MY_MUTEX_INIT_FAST); + (void) pthread_mutex_init(&LOCK_bytes_sent,MY_MUTEX_INIT_FAST); + (void) pthread_mutex_init(&LOCK_bytes_received,MY_MUTEX_INIT_FAST); + (void) pthread_mutex_init(&LOCK_user_conn, MY_MUTEX_INIT_FAST); + (void) pthread_mutex_init(&LOCK_active_mi, MY_MUTEX_INIT_FAST); + (void) pthread_mutex_init(&LOCK_global_system_variables, MY_MUTEX_INIT_FAST); + (void) pthread_mutex_init(&LOCK_prepared_stmt_count, MY_MUTEX_INIT_FAST); + (void) pthread_mutex_init(&LOCK_uuid_generator, MY_MUTEX_INIT_FAST); +#ifdef HAVE_OPENSSL + (void) pthread_mutex_init(&LOCK_des_key_file,MY_MUTEX_INIT_FAST); +#endif + (void) my_rwlock_init(&LOCK_sys_init_connect, NULL); + (void) my_rwlock_init(&LOCK_sys_init_slave, NULL); + (void) my_rwlock_init(&LOCK_grant, NULL); + (void) pthread_cond_init(&COND_thread_count,NULL); + (void) pthread_cond_init(&COND_refresh,NULL); + (void) pthread_cond_init(&COND_thread_cache,NULL); + (void) pthread_cond_init(&COND_flush_thread_cache,NULL); + (void) pthread_cond_init(&COND_manager,NULL); +#ifdef HAVE_REPLICATION + (void) pthread_mutex_init(&LOCK_rpl_status, MY_MUTEX_INIT_FAST); + (void) pthread_cond_init(&COND_rpl_status, NULL); +#endif /* Parameter for threads created for connections */ (void) pthread_attr_init(&connection_attrib); (void) pthread_attr_setdetachstate(&connection_attrib, PTHREAD_CREATE_DETACHED); - pthread_attr_setstacksize(&connection_attrib,thread_stack); -#ifdef HAVE_PTHREAD_ATTR_GETSTACKSIZE + pthread_attr_setscope(&connection_attrib, PTHREAD_SCOPE_SYSTEM); + if (!(opt_specialflag & SPECIAL_NO_PRIOR)) + my_pthread_attr_setprio(&connection_attrib,WAIT_PRIOR); + + if (pthread_key_create(&THR_THD,NULL) || + pthread_key_create(&THR_MALLOC,NULL)) { - /* Retrieve used stack size; Needed for checking stack overflows */ - size_t stack_size= 0; - pthread_attr_getstacksize(&connection_attrib, &stack_size); - /* We must check if stack_size = 0 as Solaris 2.9 can return 0 here */ - if (stack_size && stack_size < thread_stack) - { - if (global_system_variables.log_warnings) - sql_print_error("Warning: Asked for %ld thread stack, but got %ld", - thread_stack, stack_size); - thread_stack= stack_size; - } + sql_print_error("Can't create thread-keys"); + return 1; } +#ifdef HAVE_OPENSSL + openssl_stdlocks= (openssl_lock_t*) OPENSSL_malloc(CRYPTO_num_locks() * + sizeof(openssl_lock_t)); + for (int i= 0; i < CRYPTO_num_locks(); ++i) + (void) my_rwlock_init(&openssl_stdlocks[i].lock, NULL); + CRYPTO_set_dynlock_create_callback(openssl_dynlock_create); + CRYPTO_set_dynlock_destroy_callback(openssl_dynlock_destroy); + 
CRYPTO_set_dynlock_lock_callback(openssl_lock); + CRYPTO_set_locking_callback(openssl_lock_function); + CRYPTO_set_id_callback(openssl_id_function); #endif -#ifdef __NETWARE__ - /* Increasing stacksize of threads on NetWare */ + return 0; +} - pthread_attr_setstacksize(&connection_attrib, NW_THD_STACKSIZE); -#endif - if (!(opt_specialflag & SPECIAL_NO_PRIOR)) - my_pthread_attr_setprio(&connection_attrib,WAIT_PRIOR); - pthread_attr_setscope(&connection_attrib, PTHREAD_SCOPE_SYSTEM); -#if defined( SET_RLIMIT_NOFILE) || defined( OS2) - /* connections and databases needs lots of files */ +#ifdef HAVE_OPENSSL +static unsigned long openssl_id_function() +{ + return (unsigned long) pthread_self(); +} + + +static openssl_lock_t *openssl_dynlock_create(const char *file, int line) +{ + openssl_lock_t *lock= new openssl_lock_t; + my_rwlock_init(&lock->lock, NULL); + return lock; +} + + +static void openssl_dynlock_destroy(openssl_lock_t *lock, const char *file, + int line) +{ + rwlock_destroy(&lock->lock); + delete lock; +} + + +static void openssl_lock_function(int mode, int n, const char *file, int line) +{ + if (n < 0 || n > CRYPTO_num_locks()) { - uint wanted_files=10+(uint) max(max_connections*5, - max_connections+table_cache_size*2); - set_if_bigger(wanted_files, open_files_limit); - // Note that some system returns 0 if we succeed here: - uint files=set_maximum_open_files(wanted_files); - if (files && files < wanted_files && ! open_files_limit) - { - max_connections= (ulong) min((files-10),max_connections); - table_cache_size= (ulong) max((files-10-max_connections)/2,64); - DBUG_PRINT("warning", - ("Changed limits: max_connections: %ld table_cache: %ld", - max_connections,table_cache_size)); - if (global_system_variables.log_warnings) - sql_print_error("Warning: Changed limits: max_connections: %ld table_cache: %ld",max_connections,table_cache_size); - } - open_files_limit= files; + /* Lock number out of bounds. */ + sql_print_error("Fatal: OpenSSL interface problem (n = %d)", n); + abort(); } -#else - open_files_limit= 0; /* Can't set or detect limit */ -#endif - unireg_init(opt_specialflag); /* Set up extern variabels */ - init_errmessage(); /* Read error messages from file */ - lex_init(); - item_init(); - set_var_init(); - mysys_uses_curses=0; -#ifdef USE_REGEX - regex_init(); -#endif - select_thread=pthread_self(); - select_thread_in_use=1; - if (use_temp_pool && bitmap_init(&temp_pool,1024,1)) - unireg_abort(1); + openssl_lock(mode, &openssl_stdlocks[n], file, line); +} - /* - We have enough space for fiddling with the argv, continue - */ - umask(((~my_umask) & 0666)); - check_data_home(mysql_real_data_home); - if (my_setwd(mysql_real_data_home,MYF(MY_WME))) + +static void openssl_lock(int mode, openssl_lock_t *lock, const char *file, + int line) +{ + int err; + char const *what; + + switch (mode) { + case CRYPTO_LOCK|CRYPTO_READ: + what = "read lock"; + err = rw_rdlock(&lock->lock); + break; + case CRYPTO_LOCK|CRYPTO_WRITE: + what = "write lock"; + err = rw_wrlock(&lock->lock); + break; + case CRYPTO_UNLOCK|CRYPTO_READ: + case CRYPTO_UNLOCK|CRYPTO_WRITE: + what = "unlock"; + err = rw_unlock(&lock->lock); + break; + default: + /* Unknown locking mode. 
*/ + sql_print_error("Fatal: OpenSSL interface problem (mode=0x%x)", mode); + abort(); + } + if (err) { - unireg_abort(1); /* purecov: inspected */ + sql_print_error("Fatal: can't %s OpenSSL %s lock", what); + abort(); } - mysql_data_home= mysql_data_home_buff; - mysql_data_home[0]=FN_CURLIB; // all paths are relative from here - mysql_data_home[1]=0; - server_init(); - if (table_cache_init() || hostname_cache_init()) +} +#endif /* HAVE_OPENSSL */ + + +static void init_ssl() +{ +#ifdef HAVE_OPENSSL + if (opt_use_ssl) { - unireg_abort(1); + /* having ssl_acceptor_fd != 0 signals the use of SSL */ + ssl_acceptor_fd= new_VioSSLAcceptorFd(opt_ssl_key, opt_ssl_cert, + opt_ssl_ca, opt_ssl_capath, + opt_ssl_cipher); + DBUG_PRINT("info",("ssl_acceptor_fd: %lx", (long) ssl_acceptor_fd)); + if (!ssl_acceptor_fd) + opt_use_ssl = 0; } + if (des_key_file) + load_des_key_file(des_key_file); +#endif /* HAVE_OPENSSL */ +} + + +static int init_server_components() +{ + DBUG_ENTER("init_server_components"); + if (table_cache_init() || hostname_cache_init()) + unireg_abort(1); + query_cache_result_size_limit(query_cache_limit); + query_cache_set_min_res_unit(query_cache_min_res_unit); query_cache_init(); query_cache_resize(query_cache_size); randominit(&sql_rand,(ulong) start_time,(ulong) start_time/2); reset_floating_point_exceptions(); init_thr_lock(); +#ifdef HAVE_REPLICATION init_slave_list(); -#ifdef HAVE_OPENSSL - if (des_key_file) - load_des_key_file(des_key_file); -#endif /* HAVE_OPENSSL */ - +#endif /* Setup log files */ if (opt_log) open_log(&mysql_log, glob_hostname, opt_logname, ".log", NullS, @@ -2487,15 +2820,49 @@ You should consider changing lower_case_table_names to 1 or 2", NullS, LOG_NEW, 0, 0, 0); using_update_log=1; } - if (opt_slow_log) open_log(&mysql_slow_log, glob_hostname, opt_slow_logname, "-slow.log", - NullS, LOG_NORMAL, 0, 0, 0); + NullS, LOG_NORMAL, 0, 0, 0); + + if (opt_bin_log) + { + /* If we fail to open binlog, it's going to hinder our recovery, so die */ + if (open_log(&mysql_bin_log, glob_hostname, opt_bin_logname, "-bin", + opt_binlog_index_name, LOG_BIN, 0, 0, max_binlog_size)) + unireg_abort(1); + using_update_log=1; +#ifdef HAVE_REPLICATION + if (expire_logs_days) + { + long purge_time= time(0) - expire_logs_days*24*60*60; + if (purge_time >= 0) + mysql_bin_log.purge_logs_before_date(purge_time); + } +#endif + } + else if (opt_log_slave_updates) + { + sql_print_warning("\ +you need to use --log-bin to make --log-slave-updates work. 
\ +Now disabling --log-slave-updates."); + } + +#ifdef HAVE_REPLICATION + if (opt_log_slave_updates && replicate_same_server_id) + { + sql_print_error("\ +using --replicate-same-server-id in conjunction with \ +--log-slave-updates is impossible, it would lead to infinite loops in this \ +server."); + unireg_abort(1); + } +#endif if (opt_error_log) { if (!log_error_file_ptr[0]) - fn_format(log_error_file, glob_hostname, mysql_data_home, ".err", 0); + fn_format(log_error_file, glob_hostname, mysql_data_home, ".err", + MY_REPLACE_EXT); /* replace '.<domain>' by '.err', bug#4997 */ else fn_format(log_error_file, log_error_file_ptr, mysql_data_home, ".err", MY_UNPACK_FILENAME | MY_SAFE_PATH); @@ -2503,22 +2870,75 @@ You should consider changing lower_case_table_names to 1 or 2", opt_error_log= 1; // Too long file name else { +#ifndef EMBEDDED_LIBRARY if (freopen(log_error_file, "a+", stdout)) - freopen(log_error_file, "a+", stderr); +#endif + stderror_file= freopen(log_error_file, "a+", stderr); + } + } + + if (opt_innodb_safe_binlog) + { + if (have_innodb != SHOW_OPTION_YES) + sql_print_warning("--innodb-safe-binlog is meaningful only if " + "the InnoDB storage engine is enabled in the server."); +#ifdef HAVE_INNOBASE_DB + if (innobase_flush_log_at_trx_commit != 1) + { + sql_print_warning("--innodb-safe-binlog is meaningful only if " + "innodb_flush_log_at_trx_commit is 1; now setting it " + "to 1."); + innobase_flush_log_at_trx_commit= 1; } + if (innobase_unix_file_flush_method) + { + /* + This option has so many values that it's hard to know which value is + good (especially "littlesync", and on Windows... see + srv/srv0start.c). + */ + sql_print_warning("--innodb-safe-binlog requires that " + "the innodb_flush_method actually synchronizes the " + "InnoDB log to disk; it is your responsibility " + "to verify that the method you chose does it."); + } + if (sync_binlog_period != 1) + { + sql_print_warning("--innodb-safe-binlog is meaningful only if " + "the global sync_binlog variable is 1; now setting it " + "to 1."); + sync_binlog_period= 1; + } +#endif } + if (ha_init()) { sql_print_error("Can't init databases"); - if (unix_sock != INVALID_SOCKET) - unlink(mysql_unix_port); unireg_abort(1); } - ha_key_cache(); -#if defined(HAVE_MLOCKALL) && defined(MCL_CURRENT) + if (opt_myisam_log) + (void) mi_log(1); + + /* + Now that InnoDB is initialized, we can know the last good binlog position + and cut the binlog if needed. This function does nothing if there was no + crash recovery by InnoDB. + */ + if (opt_innodb_safe_binlog) + { + /* not fatal if fails (but print errors) */ + mysql_bin_log.cut_spurious_tail(); + } + mysql_bin_log.report_pos_in_innodb(); + + /* call ha_init_key_cache() on all key caches to init them */ + process_key_caches(&ha_init_key_cache); + +#if defined(HAVE_MLOCKALL) && defined(MCL_CURRENT) && !defined(EMBEDDED_LIBRARY) if (locked_in_memory && !getuid()) { - if (setreuid((uid_t)-1,0) == -1) + if (setreuid((uid_t)-1, 0) == -1) { // this should never happen sql_perror("setreuid"); unireg_abort(1); @@ -2526,115 +2946,286 @@ You should consider changing lower_case_table_names to 1 or 2", if (mlockall(MCL_CURRENT)) { if (global_system_variables.log_warnings) - sql_print_error("Warning: Failed to lock memory. Errno: %d\n",errno); + sql_print_warning("Failed to lock memory. 
Errno: %d\n",errno); + locked_in_memory= 0; } - else - locked_in_memory=1; if (user_info) set_user(mysqld_user, user_info); } -#else - locked_in_memory=0; + else #endif + locked_in_memory=0; - if (opt_myisam_log) - (void) mi_log(1); ft_init_stopwords(); + init_max_user_conn(); + init_update_queries(); + DBUG_RETURN(0); +} + + +static void create_maintenance_thread() +{ + if ( +#ifdef HAVE_BERKELEY_DB + (have_berkeley_db == SHOW_OPTION_YES) || +#endif + (flush_time && flush_time != ~(ulong) 0L)) + { + pthread_t hThread; + if (pthread_create(&hThread,&connection_attrib,handle_manager,0)) + sql_print_warning("Can't create thread to manage maintenance"); + } +} + + +static void create_shutdown_thread() +{ +#if !defined(EMBEDDED_LIBRARY) #ifdef __WIN__ - if (!opt_console) - FreeConsole(); // Remove window + hEventShutdown=CreateEvent(0, FALSE, FALSE, shutdown_event_name); + pthread_t hThread; + if (pthread_create(&hThread,&connection_attrib,handle_shutdown,0)) + sql_print_warning("Can't create thread to handle shutdown requests"); + + // On "Stop Service" we have to do regular shutdown + Service.SetShutdownEvent(hEventShutdown); +#endif +#ifdef OS2 + pthread_cond_init(&eventShutdown, NULL); + pthread_t hThread; + if (pthread_create(&hThread,&connection_attrib,handle_shutdown,0)) + sql_print_warning("Can't create thread to handle shutdown requests"); #endif +#endif // EMBEDDED_LIBRARY +} - /* - init signals & alarm - After this we can't quit by a simple unireg_abort - */ - error_handler_hook = my_message_sql; - if (pthread_key_create(&THR_THD,NULL) || pthread_key_create(&THR_NET,NULL) || - pthread_key_create(&THR_MALLOC,NULL)) + +#if defined(__NT__) || defined(HAVE_SMEM) +static void handle_connections_methods() +{ + pthread_t hThread; + DBUG_ENTER("handle_connections_methods"); +#ifdef __NT__ + if (hPipe == INVALID_HANDLE_VALUE && + (!have_tcpip || opt_disable_networking) && + !opt_enable_shared_memory) { - sql_print_error("Can't create thread-keys"); - if (unix_sock != INVALID_SOCKET) - unlink(mysql_unix_port); - unireg_abort(1); + sql_print_error("TCP/IP, --shared-memory, or --named-pipe should be configured on NT OS"); + unireg_abort(1); // Will not return } - start_signal_handler(); // Creates pidfile - if (acl_init((THD*) 0, opt_noacl)) +#endif + + pthread_mutex_lock(&LOCK_thread_count); + (void) pthread_cond_init(&COND_handler_count,NULL); + handler_count=0; +#ifdef __NT__ + if (hPipe != INVALID_HANDLE_VALUE) { - abort_loop=1; - select_thread_in_use=0; -#ifndef __NETWARE__ - (void) pthread_kill(signal_thread, MYSQL_KILL_SIGNAL); -#endif /* __NETWARE__ */ -#ifndef __WIN__ - if (!opt_bootstrap) - (void) my_delete(pidfile_name,MYF(MY_WME)); // Not needed anymore + handler_count++; + if (pthread_create(&hThread,&connection_attrib, + handle_connections_namedpipes, 0)) + { + sql_print_warning("Can't create thread to handle named pipes"); + handler_count--; + } + } +#endif /* __NT__ */ + if (have_tcpip && !opt_disable_networking) + { + handler_count++; + if (pthread_create(&hThread,&connection_attrib, + handle_connections_sockets, 0)) + { + sql_print_warning("Can't create thread to handle TCP/IP"); + handler_count--; + } + } +#ifdef HAVE_SMEM + if (opt_enable_shared_memory) + { + handler_count++; + if (pthread_create(&hThread,&connection_attrib, + handle_connections_shared_memory, 0)) + { + sql_print_warning("Can't create thread to handle shared memory"); + handler_count--; + } + } +#endif + + while (handler_count > 0) + pthread_cond_wait(&COND_handler_count,&LOCK_thread_count); + 
pthread_mutex_unlock(&LOCK_thread_count); + DBUG_VOID_RETURN; +} + +void decrement_handler_count() +{ + pthread_mutex_lock(&LOCK_thread_count); + handler_count--; + pthread_mutex_unlock(&LOCK_thread_count); + pthread_cond_signal(&COND_handler_count); +} +#else +#define decrement_handler_count() +#endif /* defined(__NT__) || defined(HAVE_SMEM) */ + + +#ifndef EMBEDDED_LIBRARY +#ifdef __WIN__ +int win_main(int argc, char **argv) +#else +int main(int argc, char **argv) #endif - if (unix_sock != INVALID_SOCKET) - unlink(mysql_unix_port); - unireg_abort(1); +{ + + DEBUGGER_OFF; + + MY_INIT(argv[0]); // init my_sys library & pthreads + +#ifdef _CUSTOMSTARTUPCONFIG_ + if (_cust_check_startup()) + { + / * _cust_check_startup will report startup failure error * / + exit( 1 ); } - if (!opt_noacl) - (void) grant_init((THD*) 0); - init_max_user_conn(); - init_update_queries(); - DBUG_ASSERT(current_thd == 0); +#endif -#ifdef HAVE_DLOPEN - if (!opt_noacl) - udf_init(); + if (init_common_variables(MYSQL_CONFIG_NAME, + argc, argv, load_default_groups)) + unireg_abort(1); // Will do exit + + init_signals(); + if (!(opt_specialflag & SPECIAL_NO_PRIOR)) + my_pthread_setprio(pthread_self(),CONNECT_PRIOR); + pthread_attr_setstacksize(&connection_attrib,thread_stack); +#ifdef HAVE_PTHREAD_ATTR_GETSTACKSIZE + { + /* Retrieve used stack size; Needed for checking stack overflows */ + size_t stack_size= 0; + pthread_attr_getstacksize(&connection_attrib, &stack_size); + /* We must check if stack_size = 0 as Solaris 2.9 can return 0 here */ + if (stack_size && stack_size < thread_stack) + { + if (global_system_variables.log_warnings) + sql_print_warning("Asked for %ld thread stack, but got %ld", + thread_stack, stack_size); + thread_stack= stack_size; + } + } +#endif +#ifdef __NETWARE__ + /* Increasing stacksize of threads on NetWare */ + + pthread_attr_setstacksize(&connection_attrib, NW_THD_STACKSIZE); #endif - if (opt_bootstrap) /* If running with bootstrap, do not start replication. */ - opt_skip_slave_start= 1; - /* init_slave() must be called after the thread keys are created */ - init_slave(); - DBUG_ASSERT(current_thd == 0); + thread_stack_min=thread_stack - STACK_MIN_SIZE; + + (void) thr_setconcurrency(concurrency); // 10 by default + + select_thread=pthread_self(); + select_thread_in_use=1; + init_ssl(); + +#ifdef HAVE_LIBWRAP + libwrapName= my_progname+dirname_length(my_progname); + openlog(libwrapName, LOG_PID, LOG_AUTH); +#endif + + /* + We have enough space for fiddling with the argv, continue + */ + check_data_home(mysql_real_data_home); + if (my_setwd(mysql_real_data_home,MYF(MY_WME))) + { + unireg_abort(1); /* purecov: inspected */ + } + mysql_data_home= mysql_data_home_buff; + mysql_data_home[0]=FN_CURLIB; // all paths are relative from here + mysql_data_home[1]=0; + server_init(); + if (opt_bin_log && !server_id) { server_id= !master_host ? 
1 : 2; - switch (server_id) { #ifdef EXTRA_DEBUG + switch (server_id) { case 1: - sql_print_error("\ -Warning: You have enabled the binary log, but you haven't set server-id to \ + sql_print_warning("\ +You have enabled the binary log, but you haven't set server-id to \ a non-zero value: we force server id to 1; updates will be logged to the \ binary log, but connections from slaves will not be accepted."); break; -#endif case 2: - sql_print_error("\ -Warning: You should set server-id to a non-0 value if master_host is set; \ + sql_print_warning("\ +You should set server-id to a non-0 value if master_host is set; \ we force server id to 2, but this MySQL server will not act as a slave."); break; } +#endif } - if (opt_bin_log) + + if (init_server_components()) + exit(1); + +#ifdef __WIN__ + if (!opt_console) { - open_log(&mysql_bin_log, glob_hostname, opt_bin_logname, "-bin", - opt_binlog_index_name, LOG_BIN, 0, 0, max_binlog_size); - using_update_log=1; + freopen(log_error_file,"a+",stdout); + freopen(log_error_file,"a+",stderr); + FreeConsole(); // Remove window } - else if (opt_log_slave_updates) +#endif + + /* + init signals & alarm + After this we can't quit by a simple unireg_abort + */ + error_handler_hook = my_message_sql; + start_signal_handler(); // Creates pidfile + if (acl_init(opt_noacl) || + my_tz_init((THD *)0, default_tz_name, opt_bootstrap)) { - sql_print_error("\ -Warning: you need to use --log-bin to make --log-slave-updates work. \ -Now disabling --log-slave-updates."); + abort_loop=1; + select_thread_in_use=0; +#ifndef __NETWARE__ + (void) pthread_kill(signal_thread, MYSQL_KILL_SIGNAL); +#endif /* __NETWARE__ */ + + if (!opt_bootstrap) + (void) my_delete(pidfile_name,MYF(MY_WME)); // Not needed anymore + + if (unix_sock != INVALID_SOCKET) + unlink(mysqld_unix_port); + exit(1); } + if (!opt_noacl) + (void) grant_init(); - if (opt_log_slave_updates && replicate_same_server_id) +#ifdef HAVE_DLOPEN + if (!opt_noacl) + udf_init(); +#endif + if (opt_bootstrap) /* If running with bootstrap, do not start replication. */ + opt_skip_slave_start= 1; + /* + init_slave() must be called after the thread keys are created. + Some parts of the code (e.g. SHOW STATUS LIKE 'slave_running' and other + places) assume that active_mi != 0, so let's fail if it's 0 (out of + memory); a message has already been printed. + */ + if (init_slave() && !active_mi) { - sql_print_error("\ -Error: using --replicate-same-server-id in conjunction with \ ---log-slave-updates is impossible, it would lead to infinite loops in this \ -server."); - unireg_abort(1); + end_thr_alarm(1); // Don't allow alarms + unireg_abort(1); } if (opt_bootstrap) { - int error=bootstrap(stdin); + select_thread_in_use= 0; // Allow 'kill' to work + int error= bootstrap(stdin); end_thr_alarm(1); // Don't allow alarms unireg_abort(error ? 
1 : 0); } @@ -2646,85 +3237,21 @@ server."); unireg_abort(1); } } - (void) thr_setconcurrency(concurrency); // 10 by default -#if defined(__WIN__) && !defined(EMBEDDED_LIBRARY) //IRENA - { - hEventShutdown=CreateEvent(0, FALSE, FALSE, shutdown_event_name); - pthread_t hThread; - if (pthread_create(&hThread,&connection_attrib,handle_shutdown,0)) - sql_print_error("Warning: Can't create thread to handle shutdown requests"); - // On "Stop Service" we have to do regular shutdown - Service.SetShutdownEvent(hEventShutdown); - } -#endif -#ifdef OS2 - { - pthread_cond_init( &eventShutdown, NULL); - pthread_t hThread; - if (pthread_create(&hThread,&connection_attrib,handle_shutdown,0)) - sql_print_error("Warning: Can't create thread to handle shutdown requests"); - } -#endif - - if ( -#ifdef HAVE_BERKELEY_DB - !berkeley_skip || -#endif - (flush_time && flush_time != ~(ulong) 0L)) - { - pthread_t hThread; - if (pthread_create(&hThread,&connection_attrib,handle_manager,0)) - sql_print_error("Warning: Can't create thread to manage maintenance"); - } + create_shutdown_thread(); + create_maintenance_thread(); printf(ER(ER_READY),my_progname,server_version, - ((unix_sock == INVALID_SOCKET) ? (char*) "" : mysql_unix_port), - mysql_port); + ((unix_sock == INVALID_SOCKET) ? (char*) "" : mysqld_unix_port), + mysqld_port, ""); if (MYSQL_COMPILATION_COMMENT[0] != '\0') fputs(" " MYSQL_COMPILATION_COMMENT, stdout); + putchar('\n'); fflush(stdout); -#ifdef __NT__ - if (hPipe == INVALID_HANDLE_VALUE && - (!have_tcpip || opt_disable_networking)) - { - sql_print_error("TCP/IP or --enable-named-pipe should be configured on NT OS"); - unireg_abort(1); - } - else - { - pthread_mutex_lock(&LOCK_thread_count); - (void) pthread_cond_init(&COND_handler_count,NULL); - { - pthread_t hThread; - handler_count=0; - if (hPipe != INVALID_HANDLE_VALUE && opt_enable_named_pipe) - { - handler_count++; - if (pthread_create(&hThread,&connection_attrib, - handle_connections_namedpipes, 0)) - { - sql_print_error("Warning: Can't create thread to handle named pipes"); - handler_count--; - } - } - if (have_tcpip && !opt_disable_networking) - { - handler_count++; - if (pthread_create(&hThread,&connection_attrib, - handle_connections_sockets, 0)) - { - sql_print_error("Warning: Can't create thread to handle named pipes"); - handler_count--; - } - } - while (handler_count > 0) - pthread_cond_wait(&COND_handler_count,&LOCK_thread_count); - } - pthread_mutex_unlock(&LOCK_thread_count); - } +#if defined(__NT__) || defined(HAVE_SMEM) + handle_connections_methods(); #else #ifdef __WIN__ if ( !have_tcpip || opt_disable_networking) @@ -2734,13 +3261,10 @@ server."); } #endif handle_connections_sockets(0); -#ifdef EXTRA_DEBUG2 - sql_print_error("Exiting main thread"); -#endif #endif /* __NT__ */ /* (void) pthread_attr_destroy(&connection_attrib); */ - + DBUG_PRINT("quit",("Exiting main thread")); #ifndef __WIN__ @@ -2773,27 +3297,16 @@ server."); CloseHandle(hEventShutdown); } #endif -#ifndef __NETWARE__ - { - uint i; - /* - Wait up to 10 seconds for signal thread to die. We use this mainly to - avoid getting warnings that my_thread_end has not been called - */ - for (i= 0 ; i < 100 && signal_thread_in_use; i++) - { - if (pthread_kill(signal_thread, MYSQL_KILL_SIGNAL)) - break; - my_sleep(100); // Give it time to die - } - } -#endif + wait_for_signal_thread_to_end(); clean_up_mutexes(); my_end(opt_endinfo ? 
MY_CHECK_ERROR | MY_GIVE_INFO : 0); + exit(0); return(0); /* purecov: deadcode */ } +#endif /* EMBEDDED_LIBRARY */ + /**************************************************************************** Main and thread entry function for Win32 @@ -2828,7 +3341,7 @@ static char *add_quoted_string(char *to, const char *from, char *to_end) SYNOPSIS default_service_handling() - argv Pointer to argument list + argv Pointer to argument list servicename Internal name of service displayname Display name of service (in taskbar ?) file_path Path to this program @@ -2887,17 +3400,21 @@ int main(int argc, char **argv) need to have an unique named hEventShudown through the application PID e.g.: MySQLShutdown1890; MySQLShutdown2342 */ - int2str((int) GetCurrentProcessId(),strmov(shutdown_event_name, + int10_to_str((int) GetCurrentProcessId(),strmov(shutdown_event_name, "MySQLShutdown"), 10); + /* Must be initialized early for comparison of service name */ + system_charset_info= &my_charset_utf8_general_ci; + if (Service.GetOS()) /* true NT family */ { char file_path[FN_REFLEN]; my_path(file_path, argv[0], ""); /* Find name in path */ fn_format(file_path,argv[0],file_path,"", MY_REPLACE_DIR | MY_UNPACK_FILENAME | MY_RESOLVE_SYMLINKS); + if (argc == 2) - { + { if (!default_service_handling(argv, MYSQL_SERVICENAME, MYSQL_SERVICENAME, file_path, "")) return 0; @@ -2909,8 +3426,8 @@ int main(int argc, char **argv) but we started a bad tradition by calling it MySQL from the start and we are now stuck with it. */ - if (my_strcasecmp(argv[1],"mysql")) - load_default_groups[3]= argv[1]; + if (my_strcasecmp(system_charset_info, argv[1],"mysql")) + load_default_groups[load_default_groups_sz-2]= argv[1]; start_mode= 1; Service.Init(argv[1], mysql_service); return 0; @@ -2930,8 +3447,8 @@ int main(int argc, char **argv) opt_argc= 2; // Skip service-name opt_argv=argv; start_mode= 1; - if (my_strcasecmp(argv[2],"mysql")) - load_default_groups[3]= argv[2]; + if (my_strcasecmp(system_charset_info, argv[2],"mysql")) + load_default_groups[load_default_groups_sz-2]= argv[2]; Service.Init(argv[2], mysql_service); return 0; } @@ -2970,23 +3487,23 @@ int main(int argc, char **argv) static int bootstrap(FILE *file) { - THD *thd= new THD; - int error; + int error= 0; DBUG_ENTER("bootstrap"); + THD *thd= new THD; thd->bootstrap=1; - thd->client_capabilities=0; my_net_init(&thd->net,(st_vio*) 0); thd->max_client_packet_length= thd->net.max_packet; - thd->master_access= ~0; + thd->master_access= ~(ulong)0; thd->thread_id=thread_id++; thread_count++; bootstrap_file=file; +#ifndef EMBEDDED_LIBRARY // TODO: Enable this if (pthread_create(&thd->real_id,&connection_attrib,handle_bootstrap, (void*) thd)) { - sql_print_error("Warning: Can't create thread to handle bootstrap"); + sql_print_warning("Can't create thread to handle bootstrap"); DBUG_RETURN(-1); } /* Wait for thread to die */ @@ -2997,13 +3514,21 @@ static int bootstrap(FILE *file) DBUG_PRINT("quit",("One thread died (count=%u)",thread_count)); } (void) pthread_mutex_unlock(&LOCK_thread_count); - error= thd->fatal_error; +#else + thd->mysql= 0; + handle_bootstrap((void *)thd); +#endif + + error= thd->is_fatal_error; +#ifndef EMBEDDED_LIBRARY net_end(&thd->net); +#endif thd->cleanup(); delete thd; DBUG_RETURN(error); } + static bool read_init_file(char *file_name) { FILE *file; @@ -3017,6 +3542,7 @@ static bool read_init_file(char *file_name) } +#ifndef EMBEDDED_LIBRARY static void create_new_thread(THD *thd) { DBUG_ENTER("create_new_thread"); @@ -3030,17 +3556,12 @@ static void 
create_new_thread(THD *thd) if (thread_count - delayed_insert_threads >= max_connections+1 || abort_loop) { DBUG_PRINT("error",("Too many connections")); - close_connection(net,ER_CON_COUNT_ERROR); + close_connection(thd, ER_CON_COUNT_ERROR, 1); delete thd; DBUG_VOID_RETURN; } pthread_mutex_lock(&LOCK_thread_count); - if (thread_count-delayed_insert_threads > max_used_connections) - max_used_connections=thread_count-delayed_insert_threads; thd->thread_id=thread_id++; - for (uint i=0; i < 8 ; i++) // Generate password teststring - thd->scramble[i]= (char) (my_rnd(&sql_rand)*94+33); - thd->scramble[8]=0; thd->real_id=pthread_self(); // Keep purify happy @@ -3068,6 +3589,8 @@ static void create_new_thread(THD *thd) thread_count++; thread_created++; threads.append(thd); + if (thread_count-delayed_insert_threads > max_used_connections) + max_used_connections=thread_count-delayed_insert_threads; DBUG_PRINT("info",(("creating thread %d"), thd->thread_id)); thd->connect_time = time(NULL); if ((error=pthread_create(&thd->real_id,&connection_attrib, @@ -3081,9 +3604,9 @@ static void create_new_thread(THD *thd) thd->killed=1; // Safety (void) pthread_mutex_unlock(&LOCK_thread_count); statistic_increment(aborted_connects,&LOCK_status); - net_printf(net,ER_CANT_CREATE_THREAD,error); + net_printf(thd,ER_CANT_CREATE_THREAD,error); (void) pthread_mutex_lock(&LOCK_thread_count); - close_connection(net,0,0); + close_connection(thd,0,0); delete thd; (void) pthread_mutex_unlock(&LOCK_thread_count); DBUG_VOID_RETURN; @@ -3095,6 +3618,8 @@ static void create_new_thread(THD *thd) DBUG_PRINT("info",("Thread created")); DBUG_VOID_RETURN; } +#endif /* EMBEDDED_LIBRARY */ + #ifdef SIGNALS_DONT_BREAK_READ inline void kill_broken_server() @@ -3121,6 +3646,7 @@ inline void kill_broken_server() /* Handle new connections and spawn new process to handle them */ +#ifndef EMBEDDED_LIBRARY extern "C" pthread_handler_decl(handle_connections_sockets, arg __attribute__((unused))) { @@ -3316,7 +3842,7 @@ extern "C" pthread_handler_decl(handle_connections_sockets, continue; } if (sock == unix_sock) - thd->host=(char*) localhost; + thd->host=(char*) my_localhost; #ifdef __WIN__ /* Set default wait_timeout */ ulong wait_timeout= global_system_variables.net_wait_timeout * 1000; @@ -3330,13 +3856,7 @@ extern "C" pthread_handler_decl(handle_connections_sockets, // kill server must be invoked from thread 1! 
kill_server(MYSQL_KILL_SIGNAL); #endif - -#ifdef __NT__ - pthread_mutex_lock(&LOCK_thread_count); - handler_count--; - pthread_mutex_unlock(&LOCK_thread_count); - pthread_cond_signal(&COND_handler_count); -#endif + decrement_handler_count(); DBUG_RETURN(0); } @@ -3407,78 +3927,316 @@ extern "C" pthread_handler_decl(handle_connections_namedpipes,arg) if (!(thd->net.vio = vio_new_win32pipe(hConnectedPipe)) || my_net_init(&thd->net, thd->net.vio)) { - close_connection(&thd->net,ER_OUT_OF_RESOURCES); + close_connection(thd, ER_OUT_OF_RESOURCES, 1); delete thd; continue; } /* host name is unknown */ - thd->host = my_strdup(localhost,MYF(0)); /* Host is unknown */ + thd->host = my_strdup(my_localhost,MYF(0)); /* Host is unknown */ create_new_thread(thd); } - pthread_mutex_lock(&LOCK_thread_count); - handler_count--; - pthread_mutex_unlock(&LOCK_thread_count); - pthread_cond_signal(&COND_handler_count); + decrement_handler_count(); DBUG_RETURN(0); } #endif /* __NT__ */ -/****************************************************************************** -** handle start options +/* + Thread of shared memory's service + + SYNOPSIS + pthread_handler_decl() + handle_connections_shared_memory Thread handle + arg Arguments of thread +*/ + +#ifdef HAVE_SMEM +pthread_handler_decl(handle_connections_shared_memory,arg) +{ + /* file-mapping object, use for create shared memory */ + HANDLE handle_connect_file_map= 0; + char *handle_connect_map= 0; // pointer on shared memory + HANDLE event_connect_answer= 0; + ulong smem_buffer_length= shared_memory_buffer_length + 4; + ulong connect_number= 1; + char tmp[63]; + char *suffix_pos; + char connect_number_char[22], *p; + const char *errmsg= 0; + SECURITY_ATTRIBUTES *sa_event= 0, *sa_mapping= 0; + my_thread_init(); + DBUG_ENTER("handle_connections_shared_memorys"); + DBUG_PRINT("general",("Waiting for allocated shared memory.")); + + if (my_security_attr_create(&sa_event, &errmsg, + GENERIC_ALL, SYNCHRONIZE | EVENT_MODIFY_STATE)) + goto error; + + if (my_security_attr_create(&sa_mapping, &errmsg, + GENERIC_ALL, FILE_MAP_READ | FILE_MAP_WRITE)) + goto error; + + /* + The name of event and file-mapping events create agree next rule: + shared_memory_base_name+unique_part + Where: + shared_memory_base_name is unique value for each server + unique_part is unique value for each object (events and file-mapping) + */ + suffix_pos= strxmov(tmp,shared_memory_base_name,"_",NullS); + strmov(suffix_pos, "CONNECT_REQUEST"); + if ((smem_event_connect_request= CreateEvent(sa_event, + FALSE, FALSE, tmp)) == 0) + { + errmsg= "Could not create request event"; + goto error; + } + strmov(suffix_pos, "CONNECT_ANSWER"); + if ((event_connect_answer= CreateEvent(sa_event, FALSE, FALSE, tmp)) == 0) + { + errmsg="Could not create answer event"; + goto error; + } + strmov(suffix_pos, "CONNECT_DATA"); + if ((handle_connect_file_map= + CreateFileMapping(INVALID_HANDLE_VALUE, sa_mapping, + PAGE_READWRITE, 0, sizeof(connect_number), tmp)) == 0) + { + errmsg= "Could not create file mapping"; + goto error; + } + if ((handle_connect_map= (char *)MapViewOfFile(handle_connect_file_map, + FILE_MAP_WRITE,0,0, + sizeof(DWORD))) == 0) + { + errmsg= "Could not create shared memory service"; + goto error; + } + + while (!abort_loop) + { + /* Wait a request from client */ + WaitForSingleObject(smem_event_connect_request,INFINITE); + + /* + it can be after shutdown command + */ + if (abort_loop) + goto error; + + HANDLE handle_client_file_map= 0; + char *handle_client_map= 0; + HANDLE event_client_wrote= 0; + 
HANDLE event_client_read= 0; // for transfer data server <-> client + HANDLE event_server_wrote= 0; + HANDLE event_server_read= 0; + HANDLE event_conn_closed= 0; + THD *thd= 0; + + p= int10_to_str(connect_number, connect_number_char, 10); + /* + The name of event and file-mapping events create agree next rule: + shared_memory_base_name+unique_part+number_of_connection + Where: + shared_memory_base_name is uniquel value for each server + unique_part is unique value for each object (events and file-mapping) + number_of_connection is connection-number between server and client + */ + suffix_pos= strxmov(tmp,shared_memory_base_name,"_",connect_number_char, + "_",NullS); + strmov(suffix_pos, "DATA"); + if ((handle_client_file_map= + CreateFileMapping(INVALID_HANDLE_VALUE, sa_mapping, + PAGE_READWRITE, 0, smem_buffer_length, tmp)) == 0) + { + errmsg= "Could not create file mapping"; + goto errorconn; + } + if ((handle_client_map= (char*)MapViewOfFile(handle_client_file_map, + FILE_MAP_WRITE,0,0, + smem_buffer_length)) == 0) + { + errmsg= "Could not create memory map"; + goto errorconn; + } + strmov(suffix_pos, "CLIENT_WROTE"); + if ((event_client_wrote= CreateEvent(sa_event, FALSE, FALSE, tmp)) == 0) + { + errmsg= "Could not create client write event"; + goto errorconn; + } + strmov(suffix_pos, "CLIENT_READ"); + if ((event_client_read= CreateEvent(sa_event, FALSE, FALSE, tmp)) == 0) + { + errmsg= "Could not create client read event"; + goto errorconn; + } + strmov(suffix_pos, "SERVER_READ"); + if ((event_server_read= CreateEvent(sa_event, FALSE, FALSE, tmp)) == 0) + { + errmsg= "Could not create server read event"; + goto errorconn; + } + strmov(suffix_pos, "SERVER_WROTE"); + if ((event_server_wrote= CreateEvent(sa_event, + FALSE, FALSE, tmp)) == 0) + { + errmsg= "Could not create server write event"; + goto errorconn; + } + strmov(suffix_pos, "CONNECTION_CLOSED"); + if ((event_conn_closed= CreateEvent(sa_event, + TRUE, FALSE, tmp)) == 0) + { + errmsg= "Could not create closed connection event"; + goto errorconn; + } + if (abort_loop) + goto errorconn; + if (!(thd= new THD)) + goto errorconn; + /* Send number of connection to client */ + int4store(handle_connect_map, connect_number); + if (!SetEvent(event_connect_answer)) + { + errmsg= "Could not send answer event"; + goto errorconn; + } + /* Set event that client should receive data */ + if (!SetEvent(event_client_read)) + { + errmsg= "Could not set client to read mode"; + goto errorconn; + } + if (!(thd->net.vio= vio_new_win32shared_memory(&thd->net, + handle_client_file_map, + handle_client_map, + event_client_wrote, + event_client_read, + event_server_wrote, + event_server_read, + event_conn_closed)) || + my_net_init(&thd->net, thd->net.vio)) + { + close_connection(thd, ER_OUT_OF_RESOURCES, 1); + errmsg= 0; + goto errorconn; + } + thd->host= my_strdup(my_localhost,MYF(0)); /* Host is unknown */ + create_new_thread(thd); + connect_number++; + continue; + +errorconn: + /* Could not form connection; Free used handlers/memort and retry */ + if (errmsg) + { + char buff[180]; + strxmov(buff, "Can't create shared memory connection: ", errmsg, ".", + NullS); + sql_perror(buff); + } + if (handle_client_file_map) + CloseHandle(handle_client_file_map); + if (handle_client_map) + UnmapViewOfFile(handle_client_map); + if (event_server_wrote) + CloseHandle(event_server_wrote); + if (event_server_read) + CloseHandle(event_server_read); + if (event_client_wrote) + CloseHandle(event_client_wrote); + if (event_client_read) + CloseHandle(event_client_read); + 
if (event_conn_closed) + CloseHandle(event_conn_closed); + delete thd; + } + + /* End shared memory handling */ +error: + if (errmsg) + { + char buff[180]; + strxmov(buff, "Can't create shared memory service: ", errmsg, ".", NullS); + sql_perror(buff); + } + my_security_attr_free(sa_event); + my_security_attr_free(sa_mapping); + if (handle_connect_map) UnmapViewOfFile(handle_connect_map); + if (handle_connect_file_map) CloseHandle(handle_connect_file_map); + if (event_connect_answer) CloseHandle(event_connect_answer); + if (smem_event_connect_request) CloseHandle(smem_event_connect_request); + + decrement_handler_count(); + DBUG_RETURN(0); +} +#endif /* HAVE_SMEM */ +#endif /* EMBEDDED_LIBRARY */ + + +/**************************************************************************** + Handle start options ******************************************************************************/ -enum options_mysqld { +enum options_mysqld +{ OPT_ISAM_LOG=256, OPT_SKIP_NEW, OPT_SKIP_GRANT, OPT_SKIP_LOCK, OPT_ENABLE_LOCK, OPT_USE_LOCKING, - OPT_SOCKET, OPT_UPDATE_LOG, - OPT_BIN_LOG, OPT_SKIP_RESOLVE, + OPT_SOCKET, OPT_UPDATE_LOG, + OPT_BIN_LOG, OPT_SKIP_RESOLVE, OPT_SKIP_NETWORKING, OPT_BIN_LOG_INDEX, OPT_BIND_ADDRESS, OPT_PID_FILE, - OPT_SKIP_PRIOR, OPT_BIG_TABLES, + OPT_SKIP_PRIOR, OPT_BIG_TABLES, OPT_STANDALONE, OPT_ONE_THREAD, OPT_CONSOLE, OPT_LOW_PRIORITY_UPDATES, - OPT_SKIP_HOST_CACHE, OPT_LONG_FORMAT, - OPT_FLUSH, OPT_SAFE, + OPT_SKIP_HOST_CACHE, OPT_SHORT_LOG_FORMAT, + OPT_FLUSH, OPT_SAFE, OPT_BOOTSTRAP, OPT_SKIP_SHOW_DB, - OPT_TABLE_TYPE, OPT_INIT_FILE, - OPT_DELAY_KEY_WRITE_ALL, OPT_SLOW_QUERY_LOG, + OPT_STORAGE_ENGINE, OPT_INIT_FILE, + OPT_DELAY_KEY_WRITE_ALL, OPT_SLOW_QUERY_LOG, OPT_DELAY_KEY_WRITE, OPT_CHARSETS_DIR, - OPT_BDB_HOME, OPT_BDB_LOG, + OPT_BDB_HOME, OPT_BDB_LOG, OPT_BDB_TMP, OPT_BDB_SYNC, - OPT_BDB_LOCK, OPT_BDB_SKIP, - OPT_BDB_NO_RECOVER, OPT_BDB_SHARED, + OPT_BDB_LOCK, OPT_BDB, + OPT_BDB_NO_RECOVER, OPT_BDB_SHARED, OPT_MASTER_HOST, OPT_MASTER_USER, OPT_MASTER_PASSWORD, OPT_MASTER_PORT, OPT_MASTER_INFO_FILE, OPT_MASTER_CONNECT_RETRY, OPT_MASTER_RETRY_COUNT, OPT_MASTER_SSL, OPT_MASTER_SSL_KEY, OPT_MASTER_SSL_CERT, OPT_MASTER_SSL_CAPATH, - OPT_MASTER_SSL_CIPHER, - OPT_SQL_BIN_UPDATE_SAME, OPT_REPLICATE_DO_DB, + OPT_MASTER_SSL_CIPHER, OPT_MASTER_SSL_CA, + OPT_SQL_BIN_UPDATE_SAME, OPT_REPLICATE_DO_DB, OPT_REPLICATE_IGNORE_DB, OPT_LOG_SLAVE_UPDATES, OPT_BINLOG_DO_DB, OPT_BINLOG_IGNORE_DB, OPT_WANT_CORE, OPT_CONCURRENT_INSERT, OPT_MEMLOCK, OPT_MYISAM_RECOVER, - OPT_REPLICATE_REWRITE_DB, OPT_SERVER_ID, + OPT_REPLICATE_REWRITE_DB, OPT_SERVER_ID, OPT_SKIP_SLAVE_START, OPT_SKIP_INNOBASE, - OPT_SAFEMALLOC_MEM_LIMIT, OPT_REPLICATE_DO_TABLE, - OPT_REPLICATE_IGNORE_TABLE, OPT_REPLICATE_WILD_DO_TABLE, + OPT_SAFEMALLOC_MEM_LIMIT, OPT_REPLICATE_DO_TABLE, + OPT_REPLICATE_IGNORE_TABLE, OPT_REPLICATE_WILD_DO_TABLE, OPT_REPLICATE_WILD_IGNORE_TABLE, OPT_REPLICATE_SAME_SERVER_ID, - OPT_DISCONNECT_SLAVE_EVENT_COUNT, + OPT_DISCONNECT_SLAVE_EVENT_COUNT, OPT_ABORT_SLAVE_EVENT_COUNT, OPT_INNODB_DATA_HOME_DIR, OPT_INNODB_DATA_FILE_PATH, - OPT_INNODB_LOG_GROUP_HOME_DIR, - OPT_INNODB_LOG_ARCH_DIR, - OPT_INNODB_LOG_ARCHIVE, - OPT_INNODB_FLUSH_LOG_AT_TRX_COMMIT, - OPT_INNODB_FLUSH_METHOD, - OPT_INNODB_FAST_SHUTDOWN, - OPT_SAFE_SHOW_DB, - OPT_INNODB_SKIP, OPT_SKIP_SAFEMALLOC, + OPT_INNODB_LOG_GROUP_HOME_DIR, + OPT_INNODB_LOG_ARCH_DIR, + OPT_INNODB_LOG_ARCHIVE, + OPT_INNODB_FLUSH_LOG_AT_TRX_COMMIT, + OPT_INNODB_FLUSH_METHOD, + OPT_INNODB_FAST_SHUTDOWN, + OPT_INNODB_FILE_PER_TABLE, OPT_CRASH_BINLOG_INNODB, + 
OPT_INNODB_LOCKS_UNSAFE_FOR_BINLOG, + OPT_SAFE_SHOW_DB, OPT_INNODB_SAFE_BINLOG, + OPT_INNODB, OPT_ISAM, + OPT_NDBCLUSTER, OPT_NDB_CONNECTSTRING, OPT_NDB_USE_EXACT_COUNT, + OPT_NDB_FORCE_SEND, OPT_NDB_AUTOINCREMENT_PREFETCH_SZ, + OPT_NDB_SHM, OPT_NDB_OPTIMIZED_NODE_SELECTION, + OPT_SKIP_SAFEMALLOC, OPT_TEMP_POOL, OPT_TX_ISOLATION, OPT_SKIP_STACK_TRACE, OPT_SKIP_SYMLINKS, OPT_MAX_BINLOG_DUMP_EVENTS, OPT_SPORADIC_BINLOG_DUMP_FAIL, @@ -3491,33 +4249,39 @@ enum options_mysqld { OPT_RPL_RECOVERY_RANK,OPT_INIT_RPL_ROLE, OPT_RELAY_LOG, OPT_RELAY_LOG_INDEX, OPT_RELAY_LOG_INFO_FILE, OPT_SLAVE_SKIP_ERRORS, OPT_DES_KEY_FILE, OPT_LOCAL_INFILE, - OPT_RECKLESS_SLAVE, OPT_SSL_SSL, OPT_SSL_KEY, OPT_SSL_CERT, OPT_SSL_CA, OPT_SSL_CAPATH, OPT_SSL_CIPHER, OPT_BACK_LOG, OPT_BINLOG_CACHE_SIZE, OPT_CONNECT_TIMEOUT, OPT_DELAYED_INSERT_TIMEOUT, OPT_DELAYED_INSERT_LIMIT, OPT_DELAYED_QUEUE_SIZE, - OPT_FLUSH_TIME, OPT_FT_MIN_WORD_LEN, - OPT_FT_MAX_WORD_LEN, OPT_FT_MAX_WORD_LEN_FOR_SORT, OPT_FT_STOPWORD_FILE, + OPT_FLUSH_TIME, OPT_FT_MIN_WORD_LEN, OPT_FT_BOOLEAN_SYNTAX, + OPT_FT_MAX_WORD_LEN, OPT_FT_QUERY_EXPANSION_LIMIT, OPT_FT_STOPWORD_FILE, OPT_INTERACTIVE_TIMEOUT, OPT_JOIN_BUFF_SIZE, - OPT_KEY_BUFFER_SIZE, OPT_LONG_QUERY_TIME, + OPT_KEY_BUFFER_SIZE, OPT_KEY_CACHE_BLOCK_SIZE, + OPT_KEY_CACHE_DIVISION_LIMIT, OPT_KEY_CACHE_AGE_THRESHOLD, + OPT_LONG_QUERY_TIME, OPT_LOWER_CASE_TABLE_NAMES, OPT_MAX_ALLOWED_PACKET, OPT_MAX_BINLOG_CACHE_SIZE, OPT_MAX_BINLOG_SIZE, OPT_MAX_CONNECTIONS, OPT_MAX_CONNECT_ERRORS, OPT_MAX_DELAYED_THREADS, OPT_MAX_HEP_TABLE_SIZE, - OPT_MAX_JOIN_SIZE, OPT_MAX_RELAY_LOG_SIZE, OPT_MAX_SORT_LENGTH, + OPT_MAX_JOIN_SIZE, OPT_MAX_PREPARED_STMT_COUNT, + OPT_MAX_RELAY_LOG_SIZE, OPT_MAX_SORT_LENGTH, OPT_MAX_SEEKS_FOR_KEY, OPT_MAX_TMP_TABLES, OPT_MAX_USER_CONNECTIONS, + OPT_MAX_LENGTH_FOR_SORT_DATA, OPT_MAX_WRITE_LOCK_COUNT, OPT_BULK_INSERT_BUFFER_SIZE, + OPT_MAX_ERROR_COUNT, OPT_MYISAM_DATA_POINTER_SIZE, OPT_MYISAM_BLOCK_SIZE, OPT_MYISAM_MAX_EXTRA_SORT_FILE_SIZE, OPT_MYISAM_MAX_SORT_FILE_SIZE, OPT_MYISAM_SORT_BUFFER_SIZE, + OPT_MYISAM_STATS_METHOD, OPT_NET_BUFFER_LENGTH, OPT_NET_RETRY_COUNT, OPT_NET_READ_TIMEOUT, OPT_NET_WRITE_TIMEOUT, - OPT_OPEN_FILES_LIMIT, - OPT_QUERY_CACHE_LIMIT, OPT_QUERY_CACHE_SIZE, + OPT_OPEN_FILES_LIMIT, + OPT_PRELOAD_BUFFER_SIZE, + OPT_QUERY_CACHE_LIMIT, OPT_QUERY_CACHE_MIN_RES_UNIT, OPT_QUERY_CACHE_SIZE, OPT_QUERY_CACHE_TYPE, OPT_QUERY_CACHE_WLOCK_INVALIDATE, OPT_RECORD_BUFFER, - OPT_RECORD_RND_BUFFER, OPT_RELAY_LOG_SPACE_LIMIT, + OPT_RECORD_RND_BUFFER, OPT_RELAY_LOG_SPACE_LIMIT, OPT_RELAY_LOG_PURGE, OPT_SLAVE_NET_TIMEOUT, OPT_SLAVE_COMPRESSED_PROTOCOL, OPT_SLOW_LAUNCH_TIME, - OPT_READONLY, OPT_DEBUGGING, + OPT_SLAVE_TRANS_RETRIES, OPT_READONLY, OPT_DEBUGGING, OPT_SORT_BUFFER, OPT_TABLE_CACHE, OPT_THREAD_CONCURRENCY, OPT_THREAD_CACHE_SIZE, OPT_TMP_TABLE_SIZE, OPT_THREAD_STACK, @@ -3527,6 +4291,7 @@ enum options_mysqld { OPT_INNODB_LOG_FILE_SIZE, OPT_INNODB_LOG_BUFFER_SIZE, OPT_INNODB_BUFFER_POOL_SIZE, + OPT_INNODB_BUFFER_POOL_AWE_MEM_MB, OPT_INNODB_ADDITIONAL_MEM_POOL_SIZE, OPT_INNODB_MAX_PURGE_LAG, OPT_INNODB_FILE_IO_THREADS, @@ -3535,8 +4300,9 @@ enum options_mysqld { OPT_INNODB_FORCE_RECOVERY, OPT_INNODB_STATUS_FILE, OPT_INNODB_MAX_DIRTY_PAGES_PCT, - OPT_INNODB_AUTOEXTEND_INCREMENT, OPT_INNODB_TABLE_LOCKS, + OPT_INNODB_OPEN_FILES, + OPT_INNODB_AUTOEXTEND_INCREMENT, OPT_BDB_CACHE_SIZE, OPT_BDB_LOG_BUFFER_SIZE, OPT_BDB_MAX_LOCK, @@ -3545,7 +4311,27 @@ enum options_mysqld { OPT_RANGE_ALLOC_BLOCK_SIZE, OPT_ALLOW_SUSPICIOUS_UDFS, OPT_QUERY_ALLOC_BLOCK_SIZE, OPT_QUERY_PREALLOC_SIZE, 
OPT_TRANS_ALLOC_BLOCK_SIZE, OPT_TRANS_PREALLOC_SIZE, - OPT_SYNC_FRM, OPT_BDB_NOSYNC + OPT_SYNC_FRM, OPT_SYNC_BINLOG, + OPT_SYNC_REPLICATION, + OPT_SYNC_REPLICATION_SLAVE_ID, + OPT_SYNC_REPLICATION_TIMEOUT, + OPT_BDB_NOSYNC, + OPT_ENABLE_SHARED_MEMORY, + OPT_SHARED_MEMORY_BASE_NAME, + OPT_OLD_PASSWORDS, + OPT_EXPIRE_LOGS_DAYS, + OPT_GROUP_CONCAT_MAX_LEN, + OPT_DEFAULT_COLLATION, + OPT_CHARACTER_SET_CLIENT_HANDSHAKE, + OPT_INIT_CONNECT, + OPT_INIT_SLAVE, + OPT_SECURE_AUTH, + OPT_DATE_FORMAT, + OPT_TIME_FORMAT, + OPT_DATETIME_FORMAT, + OPT_LOG_QUERIES_NOT_USING_INDEXES, + OPT_DEFAULT_TIME_ZONE, + OPT_LOG_SLOW_ADMIN_STATEMENTS }; @@ -3553,7 +4339,16 @@ enum options_mysqld { struct my_option my_long_options[] = { - {"ansi", 'a', "Use ANSI SQL syntax instead of MySQL syntax", 0, 0, 0, + {"help", '?', "Display this help and exit.", + (gptr*) &opt_help, (gptr*) &opt_help, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, + 0, 0}, +#ifdef HAVE_REPLICATION + {"abort-slave-event-count", OPT_ABORT_SLAVE_EVENT_COUNT, + "Option used by mysql-test for debugging and testing of replication.", + (gptr*) &abort_slave_event_count, (gptr*) &abort_slave_event_count, + 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, +#endif /* HAVE_REPLICATION */ + {"ansi", 'a', "Use ANSI SQL syntax instead of MySQL syntax. This mode will also set transaction isolation level 'serializable'.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"allow-suspicious-udfs", OPT_ALLOW_SUSPICIOUS_UDFS, "Allows use of UDFs consisting of only one symbol xxx() " @@ -3566,111 +4361,130 @@ struct my_option my_long_options[] = "Path to installation directory. All paths are usually resolved relative to this.", (gptr*) &mysql_home_ptr, (gptr*) &mysql_home_ptr, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"bdb", OPT_BDB, "Enable Berkeley DB (if this version of MySQL supports it). \ +Disable with --skip-bdb (will save memory).", + (gptr*) &opt_bdb, (gptr*) &opt_bdb, 0, GET_BOOL, NO_ARG, OPT_BDB_DEFAULT, 0, 0, + 0, 0, 0}, #ifdef HAVE_BERKELEY_DB - {"bdb-home", OPT_BDB_HOME, "Berkeley home directory", (gptr*) &berkeley_home, + {"bdb-home", OPT_BDB_HOME, "Berkeley home directory.", (gptr*) &berkeley_home, (gptr*) &berkeley_home, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"bdb-lock-detect", OPT_BDB_LOCK, - "Berkeley lock detect (DEFAULT, OLDEST, RANDOM or YOUNGEST, # sec)", + "Berkeley lock detect (DEFAULT, OLDEST, RANDOM or YOUNGEST, # sec).", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"bdb-logdir", OPT_BDB_LOG, "Berkeley DB log file directory", + {"bdb-logdir", OPT_BDB_LOG, "Berkeley DB log file directory.", (gptr*) &berkeley_logdir, (gptr*) &berkeley_logdir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"bdb-no-recover", OPT_BDB_NO_RECOVER, - "Don't try to recover Berkeley DB tables on start", 0, 0, 0, GET_NO_ARG, + "Don't try to recover Berkeley DB tables on start.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"bdb-no-sync", OPT_BDB_NOSYNC, "Disable synchronously flushing logs. This option is deprecated, use --skip-sync-bdb-logs or sync-bdb-logs=0 instead", // (gptr*) &opt_sync_bdb_logs, (gptr*) &opt_sync_bdb_logs, 0, GET_BOOL, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"sync-bdb-logs", OPT_BDB_SYNC, - "Synchronously flush logs. 
Enabled by default", - (gptr*) &opt_sync_bdb_logs, (gptr*) &opt_sync_bdb_logs, 0, GET_BOOL, - NO_ARG, 1, 0, 0, 0, 0, 0}, {"bdb-shared-data", OPT_BDB_SHARED, - "Start Berkeley DB in multi-process mode", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, + "Start Berkeley DB in multi-process mode.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"bdb-tmpdir", OPT_BDB_TMP, "Berkeley DB tempfile name", + {"bdb-tmpdir", OPT_BDB_TMP, "Berkeley DB tempfile name.", (gptr*) &berkeley_tmpdir, (gptr*) &berkeley_tmpdir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #endif /* HAVE_BERKELEY_DB */ - {"sync-frm", OPT_SYNC_FRM, "Sync .frm to disk on create. Enabled by default", - (gptr*) &opt_sync_frm, (gptr*) &opt_sync_frm, 0, GET_BOOL, NO_ARG, 1, 0, - 0, 0, 0, 0}, - {"skip-bdb", OPT_BDB_SKIP, "Don't use berkeley db (will save memory)", - 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"big-tables", OPT_BIG_TABLES, - "Allow big result sets by saving all temporary sets on file (Solves most 'table full' errors)", + {"big-tables", OPT_BIG_TABLES, + "Allow big result sets by saving all temporary sets on file (Solves most 'table full' errors).", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"bind-address", OPT_BIND_ADDRESS, "IP address to bind to.", + (gptr*) &my_bind_addr_str, (gptr*) &my_bind_addr_str, 0, GET_STR, + REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"binlog-do-db", OPT_BINLOG_DO_DB, "Tells the master it should log updates for the specified database, and exclude all others not explicitly mentioned.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"binlog-ignore-db", OPT_BINLOG_IGNORE_DB, - "Tells the master that updates to the given database should not be logged tothe binary log", + {"binlog-ignore-db", OPT_BINLOG_IGNORE_DB, + "Tells the master that updates to the given database should not be logged tothe binary log.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"bind-address", OPT_BIND_ADDRESS, "IP address to bind to", - (gptr*) &my_bind_addr_str, (gptr*) &my_bind_addr_str, 0, GET_STR, - REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"bootstrap", OPT_BOOTSTRAP, "Used by mysql installation scripts", 0, 0, 0, + {"bootstrap", OPT_BOOTSTRAP, "Used by mysql installation scripts.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"console", OPT_CONSOLE, "Write error output on screen; Don't remove the console window on windows", - (gptr*) &opt_console, (gptr*) &opt_console, 0, GET_BOOL, NO_ARG, 0, 0, 0, - 0, 0, 0}, -#ifdef __WIN__ - {"standalone", OPT_STANDALONE, - "Dummy option to start as a standalone program (NT)", 0, 0, 0, GET_NO_ARG, - NO_ARG, 0, 0, 0, 0, 0, 0}, -#endif - {"core-file", OPT_WANT_CORE, "Write core on errors", 0, 0, 0, GET_NO_ARG, - NO_ARG, 0, 0, 0, 0, 0, 0}, + {"character-set-client-handshake", OPT_CHARACTER_SET_CLIENT_HANDSHAKE, + "Don't ignore client side character set value sent during handshake.", + (gptr*) &opt_character_set_client_handshake, + (gptr*) &opt_character_set_client_handshake, + 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, + {"character-set-server", 'C', "Set the default character set.", + (gptr*) &default_character_set_name, (gptr*) &default_character_set_name, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, + {"character-sets-dir", OPT_CHARSETS_DIR, + "Directory where character sets are.", (gptr*) &charsets_dir, + (gptr*) &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"chroot", 'r', "Chroot mysqld daemon during startup.", (gptr*) &mysqld_chroot, (gptr*) &mysqld_chroot, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"character-sets-dir", OPT_CHARSETS_DIR, - "Directory 
where character sets are", (gptr*) &charsets_dir, - (gptr*) &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"datadir", 'h', "Path to the database root", (gptr*) &mysql_data_home, + {"collation-server", OPT_DEFAULT_COLLATION, "Set the default collation.", + (gptr*) &default_collation_name, (gptr*) &default_collation_name, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, + {"concurrent-insert", OPT_CONCURRENT_INSERT, + "Use concurrent insert with MyISAM. Disable with --skip-concurrent-insert.", + (gptr*) &myisam_concurrent_insert, (gptr*) &myisam_concurrent_insert, + 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, + {"console", OPT_CONSOLE, "Write error output on screen; Don't remove the console window on windows.", + (gptr*) &opt_console, (gptr*) &opt_console, 0, GET_BOOL, NO_ARG, 0, 0, 0, + 0, 0, 0}, + {"core-file", OPT_WANT_CORE, "Write core on errors.", 0, 0, 0, GET_NO_ARG, + NO_ARG, 0, 0, 0, 0, 0, 0}, + {"datadir", 'h', "Path to the database root.", (gptr*) &mysql_data_home, (gptr*) &mysql_data_home, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #ifndef DBUG_OFF {"debug", '#', "Debug log.", (gptr*) &default_dbug_option, (gptr*) &default_dbug_option, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, -#ifdef SAFEMALLOC - {"skip-safemalloc", OPT_SKIP_SAFEMALLOC, - "Don't use the memory allocation checking", 0, 0, 0, GET_NO_ARG, NO_ARG, - 0, 0, 0, 0, 0, 0}, #endif -#endif -#ifdef HAVE_OPENSSL - {"des-key-file", OPT_DES_KEY_FILE, - "Load keys for des_encrypt() and des_encrypt from given file", - (gptr*) &des_key_file, (gptr*) &des_key_file, 0, GET_STR, REQUIRED_ARG, - 0, 0, 0, 0, 0, 0}, -#endif /* HAVE_OPENSSL */ - {"default-character-set", 'C', "Set the default character set", - (gptr*) &sys_charset.value, (gptr*) &sys_charset.value, 0, GET_STR, - REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, - {"default-table-type", OPT_TABLE_TYPE, - "Set the default table type for tables", 0, 0, + {"default-character-set", 'C', "Set the default character set (deprecated option, use --character-set-server instead).", + (gptr*) &default_character_set_name, (gptr*) &default_character_set_name, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, + {"default-collation", OPT_DEFAULT_COLLATION, "Set the default collation (deprecated option, use --collation-server instead).", + (gptr*) &default_collation_name, (gptr*) &default_collation_name, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, + {"default-storage-engine", OPT_STORAGE_ENGINE, + "Set the default storage engine (table type) for tables.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"delay-key-write", OPT_DELAY_KEY_WRITE, "Type of DELAY_KEY_WRITE", + {"default-table-type", OPT_STORAGE_ENGINE, + "(deprecated) Use --default-storage-engine.", 0, 0, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"default-time-zone", OPT_DEFAULT_TIME_ZONE, "Set the default time zone.", + (gptr*) &default_tz_name, (gptr*) &default_tz_name, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, + {"delay-key-write", OPT_DELAY_KEY_WRITE, "Type of DELAY_KEY_WRITE.", 0,0,0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"delay-key-write-for-all-tables", OPT_DELAY_KEY_WRITE_ALL, - "Don't flush key buffers between writes for any MyISAM table (Deprecated option, use --delay-key-write=all instead)", + "Don't flush key buffers between writes for any MyISAM table (Deprecated option, use --delay-key-write=all instead).", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, +#ifdef HAVE_OPENSSL + {"des-key-file", OPT_DES_KEY_FILE, + "Load keys for des_encrypt() and des_encrypt from given file.", + (gptr*) 
&des_key_file, (gptr*) &des_key_file, 0, GET_STR, REQUIRED_ARG, + 0, 0, 0, 0, 0, 0}, +#endif /* HAVE_OPENSSL */ +#ifdef HAVE_REPLICATION + {"disconnect-slave-event-count", OPT_DISCONNECT_SLAVE_EVENT_COUNT, + "Option used by mysql-test for debugging and testing of replication.", + (gptr*) &disconnect_slave_event_count, + (gptr*) &disconnect_slave_event_count, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, + 0, 0, 0}, +#endif /* HAVE_REPLICATION */ {"enable-locking", OPT_ENABLE_LOCK, - "Deprecated option, use --external-locking instead", + "Deprecated option, use --external-locking instead.", (gptr*) &opt_external_locking, (gptr*) &opt_external_locking, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, #ifdef __NT__ - {"enable-named-pipe", OPT_HAVE_NAMED_PIPE, "Enable the named pipe (NT)", + {"enable-named-pipe", OPT_HAVE_NAMED_PIPE, "Enable the named pipe (NT).", (gptr*) &opt_enable_named_pipe, (gptr*) &opt_enable_named_pipe, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, #endif - {"enable-pstack", OPT_DO_PSTACK, "Print a symbolic stack trace on failure", + {"enable-pstack", OPT_DO_PSTACK, "Print a symbolic stack trace on failure.", (gptr*) &opt_do_pstack, (gptr*) &opt_do_pstack, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"exit-info", 'T', "Used for debugging; Use at your own risk!", 0, 0, 0, GET_LONG, OPT_ARG, 0, 0, 0, 0, 0, 0}, - {"flush", OPT_FLUSH, "Flush tables to disk between SQL commands", 0, 0, 0, + {"external-locking", OPT_USE_LOCKING, "Use system (external) locking. With this option enabled you can run myisamchk to test (not repair) tables while the MySQL server is running.", + (gptr*) &opt_external_locking, (gptr*) &opt_external_locking, + 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"flush", OPT_FLUSH, "Flush tables to disk between SQL commands.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, /* We must always support the next option to make scripts like mysqltest easier to do */ @@ -3678,233 +4492,316 @@ struct my_option my_long_options[] = "Set up signals usable for debugging", (gptr*) &opt_debugging, (gptr*) &opt_debugging, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"init-rpl-role", OPT_INIT_RPL_ROLE, "Set the replication role", 0, 0, 0, + {"init-connect", OPT_INIT_CONNECT, "Command(s) that are executed for each new connection", + (gptr*) &opt_init_connect, (gptr*) &opt_init_connect, 0, GET_STR_ALLOC, + REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"init-file", OPT_INIT_FILE, "Read SQL commands from this file at startup.", + (gptr*) &opt_init_file, (gptr*) &opt_init_file, 0, GET_STR, REQUIRED_ARG, + 0, 0, 0, 0, 0, 0}, + {"init-rpl-role", OPT_INIT_RPL_ROLE, "Set the replication role.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"init-slave", OPT_INIT_SLAVE, "Command(s) that are executed when a slave connects to this master", + (gptr*) &opt_init_slave, (gptr*) &opt_init_slave, 0, GET_STR_ALLOC, + REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"innodb", OPT_INNODB, "Enable InnoDB (if this version of MySQL supports it). 
\ +Disable with --skip-innodb (will save memory).", + (gptr*) &opt_innodb, (gptr*) &opt_innodb, 0, GET_BOOL, NO_ARG, OPT_INNODB_DEFAULT, 0, 0, + 0, 0, 0}, {"innodb_data_file_path", OPT_INNODB_DATA_FILE_PATH, - "Path to individual files and their sizes", + "Path to individual files and their sizes.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #ifdef HAVE_INNOBASE_DB - {"innodb_autoextend_increment", OPT_INNODB_AUTOEXTEND_INCREMENT, - "Data file autoextend increment in megabytes", - (gptr*) &srv_auto_extend_increment, - (gptr*) &srv_auto_extend_increment, - 0, GET_LONG, REQUIRED_ARG, 8L, 1L, 1000L, 0, 1L, 0}, {"innodb_data_home_dir", OPT_INNODB_DATA_HOME_DIR, - "The common part for Innodb table spaces", (gptr*) &innobase_data_home_dir, + "The common part for InnoDB table spaces.", (gptr*) &innobase_data_home_dir, (gptr*) &innobase_data_home_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"innodb_log_group_home_dir", OPT_INNODB_LOG_GROUP_HOME_DIR, - "Path to innodb log files.", (gptr*) &innobase_log_group_home_dir, - (gptr*) &innobase_log_group_home_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, - 0, 0}, - {"innodb_log_arch_dir", OPT_INNODB_LOG_ARCH_DIR, - "Where full logs should be archived", (gptr*) &innobase_log_arch_dir, - (gptr*) &innobase_log_arch_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"innodb_log_archive", OPT_INNODB_LOG_ARCHIVE, - "Set to 1 if you want to have logs archived", 0, 0, 0, GET_LONG, OPT_ARG, - 0, 0, 0, 0, 0, 0}, + {"innodb_fast_shutdown", OPT_INNODB_FAST_SHUTDOWN, + "Speeds up server shutdown process.", (gptr*) &innobase_fast_shutdown, + (gptr*) &innobase_fast_shutdown, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, + {"innodb_file_per_table", OPT_INNODB_FILE_PER_TABLE, + "Stores each InnoDB table to an .ibd file in the database dir.", + (gptr*) &innobase_file_per_table, + (gptr*) &innobase_file_per_table, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"innodb_flush_log_at_trx_commit", OPT_INNODB_FLUSH_LOG_AT_TRX_COMMIT, - "Set to 0 (write and flush once per second), 1 (write and flush at each commit) or 2 (write at commit, flush once per second)", + "Set to 0 (write and flush once per second), 1 (write and flush at each commit) or 2 (write at commit, flush once per second).", (gptr*) &innobase_flush_log_at_trx_commit, (gptr*) &innobase_flush_log_at_trx_commit, 0, GET_UINT, OPT_ARG, 1, 0, 2, 0, 0, 0}, {"innodb_flush_method", OPT_INNODB_FLUSH_METHOD, - "With which method to flush data", (gptr*) &innobase_unix_file_flush_method, + "With which method to flush data.", (gptr*) &innobase_unix_file_flush_method, (gptr*) &innobase_unix_file_flush_method, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"innodb_fast_shutdown", OPT_INNODB_FAST_SHUTDOWN, - "Speeds up server shutdown process", (gptr*) &innobase_fast_shutdown, - (gptr*) &innobase_fast_shutdown, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, - {"innodb_status_file", OPT_INNODB_STATUS_FILE, - "Enable SHOW INNODB STATUS output in the innodb_status.<pid> file", - (gptr*) &innobase_create_status_file, (gptr*) &innobase_create_status_file, - 0, GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0}, + {"innodb_locks_unsafe_for_binlog", OPT_INNODB_LOCKS_UNSAFE_FOR_BINLOG, + "Force InnoDB not to use next-key locking. 
Instead use only row-level locking", + (gptr*) &innobase_locks_unsafe_for_binlog, + (gptr*) &innobase_locks_unsafe_for_binlog, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"innodb_log_arch_dir", OPT_INNODB_LOG_ARCH_DIR, + "Where full logs should be archived.", (gptr*) &innobase_log_arch_dir, + (gptr*) &innobase_log_arch_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"innodb_log_archive", OPT_INNODB_LOG_ARCHIVE, + "Set to 1 if you want to have logs archived.", 0, 0, 0, GET_LONG, OPT_ARG, + 0, 0, 0, 0, 0, 0}, + {"innodb_log_group_home_dir", OPT_INNODB_LOG_GROUP_HOME_DIR, + "Path to InnoDB log files.", (gptr*) &innobase_log_group_home_dir, + (gptr*) &innobase_log_group_home_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, + 0, 0}, {"innodb_max_dirty_pages_pct", OPT_INNODB_MAX_DIRTY_PAGES_PCT, - "Percentage of dirty pages allowed in bufferpool", (gptr*) &srv_max_buf_pool_modified_pct, + "Percentage of dirty pages allowed in bufferpool.", (gptr*) &srv_max_buf_pool_modified_pct, (gptr*) &srv_max_buf_pool_modified_pct, 0, GET_ULONG, REQUIRED_ARG, 90, 0, 100, 0, 0, 0}, {"innodb_max_purge_lag", OPT_INNODB_MAX_PURGE_LAG, "Desired maximum length of the purge queue (0 = no limit)", (gptr*) &srv_max_purge_lag, (gptr*) &srv_max_purge_lag, 0, GET_LONG, REQUIRED_ARG, 0, 0, ~0L, 0, 1L, 0}, + {"innodb_status_file", OPT_INNODB_STATUS_FILE, + "Enable SHOW INNODB STATUS output in the innodb_status.<pid> file", + (gptr*) &innobase_create_status_file, (gptr*) &innobase_create_status_file, + 0, GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"innodb_table_locks", OPT_INNODB_TABLE_LOCKS, "Enable InnoDB locking in LOCK TABLES", (gptr*) &global_system_variables.innodb_table_locks, (gptr*) &global_system_variables.innodb_table_locks, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, #endif /* End HAVE_INNOBASE_DB */ - {"help", '?', "Display this help and exit", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, - 0, 0, 0, 0, 0}, - {"init-file", OPT_INIT_FILE, "Read SQL commands from this file at startup", - (gptr*) &opt_init_file, (gptr*) &opt_init_file, 0, GET_STR, REQUIRED_ARG, - 0, 0, 0, 0, 0, 0}, - {"log", 'l', "Log connections and queries to file", (gptr*) &opt_logname, - (gptr*) &opt_logname, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, + {"isam", OPT_ISAM, "Enable ISAM (if this version of MySQL supports it). \ +Disable with --skip-isam.", + (gptr*) &opt_isam, (gptr*) &opt_isam, 0, GET_BOOL, NO_ARG, OPT_ISAM_DEFAULT, 0, 0, + 0, 0, 0}, {"language", 'L', - "Client error messages in given language. May be given as a full path", + "Client error messages in given language. 
May be given as a full path.", (gptr*) &language_ptr, (gptr*) &language_ptr, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"local-infile", OPT_LOCAL_INFILE, - "Enable/disable LOAD DATA LOCAL INFILE (takes values 1|0)", + "Enable/disable LOAD DATA LOCAL INFILE (takes values 1|0).", (gptr*) &opt_local_infile, (gptr*) &opt_local_infile, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, + {"log", 'l', "Log connections and queries to file.", (gptr*) &opt_logname, + (gptr*) &opt_logname, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"log-bin", OPT_BIN_LOG, - "Log update queries in binary format", + "Log update queries in binary format.", (gptr*) &opt_bin_logname, (gptr*) &opt_bin_logname, 0, GET_STR_ALLOC, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"log-bin-index", OPT_BIN_LOG_INDEX, - "File that holds the names for last binary log files", + "File that holds the names for last binary log files.", (gptr*) &opt_binlog_index_name, (gptr*) &opt_binlog_index_name, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"log-isam", OPT_ISAM_LOG, "Log all MyISAM changes to file", - (gptr*) &myisam_log_filename, (gptr*) &myisam_log_filename, 0, GET_STR, + {"log-error", OPT_ERROR_LOG_FILE, "Log error file.", + (gptr*) &log_error_file_ptr, (gptr*) &log_error_file_ptr, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, - {"log-update", OPT_UPDATE_LOG, - "Log updates to file.# where # is a unique number if not given.", - (gptr*) &opt_update_logname, (gptr*) &opt_update_logname, 0, GET_STR, + {"log-isam", OPT_ISAM_LOG, "Log all MyISAM changes to file.", + (gptr*) &myisam_log_filename, (gptr*) &myisam_log_filename, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, - {"log-slow-queries", OPT_SLOW_QUERY_LOG, - "Log slow queries to this log file. Defaults logging to hostname-slow.log", - (gptr*) &opt_slow_logname, (gptr*) &opt_slow_logname, 0, GET_STR, OPT_ARG, - 0, 0, 0, 0, 0, 0}, - {"log-long-format", OPT_LONG_FORMAT, - "Log some extra information to update log", 0, 0, 0, GET_NO_ARG, NO_ARG, - 0, 0, 0, 0, 0, 0}, + {"log-long-format", '0', + "Log some extra information to update log. Please note that this option is deprecated; see --log-short-format option.", + 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"log-queries-not-using-indexes", OPT_LOG_QUERIES_NOT_USING_INDEXES, + "Log queries that are executed without benefit of any index to the slow log if it is open.", + (gptr*) &opt_log_queries_not_using_indexes, (gptr*) &opt_log_queries_not_using_indexes, + 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"log-short-format", OPT_SHORT_LOG_FORMAT, + "Don't log extra information to update and slow-query logs.", + (gptr*) &opt_short_log_format, (gptr*) &opt_short_log_format, + 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"log-slave-updates", OPT_LOG_SLAVE_UPDATES, "Tells the slave to log the updates from the slave thread to the binary log. You will need to turn it on if you plan to daisy-chain the slaves.", (gptr*) &opt_log_slave_updates, (gptr*) &opt_log_slave_updates, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"log-slow-admin-statements", OPT_LOG_SLOW_ADMIN_STATEMENTS, + "Log slow OPTIMIZE, ANALYZE, ALTER and other administrative statements to the slow log if it is open.", + (gptr*) &opt_log_slow_admin_statements, + (gptr*) &opt_log_slow_admin_statements, + 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"log-slow-queries", OPT_SLOW_QUERY_LOG, + "Log slow queries to this log file. Defaults logging to hostname-slow.log file. 
Must be enabled to activate other slow log options.", + (gptr*) &opt_slow_logname, (gptr*) &opt_slow_logname, 0, GET_STR, OPT_ARG, + 0, 0, 0, 0, 0, 0}, + {"log-update", OPT_UPDATE_LOG, + "Log updates to file.# where # is a unique number if not given.", + (gptr*) &opt_update_logname, (gptr*) &opt_update_logname, 0, GET_STR, + OPT_ARG, 0, 0, 0, 0, 0, 0}, + {"log-warnings", 'W', "Log some non-critical warnings to the error log file. Use this option twice or --log-warnings=2 if you also want 'Aborted connections' warnings.", + (gptr*) &global_system_variables.log_warnings, + (gptr*) &max_system_variables.log_warnings, 0, GET_ULONG, OPT_ARG, 1, 0, ~0L, + 0, 0, 0}, {"low-priority-updates", OPT_LOW_PRIORITY_UPDATES, - "INSERT/DELETE/UPDATE has lower priority than selects", + "INSERT/DELETE/UPDATE has lower priority than selects.", (gptr*) &global_system_variables.low_priority_updates, (gptr*) &max_system_variables.low_priority_updates, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"master-connect-retry", OPT_MASTER_CONNECT_RETRY, + "The number of seconds the slave thread will sleep before retrying to connect to the master in case the master goes down or the connection is lost.", + (gptr*) &master_connect_retry, (gptr*) &master_connect_retry, 0, GET_UINT, + REQUIRED_ARG, 60, 0, 0, 0, 0, 0}, {"master-host", OPT_MASTER_HOST, "Master hostname or IP address for replication. If not set, the slave thread will not be started. Note that the setting of master-host will be ignored if there exists a valid master.info file.", (gptr*) &master_host, (gptr*) &master_host, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"master-user", OPT_MASTER_USER, - "The username the slave thread will use for authentication when connecting to the master. The user must have FILE privilege. If the master user is not set, user test is assumed. The value in master.info will take precedence if it can be read.", - (gptr*) &master_user, (gptr*) &master_user, 0, GET_STR, REQUIRED_ARG, 0, 0, - 0, 0, 0, 0}, + {"master-info-file", OPT_MASTER_INFO_FILE, + "The location and name of the file that remembers the master and where the I/O replication \ +thread is in the master's binlogs.", + (gptr*) &master_info_file, (gptr*) &master_info_file, 0, GET_STR, + REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"master-password", OPT_MASTER_PASSWORD, "The password the slave thread will authenticate with when connecting to the master. If not set, an empty password is assumed.The value in master.info will take precedence if it can be read.", - 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + (gptr*)&master_password, (gptr*)&master_password, 0, + GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"master-port", OPT_MASTER_PORT, - "The port the master is listening on. If not set, the compiled setting of MYSQL_PORT is assumed. If you have not tinkered with configure options, this should be 3306. The value in master.info will take precedence if it can be read", + "The port the master is listening on. If not set, the compiled setting of MYSQL_PORT is assumed. If you have not tinkered with configure options, this should be 3306. 
The value in master.info will take precedence if it can be read.", (gptr*) &master_port, (gptr*) &master_port, 0, GET_UINT, REQUIRED_ARG, MYSQL_PORT, 0, 0, 0, 0, 0}, - {"master-connect-retry", OPT_MASTER_CONNECT_RETRY, - "The number of seconds the slave thread will sleep before retrying to connect to the master in case the master goes down or the connection is lost.", - (gptr*) &master_connect_retry, (gptr*) &master_connect_retry, 0, GET_UINT, - REQUIRED_ARG, 60, 0, 0, 0, 0, 0}, {"master-retry-count", OPT_MASTER_RETRY_COUNT, "The number of tries the slave will make to connect to the master before giving up.", (gptr*) &master_retry_count, (gptr*) &master_retry_count, 0, GET_ULONG, REQUIRED_ARG, 3600*24, 0, 0, 0, 0, 0}, - {"master-info-file", OPT_MASTER_INFO_FILE, - "The location and name of the file that remembers the master and where the I/O replication \ -thread is in the master's binlogs.", - (gptr*) &master_info_file, (gptr*) &master_info_file, 0, GET_STR, - REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"master-ssl", OPT_MASTER_SSL, - "Planned to enable the slave to connect to the master using SSL. Does nothing yet.", + "Enable the slave to connect to the master using SSL.", (gptr*) &master_ssl, (gptr*) &master_ssl, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"master-ssl-key", OPT_MASTER_SSL_KEY, - "Master SSL keyfile name. Only applies if you have enabled master-ssl. Does \ -nothing yet.", - (gptr*) &master_ssl_key, (gptr*) &master_ssl_key, 0, GET_STR, OPT_ARG, + {"master-ssl-ca", OPT_MASTER_SSL_CA, + "Master SSL CA file. Only applies if you have enabled master-ssl.", + (gptr*) &master_ssl_ca, (gptr*) &master_ssl_ca, 0, GET_STR, OPT_ARG, + 0, 0, 0, 0, 0, 0}, + {"master-ssl-capath", OPT_MASTER_SSL_CAPATH, + "Master SSL CA path. Only applies if you have enabled master-ssl.", + (gptr*) &master_ssl_capath, (gptr*) &master_ssl_capath, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"master-ssl-cert", OPT_MASTER_SSL_CERT, "Master SSL certificate file name. Only applies if you have enabled \ -master-ssl. Does nothing yet.", +master-ssl", (gptr*) &master_ssl_cert, (gptr*) &master_ssl_cert, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, - {"master-ssl-capath", OPT_MASTER_SSL_CAPATH, - "Master SSL CA path. Only applies if you have enabled master-ssl. \ -Does nothing yet.", - (gptr*) &master_ssl_capath, (gptr*) &master_ssl_capath, 0, GET_STR, OPT_ARG, - 0, 0, 0, 0, 0, 0}, {"master-ssl-cipher", OPT_MASTER_SSL_CIPHER, - "Master SSL cipher. Only applies if you have enabled master-ssl. \ -Does nothing yet.", + "Master SSL cipher. Only applies if you have enabled master-ssl.", (gptr*) &master_ssl_cipher, (gptr*) &master_ssl_capath, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, + {"master-ssl-key", OPT_MASTER_SSL_KEY, + "Master SSL keyfile name. Only applies if you have enabled master-ssl.", + (gptr*) &master_ssl_key, (gptr*) &master_ssl_key, 0, GET_STR, OPT_ARG, + 0, 0, 0, 0, 0, 0}, + {"master-user", OPT_MASTER_USER, + "The username the slave thread will use for authentication when connecting to the master. The user must have FILE privilege. If the master user is not set, user test is assumed. 
The value in master.info will take precedence if it can be read.", + (gptr*) &master_user, (gptr*) &master_user, 0, GET_STR, REQUIRED_ARG, 0, 0, + 0, 0, 0, 0}, +#ifdef HAVE_REPLICATION + {"max-binlog-dump-events", OPT_MAX_BINLOG_DUMP_EVENTS, + "Option used by mysql-test for debugging and testing of replication.", + (gptr*) &max_binlog_dump_events, (gptr*) &max_binlog_dump_events, 0, + GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, +#endif /* HAVE_REPLICATION */ + {"memlock", OPT_MEMLOCK, "Lock mysqld in memory.", (gptr*) &locked_in_memory, + (gptr*) &locked_in_memory, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"myisam-recover", OPT_MYISAM_RECOVER, "Syntax: myisam-recover[=option[,option...]], where option can be DEFAULT, BACKUP, FORCE or QUICK.", (gptr*) &myisam_recover_options_str, (gptr*) &myisam_recover_options_str, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, - {"memlock", OPT_MEMLOCK, "Lock mysqld in memory", (gptr*) &locked_in_memory, - (gptr*) &locked_in_memory, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"disconnect-slave-event-count", OPT_DISCONNECT_SLAVE_EVENT_COUNT, - "Option used by mysql-test for debugging and testing of replication", - (gptr*) &disconnect_slave_event_count, - (gptr*) &disconnect_slave_event_count, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, - 0, 0, 0}, - {"abort-slave-event-count", OPT_ABORT_SLAVE_EVENT_COUNT, - "Option used by mysql-test for debugging and testing of replication", - (gptr*) &abort_slave_event_count, (gptr*) &abort_slave_event_count, - 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"max-binlog-dump-events", OPT_MAX_BINLOG_DUMP_EVENTS, - "Option used by mysql-test for debugging and testing of replication", - (gptr*) &max_binlog_dump_events, (gptr*) &max_binlog_dump_events, 0, - GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"sporadic-binlog-dump-fail", OPT_SPORADIC_BINLOG_DUMP_FAIL, - "Option used by mysql-test for debugging and testing of replication", - (gptr*) &opt_sporadic_binlog_dump_fail, - (gptr*) &opt_sporadic_binlog_dump_fail, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, - 0}, - {"safemalloc-mem-limit", OPT_SAFEMALLOC_MEM_LIMIT, - "Simulate memory shortage when compiled with the --with-debug=full option", - 0, 0, 0, GET_ULL, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"new", 'n', "Use some 4.1 features and syntax (4.1 compatibility mode)", + {"ndbcluster", OPT_NDBCLUSTER, "Enable NDB Cluster (if this version of MySQL supports it). 
\ +Disable with --skip-ndbcluster (will save memory).", + (gptr*) &opt_ndbcluster, (gptr*) &opt_ndbcluster, 0, GET_BOOL, NO_ARG, + OPT_NDBCLUSTER_DEFAULT, 0, 0, 0, 0, 0}, +#ifdef HAVE_NDBCLUSTER_DB + {"ndb-connectstring", OPT_NDB_CONNECTSTRING, + "Connect string for ndbcluster.", + (gptr*) &opt_ndbcluster_connectstring, + (gptr*) &opt_ndbcluster_connectstring, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"ndb-autoincrement-prefetch-sz", OPT_NDB_AUTOINCREMENT_PREFETCH_SZ, + "Specify number of autoincrement values that are prefetched.", + (gptr*) &global_system_variables.ndb_autoincrement_prefetch_sz, + (gptr*) &global_system_variables.ndb_autoincrement_prefetch_sz, + 0, GET_ULONG, REQUIRED_ARG, 32, 1, 256, 0, 0, 0}, + {"ndb-force-send", OPT_NDB_FORCE_SEND, + "Force send of buffers to ndb immediately without waiting for " + "other threads.", + (gptr*) &global_system_variables.ndb_force_send, + (gptr*) &global_system_variables.ndb_force_send, + 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, + {"ndb_force_send", OPT_NDB_FORCE_SEND, + "same as --ndb-force-send.", + (gptr*) &global_system_variables.ndb_force_send, + (gptr*) &global_system_variables.ndb_force_send, + 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, + {"ndb-use-exact-count", OPT_NDB_USE_EXACT_COUNT, + "Use exact records count during query planning and for fast " + "select count(*), disable for faster queries.", + (gptr*) &global_system_variables.ndb_use_exact_count, + (gptr*) &global_system_variables.ndb_use_exact_count, + 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, + {"ndb_use_exact_count", OPT_NDB_USE_EXACT_COUNT, + "same as --ndb-use-exact-count.", + (gptr*) &global_system_variables.ndb_use_exact_count, + (gptr*) &global_system_variables.ndb_use_exact_count, + 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, + {"ndb-shm", OPT_NDB_SHM, + "Use shared memory connections when available.", + (gptr*) &opt_ndb_shm, + (gptr*) &opt_ndb_shm, + 0, GET_BOOL, OPT_ARG, OPT_NDB_SHM_DEFAULT, 0, 0, 0, 0, 0}, + {"ndb-optimized-node-selection", OPT_NDB_OPTIMIZED_NODE_SELECTION, + "Select nodes for transactions in a more optimal way.", + (gptr*) &opt_ndb_optimized_node_selection, + (gptr*) &opt_ndb_optimized_node_selection, + 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, +#endif + {"new", 'n', "Use very new possible 'unsafe' functions.", (gptr*) &global_system_variables.new_mode, (gptr*) &max_system_variables.new_mode, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, #ifdef NOT_YET - {"no-mix-table-types", OPT_NO_MIX_TYPE, "Don't allow commands with uses two different table types", + {"no-mix-table-types", OPT_NO_MIX_TYPE, "Don't allow commands with uses two different table types.", (gptr*) &opt_no_mix_types, (gptr*) &opt_no_mix_types, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, #endif - {"old-protocol", 'o', "Use the old (3.20) protocol client/server protocol", - (gptr*) &protocol_version, (gptr*) &protocol_version, 0, GET_UINT, NO_ARG, - PROTOCOL_VERSION, 0, 0, 0, 0, 0}, + {"old-passwords", OPT_OLD_PASSWORDS, "Use old password encryption method (needed for 4.0 and older clients).", + (gptr*) &global_system_variables.old_passwords, + (gptr*) &max_system_variables.old_passwords, 0, GET_BOOL, NO_ARG, + 0, 0, 0, 0, 0, 0}, #ifdef ONE_THREAD {"one-thread", OPT_ONE_THREAD, - "Only use one thread (for debugging under Linux)", 0, 0, 0, GET_NO_ARG, + "Only use one thread (for debugging under Linux).", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, #endif - {"pid-file", OPT_PID_FILE, "Pid file used by safe_mysqld", + {"pid-file", OPT_PID_FILE, "Pid file used by safe_mysqld.", (gptr*) 
&pidfile_name_ptr, (gptr*) &pidfile_name_ptr, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"log-error", OPT_ERROR_LOG_FILE, "Log error file", - (gptr*) &log_error_file_ptr, (gptr*) &log_error_file_ptr, 0, GET_STR, - OPT_ARG, 0, 0, 0, 0, 0, 0}, - {"port", 'P', "Port number to use for connection.", (gptr*) &mysql_port, - (gptr*) &mysql_port, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"port", 'P', "Port number to use for connection.", (gptr*) &mysqld_port, + (gptr*) &mysqld_port, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"relay-log", OPT_RELAY_LOG, + "The location and name to use for relay logs.", + (gptr*) &opt_relay_logname, (gptr*) &opt_relay_logname, 0, + GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"relay-log-index", OPT_RELAY_LOG_INDEX, + "The location and name to use for the file that keeps a list of the last \ +relay logs.", + (gptr*) &opt_relaylog_index_name, (gptr*) &opt_relaylog_index_name, 0, + GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"relay-log-info-file", OPT_RELAY_LOG_INFO_FILE, + "The location and name of the file that remembers where the SQL replication \ +thread is in the relay logs.", + (gptr*) &relay_log_info_file, (gptr*) &relay_log_info_file, 0, GET_STR, + REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"replicate-do-db", OPT_REPLICATE_DO_DB, "Tells the slave thread to restrict replication to the specified database. To specify more than one database, use the directive multiple times, once for each database. Note that this will only work if you do not use cross-database queries such as UPDATE some_db.some_table SET foo='bar' while having selected a different or no database. If you need cross database updates to work, make sure you have 3.23.28 or later, and use replicate-wild-do-table=db_name.%.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"replicate-do-table", OPT_REPLICATE_DO_TABLE, "Tells the slave thread to restrict replication to the specified table. To specify more than one table, use the directive multiple times, once for each table. This will work for cross-database updates, in contrast to replicate-do-db.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"replicate-wild-do-table", OPT_REPLICATE_WILD_DO_TABLE, - "Tells the slave thread to restrict replication to the tables that match the specified wildcard pattern. To specify more than one table, use the directive multiple times, once for each table. This will work for cross-database updates. Example: replicate-wild-do-table=foo%.bar% will replicate only updates to tables in all databases that start with foo and whose table names start with bar", - 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"replicate-ignore-db", OPT_REPLICATE_IGNORE_DB, "Tells the slave thread to not replicate to the specified database. To specify more than one database to ignore, use the directive multiple times, once for each database. This option will not work if you use cross database updates. If you need cross database updates to work, make sure you have 3.23.28 or later, and use replicate-wild-ignore-table=db_name.%. ", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"replicate-ignore-table", OPT_REPLICATE_IGNORE_TABLE, "Tells the slave thread to not replicate to the specified table. To specify more than one table to ignore, use the directive multiple times, once for each table. 
This will work for cross-datbase updates, in contrast to replicate-ignore-db.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"replicate-rewrite-db", OPT_REPLICATE_REWRITE_DB, + "Updates to a database with a different name than the original. Example: replicate-rewrite-db=master_db_name->slave_db_name.", + 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"replicate-wild-do-table", OPT_REPLICATE_WILD_DO_TABLE, + "Tells the slave thread to restrict replication to the tables that match the specified wildcard pattern. To specify more than one table, use the directive multiple times, once for each table. This will work for cross-database updates. Example: replicate-wild-do-table=foo%.bar% will replicate only updates to tables in all databases that start with foo and whose table names start with bar.", + 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"replicate-wild-ignore-table", OPT_REPLICATE_WILD_IGNORE_TABLE, "Tells the slave thread to not replicate to the tables that match the given wildcard pattern. To specify more than one table to ignore, use the directive multiple times, once for each table. This will work for cross-database updates. Example: replicate-wild-ignore-table=foo%.bar% will not do updates to tables in databases that start with foo and whose table names start with bar.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"replicate-rewrite-db", OPT_REPLICATE_REWRITE_DB, - "Updates to a database with a different name than the original. Example: replicate-rewrite-db=master_db_name->slave_db_name", - 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, +#ifdef HAVE_REPLICATION {"replicate-same-server-id", OPT_REPLICATE_SAME_SERVER_ID, "In replication, if set to 1, do not skip events having our server id. \ Default value is 0 (to break infinite loops in circular replication). \ @@ -3912,147 +4809,167 @@ Can't be set to 1 if --log-slave-updates is used.", (gptr*) &replicate_same_server_id, (gptr*) &replicate_same_server_id, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, +#endif // In replication, we may need to tell the other servers how to connect {"report-host", OPT_REPORT_HOST, "Hostname or IP of the slave to be reported to to the master during slave registration. Will appear in the output of SHOW SLAVE HOSTS. Leave unset if you do not want the slave to register itself with the master. Note that it is not sufficient for the master to simply read the IP of the slave off the socket once the slave connects. Due to NAT and other routing issues, that IP may not be valid for connecting to the slave from the master or other hosts.", (gptr*) &report_host, (gptr*) &report_host, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"report-user", OPT_REPORT_USER, "Undocumented", (gptr*) &report_user, - (gptr*) &report_user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"report-password", OPT_REPORT_PASSWORD, "Undocumented", + {"report-password", OPT_REPORT_PASSWORD, "Undocumented.", (gptr*) &report_password, (gptr*) &report_password, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"report-port", OPT_REPORT_PORT, "Port for connecting to slave reported to the master during slave registration. Set it only if the slave is listening on a non-default port or if you have a special tunnel from the master or other clients to the slave. 
If not sure, leave this option unset.", (gptr*) &report_port, (gptr*) &report_port, 0, GET_UINT, REQUIRED_ARG, MYSQL_PORT, 0, 0, 0, 0, 0}, - {"rpl-recovery-rank", OPT_RPL_RECOVERY_RANK, "Undocumented", + {"report-user", OPT_REPORT_USER, "Undocumented.", (gptr*) &report_user, + (gptr*) &report_user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"rpl-recovery-rank", OPT_RPL_RECOVERY_RANK, "Undocumented.", (gptr*) &rpl_recovery_rank, (gptr*) &rpl_recovery_rank, 0, GET_ULONG, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"relay-log", OPT_RELAY_LOG, - "The location and name to use for relay logs", - (gptr*) &opt_relay_logname, (gptr*) &opt_relay_logname, 0, - GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"relay-log-index", OPT_RELAY_LOG_INDEX, - "The location and name to use for the file that keeps a list of the last \ -relay logs", - (gptr*) &opt_relaylog_index_name, (gptr*) &opt_relaylog_index_name, 0, - GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"safe-mode", OPT_SAFE, "Skip some optimize stages (for testing).", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, #ifndef TO_BE_DELETED {"safe-show-database", OPT_SAFE_SHOW_DB, - "Deprecated option; One should use GRANT SHOW DATABASES instead...", + "Deprecated option; use GRANT SHOW DATABASES instead...", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, #endif {"safe-user-create", OPT_SAFE_USER_CREATE, - "Don't allow new user creation by the user who has no write privileges to the mysql.user table", + "Don't allow new user creation by the user who has no write privileges to the mysql.user table.", (gptr*) &opt_safe_user_create, (gptr*) &opt_safe_user_create, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"safemalloc-mem-limit", OPT_SAFEMALLOC_MEM_LIMIT, + "Simulate memory shortage when compiled with the --with-debug=full option.", + 0, 0, 0, GET_ULL, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"secure-auth", OPT_SECURE_AUTH, "Disallow authentication for accounts that have old (pre-4.1) passwords.", + (gptr*) &opt_secure_auth, (gptr*) &opt_secure_auth, 0, GET_BOOL, NO_ARG, + my_bool(0), 0, 0, 0, 0, 0}, {"server-id", OPT_SERVER_ID, - "Uniquely identifies the server instance in the community of replication partners", + "Uniquely identifies the server instance in the community of replication partners.", (gptr*) &server_id, (gptr*) &server_id, 0, GET_ULONG, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"set-variable", 'O', "Change the value of a variable. Please note that this option is deprecated;you can set variables directly with --variable-name=value.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, +#ifdef HAVE_SMEM + {"shared-memory", OPT_ENABLE_SHARED_MEMORY, + "Enable the shared memory.",(gptr*) &opt_enable_shared_memory, (gptr*) &opt_enable_shared_memory, + 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, +#endif +#ifdef HAVE_SMEM + {"shared-memory-base-name",OPT_SHARED_MEMORY_BASE_NAME, + "Base name of shared memory.", (gptr*) &shared_memory_base_name, (gptr*) &shared_memory_base_name, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, +#endif {"show-slave-auth-info", OPT_SHOW_SLAVE_AUTH_INFO, "Show user and password in SHOW SLAVE HOSTS on this master", (gptr*) &opt_show_slave_auth_info, (gptr*) &opt_show_slave_auth_info, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"concurrent-insert", OPT_CONCURRENT_INSERT, - "Use concurrent insert with MyISAM. Disable with prefix --skip-", - (gptr*) &myisam_concurrent_insert, (gptr*) &myisam_concurrent_insert, - 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"skip-grant-tables", OPT_SKIP_GRANT, "Start without grant tables. 
This gives all users FULL ACCESS to all tables!", (gptr*) &opt_noacl, (gptr*) &opt_noacl, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"skip-innodb", OPT_INNODB_SKIP, "Don't use Innodb (will save memory)", - 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"skip-host-cache", OPT_SKIP_HOST_CACHE, "Don't cache host names.", 0, 0, 0, + GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"skip-locking", OPT_SKIP_LOCK, - "Deprecated option, use --skip-external-locking instead", + "Deprecated option, use --skip-external-locking instead.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"skip-host-cache", OPT_SKIP_HOST_CACHE, "Don't cache host names", 0, 0, 0, - GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"skip-name-resolve", OPT_SKIP_RESOLVE, - "Don't resolve hostnames. All hostnames are IP's or 'localhost'", + "Don't resolve hostnames. All hostnames are IP's or 'localhost'.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"skip-networking", OPT_SKIP_NETWORKING, "Don't allow connection with TCP/IP.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"skip-new", OPT_SKIP_NEW, "Don't use new, possible wrong routines.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, +#ifndef DBUG_OFF +#ifdef SAFEMALLOC + {"skip-safemalloc", OPT_SKIP_SAFEMALLOC, + "Don't use the memory allocation checking.", 0, 0, 0, GET_NO_ARG, NO_ARG, + 0, 0, 0, 0, 0, 0}, +#endif +#endif {"skip-show-database", OPT_SKIP_SHOW_DB, - "Don't allow 'SHOW DATABASE' commands", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, + "Don't allow 'SHOW DATABASE' commands.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"skip-slave-start", OPT_SKIP_SLAVE_START, "If set, slave is not autostarted.", (gptr*) &opt_skip_slave_start, (gptr*) &opt_skip_slave_start, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"skip-stack-trace", OPT_SKIP_STACK_TRACE, - "Don't print a stack trace on failure", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, + "Don't print a stack trace on failure.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"skip-symlink", OPT_SKIP_SYMLINKS, "Don't allow symlinking of tables. Deprecated option. Use --skip-symbolic-links instead", + {"skip-symlink", OPT_SKIP_SYMLINKS, "Don't allow symlinking of tables. Deprecated option. 
Use --skip-symbolic-links instead.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"skip-thread-priority", OPT_SKIP_PRIOR, "Don't give threads different priorities.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"relay-log-info-file", OPT_RELAY_LOG_INFO_FILE, - "The location and name of the file that remembers where the SQL replication \ -thread is in the relay logs", - (gptr*) &relay_log_info_file, (gptr*) &relay_log_info_file, 0, GET_STR, - REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, +#ifdef HAVE_REPLICATION {"slave-load-tmpdir", OPT_SLAVE_LOAD_TMPDIR, "The location where the slave should put its temporary files when \ -replicating a LOAD DATA INFILE command", +replicating a LOAD DATA INFILE command.", (gptr*) &slave_load_tmpdir, (gptr*) &slave_load_tmpdir, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"slave-skip-errors", OPT_SLAVE_SKIP_ERRORS, - "Tells the slave thread to continue replication when a query returns an error from the provided list", + "Tells the slave thread to continue replication when a query returns an error from the provided list.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"socket", OPT_SOCKET, "Socket file to use for connection", - (gptr*) &mysql_unix_port, (gptr*) &mysql_unix_port, 0, GET_STR, +#endif + {"socket", OPT_SOCKET, "Socket file to use for connection.", + (gptr*) &mysqld_unix_port, (gptr*) &mysqld_unix_port, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, +#ifdef HAVE_REPLICATION + {"sporadic-binlog-dump-fail", OPT_SPORADIC_BINLOG_DUMP_FAIL, + "Option used by mysql-test for debugging and testing of replication.", + (gptr*) &opt_sporadic_binlog_dump_fail, + (gptr*) &opt_sporadic_binlog_dump_fail, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, + 0}, +#endif /* HAVE_REPLICATION */ {"sql-bin-update-same", OPT_SQL_BIN_UPDATE_SAME, "If set, setting SQL_LOG_BIN to a value will automatically set SQL_LOG_UPDATE to the same value and vice versa.", (gptr*) &opt_sql_bin_update, (gptr*) &opt_sql_bin_update, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"sql-mode", OPT_SQL_MODE, - "Syntax: sql-mode=option[,option[,option...]] where option can be one of: REAL_AS_FLOAT, PIPES_AS_CONCAT, ANSI_QUOTES, IGNORE_SPACE, SERIALIZE, ONLY_FULL_GROUP_BY, NO_UNSIGNED_SUBTRACTION.", + "Syntax: sql-mode=option[,option[,option...]] where option can be one of: REAL_AS_FLOAT, PIPES_AS_CONCAT, ANSI_QUOTES, IGNORE_SPACE, ONLY_FULL_GROUP_BY, NO_UNSIGNED_SUBTRACTION.", (gptr*) &sql_mode_str, (gptr*) &sql_mode_str, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #ifdef HAVE_OPENSSL #include "sslopt-longopts.h" #endif +#ifdef __WIN__ + {"standalone", OPT_STANDALONE, + "Dummy option to start as a standalone program (NT).", 0, 0, 0, GET_NO_ARG, + NO_ARG, 0, 0, 0, 0, 0, 0}, +#endif + {"symbolic-links", 's', "Enable symbolic link support.", + (gptr*) &my_use_symdir, (gptr*) &my_use_symdir, 0, GET_BOOL, NO_ARG, + IF_PURIFY(0,1), 0, 0, 0, 0, 0}, {"temp-pool", OPT_TEMP_POOL, "Using this option will cause most temporary files created to use a small set of names, rather than a unique name for each new file.", (gptr*) &use_temp_pool, (gptr*) &use_temp_pool, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, - {"tmpdir", 't', "Path for temporary files", (gptr*) &opt_mysql_tmpdir, + {"tmpdir", 't', + "Path for temporary files. 
Several paths may be specified, separated by a " +#if defined( __WIN__) || defined(OS2) || defined(__NETWARE__) + "semicolon (;)" +#else + "colon (:)" +#endif + ", in this case they are used in a round-robin fashion.", + (gptr*) &opt_mysql_tmpdir, (gptr*) &opt_mysql_tmpdir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"transaction-isolation", OPT_TX_ISOLATION, - "Default transaction isolation level", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, + "Default transaction isolation level.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"external-locking", OPT_USE_LOCKING, "Use system (external) locking. With this option enabled you can run myisamchk to test (not repair) tables while the MySQL server is running", - (gptr*) &opt_external_locking, (gptr*) &opt_external_locking, - 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"use-symbolic-links", 's', "Enable symbolic link support. Deprecated option; Use --symbolic-links instead", + {"use-symbolic-links", 's', "Enable symbolic link support. Deprecated option; use --symbolic-links instead.", (gptr*) &my_use_symdir, (gptr*) &my_use_symdir, 0, GET_BOOL, NO_ARG, IF_PURIFY(0,1), 0, 0, 0, 0, 0}, - {"symbolic-links", 's', "Enable symbolic link support", - (gptr*) &my_use_symdir, (gptr*) &my_use_symdir, 0, GET_BOOL, NO_ARG, - IF_PURIFY(0,1), 0, 0, 0, 0, 0}, - {"user", 'u', "Run mysqld daemon as user", 0, 0, 0, GET_STR, REQUIRED_ARG, + {"user", 'u', "Run mysqld daemon as user.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"version", 'V', "Output version information and exit", 0, 0, 0, GET_NO_ARG, + {"verbose", 'v', "Used with --help option for detailed help", + (gptr*) &opt_verbose, (gptr*) &opt_verbose, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, + 0, 0}, + {"version", 'V', "Output version information and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"version", 'v', "Synonym for option -v", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, - 0, 0, 0, 0}, - {"log-warnings", 'W', "Log some not critical warnings to the log file", - (gptr*) &global_system_variables.log_warnings, - (gptr*) &max_system_variables.log_warnings, 0, GET_ULONG, OPT_ARG, 1, 0, ~0L, - 0, 0, 0}, - {"warnings", 'W', "Deprecated ; Use --log-warnings instead", + {"warnings", 'W', "Deprecated; use --log-warnings instead.", (gptr*) &global_system_variables.log_warnings, (gptr*) &max_system_variables.log_warnings, 0, GET_ULONG, OPT_ARG, 1, 0, ~0L, 0, 0, 0}, @@ -4065,6 +4982,10 @@ replicating a LOAD DATA INFILE command", "The buffer that is allocated to cache index and rows for BDB tables.", (gptr*) &berkeley_cache_size, (gptr*) &berkeley_cache_size, 0, GET_ULONG, REQUIRED_ARG, KEY_CACHE_SIZE, 20*1024, (long) ~0, 0, IO_SIZE, 0}, + /* QQ: The following should be removed soon! (bdb_max_lock preferred) */ + {"bdb_lock_max", OPT_BDB_MAX_LOCK, "Synonym for bdb_max_lock.", + (gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG, + REQUIRED_ARG, 10000, 0, (long) ~0, 0, 1, 0}, {"bdb_log_buffer_size", OPT_BDB_LOG_BUFFER_SIZE, "The buffer that is allocated to cache index and rows for BDB tables.", (gptr*) &berkeley_log_buffer_size, (gptr*) &berkeley_log_buffer_size, 0, @@ -4073,94 +4994,164 @@ replicating a LOAD DATA INFILE command", "The maximum number of locks you can have active on a BDB table.", (gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG, REQUIRED_ARG, 10000, 0, (long) ~0, 0, 1, 0}, - /* QQ: The following should be removed soon! 
*/ - {"bdb_lock_max", OPT_BDB_MAX_LOCK, "Synonym for bdb_max_lock", - (gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG, - REQUIRED_ARG, 10000, 0, (long) ~0, 0, 1, 0}, #endif /* HAVE_BERKELEY_DB */ {"binlog_cache_size", OPT_BINLOG_CACHE_SIZE, "The size of the cache to hold the SQL statements for the binary log during a transaction. If you often use big, multi-statement transactions you can increase this to get more performance.", (gptr*) &binlog_cache_size, (gptr*) &binlog_cache_size, 0, GET_ULONG, REQUIRED_ARG, 32*1024L, IO_SIZE, ~0L, 0, IO_SIZE, 0}, - {"connect_timeout", OPT_CONNECT_TIMEOUT, - "The number of seconds the mysqld server is waiting for a connect packet before responding with Bad handshake", + {"bulk_insert_buffer_size", OPT_BULK_INSERT_BUFFER_SIZE, + "Size of tree cache used in bulk insert optimisation. Note that this is a limit per thread!", + (gptr*) &global_system_variables.bulk_insert_buff_size, + (gptr*) &max_system_variables.bulk_insert_buff_size, + 0, GET_ULONG, REQUIRED_ARG, 8192*1024, 0, ~0L, 0, 1, 0}, + {"connect_timeout", OPT_CONNECT_TIMEOUT, + "The number of seconds the mysqld server is waiting for a connect packet before responding with 'Bad handshake'.", (gptr*) &connect_timeout, (gptr*) &connect_timeout, 0, GET_ULONG, REQUIRED_ARG, CONNECT_TIMEOUT, 2, LONG_TIMEOUT, 0, 1, 0 }, - {"delayed_insert_timeout", OPT_DELAYED_INSERT_TIMEOUT, - "How long a INSERT DELAYED thread should wait for INSERT statements before terminating.", - (gptr*) &delayed_insert_timeout, (gptr*) &delayed_insert_timeout, 0, - GET_ULONG, REQUIRED_ARG, DELAYED_WAIT_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0}, +#ifdef HAVE_REPLICATION + {"crash_binlog_innodb", OPT_CRASH_BINLOG_INNODB, + "Used only for testing, to crash when writing Nth event to binlog.", + (gptr*) &opt_crash_binlog_innodb, (gptr*) &opt_crash_binlog_innodb, + 0, GET_UINT, REQUIRED_ARG, 0, 0, ~(uint)0, 0, 1, 0}, +#endif + { "date_format", OPT_DATE_FORMAT, + "The DATE format (For future).", + (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_DATE], + (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_DATE], + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + { "datetime_format", OPT_DATETIME_FORMAT, + "The DATETIME/TIMESTAMP format (for future).", + (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_DATETIME], + (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_DATETIME], + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + { "default_week_format", OPT_DEFAULT_WEEK_FORMAT, + "The default week format used by WEEK() functions.", + (gptr*) &global_system_variables.default_week_format, + (gptr*) &max_system_variables.default_week_format, + 0, GET_ULONG, REQUIRED_ARG, 0, 0, 7L, 0, 1, 0}, {"delayed_insert_limit", OPT_DELAYED_INSERT_LIMIT, "After inserting delayed_insert_limit rows, the INSERT DELAYED handler will check if there are any SELECT statements pending. If so, it allows these to execute before continuing.", (gptr*) &delayed_insert_limit, (gptr*) &delayed_insert_limit, 0, GET_ULONG, REQUIRED_ARG, DELAYED_LIMIT, 1, ~0L, 0, 1, 0}, + {"delayed_insert_timeout", OPT_DELAYED_INSERT_TIMEOUT, + "How long a INSERT DELAYED thread should wait for INSERT statements before terminating.", + (gptr*) &delayed_insert_timeout, (gptr*) &delayed_insert_timeout, 0, + GET_ULONG, REQUIRED_ARG, DELAYED_WAIT_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0}, { "delayed_queue_size", OPT_DELAYED_QUEUE_SIZE, "What size queue (in rows) should be allocated for handling INSERT DELAYED. 
If the queue becomes full, any client that does INSERT DELAYED will wait until there is room in the queue again.", (gptr*) &delayed_queue_size, (gptr*) &delayed_queue_size, 0, GET_ULONG, REQUIRED_ARG, DELAYED_QUEUE_SIZE, 1, ~0L, 0, 1, 0}, + {"expire_logs_days", OPT_EXPIRE_LOGS_DAYS, + "If non-zero, binary logs will be purged after expire_logs_days " + "days; possible purges happen at startup and at binary log rotation.", + (gptr*) &expire_logs_days, + (gptr*) &expire_logs_days, 0, GET_ULONG, + REQUIRED_ARG, 0, 0, 99, 0, 1, 0}, { "flush_time", OPT_FLUSH_TIME, "A dedicated thread is created to flush all tables at the given interval.", (gptr*) &flush_time, (gptr*) &flush_time, 0, GET_ULONG, REQUIRED_ARG, FLUSH_TIME, 0, LONG_TIMEOUT, 0, 1, 0}, - { "ft_min_word_len", OPT_FT_MIN_WORD_LEN, - "The minimum length of the word to be included in a FULLTEXT index. Note: FULLTEXT indexes must be rebuilt after changing this variable.", - (gptr*) &ft_min_word_len, (gptr*) &ft_min_word_len, 0, GET_ULONG, - REQUIRED_ARG, 4, 1, HA_FT_MAXLEN, 0, 1, 0}, + { "ft_boolean_syntax", OPT_FT_BOOLEAN_SYNTAX, + "List of operators for MATCH ... AGAINST ( ... IN BOOLEAN MODE)", + 0, 0, 0, GET_STR, + REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "ft_max_word_len", OPT_FT_MAX_WORD_LEN, "The maximum length of the word to be included in a FULLTEXT index. Note: FULLTEXT indexes must be rebuilt after changing this variable.", (gptr*) &ft_max_word_len, (gptr*) &ft_max_word_len, 0, GET_ULONG, - REQUIRED_ARG, HA_FT_MAXLEN, 10, HA_FT_MAXLEN, 0, 1, 0}, - { "ft_max_word_len_for_sort", OPT_FT_MAX_WORD_LEN_FOR_SORT, - "The maximum length of the word for repair_by_sorting. Longer words are included the slow way. The lower this value, the more words will be put in one sort bucket.", - (gptr*) &ft_max_word_len_for_sort, (gptr*) &ft_max_word_len_for_sort, 0, GET_ULONG, - REQUIRED_ARG, 20, 4, HA_FT_MAXLEN, 0, 1, 0}, + REQUIRED_ARG, HA_FT_MAXCHARLEN, 10, HA_FT_MAXCHARLEN, 0, 1, 0}, + { "ft_min_word_len", OPT_FT_MIN_WORD_LEN, + "The minimum length of the word to be included in a FULLTEXT index. Note: FULLTEXT indexes must be rebuilt after changing this variable.", + (gptr*) &ft_min_word_len, (gptr*) &ft_min_word_len, 0, GET_ULONG, + REQUIRED_ARG, 4, 1, HA_FT_MAXCHARLEN, 0, 1, 0}, + { "ft_query_expansion_limit", OPT_FT_QUERY_EXPANSION_LIMIT, + "Number of best matches to use for query expansion", + (gptr*) &ft_query_expansion_limit, (gptr*) &ft_query_expansion_limit, 0, GET_ULONG, + REQUIRED_ARG, 20, 0, 1000, 0, 1, 0}, { "ft_stopword_file", OPT_FT_STOPWORD_FILE, "Use stopwords from this file instead of built-in list.", (gptr*) &ft_stopword_file, (gptr*) &ft_stopword_file, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + { "group_concat_max_len", OPT_GROUP_CONCAT_MAX_LEN, + "The maximum length of the result of function group_concat.", + (gptr*) &global_system_variables.group_concat_max_len, + (gptr*) &max_system_variables.group_concat_max_len, 0, GET_ULONG, + REQUIRED_ARG, 1024, 4, (long) ~0, 0, 1, 0}, #ifdef HAVE_INNOBASE_DB - {"innodb_mirrored_log_groups", OPT_INNODB_MIRRORED_LOG_GROUPS, - "Number of identical copies of log groups we keep for the database. Currently this should be set to 1.", - (gptr*) &innobase_mirrored_log_groups, - (gptr*) &innobase_mirrored_log_groups, 0, GET_LONG, REQUIRED_ARG, 1, 1, 10, - 0, 1, 0}, - {"innodb_log_files_in_group", OPT_INNODB_LOG_FILES_IN_GROUP, - "Number of log files in the log group. InnoDB writes to the files in a circular fashion. 
Value 3 is recommended here.", - (gptr*) &innobase_log_files_in_group, (gptr*) &innobase_log_files_in_group, - 0, GET_LONG, REQUIRED_ARG, 2, 2, 100, 0, 1, 0}, - {"innodb_log_file_size", OPT_INNODB_LOG_FILE_SIZE, - "Size of each log file in a log group in megabytes.", - (gptr*) &innobase_log_file_size, (gptr*) &innobase_log_file_size, 0, - GET_LONG, REQUIRED_ARG, 5*1024*1024L, 1*1024*1024L, ~0L, 0, 1024*1024L, 0}, - {"innodb_log_buffer_size", OPT_INNODB_LOG_BUFFER_SIZE, - "The size of the buffer which InnoDB uses to write log to the log files on disk.", - (gptr*) &innobase_log_buffer_size, (gptr*) &innobase_log_buffer_size, 0, - GET_LONG, REQUIRED_ARG, 1024*1024L, 256*1024L, ~0L, 0, 1024, 0}, - {"innodb_buffer_pool_size", OPT_INNODB_BUFFER_POOL_SIZE, - "The size of the memory buffer InnoDB uses to cache data and indexes of its tables.", - (gptr*) &innobase_buffer_pool_size, (gptr*) &innobase_buffer_pool_size, 0, - GET_LONG, REQUIRED_ARG, 8*1024*1024L, 1024*1024L, ~0L, 0, 1024*1024L, 0}, {"innodb_additional_mem_pool_size", OPT_INNODB_ADDITIONAL_MEM_POOL_SIZE, "Size of a memory pool InnoDB uses to store data dictionary information and other internal data structures.", (gptr*) &innobase_additional_mem_pool_size, (gptr*) &innobase_additional_mem_pool_size, 0, GET_LONG, REQUIRED_ARG, 1*1024*1024L, 512*1024L, ~0L, 0, 1024, 0}, + {"innodb_autoextend_increment", OPT_INNODB_AUTOEXTEND_INCREMENT, + "Data file autoextend increment in megabytes", + (gptr*) &srv_auto_extend_increment, + (gptr*) &srv_auto_extend_increment, + 0, GET_LONG, REQUIRED_ARG, 8L, 1L, 1000L, 0, 1L, 0}, + {"innodb_buffer_pool_awe_mem_mb", OPT_INNODB_BUFFER_POOL_AWE_MEM_MB, + "If Windows AWE is used, the size of InnoDB buffer pool allocated from the AWE memory.", + (gptr*) &innobase_buffer_pool_awe_mem_mb, (gptr*) &innobase_buffer_pool_awe_mem_mb, 0, + GET_LONG, REQUIRED_ARG, 0, 0, 63000, 0, 1, 0}, + {"innodb_buffer_pool_size", OPT_INNODB_BUFFER_POOL_SIZE, + "The size of the memory buffer InnoDB uses to cache data and indexes of its tables.", + (gptr*) &innobase_buffer_pool_size, (gptr*) &innobase_buffer_pool_size, 0, + GET_LONG, REQUIRED_ARG, 8*1024*1024L, 1024*1024L, ~0L, 0, 1024*1024L, 0}, {"innodb_file_io_threads", OPT_INNODB_FILE_IO_THREADS, "Number of file I/O threads in InnoDB.", (gptr*) &innobase_file_io_threads, (gptr*) &innobase_file_io_threads, 0, GET_LONG, REQUIRED_ARG, 4, 4, 64, 0, 1, 0}, + {"innodb_force_recovery", OPT_INNODB_FORCE_RECOVERY, + "Helps to save your data in case the disk image of the database becomes corrupt.", + (gptr*) &innobase_force_recovery, (gptr*) &innobase_force_recovery, 0, + GET_LONG, REQUIRED_ARG, 0, 0, 6, 0, 1, 0}, {"innodb_lock_wait_timeout", OPT_INNODB_LOCK_WAIT_TIMEOUT, "Timeout in seconds an InnoDB transaction may wait for a lock before being rolled back.", (gptr*) &innobase_lock_wait_timeout, (gptr*) &innobase_lock_wait_timeout, 0, GET_LONG, REQUIRED_ARG, 50, 1, 1024 * 1024 * 1024, 0, 1, 0}, + {"innodb_log_buffer_size", OPT_INNODB_LOG_BUFFER_SIZE, + "The size of the buffer which InnoDB uses to write log to the log files on disk.", + (gptr*) &innobase_log_buffer_size, (gptr*) &innobase_log_buffer_size, 0, + GET_LONG, REQUIRED_ARG, 1024*1024L, 256*1024L, ~0L, 0, 1024, 0}, + {"innodb_log_file_size", OPT_INNODB_LOG_FILE_SIZE, + "Size of each log file in a log group in megabytes.", + (gptr*) &innobase_log_file_size, (gptr*) &innobase_log_file_size, 0, + GET_LONG, REQUIRED_ARG, 5*1024*1024L, 1*1024*1024L, ~0L, 0, 1024*1024L, 0}, + {"innodb_log_files_in_group", OPT_INNODB_LOG_FILES_IN_GROUP, + 
"Number of log files in the log group. InnoDB writes to the files in a circular fashion. Value 3 is recommended here.", + (gptr*) &innobase_log_files_in_group, (gptr*) &innobase_log_files_in_group, + 0, GET_LONG, REQUIRED_ARG, 2, 2, 100, 0, 1, 0}, + {"innodb_mirrored_log_groups", OPT_INNODB_MIRRORED_LOG_GROUPS, + "Number of identical copies of log groups we keep for the database. Currently this should be set to 1.", + (gptr*) &innobase_mirrored_log_groups, + (gptr*) &innobase_mirrored_log_groups, 0, GET_LONG, REQUIRED_ARG, 1, 1, 10, + 0, 1, 0}, + {"innodb_open_files", OPT_INNODB_OPEN_FILES, + "How many files at the maximum InnoDB keeps open at the same time.", + (gptr*) &innobase_open_files, (gptr*) &innobase_open_files, 0, + GET_LONG, REQUIRED_ARG, 300L, 10L, ~0L, 0, 1L, 0}, +#ifdef HAVE_REPLICATION + /* + Disabled for the 4.1.3 release. Disabling just this paragraph of code is + enough, as then user can't set it to 1 so it will always be ignored in the + rest of code. + */ +#if MYSQL_VERSION_ID >= 40103 + /* + innodb_safe_binlog is not a variable, just an option. Does not make + sense to make it a variable, as it is only used at startup (and so the + value would be lost at next startup, so setting it on the fly would have no + effect). + */ + {"innodb_safe_binlog", OPT_INNODB_SAFE_BINLOG, + "After a crash recovery by InnoDB, truncate the binary log after the last " + "not-rolled-back statement/transaction.", + (gptr*) &opt_innodb_safe_binlog, (gptr*) &opt_innodb_safe_binlog, + 0, GET_BOOL, NO_ARG, 0, 0, 1, 0, 1, 0}, +#endif +#endif {"innodb_thread_concurrency", OPT_INNODB_THREAD_CONCURRENCY, "Helps in performance tuning in heavily concurrent environments.", (gptr*) &innobase_thread_concurrency, (gptr*) &innobase_thread_concurrency, 0, GET_LONG, REQUIRED_ARG, 8, 1, 1000, 0, 1, 0}, - {"innodb_force_recovery", OPT_INNODB_FORCE_RECOVERY, - "Helps to save your data in case the disk image of the database becomes corrupt.", - (gptr*) &innobase_force_recovery, (gptr*) &innobase_force_recovery, 0, - GET_LONG, REQUIRED_ARG, 0, 0, 6, 0, 1, 0}, #endif /* HAVE_INNOBASE_DB */ {"interactive_timeout", OPT_INTERACTIVE_TIMEOUT, "The number of seconds the server waits for activity on an interactive connection before closing it.", @@ -4174,10 +5165,30 @@ replicating a LOAD DATA INFILE command", REQUIRED_ARG, 128*1024L, IO_SIZE*2+MALLOC_OVERHEAD, ~0L, MALLOC_OVERHEAD, IO_SIZE, 0}, {"key_buffer_size", OPT_KEY_BUFFER_SIZE, - "The size of the buffer used for index blocks. Increase this to get better index handling (for all reads and multiple writes) to as much as you can afford; 64M on a 256M machine that mainly runs MySQL is quite common.", - (gptr*) &keybuff_size, (gptr*) &keybuff_size, 0, GET_ULL, + "The size of the buffer used for index blocks for MyISAM tables. Increase this to get better index handling (for all reads and multiple writes) to as much as you can afford; 64M on a 256M machine that mainly runs MySQL is quite common.", + (gptr*) &dflt_key_cache_var.param_buff_size, + (gptr*) 0, + 0, (GET_ULL | GET_ASK_ADDR), REQUIRED_ARG, KEY_CACHE_SIZE, MALLOC_OVERHEAD, UINT_MAX32, MALLOC_OVERHEAD, IO_SIZE, 0}, + {"key_cache_age_threshold", OPT_KEY_CACHE_AGE_THRESHOLD, + "This characterizes the number of hits a hot block has to be untouched until it is considered aged enough to be downgraded to a warm block. 
This specifies the percentage ratio of that number of hits to the total number of blocks in key cache", + (gptr*) &dflt_key_cache_var.param_age_threshold, + (gptr*) 0, + 0, (GET_ULONG | GET_ASK_ADDR), REQUIRED_ARG, + 300, 100, ~0L, 0, 100, 0}, + {"key_cache_block_size", OPT_KEY_CACHE_BLOCK_SIZE, + "The default size of key cache blocks", + (gptr*) &dflt_key_cache_var.param_block_size, + (gptr*) 0, + 0, (GET_ULONG | GET_ASK_ADDR), REQUIRED_ARG, + KEY_CACHE_BLOCK_SIZE , 512, 1024*16, MALLOC_OVERHEAD, 512, 0}, + {"key_cache_division_limit", OPT_KEY_CACHE_DIVISION_LIMIT, + "The minimum percentage of warm blocks in key cache", + (gptr*) &dflt_key_cache_var.param_division_limit, + (gptr*) 0, + 0, (GET_ULONG | GET_ASK_ADDR) , REQUIRED_ARG, 100, + 1, 100, 0, 1, 0}, {"long_query_time", OPT_LONG_QUERY_TIME, "Log all queries that have taken more than long_query_time seconds to execute to file.", (gptr*) &global_system_variables.long_query_time, @@ -4208,19 +5219,24 @@ value. Will also apply to relay logs if max_relay_log_size is 0. \ The minimum value for this variable is 4096.", (gptr*) &max_binlog_size, (gptr*) &max_binlog_size, 0, GET_ULONG, REQUIRED_ARG, 1024*1024L*1024L, IO_SIZE, 1024*1024L*1024L, 0, IO_SIZE, 0}, - {"max_connections", OPT_MAX_CONNECTIONS, - "The number of simultaneous clients allowed.", (gptr*) &max_connections, - (gptr*) &max_connections, 0, GET_ULONG, REQUIRED_ARG, 100, 1, 16384, 0, 1, - 0}, {"max_connect_errors", OPT_MAX_CONNECT_ERRORS, "If there is more than this number of interrupted connections from a host this host will be blocked from further connections.", (gptr*) &max_connect_errors, (gptr*) &max_connect_errors, 0, GET_ULONG, REQUIRED_ARG, MAX_CONNECT_ERRORS, 1, ~0L, 0, 1, 0}, + {"max_connections", OPT_MAX_CONNECTIONS, + "The number of simultaneous clients allowed.", (gptr*) &max_connections, + (gptr*) &max_connections, 0, GET_ULONG, REQUIRED_ARG, 100, 1, 16384, 0, 1, + 0}, {"max_delayed_threads", OPT_MAX_DELAYED_THREADS, "Don't start more than this number of threads to handle INSERT DELAYED statements. 
If set to zero, which means INSERT DELAYED is not used.", (gptr*) &global_system_variables.max_insert_delayed_threads, (gptr*) &max_system_variables.max_insert_delayed_threads, 0, GET_ULONG, REQUIRED_ARG, 20, 0, 16384, 0, 1, 0}, + {"max_error_count", OPT_MAX_ERROR_COUNT, + "Max number of errors/warnings to store for a statement.", + (gptr*) &global_system_variables.max_error_count, + (gptr*) &max_system_variables.max_error_count, + 0, GET_ULONG, REQUIRED_ARG, DEFAULT_ERROR_COUNT, 0, 65535, 0, 1, 0}, {"max_heap_table_size", OPT_MAX_HEP_TABLE_SIZE, "Don't allow creation of heap tables bigger than this.", (gptr*) &global_system_variables.max_heap_table_size, @@ -4231,10 +5247,17 @@ The minimum value for this variable is 4096.", (gptr*) &global_system_variables.max_join_size, (gptr*) &max_system_variables.max_join_size, 0, GET_HA_ROWS, REQUIRED_ARG, ~0L, 1, ~0L, 0, 1, 0}, + {"max_length_for_sort_data", OPT_MAX_LENGTH_FOR_SORT_DATA, + "Max number of bytes in sorted records.", + (gptr*) &global_system_variables.max_length_for_sort_data, + (gptr*) &max_system_variables.max_length_for_sort_data, 0, GET_ULONG, + REQUIRED_ARG, 1024, 4, 8192*1024L, 0, 1, 0}, + {"max_prepared_stmt_count", OPT_MAX_PREPARED_STMT_COUNT, + "Maximum number of prepared statements in the server.", + (gptr*) &max_prepared_stmt_count, (gptr*) &max_prepared_stmt_count, + 0, GET_ULONG, REQUIRED_ARG, 16382, 0, 1*1024*1024, 0, 1, 0}, {"max_relay_log_size", OPT_MAX_RELAY_LOG_SIZE, - "If non-zero: relay log will be rotated automatically when the size exceeds \ -this value; if zero (the default): when the size exceeds max_binlog_size. \ -0 expected, the minimum value for this variable is 4096.", + "If non-zero: relay log will be rotated automatically when the size exceeds this value; if zero (the default): when the size exceeds max_binlog_size. 0 excepted, the minimum value for this variable is 4096.", (gptr*) &max_relay_log_size, (gptr*) &max_relay_log_size, 0, GET_ULONG, REQUIRED_ARG, 0L, 0L, 1024*1024L*1024L, 0, IO_SIZE, 0}, { "max_seeks_for_key", OPT_MAX_SEEKS_FOR_KEY, @@ -4260,25 +5283,25 @@ this value; if zero (the default): when the size exceeds max_binlog_size. \ "After this many write locks, allow some read locks to run in between.", (gptr*) &max_write_lock_count, (gptr*) &max_write_lock_count, 0, GET_ULONG, REQUIRED_ARG, ~0L, 1, ~0L, 0, 1, 0}, - {"bulk_insert_buffer_size", OPT_BULK_INSERT_BUFFER_SIZE, - "Size of tree cache used in bulk insert optimisation. 
Note that this is a limit per thread!", - (gptr*) &global_system_variables.bulk_insert_buff_size, - (gptr*) &max_system_variables.bulk_insert_buff_size, - 0, GET_ULONG, REQUIRED_ARG, 8192*1024, 0, ~0L, 0, 1, 0}, {"myisam_block_size", OPT_MYISAM_BLOCK_SIZE, - "Block size to be used for MyISAM index pages", + "Block size to be used for MyISAM index pages.", (gptr*) &opt_myisam_block_size, (gptr*) &opt_myisam_block_size, 0, GET_ULONG, REQUIRED_ARG, MI_KEY_BLOCK_LENGTH, MI_MIN_KEY_BLOCK_LENGTH, MI_MAX_KEY_BLOCK_LENGTH, 0, MI_MIN_KEY_BLOCK_LENGTH, 0}, + {"myisam_data_pointer_size", OPT_MYISAM_DATA_POINTER_SIZE, + "Default pointer size to be used for MyISAM tables.", + (gptr*) &myisam_data_pointer_size, + (gptr*) &myisam_data_pointer_size, 0, GET_ULONG, REQUIRED_ARG, + 4, 2, 7, 0, 1, 0}, {"myisam_max_extra_sort_file_size", OPT_MYISAM_MAX_EXTRA_SORT_FILE_SIZE, - "Used to help MySQL to decide when to use the slow but safe key cache index create method", + "Used to help MySQL to decide when to use the slow but safe key cache index create method.", (gptr*) &global_system_variables.myisam_max_extra_sort_file_size, (gptr*) &max_system_variables.myisam_max_extra_sort_file_size, 0, GET_ULL, REQUIRED_ARG, (ulonglong) MI_MAX_TEMP_LENGTH, 0, (ulonglong) MAX_FILE_SIZE, 0, 1, 0}, {"myisam_max_sort_file_size", OPT_MYISAM_MAX_SORT_FILE_SIZE, - "Don't use the fast sort index method to created index if the temporary file would get bigger than this!", + "Don't use the fast sort index method to create an index if the temporary file would get bigger than this.", (gptr*) &global_system_variables.myisam_max_sort_file_size, (gptr*) &max_system_variables.myisam_max_sort_file_size, 0, GET_ULL, REQUIRED_ARG, (longlong) LONG_MAX, 0, (ulonglong) MAX_FILE_SIZE, @@ -4293,21 +5316,27 @@ this value; if zero (the default): when the size exceeds max_binlog_size. \ (gptr*) &global_system_variables.myisam_sort_buff_size, (gptr*) &max_system_variables.myisam_sort_buff_size, 0, GET_ULONG, REQUIRED_ARG, 8192*1024, 4, ~0L, 0, 1, 0}, + {"myisam_stats_method", OPT_MYISAM_STATS_METHOD, + "Specifies how MyISAM index statistics collection code should treat NULLs. 
" + "Possible values of name are \"nulls_unequal\" (default behavior for 4.1/5.0), " + "\"nulls_equal\" (emulate 4.0 behavior), and \"nulls_ignored\".", + (gptr*) &myisam_stats_method_str, (gptr*) &myisam_stats_method_str, 0, + GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"net_buffer_length", OPT_NET_BUFFER_LENGTH, "Buffer length for TCP/IP and socket communication.", (gptr*) &global_system_variables.net_buffer_length, (gptr*) &max_system_variables.net_buffer_length, 0, GET_ULONG, REQUIRED_ARG, 16384, 1024, 1024*1024L, 0, 1024, 0}, - {"net_retry_count", OPT_NET_RETRY_COUNT, - "If a read on a communication port is interrupted, retry this many times before giving up.", - (gptr*) &global_system_variables.net_retry_count, - (gptr*) &max_system_variables.net_retry_count,0, - GET_ULONG, REQUIRED_ARG, MYSQLD_NET_RETRY_COUNT, 1, ~0L, 0, 1, 0}, {"net_read_timeout", OPT_NET_READ_TIMEOUT, "Number of seconds to wait for more data from a connection before aborting the read.", (gptr*) &global_system_variables.net_read_timeout, (gptr*) &max_system_variables.net_read_timeout, 0, GET_ULONG, REQUIRED_ARG, NET_READ_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0}, + {"net_retry_count", OPT_NET_RETRY_COUNT, + "If a read on a communication port is interrupted, retry this many times before giving up.", + (gptr*) &global_system_variables.net_retry_count, + (gptr*) &max_system_variables.net_retry_count,0, + GET_ULONG, REQUIRED_ARG, MYSQLD_NET_RETRY_COUNT, 1, ~0L, 0, 1, 0}, {"net_write_timeout", OPT_NET_WRITE_TIMEOUT, "Number of seconds to wait for a block to be written to a connection before aborting the write.", (gptr*) &global_system_variables.net_write_timeout, (gptr*) &max_system_variables.net_write_timeout, 0, GET_ULONG, REQUIRED_ARG, @@ -4316,7 +5345,12 @@ this value; if zero (the default): when the size exceeds max_binlog_size. \ {"open_files_limit", OPT_OPEN_FILES_LIMIT, "If this is not 0, then mysqld will use this value to reserve file descriptors to use with setrlimit(). If this value is 0 then mysqld will reserve max_connections*5 or max_connections + table_cache*2 (whichever is larger) number of files.", (gptr*) &open_files_limit, (gptr*) &open_files_limit, 0, GET_ULONG, - REQUIRED_ARG, 0, 0, 65535, 0, 1, 0}, + REQUIRED_ARG, 0, 0, OS_FILE_LIMIT, 0, 1, 0}, + {"preload_buffer_size", OPT_PRELOAD_BUFFER_SIZE, + "The size of the buffer that is allocated when preloading indexes", + (gptr*) &global_system_variables.preload_buff_size, + (gptr*) &max_system_variables.preload_buff_size, 0, GET_ULONG, + REQUIRED_ARG, 32*1024L, 1024, 1024*1024*1024L, 0, 1, 0}, {"query_alloc_block_size", OPT_QUERY_ALLOC_BLOCK_SIZE, "Allocation block size for query parsing and execution", (gptr*) &global_system_variables.query_alloc_block_size, @@ -4327,6 +5361,11 @@ this value; if zero (the default): when the size exceeds max_binlog_size. \ "Don't cache results that are bigger than this.", (gptr*) &query_cache_limit, (gptr*) &query_cache_limit, 0, GET_ULONG, REQUIRED_ARG, 1024*1024L, 0, (longlong) ULONG_MAX, 0, 1, 0}, + {"query_cache_min_res_unit", OPT_QUERY_CACHE_MIN_RES_UNIT, + "Minimal size of unit in which space for results is allocated (last unit will be trimmed after writing all result data).", + (gptr*) &query_cache_min_res_unit, (gptr*) &query_cache_min_res_unit, + 0, GET_ULONG, REQUIRED_ARG, QUERY_CACHE_MIN_RESULT_DATA_SIZE, + 0, (longlong) ULONG_MAX, 0, 1, 0}, #endif /*HAVE_QUERY_CACHE*/ {"query_cache_size", OPT_QUERY_CACHE_SIZE, "The memory allocated to store results from old queries.", @@ -4348,12 +5387,23 @@ this value; if zero (the default): when the size exceeds max_binlog_size. 
\ "Persistent buffer for query parsing and execution", (gptr*) &global_system_variables.query_prealloc_size, (gptr*) &max_system_variables.query_prealloc_size, 0, GET_ULONG, - REQUIRED_ARG, QUERY_ALLOC_PREALLOC_SIZE, 1024, ~0L, 0, 1024, 0}, + REQUIRED_ARG, QUERY_ALLOC_PREALLOC_SIZE, QUERY_ALLOC_PREALLOC_SIZE, + ~0L, 0, 1024, 0}, + {"range_alloc_block_size", OPT_RANGE_ALLOC_BLOCK_SIZE, + "Allocation block size for storing ranges during optimization", + (gptr*) &global_system_variables.range_alloc_block_size, + (gptr*) &max_system_variables.range_alloc_block_size, 0, GET_ULONG, + REQUIRED_ARG, RANGE_ALLOC_BLOCK_SIZE, 4096, ~0L, 0, 1024, 0}, {"read_buffer_size", OPT_RECORD_BUFFER, "Each thread that does a sequential scan allocates a buffer of this size for each table it scans. If you do many sequential scans, you may want to increase this value.", (gptr*) &global_system_variables.read_buff_size, (gptr*) &max_system_variables.read_buff_size,0, GET_ULONG, REQUIRED_ARG, 128*1024L, IO_SIZE*2+MALLOC_OVERHEAD, ~0L, MALLOC_OVERHEAD, IO_SIZE, 0}, + {"read_only", OPT_READONLY, + "Make all tables readonly, with the exception of replication (slave) threads and users with the SUPER privilege", + (gptr*) &opt_readonly, + (gptr*) &opt_readonly, + 0, GET_BOOL, NO_ARG, 0, 0, 1, 0, 1, 0}, {"read_rnd_buffer_size", OPT_RECORD_RND_BUFFER, "When reading rows in sorted order after a sort, the rows are read through this buffer to avoid a disk seeks. If not set, then it's set to the value of record_buffer.", (gptr*) &global_system_variables.read_rnd_buff_size, @@ -4365,13 +5415,19 @@ this value; if zero (the default): when the size exceeds max_binlog_size. \ (gptr*) &global_system_variables.read_buff_size, (gptr*) &max_system_variables.read_buff_size,0, GET_ULONG, REQUIRED_ARG, 128*1024L, IO_SIZE*2+MALLOC_OVERHEAD, ~0L, MALLOC_OVERHEAD, IO_SIZE, 0}, +#ifdef HAVE_REPLICATION + {"relay_log_purge", OPT_RELAY_LOG_PURGE, + "0 = do not purge relay logs. 1 = purge them as soon as they are no longer needed.", + (gptr*) &relay_log_purge, + (gptr*) &relay_log_purge, 0, GET_BOOL, NO_ARG, + 1, 0, 1, 0, 1, 0}, {"relay_log_space_limit", OPT_RELAY_LOG_SPACE_LIMIT, - "Maximum space to use for all relay logs", + "Maximum space to use for all relay logs.", (gptr*) &relay_log_space_limit, (gptr*) &relay_log_space_limit, 0, GET_ULL, REQUIRED_ARG, 0L, 0L, (longlong) ULONG_MAX, 0, 1, 0}, {"slave_compressed_protocol", OPT_SLAVE_COMPRESSED_PROTOCOL, - "Use compression on master/slave protocol", + "Use compression on master/slave protocol.", (gptr*) &opt_slave_compressed_protocol, (gptr*) &opt_slave_compressed_protocol, 0, GET_BOOL, NO_ARG, 0, 0, 1, 0, 1, 0}, @@ -4379,16 +5435,13 @@ this value; if zero (the default): when the size exceeds max_binlog_size. 
\ "Number of seconds to wait for more data from a master/slave connection before aborting the read.", (gptr*) &slave_net_timeout, (gptr*) &slave_net_timeout, 0, GET_ULONG, REQUIRED_ARG, SLAVE_NET_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0}, - {"range_alloc_block_size", OPT_RANGE_ALLOC_BLOCK_SIZE, - "Allocation block size for storing ranges during optimization", - (gptr*) &global_system_variables.range_alloc_block_size, - (gptr*) &max_system_variables.range_alloc_block_size, 0, GET_ULONG, - REQUIRED_ARG, RANGE_ALLOC_BLOCK_SIZE, 1024, ~0L, 0, 1024, 0}, - {"read-only", OPT_READONLY, - "Make all tables readonly, with the exception for replication (slave) threads and users with the SUPER privilege", - (gptr*) &opt_readonly, - (gptr*) &opt_readonly, - 0, GET_BOOL, NO_ARG, 0, 0, 1, 0, 1, 0}, + {"slave_transaction_retries", OPT_SLAVE_TRANS_RETRIES, + "Number of times the slave SQL thread will retry a transaction in case " + "it failed with a deadlock or elapsed lock wait timeout, " + "before giving up and stopping.", + (gptr*) &slave_trans_retries, (gptr*) &slave_trans_retries, 0, + GET_ULONG, REQUIRED_ARG, 0L, 0L, (longlong) ULONG_MAX, 0, 1, 0}, +#endif /* HAVE_REPLICATION */ {"slow_launch_time", OPT_SLOW_LAUNCH_TIME, "If creating the thread takes longer than this value (in seconds), the Slow_launch_threads counter will be incremented.", (gptr*) &slow_launch_time, (gptr*) &slow_launch_time, 0, GET_ULONG, @@ -4399,27 +5452,64 @@ this value; if zero (the default): when the size exceeds max_binlog_size. \ (gptr*) &max_system_variables.sortbuff_size, 0, GET_ULONG, REQUIRED_ARG, MAX_SORT_MEMORY, MIN_SORT_MEMORY+MALLOC_OVERHEAD*2, ~0L, MALLOC_OVERHEAD, 1, 0}, +#ifdef HAVE_BERKELEY_DB + {"sync-bdb-logs", OPT_BDB_SYNC, + "Synchronously flush logs. Enabled by default", + (gptr*) &opt_sync_bdb_logs, (gptr*) &opt_sync_bdb_logs, 0, GET_BOOL, + NO_ARG, 1, 0, 0, 0, 0, 0}, +#endif /* HAVE_BERKELEY_DB */ + {"sync-binlog", OPT_SYNC_BINLOG, + "Sync the binlog to disk after every #th event. \ +#=0 (the default) does no sync. Syncing slows MySQL down", + (gptr*) &sync_binlog_period, + (gptr*) &sync_binlog_period, 0, GET_ULONG, REQUIRED_ARG, 0, 0, ~0L, 0, 1, + 0}, +#ifdef DOES_NOTHING_YET + {"sync-replication", OPT_SYNC_REPLICATION, + "Enable synchronous replication", + (gptr*) &global_system_variables.sync_replication, + (gptr*) &global_system_variables.sync_replication, + 0, GET_ULONG, REQUIRED_ARG, 0, 0, 1, 0, 1, 0}, + {"sync-replication-slave-id", OPT_SYNC_REPLICATION_SLAVE_ID, + "Synchronous replication is wished for this slave", + (gptr*) &global_system_variables.sync_replication_slave_id, + (gptr*) &global_system_variables.sync_replication_slave_id, + 0, GET_ULONG, REQUIRED_ARG, 0, 0, ~0L, 0, 1, 0}, + {"sync-replication-timeout", OPT_SYNC_REPLICATION_TIMEOUT, + "Synchronous replication timeout", + (gptr*) &global_system_variables.sync_replication_timeout, + (gptr*) &global_system_variables.sync_replication_timeout, + 0, GET_ULONG, REQUIRED_ARG, 10, 0, ~0L, 0, 1, 0}, +#endif + {"sync-frm", OPT_SYNC_FRM, "Sync .frm to disk on create. 
Enabled by default", + (gptr*) &opt_sync_frm, (gptr*) &opt_sync_frm, 0, GET_BOOL, NO_ARG, 1, 0, + 0, 0, 0, 0}, {"table_cache", OPT_TABLE_CACHE, "The number of open tables for all threads.", (gptr*) &table_cache_size, (gptr*) &table_cache_size, 0, GET_ULONG, REQUIRED_ARG, 64, 1, 512*1024L, 0, 1, 0}, - {"thread_concurrency", OPT_THREAD_CONCURRENCY, - "Permits the application to give the threads system a hint for the desired number of threads that should be run at the same time.", - (gptr*) &concurrency, (gptr*) &concurrency, 0, GET_ULONG, REQUIRED_ARG, - DEFAULT_CONCURRENCY, 1, 512, 0, 1, 0}, {"thread_cache_size", OPT_THREAD_CACHE_SIZE, "How many threads we should keep in a cache for reuse.", (gptr*) &thread_cache_size, (gptr*) &thread_cache_size, 0, GET_ULONG, REQUIRED_ARG, 0, 0, 16384, 0, 1, 0}, + {"thread_concurrency", OPT_THREAD_CONCURRENCY, + "Permits the application to give the threads system a hint for the desired number of threads that should be run at the same time.", + (gptr*) &concurrency, (gptr*) &concurrency, 0, GET_ULONG, REQUIRED_ARG, + DEFAULT_CONCURRENCY, 1, 512, 0, 1, 0}, + {"thread_stack", OPT_THREAD_STACK, + "The stack size for each thread.", (gptr*) &thread_stack, + (gptr*) &thread_stack, 0, GET_ULONG, REQUIRED_ARG,DEFAULT_THREAD_STACK, + 1024L*128L, ~0L, 0, 1024, 0}, + { "time_format", OPT_TIME_FORMAT, + "The TIME format (for future).", + (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_TIME], + (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_TIME], + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"tmp_table_size", OPT_TMP_TABLE_SIZE, "If an in-memory temporary table exceeds this size, MySQL will automatically convert it to an on-disk MyISAM table.", (gptr*) &global_system_variables.tmp_table_size, (gptr*) &max_system_variables.tmp_table_size, 0, GET_ULONG, REQUIRED_ARG, 32*1024*1024L, 1024, ~0L, 0, 1, 0}, - {"thread_stack", OPT_THREAD_STACK, - "The stack size for each thread.", (gptr*) &thread_stack, - (gptr*) &thread_stack, 0, GET_ULONG, REQUIRED_ARG,DEFAULT_THREAD_STACK, - 1024*32, ~0L, 0, 1024, 0}, {"transaction_alloc_block_size", OPT_TRANS_ALLOC_BLOCK_SIZE, "Allocation block size for transactions to be stored in binary log", (gptr*) &global_system_variables.trans_alloc_block_size, @@ -4431,16 +5521,11 @@ this value; if zero (the default): when the size exceeds max_binlog_size. \ (gptr*) &max_system_variables.trans_prealloc_size, 0, GET_ULONG, REQUIRED_ARG, TRANS_ALLOC_PREALLOC_SIZE, 1024, ~0L, 0, 1024, 0}, {"wait_timeout", OPT_WAIT_TIMEOUT, - "The number of seconds the server waits for activity on a connection before closing it", + "The number of seconds the server waits for activity on a connection before closing it.", (gptr*) &global_system_variables.net_wait_timeout, (gptr*) &max_system_variables.net_wait_timeout, 0, GET_ULONG, REQUIRED_ARG, NET_WAIT_TIMEOUT, 1, IF_WIN(INT_MAX32/1000, LONG_TIMEOUT), 0, 1, 0}, - { "default-week-format", OPT_DEFAULT_WEEK_FORMAT, - "The default week format used by WEEK() functions.", - (gptr*) &global_system_variables.default_week_format, - (gptr*) &max_system_variables.default_week_format, - 0, GET_ULONG, REQUIRED_ARG, 0, 0, 7L, 0, 1, 0}, {0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; @@ -4448,9 +5533,12 @@ this value; if zero (the default): when the size exceeds max_binlog_size. 
\ struct show_var_st status_vars[]= { {"Aborted_clients", (char*) &aborted_threads, SHOW_LONG}, {"Aborted_connects", (char*) &aborted_connects, SHOW_LONG}, + {"Binlog_cache_disk_use", (char*) &binlog_cache_disk_use, SHOW_LONG}, + {"Binlog_cache_use", (char*) &binlog_cache_use, SHOW_LONG}, {"Bytes_received", (char*) &bytes_received, SHOW_LONG}, {"Bytes_sent", (char*) &bytes_sent, SHOW_LONG}, {"Com_admin_commands", (char*) &com_other, SHOW_LONG}, + {"Com_alter_db", (char*) (com_stat+(uint) SQLCOM_ALTER_DB),SHOW_LONG}, {"Com_alter_table", (char*) (com_stat+(uint) SQLCOM_ALTER_TABLE),SHOW_LONG}, {"Com_analyze", (char*) (com_stat+(uint) SQLCOM_ANALYZE),SHOW_LONG}, {"Com_backup_table", (char*) (com_stat+(uint) SQLCOM_BACKUP_TABLE),SHOW_LONG}, @@ -4458,31 +5546,43 @@ struct show_var_st status_vars[]= { {"Com_change_db", (char*) (com_stat+(uint) SQLCOM_CHANGE_DB),SHOW_LONG}, {"Com_change_master", (char*) (com_stat+(uint) SQLCOM_CHANGE_MASTER),SHOW_LONG}, {"Com_check", (char*) (com_stat+(uint) SQLCOM_CHECK),SHOW_LONG}, + {"Com_checksum", (char*) (com_stat+(uint) SQLCOM_CHECKSUM),SHOW_LONG}, {"Com_commit", (char*) (com_stat+(uint) SQLCOM_COMMIT),SHOW_LONG}, {"Com_create_db", (char*) (com_stat+(uint) SQLCOM_CREATE_DB),SHOW_LONG}, {"Com_create_function", (char*) (com_stat+(uint) SQLCOM_CREATE_FUNCTION),SHOW_LONG}, {"Com_create_index", (char*) (com_stat+(uint) SQLCOM_CREATE_INDEX),SHOW_LONG}, {"Com_create_table", (char*) (com_stat+(uint) SQLCOM_CREATE_TABLE),SHOW_LONG}, + {"Com_dealloc_sql", (char*) (com_stat+(uint) + SQLCOM_DEALLOCATE_PREPARE), SHOW_LONG}, {"Com_delete", (char*) (com_stat+(uint) SQLCOM_DELETE),SHOW_LONG}, {"Com_delete_multi", (char*) (com_stat+(uint) SQLCOM_DELETE_MULTI),SHOW_LONG}, + {"Com_do", (char*) (com_stat+(uint) SQLCOM_DO),SHOW_LONG}, {"Com_drop_db", (char*) (com_stat+(uint) SQLCOM_DROP_DB),SHOW_LONG}, {"Com_drop_function", (char*) (com_stat+(uint) SQLCOM_DROP_FUNCTION),SHOW_LONG}, {"Com_drop_index", (char*) (com_stat+(uint) SQLCOM_DROP_INDEX),SHOW_LONG}, {"Com_drop_table", (char*) (com_stat+(uint) SQLCOM_DROP_TABLE),SHOW_LONG}, + {"Com_drop_user", (char*) (com_stat+(uint) SQLCOM_DROP_USER),SHOW_LONG}, + {"Com_execute_sql", (char*) (com_stat+(uint) SQLCOM_EXECUTE), + SHOW_LONG}, {"Com_flush", (char*) (com_stat+(uint) SQLCOM_FLUSH),SHOW_LONG}, {"Com_grant", (char*) (com_stat+(uint) SQLCOM_GRANT),SHOW_LONG}, {"Com_ha_close", (char*) (com_stat+(uint) SQLCOM_HA_CLOSE),SHOW_LONG}, {"Com_ha_open", (char*) (com_stat+(uint) SQLCOM_HA_OPEN),SHOW_LONG}, {"Com_ha_read", (char*) (com_stat+(uint) SQLCOM_HA_READ),SHOW_LONG}, + {"Com_help", (char*) (com_stat+(uint) SQLCOM_HELP),SHOW_LONG}, {"Com_insert", (char*) (com_stat+(uint) SQLCOM_INSERT),SHOW_LONG}, {"Com_insert_select", (char*) (com_stat+(uint) SQLCOM_INSERT_SELECT),SHOW_LONG}, {"Com_kill", (char*) (com_stat+(uint) SQLCOM_KILL),SHOW_LONG}, {"Com_load", (char*) (com_stat+(uint) SQLCOM_LOAD),SHOW_LONG}, - {"Com_load_master_data", (char*) (com_stat+(uint) SQLCOM_LOAD_MASTER_DATA),SHOW_LONG}, + {"Com_load_master_data", (char*) (com_stat+(uint) SQLCOM_LOAD_MASTER_DATA),SHOW_LONG}, {"Com_load_master_table", (char*) (com_stat+(uint) SQLCOM_LOAD_MASTER_TABLE),SHOW_LONG}, {"Com_lock_tables", (char*) (com_stat+(uint) SQLCOM_LOCK_TABLES),SHOW_LONG}, {"Com_optimize", (char*) (com_stat+(uint) SQLCOM_OPTIMIZE),SHOW_LONG}, + {"Com_preload_keys", (char*) (com_stat+(uint) SQLCOM_PRELOAD_KEYS),SHOW_LONG}, + {"Com_prepare_sql", (char*) (com_stat+(uint) SQLCOM_PREPARE), + SHOW_LONG}, {"Com_purge", (char*) (com_stat+(uint) SQLCOM_PURGE),SHOW_LONG}, + 
{"Com_purge_before_date", (char*) (com_stat+(uint) SQLCOM_PURGE_BEFORE),SHOW_LONG}, {"Com_rename_table", (char*) (com_stat+(uint) SQLCOM_RENAME_TABLE),SHOW_LONG}, {"Com_repair", (char*) (com_stat+(uint) SQLCOM_REPAIR),SHOW_LONG}, {"Com_replace", (char*) (com_stat+(uint) SQLCOM_REPLACE),SHOW_LONG}, @@ -4490,34 +5590,49 @@ struct show_var_st status_vars[]= { {"Com_reset", (char*) (com_stat+(uint) SQLCOM_RESET),SHOW_LONG}, {"Com_restore_table", (char*) (com_stat+(uint) SQLCOM_RESTORE_TABLE),SHOW_LONG}, {"Com_revoke", (char*) (com_stat+(uint) SQLCOM_REVOKE),SHOW_LONG}, + {"Com_revoke_all", (char*) (com_stat+(uint) SQLCOM_REVOKE_ALL),SHOW_LONG}, {"Com_rollback", (char*) (com_stat+(uint) SQLCOM_ROLLBACK),SHOW_LONG}, {"Com_savepoint", (char*) (com_stat+(uint) SQLCOM_SAVEPOINT),SHOW_LONG}, {"Com_select", (char*) (com_stat+(uint) SQLCOM_SELECT),SHOW_LONG}, {"Com_set_option", (char*) (com_stat+(uint) SQLCOM_SET_OPTION),SHOW_LONG}, {"Com_show_binlog_events", (char*) (com_stat+(uint) SQLCOM_SHOW_BINLOG_EVENTS),SHOW_LONG}, {"Com_show_binlogs", (char*) (com_stat+(uint) SQLCOM_SHOW_BINLOGS),SHOW_LONG}, - {"Com_show_create", (char*) (com_stat+(uint) SQLCOM_SHOW_CREATE),SHOW_LONG}, + {"Com_show_charsets", (char*) (com_stat+(uint) SQLCOM_SHOW_CHARSETS),SHOW_LONG}, + {"Com_show_collations", (char*) (com_stat+(uint) SQLCOM_SHOW_COLLATIONS),SHOW_LONG}, + {"Com_show_column_types", (char*) (com_stat+(uint) SQLCOM_SHOW_COLUMN_TYPES),SHOW_LONG}, + {"Com_show_create_db", (char*) (com_stat+(uint) SQLCOM_SHOW_CREATE_DB),SHOW_LONG}, + {"Com_show_create_table", (char*) (com_stat+(uint) SQLCOM_SHOW_CREATE),SHOW_LONG}, {"Com_show_databases", (char*) (com_stat+(uint) SQLCOM_SHOW_DATABASES),SHOW_LONG}, + {"Com_show_errors", (char*) (com_stat+(uint) SQLCOM_SHOW_ERRORS),SHOW_LONG}, {"Com_show_fields", (char*) (com_stat+(uint) SQLCOM_SHOW_FIELDS),SHOW_LONG}, {"Com_show_grants", (char*) (com_stat+(uint) SQLCOM_SHOW_GRANTS),SHOW_LONG}, {"Com_show_innodb_status", (char*) (com_stat+(uint) SQLCOM_SHOW_INNODB_STATUS),SHOW_LONG}, {"Com_show_keys", (char*) (com_stat+(uint) SQLCOM_SHOW_KEYS),SHOW_LONG}, {"Com_show_logs", (char*) (com_stat+(uint) SQLCOM_SHOW_LOGS),SHOW_LONG}, {"Com_show_master_status", (char*) (com_stat+(uint) SQLCOM_SHOW_MASTER_STAT),SHOW_LONG}, + {"Com_show_ndb_status", (char*) (com_stat+(uint) SQLCOM_SHOW_NDBCLUSTER_STATUS),SHOW_LONG}, {"Com_show_new_master", (char*) (com_stat+(uint) SQLCOM_SHOW_NEW_MASTER),SHOW_LONG}, {"Com_show_open_tables", (char*) (com_stat+(uint) SQLCOM_SHOW_OPEN_TABLES),SHOW_LONG}, + {"Com_show_privileges", (char*) (com_stat+(uint) SQLCOM_SHOW_PRIVILEGES),SHOW_LONG}, {"Com_show_processlist", (char*) (com_stat+(uint) SQLCOM_SHOW_PROCESSLIST),SHOW_LONG}, {"Com_show_slave_hosts", (char*) (com_stat+(uint) SQLCOM_SHOW_SLAVE_HOSTS),SHOW_LONG}, {"Com_show_slave_status", (char*) (com_stat+(uint) SQLCOM_SHOW_SLAVE_STAT),SHOW_LONG}, {"Com_show_status", (char*) (com_stat+(uint) SQLCOM_SHOW_STATUS),SHOW_LONG}, + {"Com_show_storage_engines", (char*) (com_stat+(uint) SQLCOM_SHOW_STORAGE_ENGINES),SHOW_LONG}, {"Com_show_tables", (char*) (com_stat+(uint) SQLCOM_SHOW_TABLES),SHOW_LONG}, {"Com_show_variables", (char*) (com_stat+(uint) SQLCOM_SHOW_VARIABLES),SHOW_LONG}, + {"Com_show_warnings", (char*) (com_stat+(uint) SQLCOM_SHOW_WARNS),SHOW_LONG}, {"Com_slave_start", (char*) (com_stat+(uint) SQLCOM_SLAVE_START),SHOW_LONG}, {"Com_slave_stop", (char*) (com_stat+(uint) SQLCOM_SLAVE_STOP),SHOW_LONG}, + {"Com_stmt_close", (char*) &com_stmt_close, SHOW_LONG}, + {"Com_stmt_execute", (char*) &com_stmt_execute, 
SHOW_LONG}, + {"Com_stmt_prepare", (char*) &com_stmt_prepare, SHOW_LONG}, + {"Com_stmt_reset", (char*) &com_stmt_reset, SHOW_LONG}, + {"Com_stmt_send_long_data", (char*) &com_stmt_send_long_data, SHOW_LONG}, {"Com_truncate", (char*) (com_stat+(uint) SQLCOM_TRUNCATE),SHOW_LONG}, {"Com_unlock_tables", (char*) (com_stat+(uint) SQLCOM_UNLOCK_TABLES),SHOW_LONG}, {"Com_update", (char*) (com_stat+(uint) SQLCOM_UPDATE),SHOW_LONG}, - {"Com_update_multi", (char*) (com_stat+(uint) SQLCOM_MULTI_UPDATE),SHOW_LONG}, + {"Com_update_multi", (char*) (com_stat+(uint) SQLCOM_UPDATE_MULTI),SHOW_LONG}, {"Connections", (char*) &thread_id, SHOW_LONG_CONST}, {"Created_tmp_disk_tables", (char*) &created_tmp_disk_tables,SHOW_LONG}, {"Created_tmp_files", (char*) &my_tmp_file_created, SHOW_LONG}, @@ -4528,6 +5643,7 @@ struct show_var_st status_vars[]= { {"Flush_commands", (char*) &refresh_version, SHOW_LONG_CONST}, {"Handler_commit", (char*) &ha_commit_count, SHOW_LONG}, {"Handler_delete", (char*) &ha_delete_count, SHOW_LONG}, + {"Handler_discover", (char*) &ha_discover_count, SHOW_LONG}, {"Handler_read_first", (char*) &ha_read_first_count, SHOW_LONG}, {"Handler_read_key", (char*) &ha_read_key_count, SHOW_LONG}, {"Handler_read_next", (char*) &ha_read_next_count, SHOW_LONG}, @@ -4537,14 +5653,22 @@ struct show_var_st status_vars[]= { {"Handler_rollback", (char*) &ha_rollback_count, SHOW_LONG}, {"Handler_update", (char*) &ha_update_count, SHOW_LONG}, {"Handler_write", (char*) &ha_write_count, SHOW_LONG}, - {"Key_blocks_used", (char*) &_my_blocks_used, SHOW_LONG_CONST}, - {"Key_read_requests", (char*) &_my_cache_r_requests, SHOW_LONGLONG}, - {"Key_reads", (char*) &_my_cache_read, SHOW_LONGLONG}, - {"Key_write_requests", (char*) &_my_cache_w_requests, SHOW_LONGLONG}, - {"Key_writes", (char*) &_my_cache_write, SHOW_LONGLONG}, - {"Max_used_connections", (char*) &max_used_connections, SHOW_LONG}, + {"Key_blocks_not_flushed", (char*) &dflt_key_cache_var.global_blocks_changed, + SHOW_KEY_CACHE_LONG}, + {"Key_blocks_unused", (char*) &dflt_key_cache_var.blocks_unused, + SHOW_KEY_CACHE_CONST_LONG}, + {"Key_blocks_used", (char*) &dflt_key_cache_var.blocks_used, + SHOW_KEY_CACHE_CONST_LONG}, + {"Key_read_requests", (char*) &dflt_key_cache_var.global_cache_r_requests, + SHOW_KEY_CACHE_LONGLONG}, + {"Key_reads", (char*) &dflt_key_cache_var.global_cache_read, + SHOW_KEY_CACHE_LONGLONG}, + {"Key_write_requests", (char*) &dflt_key_cache_var.global_cache_w_requests, + SHOW_KEY_CACHE_LONGLONG}, + {"Key_writes", (char*) &dflt_key_cache_var.global_cache_write, + SHOW_KEY_CACHE_LONGLONG}, + {"Max_used_connections", (char*) &max_used_connections, SHOW_LONG}, {"Not_flushed_delayed_rows", (char*) &delayed_rows_in_use, SHOW_LONG_CONST}, - {"Not_flushed_key_blocks", (char*) &_my_blocks_changed, SHOW_LONG_CONST}, {"Open_files", (char*) &my_file_opened, SHOW_LONG_CONST}, {"Open_streams", (char*) &my_stream_opened, SHOW_LONG_CONST}, {"Open_tables", (char*) 0, SHOW_OPENTABLES}, @@ -4552,7 +5676,7 @@ struct show_var_st status_vars[]= { #ifdef HAVE_QUERY_CACHE {"Qcache_free_blocks", (char*) &query_cache.free_memory_blocks, SHOW_LONG_CONST}, - {"Qcache_free_memory", (char*) &query_cache.free_memory, + {"Qcache_free_memory", (char*) &query_cache.free_memory, SHOW_LONG_CONST}, {"Qcache_hits", (char*) &query_cache.hits, SHOW_LONG}, {"Qcache_inserts", (char*) &query_cache.inserts, SHOW_LONG}, @@ -4570,7 +5694,8 @@ struct show_var_st status_vars[]= { {"Select_range_check", (char*) &select_range_check_count, SHOW_LONG}, {"Select_scan", (char*) 
&select_scan_count, SHOW_LONG}, {"Slave_open_temp_tables", (char*) &slave_open_temp_tables, SHOW_LONG}, - {"Slave_running", (char*) 0, SHOW_SLAVE_RUNNING}, + {"Slave_retried_transactions",(char*) 0, SHOW_SLAVE_RETRIED_TRANS}, + {"Slave_running", (char*) 0, SHOW_SLAVE_RUNNING}, {"Slow_launch_threads", (char*) &slow_launch_threads, SHOW_LONG}, {"Slow_queries", (char*) &long_query_count, SHOW_LONG}, {"Sort_merge_passes", (char*) &filesort_merge_passes, SHOW_LONG}, @@ -4619,14 +5744,14 @@ static void print_version(void) server_version,SYSTEM_TYPE,MACHINE_TYPE, MYSQL_COMPILATION_COMMENT); } -static void use_help(void) -{ - print_version(); - printf("Use '--help' or '--no-defaults --help' for a list of available options\n"); -} - static void usage(void) { + if (!(default_charset_info= get_charset_by_csname(default_character_set_name, + MY_CS_PRIMARY, + MYF(MY_WME)))) + exit(1); + if (!default_collation_name) + default_collation_name= (char*) default_charset_info->name; print_version(); puts("\ Copyright (C) 2000 MySQL AB, by Monty and others\n\ @@ -4635,6 +5760,10 @@ and you are welcome to modify and redistribute it under the GPL license\n\n\ Starts the MySQL database server\n"); printf("Usage: %s [OPTIONS]\n", my_progname); + if (!opt_verbose) + puts("\nFor more help options (several pages), use mysqld --verbose --help\n"); + else + { #ifdef __WIN__ puts("NT and Win32 specific options:\n\ --install Install the default service (NT)\n\ @@ -4658,29 +5787,250 @@ Starts the MySQL database server\n"); puts("\n\ To see what values a running MySQL server is using, type\n\ -'mysqladmin variables' instead of 'mysqld --help'."); +'mysqladmin variables' instead of 'mysqld --verbose --help'.\n"); + } } -static void set_options(void) +/* + Initialize all MySQL global variables to default values + + SYNOPSIS + mysql_init_variables() + + NOTES + The reason to set a lot of global variables to zero is to allow one to + restart the embedded server with a clean environment + It's also needed on some exotic platforms where global variables are + not set to 0 when a program starts. + + We don't need to set numeric variables refered to in my_long_options + as these are initialized by my_getopt. 
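    To make the embedded case concrete, a sketch only (the wrapper name and
    argument arrays below are made up; mysql_server_init()/mysql_server_end()
    are the documented libmysqld entry points): the sequence runs the full
    startup path twice inside one process, which is exactly when globals left
    over from the first run would otherwise leak into the second.

      // hypothetical driver code, not part of this change
      #include <mysql.h>

      static char *server_args[]   = { (char*) "mysqld_embedded", 0 };
      static char *server_groups[] = { (char*) "server", (char*) "embedded", 0 };

      void run_embedded_twice()
      {
        mysql_server_init(1, server_args, server_groups);  // first start
        mysql_server_end();                                // clean shutdown
        mysql_server_init(1, server_args, server_groups);  // restart relies on re-zeroed globals
        mysql_server_end();
      }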
+*/ + +static void mysql_init_variables(void) { -#if !defined( my_pthread_setprio ) && !defined( HAVE_PTHREAD_SETSCHEDPARAM ) + /* Things reset to zero */ + opt_skip_slave_start= opt_reckless_slave = 0; + mysql_home[0]= pidfile_name[0]= log_error_file[0]= 0; + opt_log= opt_update_log= opt_bin_log= opt_slow_log= 0; + opt_disable_networking= opt_skip_show_db=0; + opt_logname= opt_update_logname= opt_binlog_index_name= opt_slow_logname=0; + opt_secure_auth= 0; + opt_bootstrap= opt_myisam_log= 0; + mqh_used= 0; + segfaulted= kill_in_progress= 0; + cleanup_done= 0; + defaults_argv= 0; + server_id_supplied= 0; + test_flags= select_errors= dropping_tables= ha_open_options=0; + thread_count= thread_running= kill_cached_threads= wake_thread=0; + slave_open_temp_tables= 0; + com_other= 0; + cached_thread_count= 0; + bytes_sent= bytes_received= 0; + opt_endinfo= using_udf_functions= 0; + opt_using_transactions= using_update_log= 0; + abort_loop= select_thread_in_use= signal_thread_in_use= 0; + ready_to_exit= shutdown_in_progress= grant_option= 0; + long_query_count= aborted_threads= aborted_connects= 0; + delayed_insert_threads= delayed_insert_writes= delayed_rows_in_use= 0; + delayed_insert_errors= thread_created= 0; + filesort_rows= filesort_range_count= filesort_scan_count= 0; + filesort_merge_passes= select_range_check_count= select_range_count= 0; + select_scan_count= select_full_range_join_count= select_full_join_count= 0; + specialflag= opened_tables= created_tmp_tables= created_tmp_disk_tables= 0; + binlog_cache_use= binlog_cache_disk_use= 0; + max_used_connections= slow_launch_threads = 0; + mysqld_user= mysqld_chroot= opt_init_file= opt_bin_logname = 0; + errmesg= 0; + mysqld_unix_port= opt_mysql_tmpdir= my_bind_addr_str= NullS; + bzero((gptr) &mysql_tmpdir_list, sizeof(mysql_tmpdir_list)); + bzero((gptr) &com_stat, sizeof(com_stat)); + key_map_full.set_all(); + + /* Character sets */ + system_charset_info= &my_charset_utf8_general_ci; + files_charset_info= &my_charset_utf8_general_ci; + national_charset_info= &my_charset_utf8_general_ci; + table_alias_charset= &my_charset_bin; + + opt_date_time_formats[0]= opt_date_time_formats[1]= opt_date_time_formats[2]= 0; + + /* Things with default values that are not zero */ + delay_key_write_options= (uint) DELAY_KEY_WRITE_ON; + opt_specialflag= SPECIAL_ENGLISH; + unix_sock= ip_sock= INVALID_SOCKET; + mysql_home_ptr= mysql_home; + pidfile_name_ptr= pidfile_name; + log_error_file_ptr= log_error_file; + language_ptr= language; + mysql_data_home= mysql_real_data_home; + thd_startup_options= (OPTION_UPDATE_LOG | OPTION_AUTO_IS_NULL | + OPTION_BIN_LOG | OPTION_QUOTE_SHOW_CREATE | + OPTION_SQL_NOTES); + protocol_version= PROTOCOL_VERSION; + what_to_log= ~ (1L << (uint) COM_TIME); + refresh_version= flush_version= 1L; /* Increments on each reload */ + query_id= thread_id= 1L; + strmov(server_version, MYSQL_SERVER_VERSION); + myisam_recover_options_str= sql_mode_str= "OFF"; + myisam_stats_method_str= "nulls_unequal"; + my_bind_addr = htonl(INADDR_ANY); + threads.empty(); + thread_cache.empty(); + key_caches.empty(); + if (!(dflt_key_cache= get_or_create_key_cache(default_key_cache_base.str, + default_key_cache_base.length))) + exit(1); + multi_keycache_init(); /* set key_cache_hash.default_value = dflt_key_cache */ + + /* Initialize structures that is used when processing options */ + replicate_rewrite_db.empty(); + replicate_do_db.empty(); + replicate_ignore_db.empty(); + binlog_do_db.empty(); + binlog_ignore_db.empty(); + + /* Set directory paths */ + 
strmake(language, LANGUAGE, sizeof(language)-1); + strmake(mysql_real_data_home, get_relative_path(DATADIR), + sizeof(mysql_real_data_home)-1); + mysql_data_home_buff[0]=FN_CURLIB; // all paths are relative from here + mysql_data_home_buff[1]=0; + + /* Replication parameters */ + master_user= (char*) "test"; + master_password= master_host= 0; + master_info_file= (char*) "master.info", + relay_log_info_file= (char*) "relay-log.info"; + master_ssl_key= master_ssl_cert= master_ssl_ca= + master_ssl_capath= master_ssl_cipher= 0; + report_user= report_password = report_host= 0; /* TO BE DELETED */ + opt_relay_logname= opt_relaylog_index_name= 0; + + /* Variables in libraries */ + charsets_dir= 0; + default_character_set_name= (char*) MYSQL_DEFAULT_CHARSET_NAME; + default_collation_name= (char*) MYSQL_DEFAULT_COLLATION_NAME; + sys_charset_system.value= (char*) system_charset_info->csname; + + + /* Set default values for some option variables */ + global_system_variables.table_type= DB_TYPE_MYISAM; + global_system_variables.tx_isolation= ISO_REPEATABLE_READ; + global_system_variables.select_limit= (ulonglong) HA_POS_ERROR; + max_system_variables.select_limit= (ulonglong) HA_POS_ERROR; + global_system_variables.max_join_size= (ulonglong) HA_POS_ERROR; + max_system_variables.max_join_size= (ulonglong) HA_POS_ERROR; + global_system_variables.old_passwords= 0; + + /* + Default behavior for 4.1 and 5.0 is to treat NULL values as unequal + when collecting index statistics for MyISAM tables. + */ + global_system_variables.myisam_stats_method= MI_STATS_METHOD_NULLS_NOT_EQUAL; + + /* Variables that depends on compile options */ +#ifndef DBUG_OFF + default_dbug_option=IF_WIN("d:t:i:O,\\mysqld.trace", + "d:t:i:o,/tmp/mysqld.trace"); +#endif + opt_error_log= IF_WIN(1,0); +#ifdef HAVE_BERKELEY_DB + have_berkeley_db= SHOW_OPTION_YES; +#else + have_berkeley_db= SHOW_OPTION_NO; +#endif +#ifdef HAVE_INNOBASE_DB + have_innodb=SHOW_OPTION_YES; +#else + have_innodb=SHOW_OPTION_NO; +#endif +#ifdef HAVE_ISAM + have_isam=SHOW_OPTION_YES; +#else + have_isam=SHOW_OPTION_NO; +#endif +#ifdef HAVE_EXAMPLE_DB + have_example_db= SHOW_OPTION_YES; +#else + have_example_db= SHOW_OPTION_NO; +#endif +#ifdef HAVE_ARCHIVE_DB + have_archive_db= SHOW_OPTION_YES; +#else + have_archive_db= SHOW_OPTION_NO; +#endif +#ifdef HAVE_BLACKHOLE_DB + have_blackhole_db= SHOW_OPTION_YES; +#else + have_blackhole_db= SHOW_OPTION_NO; +#endif +#ifdef HAVE_CSV_DB + have_csv_db= SHOW_OPTION_YES; +#else + have_csv_db= SHOW_OPTION_NO; +#endif +#ifdef HAVE_NDBCLUSTER_DB + have_ndbcluster=SHOW_OPTION_DISABLED; +#else + have_ndbcluster=SHOW_OPTION_NO; +#endif +#ifdef USE_RAID + have_raid=SHOW_OPTION_YES; +#else + have_raid=SHOW_OPTION_NO; +#endif +#ifdef HAVE_OPENSSL + have_openssl=SHOW_OPTION_YES; +#else + have_openssl=SHOW_OPTION_NO; +#endif +#ifdef HAVE_BROKEN_REALPATH + have_symlink=SHOW_OPTION_NO; +#else + have_symlink=SHOW_OPTION_YES; +#endif +#ifdef HAVE_QUERY_CACHE + have_query_cache=SHOW_OPTION_YES; +#else + have_query_cache=SHOW_OPTION_NO; +#endif +#ifdef HAVE_SPATIAL + have_geometry=SHOW_OPTION_YES; +#else + have_geometry=SHOW_OPTION_NO; +#endif +#ifdef HAVE_RTREE_KEYS + have_rtree_keys=SHOW_OPTION_YES; +#else + have_rtree_keys=SHOW_OPTION_NO; +#endif +#ifdef HAVE_CRYPT + have_crypt=SHOW_OPTION_YES; +#else + have_crypt=SHOW_OPTION_NO; +#endif +#ifdef HAVE_COMPRESS + have_compress= SHOW_OPTION_YES; +#else + have_compress= SHOW_OPTION_NO; +#endif +#ifdef HAVE_LIBWRAP + libwrapName= NullS; +#endif +#ifdef HAVE_OPENSSL + des_key_file = 0; + 
ssl_acceptor_fd= 0; +#endif +#ifdef HAVE_SMEM + shared_memory_base_name= default_shared_memory_base_name; +#endif +#if !defined(my_pthread_setprio) && !defined(HAVE_PTHREAD_SETSCHEDPARAM) opt_specialflag |= SPECIAL_NO_PRIOR; #endif - sys_charset.value= (char*) MYSQL_CHARSET; - (void) strmake(language, LANGUAGE, sizeof(language)-1); - (void) strmake(mysql_real_data_home, get_relative_path(DATADIR), - sizeof(mysql_real_data_home)-1); - - /* Set default values for some variables */ - global_system_variables.table_type=DB_TYPE_MYISAM; - global_system_variables.tx_isolation=ISO_REPEATABLE_READ; - global_system_variables.select_limit= HA_POS_ERROR; - max_system_variables.select_limit= HA_POS_ERROR; - global_system_variables.max_join_size= HA_POS_ERROR; - max_system_variables.max_join_size= HA_POS_ERROR; - #if defined(__WIN__) || defined(__NETWARE__) /* Allow Win32 and NetWare users to move MySQL anywhere */ { @@ -4695,10 +6045,6 @@ static void set_options(void) tmpenv = DEFAULT_MYSQL_HOME; (void) strmake(mysql_home, tmpenv, sizeof(mysql_home)-1); #endif - - my_disable_locking=myisam_single_user= 1; - opt_external_locking=0; - my_bind_addr = htonl( INADDR_ANY ); } @@ -4714,14 +6060,15 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), opt_endinfo=1; /* unireg: memory allocation */ break; case 'a': - opt_sql_mode = (MODE_REAL_AS_FLOAT | MODE_PIPES_AS_CONCAT | - MODE_ANSI_QUOTES | MODE_IGNORE_SPACE | MODE_SERIALIZABLE - | MODE_ONLY_FULL_GROUP_BY); + global_system_variables.sql_mode= fix_sql_mode(MODE_ANSI); global_system_variables.tx_isolation= ISO_SERIALIZABLE; break; case 'b': strmake(mysql_home,argument,sizeof(mysql_home)-1); break; + case 'C': + default_collation_name= 0; + break; case 'l': opt_log=1; break; @@ -4734,32 +6081,22 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), if (!mysqld_user || !strcmp(mysqld_user, argument)) mysqld_user= argument; else - fprintf(stderr, "Warning: Ignoring user change to '%s' because the user was set to '%s' earlier on the command line\n", argument, mysqld_user); + sql_print_warning("Ignoring user change to '%s' because the user was set to '%s' earlier on the command line\n", argument, mysqld_user); break; case 'L': strmake(language, argument, sizeof(language)-1); break; - case 'o': - protocol_version=PROTOCOL_VERSION-1; - break; +#ifdef HAVE_REPLICATION case OPT_SLAVE_SKIP_ERRORS: init_slave_skip_errors(argument); break; +#endif case OPT_SAFEMALLOC_MEM_LIMIT: #if !defined(DBUG_OFF) && defined(SAFEMALLOC) sf_malloc_mem_limit = atoi(argument); #endif break; -#ifdef EMBEDDED_LIBRARY - case OPT_MAX_ALLOWED_PACKET: - max_allowed_packet= atoi(argument); - break; - case OPT_NET_BUFFER_LENGTH: - net_buffer_length= atoi(argument); - break; -#endif #include <sslopt-case.h> - case 'v': case 'V': print_version(); exit(0); @@ -4771,10 +6108,6 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), else global_system_variables.log_warnings= atoi(argument); break; - case 'I': - case '?': - usage(); - exit(0); case 'T': test_flags= argument ? 
(uint) atoi(argument) : 0; test_flags&= ~TEST_NO_THREADS; @@ -4795,6 +6128,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), case (int) OPT_ERROR_LOG_FILE: opt_error_log= 1; break; +#ifdef HAVE_REPLICATION case (int) OPT_INIT_RPL_ROLE: { int role; @@ -4821,7 +6155,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), case (int)OPT_REPLICATE_REWRITE_DB: { char* key = argument,*p, *val; - + if (!(p= strstr(argument, "->"))) { fprintf(stderr, @@ -4829,7 +6163,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), exit(1); } val= p--; - while (isspace(*p) && p > argument) + while (my_isspace(mysqld_charset, *p) && p > argument) *p-- = 0; if (p == argument) { @@ -4839,7 +6173,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), } *val= 0; val+= 2; - while (*val && isspace(*val)) + while (*val && my_isspace(mysqld_charset, *val)) *val++; if (!*val) { @@ -4915,13 +6249,10 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), table_rules_on = 1; break; } +#endif /* HAVE_REPLICATION */ case (int) OPT_SLOW_QUERY_LOG: opt_slow_log=1; break; - case (int)OPT_RECKLESS_SLAVE: - opt_reckless_slave = 1; - init_slave_skip_errors("all"); - break; case (int) OPT_SKIP_NEW: opt_specialflag|= SPECIAL_NO_NEW_FUNC; delay_key_write_options= (uint) DELAY_KEY_WRITE_NONE; @@ -4951,16 +6282,13 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), case (int) OPT_SKIP_RESOLVE: opt_specialflag|=SPECIAL_NO_RESOLVE; break; - case (int) OPT_LONG_FORMAT: - opt_specialflag|=SPECIAL_LONG_LOG_FORMAT; - break; case (int) OPT_SKIP_NETWORKING: #if defined(__NETWARE__) sql_perror("Can't start server: skip-networking option is currently not supported on NetWare"); exit(1); -#endif +#endif opt_disable_networking=1; - mysql_port=0; + mysqld_port=0; break; case (int) OPT_SKIP_SHOW_DB: opt_skip_show_db=1; @@ -4981,11 +6309,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), my_use_symdir=0; break; case (int) OPT_BIND_ADDRESS: - if (isdigit(argument[0])) - { - my_bind_addr = (ulong) inet_addr(argument); - } - else + if ((my_bind_addr= (ulong) inet_addr(argument)) == INADDR_NONE) { struct hostent *ent; if (argument[0]) @@ -5033,15 +6357,15 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), case OPT_BOOTSTRAP: opt_noacl=opt_bootstrap=1; break; - case OPT_TABLE_TYPE: + case OPT_STORAGE_ENGINE: { - int type; - if ((type=find_type(argument, &ha_table_typelib, 2)) <= 0) + if ((enum db_type)((global_system_variables.table_type= + ha_resolve_by_name(argument, strlen(argument)))) == + DB_TYPE_UNKNOWN) { - fprintf(stderr,"Unknown table type: %s\n",argument); + fprintf(stderr,"Unknown/unsupported table type: %s\n",argument); exit(1); } - global_system_variables.table_type= type-1; break; } case OPT_SERVER_ID: @@ -5103,8 +6427,12 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), berkeley_lock_type=berkeley_lock_types[type-1]; else { - if (test_if_int(argument,(uint) strlen(argument))) - berkeley_lock_scan_time=atoi(argument); + int err; + char *end; + uint length= strlen(argument); + long value= my_strntol(&my_charset_latin1, argument, length, 10, &end, &err); + if (test_if_int(argument,(uint) length, end, &my_charset_latin1)) + berkeley_lock_scan_time= value; else { fprintf(stderr,"Unknown lock type: %s\n",argument); @@ -5115,24 +6443,44 @@ get_one_option(int optid, const struct 
my_option *opt __attribute__((unused)), } case OPT_BDB_SHARED: berkeley_init_flags&= ~(DB_PRIVATE); - berkeley_shared_data=1; + berkeley_shared_data= 1; break; #endif /* HAVE_BERKELEY_DB */ - case OPT_BDB_SKIP: + case OPT_BDB: #ifdef HAVE_BERKELEY_DB - berkeley_skip=1; - have_berkeley_db=SHOW_OPTION_DISABLED; + if (opt_bdb) + have_berkeley_db= SHOW_OPTION_YES; + else + have_berkeley_db= SHOW_OPTION_DISABLED; +#endif + break; + case OPT_ISAM: +#ifdef HAVE_ISAM + if (opt_isam) + have_isam= SHOW_OPTION_YES; + else + have_isam= SHOW_OPTION_DISABLED; +#endif + break; + case OPT_NDBCLUSTER: +#ifdef HAVE_NDBCLUSTER_DB + if (opt_ndbcluster) + have_ndbcluster= SHOW_OPTION_YES; + else + have_ndbcluster= SHOW_OPTION_DISABLED; #endif break; - case OPT_INNODB_SKIP: + case OPT_INNODB: #ifdef HAVE_INNOBASE_DB - innodb_skip=1; - have_innodb=SHOW_OPTION_DISABLED; + if (opt_innodb) + have_innodb= SHOW_OPTION_YES; + else + have_innodb= SHOW_OPTION_DISABLED; #endif break; case OPT_INNODB_DATA_FILE_PATH: #ifdef HAVE_INNOBASE_DB - innobase_data_file_path=argument; + innobase_data_file_path= argument; #endif break; #ifdef HAVE_INNOBASE_DB @@ -5163,22 +6511,50 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), ha_open_options|=HA_OPEN_ABORT_IF_CRASHED; break; } + case OPT_MYISAM_STATS_METHOD: + { + int method; + ulong method_conv; + myisam_stats_method_str= argument; + if ((method=find_type(argument, &myisam_stats_method_typelib, 2)) <= 0) + { + fprintf(stderr, "Invalid value of myisam_stats_method: %s.\n", argument); + exit(1); + } + switch (method-1) { + case 0: + method_conv= MI_STATS_METHOD_NULLS_EQUAL; + break; + case 1: + method_conv= MI_STATS_METHOD_NULLS_NOT_EQUAL; + break; + case 2: + method_conv= MI_STATS_METHOD_IGNORE_NULLS; + break; + } + global_system_variables.myisam_stats_method= method_conv; + break; + } case OPT_SQL_MODE: { - sql_mode_str = argument; - if ((opt_sql_mode = - find_bit_type(argument, &sql_mode_typelib)) == ~(ulong) 0) + sql_mode_str= argument; + if ((global_system_variables.sql_mode= + find_bit_type(argument, &sql_mode_typelib)) == ~(ulong) 0) { fprintf(stderr, "Unknown option to sql-mode: %s\n", argument); exit(1); } - global_system_variables.tx_isolation= ((opt_sql_mode & MODE_SERIALIZABLE) ? - ISO_SERIALIZABLE : - ISO_REPEATABLE_READ); + global_system_variables.sql_mode= fix_sql_mode(global_system_variables. + sql_mode); break; } - case OPT_MASTER_PASSWORD: - master_password=argument; + case OPT_FT_BOOLEAN_SYNTAX: + if (ft_boolean_check_syntax_string((byte*) argument)) + { + fprintf(stderr, "Invalid ft-boolean-syntax string: %s\n", argument); + exit(1); + } + strmake(ft_boolean_syntax, argument, sizeof(ft_boolean_syntax)-1); break; case OPT_SKIP_SAFEMALLOC: #ifdef SAFEMALLOC @@ -5192,6 +6568,35 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), } return 0; } + /* Initiates DEBUG - but no debugging here ! 
*/ + +extern "C" gptr * +mysql_getopt_value(const char *keyname, uint key_length, + const struct my_option *option) +{ + switch (option->id) { + case OPT_KEY_BUFFER_SIZE: + case OPT_KEY_CACHE_BLOCK_SIZE: + case OPT_KEY_CACHE_DIVISION_LIMIT: + case OPT_KEY_CACHE_AGE_THRESHOLD: + { + KEY_CACHE *key_cache; + if (!(key_cache= get_or_create_key_cache(keyname, key_length))) + exit(1); + switch (option->id) { + case OPT_KEY_BUFFER_SIZE: + return (gptr*) &key_cache->param_buff_size; + case OPT_KEY_CACHE_BLOCK_SIZE: + return (gptr*) &key_cache->param_block_size; + case OPT_KEY_CACHE_DIVISION_LIMIT: + return (gptr*) &key_cache->param_division_limit; + case OPT_KEY_CACHE_AGE_THRESHOLD: + return (gptr*) &key_cache->param_age_threshold; + } + } + } + return option->value; +} void option_error_reporter(enum loglevel level, const char *format, ...) @@ -5202,16 +6607,67 @@ void option_error_reporter(enum loglevel level, const char *format, ...) va_end(args); } - /* Initiates DEBUG - but no debugging here ! */ static void get_options(int argc,char **argv) { int ho_error; + my_getopt_register_get_addr(mysql_getopt_value); + strmake(def_ft_boolean_syntax, ft_boolean_syntax, + sizeof(ft_boolean_syntax)-1); my_getopt_error_reporter= option_error_reporter; - if ((ho_error= handle_options(&argc, &argv, my_long_options, get_one_option))) + if ((ho_error= handle_options(&argc, &argv, my_long_options, + get_one_option))) exit(ho_error); +#ifndef HAVE_NDBCLUSTER_DB + if (opt_ndbcluster) + sql_print_warning("this binary does not contain NDBCLUSTER storage engine"); +#endif +#ifndef HAVE_INNOBASE_DB + if (opt_innodb) + sql_print_warning("this binary does not contain INNODB storage engine"); +#endif +#ifndef HAVE_ISAM + if (opt_isam) + sql_print_warning("this binary does not contain ISAM storage engine"); +#endif +#ifndef HAVE_BERKELEY_DB + if (opt_bdb) + sql_print_warning("this binary does not contain BDB storage engine"); +#endif + if ((opt_log_slow_admin_statements || opt_log_queries_not_using_indexes) && + !opt_slow_log) + sql_print_warning("options --log-slow-admin-statements and --log-queries-not-using-indexes have no effect if --log-slow-queries is not set"); + + /* + Check that the default storage engine is actually available. + */ + if (!ha_storage_engine_is_enabled((enum db_type) + global_system_variables.table_type)) + { + if (!opt_bootstrap) + { + sql_print_error("Default storage engine (%s) is not available", + ha_get_storage_engine((enum db_type) + global_system_variables.table_type)); + exit(1); + } + global_system_variables.table_type= DB_TYPE_MYISAM; + } + + if (argc > 0) + { + fprintf(stderr, "%s: Too many arguments (first extra is '%s').\nUse --help to get a list of available options\n", my_progname, *argv); + /* FIXME add EXIT_TOO_MANY_ARGUMENTS to "mysys_err.h" and return that code? 
*/ + exit(1); + } + + if (opt_help) + { + usage(); + exit(0); + } #if defined(HAVE_BROKEN_REALPATH) my_use_symdir=0; my_disable_symlinks=1; @@ -5232,8 +6688,13 @@ static void get_options(int argc,char **argv) /* Set global MyISAM variables from delay_key_write_options */ fix_delay_key_write((THD*) 0, OPT_GLOBAL); +#ifndef EMBEDDED_LIBRARY if (mysqld_chroot) set_root(mysqld_chroot); +#else + max_allowed_packet= global_system_variables.max_allowed_packet; + net_buffer_length= global_system_variables.net_buffer_length; +#endif fix_paths(); /* @@ -5244,11 +6705,24 @@ static void get_options(int argc,char **argv) my_default_record_cache_size=global_system_variables.read_buff_size; myisam_max_temp_length= (my_off_t) global_system_variables.myisam_max_sort_file_size; - myisam_max_extra_temp_length= + myisam_max_extra_temp_length= (my_off_t) global_system_variables.myisam_max_extra_sort_file_size; /* Set global variables based on startup options */ myisam_block_size=(uint) 1 << my_bit_log2(opt_myisam_block_size); + + if (opt_short_log_format) + opt_specialflag|= SPECIAL_SHORT_LOG_FORMAT; + if (opt_log_queries_not_using_indexes) + opt_specialflag|= SPECIAL_LOG_QUERIES_NOT_USING_INDEXES; + + if (init_global_datetime_format(MYSQL_TIMESTAMP_DATE, + &global_system_variables.date_format) || + init_global_datetime_format(MYSQL_TIMESTAMP_TIME, + &global_system_variables.time_format) || + init_global_datetime_format(MYSQL_TIMESTAMP_DATETIME, + &global_system_variables.datetime_format)) + exit(1); } @@ -5343,96 +6817,23 @@ static void fix_paths(void) { strxnmov(mysql_charsets_dir, sizeof(mysql_charsets_dir)-1, buff, CHARSET_DIR, NullS); - charsets_dir=mysql_charsets_dir; } + (void) my_load_path(mysql_charsets_dir, mysql_charsets_dir, buff); + charsets_dir=mysql_charsets_dir; - char *end=convert_dirname(buff, opt_mysql_tmpdir, NullS); - if (!(mysql_tmpdir= my_memdup((byte*) buff,(uint) (end-buff)+1, - MYF(MY_FAE)))) + if (init_tmpdir(&mysql_tmpdir_list, opt_mysql_tmpdir)) exit(1); +#ifdef HAVE_REPLICATION if (!slave_load_tmpdir) { if (!(slave_load_tmpdir = (char*) my_strdup(mysql_tmpdir, MYF(MY_FAE)))) exit(1); } +#endif /* HAVE_REPLICATION */ } /* - set how many open files we want to be able to handle - - SYNOPSIS - set_maximum_open_files() - max_file_limit Files to open - - NOTES - The request may not fulfilled becasue of system limitations - - RETURN - Files available to open -*/ - -#ifdef SET_RLIMIT_NOFILE -static uint set_maximum_open_files(uint max_file_limit) -{ - struct rlimit rlimit; - ulong old_cur; - - if (!getrlimit(RLIMIT_NOFILE,&rlimit)) - { - old_cur=rlimit.rlim_cur; - if (rlimit.rlim_cur >= max_file_limit) // Nothing to do - return rlimit.rlim_cur; /* purecov: inspected */ - rlimit.rlim_cur=rlimit.rlim_max=max_file_limit; - if (setrlimit(RLIMIT_NOFILE,&rlimit)) - { - if (global_system_variables.log_warnings) - sql_print_error("Warning: setrlimit couldn't increase number of open files to more than %lu (request: %u)", - old_cur, max_file_limit); /* purecov: inspected */ - max_file_limit=old_cur; - } - else - { - (void) getrlimit(RLIMIT_NOFILE,&rlimit); - if ((uint) rlimit.rlim_cur != max_file_limit && - global_system_variables.log_warnings) - sql_print_error("Warning: setrlimit returned ok, but didn't change limits. 
Max open files is %ld (request: %u)", - (ulong) rlimit.rlim_cur, - max_file_limit); /* purecov: inspected */ - max_file_limit=rlimit.rlim_cur; - } - } - return max_file_limit; -} -#endif - -#ifdef OS2 -static uint set_maximum_open_files(uint max_file_limit) -{ - LONG cbReqCount; - ULONG cbCurMaxFH, cbCurMaxFH0; - APIRET ulrc; - - // get current limit - cbReqCount = 0; - DosSetRelMaxFH( &cbReqCount, &cbCurMaxFH0); - - // set new limit - cbReqCount = max_file_limit - cbCurMaxFH0; - ulrc = DosSetRelMaxFH( &cbReqCount, &cbCurMaxFH); - if (ulrc) - { - if (global_system_variables.log_warnings) - sql_print_error("Warning: DosSetRelMaxFH couldn't increase number of open files to more than %d", - cbCurMaxFH0); - cbCurMaxFH = cbCurMaxFH0; - } - - return cbCurMaxFH; -} -#endif - -/* Return a bitfield from a string of substrings separated by ',' returns ~(ulong) 0 on error. */ @@ -5466,8 +6867,9 @@ static ulong find_bit_type(const char *x, TYPELIB *bit_lib) j=pos; while (j != end) { - if (toupper(*i++) != toupper(*j++)) - goto skipp; + if (my_toupper(mysqld_charset,*i++) != + my_toupper(mysqld_charset,*j++)) + goto skip; } found_int=bit; if (! *i) @@ -5479,7 +6881,7 @@ static ulong find_bit_type(const char *x, TYPELIB *bit_lib) { found_count++; // Could be one of two values } -skipp: ; +skip: ; } if (found_count != 1) DBUG_RETURN(~(ulong) 0); // No unique value @@ -5520,7 +6922,7 @@ static int test_if_case_insensitive(const char *dir_name) (void) my_delete(buff2, MYF(0)); if ((file= my_create(buff, 0666, O_RDWR, MYF(0))) < 0) { - sql_print_error("Warning: Can't create test file %s", buff); + sql_print_warning("Can't create test file %s", buff); DBUG_RETURN(-1); } my_close(file, MYF(0)); @@ -5541,11 +6943,17 @@ static void create_pid_file() O_WRONLY | O_TRUNC, MYF(MY_WME))) >= 0) { char buff[21], *end; - end= int2str((long) getpid(), buff, 10); + end= int10_to_str((long) getpid(), buff, 10); *end++= '\n'; - (void) my_write(file, (byte*) buff, (uint) (end-buff),MYF(MY_WME)); + if (!my_write(file, (byte*) buff, (uint) (end-buff), MYF(MY_WME | MY_NABP))) + { + (void) my_close(file, MYF(0)); + return; + } (void) my_close(file, MYF(0)); } + sql_perror("Can't start server: can't create PID file"); + exit(1); } @@ -5559,6 +6967,6 @@ template class I_List<THD>; template class I_List_iterator<THD>; template class I_List<i_string>; template class I_List<i_string_pair>; - +template class I_List<NAMED_LIST>; FIX_GCC_LINKING_PROBLEM #endif diff --git a/sql/net_pkg.cc b/sql/net_pkg.cc deleted file mode 100644 index df77d0347f2..00000000000 --- a/sql/net_pkg.cc +++ /dev/null @@ -1,407 +0,0 @@ -/* Copyright (C) 2000-2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - - -#include "mysql_priv.h" -#include <stdarg.h> - - /* Send a error string to client */ - -void send_error(NET *net, uint sql_errno, const char *err) -{ - uint length; - char buff[MYSQL_ERRMSG_SIZE+2]; - THD *thd=current_thd; - DBUG_ENTER("send_error"); - DBUG_PRINT("enter",("sql_errno: %d err: %s", sql_errno, - err ? err : net->last_error[0] ? - net->last_error : "NULL")); - - query_cache_abort(net); - if (thd) - thd->query_error = 1; // needed to catch query errors during replication - if (!err) - { - if (sql_errno) - err=ER(sql_errno); - else - { - if ((err=net->last_error)[0]) - sql_errno=net->last_errno; - else - { - sql_errno=ER_UNKNOWN_ERROR; - err=ER(sql_errno); /* purecov: inspected */ - } - } - } - if (net->vio == 0) - { - if (thd && thd->bootstrap) - { - /* In bootstrap it's ok to print on stderr */ - fprintf(stderr,"ERROR: %d %s\n",sql_errno,err); - } - DBUG_VOID_RETURN; - } - - if (net->return_errno) - { // new client code; Add errno before message - int2store(buff,sql_errno); - length= (uint) (strmake(buff+2,err,MYSQL_ERRMSG_SIZE-1) - buff); - err=buff; - } - else - { - length=(uint) strlen(err); - set_if_smaller(length,MYSQL_ERRMSG_SIZE); - } - VOID(net_write_command(net,(uchar) 255,(char*) err,length)); - if (thd) - thd->fatal_error=0; // Error message is given - DBUG_VOID_RETURN; -} - -/* - At some point we need to be able to distinguish between warnings and - errors; The following function will help make this easier. -*/ - -void send_warning(NET *net, uint sql_errno, const char *err) -{ - DBUG_ENTER("send_warning"); - send_error(net,sql_errno,err); - DBUG_VOID_RETURN; -} - - -/* - Write error package and flush to client - It's a little too low level, but I don't want to allow another buffer -*/ -/* VARARGS3 */ - -void -net_printf(NET *net, uint errcode, ...) -{ - va_list args; - uint length,offset; - const char *format,*text_pos; - int head_length= NET_HEADER_SIZE; - THD *thd=current_thd; - DBUG_ENTER("net_printf"); - DBUG_PRINT("enter",("message: %u",errcode)); - - if (thd) - thd->query_error = 1; // if we are here, something is wrong :-) - query_cache_abort(net); // Safety - va_start(args,errcode); - /* - The following is needed to make net_printf() work with 0 argument for - errorcode and use the argument after that as the format string. This - is useful for rare errors that are not worth the hassle to put in - errmsg.sys, but at the same time, the message is not fixed text - */ - if (errcode) - format= ER(errcode); - else - { - format=va_arg(args,char*); - errcode= ER_UNKNOWN_ERROR; - } - offset= net->return_errno ? 
2 : 0; - text_pos=(char*) net->buff+head_length+offset+1; - (void) my_vsnprintf(my_const_cast(char*) (text_pos), - (char*)net->buff_end-text_pos, - format,args); - length=(uint) strlen((char*) text_pos); - if (length >= sizeof(net->last_error)) - length=sizeof(net->last_error)-1; /* purecov: inspected */ - va_end(args); - - /* Replication slave relies on net->last_* to see if there was error */ - net->last_errno= errcode; - strmake(net->last_error, text_pos, sizeof(net->last_error)-1); - - if (net->vio == 0) - { - if (thd && thd->bootstrap) - { - /* - In bootstrap it's ok to print on stderr - This may also happen when we get an error from a slave thread - */ - fprintf(stderr,"ERROR: %d %s\n",errcode,text_pos); - thd->fatal_error=1; - } - DBUG_VOID_RETURN; - } - - int3store(net->buff,length+1+offset); - net->buff[3]= (net->compress) ? 0 : (uchar) (net->pkt_nr++); - net->buff[head_length]=(uchar) 255; // Error package - if (offset) - int2store(text_pos-2, errcode); - VOID(net_real_write(net,(char*) net->buff,length+head_length+1+offset)); - if (thd) - thd->fatal_error=0; // Error message is given - DBUG_VOID_RETURN; -} - - -void -send_ok(NET *net,ha_rows affected_rows,ulonglong id,const char *message) -{ - if (net->no_send_ok) // hack for re-parsing queries - return; - - char buff[MYSQL_ERRMSG_SIZE+10],*pos; - DBUG_ENTER("send_ok"); - buff[0]=0; // No fields - pos=net_store_length(buff+1,(ulonglong) affected_rows); - pos=net_store_length(pos, (ulonglong) id); - if (net->return_status) - { - int2store(pos,*net->return_status); - pos+=2; - } - if (message) - pos=net_store_data((char*) pos,message); - if (net->vio != 0) - { - VOID(my_net_write(net,buff,(uint) (pos-buff))); - VOID(net_flush(net)); - } - DBUG_VOID_RETURN; -} - -void -send_eof(NET *net,bool no_flush) -{ - static char eof_buff[1]= { (char) 254 }; /* Marker for end of fields */ - DBUG_ENTER("send_eof"); - if (net->vio != 0) - { - VOID(my_net_write(net,eof_buff,1)); - if (!no_flush) - VOID(net_flush(net)); - } - DBUG_VOID_RETURN; -} - - -/**************************************************************************** -** Store a field length in logical packet -****************************************************************************/ - -char * -net_store_length(char *pkg, ulonglong length) -{ - uchar *packet=(uchar*) pkg; - if (length < LL(251)) - { - *packet=(uchar) length; - return (char*) packet+1; - } - /* 251 is reserved for NULL */ - if (length < LL(65536)) - { - *packet++=252; - int2store(packet,(uint) length); - return (char*) packet+2; - } - if (length < LL(16777216)) - { - *packet++=253; - int3store(packet,(ulong) length); - return (char*) packet+3; - } - *packet++=254; - int8store(packet,length); - return (char*) packet+8; -} - -char * -net_store_length(char *pkg, uint length) -{ - uchar *packet=(uchar*) pkg; - if (length < 251) - { - *packet=(uchar) length; - return (char*) packet+1; - } - *packet++=252; - int2store(packet,(uint) length); - return (char*) packet+2; -} - -/* The following will only be used for short strings < 65K */ -char * -net_store_data(char *to,const char *from) -{ - uint length=(uint) strlen(from); - to=net_store_length(to,length); - memcpy(to,from,length); - return to+length; -} - - -char * -net_store_data(char *to,int32 from) -{ - char buff[20]; - uint length=(uint) (int10_to_str(from,buff,10)-buff); - to=net_store_length(to,length); - memcpy(to,buff,length); - return to+length; -} - -char * -net_store_data(char *to,longlong from) -{ - char buff[22]; - uint length=(uint) 
(longlong10_to_str(from,buff,10)-buff); - to=net_store_length(to,length); - memcpy(to,buff,length); - return to+length; -} - - -bool net_store_null(String *packet) -{ - return packet->append((char) 251); -} - -bool -net_store_data(String *packet,const char *from,uint length) -{ - ulong packet_length=packet->length(); - if (packet_length+9+length > packet->alloced_length() && - packet->realloc(packet_length+9+length)) - return 1; - char *to=(char*) net_store_length((char*) packet->ptr()+packet_length, - (ulonglong) length); - memcpy(to,from,length); - packet->length((uint) (to+length-packet->ptr())); - return 0; -} - -/* The following is only used at short, null terminated data */ - -bool -net_store_data(String *packet,const char *from) -{ - uint length=(uint) strlen(from); - uint packet_length=packet->length(); - /* - 3 is the longest coding for storing a string with the used - net_store_length() function. We use 5 here 'just in case' - */ - if (packet_length+5+length > packet->alloced_length() && - packet->realloc(packet_length+5+length)) - return 1; - char *to=(char*) net_store_length((char*) packet->ptr()+packet_length, - length); - memcpy(to,from,length); - packet->length((uint) (to+length-packet->ptr())); - return 0; -} - - -bool -net_store_data(String *packet,uint32 from) -{ - char buff[20]; - return net_store_data(packet,(char*) buff, - (uint) (int10_to_str(from,buff,10)-buff)); -} - -bool -net_store_data(String *packet, longlong from) -{ - char buff[22]; - return net_store_data(packet,(char*) buff, - (uint) (longlong10_to_str(from,buff,10)-buff)); -} - -bool -net_store_data(String *packet,struct tm *tmp) -{ - char buff[20]; - sprintf(buff,"%04d-%02d-%02d %02d:%02d:%02d", - ((int) (tmp->tm_year+1900)) % 10000, - (int) tmp->tm_mon+1, - (int) tmp->tm_mday, - (int) tmp->tm_hour, - (int) tmp->tm_min, - (int) tmp->tm_sec); - return net_store_data(packet,(char*) buff,19); -} - -bool net_store_data(String* packet, I_List<i_string>* str_list) -{ - char buf[256]; - String tmp(buf, sizeof(buf)); - tmp.length(0); - I_List_iterator<i_string> it(*str_list); - i_string* s; - - while ((s=it++)) - { - if (tmp.length()) - tmp.append(','); - tmp.append(s->ptr); - } - - return net_store_data(packet, (char*)tmp.ptr(), tmp.length()); -} - -/* -** translate and store data; These are mainly used by the SHOW functions -*/ - -bool -net_store_data(String *packet,CONVERT *convert, const char *from,uint length) -{ - if (convert) - return convert->store(packet, from, length); - return net_store_data(packet,from,length); -} - -bool -net_store_data(String *packet, CONVERT *convert, const char *from) -{ - uint length=(uint) strlen(from); - if (convert) - return convert->store(packet, from, length); - return net_store_data(packet,from,length); -} - -/* - Function called by my_net_init() to set some check variables -*/ - -extern "C" { -void my_net_local_init(NET *net) -{ - net->max_packet= (uint) global_system_variables.net_buffer_length; - net->read_timeout= (uint) global_system_variables.net_read_timeout; - net->write_timeout=(uint) global_system_variables.net_write_timeout; - net->retry_count= (uint) global_system_variables.net_retry_count; - net->max_packet_size= max(global_system_variables.net_buffer_length, - global_system_variables.max_allowed_packet); -} -} diff --git a/sql/net_serv.cc b/sql/net_serv.cc index 1e66bfc3e19..93fa7ac938c 100644 --- a/sql/net_serv.cc +++ b/sql/net_serv.cc @@ -15,6 +15,13 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* + This file is the net layer 
API for the MySQL client/server protocol, + which is a tightly coupled, proprietary protocol owned by MySQL AB. + Any re-implementations of this protocol must also be under GPL + unless one has got an license from MySQL AB stating otherwise. +*/ + +/* Write and read of logical packets to/from socket Writes are cached into net_buffer_length big packets. @@ -26,6 +33,10 @@ C file. */ +/* + HFTODO this must be hidden if we don't want client capabilities in + embedded library + */ #ifdef __WIN__ #include <winsock.h> #endif @@ -41,6 +52,13 @@ #include <signal.h> #include <errno.h> +#ifdef EMBEDDED_LIBRARY +#undef MYSQL_SERVER +#undef MYSQL_CLIENT +#define MYSQL_CLIENT +#endif /*EMBEDDED_LIBRARY */ + + /* The following handles the differences when this is linked between the client and the server. @@ -66,10 +84,15 @@ void sql_print_error(const char *format,...); #ifdef MYSQL_SERVER #define USE_QUERY_CACHE +/* + The following variables/functions should really not be declared + extern, but as it's hard to include mysql_priv.h here, we have to + live with this for a while. +*/ extern uint test_flags; -extern void query_cache_insert(NET *net, const char *packet, ulong length); extern ulong bytes_sent, bytes_received, net_big_packet_count; extern pthread_mutex_t LOCK_bytes_sent , LOCK_bytes_received; +extern void query_cache_insert(NET *net, const char *packet, ulong length); #else #undef statistic_add #undef statistic_increment @@ -85,7 +108,7 @@ static my_bool net_write_buff(NET *net,const char *packet,ulong len); /* Init with packet info */ -int my_net_init(NET *net, Vio* vio) +my_bool my_net_init(NET *net, Vio* vio) { DBUG_ENTER("my_net_init"); my_net_local_init(net); /* Set some limits */ @@ -104,11 +127,12 @@ int my_net_init(NET *net, Vio* vio) net->where_b = net->remain_in_buf=0; net->last_errno=0; net->query_cache_query=0; + net->report_error= 0; if (vio != 0) /* If real connection */ { net->fd = vio_fd(vio); /* For perl DBI/DBD */ -#if defined(MYSQL_SERVER) && !defined(___WIN__) && !defined(__EMX__) && !defined(OS2) +#if defined(MYSQL_SERVER) && !defined(__WIN__) && !defined(__EMX__) && !defined(OS2) if (!(test_flags & TEST_BLOCKING)) { my_bool old_mode; @@ -132,7 +156,7 @@ void net_end(NET *net) /* Realloc the packet buffer */ -static my_bool net_realloc(NET *net, ulong length) +my_bool net_realloc(NET *net, ulong length) { uchar *buff; ulong pkt_length; @@ -141,10 +165,11 @@ static my_bool net_realloc(NET *net, ulong length) if (length >= net->max_packet_size) { - DBUG_PRINT("error",("Packet too large. Max sixe: %lu", - net->max_packet_size)); - net->error=1; - net->last_errno=ER_NET_PACKET_TOO_LARGE; + DBUG_PRINT("error", ("Packet too large. Max size: %lu", + net->max_packet_size)); + net->error= 1; + net->report_error= 1; + net->last_errno= ER_NET_PACKET_TOO_LARGE; DBUG_RETURN(1); } pkt_length = (length+IO_SIZE-1) & ~(IO_SIZE-1); @@ -156,10 +181,9 @@ static my_bool net_realloc(NET *net, ulong length) NET_HEADER_SIZE + COMP_HEADER_SIZE, MYF(MY_WME)))) { - net->error=1; -#ifdef MYSQL_SERVER - net->last_errno=ER_OUT_OF_RESOURCES; -#endif + net->error= 1; + net->report_error= 1; + net->last_errno= ER_OUT_OF_RESOURCES; DBUG_RETURN(1); } net->buff=net->write_pos=buff; @@ -193,14 +217,14 @@ void net_clear(NET *net) /* Flush write_buffer if not empty. 
*/ -int net_flush(NET *net) +my_bool net_flush(NET *net) { - int error=0; + my_bool error= 0; DBUG_ENTER("net_flush"); if (net->buff != net->write_pos) { - error=net_real_write(net,(char*) net->buff, - (ulong) (net->write_pos - net->buff)); + error=test(net_real_write(net,(char*) net->buff, + (ulong) (net->write_pos - net->buff))); net->write_pos=net->buff; } /* Sync packet number if using compression */ @@ -223,7 +247,7 @@ int net_flush(NET *net) If compression is used the original package is modified! */ -int +my_bool my_net_write(NET *net,const char *packet,ulong len) { uchar buff[NET_HEADER_SIZE]; @@ -250,23 +274,46 @@ my_net_write(NET *net,const char *packet,ulong len) buff[3]= (uchar) net->pkt_nr++; if (net_write_buff(net,(char*) buff,NET_HEADER_SIZE)) return 1; +#ifndef DEBUG_DATA_PACKETS DBUG_DUMP("packet_header",(char*) buff,NET_HEADER_SIZE); +#endif return test(net_write_buff(net,packet,len)); } /* Send a command to the server. - As the command is part of the first data packet, we have to do some data - juggling to put the command in there, without having to create a new - packet. - This function will split big packets into sub-packets if needed. - (Each sub packet can only be 2^24 bytes) + + SYNOPSIS + net_write_command() + net NET handler + command Command in MySQL server (enum enum_server_command) + header Header to write after command + head_len Length of header + packet Query or parameter to query + len Length of packet + + DESCRIPTION + The reason for having both header and packet is so that libmysql + can easy add a header to a special command (like prepared statements) + without having to re-alloc the string. + + As the command is part of the first data packet, we have to do some data + juggling to put the command in there, without having to create a new + packet. + This function will split big packets into sub-packets if needed. 
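    As an illustration only (net, query and query_len are made-up names and
    the byte counts are approximate), a large simple command such as

      net_write_command(net, (uchar) COM_QUERY, NullS, 0, query, query_len);

    ends up on the wire as a chain of sub-packets:

      [4-byte length/seq header][command byte + first ~16 MB of query]
      [4-byte length/seq header][next ~16 MB of query]
      [4-byte length/seq header][remaining bytes]

    The command byte and the optional 'header' argument travel only in the
    first sub-packet.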
+ (Each sub packet can only be 2^24 bytes) + + RETURN VALUES + 0 ok + 1 error */ -int -net_write_command(NET *net,uchar command,const char *packet,ulong len) +my_bool +net_write_command(NET *net,uchar command, + const char *header, ulong head_len, + const char *packet, ulong len) { - ulong length=len+1; /* 1 extra byte for command */ + ulong length=len+1+head_len; /* 1 extra byte for command */ uchar buff[NET_HEADER_SIZE+1]; uint header_size=NET_HEADER_SIZE+1; DBUG_ENTER("net_write_command"); @@ -277,25 +324,28 @@ net_write_command(NET *net,uchar command,const char *packet,ulong len) if (length >= MAX_PACKET_LENGTH) { /* Take into account that we have the command in the first header */ - len= MAX_PACKET_LENGTH -1; + len= MAX_PACKET_LENGTH - 1 - head_len; do { int3store(buff, MAX_PACKET_LENGTH); buff[3]= (uchar) net->pkt_nr++; if (net_write_buff(net,(char*) buff, header_size) || - net_write_buff(net,packet,len)) + net_write_buff(net, header, head_len) || + net_write_buff(net, packet, len)) DBUG_RETURN(1); packet+= len; length-= MAX_PACKET_LENGTH; len= MAX_PACKET_LENGTH; + head_len= 0; header_size= NET_HEADER_SIZE; } while (length >= MAX_PACKET_LENGTH); len=length; /* Data left to be written */ } int3store(buff,length); buff[3]= (uchar) net->pkt_nr++; - DBUG_RETURN(test(net_write_buff(net,(char*) buff,header_size) || - net_write_buff(net,packet,len) || net_flush(net))); + DBUG_RETURN(test(net_write_buff(net, (char*) buff, header_size) || + (head_len && net_write_buff(net, (char*) header, head_len)) || + net_write_buff(net, packet, len) || net_flush(net))); } /* @@ -336,6 +386,9 @@ net_write_buff(NET *net,const char *packet,ulong len) else left_length= (ulong) (net->buff_end - net->write_pos); +#ifdef DEBUG_DATA_PACKETS + DBUG_DUMP("data", packet, len); +#endif if (len > left_length) { if (net->write_pos != net->buff) @@ -411,10 +464,12 @@ net_real_write(NET *net,const char *packet,ulong len) COMP_HEADER_SIZE, MYF(MY_WME)))) { #ifdef MYSQL_SERVER - net->last_errno=ER_OUT_OF_RESOURCES; - net->error=2; + net->last_errno= ER_OUT_OF_RESOURCES; + net->error= 2; + /* TODO is it needed to set this variable if we have no socket */ + net->report_error= 1; #endif - net->reading_or_writing=0; + net->reading_or_writing= 0; DBUG_RETURN(1); } memcpy(b+header_length,packet,len); @@ -436,7 +491,7 @@ net_real_write(NET *net,const char *packet,ulong len) thr_alarm(&alarmed,(uint) net->write_timeout,&alarm_buff); #else alarmed=0; - vio_timeout(net->vio, net->write_timeout); + vio_timeout(net->vio, 1, net->write_timeout); #endif /* NO_ALARM */ pos=(char*) packet; end=pos+len; @@ -461,9 +516,10 @@ net_real_write(NET *net,const char *packet,ulong len) my_progname,vio_errno(net->vio)); #endif /* EXTRA_DEBUG */ #ifdef MYSQL_SERVER - net->last_errno=ER_NET_ERROR_ON_WRITE; + net->last_errno= ER_NET_ERROR_ON_WRITE; #endif - net->error=2; /* Close socket */ + net->error= 2; /* Close socket */ + net->report_error= 1; goto end; } retry_count=0; @@ -489,7 +545,8 @@ net_real_write(NET *net,const char *packet,ulong len) continue; } #endif /* defined(THREAD_SAFE_CLIENT) && !defined(MYSQL_SERVER) */ - net->error=2; /* Close socket */ + net->error= 2; /* Close socket */ + net->report_error= 1; #ifdef MYSQL_SERVER net->last_errno= (interrupted ? 
ER_NET_WRITE_INTERRUPTED : ER_NET_ERROR_ON_WRITE); @@ -627,7 +684,7 @@ my_real_read(NET *net, ulong *complen) if (net_blocking) thr_alarm(&alarmed,net->read_timeout,&alarm_buff); #else - vio_timeout(net->vio, net->read_timeout); + vio_timeout(net->vio, 0, net->read_timeout); #endif /* NO_ALARM */ pos = net->buff + net->where_b; /* net->packet -4 */ @@ -667,9 +724,10 @@ my_real_read(NET *net, ulong *complen) my_progname,vio_errno(net->vio)); #endif /* EXTRA_DEBUG */ len= packet_error; - net->error=2; /* Close socket */ + net->error= 2; /* Close socket */ + net->report_error= 1; #ifdef MYSQL_SERVER - net->last_errno=ER_NET_FCNTL_ERROR; + net->last_errno= ER_NET_FCNTL_ERROR; #endif goto end; } @@ -698,7 +756,8 @@ my_real_read(NET *net, ulong *complen) DBUG_PRINT("error",("Couldn't read packet: remain: %u errno: %d length: %ld", remain, vio_errno(net->vio), length)); len= packet_error; - net->error=2; /* Close socket */ + net->error= 2; /* Close socket */ + net->report_error= 1; #ifdef MYSQL_SERVER net->last_errno= (vio_was_interrupted(net->vio) ? ER_NET_READ_INTERRUPTED : ER_NET_READ_ERROR); @@ -712,6 +771,8 @@ my_real_read(NET *net, ulong *complen) if (i == 0) { /* First parts is packet length */ ulong helping; + DBUG_DUMP("packet_header",(char*) net->buff+net->where_b, + NET_HEADER_SIZE); if (net->buff[net->where_b + 3] != (uchar) net->pkt_nr) { if (net->buff[net->where_b] != (uchar) 255) @@ -720,7 +781,6 @@ my_real_read(NET *net, ulong *complen) ("Packets out of order (Found: %d, expected %u)", (int) net->buff[net->where_b + 3], net->pkt_nr)); - DBUG_DUMP("packet_header",(char*) net->buff+net->where_b, 4); #ifdef EXTRA_DEBUG fprintf(stderr,"Packets out of order (Found: %d, expected %d)\n", (int) net->buff[net->where_b + 3], @@ -728,6 +788,7 @@ my_real_read(NET *net, ulong *complen) #endif } len= packet_error; + net->report_error= 1; #ifdef MYSQL_SERVER net->last_errno=ER_NET_PACKETS_OUT_OF_ORDER; #endif @@ -776,6 +837,10 @@ end: vio_blocking(net->vio, net_blocking, &old_mode); } net->reading_or_writing=0; +#ifdef DEBUG_DATA_PACKETS + if (len != packet_error) + DBUG_DUMP("data",(char*) net->buff+net->where_b, len); +#endif return(len); } @@ -908,7 +973,8 @@ my_net_read(NET *net) if (my_uncompress((byte*) net->buff + net->where_b, &packet_len, &complen)) { - net->error=2; /* caller will close socket */ + net->error= 2; /* caller will close socket */ + net->report_error= 1; #ifdef MYSQL_SERVER net->last_errno=ER_NET_UNCOMPRESS_ERROR; #endif @@ -929,12 +995,3 @@ my_net_read(NET *net) return len; } -bool net_request_file(NET* net, const char* fname) -{ - char tmp [FN_REFLEN+1],*end; - DBUG_ENTER("net_request_file"); - tmp[0] = (char) 251; /* NULL_LENGTH */ - end=strnmov(tmp+1,fname,sizeof(tmp)-2); - DBUG_RETURN(my_net_write(net,tmp,(uint) (end-tmp)) || - net_flush(net)); -} diff --git a/sql/nt_servc.cc b/sql/nt_servc.cc index 8ae3aeff2a6..a04f284a3de 100644 --- a/sql/nt_servc.cc +++ b/sql/nt_servc.cc @@ -431,7 +431,7 @@ BOOL NTService::SeekStatus(LPCSTR szInternName, int OperationType) if (ret_error == ERROR_ACCESS_DENIED) { printf("Install/Remove of the Service Denied!\n"); - if(!is_super_user()) + if (!is_super_user()) printf("That operation should be made by an user with Administrator privileges!\n"); } else @@ -530,13 +530,13 @@ BOOL NTService::is_super_user() UINT x; BOOL ret_value=FALSE; - if(!OpenThreadToken(GetCurrentThread(), TOKEN_QUERY, TRUE,&hAccessToken )) + if (!OpenThreadToken(GetCurrentThread(), TOKEN_QUERY, TRUE,&hAccessToken )) { - if(GetLastError() != ERROR_NO_TOKEN) - return 
FALSE; + if (GetLastError() != ERROR_NO_TOKEN) + return FALSE; - if(!OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY, &hAccessToken)) - return FALSE; + if (!OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY, &hAccessToken)) + return FALSE; } ret_value= GetTokenInformation(hAccessToken,TokenGroups,InfoBuffer, @@ -544,21 +544,21 @@ BOOL NTService::is_super_user() CloseHandle(hAccessToken); - if(!ret_value ) - return FALSE; + if (!ret_value ) + return FALSE; - if(!AllocateAndInitializeSid(&siaNtAuthority, 2, - SECURITY_BUILTIN_DOMAIN_RID, - DOMAIN_ALIAS_RID_ADMINS, - 0, 0, 0, 0, 0, 0, - &psidAdministrators)) - return FALSE; + if (!AllocateAndInitializeSid(&siaNtAuthority, 2, + SECURITY_BUILTIN_DOMAIN_RID, + DOMAIN_ALIAS_RID_ADMINS, + 0, 0, 0, 0, 0, 0, + &psidAdministrators)) + return FALSE; ret_value = FALSE; - for(x=0;x<ptgGroups->GroupCount;x++) + for (x=0;x<ptgGroups->GroupCount;x++) { - if( EqualSid(psidAdministrators, ptgGroups->Groups[x].Sid) ) + if ( EqualSid(psidAdministrators, ptgGroups->Groups[x].Sid) ) { ret_value = TRUE; break; diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 1bbf967b2bc..71f937f90c6 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -23,7 +23,7 @@ */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_IMPLEMENTATION #pragma implementation // gcc: Class implementation #endif @@ -31,8 +31,6 @@ #include <m_ctype.h> #include <nisam.h> #include "sql_select.h" -#include <assert.h> - #ifndef EXTRA_DEBUG #define test_rb_tree(A,B) {} @@ -172,17 +170,18 @@ public: void store(uint length,char **min_key,uint min_key_flag, char **max_key, uint max_key_flag) { - if (!(min_flag & NO_MIN_RANGE) && - !(min_key_flag & (NO_MIN_RANGE | NEAR_MIN))) + if ((min_flag & GEOM_FLAG) || + (!(min_flag & NO_MIN_RANGE) && + !(min_key_flag & (NO_MIN_RANGE | NEAR_MIN)))) { if (maybe_null && *min_value) { **min_key=1; - bzero(*min_key+1,length); + bzero(*min_key+1,length-1); } else - memcpy(*min_key,min_value,length+(int) maybe_null); - (*min_key)+= length+(int) maybe_null; + memcpy(*min_key,min_value,length); + (*min_key)+= length; } if (!(max_flag & NO_MAX_RANGE) && !(max_key_flag & (NO_MAX_RANGE | NEAR_MAX))) @@ -190,18 +189,18 @@ public: if (maybe_null && *max_value) { **max_key=1; - bzero(*max_key+1,length); + bzero(*max_key+1,length-1); } else - memcpy(*max_key,max_value,length+(int) maybe_null); - (*max_key)+= length+(int) maybe_null; + memcpy(*max_key,max_value,length); + (*max_key)+= length; } } void store_min_key(KEY_PART *key,char **range_key, uint *range_key_flag) { SEL_ARG *key_tree= first(); - key_tree->store(key[key_tree->part].part_length, + key_tree->store(key[key_tree->part].store_length, range_key,*range_key_flag,range_key,NO_MAX_RANGE); *range_key_flag|= key_tree->min_flag; if (key_tree->next_key_part && @@ -214,7 +213,7 @@ public: void store_max_key(KEY_PART *key,char **range_key, uint *range_key_flag) { SEL_ARG *key_tree= last(); - key_tree->store(key[key_tree->part].part_length, + key_tree->store(key[key_tree->part].store_length, range_key, NO_MIN_RANGE, range_key,*range_key_flag); (*range_key_flag)|= key_tree->max_flag; if (key_tree->next_key_part && @@ -293,14 +292,12 @@ typedef struct st_qsel_param { COND *cond; } PARAM; -static SEL_TREE * get_mm_parts(PARAM *param,Field *field, +static SEL_TREE * get_mm_parts(PARAM *param,COND *cond_func,Field *field, Item_func::Functype type,Item *value, Item_result cmp_type); -static SEL_ARG *get_mm_leaf(PARAM *param,Field *field,KEY_PART *key_part, +static SEL_ARG *get_mm_leaf(PARAM *param,COND *cond_func,Field *field, + KEY_PART 
*key_part, Item_func::Functype type,Item *value); -static bool like_range(const char *ptr,uint length,char wild_prefix, - uint field_length, char *min_str,char *max_str, - char max_sort_char,uint *min_length,uint *max_length); static SEL_TREE *get_mm_tree(PARAM *param,COND *cond); static ha_rows check_quick_select(PARAM *param,uint index,SEL_ARG *key_tree); static ha_rows check_quick_keys(PARAM *param,uint index,SEL_ARG *key_tree, @@ -310,7 +307,7 @@ static ha_rows check_quick_keys(PARAM *param,uint index,SEL_ARG *key_tree, static QUICK_SELECT *get_quick_select(PARAM *param,uint index, SEL_ARG *key_tree); #ifndef DBUG_OFF -static void print_quick(QUICK_SELECT *quick,key_map needed_reg); +static void print_quick(QUICK_SELECT *quick,const key_map* needed_reg); #endif static SEL_TREE *tree_and(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2); static SEL_TREE *tree_or(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2); @@ -355,13 +352,13 @@ SQL_SELECT *make_select(TABLE *head, table_map const_tables, select->head=head; select->cond=conds; - if (head->io_cache) + if (head->sort.io_cache) { - select->file= *head->io_cache; + select->file= *head->sort.io_cache; select->records=(ha_rows) (select->file.end_of_file/ head->file->ref_length); - my_free((gptr) (head->io_cache),MYF(0)); - head->io_cache=0; + my_free((gptr) (head->sort.io_cache),MYF(0)); + head->sort.io_cache=0; } DBUG_RETURN(select); } @@ -369,30 +366,41 @@ SQL_SELECT *make_select(TABLE *head, table_map const_tables, SQL_SELECT::SQL_SELECT() :quick(0),cond(0),free_cond(0) { - quick_keys=0; needed_reg=0; + quick_keys.clear_all(); needed_reg.clear_all(); my_b_clear(&file); } -SQL_SELECT::~SQL_SELECT() +void SQL_SELECT::cleanup() { delete quick; + quick= 0; if (free_cond) + { + free_cond=0; delete cond; + cond= 0; + } close_cached_file(&file); } + +SQL_SELECT::~SQL_SELECT() +{ + cleanup(); +} + #undef index // Fix for Unixware 7 QUICK_SELECT::QUICK_SELECT(THD *thd, TABLE *table, uint key_nr, bool no_alloc) - :dont_free(0),error(0),index(key_nr),max_used_key_length(0),head(table), - it(ranges),range(0) + :dont_free(0),sorted(0),error(0),index(key_nr),max_used_key_length(0), + used_key_parts(0), head(table), it(ranges),range(0) { if (!no_alloc) { // Allocates everything through the internal memroot init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0); - my_pthread_setspecific_ptr(THR_MALLOC,&alloc); + thd->mem_root= &alloc; } else bzero((char*) &alloc,sizeof(alloc)); @@ -405,7 +413,8 @@ QUICK_SELECT::~QUICK_SELECT() { if (!dont_free) { - file->index_end(); + if (file->inited) + file->ha_index_end(); free_root(&alloc,MYF(0)); } } @@ -573,46 +582,144 @@ SEL_ARG *SEL_ARG::clone_tree() return root; } -/***************************************************************************** -** Test if a key can be used in different ranges -** Returns: -** -1 if impossible select -** 0 if can't use quick_select -** 1 if found usable range -** Updates the following in the select parameter: -** needed_reg ; Bits for keys with may be used if all prev regs are read -** quick ; Parameter to use when reading records. 
-** In the table struct the following information is updated: -** quick_keys ; Which keys can be used -** quick_rows ; How many rows the key matches -*****************************************************************************/ + +/* + Find the best index to retrieve first N records in given order + + SYNOPSIS + get_index_for_order() + table Table to be accessed + order Required ordering + limit Number of records that will be retrieved + + DESCRIPTION + Find the best index that allows to retrieve first #limit records in the + given order cheaper then one would retrieve them using full table scan. + + IMPLEMENTATION + Run through all table indexes and find the shortest index that allows + records to be retrieved in given order. We look for the shortest index + as we will have fewer index pages to read with it. + + This function is used only by UPDATE/DELETE, so we take into account how + the UPDATE/DELETE code will work: + * index can only be scanned in forward direction + * HA_EXTRA_KEYREAD will not be used + Perhaps these assumptions could be relaxed + + RETURN + index number + MAX_KEY if no such index was found. +*/ + +uint get_index_for_order(TABLE *table, ORDER *order, ha_rows limit) +{ + uint idx; + uint match_key= MAX_KEY, match_key_len= MAX_KEY_LENGTH + 1; + ORDER *ord; + + for (ord= order; ord; ord= ord->next) + if (!ord->asc) + return MAX_KEY; + + for (idx= 0; idx < table->keys; idx++) + { + if (!(table->keys_in_use_for_query.is_set(idx))) + continue; + KEY_PART_INFO *keyinfo= table->key_info[idx].key_part; + uint partno= 0; + + /* + The below check is sufficient considering we now have either BTREE + indexes (records are returned in order for any index prefix) or HASH + indexes (records are not returned in order for any index prefix). + */ + if (!(table->file->index_flags(idx, 0, 1) & HA_READ_ORDER)) + continue; + for (ord= order; ord; ord= ord->next, partno++) + { + Item *item= order->item[0]; + if (!(item->type() == Item::FIELD_ITEM && + ((Item_field*)item)->field->eq(keyinfo[partno].field))) + break; + } + + if (!ord && table->key_info[idx].key_length < match_key_len) + { + /* + Ok, the ordering is compatible and this key is shorter then + previous match (we want shorter keys as we'll have to read fewer + index pages for the same number of records) + */ + match_key= idx; + match_key_len= table->key_info[idx].key_length; + } + } + + if (match_key != MAX_KEY) + { + /* + Found an index that allows records to be retrieved in the requested + order. Now we'll check if using the index is cheaper then doing a table + scan. + */ + double full_scan_time= table->file->scan_time(); + double index_scan_time= table->file->read_time(match_key, 1, limit); + if (index_scan_time > full_scan_time) + match_key= MAX_KEY; + } + return match_key; +} + + +/* + Test if a key can be used in different ranges + + SYNOPSIS + SQL_SELECT::test_quick_select(thd,keys_to_use, prev_tables, + limit, force_quick_range) + + Updates the following in the select parameter: + needed_reg - Bits for keys with may be used if all prev regs are read + quick - Parameter to use when reading records. 
+ In the table struct the following information is updated: + quick_keys - Which keys can be used + quick_rows - How many rows the key matches + + RETURN VALUES + -1 if impossible select + 0 if can't use quick_select + 1 if found usable range + + TODO + check if the function really needs to modify keys_to_use, and change the + code to pass it by reference if not +*/ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, table_map prev_tables, ha_rows limit, bool force_quick_range) { - uint basflag; uint idx; double scan_time; DBUG_ENTER("test_quick_select"); DBUG_PRINT("enter",("keys_to_use: %lu prev_tables: %lu const_tables: %lu", - (ulong) keys_to_use, (ulong) prev_tables, + keys_to_use.to_ulonglong(), (ulong) prev_tables, (ulong) const_tables)); delete quick; quick=0; - needed_reg=0; quick_keys=0; + needed_reg.clear_all(); quick_keys.clear_all(); if (!cond || (specialflag & SPECIAL_SAFE_MODE) && ! force_quick_range || !limit) DBUG_RETURN(0); /* purecov: inspected */ - if (!((basflag= head->file->table_flags()) & HA_KEYPOS_TO_RNDPOS) && - keys_to_use == (uint) ~0 || !keys_to_use) - DBUG_RETURN(0); /* Not smart database */ + if (keys_to_use.is_clear_all()) + DBUG_RETURN(0); records=head->file->records; if (!records) records++; /* purecov: inspected */ scan_time=(double) records / TIME_FOR_COMPARE+1; - read_time=(double) head->file->scan_time()+ scan_time + 1.0; + read_time=(double) head->file->scan_time()+ scan_time + 1.1; if (head->force_index) scan_time= read_time= DBL_MAX; if (limit < records) @@ -622,55 +729,60 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, DBUG_PRINT("info",("Time to scan table: %g", read_time)); - keys_to_use&=head->keys_in_use_for_query; - if (keys_to_use) + keys_to_use.intersect(head->keys_in_use_for_query); + if (!keys_to_use.is_clear_all()) { MEM_ROOT *old_root,alloc; SEL_TREE *tree; KEY_PART *key_parts; + KEY *key_info; PARAM param; /* set up parameter that is passed to all functions */ param.thd= thd; - param.baseflag=basflag; + param.baseflag=head->file->table_flags(); param.prev_tables=prev_tables | const_tables; param.read_tables=read_tables; param.current_table= head->map; param.table=head; param.keys=0; param.mem_root= &alloc; - param.thd->no_errors=1; // Don't warn about NULL - init_sql_alloc(&alloc, param.thd->variables.range_alloc_block_size, 0); + thd->no_errors=1; // Don't warn about NULL + init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0); if (!(param.key_parts = (KEY_PART*) alloc_root(&alloc, sizeof(KEY_PART)* head->key_parts))) { - param.thd->no_errors=0; + thd->no_errors=0; free_root(&alloc,MYF(0)); // Return memory & allocator DBUG_RETURN(0); // Can't use range } key_parts= param.key_parts; - old_root=my_pthread_getspecific_ptr(MEM_ROOT*,THR_MALLOC); - my_pthread_setspecific_ptr(THR_MALLOC,&alloc); + old_root= thd->mem_root; + thd->mem_root= &alloc; - for (idx=0 ; idx < head->keys ; idx++) + key_info= head->key_info; + for (idx=0 ; idx < head->keys ; idx++, key_info++) { - if (!(keys_to_use & ((key_map) 1L << idx))) + KEY_PART_INFO *key_part_info; + if (!keys_to_use.is_set(idx)) continue; - KEY *key_info= &head->key_info[idx]; if (key_info->flags & HA_FULLTEXT) continue; // ToDo: ft-keys in non-ft ranges, if possible SerG param.key[param.keys]=key_parts; - for (uint part=0 ; part < key_info->key_parts ; part++,key_parts++) + key_part_info= key_info->key_part; + for (uint part=0 ; part < key_info->key_parts ; + part++, key_parts++, key_part_info++) { - key_parts->key=param.keys; - 
key_parts->part=part; - key_parts->part_length= key_info->key_part[part].length; - key_parts->field= key_info->key_part[part].field; - key_parts->null_bit= key_info->key_part[part].null_bit; - if (key_parts->field->type() == FIELD_TYPE_BLOB) - key_parts->part_length+=HA_KEY_BLOB_LENGTH; + key_parts->key= param.keys; + key_parts->part= part; + key_parts->length= key_part_info->length; + key_parts->store_length= key_part_info->store_length; + key_parts->field= key_part_info->field; + key_parts->null_bit= key_part_info->null_bit; + key_parts->image_type = + (key_info->flags & HA_SPATIAL) ? Field::itMBR : Field::itRAW; } param.real_keynr[param.keys++]=idx; } @@ -688,6 +800,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, { SEL_ARG **key,**end,**best_key=0; + for (idx=0,key=tree->keys, end=key+param.keys ; key != end ; key++,idx++) @@ -699,12 +812,13 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, uint keynr= param.real_keynr[idx]; if ((*key)->type == SEL_ARG::MAYBE_KEY || (*key)->maybe_flag) - needed_reg|= (key_map) 1 << keynr; + needed_reg.set_bit(keynr); found_records=check_quick_select(¶m, idx, *key); if (found_records != HA_POS_ERROR && found_records > 2 && - head->used_keys & ((table_map) 1 << keynr) && - (head->file->index_flags(keynr) & HA_KEY_READ_ONLY)) + head->used_keys.is_set(keynr) && + (head->file->index_flags(keynr, param.max_key_part, 1) & + HA_KEYREAD_ONLY)) { /* We can resolve this by only reading through this key. @@ -723,6 +837,8 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, param.range_count, found_records)+ (double) found_records / TIME_FOR_COMPARE); + DBUG_PRINT("info",("read_time: %g found_read_time: %g", + read_time, found_read_time)); if (read_time > found_read_time && found_records != HA_POS_ERROR) { read_time=found_read_time; @@ -743,10 +859,10 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, } } free_root(&alloc,MYF(0)); // Return memory & allocator - my_pthread_setspecific_ptr(THR_MALLOC,old_root); - param.thd->no_errors=0; + thd->mem_root= old_root; + thd->no_errors=0; } - DBUG_EXECUTE("info",print_quick(quick,needed_reg);); + DBUG_EXECUTE("info",print_quick(quick,&needed_reg);); /* Assume that if the user is using 'limit' we will only need to scan limit rows if we are using a key @@ -772,7 +888,7 @@ static SEL_TREE *get_mm_tree(PARAM *param,COND *cond) while ((item=li++)) { SEL_TREE *new_tree=get_mm_tree(param,item); - if (param->thd->fatal_error) + if (param->thd->is_fatal_error) DBUG_RETURN(0); // out of memory tree=tree_and(param,tree,new_tree); if (tree && tree->type == SEL_TREE::IMPOSSIBLE) @@ -823,15 +939,16 @@ static SEL_TREE *get_mm_tree(PARAM *param,COND *cond) if (cond_func->functype() == Item_func::BETWEEN) { - if (cond_func->arguments()[0]->type() == Item::FIELD_ITEM) + if (!((Item_func_between *)(cond_func))->negated && + cond_func->arguments()[0]->type() == Item::FIELD_ITEM) { Field *field=((Item_field*) (cond_func->arguments()[0]))->field; Item_result cmp_type=field->cmp_type(); DBUG_RETURN(tree_and(param, - get_mm_parts(param, field, + get_mm_parts(param, cond_func, field, Item_func::GE_FUNC, cond_func->arguments()[1], cmp_type), - get_mm_parts(param, field, + get_mm_parts(param, cond_func, field, Item_func::LE_FUNC, cond_func->arguments()[2], cmp_type))); } @@ -840,17 +957,18 @@ static SEL_TREE *get_mm_tree(PARAM *param,COND *cond) if (cond_func->functype() == Item_func::IN_FUNC) { // COND OR Item_func_in *func=(Item_func_in*) cond_func; - if 
(func->key_item()->type() == Item::FIELD_ITEM) + if (!func->negated && func->key_item()->type() == Item::FIELD_ITEM) { Field *field=((Item_field*) (func->key_item()))->field; Item_result cmp_type=field->cmp_type(); - tree= get_mm_parts(param,field,Item_func::EQ_FUNC, - func->arguments()[0],cmp_type); + tree= get_mm_parts(param,cond_func,field,Item_func::EQ_FUNC, + func->arguments()[1],cmp_type); if (!tree) DBUG_RETURN(tree); // Not key field - for (uint i=1 ; i < func->argument_count(); i++) + for (uint i=2 ; i < func->argument_count(); i++) { - SEL_TREE *new_tree=get_mm_parts(param,field,Item_func::EQ_FUNC, + SEL_TREE *new_tree=get_mm_parts(param,cond_func,field, + Item_func::EQ_FUNC, func->arguments()[i],cmp_type); tree=tree_or(param,tree,new_tree); } @@ -869,7 +987,7 @@ static SEL_TREE *get_mm_tree(PARAM *param,COND *cond) /* btw, ft_func's arguments()[0] isn't FIELD_ITEM. SerG*/ if (cond_func->arguments()[0]->type() == Item::FIELD_ITEM) { - tree= get_mm_parts(param, + tree= get_mm_parts(param, cond_func, ((Item_field*) (cond_func->arguments()[0]))->field, cond_func->functype(), cond_func->arg_count > 1 ? cond_func->arguments()[1] : @@ -882,7 +1000,7 @@ static SEL_TREE *get_mm_tree(PARAM *param,COND *cond) cond_func->have_rev_func() && cond_func->arguments()[1]->type() == Item::FIELD_ITEM) { - DBUG_RETURN(get_mm_parts(param, + DBUG_RETURN(get_mm_parts(param, cond_func, ((Item_field*) (cond_func->arguments()[1]))->field, ((Item_bool_func2*) cond_func)->rev_functype(), @@ -896,13 +1014,21 @@ static SEL_TREE *get_mm_tree(PARAM *param,COND *cond) static SEL_TREE * -get_mm_parts(PARAM *param, Field *field, Item_func::Functype type, +get_mm_parts(PARAM *param, COND *cond_func, Field *field, + Item_func::Functype type, Item *value, Item_result cmp_type) { + bool ne_func= FALSE; DBUG_ENTER("get_mm_parts"); if (field->table != param->table) DBUG_RETURN(0); + if (type == Item_func::NE_FUNC) + { + ne_func= TRUE; + type= Item_func::LT_FUNC; + } + KEY_PART *key_part = param->key_parts; KEY_PART *end = param->key_parts_end; SEL_TREE *tree=0; @@ -918,13 +1044,16 @@ get_mm_parts(PARAM *param, Field *field, Item_func::Functype type, DBUG_RETURN(0); // OOM if (!value || !(value->used_tables() & ~param->read_tables)) { - sel_arg=get_mm_leaf(param,key_part->field,key_part,type,value); + sel_arg=get_mm_leaf(param,cond_func, + key_part->field,key_part,type,value); if (!sel_arg) continue; if (sel_arg->type == SEL_ARG::IMPOSSIBLE) { tree->type=SEL_TREE::IMPOSSIBLE; - DBUG_RETURN(tree); + /* If this is an NE_FUNC, we still need to check GT_FUNC. 
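The ne_func handling introduced in get_mm_parts() above rewrites `field <> const` as the union of two open-ended ranges, `field < const` OR `field > const`, and only gives up when both halves turn out impossible. A toy interval model of that decomposition; Range and ranges_for_ne() are invented names, while the real code builds SEL_ARG trees and merges them with tree_or():

#include <iostream>
#include <limits>
#include <vector>

// Open/closed interval over an int key; open_min/open_max mirror NEAR_MIN/NEAR_MAX.
struct Range { int min, max; bool open_min, open_max; };

// field <> v  becomes  (field < v) OR (field > v): two ranges, each excluding v itself.
std::vector<Range> ranges_for_ne(int v)
{
  const int lo = std::numeric_limits<int>::min();
  const int hi = std::numeric_limits<int>::max();
  return { {lo, v, false, true},       // field < v
           {v, hi, true, false} };     // field > v
}

bool contains(const Range &r, int x)
{
  if (x < r.min || (x == r.min && r.open_min)) return false;
  if (x > r.max || (x == r.max && r.open_max)) return false;
  return true;
}

int main()
{
  int hits = 0;
  for (const Range &r : ranges_for_ne(7))
    hits += contains(r, 7);            // the excluded value falls in neither half
  std::cout << (hits == 0 ? "7 is excluded" : "bug") << "\n";
}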
*/ + if (!ne_func) + DBUG_RETURN(tree); } } else @@ -937,12 +1066,21 @@ get_mm_parts(PARAM *param, Field *field, Item_func::Functype type, tree->keys[key_part->key]=sel_add(tree->keys[key_part->key],sel_arg); } } + + if (ne_func) + { + SEL_TREE *tree2= get_mm_parts(param, cond_func, + field, Item_func::GT_FUNC, + value, cmp_type); + /* tree_or() will return 0 if tree2 is 0 */ + tree= tree_or(param,tree,tree2); + } DBUG_RETURN(tree); } static SEL_ARG * -get_mm_leaf(PARAM *param, Field *field, KEY_PART *key_part, +get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, Item_func::Functype type,Item *value) { uint maybe_null=(uint) field->real_maybe_null(), copies; @@ -951,14 +1089,50 @@ get_mm_leaf(PARAM *param, Field *field, KEY_PART *key_part, char *str, *str2; DBUG_ENTER("get_mm_leaf"); + if (!value) // IS NULL or IS NOT NULL + { + if (field->table->outer_join) // Can't use a key on this + DBUG_RETURN(0); + if (!maybe_null) // Not null field + DBUG_RETURN(type == Item_func::ISNULL_FUNC ? &null_element : 0); + if (!(tree=new SEL_ARG(field,is_null_string,is_null_string))) + DBUG_RETURN(0); // out of memory + if (type == Item_func::ISNOTNULL_FUNC) + { + tree->min_flag=NEAR_MIN; /* IS NOT NULL -> X > NULL */ + tree->max_flag=NO_MAX_RANGE; + } + DBUG_RETURN(tree); + } + + /* + 1. Usually we can't use an index if the column collation + differ from the operation collation. + + 2. However, we can reuse a case insensitive index for + the binary searches: + + WHERE latin1_swedish_ci_column = 'a' COLLATE lati1_bin; + + WHERE latin1_swedish_ci_colimn = BINARY 'a ' + + */ + if (field->result_type() == STRING_RESULT && + value->result_type() == STRING_RESULT && + key_part->image_type == Field::itRAW && + ((Field_str*)field)->charset() != conf_func->compare_collation() && + !(conf_func->compare_collation()->state & MY_CS_BINSORT)) + DBUG_RETURN(0); + if (type == Item_func::LIKE_FUNC) { bool like_error; char buff1[MAX_FIELD_WIDTH],*min_str,*max_str; - String tmp(buff1,sizeof(buff1)),*res; + String tmp(buff1,sizeof(buff1),value->collation.collation),*res; uint length,offset,min_length,max_length; - if (!field->optimize_range((uint) key_part->key)) + if (!field->optimize_range(param->real_keynr[key_part->key], + key_part->part)) DBUG_RETURN(0); // Can't optimize this if (!(res= value->val_str(&tmp))) DBUG_RETURN(&null_element); @@ -977,18 +1151,26 @@ get_mm_leaf(PARAM *param, Field *field, KEY_PART *key_part, DBUG_RETURN(0); // Can only optimize strings offset=maybe_null; - length=key_part->part_length; - if (field->type() == FIELD_TYPE_BLOB) + length=key_part->store_length; + + if (length != key_part->length + maybe_null) { - offset+=HA_KEY_BLOB_LENGTH; - field_length=key_part->part_length-HA_KEY_BLOB_LENGTH; + /* key packed with length prefix */ + offset+= HA_KEY_BLOB_LENGTH; + field_length= length - HA_KEY_BLOB_LENGTH; } else { - if (length < field_length) - length=field_length; // Only if overlapping key + if (unlikely(length < field_length)) + { + /* + This can only happen in a table created with UNIREG where one key + overlaps many fields + */ + length= field_length; + } else - field_length=length; + field_length= length; } length+=offset; if (!(min_str= (char*) alloc_root(param->mem_root, length*2))) @@ -996,30 +1178,17 @@ get_mm_leaf(PARAM *param, Field *field, KEY_PART *key_part, max_str=min_str+length; if (maybe_null) max_str[0]= min_str[0]=0; - if (field->binary()) - like_error=like_range(res->ptr(), res->length(), - ((Item_func_like*)(param->cond))->escape, - field_length, 
min_str + offset, max_str + offset, - (char) 255, &min_length, &max_length); - else - { -#ifdef USE_STRCOLL - if (use_strcoll(default_charset_info)) - like_error= my_like_range(default_charset_info, res->ptr(), - res->length(), - ((Item_func_like*)(param->cond))->escape, - field_length, min_str + maybe_null, - max_str + maybe_null, &min_length, - &max_length); - else -#endif - like_error=like_range(res->ptr(), res->length(), + + like_error= my_like_range(field->charset(), + res->ptr(), res->length(), ((Item_func_like*)(param->cond))->escape, - field_length, min_str + offset, max_str + offset, - max_sort_char, &min_length, &max_length); - } + wild_one, wild_many, + field_length-maybe_null, + min_str+offset, max_str+offset, + &min_length, &max_length); if (like_error) // Can't optimize with LIKE DBUG_RETURN(0); + if (offset != maybe_null) // Blob { int2store(min_str+maybe_null,min_length); @@ -1028,23 +1197,8 @@ get_mm_leaf(PARAM *param, Field *field, KEY_PART *key_part, DBUG_RETURN(new SEL_ARG(field,min_str,max_str)); } - if (!value) // IS NULL or IS NOT NULL - { - if (field->table->outer_join) // Can't use a key on this - DBUG_RETURN(0); - if (!maybe_null) // Not null field - DBUG_RETURN(type == Item_func::ISNULL_FUNC ? &null_element : 0); - if (!(tree=new SEL_ARG(field,is_null_string,is_null_string))) - DBUG_RETURN(0); // out of memory - if (type == Item_func::ISNOTNULL_FUNC) - { - tree->min_flag=NEAR_MIN; /* IS NOT NULL -> X > NULL */ - tree->max_flag=NO_MAX_RANGE; - } - DBUG_RETURN(tree); - } - - if (!field->optimize_range((uint) key_part->key) && + if (!field->optimize_range(param->real_keynr[key_part->key], + key_part->part) && type != Item_func::EQ_FUNC && type != Item_func::EQUAL_FUNC) DBUG_RETURN(0); // Can't optimize this @@ -1057,8 +1211,8 @@ get_mm_leaf(PARAM *param, Field *field, KEY_PART *key_part, value->result_type() != STRING_RESULT && field->cmp_type() != value->result_type()) DBUG_RETURN(0); - - if (value->save_in_field(field, 1)) + + if (value->save_in_field(field, 1) < 0) { /* This happens when we try to insert a NULL field in a not null column */ DBUG_RETURN(&null_element); // cmp with NULL is never true @@ -1068,12 +1222,13 @@ get_mm_leaf(PARAM *param, Field *field, KEY_PART *key_part, if (field->key_type() == HA_KEYTYPE_VARTEXT) copies= 2; str= str2= (char*) alloc_root(param->mem_root, - (key_part->part_length+maybe_null)*copies+1); + (key_part->store_length)*copies+1); if (!str) DBUG_RETURN(0); if (maybe_null) *str= (char) field->is_real_null(); // Set to 1 if null - field->get_key_image(str+maybe_null,key_part->part_length); + field->get_key_image(str+maybe_null, key_part->length, + field->charset(), key_part->image_type); if (copies == 2) { /* @@ -1082,20 +1237,50 @@ get_mm_leaf(PARAM *param, Field *field, KEY_PART *key_part, all rows between 'X' and 'X ...' 
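The my_like_range() call above asks the column's collation to turn a LIKE pattern into the smallest and largest key values that could match it, so a prefix pattern becomes an ordinary key range. A heavily reduced single-byte, binary-ordering sketch of the same idea; like_prefix_range() and the space/0xFF padding are illustrative assumptions, since real collations pad and compare according to their own sort order:

#include <cstddef>
#include <string>

// Build min/max key strings for "col LIKE pattern" with '%' and '_' wildcards,
// assuming single-byte characters and plain binary ordering.  Returns false when
// the pattern gives no usable range (it starts with a wildcard).
bool like_prefix_range(const std::string &pattern, char escape,
                       std::size_t key_len,
                       std::string *min_key, std::string *max_key)
{
  min_key->clear();
  max_key->clear();
  for (std::size_t i = 0; i < pattern.size() && min_key->size() < key_len; i++) {
    char c = pattern[i];
    if (c == escape && i + 1 < pattern.size())
      c = pattern[++i];                // an escaped wildcard is an ordinary character
    else if (c == '%' || c == '_')
      break;                           // the range ends at the first wildcard
    min_key->push_back(c);
    max_key->push_back(c);
  }
  if (min_key->empty())
    return false;                      // e.g. LIKE '%abc': no leading literal, no range
  // Pad to the key length: spaces for the lower bound (think key compression),
  // the largest byte value for the upper bound.
  min_key->resize(key_len, ' ');
  max_key->resize(key_len, '\xFF');
  return true;
}

int main()
{
  std::string lo, hi;
  bool ok = like_prefix_range("abc%", '\\', 8, &lo, &hi);
  // lo is "abc" space-padded to 8 bytes, hi is "abc" padded with 0xFF bytes.
  return ok && lo.compare(0, 3, "abc") == 0 ? 0 : 1;
}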
*/ uint length= uint2korr(str+maybe_null); - str2= str+ key_part->part_length + maybe_null; + str2= str+ key_part->store_length; /* remove end space */ while (length > 0 && str[length+HA_KEY_BLOB_LENGTH+maybe_null-1] == ' ') length--; int2store(str+maybe_null, length); /* Create key that is space filled */ memcpy(str2, str, length + HA_KEY_BLOB_LENGTH + maybe_null); - bfill(str2+ length+ HA_KEY_BLOB_LENGTH +maybe_null, - key_part->part_length-length - HA_KEY_BLOB_LENGTH, ' '); - int2store(str2+maybe_null, key_part->part_length - HA_KEY_BLOB_LENGTH); + my_fill_8bit(field->charset(), + str2+ length+ HA_KEY_BLOB_LENGTH +maybe_null, + key_part->length-length, ' '); + int2store(str2+maybe_null, key_part->length); } if (!(tree=new SEL_ARG(field,str,str2))) DBUG_RETURN(0); // out of memory + /* + Check if we are comparing an UNSIGNED integer with a negative constant. + In this case we know that: + (a) (unsigned_int [< | <=] negative_constant) == FALSE + (b) (unsigned_int [> | >=] negative_constant) == TRUE + In case (a) the condition is false for all values, and in case (b) it + is true for all values, so we can avoid unnecessary retrieval and condition + testing, and we also get correct comparison of unsinged integers with + negative integers (which otherwise fails because at query execution time + negative integers are cast to unsigned if compared with unsigned). + */ + Item_result field_result_type= field->result_type(); + Item_result value_result_type= value->result_type(); + if (field_result_type == INT_RESULT && value_result_type == INT_RESULT && + ((Field_num*)field)->unsigned_flag && !((Item_int*)value)->unsigned_flag) + { + longlong item_val= value->val_int(); + if (item_val < 0) + { + if (type == Item_func::LT_FUNC || type == Item_func::LE_FUNC) + { + tree->type= SEL_ARG::IMPOSSIBLE; + DBUG_RETURN(tree); + } + if (type == Item_func::GT_FUNC || type == Item_func::GE_FUNC) + DBUG_RETURN(0); + } + } + switch (type) { case Item_func::LT_FUNC: if (field_is_equal_to_item(field,value)) @@ -1117,73 +1302,45 @@ get_mm_leaf(PARAM *param, Field *field, KEY_PART *key_part, case Item_func::GE_FUNC: tree->max_flag=NO_MAX_RANGE; break; - default: + case Item_func::SP_EQUALS_FUNC: + tree->min_flag=GEOM_FLAG | HA_READ_MBR_EQUAL;// NEAR_MIN;//512; + tree->max_flag=NO_MAX_RANGE; + break; + case Item_func::SP_DISJOINT_FUNC: + tree->min_flag=GEOM_FLAG | HA_READ_MBR_DISJOINT;// NEAR_MIN;//512; + tree->max_flag=NO_MAX_RANGE; + break; + case Item_func::SP_INTERSECTS_FUNC: + tree->min_flag=GEOM_FLAG | HA_READ_MBR_INTERSECT;// NEAR_MIN;//512; + tree->max_flag=NO_MAX_RANGE; + break; + case Item_func::SP_TOUCHES_FUNC: + tree->min_flag=GEOM_FLAG | HA_READ_MBR_INTERSECT;// NEAR_MIN;//512; + tree->max_flag=NO_MAX_RANGE; break; - } - DBUG_RETURN(tree); -} - -/* -** Calculate min_str and max_str that ranges a LIKE string. -** Arguments: -** ptr Pointer to LIKE string. -** ptr_length Length of LIKE string. -** escape Escape character in LIKE. (Normally '\'). -** All escape characters should be removed from min_str and max_str -** res_length Length of min_str and max_str. -** min_str Smallest case sensitive string that ranges LIKE. -** Should be space padded to res_length. -** max_str Largest case sensitive string that ranges LIKE. -** Normally padded with the biggest character sort value. -** -** The function should return 0 if ok and 1 if the LIKE string can't be -** optimized ! 
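The new unsigned-versus-negative check above short-circuits comparisons of an UNSIGNED column with a negative constant, because at execution time the constant would be cast to unsigned and compare the wrong way. This small program shows the pitfall being guarded against; plain C++ integer conversions misbehave in exactly the way the comment describes:

#include <iostream>

int main()
{
  unsigned int col_value = 5;    // value stored in an UNSIGNED column
  int constant = -1;             // negative constant from the WHERE clause

  // Semantically, "5 < -1" must be false and "5 > -1" must be true.
  // After the unsigned conversion, however, -1 wraps to the largest
  // unsigned value (4294967295 with a 32-bit int):
  bool naive_lt = col_value < static_cast<unsigned int>(constant);   // true, i.e. wrong

  // The range optimizer therefore decides the outcome up front:
  //   col <  negative  or  col <= negative  ->  impossible range (no rows can match)
  //   col >  negative  or  col >= negative  ->  no restriction (every row matches)
  bool correct_lt = false;
  bool correct_gt = true;

  std::cout << "naive comparison: " << naive_lt
            << ", correct '<': " << correct_lt
            << ", correct '>': " << correct_gt << "\n";
  return 0;
}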
-*/ + case Item_func::SP_CROSSES_FUNC: + tree->min_flag=GEOM_FLAG | HA_READ_MBR_INTERSECT;// NEAR_MIN;//512; + tree->max_flag=NO_MAX_RANGE; + break; + case Item_func::SP_WITHIN_FUNC: + tree->min_flag=GEOM_FLAG | HA_READ_MBR_WITHIN;// NEAR_MIN;//512; + tree->max_flag=NO_MAX_RANGE; + break; -static bool like_range(const char *ptr,uint ptr_length,char escape, - uint res_length, char *min_str,char *max_str, - char max_sort_chr, uint *min_length, uint *max_length) -{ - const char *end=ptr+ptr_length; - char *min_org=min_str; - char *min_end=min_str+res_length; + case Item_func::SP_CONTAINS_FUNC: + tree->min_flag=GEOM_FLAG | HA_READ_MBR_CONTAIN;// NEAR_MIN;//512; + tree->max_flag=NO_MAX_RANGE; + break; + case Item_func::SP_OVERLAPS_FUNC: + tree->min_flag=GEOM_FLAG | HA_READ_MBR_INTERSECT;// NEAR_MIN;//512; + tree->max_flag=NO_MAX_RANGE; + break; - for (; ptr != end && min_str != min_end ; ptr++) - { - if (*ptr == escape && ptr+1 != end) - { - ptr++; // Skip escape - *min_str++= *max_str++ = *ptr; - continue; - } - if (*ptr == wild_one) // '_' in SQL - { - *min_str++='\0'; // This should be min char - *max_str++=max_sort_chr; - continue; - } - if (*ptr == wild_many) // '%' in SQL - { - *min_length= (uint) (min_str - min_org); - *max_length=res_length; - do { - *min_str++ = ' '; // Because if key compression - *max_str++ = max_sort_chr; - } while (min_str != min_end); - return 0; - } - *min_str++= *max_str++ = *ptr; + default: + break; } - *min_length= *max_length = (uint) (min_str - min_org); - - /* Temporary fix for handling wild_one at end of string (key compression) */ - for (char *tmp= min_str ; tmp > min_org && tmp[-1] == '\0';) - *--tmp=' '; - - while (min_str != min_end) - *min_str++ = *max_str++ = ' '; // Because if key compression - return 0; + DBUG_RETURN(tree); } @@ -1276,14 +1433,14 @@ tree_and(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2) if (*key2 && !(*key2)->simple_key()) flag|=CLONE_KEY2_MAYBE; *key1=key_and(*key1,*key2,flag); - if ((*key1)->type == SEL_ARG::IMPOSSIBLE) + if (*key1 && (*key1)->type == SEL_ARG::IMPOSSIBLE) { tree1->type= SEL_TREE::IMPOSSIBLE; - break; - } #ifdef EXTRA_DEBUG - (*key1)->test_use_count(*key1); + (*key1)->test_use_count(*key1); #endif + break; + } } } DBUG_RETURN(tree1); @@ -1379,7 +1536,7 @@ key_and(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag) { if (key1->part > key2->part) { - swap(SEL_ARG *,key1,key2); + swap_variables(SEL_ARG *, key1, key2); clone_flag=swap_clone_flag(clone_flag); } // key1->part < key2->part @@ -1395,7 +1552,7 @@ key_and(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag) key2->type != SEL_ARG::MAYBE_KEY) || key1->type == SEL_ARG::MAYBE_KEY) { // Put simple key in key2 - swap(SEL_ARG *,key1,key2); + swap_variables(SEL_ARG *, key1, key2); clone_flag=swap_clone_flag(clone_flag); } @@ -1430,6 +1587,13 @@ key_and(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag) return key1; } + if ((key1->min_flag | key2->min_flag) & GEOM_FLAG) + { + key1->free_tree(); + key2->free_tree(); + return 0; // Can't optimize this + } + key1->use_count--; key2->use_count--; SEL_ARG *e1=key1->first(), *e2=key2->first(), *new_tree=0; @@ -1512,7 +1676,8 @@ key_or(SEL_ARG *key1,SEL_ARG *key2) key1->use_count--; key2->use_count--; - if (key1->part != key2->part) + if (key1->part != key2->part || + (key1->min_flag | key2->min_flag) & GEOM_FLAG) { key1->free_tree(); key2->free_tree(); @@ -1537,7 +1702,7 @@ key_or(SEL_ARG *key1,SEL_ARG *key2) { if (key2->use_count == 0 || key1->elements > key2->elements) { - swap(SEL_ARG *,key1,key2); + swap_variables(SEL_ARG *,key1,key2); } 
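key_and() and key_or() above merge two SEL_ARG trees built for the same index, and the added GEOM_FLAG tests simply refuse to merge spatial (MBR) conditions, whose flags encode a search function rather than an ordered interval. For ordinary one-dimensional ranges the merge amounts to interval intersection and union, roughly as in this sketch with closed integer intervals (nothing like the real red-black SEL_ARG structure):

#include <algorithm>
#include <vector>

struct Interval { int lo, hi; };       // closed range [lo, hi]

// AND of two range lists: pairwise intersection, dropping empty results.
std::vector<Interval> ranges_and(const std::vector<Interval> &a,
                                 const std::vector<Interval> &b)
{
  std::vector<Interval> out;
  for (const Interval &x : a)
    for (const Interval &y : b) {
      Interval r{std::max(x.lo, y.lo), std::min(x.hi, y.hi)};
      if (r.lo <= r.hi)                // keep only non-empty intersections
        out.push_back(r);
    }
  return out;                          // empty result == impossible condition
}

// OR of two range lists: concatenate, sort by lower bound, merge overlaps.
std::vector<Interval> ranges_or(std::vector<Interval> a,
                                const std::vector<Interval> &b)
{
  a.insert(a.end(), b.begin(), b.end());
  std::sort(a.begin(), a.end(),
            [](const Interval &x, const Interval &y) { return x.lo < y.lo; });
  std::vector<Interval> out;
  for (const Interval &r : a) {
    if (!out.empty() && r.lo <= out.back().hi)
      out.back().hi = std::max(out.back().hi, r.hi);   // fold into the previous range
    else
      out.push_back(r);
  }
  return out;
}

int main()
{
  auto u = ranges_or({{1, 3}}, {{2, 6}, {9, 9}});       // OR  -> [1,6], [9,9]
  auto i = ranges_and(u, {{4, 10}});                    // AND -> [4,6], [9,9]
  return (u.size() == 2 && i.size() == 2) ? 0 : 1;
}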
if (key1->use_count > 0 || !(key1=key1->clone_tree())) return 0; // OOM @@ -1640,6 +1805,7 @@ key_or(SEL_ARG *key1,SEL_ARG *key2) last=last->next; key1=key1->tree_delete(save); } + last->copy_min(tmp); if (last->copy_min(key2) || last->copy_max(key2)) { // Full range key1->free_tree(); @@ -1778,8 +1944,8 @@ SEL_ARG * SEL_ARG::insert(SEL_ARG *key) { SEL_ARG *element,**par,*last_element; - LINT_INIT(par); LINT_INIT(last_element); + for (element= this; element != &null_element ; ) { last_element=element; @@ -2159,7 +2325,7 @@ void SEL_ARG::test_use_count(SEL_ARG *root) uint e_count=0; if (this == root && use_count != 1) { - sql_print_error("Note: Use_count: Wrong count %lu for root",use_count); + sql_print_information("Use_count: Wrong count %lu for root",use_count); return; } if (this->type != SEL_ARG::KEY_RANGE) @@ -2172,7 +2338,7 @@ void SEL_ARG::test_use_count(SEL_ARG *root) ulong count=count_key_part_usage(root,pos->next_key_part); if (count > pos->next_key_part->use_count) { - sql_print_error("Note: Use_count: Wrong count for key at %lx, %lu should be %lu", + sql_print_information("Use_count: Wrong count for key at %lx, %lu should be %lu", pos,pos->next_key_part->use_count,count); return; } @@ -2180,7 +2346,7 @@ void SEL_ARG::test_use_count(SEL_ARG *root) } } if (e_count != elements) - sql_print_error("Warning: Wrong use count: %u (should be %u) for tree at %lx", + sql_print_warning("Wrong use count: %u (should be %u) for tree at %lx", e_count, elements, (gptr) this); } @@ -2210,10 +2376,11 @@ check_quick_select(PARAM *param,uint idx,SEL_ARG *tree) if (records != HA_POS_ERROR) { uint key=param->real_keynr[idx]; - param->table->quick_keys|= (key_map) 1 << key; + param->table->quick_keys.set_bit(key); param->table->quick_rows[key]=records; param->table->quick_key_parts[key]=param->max_key_part+1; } + DBUG_PRINT("exit", ("Records: %lu", (ulong) records)); DBUG_RETURN(records); } @@ -2237,7 +2404,7 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, uint tmp_min_flag,tmp_max_flag,keynr; char *tmp_min_key=min_key,*tmp_max_key=max_key; - key_tree->store(param->key[idx][key_tree->part].part_length, + key_tree->store(param->key[idx][key_tree->part].store_length, &tmp_min_key,min_key_flag,&tmp_max_key,max_key_flag); uint min_key_length= (uint) (tmp_min_key- param->min_key); uint max_key_length= (uint) (tmp_max_key- param->max_key); @@ -2282,18 +2449,37 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, !memcmp(param->min_key,param->max_key,min_key_length)) tmp=1; // Max one record else - tmp=param->table->file-> - records_in_range((int) keynr, - (byte*) (!min_key_length ? NullS : - param->min_key), - min_key_length, - (tmp_min_flag & NEAR_MIN ? - HA_READ_AFTER_KEY : HA_READ_KEY_EXACT), - (byte*) (!max_key_length ? NullS : - param->max_key), - max_key_length, - (tmp_max_flag & NEAR_MAX ? - HA_READ_BEFORE_KEY : HA_READ_AFTER_KEY)); + { + if (tmp_min_flag & GEOM_FLAG) + { + key_range min_range; + min_range.key= (byte*) param->min_key; + min_range.length= min_key_length; + /* In this case tmp_min_flag contains the handler-read-function */ + min_range.flag= (ha_rkey_function) (tmp_min_flag ^ GEOM_FLAG); + + tmp= param->table->file->records_in_range(keynr, &min_range, + (key_range*) 0); + } + else + { + key_range min_range, max_range; + + min_range.key= (byte*) param->min_key; + min_range.length= min_key_length; + min_range.flag= (tmp_min_flag & NEAR_MIN ? 
HA_READ_AFTER_KEY : + HA_READ_KEY_EXACT); + max_range.key= (byte*) param->max_key; + max_range.length= max_key_length; + max_range.flag= (tmp_max_flag & NEAR_MAX ? + HA_READ_BEFORE_KEY : HA_READ_AFTER_KEY); + tmp=param->table->file->records_in_range(keynr, + (min_key_length ? &min_range : + (key_range*) 0), + (max_key_length ? &max_range : + (key_range*) 0)); + } + } end: if (tmp == HA_POS_ERROR) // Impossible range return tmp; @@ -2320,8 +2506,14 @@ get_quick_select(PARAM *param,uint idx,SEL_ARG *key_tree) { QUICK_SELECT *quick; DBUG_ENTER("get_quick_select"); - if ((quick=new QUICK_SELECT(param->thd, param->table, - param->real_keynr[idx]))) + + if (param->table->key_info[param->real_keynr[idx]].flags & HA_SPATIAL) + quick=new QUICK_SELECT_GEOM(param->thd, param->table, param->real_keynr[idx], + 0); + else + quick=new QUICK_SELECT(param->thd, param->table, param->real_keynr[idx]); + + if (quick) { if (quick->error || get_quick_keys(param,quick,param->key[idx],key_tree,param->min_key,0, @@ -2361,7 +2553,7 @@ get_quick_keys(PARAM *param,QUICK_SELECT *quick,KEY_PART *key, return 1; } char *tmp_min_key=min_key,*tmp_max_key=max_key; - key_tree->store(key[key_tree->part].part_length, + key_tree->store(key[key_tree->part].store_length, &tmp_min_key,min_key_flag,&tmp_max_key,max_key_flag); if (key_tree->next_key_part && @@ -2390,19 +2582,26 @@ get_quick_keys(PARAM *param,QUICK_SELECT *quick,KEY_PART *key, } } else - flag=key_tree->min_flag | key_tree->max_flag; - - /* Ensure that some part of min_key and max_key are used. If not, - regard this as no lower/upper range */ - if (tmp_min_key != param->min_key) - flag&= ~NO_MIN_RANGE; - else - flag|= NO_MIN_RANGE; - if (tmp_max_key != param->max_key) - flag&= ~NO_MAX_RANGE; - else - flag|= NO_MAX_RANGE; + { + flag = (key_tree->min_flag & GEOM_FLAG) ? + key_tree->min_flag : key_tree->min_flag | key_tree->max_flag; + } + /* + Ensure that some part of min_key and max_key are used. 
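check_quick_keys() above now hands the end points to records_in_range() as key_range structures whose flag states whether each bound is inclusive or exclusive. Over a sorted in-memory list the corresponding estimate is just two binary searches, as in this sketch; Bound and rows_in_range() are invented, and a storage engine would estimate from index statistics rather than exact data:

#include <algorithm>
#include <cstddef>
#include <vector>

// One end point of a range; 'inclusive' plays the role the HA_READ_* flag plays
// in the key_range structures (HA_READ_KEY_EXACT vs. HA_READ_AFTER_KEY, etc.).
struct Bound { int key; bool inclusive; };

// Count how many keys of a sorted "index" fall between min_key and max_key.
// A null pointer means no bound on that side, like passing (key_range*) 0.
std::size_t rows_in_range(const std::vector<int> &index,          // sorted key values
                          const Bound *min_key, const Bound *max_key)
{
  auto first = index.begin(), last = index.end();
  if (min_key)
    first = min_key->inclusive
            ? std::lower_bound(index.begin(), index.end(), min_key->key)
            : std::upper_bound(index.begin(), index.end(), min_key->key);
  if (max_key)
    last = max_key->inclusive
           ? std::upper_bound(index.begin(), index.end(), max_key->key)
           : std::lower_bound(index.begin(), index.end(), max_key->key);
  return first < last ? static_cast<std::size_t>(last - first) : 0;
}

int main()
{
  std::vector<int> keys{1, 3, 3, 5, 7, 9};
  Bound lo{3, true}, hi{7, false};                     // 3 <= key < 7
  return rows_in_range(keys, &lo, &hi) == 3 ? 0 : 1;   // counts {3, 3, 5}
}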
If not, + regard this as no lower/upper range + */ + if ((flag & GEOM_FLAG) == 0) + { + if (tmp_min_key != param->min_key) + flag&= ~NO_MIN_RANGE; + else + flag|= NO_MIN_RANGE; + if (tmp_max_key != param->max_key) + flag&= ~NO_MAX_RANGE; + else + flag|= NO_MAX_RANGE; + } if (flag == 0) { uint length= (uint) (tmp_min_key - param->min_key); @@ -2435,6 +2634,7 @@ get_quick_keys(PARAM *param,QUICK_SELECT *quick,KEY_PART *key, set_if_bigger(quick->max_used_key_length,range->min_length); set_if_bigger(quick->max_used_key_length,range->max_length); + set_if_bigger(quick->used_key_parts, (uint) key_tree->part+1); quick->ranges.push_back(range); end: @@ -2471,25 +2671,24 @@ static bool null_part_in_key(KEY_PART *key_part, const char *key, uint length) { for (const char *end=key+length ; key < end; - key+= key_part++->part_length) + key+= key_part++->store_length) { - if (key_part->null_bit) - { - if (*key++) - return 1; - } + if (key_part->null_bit && *key) + return 1; } return 0; } + /**************************************************************************** -** Create a QUICK RANGE based on a key + Create a QUICK RANGE based on a key ****************************************************************************/ QUICK_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, TABLE_REF *ref) { - table->file->index_end(); // Remove old cursor - QUICK_SELECT *quick=new QUICK_SELECT(thd, table, ref->key, 1); + MEM_ROOT *old_root= thd->mem_root; + /* The following call may change thd->mem_root */ + QUICK_SELECT *quick= new QUICK_SELECT(thd, table, ref->key); KEY *key_info = &table->key_info[ref->key]; KEY_PART *key_part; QUICK_RANGE *range; @@ -2497,11 +2696,11 @@ QUICK_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, TABLE_REF *ref) if (!quick) return 0; /* no ranges found */ - if (cp_buffer_from_ref(ref)) + if (cp_buffer_from_ref(thd, ref)) { - if (thd->fatal_error) + if (thd->is_fatal_error) goto err; // out of memory - return quick; // empty range + goto ok; // empty range } if (!(range= new QUICK_RANGE())) @@ -2521,15 +2720,39 @@ QUICK_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, TABLE_REF *ref) { key_part->part=part; key_part->field= key_info->key_part[part].field; - key_part->part_length= key_info->key_part[part].length; - if (key_part->field->type() == FIELD_TYPE_BLOB) - key_part->part_length+=HA_KEY_BLOB_LENGTH; + key_part->length= key_info->key_part[part].length; + key_part->store_length= key_info->key_part[part].store_length; key_part->null_bit= key_info->key_part[part].null_bit; } - if (!quick->ranges.push_back(range)) - return quick; + if (quick->ranges.push_back(range)) + goto err; + + /* + Add a NULL range if REF_OR_NULL optimization is used. + For example: + if we have "WHERE A=2 OR A IS NULL" we created the (A=2) range above + and have ref->null_ref_key set. Will create a new NULL range here. + */ + if (ref->null_ref_key) + { + QUICK_RANGE *null_range; + + *ref->null_ref_key= 1; // Set null byte then create a range + if (!(null_range= new QUICK_RANGE((char*)ref->key_buff, ref->key_length, + (char*)ref->key_buff, ref->key_length, + EQ_RANGE))) + goto err; + *ref->null_ref_key= 0; // Clear null byte + if (quick->ranges.push_back(null_range)) + goto err; + } + +ok: + thd->mem_root= old_root; + return quick; err: + thd->mem_root= old_root; delete quick; return 0; } @@ -2543,88 +2766,75 @@ int QUICK_SELECT::get_next() for (;;) { int result; + key_range start_key, end_key; if (range) - { // Already read through key - result=((range->flag & EQ_RANGE) ? 
- file->index_next_same(record, (byte*) range->min_key, - range->min_length) : - file->index_next(record)); - if (!result) - { - if (!cmp_next(*it.ref())) - DBUG_RETURN(0); - } - else if (result != HA_ERR_END_OF_FILE) + { + // Already read through key + result= file->read_range_next(); + if (result != HA_ERR_END_OF_FILE) DBUG_RETURN(result); } - if (!(range=it++)) + if (!(range= it++)) DBUG_RETURN(HA_ERR_END_OF_FILE); // All ranges used - if (range->flag & NO_MIN_RANGE) // Read first record - { - int local_error; - if ((local_error=file->index_first(record))) - DBUG_RETURN(local_error); // Empty table - if (cmp_next(range) == 0) - DBUG_RETURN(0); - range=0; // No matching records; go to next range - continue; - } - if ((result = file->index_read(record,(byte*) range->min_key, - range->min_length, - ((range->flag & NEAR_MIN) ? - HA_READ_AFTER_KEY: - (range->flag & EQ_RANGE) ? - HA_READ_KEY_EXACT : - HA_READ_KEY_OR_NEXT)))) - { - if (result != HA_ERR_KEY_NOT_FOUND) - DBUG_RETURN(result); - range=0; // Not found, to next range - continue; - } - if (cmp_next(range) == 0) - { - if (range->flag == (UNIQUE_RANGE | EQ_RANGE)) - range=0; // Stop searching - DBUG_RETURN(0); // Found key is in range - } - range=0; // To next range + start_key.key= (const byte*) range->min_key; + start_key.length= range->min_length; + start_key.flag= ((range->flag & NEAR_MIN) ? HA_READ_AFTER_KEY : + (range->flag & EQ_RANGE) ? + HA_READ_KEY_EXACT : HA_READ_KEY_OR_NEXT); + end_key.key= (const byte*) range->max_key; + end_key.length= range->max_length; + /* + We use READ_AFTER_KEY here because if we are reading on a key + prefix we want to find all keys with this prefix + */ + end_key.flag= (range->flag & NEAR_MAX ? HA_READ_BEFORE_KEY : + HA_READ_AFTER_KEY); + + result= file->read_range_first(range->min_length ? &start_key : 0, + range->max_length ? &end_key : 0, + test(range->flag & EQ_RANGE), + sorted); + if (range->flag == (UNIQUE_RANGE | EQ_RANGE)) + range=0; // Stop searching + + if (result != HA_ERR_END_OF_FILE) + DBUG_RETURN(result); + range=0; // No matching rows; go to next range } } - /* compare if found key is over max-value */ - /* Returns 0 if key <= range->max_key */ -int QUICK_SELECT::cmp_next(QUICK_RANGE *range_arg) +/* Get next for geometrical indexes */ + +int QUICK_SELECT_GEOM::get_next() { - if (range_arg->flag & NO_MAX_RANGE) - return 0; /* key can't be to large */ + DBUG_ENTER(" QUICK_SELECT_GEOM::get_next"); - KEY_PART *key_part=key_parts; - for (char *key=range_arg->max_key, *end=key+range_arg->max_length; - key < end; - key+= key_part++->part_length) + for (;;) { - int cmp; - if (key_part->null_bit) + int result; + if (range) { - if (*key++) - { - if (!key_part->field->is_null()) - return 1; - continue; - } - else if (key_part->field->is_null()) - return 0; + // Already read through key + result= file->index_next_same(record, (byte*) range->min_key, + range->min_length); + if (result != HA_ERR_END_OF_FILE) + DBUG_RETURN(result); } - if ((cmp=key_part->field->key_cmp((byte*) key, key_part->part_length)) < 0) - return 0; - if (cmp > 0) - return 1; + + if (!(range= it++)) + DBUG_RETURN(HA_ERR_END_OF_FILE); // All ranges used + + result= file->index_read(record, + (byte*) range->min_key, + range->min_length, + (ha_rkey_function)(range->flag ^ GEOM_FLAG)); + if (result != HA_ERR_KEY_NOT_FOUND) + DBUG_RETURN(result); + range=0; // Not found, to next range } - return (range_arg->flag & NEAR_MAX) ? 
1 : 0; // Exact match } @@ -2641,20 +2851,12 @@ int QUICK_SELECT::cmp_next(QUICK_RANGE *range_arg) QUICK_SELECT_DESC::QUICK_SELECT_DESC(QUICK_SELECT *q, uint used_key_parts) : QUICK_SELECT(*q), rev_it(rev_ranges) { - bool not_read_after_key = file->table_flags() & HA_NOT_READ_AFTER_KEY; QUICK_RANGE *r; it.rewind(); for (r = it++; r; r = it++) { rev_ranges.push_front(r); - if (not_read_after_key && range_reads_after_key(r)) - { - it.rewind(); // Reset range - error = HA_ERR_UNSUPPORTED; - dont_free=1; // Don't free memory from 'q' - return; - } } /* Remove EQ_RANGE flag for keys that are not using the full key */ for (r = rev_it++; r; r = rev_it++) @@ -2723,23 +2925,11 @@ int QUICK_SELECT_DESC::get_next() } else { - /* - Heikki changed Sept 11, 2002: since InnoDB does not store the cursor - position if READ_KEY_EXACT is used to a primary key with all - key columns specified, we must use below HA_READ_KEY_OR_NEXT, - so that InnoDB stores the cursor position and is able to move - the cursor one step backward after the search. */ - DBUG_ASSERT(range->flag & NEAR_MAX || range_reads_after_key(range)); - /* - Note: even if max_key is only a prefix, HA_READ_AFTER_KEY will - do the right thing - go past all keys which match the prefix - */ result=file->index_read(record, (byte*) range->max_key, range->max_length, ((range->flag & NEAR_MAX) ? - HA_READ_KEY_OR_NEXT : HA_READ_AFTER_KEY)); - result = file->index_prev(record); + HA_READ_BEFORE_KEY : HA_READ_PREFIX_LAST_OR_PREV)); } if (result) { @@ -2765,35 +2955,15 @@ int QUICK_SELECT_DESC::get_next() int QUICK_SELECT_DESC::cmp_prev(QUICK_RANGE *range_arg) { + int cmp; if (range_arg->flag & NO_MIN_RANGE) return 0; /* key can't be to small */ - KEY_PART *key_part = key_parts; - for (char *key = range_arg->min_key, *end = key + range_arg->min_length; - key < end; - key += key_part++->part_length) - { - int cmp; - if (key_part->null_bit) - { - // this key part allows null values; NULL is lower than everything else - if (*key++) - { - // the range is expecting a null value - if (!key_part->field->is_null()) - return 0; // not null -- still inside the range - continue; // null -- exact match, go to next key part - } - else if (key_part->field->is_null()) - return 1; // null -- outside the range - } - if ((cmp = key_part->field->key_cmp((byte*) key, - key_part->part_length)) > 0) - return 0; - if (cmp < 0) - return 1; - } - return (range_arg->flag & NEAR_MIN) ? 1 : 0; // Exact match + cmp= key_cmp(key_part_info, (byte*) range_arg->min_key, + range_arg->min_length); + if (cmp > 0 || cmp == 0 && !(range_arg->flag & NEAR_MIN)) + return 0; + return 1; // outside of range } @@ -2816,23 +2986,20 @@ bool QUICK_SELECT_DESC::range_reads_after_key(QUICK_RANGE *range_arg) bool QUICK_SELECT_DESC::test_if_null_range(QUICK_RANGE *range_arg, uint used_key_parts) { - uint offset,end; + uint offset, end; KEY_PART *key_part = key_parts, *key_part_end= key_part+used_key_parts; for (offset= 0, end = min(range_arg->min_length, range_arg->max_length) ; offset < end && key_part != key_part_end ; - offset += key_part++->part_length) + offset+= key_part++->store_length) { - uint null_length=test(key_part->null_bit); if (!memcmp((char*) range_arg->min_key+offset, (char*) range_arg->max_key+offset, - key_part->part_length + null_length)) - { - offset+=null_length; + key_part->store_length)) continue; - } - if (null_length && range_arg->min_key[offset]) + + if (key_part->null_bit && range_arg->min_key[offset]) return 1; // min_key is null and max_key isn't // Range doesn't cover NULL. 
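QUICK_SELECT_DESC above visits the ranges in reverse order and, within each range, positions on the upper bound and steps backwards through the index (hence HA_READ_BEFORE_KEY / HA_READ_PREFIX_LAST_OR_PREV). Over a sorted vector the same traversal looks like this sketch, using closed ranges and exact keys only; the real class also has to handle prefix keys and NULLs:

#include <algorithm>
#include <iostream>
#include <vector>

struct Range { int min, max; };        // closed range [min, max]

// Emit every index entry covered by 'ranges' in descending key order:
// take the ranges back to front, and scan each one from its upper bound downwards.
void scan_desc(const std::vector<int> &index,         // sorted ascending
               const std::vector<Range> &ranges)      // disjoint, sorted by min
{
  for (auto r = ranges.rbegin(); r != ranges.rend(); ++r) {
    // Position just past the last key <= max (the HA_READ_PREFIX_LAST_OR_PREV idea),
    // then step backwards while we are still inside the range.
    auto it = std::upper_bound(index.begin(), index.end(), r->max);
    while (it != index.begin() && *(it - 1) >= r->min)
      std::cout << *--it << '\n';
  }
}

int main()
{
  scan_desc({1, 2, 4, 5, 7, 8, 9}, {{2, 4}, {7, 8}});  // prints 8 7 4 2
}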
This is ok if there is no more null parts break; @@ -2874,43 +3041,46 @@ static void print_key(KEY_PART *key_part,const char *key,uint used_length) { char buff[1024]; - String tmp(buff,sizeof(buff)); + const char *key_end= key+used_length; + String tmp(buff,sizeof(buff),&my_charset_bin); + uint store_length; - for (uint length=0; - length < used_length ; - length+=key_part->part_length, key+=key_part->part_length, key_part++) + for (; key < key_end; key+=store_length, key_part++) { - Field *field=key_part->field; - if (length != 0) - fputc('/',DBUG_FILE); + Field *field= key_part->field; + store_length= key_part->store_length; + if (field->real_maybe_null()) { - length++; // null byte is not in part_length - if (*key++) + if (*key) { fwrite("NULL",sizeof(char),4,DBUG_FILE); continue; } + key++; // Skip null byte + store_length--; } - field->set_key_image((char*) key,key_part->part_length - - ((field->type() == FIELD_TYPE_BLOB) ? - HA_KEY_BLOB_LENGTH : 0)); - field->val_str(&tmp,&tmp); + field->set_key_image((char*) key, key_part->length, field->charset()); + field->val_str(&tmp); fwrite(tmp.ptr(),sizeof(char),tmp.length(),DBUG_FILE); + if (key+store_length < key_end) + fputc('/',DBUG_FILE); } } -static void print_quick(QUICK_SELECT *quick,key_map needed_reg) + +static void print_quick(QUICK_SELECT *quick,const key_map* needed_reg) { QUICK_RANGE *range; + char buf[MAX_KEY/8+1]; DBUG_ENTER("print_param"); if (! _db_on_ || !quick) DBUG_VOID_RETURN; List_iterator<QUICK_RANGE> li(quick->ranges); DBUG_LOCK_FILE; - fprintf(DBUG_FILE,"Used quick_range on key: %d (other_keys: %lu):\n", - quick->index, (ulong) needed_reg); + fprintf(DBUG_FILE,"Used quick_range on key: %d (other_keys: 0x%s):\n", + quick->index, needed_reg->print(buf)); while ((range=li++)) { if (!(range->flag & NO_MIN_RANGE)) diff --git a/sql/opt_range.h b/sql/opt_range.h index cda8ad51c3f..15f0bf02b34 100644 --- a/sql/opt_range.h +++ b/sql/opt_range.h @@ -20,7 +20,7 @@ #ifndef _opt_range_h #define _opt_range_h -#ifdef __GNUC__ +#ifdef USE_PRAGMA_INTERFACE #pragma interface /* gcc class implementation */ #endif @@ -31,11 +31,14 @@ #define UNIQUE_RANGE 16 #define EQ_RANGE 32 #define NULL_RANGE 64 +#define GEOM_FLAG 128 + typedef struct st_key_part { - uint16 key,part,part_length; - uint8 null_bit; - Field *field; + uint16 key,part, store_length, length; + uint8 null_bit; + Field *field; + Field::imagetype image_type; } KEY_PART; @@ -65,9 +68,9 @@ class QUICK_RANGE :public Sql_alloc { class QUICK_SELECT { public: - bool next,dont_free; + bool next,dont_free,sorted; int error; - uint index,max_used_key_length; + uint index, max_used_key_length, used_key_parts; TABLE *head; handler *file; byte *record; @@ -77,20 +80,34 @@ public: MEM_ROOT alloc; KEY_PART *key_parts; + KEY_PART_INFO *key_part_info; ha_rows records; double read_time; QUICK_SELECT(THD *thd, TABLE *table,uint index_arg,bool no_alloc=0); virtual ~QUICK_SELECT(); void reset(void) { next=0; it.rewind(); } - int init() { return error=file->index_init(index); } + int init() + { + key_part_info= head->key_info[index].key_part; + return error=file->ha_index_init(index); + } virtual int get_next(); virtual bool reverse_sorted() { return 0; } - int cmp_next(QUICK_RANGE *range); bool unique_key_range(); }; +class QUICK_SELECT_GEOM: public QUICK_SELECT +{ +public: + QUICK_SELECT_GEOM(THD *thd, TABLE *table, uint index_arg, bool no_alloc) + :QUICK_SELECT(thd, table, index_arg, no_alloc) + {}; + virtual int get_next(); +}; + + class QUICK_SELECT_DESC: public QUICK_SELECT { public: @@ 
-124,15 +141,32 @@ class SQL_SELECT :public Sql_alloc { SQL_SELECT(); ~SQL_SELECT(); - bool check_quick(THD *thd, bool force_quick_range= 0, - ha_rows limit= HA_POS_ERROR) - { return test_quick_select(thd, ~0L,0,limit, force_quick_range) < 0; } - inline bool skipp_record() { return cond ? cond->val_int() == 0 : 0; } + void cleanup(); + bool check_quick(THD *thd, bool force_quick_range, ha_rows limit) + { + key_map tmp; + tmp.set_all(); + return test_quick_select(thd, tmp, 0, limit, force_quick_range) < 0; + } + inline bool skip_record() { return cond ? cond->val_int() == 0 : 0; } int test_quick_select(THD *thd, key_map keys, table_map prev_tables, - ha_rows limit, bool force_quick_range=0); + ha_rows limit, bool force_quick_range); }; + +class FT_SELECT: public QUICK_SELECT { +public: + FT_SELECT(THD *thd, TABLE *table, uint key): + QUICK_SELECT (thd, table, key, 1) { init(); } + ~FT_SELECT() { file->ft_end(); } + int init() { return error= file->ft_init(); } + int get_next() { return error= file->ft_read(record); } +}; + + QUICK_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, struct st_table_ref *ref); +uint get_index_for_order(TABLE *table, ORDER *order, ha_rows limit); + #endif diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc index 0831c375f7a..cfb5b3695a3 100644 --- a/sql/opt_sum.cc +++ b/sql/opt_sum.cc @@ -15,27 +15,59 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -/* Optimizing of many different type of queries with GROUP functions */ +/* + Optimising of MIN(), MAX() and COUNT(*) queries without 'group by' clause + by replacing the aggregate expression with a constant. + + Given a table with a compound key on columns (a,b,c), the following + types of queries are optimised (assuming the table handler supports + the required methods) + + SELECT COUNT(*) FROM t1[,t2,t3,...] + SELECT MIN(b) FROM t1 WHERE a=const + SELECT MAX(c) FROM t1 WHERE a=const AND b=const + SELECT MAX(b) FROM t1 WHERE a=const AND b<const + SELECT MIN(b) FROM t1 WHERE a=const AND b>const + SELECT MIN(b) FROM t1 WHERE a=const AND b BETWEEN const AND const + SELECT MAX(b) FROM t1 WHERE a=const AND b BETWEEN const AND const + + Instead of '<' one can use '<=', '>', '>=' and '=' as well. + Instead of 'a=const' the condition 'a IS NULL' can be used. + + If all selected fields are replaced then we will also remove all + involved tables and return the answer without any join. Thus, the + following query will be replaced with a row of two constants: + SELECT MAX(b), MIN(d) FROM t1,t2 + WHERE a=const AND b<const AND d>const + (assuming a index for column d of table t2 is defined) + +*/ #include "mysql_priv.h" #include "sql_select.h" -static bool find_range_key(TABLE_REF *ref, Field* field,COND *cond); +static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref, Field* field, + COND *cond, uint *range_fl, + uint *key_prefix_length); +static int reckey_in_range(bool max_fl, TABLE_REF *ref, Field* field, + COND *cond, uint range_fl, uint prefix_len); +static int maxmin_in_range(bool max_fl, Field* field, COND *cond); + /* Substitutes constants for some COUNT(), MIN() and MAX() functions. SYNOPSIS opt_sum_query() - tables Tables in query - all_fields All fields to be returned - conds WHERE clause + tables Tables in query + all_fields All fields to be returned + conds WHERE clause NOTE: This function is only called for queries with sum functions and no GROUP BY part. 
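The comment block added at the top of opt_sum.cc above lists the query shapes that get replaced by constants; the core observation is that with an index on (a,b), MIN(b) for a fixed a is simply the first index entry with that prefix. A compact model of the `SELECT MIN(b) FROM t1 WHERE a=const` case over a sorted (a,b) vector; min_b_for_a() and the pair encoding are illustrative, not the handler API:

#include <algorithm>
#include <limits>
#include <optional>
#include <utility>
#include <vector>

// The "index" on (a, b): entries sorted by a first, then by b.
using Entry = std::pair<int, int>;

// SELECT MIN(b) FROM t WHERE a = a_const
// One index probe: the first entry at or after (a_const, -infinity) already
// holds the smallest b within that a group.
std::optional<int> min_b_for_a(const std::vector<Entry> &index, int a_const)
{
  Entry probe{a_const, std::numeric_limits<int>::min()};
  auto it = std::lower_bound(index.begin(), index.end(), probe);
  if (it == index.end() || it->first != a_const)
    return std::nullopt;               // no matching rows: MIN() is NULL
  return it->second;
}

int main()
{
  std::vector<Entry> ix{{1, 9}, {2, 3}, {2, 5}, {2, 8}, {4, 1}};
  return min_b_for_a(ix, 2) == 3 ? 0 : 1;   // answered without scanning the group
}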
- RETURN VALUES + RETURN VALUES 0 No errors 1 if all items were resolved -1 on impossible conditions @@ -48,38 +80,59 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) List_iterator_fast<Item> it(all_fields); int const_result= 1; bool recalc_const_item= 0; + longlong count= 1; + bool is_exact_count= TRUE; table_map removed_tables= 0, outer_tables= 0, used_tables= 0; table_map where_tables= 0; Item *item; - COND *org_conds= conds; int error; if (conds) where_tables= conds->used_tables(); - /* Don't replace expression on a table that is part of an outer join */ + /* + Analyze outer join dependencies, and, if possible, compute the number + of returned rows. + */ for (TABLE_LIST *tl=tables; tl ; tl= tl->next) { + /* Don't replace expression on a table that is part of an outer join */ if (tl->on_expr) { outer_tables|= tl->table->map; /* - We can't optimise LEFT JOIN in cases where the WHERE condition - restricts the table that is used, like in: + We can't optimise LEFT JOIN in cases where the WHERE condition + restricts the table that is used, like in: SELECT MAX(t1.a) FROM t1 LEFT JOIN t2 join-condition - WHERE t2.field IS NULL; + WHERE t2.field IS NULL; */ if (tl->table->map & where_tables) - return 0; + return 0; } else used_tables|= tl->table->map; + + /* + If the storage manager of 'tl' gives exact row count, compute the total + number of rows. If there are no outer table dependencies, this count + may be used as the real count. + */ + if (tl->table->file->table_flags() & HA_NOT_EXACT_COUNT) + { + is_exact_count= FALSE; + count= 1; // ensure count != 0 + } + else + { + tl->table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK); + count*= tl->table->file->records; + } } /* - Iterate through item is select part and replace COUNT(), MIN() and MAX() - with constants (if possible) + Iterate through all items in the SELECT clause and replace + COUNT(), MIN() and MAX() with constants (if possible). */ while ((item= it++)) @@ -89,381 +142,675 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) Item_sum *item_sum= (((Item_sum*) item)); switch (item_sum->sum_func()) { case Item_sum::COUNT_FUNC: - /* - If the expr in count(expr) can never be null we can change this - to the number of rows in the tables - */ - if (!conds && !((Item_sum_count*) item)->args[0]->maybe_null) - { - longlong count=1; - TABLE_LIST *table; - for (table=tables; table ; table=table->next) - { - if (outer_tables || (table->table->file->table_flags() & - HA_NOT_EXACT_COUNT)) - { - const_result=0; // Can't optimize left join - break; - } - tables->table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK); - count*= table->table->file->records; - } - if (!table) - { - ((Item_sum_count*) item)->make_const(count); - recalc_const_item=1; - } - } - else - const_result=0; - break; + /* + If the expr in count(expr) can never be null we can change this + to the number of rows in the tables if this number is exact and + there are no outer joins. + */ + if (!conds && !((Item_sum_count*) item)->args[0]->maybe_null && + !outer_tables && is_exact_count) + { + ((Item_sum_count*) item)->make_const(count); + recalc_const_item= 1; + } + else + const_result= 0; + break; case Item_sum::MIN_FUNC: { - /* - If MIN(expr) is the first part of a key or if all previous - parts of the key is found in the COND, then we can use - indexes to find the key. 
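For COUNT(*) the code above multiplies the exact per-table row counts (a plain FROM list with no WHERE is a cross join) and gives up as soon as any engine reports only an approximate count or an outer join is involved. A tiny model of that decision; Table and try_const_count() are invented, HA_NOT_EXACT_COUNT is the flag named in the diff:

#include <cstdint>
#include <optional>
#include <vector>

struct Table {
  std::uint64_t rows;          // the handler's row count
  bool exact_count;            // false when the engine behaves like HA_NOT_EXACT_COUNT
};

// COUNT(*) over "FROM t1, t2, ..." with no WHERE clause and no outer joins is the
// product of the per-table counts, but only when every count is known to be exact.
std::optional<std::uint64_t> try_const_count(const std::vector<Table> &tables,
                                             bool has_where, bool has_outer_join)
{
  if (has_where || has_outer_join)
    return std::nullopt;               // cannot be answered from metadata alone
  std::uint64_t count = 1;
  for (const Table &t : tables) {
    if (!t.exact_count)
      return std::nullopt;             // an approximate engine forces a real count
    count *= t.rows;
  }
  return count;                        // COUNT(*) becomes this constant
}

int main()
{
  std::vector<Table> from{{100, true}, {7, true}};
  return try_const_count(from, false, false) == 700u ? 0 : 1;
}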
- */ - Item *expr=item_sum->args[0]; - if (expr->type() == Item::FIELD_ITEM) - { - byte key_buff[MAX_KEY_LENGTH]; - TABLE_REF ref; - ref.key_buff=key_buff; - Item_field *item_field= ((Item_field*) expr); - TABLE *table= item_field->field->table; - - if ((outer_tables & table->map) || - (!find_range_key(&ref, item_field->field,conds))) - { - const_result=0; - break; - } - error= table->file->index_init((uint) ref.key); - enum ha_rkey_function find_flag= HA_READ_KEY_OR_NEXT; - uint prefix_len= ref.key_length; - /* - If we are doing MIN() on a column with NULL fields - we must read the key after the NULL column - */ - if (item_field->field->null_bit) - { - ref.key_buff[ref.key_length++]=1; - find_flag= HA_READ_AFTER_KEY; - } - - if (!ref.key_length) - { - error=table->file->index_first(table->record[0]); - } - else - { - error=table->file->index_read(table->record[0],key_buff, - ref.key_length, - find_flag); - if (!error && key_cmp(table, key_buff, ref.key, prefix_len)) - error = HA_ERR_KEY_NOT_FOUND; - } - if (table->key_read) - { - table->key_read=0; - table->file->extra(HA_EXTRA_NO_KEYREAD); - } - table->file->index_end(); - if (error) + /* + If MIN(expr) is the first part of a key or if all previous + parts of the key is found in the COND, then we can use + indexes to find the key. + */ + Item *expr=item_sum->args[0]; + if (expr->type() == Item::FIELD_ITEM) + { + byte key_buff[MAX_KEY_LENGTH]; + TABLE_REF ref; + uint range_fl, prefix_len; + + ref.key_buff= key_buff; + Item_field *item_field= ((Item_field*) expr); + TABLE *table= item_field->field->table; + + /* + Look for a partial key that can be used for optimization. + If we succeed, ref.key_length will contain the length of + this key, while prefix_len will contain the length of + the beginning of this key without field used in MIN(). + Type of range for the key part for this field will be + returned in range_fl. + */ + if ((outer_tables & table->map) || + !find_key_for_maxmin(0, &ref, item_field->field, conds, + &range_fl, &prefix_len)) { + const_result= 0; + break; + } + error= table->file->ha_index_init((uint) ref.key); + + if (!ref.key_length) + error= table->file->index_first(table->record[0]); + else + error= table->file->index_read(table->record[0],key_buff, + ref.key_length, + range_fl & NEAR_MIN ? + HA_READ_AFTER_KEY : + HA_READ_KEY_OR_NEXT); + if (!error && reckey_in_range(0, &ref, item_field->field, + conds, range_fl, prefix_len)) + error= HA_ERR_KEY_NOT_FOUND; + if (table->key_read) + { + table->key_read= 0; + table->file->extra(HA_EXTRA_NO_KEYREAD); + } + table->file->ha_index_end(); + if (error) + { if (error == HA_ERR_KEY_NOT_FOUND || error == HA_ERR_END_OF_FILE) return -1; // No rows matching WHERE - - table->file->print_error(error, MYF(0)); - return(error); // HA_ERR_LOCK_DEADLOCK or - // some other error + /* HA_ERR_LOCK_DEADLOCK or some other error */ + table->file->print_error(error, MYF(0)); + return(error); } - removed_tables|= table->map; - } - else if (!expr->const_item()) // This is VERY seldom false - { - const_result=0; - break; - } - ((Item_sum_min*) item_sum)->reset(); - ((Item_sum_min*) item_sum)->make_const(); - recalc_const_item=1; - break; + removed_tables|= table->map; + } + else if (!expr->const_item() || !is_exact_count) + { + /* + The optimization is not applicable in both cases: + (a) 'expr' is a non-constant expression. Then we can't + replace 'expr' by a constant. + (b) 'expr' is a costant. According to ANSI, MIN/MAX must return + NULL if the query does not return any rows. 
Thus, if we are not + able to determine if the query returns any rows, we can't apply + the optimization and replace MIN/MAX with a constant. + */ + const_result= 0; + break; + } + if (!count) + { + /* If count == 0, then we know that is_exact_count == TRUE. */ + ((Item_sum_min*) item_sum)->clear(); /* Set to NULL. */ + } + else + ((Item_sum_min*) item_sum)->reset(); /* Set to the constant value. */ + ((Item_sum_min*) item_sum)->make_const(); + recalc_const_item= 1; + break; } case Item_sum::MAX_FUNC: { - /* - If MAX(expr) is the first part of a key or if all previous - parts of the key is found in the COND, then we can use - indexes to find the key. - */ - Item *expr=item_sum->args[0]; - if (expr->type() == Item::FIELD_ITEM) - { - byte key_buff[MAX_KEY_LENGTH]; - TABLE_REF ref; - ref.key_buff=key_buff; - TABLE *table=((Item_field*) expr)->field->table; - - if ((table->file->table_flags() & HA_NOT_READ_AFTER_KEY)) - { - const_result=0; - break; - } - if ((outer_tables & table->map) || - !find_range_key(&ref, ((Item_field*) expr)->field,conds)) - { - const_result=0; - break; - } - error=table->file->index_init((uint) ref.key); - - if (!ref.key_length) - { - error=table->file->index_last(table->record[0]); - } - else - { - error=table->file->index_read(table->record[0], key_buff, - ref.key_length, - HA_READ_PREFIX_LAST); - if (!error && key_cmp(table,key_buff,ref.key,ref.key_length)) - error = HA_ERR_KEY_NOT_FOUND; - } - if (table->key_read) - { - table->key_read=0; - table->file->extra(HA_EXTRA_NO_KEYREAD); - } - table->file->index_end(); - if (error) - { + /* + If MAX(expr) is the first part of a key or if all previous + parts of the key is found in the COND, then we can use + indexes to find the key. + */ + Item *expr=item_sum->args[0]; + if (expr->type() == Item::FIELD_ITEM) + { + byte key_buff[MAX_KEY_LENGTH]; + TABLE_REF ref; + uint range_fl, prefix_len; + + ref.key_buff= key_buff; + Item_field *item_field= ((Item_field*) expr); + TABLE *table= item_field->field->table; + + /* + Look for a partial key that can be used for optimization. + If we succeed, ref.key_length will contain the length of + this key, while prefix_len will contain the length of + the beginning of this key without field used in MAX(). + Type of range for the key part for this field will be + returned in range_fl. + */ + if ((outer_tables & table->map) || + !find_key_for_maxmin(1, &ref, item_field->field, conds, + &range_fl, &prefix_len)) + { + const_result= 0; + break; + } + error= table->file->ha_index_init((uint) ref.key); + + if (!ref.key_length) + error= table->file->index_last(table->record[0]); + else + error= table->file->index_read(table->record[0], key_buff, + ref.key_length, + range_fl & NEAR_MAX ? 
+ HA_READ_BEFORE_KEY : + HA_READ_PREFIX_LAST_OR_PREV); + if (!error && reckey_in_range(1, &ref, item_field->field, + conds, range_fl, prefix_len)) + error= HA_ERR_KEY_NOT_FOUND; + if (table->key_read) + { + table->key_read=0; + table->file->extra(HA_EXTRA_NO_KEYREAD); + } + table->file->ha_index_end(); + if (error) + { if (error == HA_ERR_KEY_NOT_FOUND || error == HA_ERR_END_OF_FILE) - return -1; // Impossible query - - table->file->print_error(error, MYF(0)); - return error; // Deadlock or some other error + return -1; // No rows matching WHERE + /* HA_ERR_LOCK_DEADLOCK or some other error */ + table->file->print_error(error, MYF(0)); + return(error); } - removed_tables|= table->map; - } - else if (!expr->const_item()) // This is VERY seldom false - { - const_result=0; - break; - } - ((Item_sum_min*) item_sum)->reset(); - ((Item_sum_min*) item_sum)->make_const(); - recalc_const_item=1; - break; + removed_tables|= table->map; + } + else if (!expr->const_item() || !is_exact_count) + { + /* + The optimization is not applicable in both cases: + (a) 'expr' is a non-constant expression. Then we can't + replace 'expr' by a constant. + (b) 'expr' is a costant. According to ANSI, MIN/MAX must return + NULL if the query does not return any rows. Thus, if we are not + able to determine if the query returns any rows, we can't apply + the optimization and replace MIN/MAX with a constant. + */ + const_result= 0; + break; + } + if (!count) + { + /* If count != 1, then we know that is_exact_count == TRUE. */ + ((Item_sum_max*) item_sum)->clear(); /* Set to NULL. */ + } + else + ((Item_sum_max*) item_sum)->reset(); /* Set to the constant value. */ + ((Item_sum_max*) item_sum)->make_const(); + recalc_const_item= 1; + break; } default: - const_result=0; - break; + const_result= 0; + break; } } else if (const_result) { if (recalc_const_item) - item->update_used_tables(); + item->update_used_tables(); if (!item->const_item()) - const_result=0; + const_result= 0; } } /* If we have a where clause, we can only ignore searching in the tables if MIN/MAX optimisation replaced all used tables - This is to not to use replaced values in case of: + We do not use replaced values in case of: SELECT MIN(key) FROM table_1, empty_table removed_tables is != 0 if we have used MIN() or MAX(). */ if (removed_tables && used_tables != removed_tables) - const_result= 0; // We didn't remove all tables + const_result= 0; // We didn't remove all tables return const_result; } -/* Count in how many times table is used (up to MAX_KEY_PARTS+1) */ -uint count_table_entries(COND *cond,TABLE *table) -{ - if (cond->type() == Item::COND_ITEM) - { - if (((Item_cond*) cond)->functype() == Item_func::COND_OR_FUNC) - return (cond->used_tables() & table->map) ? 
MAX_REF_PARTS+1 : 0; +/* + Test if the predicate compares a field with constants - List_iterator_fast<Item> li(*((Item_cond*) cond)->argument_list()); - Item *item; - uint count=0; - while ((item=li++)) + SYNOPSIS + simple_pred() + func_item in: Predicate item + args out: Here we store the field followed by constants + inv_order out: Is set to 1 if the predicate is of the form 'const op field' + + RETURN + 0 func_item is a simple predicate: a field is compared with constants + 1 Otherwise +*/ + +static bool simple_pred(Item_func *func_item, Item **args, bool *inv_order) +{ + Item *item; + *inv_order= 0; + switch (func_item->argument_count()) { + case 1: + /* field IS NULL */ + item= func_item->arguments()[0]; + if (item->type() != Item::FIELD_ITEM) + return 0; + args[0]= item; + break; + case 2: + /* 'field op const' or 'const op field' */ + item= func_item->arguments()[0]; + if (item->type() == Item::FIELD_ITEM) { - if ((count+=count_table_entries(item,table)) > MAX_REF_PARTS) - return MAX_REF_PARTS+1; + args[0]= item; + item= func_item->arguments()[1]; + if (!item->const_item()) + return 0; + args[1]= item; } - return count; - } - if (cond->type() == Item::FUNC_ITEM && - (((Item_func*) cond)->functype() == Item_func::EQ_FUNC || - (((Item_func*) cond)->functype() == Item_func::EQUAL_FUNC)) && - cond->used_tables() == table->map) - { - Item *left_item= ((Item_func*) cond)->arguments()[0]; - Item *right_item= ((Item_func*) cond)->arguments()[1]; - if (left_item->type() == Item::FIELD_ITEM) + else if (item->const_item()) { - if (!(((Item_field*) left_item)->field->flags & PART_KEY_FLAG) || - !right_item->const_item()) - return MAX_REF_PARTS+1; - return 1; + args[1]= item; + item= func_item->arguments()[1]; + if (item->type() != Item::FIELD_ITEM) + return 0; + args[0]= item; + *inv_order= 1; } - if (right_item->type() == Item::FIELD_ITEM) + else + return 0; + break; + case 3: + /* field BETWEEN const AND const */ + item= func_item->arguments()[0]; + if (item->type() == Item::FIELD_ITEM) { - if (!(((Item_field*) right_item)->field->flags & PART_KEY_FLAG) || - !left_item->const_item()) - return MAX_REF_PARTS+1; - return 1; + args[0]= item; + for (int i= 1 ; i <= 2; i++) + { + item= func_item->arguments()[i]; + if (!item->const_item()) + return 0; + args[i]= item; + } } + else + return 0; } - return (cond->used_tables() & table->map) ? MAX_REF_PARTS+1 : 0; + return 1; } -/* check that the field is usable as key part */ +/* + Check whether a condition matches a key to get {MAX|MIN}(field): + + SYNOPSIS + matching_cond() + max_fl in: Set to 1 if we are optimising MAX() + ref in/out: Reference to the structure we store the key value + keyinfo in Reference to the key info + field_part in: Pointer to the key part for the field + cond in WHERE condition + key_part_used in/out: Map of matchings parts + range_fl in/out: Says whether including key will be used + prefix_len out: Length of common key part for the range + where MAX/MIN is searched for + + DESCRIPTION + For the index specified by the keyinfo parameter, index that + contains field as its component (field_part), the function + checks whether the condition cond is a conjunction and all its + conjuncts referring to the columns of the same table as column + field are one of the following forms: + - f_i= const_i or const_i= f_i or f_i is null, + where f_i is part of the index + - field {<|<=|>=|>|=} const or const {<|<=|>=|>|=} field + - field between const1 and const2 + + RETURN + 0 Index can't be used. 
+ 1 We can use index to get MIN/MAX value +*/ -bool part_of_cond(COND *cond,Field *field) +static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo, + KEY_PART_INFO *field_part, COND *cond, + key_part_map *key_part_used, uint *range_fl, + uint *prefix_len) { + if (!cond) + return 1; + Field *field= field_part->field; + if (!(cond->used_tables() & field->table->map)) + { + /* Condition doesn't restrict the used table */ + return 1; + } if (cond->type() == Item::COND_ITEM) { if (((Item_cond*) cond)->functype() == Item_func::COND_OR_FUNC) - return 0; // Already checked + return 0; + /* AND */ List_iterator_fast<Item> li(*((Item_cond*) cond)->argument_list()); Item *item; - while ((item=li++)) + while ((item= li++)) { - if (part_of_cond(item,field)) - return 1; + if (!matching_cond(max_fl, ref, keyinfo, field_part, item, + key_part_used, range_fl, prefix_len)) + return 0; } + return 1; + } + + if (cond->type() != Item::FUNC_ITEM) + return 0; // Not operator, can't optimize + + bool eq_type= 0; // =, <=> or IS NULL + bool noeq_type= 0; // < or > + bool less_fl= 0; // < or <= + bool is_null= 0; + bool between= 0; + + switch (((Item_func*) cond)->functype()) { + case Item_func::ISNULL_FUNC: + is_null= 1; /* fall through */ + case Item_func::EQ_FUNC: + case Item_func::EQUAL_FUNC: + eq_type= 1; + break; + case Item_func::LT_FUNC: + noeq_type= 1; /* fall through */ + case Item_func::LE_FUNC: + less_fl= 1; + break; + case Item_func::GT_FUNC: + noeq_type= 1; /* fall through */ + case Item_func::GE_FUNC: + break; + case Item_func::BETWEEN: + between= 1; + break; + default: + return 0; // Can't optimize function + } + + Item *args[3]; + bool inv; + + /* Test if this is a comparison of a field and constant */ + if (!simple_pred((Item_func*) cond, args, &inv)) return 0; + + if (inv && !eq_type) + less_fl= 1-less_fl; // Convert '<' -> '>' (etc) + + /* Check if field is part of the tested partial key */ + byte *key_ptr= ref->key_buff; + KEY_PART_INFO *part; + for (part= keyinfo->key_part; + ; + key_ptr+= part++->store_length) + + { + if (part > field_part) + return 0; // Field is beyond the tested parts + if (part->field->eq(((Item_field*) args[0])->field)) + break; // Found a part od the key for the field } - if (cond->type() == Item::FUNC_ITEM && - (((Item_func*) cond)->functype() == Item_func::EQ_FUNC || - ((Item_func*) cond)->functype() == Item_func::EQUAL_FUNC) && - cond->used_tables() == field->table->map) + + bool is_field_part= part == field_part; + if (!(is_field_part || eq_type)) + return 0; + + key_part_map org_key_part_used= *key_part_used; + if (eq_type || between || max_fl == less_fl) { - Item *left_item= ((Item_func*) cond)->arguments()[0]; - Item *right_item= ((Item_func*) cond)->arguments()[1]; - if (left_item->type() == Item::FIELD_ITEM) + uint length= (key_ptr-ref->key_buff)+part->store_length; + if (ref->key_length < length) + /* Ultimately ref->key_length will contain the length of the search key */ + ref->key_length= length; + if (!*prefix_len && part+1 == field_part) + *prefix_len= length; + if (is_field_part && eq_type) + *prefix_len= ref->key_length; + + *key_part_used|= (key_part_map) 1 << (part - keyinfo->key_part); + } + + if (org_key_part_used != *key_part_used || + (is_field_part && + (between || eq_type || max_fl == less_fl) && !cond->val_int())) + { + /* + It's the first predicate for this part or a predicate of the + following form that moves upper/lower bounds for max/min values: + - field BETWEEN const AND const + - field = const + - field {<|<=} const, when 
searching for MAX + - field {>|>=} const, when searching for MIN + */ + + if (is_null) { - if (((Item_field*) left_item)->field != field || - !right_item->const_item()) - return 0; + part->field->set_null(); + *key_ptr= (byte) 1; } - else if (right_item->type() == Item::FIELD_ITEM) + else { - if (((Item_field*) right_item)->field != field || - !left_item->const_item()) - return 0; - right_item=left_item; // const item in right + store_val_in_field(part->field, args[between && max_fl ? 2 : 1]); + if (part->null_bit) + *key_ptr++= (byte) test(part->field->is_null()); + part->field->get_key_image((char*) key_ptr, part->length, + part->field->charset(), Field::itRAW); + } + if (is_field_part) + { + if (between || eq_type) + *range_fl&= ~(NO_MAX_RANGE | NO_MIN_RANGE); + else + { + *range_fl&= ~(max_fl ? NO_MAX_RANGE : NO_MIN_RANGE); + if (noeq_type) + *range_fl|= (max_fl ? NEAR_MAX : NEAR_MIN); + else + *range_fl&= ~(max_fl ? NEAR_MAX : NEAR_MIN); + } } - store_val_in_field(field,right_item); - return 1; } - return 0; + else if (eq_type) + { + if (!is_null && !cond->val_int() || + is_null && !test(part->field->is_null())) + return 0; // Impossible test + } + else if (is_field_part) + *range_fl&= ~(max_fl ? NO_MIN_RANGE : NO_MAX_RANGE); + return 1; } /* - Check if we can get value for field by using a key + Check whether we can get value for {max|min}(field) by using a key. - NOTES + SYNOPSIS + find_key_for_maxmin() + max_fl in: 0 for MIN(field) / 1 for MAX(field) + ref in/out Reference to the structure we store the key value + field in: Field used inside MIN() / MAX() + cond in: WHERE condition + range_fl out: Bit flags for how to search if key is ok + prefix_len out: Length of prefix for the search range + + DESCRIPTION + If where condition is not a conjunction of 0 or more conjuct the + function returns false, otherwise it checks whether there is an + index including field as its k-th component/part such that: + + 1. for each previous component f_i there is one and only one conjunct + of the form: f_i= const_i or const_i= f_i or f_i is null + 2. references to field occur only in conjucts of the form: + field {<|<=|>=|>|=} const or const {<|<=|>=|>|=} field or + field BETWEEN const1 AND const2 + 3. all references to the columns from the same table as column field + occur only in conjucts mentioned above. + + If such an index exists the function through the ref parameter + returns the key value to find max/min for the field using the index, + the length of first (k-1) components of the key and flags saying + how to apply the key for the search max/min value. + (if we have a condition field = const, prefix_len contains the length + of the whole search key) + + NOTE This function may set table->key_read to 1, which must be reset after index is used! (This can only happen when function returns 1) -*/ -static bool find_range_key(TABLE_REF *ref, Field* field, COND *cond) + RETURN + 0 Index can not be used to optimize MIN(field)/MAX(field) + 1 Can use key to optimize MIN()/MAX() + In this case ref, range_fl and prefix_len are updated +*/ + +static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref, + Field* field, COND *cond, + uint *range_fl, uint *prefix_len) { if (!(field->flags & PART_KEY_FLAG)) - return 0; // Not part of a key. Skip it + return 0; // Not key field - TABLE *table=field->table; - uint idx=0; - - /* Check if some key has field as first key part */ - if ((field->key_start & field->table->keys_in_use_for_query) && - (! cond || ! 
(cond->used_tables() & table->map))) - { - for (key_map key=field->key_start ;;) - { - for (; !(key & 1) ; idx++) - key>>=1; - if (!(table->file->index_flags(idx) & HA_WRONG_ASCII_ORDER)) - break; // Key is ok - /* Can't use this key, for looking up min() or max(), end if last one */ - if (key == 1) - return 0; - key>>=1; idx++; - } - ref->key_length=0; - ref->key=idx; - if (field->part_of_key & ((key_map) 1 << idx)) - { - table->key_read=1; - table->file->extra(HA_EXTRA_KEYREAD); - } - return 1; // Ok to use key - } - /* - ** Check if WHERE consist of exactly the previous key parts for some key - */ - if (!cond) - return 0; - uint table_entries= count_table_entries(cond,table); - if (!table_entries || table_entries > MAX_REF_PARTS) - return 0; + TABLE *table= field->table; + uint idx= 0; KEY *keyinfo,*keyinfo_end; - idx=0; - for (keyinfo=table->key_info, keyinfo_end=keyinfo+table->keys ; + for (keyinfo= table->key_info, keyinfo_end= keyinfo+table->keys ; keyinfo != keyinfo_end; keyinfo++,idx++) { - if (table_entries < keyinfo->key_parts) + KEY_PART_INFO *part,*part_end; + key_part_map key_part_to_use= 0; + uint jdx= 0; + *prefix_len= 0; + for (part= keyinfo->key_part, part_end= part+keyinfo->key_parts ; + part != part_end ; + part++, jdx++, key_part_to_use= (key_part_to_use << 1) | 1) { - byte *key_ptr=ref->key_buff; - KEY_PART_INFO *part,*part_end; - int left_length=MAX_KEY_LENGTH; + if (!(table->file->index_flags(idx, jdx, 0) & HA_READ_ORDER)) + return 0; - for (part=keyinfo->key_part, part_end=part+table_entries ; - part != part_end ; - part++) - { - if (!part_of_cond(cond,part->field) || - left_length < part->store_length || - (table->file->index_flags(idx) & HA_WRONG_ASCII_ORDER)) - break; - uint store_length= part->store_length; - // Save found constant - if (part->null_bit) - { - *key_ptr++= (byte) test(part->field->is_null()); - store_length--; - } - part->field->get_key_image((char*) key_ptr, store_length); - key_ptr+= store_length; - left_length-= part->store_length; - } - if (part == part_end && part->field == field) + if (field->eq(part->field)) { - ref->key_length= (uint) (key_ptr-ref->key_buff); - ref->key=idx; - if (field->part_of_key & ((key_map) 1 << idx)) - { - table->key_read=1; - table->file->extra(HA_EXTRA_KEYREAD); - } - return 1; // Ok to use key + ref->key= idx; + ref->key_length= 0; + key_part_map key_part_used= 0; + *range_fl= NO_MIN_RANGE | NO_MAX_RANGE; + if (matching_cond(max_fl, ref, keyinfo, part, cond, + &key_part_used, range_fl, prefix_len) && + !(key_part_to_use & ~key_part_used)) + { + if (!max_fl && key_part_used == key_part_to_use && part->null_bit) + { + /* + SELECT MIN(key_part2) FROM t1 WHERE key_part1=const + If key_part2 may be NULL, then we want to find the first row + that is not null + */ + ref->key_buff[ref->key_length]= 1; + ref->key_length+= part->store_length; + *range_fl&= ~NO_MIN_RANGE; + *range_fl|= NEAR_MIN; // > NULL + } + /* + The following test is false when the key in the key tree is + converted (for example to upper case) + */ + if (field->part_of_key.is_set(idx)) + { + table->key_read= 1; + table->file->extra(HA_EXTRA_KEYREAD); + } + return 1; + } } } } - return 0; // No possible key + return 0; } + + +/* + Check whether found key is in range specified by conditions + + SYNOPSIS + reckey_in_range() + max_fl in: 0 for MIN(field) / 1 for MAX(field) + ref in: Reference to the key value and info + field in: Field used the MIN/MAX expression + cond in: WHERE condition + range_fl in: Says whether there is a condition to to be checked + 
prefix_len in: Length of the constant part of the key + + RETURN + 0 ok + 1 WHERE was not true for the found row +*/ + +static int reckey_in_range(bool max_fl, TABLE_REF *ref, Field* field, + COND *cond, uint range_fl, uint prefix_len) +{ + if (key_cmp_if_same(field->table, ref->key_buff, ref->key, prefix_len)) + return 1; + if (!cond || (range_fl & (max_fl ? NO_MIN_RANGE : NO_MAX_RANGE))) + return 0; + return maxmin_in_range(max_fl, field, cond); +} + + +/* + Check whether {MAX|MIN}(field) is in range specified by conditions + SYNOPSIS + maxmin_in_range() + max_fl in: 0 for MIN(field) / 1 for MAX(field) + field in: Field used the MIN/MAX expression + cond in: WHERE condition + + RETURN + 0 ok + 1 WHERE was not true for the found row +*/ + +static int maxmin_in_range(bool max_fl, Field* field, COND *cond) +{ + /* If AND/OR condition */ + if (cond->type() == Item::COND_ITEM) + { + List_iterator_fast<Item> li(*((Item_cond*) cond)->argument_list()); + Item *item; + while ((item= li++)) + { + if (maxmin_in_range(max_fl, field, item)) + return 1; + } + return 0; + } + + if (cond->used_tables() != field->table->map) + return 0; + bool less_fl= 0; + switch (((Item_func*) cond)->functype()) { + case Item_func::BETWEEN: + return cond->val_int() == 0; // Return 1 if WHERE is false + case Item_func::LT_FUNC: + case Item_func::LE_FUNC: + less_fl= 1; + case Item_func::GT_FUNC: + case Item_func::GE_FUNC: + { + Item *item= ((Item_func*) cond)->arguments()[1]; + /* In case of 'const op item' we have to swap the operator */ + if (!item->const_item()) + less_fl= 1-less_fl; + /* + We only have to check the expression if we are using an expression like + SELECT MAX(b) FROM t1 WHERE a=const AND b>const + not for + SELECT MAX(b) FROM t1 WHERE a=const AND b<const + */ + if (max_fl != less_fl) + return cond->val_int() == 0; // Return 1 if WHERE is false + return 0; + } + case Item_func::EQ_FUNC: + case Item_func::EQUAL_FUNC: + break; + default: // Keep compiler happy + DBUG_ASSERT(1); // Impossible + break; + } + return 0; +} + diff --git a/sql/password.c b/sql/password.c index 575e837ceb8..94b9dc440be 100644 --- a/sql/password.c +++ b/sql/password.c @@ -29,21 +29,57 @@ The password is saved (in user.password) by using the PASSWORD() function in mysql. + This is .c file because it's used in libmysqlclient, which is entirely in C. + (we need it to be portable to a variety of systems). Example: update user set password=PASSWORD("hello") where user="test" This saves a hashed number as a string in the password field. 
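
Before the 4.1 scheme is described below, it may help to see the old storage format end to end. The following is a minimal standalone sketch of the same mixing loop that hash_password() and make_scrambled_password_323() in this hunk perform; it uses uint32_t in place of the server's ulong (so exact values can differ on platforms where ulong is wider) and prints the two 31-bit words as the familiar 16 hex digits.

    // Sketch of the pre-4.1 PASSWORD() hash (mirrors hash_password() and
    // make_scrambled_password_323() further down in this hunk).
    // uint32_t stands in for the server's ulong.
    #include <cstdint>
    #include <cstdio>

    static void hash_password_323(uint32_t result[2], const char *password)
    {
      uint32_t nr = 1345345333UL, add = 7, nr2 = 0x12345671UL;
      for (; *password; password++)
      {
        if (*password == ' ' || *password == '\t')
          continue;                               /* spaces are ignored */
        uint32_t tmp = (uint32_t) (unsigned char) *password;
        nr  ^= (((nr & 63) + add) * tmp) + (nr << 8);
        nr2 += (nr2 << 8) ^ nr;
        add += tmp;
      }
      result[0] = nr  & ((1UL << 31) - 1);        /* don't use the sign bit */
      result[1] = nr2 & ((1UL << 31) - 1);
    }

    int main()
    {
      uint32_t hash[2];
      char scrambled[17];                         /* 16 hex digits + '\0' */
      hash_password_323(hash, "hello");
      sprintf(scrambled, "%08lx%08lx",
              (unsigned long) hash[0], (unsigned long) hash[1]);
      printf("old-format password hash: %s\n", scrambled);
      return 0;
    }
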
+ + The new authentication is performed in following manner: + + SERVER: public_seed=create_random_string() + send(public_seed) + + CLIENT: recv(public_seed) + hash_stage1=sha1("password") + hash_stage2=sha1(hash_stage1) + reply=xor(hash_stage1, sha1(public_seed,hash_stage2) + + // this three steps are done in scramble() + + send(reply) + + + SERVER: recv(reply) + hash_stage1=xor(reply, sha1(public_seed,hash_stage2)) + candidate_hash2=sha1(hash_stage1) + check(candidate_hash2==hash_stage2) + + // this three steps are done in check_scramble() + *****************************************************************************/ #include <my_global.h> #include <my_sys.h> #include <m_string.h> +#include <sha1.h> #include "mysql.h" +/************ MySQL 3.23-4.0 authentification routines: untouched ***********/ + +/* + New (MySQL 3.21+) random generation structure initialization + SYNOPSIS + randominit() + rand_st OUT Structure to initialize + seed1 IN First initialization parameter + seed2 IN Second initialization parameter +*/ -void randominit(struct rand_struct *rand_st,ulong seed1, ulong seed2) -{ /* For mysql 3.21.# */ +void randominit(struct rand_struct *rand_st, ulong seed1, ulong seed2) +{ /* For mysql 3.21.# */ #ifdef HAVE_purify - bzero((char*) rand_st,sizeof(*rand_st)); /* Avoid UMC varnings */ + bzero((char*) rand_st,sizeof(*rand_st)); /* Avoid UMC varnings */ #endif rand_st->max_value= 0x3FFFFFFFL; rand_st->max_value_dbl=(double) rand_st->max_value; @@ -51,13 +87,15 @@ void randominit(struct rand_struct *rand_st,ulong seed1, ulong seed2) rand_st->seed2=seed2%rand_st->max_value; } -static void old_randominit(struct rand_struct *rand_st,ulong seed1) -{ /* For mysql 3.20.# */ - rand_st->max_value= 0x01FFFFFFL; - rand_st->max_value_dbl=(double) rand_st->max_value; - seed1%=rand_st->max_value; - rand_st->seed1=seed1 ; rand_st->seed2=seed1/2; -} + +/* + Generate random number. 
+ SYNOPSIS + my_rnd() + rand_st INOUT Structure used for number generation + RETURN VALUE + generated pseudo random number +*/ double my_rnd(struct rand_struct *rand_st) { @@ -66,14 +104,26 @@ double my_rnd(struct rand_struct *rand_st) return (((double) rand_st->seed1)/rand_st->max_value_dbl); } -void hash_password(ulong *result, const char *password) + +/* + Generate binary hash from raw text string + Used for Pre-4.1 password handling + SYNOPSIS + hash_password() + result OUT store hash in this location + password IN plain text password to build hash + password_len IN password length (password may be not null-terminated) +*/ + +void hash_password(ulong *result, const char *password, uint password_len) { register ulong nr=1345345333L, add=7, nr2=0x12345671L; ulong tmp; - for (; *password ; password++) + const char *password_end= password + password_len; + for (; password < password_end; password++) { if (*password == ' ' || *password == '\t') - continue; /* skipp space in password */ + continue; /* skip space in password */ tmp= (ulong) (uchar) *password; nr^= (((nr & 63)+add)*tmp)+ (nr << 8); nr2+=(nr2 << 8) ^ nr; @@ -81,31 +131,127 @@ void hash_password(ulong *result, const char *password) } result[0]=nr & (((ulong) 1L << 31) -1L); /* Don't use sign bit (str2int) */; result[1]=nr2 & (((ulong) 1L << 31) -1L); - return; } -void make_scrambled_password(char *to,const char *password) + +/* + Create password to be stored in user database from raw string + Used for pre-4.1 password handling + SYNOPSIS + make_scrambled_password_323() + to OUT store scrambled password here + password IN user-supplied password +*/ + +void make_scrambled_password_323(char *to, const char *password) { ulong hash_res[2]; - hash_password(hash_res,password); - sprintf(to,"%08lx%08lx",hash_res[0],hash_res[1]); + hash_password(hash_res, password, strlen(password)); + sprintf(to, "%08lx%08lx", hash_res[0], hash_res[1]); +} + + +/* + Scramble string with password. + Used in pre 4.1 authentication phase. + SYNOPSIS + scramble_323() + to OUT Store scrambled message here. Buffer must be at least + SCRAMBLE_LENGTH_323+1 bytes long + message IN Message to scramble. Message must be at least + SRAMBLE_LENGTH_323 bytes long. + password IN Password to use while scrambling +*/ + +void scramble_323(char *to, const char *message, const char *password) +{ + struct rand_struct rand_st; + ulong hash_pass[2], hash_message[2]; + + if (password && password[0]) + { + char extra, *to_start=to; + const char *message_end= message + SCRAMBLE_LENGTH_323; + hash_password(hash_pass,password, strlen(password)); + hash_password(hash_message, message, SCRAMBLE_LENGTH_323); + randominit(&rand_st,hash_pass[0] ^ hash_message[0], + hash_pass[1] ^ hash_message[1]); + for (; message < message_end; message++) + *to++= (char) (floor(my_rnd(&rand_st)*31)+64); + extra=(char) (floor(my_rnd(&rand_st)*31)); + while (to_start != to) + *(to_start++)^=extra; + } + *to= 0; } -static inline unsigned int char_val(char X) + +/* + Check scrambled message + Used in pre 4.1 password handling + SYNOPSIS + check_scramble_323() + scrambled scrambled message to check. + message original random message which was used for scrambling; must + be exactly SCRAMBLED_LENGTH_323 bytes long and + NULL-terminated. + hash_pass password which should be used for scrambling + All params are IN. 
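
check_scramble_323() can verify the reply only because client and server seed an identical generator from hash(password) XOR hash(challenge) and then draw the same character stream. Below is a minimal sketch of that idea; the generator is modelled on randominit()/my_rnd() (the exact recurrence is a stand-in for illustration) and the seed values are made up.

    // Two identically seeded copies of a rand_struct-style generator produce
    // the same stream, which is what scramble_323()/check_scramble_323() rely on.
    // Plain C++ stand-in, not the server code; seeds are arbitrary examples.
    #include <cstdint>
    #include <cstdio>
    #include <cmath>

    struct rand323 { uint32_t seed1, seed2, max_value; double max_value_dbl; };

    static void randominit323(rand323 *r, uint32_t seed1, uint32_t seed2)
    {
      r->max_value = 0x3FFFFFFFUL;
      r->max_value_dbl = (double) r->max_value;
      r->seed1 = seed1 % r->max_value;
      r->seed2 = seed2 % r->max_value;
    }

    static double my_rnd323(rand323 *r)
    {
      r->seed1 = (r->seed1 * 3 + r->seed2) % r->max_value;
      r->seed2 = (r->seed1 + r->seed2 + 33) % r->max_value;
      return ((double) r->seed1) / r->max_value_dbl;
    }

    int main()
    {
      /* Pretend both sides computed the same hash_pass ^ hash_message seeds. */
      rand323 server, client;
      randominit323(&server, 0x12345678UL ^ 0x0BADCAFEUL, 0x0F00DUL ^ 0x31337UL);
      randominit323(&client, 0x12345678UL ^ 0x0BADCAFEUL, 0x0F00DUL ^ 0x31337UL);

      for (int i = 0; i < 8; i++)
      {
        char s = (char) (floor(my_rnd323(&server) * 31) + 64);
        char c = (char) (floor(my_rnd323(&client) * 31) + 64);
        printf("%c %c %s\n", s, c, s == c ? "match" : "MISMATCH");
      }
      return 0;
    }
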
+ + RETURN VALUE + 0 - password correct + !0 - password invalid +*/ + +my_bool +check_scramble_323(const char *scrambled, const char *message, + ulong *hash_pass) +{ + struct rand_struct rand_st; + ulong hash_message[2]; + char buff[16],*to,extra; /* Big enough for check */ + const char *pos; + + hash_password(hash_message, message, SCRAMBLE_LENGTH_323); + randominit(&rand_st,hash_pass[0] ^ hash_message[0], + hash_pass[1] ^ hash_message[1]); + to=buff; + DBUG_ASSERT(sizeof(buff) > SCRAMBLE_LENGTH_323); + for (pos=scrambled ; *pos && to < buff+sizeof(buff) ; pos++) + *to++=(char) (floor(my_rnd(&rand_st)*31)+64); + if (pos-scrambled != SCRAMBLE_LENGTH_323) + return 1; + extra=(char) (floor(my_rnd(&rand_st)*31)); + to=buff; + while (*scrambled) + { + if (*scrambled++ != (char) (*to++ ^ extra)) + return 1; /* Wrong password */ + } + return 0; +} + +static inline uint8 char_val(uint8 X) { return (uint) (X >= '0' && X <= '9' ? X-'0' : - X >= 'A' && X <= 'Z' ? X-'A'+10 : - X-'a'+10); + X >= 'A' && X <= 'Z' ? X-'A'+10 : X-'a'+10); } + /* -** This code assumes that len(password) is divideable with 8 and that -** res is big enough (2 in mysql) + Convert password from hex string (as stored in mysql.user) to binary form. + SYNOPSIS + get_salt_from_password_323() + res OUT store salt here + password IN password string as stored in mysql.user + NOTE + This function does not have length check for passwords. It will just crash + Password hashes in old format must have length divisible by 8 */ -void get_salt_from_password(ulong *res,const char *password) +void get_salt_from_password_323(ulong *res, const char *password) { - res[0]=res[1]=0; + res[0]= res[1]= 0; if (password) { while (*password) @@ -113,79 +259,263 @@ void get_salt_from_password(ulong *res,const char *password) ulong val=0; uint i; for (i=0 ; i < 8 ; i++) - val=(val << 4)+char_val(*password++); + val=(val << 4)+char_val(*password++); *res++=val; } } - return; } -void make_password_from_salt(char *to, ulong *hash_res) + +/* + Convert scrambled password from binary form to asciiz hex string. + SYNOPSIS + make_password_from_salt_323() + to OUT store resulting string password here, at least 17 bytes + salt IN password in salt format, 2 ulongs +*/ + +void make_password_from_salt_323(char *to, const ulong *salt) { - sprintf(to,"%08lx%08lx",hash_res[0],hash_res[1]); + sprintf(to,"%08lx%08lx", salt[0], salt[1]); } /* - * Genererate a new message based on message and password - * The same thing is done in client and server and the results are checked. - */ + **************** MySQL 4.1.1 authentification routines ************* +*/ + +/* + Generate string of printable random characters of requested length + SYNOPSIS + create_random_string() + to OUT buffer for generation; must be at least length+1 bytes + long; result string is always null-terminated + length IN how many random characters to put in buffer + rand_st INOUT structure used for number generation +*/ -char *scramble(char *to,const char *message,const char *password, - my_bool old_ver) +void create_random_string(char *to, uint length, struct rand_struct *rand_st) { - struct rand_struct rand_st; - ulong hash_pass[2],hash_message[2]; - if (password && password[0]) + char *end= to + length; + /* Use pointer arithmetics as it is faster way to do so. 
*/ + for (; to < end; to++) + *to= (char) (my_rnd(rand_st)*94+33); + *to= '\0'; +} + + +/* Character to use as version identifier for version 4.1 */ + +#define PVERSION41_CHAR '*' + + +/* + Convert given octet sequence to asciiz string of hex characters; + str..str+len and 'to' may not overlap. + SYNOPSIS + octet2hex() + buf OUT output buffer. Must be at least 2*len+1 bytes + str, len IN the beginning and the length of the input string +*/ + +static void +octet2hex(char *to, const uint8 *str, uint len) +{ + const uint8 *str_end= str + len; + for (; str != str_end; ++str) { - char *to_start=to; - hash_password(hash_pass,password); - hash_password(hash_message,message); - if (old_ver) - old_randominit(&rand_st,hash_pass[0] ^ hash_message[0]); - else - randominit(&rand_st,hash_pass[0] ^ hash_message[0], - hash_pass[1] ^ hash_message[1]); - while (*message++) - *to++= (char) (floor(my_rnd(&rand_st)*31)+64); - if (!old_ver) - { /* Make it harder to break */ - char extra=(char) (floor(my_rnd(&rand_st)*31)); - while (to_start != to) - *(to_start++)^=extra; - } + *to++= _dig_vec_upper[(*str & 0xF0) >> 4]; + *to++= _dig_vec_upper[*str & 0x0F]; } - *to=0; - return to; + *to= '\0'; } -my_bool check_scramble(const char *scrambled, const char *message, - ulong *hash_pass, my_bool old_ver) -{ - struct rand_struct rand_st; - ulong hash_message[2]; - char buff[16],*to,extra; /* Big enough for check */ - const char *pos; +/* + Convert given asciiz string of hex (0..9 a..f) characters to octet + sequence. + SYNOPSIS + hex2octet() + to OUT buffer to place result; must be at least len/2 bytes + str, len IN begin, length for character string; str and to may not + overlap; len % 2 == 0 +*/ - hash_password(hash_message,message); - if (old_ver) - old_randominit(&rand_st,hash_pass[0] ^ hash_message[0]); - else - randominit(&rand_st,hash_pass[0] ^ hash_message[0], - hash_pass[1] ^ hash_message[1]); - to=buff; - for (pos=scrambled ; *pos ; pos++) - *to++=(char) (floor(my_rnd(&rand_st)*31)+64); - if (old_ver) - extra=0; - else - extra=(char) (floor(my_rnd(&rand_st)*31)); - to=buff; - while (*scrambled) +static void +hex2octet(uint8 *to, const char *str, uint len) +{ + const char *str_end= str + len; + while (str < str_end) { - if (*scrambled++ != (char) (*to++ ^ extra)) - return 1; /* Wrong password */ + register char tmp= char_val(*str++); + *to++= (tmp << 4) | char_val(*str++); } - return 0; +} + + +/* + Encrypt/Decrypt function used for password encryption in authentication. + Simple XOR is used here but it is OK as we crypt random strings. Note, + that XOR(s1, XOR(s1, s2)) == s2, XOR(s1, s2) == XOR(s2, s1) + SYNOPSIS + my_crypt() + to OUT buffer to hold crypted string; must be at least len bytes + long; to and s1 (or s2) may be the same. + s1, s2 IN input strings (of equal length) + len IN length of s1 and s2 +*/ + +static void +my_crypt(char *to, const uchar *s1, const uchar *s2, uint len) +{ + const uint8 *s1_end= s1 + len; + while (s1 < s1_end) + *to++= *s1++ ^ *s2++; +} + + +/* + MySQL 4.1.1 password hashing: SHA conversion (see RFC 2289, 3174) twice + applied to the password string, and then produced octet sequence is + converted to hex string. + The result of this function is used as return value from PASSWORD() and + is stored in the database. 
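
For reference, this is what the stored 4.1.1 value ends up looking like: PVERSION41_CHAR ('*') followed by the stage-2 hash rendered in upper-case hex by octet2hex(). A small sketch, using placeholder bytes instead of a real SHA1 digest:

    // Sketch of the 4.1 password storage format: '*' + 40 upper-case hex digits,
    // as octet2hex()/make_password_from_salt() produce below.
    // The 20 bytes here are an arbitrary placeholder, not a real SHA1 output.
    #include <cstdint>
    #include <cstdio>

    static const char dig_vec_upper[] = "0123456789ABCDEF";
    static const int  HASH_SIZE = 20;             /* SHA1_HASH_SIZE */

    static void octet2hex_sketch(char *to, const uint8_t *str, unsigned len)
    {
      for (const uint8_t *end = str + len; str != end; ++str)
      {
        *to++ = dig_vec_upper[(*str & 0xF0) >> 4];
        *to++ = dig_vec_upper[*str & 0x0F];
      }
      *to = '\0';
    }

    int main()
    {
      uint8_t hash_stage2[HASH_SIZE];
      for (int i = 0; i < HASH_SIZE; i++)
        hash_stage2[i] = (uint8_t) (i * 11 + 3);  /* placeholder bytes */

      char stored[2 * HASH_SIZE + 2];             /* '*' + 40 hex + '\0' */
      stored[0] = '*';                            /* PVERSION41_CHAR */
      octet2hex_sketch(stored + 1, hash_stage2, HASH_SIZE);
      printf("stored password value: %s\n", stored);
      return 0;
    }
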
+ SYNOPSIS + make_scrambled_password() + buf OUT buffer of size 2*SHA1_HASH_SIZE + 2 to store hex string + password IN NULL-terminated password string +*/ + +void +make_scrambled_password(char *to, const char *password) +{ + SHA1_CONTEXT sha1_context; + uint8 hash_stage2[SHA1_HASH_SIZE]; + + mysql_sha1_reset(&sha1_context); + /* stage 1: hash password */ + mysql_sha1_input(&sha1_context, (uint8 *) password, strlen(password)); + mysql_sha1_result(&sha1_context, (uint8 *) to); + /* stage 2: hash stage1 output */ + mysql_sha1_reset(&sha1_context); + mysql_sha1_input(&sha1_context, (uint8 *) to, SHA1_HASH_SIZE); + /* separate buffer is used to pass 'to' in octet2hex */ + mysql_sha1_result(&sha1_context, hash_stage2); + /* convert hash_stage2 to hex string */ + *to++= PVERSION41_CHAR; + octet2hex(to, hash_stage2, SHA1_HASH_SIZE); +} + + +/* + Produce an obscure octet sequence from password and random + string, recieved from the server. This sequence corresponds to the + password, but password can not be easily restored from it. The sequence + is then sent to the server for validation. Trailing zero is not stored + in the buf as it is not needed. + This function is used by client to create authenticated reply to the + server's greeting. + SYNOPSIS + scramble() + buf OUT store scrambled string here. The buf must be at least + SHA1_HASH_SIZE bytes long. + message IN random message, must be exactly SCRAMBLE_LENGTH long and + NULL-terminated. + password IN users' password +*/ + +void +scramble(char *to, const char *message, const char *password) +{ + SHA1_CONTEXT sha1_context; + uint8 hash_stage1[SHA1_HASH_SIZE]; + uint8 hash_stage2[SHA1_HASH_SIZE]; + + mysql_sha1_reset(&sha1_context); + /* stage 1: hash password */ + mysql_sha1_input(&sha1_context, (uint8 *) password, strlen(password)); + mysql_sha1_result(&sha1_context, hash_stage1); + /* stage 2: hash stage 1; note that hash_stage2 is stored in the database */ + mysql_sha1_reset(&sha1_context); + mysql_sha1_input(&sha1_context, hash_stage1, SHA1_HASH_SIZE); + mysql_sha1_result(&sha1_context, hash_stage2); + /* create crypt string as sha1(message, hash_stage2) */; + mysql_sha1_reset(&sha1_context); + mysql_sha1_input(&sha1_context, (const uint8 *) message, SCRAMBLE_LENGTH); + mysql_sha1_input(&sha1_context, hash_stage2, SHA1_HASH_SIZE); + /* xor allows 'from' and 'to' overlap: lets take advantage of it */ + mysql_sha1_result(&sha1_context, (uint8 *) to); + my_crypt(to, (const uchar *) to, hash_stage1, SCRAMBLE_LENGTH); +} + + +/* + Check that scrambled message corresponds to the password; the function + is used by server to check that recieved reply is authentic. + This function does not check lengths of given strings: message must be + null-terminated, reply and hash_stage2 must be at least SHA1_HASH_SIZE + long (if not, something fishy is going on). + SYNOPSIS + check_scramble() + scramble clients' reply, presumably produced by scramble() + message original random string, previously sent to client + (presumably second argument of scramble()), must be + exactly SCRAMBLE_LENGTH long and NULL-terminated. + hash_stage2 hex2octet-decoded database entry + All params are IN. 
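
The remark above that XOR(s1, XOR(s1, s2)) == s2 is the whole trick behind check_scramble(): the server can peel the mask off the client's reply and recover hash_stage1. A minimal sketch with a made-up mask standing in for sha1(public_seed, hash_stage2); no real SHA1 is computed here.

    // The XOR step that scramble()/check_scramble() rely on: applying the same
    // mask twice recovers the original bytes. 'mask' is a placeholder for
    // sha1(public_seed, hash_stage2).
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static void my_crypt_sketch(uint8_t *to, const uint8_t *s1,
                                const uint8_t *s2, unsigned len)
    {
      for (const uint8_t *end = s1 + len; s1 != end; )
        *to++ = *s1++ ^ *s2++;
    }

    int main()
    {
      const unsigned LEN = 20;            /* SHA1_HASH_SIZE / SCRAMBLE_LENGTH */
      uint8_t hash_stage1[20], mask[20], reply[20], recovered[20];

      for (unsigned i = 0; i < LEN; i++)  /* placeholder values */
      {
        hash_stage1[i] = (uint8_t) (0xA0 + i);
        mask[i]        = (uint8_t) (7 * i + 1);
      }

      /* client: reply = hash_stage1 XOR mask */
      my_crypt_sketch(reply, hash_stage1, mask, LEN);
      /* server: hash_stage1 = reply XOR mask */
      my_crypt_sketch(recovered, reply, mask, LEN);

      printf("stage1 recovered intact: %s\n",
             memcmp(hash_stage1, recovered, LEN) == 0 ? "yes" : "no");
      return 0;
    }
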
+ + RETURN VALUE + 0 password is correct + !0 password is invalid +*/ + +my_bool +check_scramble(const char *scramble, const char *message, + const uint8 *hash_stage2) +{ + SHA1_CONTEXT sha1_context; + uint8 buf[SHA1_HASH_SIZE]; + uint8 hash_stage2_reassured[SHA1_HASH_SIZE]; + + mysql_sha1_reset(&sha1_context); + /* create key to encrypt scramble */ + mysql_sha1_input(&sha1_context, (const uint8 *) message, SCRAMBLE_LENGTH); + mysql_sha1_input(&sha1_context, hash_stage2, SHA1_HASH_SIZE); + mysql_sha1_result(&sha1_context, buf); + /* encrypt scramble */ + my_crypt((char *) buf, buf, (const uchar *) scramble, SCRAMBLE_LENGTH); + /* now buf supposedly contains hash_stage1: so we can get hash_stage2 */ + mysql_sha1_reset(&sha1_context); + mysql_sha1_input(&sha1_context, buf, SHA1_HASH_SIZE); + mysql_sha1_result(&sha1_context, hash_stage2_reassured); + return memcmp(hash_stage2, hash_stage2_reassured, SHA1_HASH_SIZE); +} + + +/* + Convert scrambled password from asciiz hex string to binary form. + SYNOPSIS + get_salt_from_password() + res OUT buf to hold password. Must be at least SHA1_HASH_SIZE + bytes long. + password IN 4.1.1 version value of user.password +*/ + +void get_salt_from_password(uint8 *hash_stage2, const char *password) +{ + hex2octet(hash_stage2, password+1 /* skip '*' */, SHA1_HASH_SIZE * 2); +} + +/* + Convert scrambled password from binary form to asciiz hex string. + SYNOPSIS + make_password_from_salt() + to OUT store resulting string here, 2*SHA1_HASH_SIZE+2 bytes + salt IN password in salt format +*/ + +void make_password_from_salt(char *to, const uint8 *hash_stage2) +{ + *to++= PVERSION41_CHAR; + octet2hex(to, hash_stage2, SHA1_HASH_SIZE); } diff --git a/sql/procedure.cc b/sql/procedure.cc index 437bd82d6e5..a0042dd879e 100644 --- a/sql/procedure.cc +++ b/sql/procedure.cc @@ -17,7 +17,7 @@ /* Procedures (functions with changes output of select) */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_IMPLEMENTATION #pragma implementation // gcc: Class implementation #endif @@ -57,7 +57,8 @@ setup_procedure(THD *thd,ORDER *param,select_result *result, DBUG_RETURN(0); for (i=0 ; i < array_elements(sql_procs) ; i++) { - if (!my_strcasecmp((*param->item)->name,sql_procs[i].name)) + if (!my_strcasecmp(system_charset_info, + (*param->item)->name,sql_procs[i].name)) { Procedure *proc=(*sql_procs[i].init)(thd,param,result,field_list); *error= !proc; diff --git a/sql/procedure.h b/sql/procedure.h index 349908a8d84..0a1e9ddfa2f 100644 --- a/sql/procedure.h +++ b/sql/procedure.h @@ -17,7 +17,7 @@ /* When using sql procedures */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_INTERFACE #pragma interface /* gcc class implementation */ #endif @@ -35,10 +35,10 @@ public: } enum Type type() const { return Item::PROC_ITEM; } virtual void set(double nr)=0; - virtual void set(const char *str,uint length)=0; + virtual void set(const char *str,uint length,CHARSET_INFO *cs)=0; virtual void set(longlong nr)=0; virtual enum_field_types field_type() const=0; - void set(const char *str) { set(str,(uint) strlen(str)); } + void set(const char *str) { set(str,(uint) strlen(str), default_charset()); } void make_field(Send_field *tmp_field) { init_make_field(tmp_field,field_type()); @@ -55,14 +55,18 @@ public: decimals=dec; max_length=float_length(dec); } enum Item_result result_type () const { return REAL_RESULT; } - enum_field_types field_type() const { return FIELD_TYPE_DOUBLE; } + enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE; } void set(double nr) { value=nr; } void set(longlong nr) { value=(double) nr; } - 
void set(const char *str,uint length __attribute__((unused))) - { value=atof(str); } + void set(const char *str,uint length,CHARSET_INFO *cs) + { + int err; + char *end_not_used; + value= my_strntod(cs, (char*) str, length, &end_not_used, &err); + } double val() { return value; } longlong val_int() { return (longlong) value; } - String *val_str(String *s) { s->set(value,decimals); return s; } + String *val_str(String *s) { s->set(value,decimals,default_charset()); return s; } unsigned int size_of() { return sizeof(*this);} }; @@ -73,14 +77,14 @@ public: Item_proc_int(const char *name_par) :Item_proc(name_par) { max_length=11; } enum Item_result result_type () const { return INT_RESULT; } - enum_field_types field_type() const { return FIELD_TYPE_LONG; } + enum_field_types field_type() const { return MYSQL_TYPE_LONGLONG; } void set(double nr) { value=(longlong) nr; } void set(longlong nr) { value=nr; } - void set(const char *str,uint length __attribute__((unused))) - { value=strtoll(str,NULL,10); } + void set(const char *str,uint length, CHARSET_INFO *cs) + { int err; value=my_strntoll(cs,str,length,10,NULL,&err); } double val() { return (double) value; } longlong val_int() { return value; } - String *val_str(String *s) { s->set(value); return s; } + String *val_str(String *s) { s->set(value, default_charset()); return s; } unsigned int size_of() { return sizeof(*this);} }; @@ -91,12 +95,25 @@ public: Item_proc_string(const char *name_par,uint length) :Item_proc(name_par) { this->max_length=length; } enum Item_result result_type () const { return STRING_RESULT; } - enum_field_types field_type() const { return FIELD_TYPE_STRING; } - void set(double nr) { str_value.set(nr); } - void set(longlong nr) { str_value.set(nr); } - void set(const char *str, uint length) { str_value.copy(str,length); } - double val() { return atof(str_value.ptr()); } - longlong val_int() { return strtoll(str_value.ptr(),NULL,10); } + enum_field_types field_type() const { return MYSQL_TYPE_STRING; } + void set(double nr) { str_value.set(nr, 2, default_charset()); } + void set(longlong nr) { str_value.set(nr, default_charset()); } + void set(const char *str, uint length, CHARSET_INFO *cs) + { str_value.copy(str,length,cs); } + double val() + { + int err; + CHARSET_INFO *cs= str_value.charset(); + char *end_not_used; + return my_strntod(cs, (char*) str_value.ptr(), str_value.length(), + &end_not_used, &err); + } + longlong val_int() + { + int err; + CHARSET_INFO *cs=str_value.charset(); + return my_strntoll(cs,str_value.ptr(),str_value.length(),10,NULL,&err); + } String *val_str(String*) { return null_value ? (String*) 0 : (String*) &str_value; diff --git a/sql/protocol.cc b/sql/protocol.cc new file mode 100644 index 00000000000..a2287740f1e --- /dev/null +++ b/sql/protocol.cc @@ -0,0 +1,1213 @@ +/* Copyright (C) 2000-2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* + Low level functions for storing data to be send to the MySQL client + The actual communction is handled by the net_xxx functions in net_serv.cc +*/ + +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation // gcc: Class implementation +#endif + +#include "mysql_priv.h" +#include <stdarg.h> + +static const unsigned int PACKET_BUFFER_EXTRA_ALLOC= 1024; + +#ifndef EMBEDDED_LIBRARY +bool Protocol::net_store_data(const char *from, uint length) +#else +bool Protocol_prep::net_store_data(const char *from, uint length) +#endif +{ + ulong packet_length=packet->length(); + /* + The +9 comes from that strings of length longer than 16M require + 9 bytes to be stored (see net_store_length). + */ + if (packet_length+9+length > packet->alloced_length() && + packet->realloc(packet_length+9+length)) + return 1; + char *to=(char*) net_store_length((char*) packet->ptr()+packet_length, + (ulonglong) length); + memcpy(to,from,length); + packet->length((uint) (to+length-packet->ptr())); + return 0; +} + + + /* Send a error string to client */ + +void send_error(THD *thd, uint sql_errno, const char *err) +{ +#ifndef EMBEDDED_LIBRARY + uint length; + char buff[MYSQL_ERRMSG_SIZE+2], *pos; +#endif + const char *orig_err= err; + NET *net= &thd->net; + DBUG_ENTER("send_error"); + DBUG_PRINT("enter",("sql_errno: %d err: %s", sql_errno, + err ? err : net->last_error[0] ? + net->last_error : "NULL")); + +#ifndef EMBEDDED_LIBRARY /* TODO query cache in embedded library*/ + query_cache_abort(net); +#endif + thd->query_error= 1; // needed to catch query errors during replication + if (!err) + { + if (sql_errno) + err=ER(sql_errno); + else + { + if ((err=net->last_error)[0]) + sql_errno=net->last_errno; + else + { + sql_errno=ER_UNKNOWN_ERROR; + err=ER(sql_errno); /* purecov: inspected */ + } + } + orig_err= err; + } + +#ifdef EMBEDDED_LIBRARY + net->last_errno= sql_errno; + strmake(net->last_error, err, sizeof(net->last_error)-1); + strmov(net->sqlstate, mysql_errno_to_sqlstate(sql_errno)); +#else + + if (net->vio == 0) + { + if (thd->bootstrap) + { + /* In bootstrap it's ok to print on stderr */ + fprintf(stderr,"ERROR: %d %s\n",sql_errno,err); + } + DBUG_VOID_RETURN; + } + + if (net->return_errno) + { // new client code; Add errno before message + int2store(buff,sql_errno); + pos= buff+2; + if (thd->client_capabilities & CLIENT_PROTOCOL_41) + { + /* The first # is to make the protocol backward compatible */ + buff[2]= '#'; + pos= strmov(buff+3, mysql_errno_to_sqlstate(sql_errno)); + } + length= (uint) (strmake(pos, err, MYSQL_ERRMSG_SIZE-1) - buff); + err=buff; + } + else + { + length=(uint) strlen(err); + set_if_smaller(length,MYSQL_ERRMSG_SIZE-1); + } + VOID(net_write_command(net,(uchar) 255, "", 0, (char*) err,length)); +#endif /* EMBEDDED_LIBRARY*/ + if (!thd->killed) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, sql_errno, + orig_err ? orig_err : ER(sql_errno)); + thd->is_fatal_error=0; // Error message is given + thd->net.report_error= 0; + + /* Abort multi-result sets */ + thd->lex->found_colon= 0; + thd->server_status&= ~SERVER_MORE_RESULTS_EXISTS; + DBUG_VOID_RETURN; +} + + +/* + Send a warning to the end user + + SYNOPSIS + send_warning() + thd Thread handler + sql_errno Warning number (error message) + err Error string. 
If not set, use ER(sql_errno) + + DESCRIPTION + Register the warning so that the user can get it with mysql_warnings() + Send an ok (+ warning count) to the end user. +*/ + +void send_warning(THD *thd, uint sql_errno, const char *err) +{ + DBUG_ENTER("send_warning"); + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, sql_errno, + err ? err : ER(sql_errno)); + send_ok(thd); + DBUG_VOID_RETURN; +} + + +/* + Write error package and flush to client + It's a little too low level, but I don't want to use another buffer for + this +*/ + +void +net_printf(THD *thd, uint errcode, ...) +{ + va_list args; + uint length,offset; + const char *format; +#ifndef EMBEDDED_LIBRARY + const char *text_pos; + int head_length= NET_HEADER_SIZE; +#else + char text_pos[1024]; +#endif + NET *net= &thd->net; + + DBUG_ENTER("net_printf"); + DBUG_PRINT("enter",("message: %u",errcode)); + + thd->query_error= 1; // needed to catch query errors during replication +#ifndef EMBEDDED_LIBRARY + query_cache_abort(net); // Safety +#endif + va_start(args,errcode); + /* + The following is needed to make net_printf() work with 0 argument for + errorcode and use the argument after that as the format string. This + is useful for rare errors that are not worth the hassle to put in + errmsg.sys, but at the same time, the message is not fixed text + */ + if (errcode) + format= ER(errcode); + else + { + format=va_arg(args,char*); + errcode= ER_UNKNOWN_ERROR; + } + offset= (net->return_errno ? + ((thd->client_capabilities & CLIENT_PROTOCOL_41) ? + 2+SQLSTATE_LENGTH+1 : 2) : 0); +#ifndef EMBEDDED_LIBRARY + text_pos=(char*) net->buff + head_length + offset + 1; + length= (uint) ((char*)net->buff_end - text_pos); +#else + length=sizeof(text_pos)-1; +#endif + length=my_vsnprintf(my_const_cast(char*) (text_pos), + min(length, sizeof(net->last_error)), + format,args); + va_end(args); + + /* Replication slave relies on net->last_* to see if there was error */ + net->last_errno= errcode; + strmake(net->last_error, text_pos, sizeof(net->last_error)-1); + +#ifndef EMBEDDED_LIBRARY + if (net->vio == 0) + { + if (thd->bootstrap) + { + /* + In bootstrap it's ok to print on stderr + This may also happen when we get an error from a slave thread + */ + fprintf(stderr,"ERROR: %d %s\n",errcode,text_pos); + thd->fatal_error(); + } + DBUG_VOID_RETURN; + } + + int3store(net->buff,length+1+offset); + net->buff[3]= (net->compress) ? 0 : (uchar) (net->pkt_nr++); + net->buff[head_length]=(uchar) 255; // Error package + if (offset) + { + uchar *pos= net->buff+head_length+1; + int2store(pos, errcode); + if (thd->client_capabilities & CLIENT_PROTOCOL_41) + { + pos[2]= '#'; /* To make the protocol backward compatible */ + memcpy(pos+3, mysql_errno_to_sqlstate(errcode), SQLSTATE_LENGTH); + } + } + VOID(net_real_write(net,(char*) net->buff,length+head_length+1+offset)); +#else + net->last_errno= errcode; + strmake(net->last_error, text_pos, length); + strmake(net->sqlstate, mysql_errno_to_sqlstate(errcode), SQLSTATE_LENGTH); +#endif + if (!thd->killed) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, errcode, + text_pos ? text_pos : ER(errcode)); + thd->is_fatal_error=0; // Error message is given + DBUG_VOID_RETURN; +} + +/* + Return ok to the client. 
+ + SYNOPSIS + send_ok() + thd Thread handler + affected_rows Number of rows changed by statement + id Auto_increment id for first row (if used) + message Message to send to the client (Used by mysql_status) + + DESCRIPTION + The ok packet has the following structure + + 0 Marker (1 byte) + affected_rows Stored in 1-9 bytes + id Stored in 1-9 bytes + server_status Copy of thd->server_status; Can be used by client + to check if we are inside an transaction + New in 4.0 protocol + warning_count Stored in 2 bytes; New in 4.1 protocol + message Stored as packed length (1-9 bytes) + message + Is not stored if no message + + If net->no_send_ok return without sending packet +*/ + +#ifndef EMBEDDED_LIBRARY +void +send_ok(THD *thd, ha_rows affected_rows, ulonglong id, const char *message) +{ + NET *net= &thd->net; + char buff[MYSQL_ERRMSG_SIZE+10],*pos; + DBUG_ENTER("send_ok"); + + if (net->no_send_ok || !net->vio) // hack for re-parsing queries + DBUG_VOID_RETURN; + + buff[0]=0; // No fields + pos=net_store_length(buff+1,(ulonglong) affected_rows); + pos=net_store_length(pos, (ulonglong) id); + if (thd->client_capabilities & CLIENT_PROTOCOL_41) + { + DBUG_PRINT("info", + ("affected_rows: %lu id: %lu status: %u warning_count: %u", + (ulong) affected_rows, + (ulong) id, + (uint) (thd->server_status & 0xffff), + (uint) thd->total_warn_count)); + int2store(pos,thd->server_status); + pos+=2; + + /* We can only return up to 65535 warnings in two bytes */ + uint tmp= min(thd->total_warn_count, 65535); + int2store(pos, tmp); + pos+= 2; + } + else if (net->return_status) // For 4.0 protocol + { + int2store(pos,thd->server_status); + pos+=2; + } + if (message) + pos=net_store_data((char*) pos, message, strlen(message)); + VOID(my_net_write(net,buff,(uint) (pos-buff))); + VOID(net_flush(net)); + /* We can't anymore send an error to the client */ + thd->net.report_error= 0; + DBUG_VOID_RETURN; +} + +static char eof_buff[1]= { (char) 254 }; /* Marker for end of fields */ + +/* + Send eof (= end of result set) to the client + + SYNOPSIS + send_eof() + thd Thread handler + no_flush Set to 1 if there will be more data to the client, + like in send_fields(). + + DESCRIPTION + The eof packet has the following structure + + 254 Marker (1 byte) + warning_count Stored in 2 bytes; New in 4.1 protocol + status_flag Stored in 2 bytes; + For flags like SERVER_STATUS_MORE_RESULTS + + Note that the warning count will not be sent if 'no_flush' is set as + we don't want to report the warning count until all data is sent to the + client. +*/ + +void +send_eof(THD *thd, bool no_flush) +{ + NET *net= &thd->net; + DBUG_ENTER("send_eof"); + if (net->vio != 0) + { + if (thd->client_capabilities & CLIENT_PROTOCOL_41) + { + uchar buff[5]; + uint tmp= min(thd->total_warn_count, 65535); + buff[0]=254; + int2store(buff+1, tmp); + /* + The following test should never be true, but it's better to do it + because if 'is_fatal_error' is set the server is not going to execute + other queries (see the if test in dispatch_command / COM_QUERY) + */ + if (thd->is_fatal_error) + thd->server_status&= ~SERVER_MORE_RESULTS_EXISTS; + int2store(buff+3, thd->server_status); + VOID(my_net_write(net,(char*) buff,5)); + VOID(net_flush(net)); + } + else + { + VOID(my_net_write(net,eof_buff,1)); + if (!no_flush) + VOID(net_flush(net)); + } + } + DBUG_VOID_RETURN; +} + +/* + Please client to send scrambled_password in old format. 
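
As a concrete illustration of the packet layout documented in send_ok() above, here is a sketch that assembles the payload for a 4.1 client. Only lengths below 65536 are encoded, mirroring the short net_store_length() that follows; the status value and message are just examples, and the 4-byte packet header is not included.

    // Sketch of an OK packet payload for a 4.1 client:
    // [0x00][affected_rows][id][server_status:2][warning_count:2][message].
    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <vector>

    static void store_length(std::vector<uint8_t> &out, unsigned length)
    {
      if (length < 251)
      {
        out.push_back((uint8_t) length);
        return;
      }
      out.push_back(252);                          /* 2-byte length follows   */
      out.push_back((uint8_t) (length & 0xFF));    /* little-endian, int2store */
      out.push_back((uint8_t) (length >> 8));
    }

    static void store_int2(std::vector<uint8_t> &out, unsigned v)
    {
      out.push_back((uint8_t) (v & 0xFF));
      out.push_back((uint8_t) (v >> 8));
    }

    int main()
    {
      std::vector<uint8_t> pkt;
      const char *message = "Rows matched: 3  Changed: 3  Warnings: 0";

      pkt.push_back(0);                  /* marker: no fields                 */
      store_length(pkt, 3);              /* affected_rows                     */
      store_length(pkt, 0);              /* last insert id                    */
      store_int2(pkt, 0x0002);           /* server_status (example value)     */
      store_int2(pkt, 0);                /* warning_count                     */
      store_length(pkt, (unsigned) strlen(message));
      pkt.insert(pkt.end(), message, message + strlen(message));

      printf("OK packet payload: %u bytes\n", (unsigned) pkt.size());
      return 0;
    }
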
+ SYNOPSYS + send_old_password_request() + thd thread handle + + RETURN VALUE + 0 ok + !0 error +*/ + +bool send_old_password_request(THD *thd) +{ + NET *net= &thd->net; + return my_net_write(net, eof_buff, 1) || net_flush(net); +} + +#endif /* EMBEDDED_LIBRARY */ + +/* + Faster net_store_length when we know that length is less than 65536. + We keep a separate version for that range because it's widely used in + libmysql. + uint is used as agrument type because of MySQL type conventions: + uint for 0..65536 + ulong for 0..4294967296 + ulonglong for bigger numbers. +*/ + +char *net_store_length(char *pkg, uint length) +{ + uchar *packet=(uchar*) pkg; + if (length < 251) + { + *packet=(uchar) length; + return (char*) packet+1; + } + *packet++=252; + int2store(packet,(uint) length); + return (char*) packet+2; +} + + +/**************************************************************************** + Functions used by the protocol functions (like send_ok) to store strings + and numbers in the header result packet. +****************************************************************************/ + +/* The following will only be used for short strings < 65K */ + +char *net_store_data(char *to,const char *from, uint length) +{ + to=net_store_length(to,length); + memcpy(to,from,length); + return to+length; +} + +char *net_store_data(char *to,int32 from) +{ + char buff[20]; + uint length=(uint) (int10_to_str(from,buff,10)-buff); + to=net_store_length(to,length); + memcpy(to,buff,length); + return to+length; +} + +char *net_store_data(char *to,longlong from) +{ + char buff[22]; + uint length=(uint) (longlong10_to_str(from,buff,10)-buff); + to=net_store_length(to,length); + memcpy(to,buff,length); + return to+length; +} + + +/***************************************************************************** + Default Protocol functions +*****************************************************************************/ + +void Protocol::init(THD *thd_arg) +{ + thd=thd_arg; + packet= &thd->packet; + convert= &thd->convert_buffer; +#ifndef DEBUG_OFF + field_types= 0; +#endif +} + + +bool Protocol::flush() +{ +#ifndef EMBEDDED_LIBRARY + return net_flush(&thd->net); +#else + return 0; +#endif +} + +/* + Send name and type of result to client. + + SYNOPSIS + send_fields() + THD Thread data object + list List of items to send to client + flag Bit mask with the following functions: + 1 send number of rows + 2 send default values + + DESCRIPTION + Sum fields has table name empty and field_name. 
+ + RETURN VALUES + 0 ok + 1 Error (Note that in this case the error is not sent to the client) +*/ + +#ifndef EMBEDDED_LIBRARY +bool Protocol::send_fields(List<Item> *list, uint flag) +{ + List_iterator_fast<Item> it(*list); + Item *item; + char buff[80]; + String tmp((char*) buff,sizeof(buff),&my_charset_bin); + Protocol_simple prot(thd); + String *local_packet= prot.storage_packet(); + CHARSET_INFO *thd_charset= thd->variables.character_set_results; + DBUG_ENTER("send_fields"); + + if (flag & 1) + { // Packet with number of elements + char *pos=net_store_length(buff, (uint) list->elements); + (void) my_net_write(&thd->net, buff,(uint) (pos-buff)); + } + +#ifndef DEBUG_OFF + field_types= (enum_field_types*) thd->alloc(sizeof(field_types) * + list->elements); + uint count= 0; +#endif + + while ((item=it++)) + { + char *pos; + CHARSET_INFO *cs= system_charset_info; + Send_field field; + item->make_field(&field); + prot.prepare_for_resend(); + + if (thd->client_capabilities & CLIENT_PROTOCOL_41) + { + if (prot.store("def", 3, cs, thd_charset) || + prot.store(field.db_name, (uint) strlen(field.db_name), + cs, thd_charset) || + prot.store(field.table_name, (uint) strlen(field.table_name), + cs, thd_charset) || + prot.store(field.org_table_name, (uint) strlen(field.org_table_name), + cs, thd_charset) || + prot.store(field.col_name, (uint) strlen(field.col_name), + cs, thd_charset) || + prot.store(field.org_col_name, (uint) strlen(field.org_col_name), + cs, thd_charset) || + local_packet->realloc(local_packet->length()+12)) + goto err; + /* Store fixed length fields */ + pos= (char*) local_packet->ptr()+local_packet->length(); + *pos++= 12; // Length of packed fields + if (item->collation.collation == &my_charset_bin || thd_charset == NULL) + { + /* No conversion */ + int2store(pos, field.charsetnr); + int4store(pos+2, field.length); + } + else + { + /* With conversion */ + uint max_char_len; + int2store(pos, thd_charset->number); + /* + For TEXT/BLOB columns, field_length describes the maximum data + length in bytes. There is no limit to the number of characters + that a TEXT column can store, as long as the data fits into + the designated space. + For the rest of textual columns, field_length is evaluated as + char_count * mbmaxlen, where character count is taken from the + definition of the column. In other words, the maximum number + of characters here is limited by the column definition. + */ + max_char_len= (field.type >= (int) MYSQL_TYPE_TINY_BLOB && + field.type <= (int) MYSQL_TYPE_BLOB) ? 
+ field.length / item->collation.collation->mbminlen : + field.length / item->collation.collation->mbmaxlen; + int4store(pos+2, max_char_len * thd_charset->mbmaxlen); + } + pos[6]= field.type; + int2store(pos+7,field.flags); + pos[9]= (char) field.decimals; + pos[10]= 0; // For the future + pos[11]= 0; // For the future + pos+= 12; + } + else + { + if (prot.store(field.table_name, (uint) strlen(field.table_name), + cs, thd_charset) || + prot.store(field.col_name, (uint) strlen(field.col_name), + cs, thd_charset) || + local_packet->realloc(local_packet->length()+10)) + goto err; + pos= (char*) local_packet->ptr()+local_packet->length(); + +#ifdef TO_BE_DELETED_IN_6 + if (!(thd->client_capabilities & CLIENT_LONG_FLAG)) + { + pos[0]=3; + int3store(pos+1,field.length); + pos[4]=1; + pos[5]=field.type; + pos[6]=2; + pos[7]= (char) field.flags; + pos[8]= (char) field.decimals; + pos+= 9; + } + else +#endif + { + pos[0]=3; + int3store(pos+1,field.length); + pos[4]=1; + pos[5]=field.type; + pos[6]=3; + int2store(pos+7,field.flags); + pos[9]= (char) field.decimals; + pos+= 10; + } + } + local_packet->length((uint) (pos - local_packet->ptr())); + if (flag & 2) + item->send(&prot, &tmp); // Send default value + if (prot.write()) + break; /* purecov: inspected */ +#ifndef DEBUG_OFF + field_types[count++]= field.type; +#endif + } + + my_net_write(&thd->net, eof_buff, 1); + DBUG_RETURN(prepare_for_send(list)); + +err: + send_error(thd,ER_OUT_OF_RESOURCES); /* purecov: inspected */ + DBUG_RETURN(1); /* purecov: inspected */ +} + + +bool Protocol::send_records_num(List<Item> *list, ulonglong records) +{ + char *pos; + char buff[20]; + pos=net_store_length(buff, (uint) list->elements); + pos=net_store_length(pos, records); + return my_net_write(&thd->net, buff,(uint) (pos-buff)); +} + + +bool Protocol::write() +{ + DBUG_ENTER("Protocol::write"); + DBUG_RETURN(my_net_write(&thd->net, packet->ptr(), packet->length())); +} +#endif /* EMBEDDED_LIBRARY */ + + +/* + Send \0 end terminated string + + SYNOPSIS + store() + from NullS or \0 terminated string + + NOTES + In most cases one should use store(from, length) instead of this function + + RETURN VALUES + 0 ok + 1 error +*/ + +bool Protocol::store(const char *from, CHARSET_INFO *cs) +{ + if (!from) + return store_null(); + uint length= strlen(from); + return store(from, length, cs); +} + + +/* + Send a set of strings as one long string with ',' in between +*/ + +bool Protocol::store(I_List<i_string>* str_list) +{ + char buf[256]; + String tmp(buf, sizeof(buf), &my_charset_bin); + uint32 len; + I_List_iterator<i_string> it(*str_list); + i_string* s; + + tmp.length(0); + while ((s=it++)) + { + tmp.append(s->ptr); + tmp.append(','); + } + if ((len= tmp.length())) + len--; // Remove last ',' + return store((char*) tmp.ptr(), len, tmp.charset()); +} + + +/**************************************************************************** + Functions to handle the simple (default) protocol where everything is + This protocol is the one that is used by default between the MySQL server + and client when you are not using prepared statements. 
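
As the comment just below notes, every value in this protocol travels as a packed length followed by its text, and a NULL column is the single byte 251 (see Protocol_simple::store_null()). A short sketch of one result row, handling only lengths below 251 to keep it simple:

    // Sketch of a text-protocol result row: packed length + string data per
    // column, byte 251 for NULL. Column values are arbitrary examples.
    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <vector>

    static void store_cell(std::vector<uint8_t> &row, const char *value)
    {
      if (value == NULL)
      {
        row.push_back(251);                        /* NULL marker */
        return;
      }
      unsigned len = (unsigned) strlen(value);     /* assume len < 251 here */
      row.push_back((uint8_t) len);
      row.insert(row.end(), value, value + len);
    }

    int main()
    {
      /* A row such as (42, 'Ada', NULL) from some three-column SELECT. */
      std::vector<uint8_t> row;
      store_cell(row, "42");       /* numbers travel as text in this protocol */
      store_cell(row, "Ada");
      store_cell(row, NULL);

      printf("row payload is %u bytes:", (unsigned) row.size());
      for (unsigned i = 0; i < row.size(); i++)
        printf(" %02X", row[i]);
      printf("\n");
      return 0;
    }
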
+ + All data are sent as 'packed-string-length' followed by 'string-data' +****************************************************************************/ + +#ifndef EMBEDDED_LIBRARY +void Protocol_simple::prepare_for_resend() +{ + packet->length(0); +#ifndef DEBUG_OFF + field_pos= 0; +#endif +} + +bool Protocol_simple::store_null() +{ +#ifndef DEBUG_OFF + field_pos++; +#endif + char buff[1]; + buff[0]= (char)251; + return packet->append(buff, sizeof(buff), PACKET_BUFFER_EXTRA_ALLOC); +} +#endif + + +/* + Auxilary function to convert string to the given character set + and store in network buffer. +*/ + +bool Protocol::store_string_aux(const char *from, uint length, + CHARSET_INFO *fromcs, CHARSET_INFO *tocs) +{ + /* 'tocs' is set 0 when client issues SET character_set_results=NULL */ + if (tocs && !my_charset_same(fromcs, tocs) && + fromcs != &my_charset_bin && + tocs != &my_charset_bin) + { + uint dummy_errors; + return convert->copy(from, length, fromcs, tocs, &dummy_errors) || + net_store_data(convert->ptr(), convert->length()); + } + return net_store_data(from, length); +} + + +bool Protocol_simple::store(const char *from, uint length, + CHARSET_INFO *fromcs, CHARSET_INFO *tocs) +{ +#ifndef DEBUG_OFF + DBUG_ASSERT(field_types == 0 || + field_types[field_pos] == MYSQL_TYPE_DECIMAL || + (field_types[field_pos] >= MYSQL_TYPE_ENUM && + field_types[field_pos] <= MYSQL_TYPE_GEOMETRY)); + field_pos++; +#endif + return store_string_aux(from, length, fromcs, tocs); +} + + +bool Protocol_simple::store(const char *from, uint length, + CHARSET_INFO *fromcs) +{ + CHARSET_INFO *tocs= this->thd->variables.character_set_results; +#ifndef DEBUG_OFF + DBUG_ASSERT(field_types == 0 || + field_types[field_pos] == MYSQL_TYPE_DECIMAL || + (field_types[field_pos] >= MYSQL_TYPE_ENUM && + field_types[field_pos] <= MYSQL_TYPE_GEOMETRY)); + field_pos++; +#endif + return store_string_aux(from, length, fromcs, tocs); +} + + +bool Protocol_simple::store_tiny(longlong from) +{ +#ifndef DEBUG_OFF + DBUG_ASSERT(field_types == 0 || field_types[field_pos] == MYSQL_TYPE_TINY); + field_pos++; +#endif + char buff[20]; + return net_store_data((char*) buff, + (uint) (int10_to_str((int) from,buff, -10)-buff)); +} + + +bool Protocol_simple::store_short(longlong from) +{ +#ifndef DEBUG_OFF + DBUG_ASSERT(field_types == 0 || + field_types[field_pos] == MYSQL_TYPE_SHORT); + field_pos++; +#endif + char buff[20]; + return net_store_data((char*) buff, + (uint) (int10_to_str((int) from,buff, -10)-buff)); +} + + +bool Protocol_simple::store_long(longlong from) +{ +#ifndef DEBUG_OFF + DBUG_ASSERT(field_types == 0 || + field_types[field_pos] == MYSQL_TYPE_INT24 || + field_types[field_pos] == MYSQL_TYPE_LONG); + field_pos++; +#endif + char buff[20]; + return net_store_data((char*) buff, + (uint) (int10_to_str((long int)from,buff, (from <0)?-10:10)-buff)); +} + + +bool Protocol_simple::store_longlong(longlong from, bool unsigned_flag) +{ +#ifndef DEBUG_OFF + DBUG_ASSERT(field_types == 0 || + field_types[field_pos] == MYSQL_TYPE_LONGLONG); + field_pos++; +#endif + char buff[22]; + return net_store_data((char*) buff, + (uint) (longlong10_to_str(from,buff, + unsigned_flag ? 
10 : -10)- + buff)); +} + + +bool Protocol_simple::store(float from, uint32 decimals, String *buffer) +{ +#ifndef DEBUG_OFF + DBUG_ASSERT(field_types == 0 || + field_types[field_pos] == MYSQL_TYPE_FLOAT); + field_pos++; +#endif + buffer->set((double) from, decimals, thd->charset()); + return net_store_data((char*) buffer->ptr(), buffer->length()); +} + + +bool Protocol_simple::store(double from, uint32 decimals, String *buffer) +{ +#ifndef DEBUG_OFF + DBUG_ASSERT(field_types == 0 || + field_types[field_pos] == MYSQL_TYPE_DOUBLE); + field_pos++; +#endif + buffer->set(from, decimals, thd->charset()); + return net_store_data((char*) buffer->ptr(), buffer->length()); +} + + +bool Protocol_simple::store(Field *field) +{ + if (field->is_null()) + return store_null(); +#ifndef DEBUG_OFF + field_pos++; +#endif + char buff[MAX_FIELD_WIDTH]; + String str(buff,sizeof(buff), &my_charset_bin); + CHARSET_INFO *tocs= this->thd->variables.character_set_results; + + field->val_str(&str); + return store_string_aux(str.ptr(), str.length(), str.charset(), tocs); +} + + +/* + TODO: + Second_part format ("%06") needs to change when + we support 0-6 decimals for time. +*/ + + +bool Protocol_simple::store(TIME *tm) +{ +#ifndef DEBUG_OFF + DBUG_ASSERT(field_types == 0 || + field_types[field_pos] == MYSQL_TYPE_DATETIME || + field_types[field_pos] == MYSQL_TYPE_TIMESTAMP); + field_pos++; +#endif + char buff[40]; + uint length; + length= my_sprintf(buff,(buff, "%04d-%02d-%02d %02d:%02d:%02d", + (int) tm->year, + (int) tm->month, + (int) tm->day, + (int) tm->hour, + (int) tm->minute, + (int) tm->second)); + if (tm->second_part) + length+= my_sprintf(buff+length,(buff+length, ".%06d", (int)tm->second_part)); + return net_store_data((char*) buff, length); +} + + +bool Protocol_simple::store_date(TIME *tm) +{ +#ifndef DEBUG_OFF + DBUG_ASSERT(field_types == 0 || + field_types[field_pos] == MYSQL_TYPE_DATE); + field_pos++; +#endif + char buff[MAX_DATE_STRING_REP_LENGTH]; + int length= my_date_to_str(tm, buff); + return net_store_data(buff, (uint) length); +} + + +/* + TODO: + Second_part format ("%06") needs to change when + we support 0-6 decimals for time. +*/ + +bool Protocol_simple::store_time(TIME *tm) +{ +#ifndef DEBUG_OFF + DBUG_ASSERT(field_types == 0 || + field_types[field_pos] == MYSQL_TYPE_TIME); + field_pos++; +#endif + char buff[40]; + uint length; + uint day= (tm->year || tm->month) ? 0 : tm->day; + length= my_sprintf(buff,(buff, "%s%02ld:%02d:%02d", + tm->neg ? "-" : "", + (long) day*24L+(long) tm->hour, + (int) tm->minute, + (int) tm->second)); + if (tm->second_part) + length+= my_sprintf(buff+length,(buff+length, ".%06d", (int)tm->second_part)); + return net_store_data((char*) buff, length); +} + + +/**************************************************************************** + Functions to handle the binary protocol used with prepared statements + + Data format: + + [ok:1] reserved ok packet + [null_field:(field_count+7+2)/8] reserved to send null data. The size is + calculated using: + bit_fields= (field_count+7+2)/8; + 2 bits are reserved for identifying type + of package. 
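Aside (not part of the patch): the bitmap sizing described in the sentence above is the same formula that Protocol_prep::prepare_for_send() and store_null() use further down. A minimal standalone sketch of that arithmetic, with invented helper names:

static unsigned null_bitmap_bytes(unsigned field_count)
{
  /* two low bits are reserved, so e.g. 5 fields need (5+7+2)/8 = 1 byte */
  return (field_count + 7 + 2) / 8;
}

static void set_null_bit(unsigned char *bitmap, unsigned field_pos)
{
  unsigned bit= field_pos + 2;                        /* skip the 2 reserved bits */
  bitmap[bit / 8]|= (unsigned char) (1 << (bit & 7));
}

In the actual row packet the bitmap starts at offset 1, after the reserved [ok:1] byte, which is why store_null() adds one to the byte offset.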
+ [[length]data] data field (the length applies only for + string/binary/time/timestamp fields and + rest of them are not sent as they have + the default length that client understands + based on the field type + [..]..[[length]data] data +****************************************************************************/ + +bool Protocol_prep::prepare_for_send(List<Item> *item_list) +{ + Protocol::prepare_for_send(item_list); + bit_fields= (field_count+9)/8; + if (packet->alloc(bit_fields+1)) + return 1; + /* prepare_for_resend will be called after this one */ + return 0; +} + + +void Protocol_prep::prepare_for_resend() +{ + packet->length(bit_fields+1); + bzero((char*) packet->ptr(), 1+bit_fields); + field_pos=0; +} + + +bool Protocol_prep::store(const char *from, uint length, CHARSET_INFO *fromcs) +{ + CHARSET_INFO *tocs= thd->variables.character_set_results; +#ifndef DEBUG_OFF + DBUG_ASSERT(field_types == 0 || + field_types[field_pos] == MYSQL_TYPE_DECIMAL || + (field_types[field_pos] >= MYSQL_TYPE_ENUM && + field_types[field_pos] <= MYSQL_TYPE_GEOMETRY)); +#endif + field_pos++; + return store_string_aux(from, length, fromcs, tocs); +} + +bool Protocol_prep::store(const char *from,uint length, + CHARSET_INFO *fromcs, CHARSET_INFO *tocs) +{ +#ifndef DEBUG_OFF + DBUG_ASSERT(field_types == 0 || + field_types[field_pos] == MYSQL_TYPE_DECIMAL || + (field_types[field_pos] >= MYSQL_TYPE_ENUM && + field_types[field_pos] <= MYSQL_TYPE_GEOMETRY)); +#endif + field_pos++; + return store_string_aux(from, length, fromcs, tocs); +} + +bool Protocol_prep::store_null() +{ + uint offset= (field_pos+2)/8+1, bit= (1 << ((field_pos+2) & 7)); + /* Room for this as it's allocated in prepare_for_send */ + char *to= (char*) packet->ptr()+offset; + *to= (char) ((uchar) *to | (uchar) bit); + field_pos++; + return 0; +} + + +bool Protocol_prep::store_tiny(longlong from) +{ +#ifndef DEBUG_OFF + DBUG_ASSERT(field_types == 0 || + field_types[field_pos] == MYSQL_TYPE_TINY); +#endif + char buff[1]; + field_pos++; + buff[0]= (uchar) from; + return packet->append(buff, sizeof(buff), PACKET_BUFFER_EXTRA_ALLOC); +} + + +bool Protocol_prep::store_short(longlong from) +{ +#ifndef DEBUG_OFF + DBUG_ASSERT(field_types == 0 || + field_types[field_pos] == MYSQL_TYPE_SHORT || + field_types[field_pos] == MYSQL_TYPE_YEAR); +#endif + field_pos++; + char *to= packet->prep_append(2, PACKET_BUFFER_EXTRA_ALLOC); + if (!to) + return 1; + int2store(to, (int) from); + return 0; +} + + +bool Protocol_prep::store_long(longlong from) +{ +#ifndef DEBUG_OFF + DBUG_ASSERT(field_types == 0 || + field_types[field_pos] == MYSQL_TYPE_INT24 || + field_types[field_pos] == MYSQL_TYPE_LONG); +#endif + field_pos++; + char *to= packet->prep_append(4, PACKET_BUFFER_EXTRA_ALLOC); + if (!to) + return 1; + int4store(to, from); + return 0; +} + + +bool Protocol_prep::store_longlong(longlong from, bool unsigned_flag) +{ +#ifndef DEBUG_OFF + DBUG_ASSERT(field_types == 0 || + field_types[field_pos] == MYSQL_TYPE_LONGLONG); +#endif + field_pos++; + char *to= packet->prep_append(8, PACKET_BUFFER_EXTRA_ALLOC); + if (!to) + return 1; + int8store(to, from); + return 0; +} + + +bool Protocol_prep::store(float from, uint32 decimals, String *buffer) +{ +#ifndef DEBUG_OFF + DBUG_ASSERT(field_types == 0 || + field_types[field_pos] == MYSQL_TYPE_FLOAT); +#endif + field_pos++; + char *to= packet->prep_append(4, PACKET_BUFFER_EXTRA_ALLOC); + if (!to) + return 1; + float4store(to, from); + return 0; +} + + +bool Protocol_prep::store(double from, uint32 decimals, String *buffer) +{ 
+#ifndef DEBUG_OFF + DBUG_ASSERT(field_types == 0 || + field_types[field_pos] == MYSQL_TYPE_DOUBLE); +#endif + field_pos++; + char *to= packet->prep_append(8, PACKET_BUFFER_EXTRA_ALLOC); + if (!to) + return 1; + float8store(to, from); + return 0; +} + + +bool Protocol_prep::store(Field *field) +{ + /* + We should not increment field_pos here as send_binary() will call another + protocol function to do this for us + */ + if (field->is_null()) + return store_null(); + return field->send_binary(this); +} + + +bool Protocol_prep::store(TIME *tm) +{ +#ifndef DEBUG_OFF + DBUG_ASSERT(field_types == 0 || + field_types[field_pos] == MYSQL_TYPE_DATETIME || + field_types[field_pos] == MYSQL_TYPE_DATE || + field_types[field_pos] == MYSQL_TYPE_TIMESTAMP); +#endif + char buff[12],*pos; + uint length; + field_pos++; + pos= buff+1; + + int2store(pos, tm->year); + pos[2]= (uchar) tm->month; + pos[3]= (uchar) tm->day; + pos[4]= (uchar) tm->hour; + pos[5]= (uchar) tm->minute; + pos[6]= (uchar) tm->second; + int4store(pos+7, tm->second_part); + if (tm->second_part) + length=11; + else if (tm->hour || tm->minute || tm->second) + length=7; + else if (tm->year || tm->month || tm->day) + length=4; + else + length=0; + buff[0]=(char) length; // Length is stored first + return packet->append(buff, length+1, PACKET_BUFFER_EXTRA_ALLOC); +} + +bool Protocol_prep::store_date(TIME *tm) +{ + tm->hour= tm->minute= tm->second=0; + tm->second_part= 0; + return Protocol_prep::store(tm); +} + + +bool Protocol_prep::store_time(TIME *tm) +{ +#ifndef DEBUG_OFF + DBUG_ASSERT(field_types == 0 || + field_types[field_pos] == MYSQL_TYPE_TIME); +#endif + char buff[13], *pos; + uint length; + field_pos++; + pos= buff+1; + pos[0]= tm->neg ? 1 : 0; + if (tm->hour >= 24) + { + /* Fix if we come from Item::send */ + uint days= tm->hour/24; + tm->hour-= days*24; + tm->day+= days; + } + int4store(pos+1, tm->day); + pos[5]= (uchar) tm->hour; + pos[6]= (uchar) tm->minute; + pos[7]= (uchar) tm->second; + int4store(pos+8, tm->second_part); + if (tm->second_part) + length=12; + else if (tm->hour || tm->minute || tm->second || tm->day) + length=8; + else + length=0; + buff[0]=(char) length; // Length is stored first + return packet->append(buff, length+1, PACKET_BUFFER_EXTRA_ALLOC); +} + +#ifdef EMBEDDED_LIBRARY +/* Should be removed when we define the Protocol_cursor's future */ +bool Protocol_cursor::write() +{ + return Protocol_simple::write(); +} +#endif + diff --git a/sql/protocol.h b/sql/protocol.h new file mode 100644 index 00000000000..32d6acccddf --- /dev/null +++ b/sql/protocol.h @@ -0,0 +1,184 @@ +/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifdef USE_PRAGMA_INTERFACE +#pragma interface /* gcc class implementation */ +#endif + + +class i_string; +class THD; +typedef struct st_mysql_field MYSQL_FIELD; +typedef struct st_mysql_rows MYSQL_ROWS; + +class Protocol +{ +protected: + THD *thd; + String *packet; + String *convert; + uint field_pos; +#ifndef DEBUG_OFF + enum enum_field_types *field_types; +#endif + uint field_count; +#ifndef EMBEDDED_LIBRARY + bool net_store_data(const char *from, uint length); +#else + virtual bool net_store_data(const char *from, uint length); + char **next_field; + MYSQL_FIELD *next_mysql_field; + MEM_ROOT *alloc; +#endif + bool store_string_aux(const char *from, uint length, + CHARSET_INFO *fromcs, CHARSET_INFO *tocs); +public: + Protocol() {} + Protocol(THD *thd_arg) { init(thd_arg); } + virtual ~Protocol() {} + void init(THD* thd_arg); + bool send_fields(List<Item> *list, uint flag); + bool send_records_num(List<Item> *list, ulonglong records); + bool store(I_List<i_string> *str_list); + bool store(const char *from, CHARSET_INFO *cs); + String *storage_packet() { return packet; } + inline void free() { packet->free(); } +#ifndef EMBEDDED_LIBRARY + bool write(); +#else + virtual bool write(); +#endif + inline bool store(uint32 from) + { return store_long((longlong) from); } + inline bool store(longlong from) + { return store_longlong((longlong) from, 0); } + inline bool store(ulonglong from) + { return store_longlong((longlong) from, 1); } + inline bool store(String *str) + { return store((char*) str->ptr(), str->length(), str->charset()); } + + virtual bool prepare_for_send(List<Item> *item_list) + { + field_count=item_list->elements; + return 0; + } + virtual bool flush(); + virtual void prepare_for_resend()=0; + + virtual bool store_null()=0; + virtual bool store_tiny(longlong from)=0; + virtual bool store_short(longlong from)=0; + virtual bool store_long(longlong from)=0; + virtual bool store_longlong(longlong from, bool unsigned_flag)=0; + virtual bool store(const char *from, uint length, CHARSET_INFO *cs)=0; + virtual bool store(const char *from, uint length, + CHARSET_INFO *fromcs, CHARSET_INFO *tocs)=0; + virtual bool store(float from, uint32 decimals, String *buffer)=0; + virtual bool store(double from, uint32 decimals, String *buffer)=0; + virtual bool store(TIME *time)=0; + virtual bool store_date(TIME *time)=0; + virtual bool store_time(TIME *time)=0; + virtual bool store(Field *field)=0; +}; + + +/* Class used for the old (MySQL 4.0 protocol) */ + +class Protocol_simple :public Protocol +{ +public: + Protocol_simple() {} + Protocol_simple(THD *thd_arg) :Protocol(thd_arg) {} + virtual void prepare_for_resend(); + virtual bool store_null(); + virtual bool store_tiny(longlong from); + virtual bool store_short(longlong from); + virtual bool store_long(longlong from); + virtual bool store_longlong(longlong from, bool unsigned_flag); + virtual bool store(const char *from, uint length, CHARSET_INFO *cs); + virtual bool store(const char *from, uint length, + CHARSET_INFO *fromcs, CHARSET_INFO *tocs); + virtual bool store(TIME *time); + virtual bool store_date(TIME *time); + virtual bool store_time(TIME *time); + virtual bool store(float nr, uint32 decimals, String *buffer); + virtual bool store(double from, uint32 decimals, String *buffer); + virtual bool store(Field *field); +}; + + 
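The declarations above fix the calling convention for result sets: a command handler builds a field list, calls send_fields() once, and then for every row calls prepare_for_resend(), a series of store() calls, and write(). The same pattern appears later in this patch in show_new_master() and show_slave_hosts(). A hypothetical handler, shown only as a usage sketch (the column names and values are invented):

static int send_example_row(THD *thd)
{
  Protocol *protocol= thd->protocol;
  List<Item> field_list;

  field_list.push_back(new Item_empty_string("Name", 20));
  field_list.push_back(new Item_return_int("Value", 10, MYSQL_TYPE_LONG));
  if (protocol->send_fields(&field_list, 1))
    return -1;

  protocol->prepare_for_resend();
  protocol->store("example", &my_charset_bin);   /* first column  */
  protocol->store((uint32) 42);                  /* second column */
  if (protocol->write())
    return -1;

  send_eof(thd);
  return 0;
}

Whether the rows go out as text (Protocol_simple) or in the binary prepared-statement format (Protocol_prep below) is decided by whichever object thd->protocol points at; the caller code stays the same.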
+class Protocol_prep :public Protocol +{ +private: + uint bit_fields; +public: + Protocol_prep() {} + Protocol_prep(THD *thd_arg) :Protocol(thd_arg) {} + virtual bool prepare_for_send(List<Item> *item_list); + virtual void prepare_for_resend(); +#ifdef EMBEDDED_LIBRARY + virtual bool write(); + bool net_store_data(const char *from, uint length); +#endif + virtual bool store_null(); + virtual bool store_tiny(longlong from); + virtual bool store_short(longlong from); + virtual bool store_long(longlong from); + virtual bool store_longlong(longlong from, bool unsigned_flag); + virtual bool store(const char *from,uint length, CHARSET_INFO *cs); + virtual bool store(const char *from, uint length, + CHARSET_INFO *fromcs, CHARSET_INFO *tocs); + virtual bool store(TIME *time); + virtual bool store_date(TIME *time); + virtual bool store_time(TIME *time); + virtual bool store(float nr, uint32 decimals, String *buffer); + virtual bool store(double from, uint32 decimals, String *buffer); + virtual bool store(Field *field); +}; + +class Protocol_cursor :public Protocol_simple +{ +public: + MEM_ROOT *alloc; + MYSQL_FIELD *fields; + MYSQL_ROWS *data; + MYSQL_ROWS **prev_record; + ulong row_count; + + Protocol_cursor() {} + Protocol_cursor(THD *thd_arg, MEM_ROOT *ini_alloc) :Protocol_simple(thd_arg), alloc(ini_alloc) {} + bool prepare_for_send(List<Item> *item_list) + { + fields= NULL; + data= NULL; + prev_record= &data; + return Protocol_simple::prepare_for_send(item_list); + } + bool send_fields(List<Item> *list, uint flag); + bool write(); +}; + +void send_warning(THD *thd, uint sql_errno, const char *err=0); +void net_printf(THD *thd,uint sql_errno, ...); +void send_ok(THD *thd, ha_rows affected_rows=0L, ulonglong id=0L, + const char *info=0); +void send_eof(THD *thd, bool no_flush=0); +bool send_old_password_request(THD *thd); +char *net_store_length(char *packet,uint length); +char *net_store_data(char *to,const char *from, uint length); +char *net_store_data(char *to,int32 from); +char *net_store_data(char *to,longlong from); + diff --git a/sql/protocol_cursor.cc b/sql/protocol_cursor.cc new file mode 100644 index 00000000000..b225e06ed32 --- /dev/null +++ b/sql/protocol_cursor.cc @@ -0,0 +1,143 @@ +/* Copyright (C) 2000-2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* + Low level functions for storing data to be send to the MySQL client + The actual communction is handled by the net_xxx functions in net_serv.cc +*/ + +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation // gcc: Class implementation +#endif + +#include "mysql_priv.h" +#include <mysql.h> + +bool Protocol_cursor::send_fields(List<Item> *list, uint flag) +{ + List_iterator_fast<Item> it(*list); + Item *item; + MYSQL_FIELD *client_field; + + DBUG_ENTER("send_fields"); + if (prepare_for_send(list)) + return FALSE; + + fields= (MYSQL_FIELD *)alloc_root(alloc, sizeof(MYSQL_FIELD) * field_count); + if (!fields) + goto err; + + client_field= fields; + while ((item= it++)) + { + Send_field server_field; + item->make_field(&server_field); + + client_field->db= strdup_root(alloc, server_field.db_name); + client_field->table= strdup_root(alloc, server_field.table_name); + client_field->name= strdup_root(alloc, server_field.col_name); + client_field->org_table= strdup_root(alloc, server_field.org_table_name); + client_field->org_name= strdup_root(alloc, server_field.org_col_name); + client_field->length= server_field.length; + client_field->type= server_field.type; + client_field->flags= server_field.flags; + client_field->decimals= server_field.decimals; + client_field->db_length= strlen(client_field->db); + client_field->table_length= strlen(client_field->table); + client_field->name_length= strlen(client_field->name); + client_field->org_name_length= strlen(client_field->org_name); + client_field->org_table_length= strlen(client_field->org_table); + client_field->charsetnr= server_field.charsetnr; + + if (INTERNAL_NUM_FIELD(client_field)) + client_field->flags|= NUM_FLAG; + + if (flag & 2) + { + char buff[80]; + String tmp(buff, sizeof(buff), default_charset_info), *res; + + if (!(res=item->val_str(&tmp))) + client_field->def= (char*) ""; + else + client_field->def= strmake_root(alloc, res->ptr(), res->length()); + } + else + client_field->def=0; + client_field->max_length= 0; + ++client_field; + } + + DBUG_RETURN(FALSE); + err: + send_error(thd, ER_OUT_OF_RESOURCES); /* purecov: inspected */ + DBUG_RETURN(TRUE); /* purecov: inspected */ +} + +/* Get the length of next field. 
Change parameter to point at fieldstart */ +bool Protocol_cursor::write() +{ + byte *cp= (byte *)packet->ptr(); + byte *end_pos= (byte *)packet->ptr() + packet->length(); + ulong len; + MYSQL_FIELD *cur_field= fields; + MYSQL_FIELD *fields_end= fields + field_count; + MYSQL_ROWS *new_record; + byte **data_tmp; + byte *to; + + new_record= (MYSQL_ROWS *)alloc_root(alloc, + sizeof(MYSQL_ROWS) + (field_count + 1)*sizeof(char *) + packet->length()); + if (!new_record) + goto err; + data_tmp= (byte **)(new_record + 1); + new_record->data= (char **)data_tmp; + + to= (byte *)(fields + field_count + 1); + + for (; cur_field < fields_end; ++cur_field, ++data_tmp) + { + if ((len=net_field_length((uchar **)&cp))) + { + *data_tmp= 0; + } + else + { + if ((long)len > (end_pos - cp)) + { +// TODO error signal send_error(thd, CR_MALFORMED_PACKET); + return TRUE; + } + memcpy(to,(char*) cp,len); + to[len]=0; + to+=len+1; + cp+=len; + if (cur_field->max_length < len) + cur_field->max_length=len; + } + } + + *prev_record= new_record; + prev_record= &new_record->next; + new_record->next= NULL; + row_count++; + return FALSE; + err: +// TODO error signal send_error(thd, ER_OUT_OF_RESOURCES); + return TRUE; +} + + diff --git a/sql/records.cc b/sql/records.cc index 98aede52416..7e4a808f0c3 100644 --- a/sql/records.cc +++ b/sql/records.cc @@ -22,12 +22,58 @@ static int rr_quick(READ_RECORD *info); static int rr_sequential(READ_RECORD *info); static int rr_from_tempfile(READ_RECORD *info); +static int rr_unpack_from_tempfile(READ_RECORD *info); +static int rr_unpack_from_buffer(READ_RECORD *info); static int rr_from_pointers(READ_RECORD *info); static int rr_from_cache(READ_RECORD *info); static int init_rr_cache(READ_RECORD *info); static int rr_cmp(uchar *a,uchar *b); +static int rr_index_first(READ_RECORD *info); +static int rr_index(READ_RECORD *info); + + +/* + Initialize READ_RECORD structure to perform full index scan + + SYNOPSIS + init_read_record_idx() + info READ_RECORD structure to initialize. + thd Thread handle + table Table to be accessed + print_error If true, call table->file->print_error() if an error + occurs (except for end-of-records error) + idx index to scan + + DESCRIPTION + Initialize READ_RECORD structure to perform full index scan (in forward + direction) using read_record.read_record() interface. + + This function has been added at late stage and is used only by + UPDATE/DELETE. Other statements perform index scans using + join_read_first/next functions. 
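A sketch of how a caller drives this interface (not taken from the patch; the function name is invented): initialize, loop on the read_record function pointer until it returns non-zero, then clean up. The -1 return is the normal end-of-records value documented for rr_index_first()/rr_index() below.

static int scan_index_example(THD *thd, TABLE *table, uint idx)
{
  READ_RECORD info;
  int error;

  init_read_record_idx(&info, thd, table, 1, idx);
  while (!(error= info.read_record(&info)))
  {
    /* table->record[0] holds the current row here */
  }
  end_read_record(&info);
  return error == -1 ? 0 : error;   /* -1 == end of records */
}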
+*/ + +void init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table, + bool print_error, uint idx) +{ + bzero((char*) info,sizeof(*info)); + info->table= table; + info->file= table->file; + info->record= table->record[0]; + info->print_error= print_error; + + table->status=0; /* And it's always found */ + if (!table->file->inited) + { + table->file->ha_index_init(idx); + table->file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY); + } + /* read_record will be changed to rr_index in rr_index_first */ + info->read_record= rr_index_first; +} - /* init struct for read with info->read_record */ + +/* init struct for read with info->read_record */ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table, SQL_SELECT *select, @@ -41,8 +87,16 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table, info->table=table; info->file= table->file; info->forms= &info->table; /* Only one table */ - info->record=table->record[0]; - info->ref_length=table->file->ref_length; + if (table->sort.addon_field) + { + info->rec_buf= table->sort.addon_buf; + info->ref_length= table->sort.addon_length; + } + else + { + info->record= table->record[0]; + info->ref_length= table->file->ref_length; + } info->select=select; info->print_error=print_error; info->ignore_not_found_rows= 0; @@ -51,19 +105,27 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table, if (select && my_b_inited(&select->file)) tempfile= &select->file; else - tempfile= table->io_cache; + tempfile= table->sort.io_cache; if (tempfile && my_b_inited(tempfile)) // Test if ref-records was used { DBUG_PRINT("info",("using rr_from_tempfile")); - info->read_record=rr_from_tempfile; + info->read_record= (table->sort.addon_field ? + rr_unpack_from_tempfile : rr_from_tempfile); info->io_cache=tempfile; reinit_io_cache(info->io_cache,READ_CACHE,0L,0,0); info->ref_pos=table->file->ref; - table->file->rnd_init(0); - - if (! (specialflag & SPECIAL_SAFE_MODE) && + if (!table->file->inited) + table->file->ha_rnd_init(0); + + /* + table->sort.addon_field is checked because if we use addon fields, + it doesn't make sense to use cache - we don't read from the table + and table->sort.io_cache is read sequentially + */ + if (!table->sort.addon_field && + ! (specialflag & SPECIAL_SAFE_MODE) && thd->variables.read_rnd_buff_size && - !table->file->fast_key_read() && + !(table->file->table_flags() & HA_FAST_KEY_READ) && (table->db_stat & HA_READ_ONLY || table->reginfo.lock_type <= TL_READ_NO_INSERT) && (ulonglong) table->reclength*(table->file->records+ @@ -83,21 +145,26 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table, else if (select && select->quick) { DBUG_PRINT("info",("using rr_quick")); + + if (!table->file->inited) + table->file->ha_index_init(select->quick->index); info->read_record=rr_quick; } - else if (table->record_pointers) + else if (table->sort.record_pointers) { DBUG_PRINT("info",("using record_pointers")); - table->file->rnd_init(0); - info->cache_pos=table->record_pointers; - info->cache_end=info->cache_pos+ table->found_records*info->ref_length; - info->read_record= rr_from_pointers; + table->file->ha_rnd_init(0); + info->cache_pos=table->sort.record_pointers; + info->cache_end=info->cache_pos+ + table->sort.found_records*info->ref_length; + info->read_record= (table->sort.addon_field ? 
+ rr_unpack_from_buffer : rr_from_pointers); } else { DBUG_PRINT("info",("using rr_sequential")); info->read_record=rr_sequential; - table->file->rnd_init(); + table->file->ha_rnd_init(1); /* We can use record cache if we don't update dynamic length tables */ if (!table->no_cache && (use_record_cache > 0 || @@ -113,7 +180,7 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table, void end_read_record(READ_RECORD *info) -{ /* free cache if used */ +{ /* free cache if used */ if (info->cache) { my_free_lock((char*) info->cache,MYF(0)); @@ -121,12 +188,29 @@ void end_read_record(READ_RECORD *info) } if (info->table) { + filesort_free_buffers(info->table); (void) info->file->extra(HA_EXTRA_NO_CACHE); - (void) info->file->rnd_end(); + if (info->read_record != rr_quick) // otherwise quick_range does it + (void) info->file->ha_index_or_rnd_end(); info->table=0; } } +static int rr_handle_error(READ_RECORD *info, int error) +{ + if (error == HA_ERR_END_OF_FILE) + error= -1; + else + { + if (info->print_error) + info->table->file->print_error(error, MYF(0)); + if (error < 0) // Fix negative BDB errno + error= 1; + } + return error; +} + + /* Read a record from head-database */ static int rr_quick(READ_RECORD *info) @@ -141,15 +225,7 @@ static int rr_quick(READ_RECORD *info) } if (tmp != HA_ERR_RECORD_DELETED) { - if (tmp == HA_ERR_END_OF_FILE) - tmp= -1; - else - { - if (info->print_error) - info->file->print_error(tmp,MYF(0)); - if (tmp < 0) // Fix negative BDB errno - tmp=1; - } + tmp= rr_handle_error(info, tmp); break; } } @@ -157,6 +233,57 @@ static int rr_quick(READ_RECORD *info) } +/* + Reads first row in an index scan + + SYNOPSIS + rr_index_first() + info Scan info + + RETURN + 0 Ok + -1 End of records + 1 Error +*/ + + +static int rr_index_first(READ_RECORD *info) +{ + int tmp= info->file->index_first(info->record); + info->read_record= rr_index; + if (tmp) + tmp= rr_handle_error(info, tmp); + return tmp; +} + + +/* + Reads index sequentially after first row + + SYNOPSIS + rr_index() + info Scan info + + DESCRIPTION + Read the next index record (in forward direction) and translate return + value. + + RETURN + 0 Ok + -1 End of records + 1 Error +*/ + + +static int rr_index(READ_RECORD *info) +{ + int tmp= info->file->index_next(info->record); + if (tmp) + tmp= rr_handle_error(info, tmp); + return tmp; +} + + static int rr_sequential(READ_RECORD *info) { int tmp; @@ -167,17 +294,13 @@ static int rr_sequential(READ_RECORD *info) my_error(ER_SERVER_SHUTDOWN,MYF(0)); return 1; } + /* + rnd_next can return RECORD_DELETED for MyISAM when one thread is + reading and another deleting without locks. 
+ */ if (tmp != HA_ERR_RECORD_DELETED) { - if (tmp == HA_ERR_END_OF_FILE) - tmp= -1; - else - { - if (info->print_error) - info->table->file->print_error(tmp,MYF(0)); - if (tmp < 0) // Fix negative BDB errno - tmp=1; - } + tmp= rr_handle_error(info, tmp); break; } } @@ -188,56 +311,104 @@ static int rr_sequential(READ_RECORD *info) static int rr_from_tempfile(READ_RECORD *info) { int tmp; -tryNext: - if (my_b_read(info->io_cache,info->ref_pos,info->ref_length)) - return -1; /* End of file */ - if ((tmp=info->file->rnd_pos(info->record,info->ref_pos))) + for (;;) { - if (tmp == HA_ERR_END_OF_FILE) - tmp= -1; - else if (tmp == HA_ERR_RECORD_DELETED || - (tmp == HA_ERR_KEY_NOT_FOUND && info->ignore_not_found_rows)) - goto tryNext; - else - { - if (info->print_error) - info->file->print_error(tmp,MYF(0)); - if (tmp < 0) // Fix negative BDB errno - tmp=1; - } + if (my_b_read(info->io_cache,info->ref_pos,info->ref_length)) + return -1; /* End of file */ + if (!(tmp=info->file->rnd_pos(info->record,info->ref_pos))) + break; + /* The following is extremely unlikely to happen */ + if (tmp == HA_ERR_RECORD_DELETED || + (tmp == HA_ERR_KEY_NOT_FOUND && info->ignore_not_found_rows)) + continue; + tmp= rr_handle_error(info, tmp); + break; } return tmp; } /* rr_from_tempfile */ +/* + Read a result set record from a temporary file after sorting + + SYNOPSIS + rr_unpack_from_tempfile() + info Reference to the context including record descriptors + + DESCRIPTION + The function first reads the next sorted record from the temporary file. + into a buffer. If a success it calls a callback function that unpacks + the fields values use in the result set from this buffer into their + positions in the regular record buffer. + + RETURN + 0 - Record successfully read. + -1 - There is no record to be read anymore. +*/ + +static int rr_unpack_from_tempfile(READ_RECORD *info) +{ + if (my_b_read(info->io_cache, info->rec_buf, info->ref_length)) + return -1; + TABLE *table= info->table; + (*table->sort.unpack)(table->sort.addon_field, info->rec_buf); + + return 0; +} + static int rr_from_pointers(READ_RECORD *info) { int tmp; byte *cache_pos; -tryNext: - if (info->cache_pos == info->cache_end) - return -1; /* End of file */ - cache_pos=info->cache_pos; - info->cache_pos+=info->ref_length; - if ((tmp=info->file->rnd_pos(info->record,cache_pos))) + for (;;) { - if (tmp == HA_ERR_END_OF_FILE) - tmp= -1; - else if (tmp == HA_ERR_RECORD_DELETED || - (tmp == HA_ERR_KEY_NOT_FOUND && info->ignore_not_found_rows)) - goto tryNext; - else - { - if (info->print_error) - info->file->print_error(tmp,MYF(0)); - if (tmp < 0) // Fix negative BDB errno - tmp=1; - } + if (info->cache_pos == info->cache_end) + return -1; /* End of file */ + cache_pos= info->cache_pos; + info->cache_pos+= info->ref_length; + + if (!(tmp=info->file->rnd_pos(info->record,cache_pos))) + break; + + /* The following is extremely unlikely to happen */ + if (tmp == HA_ERR_RECORD_DELETED || + (tmp == HA_ERR_KEY_NOT_FOUND && info->ignore_not_found_rows)) + continue; + tmp= rr_handle_error(info, tmp); + break; } return tmp; } +/* + Read a result set record from a buffer after sorting + + SYNOPSIS + rr_unpack_from_buffer() + info Reference to the context including record descriptors + + DESCRIPTION + The function first reads the next sorted record from the sort buffer. + If a success it calls a callback function that unpacks + the fields values use in the result set from this buffer into their + positions in the regular record buffer. 
+ + RETURN + 0 - Record successfully read. + -1 - There is no record to be read anymore. +*/ + +static int rr_unpack_from_buffer(READ_RECORD *info) +{ + if (info->cache_pos == info->cache_end) + return -1; /* End of buffer */ + TABLE *table= info->table; + (*table->sort.unpack)(table->sort.addon_field, info->cache_pos); + info->cache_pos+= info->ref_length; + + return 0; +} /* cacheing of records from a database */ static int init_rr_cache(READ_RECORD *info) diff --git a/sql/repl_failsafe.cc b/sql/repl_failsafe.cc index 84640fbc968..61fd5d9bce4 100644 --- a/sql/repl_failsafe.cc +++ b/sql/repl_failsafe.cc @@ -15,11 +15,11 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "mysql_priv.h" +#ifdef HAVE_REPLICATION + #include "repl_failsafe.h" #include "sql_repl.h" #include "slave.h" -#include "sql_acl.h" -#include "mini_client.h" #include "log_event.h" #include <mysql.h> @@ -31,11 +31,10 @@ RPL_STATUS rpl_status=RPL_NULL; pthread_mutex_t LOCK_rpl_status; pthread_cond_t COND_rpl_status; HASH slave_list; -extern const char* any_db; const char *rpl_role_type[] = {"MASTER","SLAVE",NullS}; TYPELIB rpl_role_typelib = {array_elements(rpl_role_type)-1,"", - rpl_role_type}; + rpl_role_type, NULL}; const char* rpl_status_type[]= { @@ -43,7 +42,7 @@ const char* rpl_status_type[]= "RECOVERY_CAPTAIN","NULL",NullS }; TYPELIB rpl_status_typelib= {array_elements(rpl_status_type)-1,"", - rpl_status_type}; + rpl_status_type, NULL}; static Slave_log_event* find_slave_event(IO_CACHE* log, @@ -61,13 +60,17 @@ static Slave_log_event* find_slave_event(IO_CACHE* log, static int init_failsafe_rpl_thread(THD* thd) { DBUG_ENTER("init_failsafe_rpl_thread"); + /* + thd->bootstrap is to report errors barely to stderr; if this code is + enable again one day, one should check if bootstrap is still needed (maybe + this thread has no other error reporting method). + */ thd->system_thread = thd->bootstrap = 1; thd->host_or_ip= ""; - thd->client_capabilities = 0; my_net_init(&thd->net, 0); thd->net.read_timeout = slave_net_timeout; thd->max_client_packet_length=thd->net.max_packet; - thd->master_access= ~0; + thd->master_access= ~(ulong)0; thd->priv_user = 0; pthread_mutex_lock(&LOCK_thread_count); thd->thread_id = thread_id++; @@ -75,7 +78,7 @@ static int init_failsafe_rpl_thread(THD* thd) if (init_thr_lock() || thd->store_globals()) { - close_connection(&thd->net,ER_OUT_OF_RESOURCES); // is this needed? + close_connection(thd, ER_OUT_OF_RESOURCES, 1); // is this needed? 
statistic_increment(aborted_connects,&LOCK_status); end_thread(thd,0); DBUG_RETURN(-1); @@ -87,7 +90,7 @@ static int init_failsafe_rpl_thread(THD* thd) VOID(pthread_sigmask(SIG_UNBLOCK,&set,&thd->block_signals)); #endif - thd->mem_root.free=thd->mem_root.used=0; + thd->mem_root->free= thd->mem_root->used= 0; if (thd->variables.max_join_size == HA_POS_ERROR) thd->options|= OPTION_BIG_SELECTS; @@ -158,9 +161,8 @@ int register_slave(THD* thd, uchar* packet, uint packet_length) SLAVE_INFO *si; uchar *p= packet, *p_end= packet + packet_length; - if (check_access(thd, REPL_SLAVE_ACL, any_db)) + if (check_access(thd, REPL_SLAVE_ACL, any_db,0,0,0)) return 1; - if (!(si = (SLAVE_INFO*)my_malloc(sizeof(SLAVE_INFO), MYF(MY_WME)))) goto err2; @@ -181,7 +183,7 @@ int register_slave(THD* thd, uchar* packet, uint packet_length) pthread_mutex_lock(&LOCK_slave_list); unregister_slave(thd,0,0); - res= hash_insert(&slave_list, (byte*) si); + res= my_hash_insert(&slave_list, (byte*) si); pthread_mutex_unlock(&LOCK_slave_list); return res; @@ -190,7 +192,7 @@ err: my_message(ER_UNKNOWN_ERROR, "Wrong parameters to function register_slave", MYF(0)); err2: - send_error(&thd->net); + send_error(thd); return 1; } @@ -209,7 +211,7 @@ extern "C" void slave_info_free(void *s) void init_slave_list() { - hash_init(&slave_list, SLAVE_LIST_CHUNK, 0, 0, + hash_init(&slave_list, system_charset_info, SLAVE_LIST_CHUNK, 0, 0, (hash_get_key) slave_list_key, (hash_free_key) slave_info_free, 0); pthread_mutex_init(&LOCK_slave_list, MY_MUTEX_INIT_FAST); } @@ -260,7 +262,7 @@ static int find_target_pos(LEX_MASTER_INFO *mi, IO_CACHE *log, char *errmsg) it is reworked. Event's log_pos used to be preserved through log-slave-updates to make code in repl_failsafe.cc work (this function, SHOW NEW MASTER); but on the other side it caused unexpected - values in Exec_master_log_pos in A->B->C replication setup, + values in Exec_Master_Log_Pos in A->B->C replication setup, synchronization problems in master_pos_wait(), ... So we (Dmitri & Guilhem) removed it. @@ -436,10 +438,11 @@ static Slave_log_event* find_slave_event(IO_CACHE* log, int show_new_master(THD* thd) { + Protocol *protocol= thd->protocol; DBUG_ENTER("show_new_master"); List<Item> field_list; char errmsg[SLAVE_ERRMSG_SIZE]; - LEX_MASTER_INFO* lex_mi = &thd->lex.mi; + LEX_MASTER_INFO* lex_mi= &thd->lex->mi; errmsg[0]=0; // Safety if (translate_master(thd, lex_mi, errmsg)) @@ -451,28 +454,28 @@ int show_new_master(THD* thd) } else { - String* packet = &thd->packet; field_list.push_back(new Item_empty_string("Log_name", 20)); - field_list.push_back(new Item_empty_string("Log_pos", 20)); - if (send_fields(thd, field_list, 1)) + field_list.push_back(new Item_return_int("Log_pos", 10, + MYSQL_TYPE_LONGLONG)); + if (protocol->send_fields(&field_list, 1)) DBUG_RETURN(-1); - packet->length(0); - net_store_data(packet, lex_mi->log_file_name); - net_store_data(packet, (longlong)lex_mi->pos); - if (my_net_write(&thd->net, packet->ptr(), packet->length())) + protocol->prepare_for_resend(); + protocol->store(lex_mi->log_file_name, &my_charset_bin); + protocol->store((ulonglong) lex_mi->pos); + if (protocol->write()) DBUG_RETURN(-1); - send_eof(&thd->net); + send_eof(thd); DBUG_RETURN(0); } } /* Asks the master for the list of its other connected slaves. - This is for failsafe replication : - in order for failsafe replication to work, the servers involved in replication - must know of each other. 
We accomplish this by having each slave report to the - master how to reach it, and on connection, each slave receives information - about where the other slaves are. + This is for failsafe replication: + in order for failsafe replication to work, the servers involved in + replication must know of each other. We accomplish this by having each + slave report to the master how to reach it, and on connection, each + slave receives information about where the other slaves are. SYNOPSIS update_slave_list() @@ -484,8 +487,8 @@ int show_new_master(THD* thd) hostname/port of the master, the username used by the slave to connect to the master. If the user used by the slave to connect to the master does not have the - REPLICATION SLAVE privilege, it will pop in this function because SHOW SLAVE - HOSTS will fail on the master. + REPLICATION SLAVE privilege, it will pop in this function because + SHOW SLAVE HOSTS will fail on the master. RETURN VALUES 1 error @@ -501,14 +504,14 @@ int update_slave_list(MYSQL* mysql, MASTER_INFO* mi) int port_ind; DBUG_ENTER("update_slave_list"); - if (mc_mysql_query(mysql,"SHOW SLAVE HOSTS",0) || - !(res = mc_mysql_store_result(mysql))) + if (mysql_real_query(mysql,"SHOW SLAVE HOSTS",16) || + !(res = mysql_store_result(mysql))) { - error= mc_mysql_error(mysql); + error= mysql_error(mysql); goto err; } - switch (mc_mysql_num_fields(res)) { + switch (mysql_num_fields(res)) { case 5: have_auth_info = 0; port_ind=2; @@ -525,7 +528,7 @@ HOSTS"; pthread_mutex_lock(&LOCK_slave_list); - while ((row= mc_mysql_fetch_row(res))) + while ((row= mysql_fetch_row(res))) { uint32 server_id; SLAVE_INFO* si, *old_si; @@ -542,7 +545,7 @@ HOSTS"; goto err; } si->server_id = server_id; - hash_insert(&slave_list, (byte*)si); + my_hash_insert(&slave_list, (byte*)si); } strmake(si->host, row[1], sizeof(si->host)-1); si->port = atoi(row[port_ind]); @@ -558,7 +561,7 @@ HOSTS"; err: if (res) - mc_mysql_free_result(res); + mysql_free_result(res); if (error) { sql_print_error("While trying to obtain the list of slaves from the master \ @@ -585,7 +588,7 @@ pthread_handler_decl(handle_failsafe_rpl,arg) const char* msg; pthread_detach_this_thread(); - if (init_failsafe_rpl_thread(thd) || !(recovery_captain=mc_mysql_init(0))) + if (init_failsafe_rpl_thread(thd) || !(recovery_captain=mysql_init(0))) { sql_print_error("Could not initialize failsafe replication thread"); goto err; @@ -617,7 +620,7 @@ pthread_handler_decl(handle_failsafe_rpl,arg) thd->exit_cond(msg); err: if (recovery_captain) - mc_mysql_close(recovery_captain); + mysql_close(recovery_captain); delete thd; my_thread_end(); pthread_exit(0); @@ -628,22 +631,24 @@ err: int show_slave_hosts(THD* thd) { List<Item> field_list; - NET* net = &thd->net; - String* packet = &thd->packet; + Protocol *protocol= thd->protocol; DBUG_ENTER("show_slave_hosts"); - field_list.push_back(new Item_empty_string("Server_id", 20)); + field_list.push_back(new Item_return_int("Server_id", 10, + MYSQL_TYPE_LONG)); field_list.push_back(new Item_empty_string("Host", 20)); if (opt_show_slave_auth_info) { field_list.push_back(new Item_empty_string("User",20)); field_list.push_back(new Item_empty_string("Password",20)); } - field_list.push_back(new Item_empty_string("Port",20)); - field_list.push_back(new Item_empty_string("Rpl_recovery_rank", 20)); - field_list.push_back(new Item_empty_string("Master_id", 20)); + field_list.push_back(new Item_return_int("Port", 7, MYSQL_TYPE_LONG)); + field_list.push_back(new Item_return_int("Rpl_recovery_rank", 7, + MYSQL_TYPE_LONG)); 
+ field_list.push_back(new Item_return_int("Master_id", 10, + MYSQL_TYPE_LONG)); - if (send_fields(thd, field_list, 1)) + if (protocol->send_fields(&field_list, 1)) DBUG_RETURN(-1); pthread_mutex_lock(&LOCK_slave_list); @@ -651,25 +656,25 @@ int show_slave_hosts(THD* thd) for (uint i = 0; i < slave_list.records; ++i) { SLAVE_INFO* si = (SLAVE_INFO*) hash_element(&slave_list, i); - packet->length(0); - net_store_data(packet, si->server_id); - net_store_data(packet, si->host); + protocol->prepare_for_resend(); + protocol->store((uint32) si->server_id); + protocol->store(si->host, &my_charset_bin); if (opt_show_slave_auth_info) { - net_store_data(packet, si->user); - net_store_data(packet, si->password); + protocol->store(si->user, &my_charset_bin); + protocol->store(si->password, &my_charset_bin); } - net_store_data(packet, (uint32) si->port); - net_store_data(packet, si->rpl_recovery_rank); - net_store_data(packet, si->master_id); - if (my_net_write(net, (char*)packet->ptr(), packet->length())) + protocol->store((uint32) si->port); + protocol->store((uint32) si->rpl_recovery_rank); + protocol->store((uint32) si->master_id); + if (protocol->write()) { pthread_mutex_unlock(&LOCK_slave_list); DBUG_RETURN(-1); } } pthread_mutex_unlock(&LOCK_slave_list); - send_eof(net); + send_eof(thd); DBUG_RETURN(0); } @@ -683,9 +688,23 @@ int connect_to_master(THD *thd, MYSQL* mysql, MASTER_INFO* mi) strmov(mysql->net.last_error, "Master is not configured"); DBUG_RETURN(1); } - if (!mc_mysql_connect(mysql, mi->host, mi->user, mi->password, 0, - mi->port, 0, 0, - slave_net_timeout)) + mysql_options(mysql, MYSQL_OPT_CONNECT_TIMEOUT, (char *) &slave_net_timeout); + mysql_options(mysql, MYSQL_OPT_READ_TIMEOUT, (char *) &slave_net_timeout); + +#ifdef HAVE_OPENSSL + if (mi->ssl) + mysql_ssl_set(mysql, + mi->ssl_key[0]?mi->ssl_key:0, + mi->ssl_cert[0]?mi->ssl_cert:0, + mi->ssl_ca[0]?mi->ssl_ca:0, + mi->ssl_capath[0]?mi->ssl_capath:0, + mi->ssl_cipher[0]?mi->ssl_cipher:0); +#endif + + mysql_options(mysql, MYSQL_SET_CHARSET_NAME, default_charset_info->csname); + mysql_options(mysql, MYSQL_SET_CHARSET_DIR, (char *) charsets_dir); + if (!mysql_real_connect(mysql, mi->host, mi->user, mi->password, 0, + mi->port, 0, 0)) DBUG_RETURN(1); DBUG_RETURN(0); } @@ -697,9 +716,9 @@ static inline void cleanup_mysql_results(MYSQL_RES* db_res, for (; cur >= start; --cur) { if (*cur) - mc_mysql_free_result(*cur); + mysql_free_result(*cur); } - mc_mysql_free_result(db_res); + mysql_free_result(db_res); } @@ -707,18 +726,19 @@ static int fetch_db_tables(THD *thd, MYSQL *mysql, const char *db, MYSQL_RES *table_res, MASTER_INFO *mi) { MYSQL_ROW row; - for (row = mc_mysql_fetch_row(table_res); row; - row = mc_mysql_fetch_row(table_res)) + for (row = mysql_fetch_row(table_res); row; + row = mysql_fetch_row(table_res)) { TABLE_LIST table; const char* table_name= row[0]; int error; if (table_rules_on) { - table.next= 0; + bzero((char*) &table, sizeof(table)); //just for safe table.db= (char*) db; table.real_name= (char*) table_name; table.updating= 1; + if (!tables_ok(thd, &table)) continue; } @@ -743,7 +763,9 @@ int load_master_data(THD* thd) int error = 0; const char* errmsg=0; int restart_thread_mask; - mc_mysql_init(&mysql); + HA_CREATE_INFO create_info; + + mysql_init(&mysql); /* We do not want anyone messing with the slave at all for the entire @@ -756,7 +778,7 @@ int load_master_data(THD* thd) (error=terminate_slave_threads(active_mi,restart_thread_mask, 1 /*skip lock*/))) { - send_error(&thd->net,error); + send_error(thd,error); 
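Since these hunks replace the old mini-client (mc_mysql_*) calls with the regular C API throughout repl_failsafe.cc, a condensed standalone sketch of the call sequence may help when reading the rest of load_master_data(); host, user, password and the port are placeholders, and error handling is reduced to early returns:

#include <mysql.h>

static int list_master_databases(const char *host, const char *user,
                                 const char *password, unsigned int timeout)
{
  MYSQL mysql;
  MYSQL_RES *res;
  MYSQL_ROW row;

  mysql_init(&mysql);
  mysql_options(&mysql, MYSQL_OPT_CONNECT_TIMEOUT, (char*) &timeout);
  mysql_options(&mysql, MYSQL_OPT_READ_TIMEOUT, (char*) &timeout);
  if (!mysql_real_connect(&mysql, host, user, password, 0, 3306, 0, 0))
    return 1;
  if (mysql_real_query(&mysql, "SHOW DATABASES", 14) ||
      !(res= mysql_store_result(&mysql)))
  {
    mysql_close(&mysql);
    return 1;
  }
  while ((row= mysql_fetch_row(res)))
  {
    /* row[0] is a database name, as consumed by load_master_data() */
  }
  mysql_free_result(res);
  mysql_close(&mysql);
  return 0;
}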
unlock_slave_threads(active_mi); pthread_mutex_unlock(&LOCK_active_mi); return 1; @@ -764,8 +786,8 @@ int load_master_data(THD* thd) if (connect_to_master(thd, &mysql, active_mi)) { - net_printf(&thd->net, error= ER_CONNECT_TO_MASTER, - mc_mysql_error(&mysql)); + net_printf(thd, error= ER_CONNECT_TO_MASTER, + mysql_error(&mysql)); goto err; } @@ -774,15 +796,15 @@ int load_master_data(THD* thd) MYSQL_RES *db_res, **table_res, **table_res_end, **cur_table_res; uint num_dbs; - if (mc_mysql_query(&mysql, "show databases", 0) || - !(db_res = mc_mysql_store_result(&mysql))) + if (mysql_real_query(&mysql, "SHOW DATABASES", 14) || + !(db_res = mysql_store_result(&mysql))) { - net_printf(&thd->net, error = ER_QUERY_ON_MASTER, - mc_mysql_error(&mysql)); + net_printf(thd, error = ER_QUERY_ON_MASTER, + mysql_error(&mysql)); goto err; } - if (!(num_dbs = (uint) mc_mysql_num_rows(db_res))) + if (!(num_dbs = (uint) mysql_num_rows(db_res))) goto err; /* In theory, the master could have no databases at all @@ -791,7 +813,7 @@ int load_master_data(THD* thd) if (!(table_res = (MYSQL_RES**)thd->alloc(num_dbs * sizeof(MYSQL_RES*)))) { - net_printf(&thd->net, error = ER_OUTOFMEMORY); + net_printf(thd, error = ER_OUTOFMEMORY); goto err; } @@ -801,12 +823,12 @@ int load_master_data(THD* thd) we wait to issue FLUSH TABLES WITH READ LOCK for as long as we can to minimize the lock time. */ - if (mc_mysql_query(&mysql, "FLUSH TABLES WITH READ LOCK", 0) || - mc_mysql_query(&mysql, "SHOW MASTER STATUS",0) || - !(master_status_res = mc_mysql_store_result(&mysql))) + if (mysql_real_query(&mysql, "FLUSH TABLES WITH READ LOCK", 27) || + mysql_real_query(&mysql, "SHOW MASTER STATUS",18) || + !(master_status_res = mysql_store_result(&mysql))) { - net_printf(&thd->net, error = ER_QUERY_ON_MASTER, - mc_mysql_error(&mysql)); + net_printf(thd, error = ER_QUERY_ON_MASTER, + mysql_error(&mysql)); goto err; } @@ -821,7 +843,7 @@ int load_master_data(THD* thd) cur_table_res++) { // since we know how many rows we have, this can never be NULL - MYSQL_ROW row = mc_mysql_fetch_row(db_res); + MYSQL_ROW row = mysql_fetch_row(db_res); char* db = row[0]; /* @@ -846,19 +868,22 @@ int load_master_data(THD* thd) continue; } - if (mysql_create_db(thd, db, HA_LEX_CREATE_IF_NOT_EXISTS, 1)) + bzero((char*) &create_info, sizeof(create_info)); + create_info.options= HA_LEX_CREATE_IF_NOT_EXISTS; + + if (mysql_create_db(thd, db, &create_info, 1)) { - send_error(&thd->net, 0, 0); + send_error(thd, 0, 0); cleanup_mysql_results(db_res, cur_table_res - 1, table_res); goto err; } - if (mc_mysql_select_db(&mysql, db) || - mc_mysql_query(&mysql, "show tables", 0) || - !(*cur_table_res = mc_mysql_store_result(&mysql))) + if (mysql_select_db(&mysql, db) || + mysql_real_query(&mysql, "SHOW TABLES", 11) || + !(*cur_table_res = mysql_store_result(&mysql))) { - net_printf(&thd->net, error = ER_QUERY_ON_MASTER, - mc_mysql_error(&mysql)); + net_printf(thd, error = ER_QUERY_ON_MASTER, + mysql_error(&mysql)); cleanup_mysql_results(db_res, cur_table_res - 1, table_res); goto err; } @@ -876,7 +901,7 @@ int load_master_data(THD* thd) // adjust replication coordinates from the master if (master_status_res) { - MYSQL_ROW row = mc_mysql_fetch_row(master_status_res); + MYSQL_ROW row = mysql_fetch_row(master_status_res); /* We need this check because the master may not be running with @@ -892,25 +917,31 @@ int load_master_data(THD* thd) setting active_mi, because init_master_info() sets active_mi with defaults. 
*/ + int error; + if (init_master_info(active_mi, master_info_file, relay_log_info_file, 0, (SLAVE_IO | SLAVE_SQL))) - send_error(&thd->net, ER_MASTER_INFO); + send_error(thd, ER_MASTER_INFO); strmake(active_mi->master_log_name, row[0], sizeof(active_mi->master_log_name)); - active_mi->master_log_pos = strtoull(row[1], (char**) 0, 10); - // at least in recent versions, the condition below should be false + active_mi->master_log_pos= my_strtoll10(row[1], (char**) 0, &error); + /* at least in recent versions, the condition below should be false */ if (active_mi->master_log_pos < BIN_LOG_HEADER_SIZE) active_mi->master_log_pos = BIN_LOG_HEADER_SIZE; - active_mi->rli.pending = 0; + /* + Relay log's IO_CACHE may not be inited (even if we are sure that some + host was specified; there could have been a problem when replication + started, which led to relay log's IO_CACHE to not be inited. + */ flush_master_info(active_mi, 0); } - mc_mysql_free_result(master_status_res); + mysql_free_result(master_status_res); } - if (mc_mysql_query(&mysql, "UNLOCK TABLES", 0)) + if (mysql_real_query(&mysql, "UNLOCK TABLES", 13)) { - net_printf(&thd->net, error = ER_QUERY_ON_MASTER, - mc_mysql_error(&mysql)); + net_printf(thd, error = ER_QUERY_ON_MASTER, + mysql_error(&mysql)); goto err; } } @@ -919,15 +950,25 @@ int load_master_data(THD* thd) 0 /* not only reset, but also reinit */, &errmsg)) { - send_error(&thd->net, 0, "Failed purging old relay logs"); + send_error(thd, 0, "Failed purging old relay logs"); unlock_slave_threads(active_mi); pthread_mutex_unlock(&LOCK_active_mi); return 1; } pthread_mutex_lock(&active_mi->rli.data_lock); - active_mi->rli.master_log_pos = active_mi->master_log_pos; - strmake(active_mi->rli.master_log_name,active_mi->master_log_name, - sizeof(active_mi->rli.master_log_name)-1); + active_mi->rli.group_master_log_pos = active_mi->master_log_pos; + strmake(active_mi->rli.group_master_log_name,active_mi->master_log_name, + sizeof(active_mi->rli.group_master_log_name)-1); + /* + Cancel the previous START SLAVE UNTIL, as the fact to download + a new copy logically makes UNTIL irrelevant. + */ + clear_until_condition(&active_mi->rli); + + /* + No need to update rli.event* coordinates, they will be when the slave + threads start ; only rli.group* coordinates are necessary here. 
+ */ flush_relay_log_info(&active_mi->rli); pthread_cond_broadcast(&active_mi->rli.data_cond); pthread_mutex_unlock(&active_mi->rli.data_lock); @@ -945,9 +986,12 @@ err: pthread_mutex_unlock(&LOCK_active_mi); thd->proc_info = 0; - mc_mysql_close(&mysql); // safe to call since we always do mc_mysql_init() + mysql_close(&mysql); // safe to call since we always do mysql_init() if (!error) - send_ok(&thd->net); + send_ok(thd); return error; } + +#endif /* HAVE_REPLICATION */ + diff --git a/sql/repl_failsafe.h b/sql/repl_failsafe.h index eb0e97c2820..ad0219bb735 100644 --- a/sql/repl_failsafe.h +++ b/sql/repl_failsafe.h @@ -14,8 +14,7 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#ifndef REPL_FAILSAFE_H -#define REPL_FAILSAFE_H +#ifdef HAVE_REPLICATION #include "mysql.h" #include "my_sys.h" @@ -50,4 +49,4 @@ void end_slave_list(); int register_slave(THD* thd, uchar* packet, uint packet_length); void unregister_slave(THD* thd, bool only_mine, bool need_mutex); -#endif +#endif /* HAVE_REPLICATION */ diff --git a/sql/set_var.cc b/sql/set_var.cc index f12b81f0682..681c70c4c02 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -1,4 +1,4 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2003 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -19,18 +19,27 @@ To add a new variable, one has to do the following: - - If the variable is thread specific, add it to 'system_variables' struct. - If not, add it to mysqld.cc and an declaration in 'mysql_priv.h' - Use one of the 'sys_var... classes from set_var.h or write a specific one for the variable type. - Define it in the 'variable definition list' in this file. - - If the variable should be changeable, it should be added to the - 'list of all variables' list in this file. + - If the variable should be changeable or one should be able to access it + with @@variable_name, it should be added to the 'list of all variables' + list (sys_variables) in this file. + - If the variable is thread specific, add it to 'system_variables' struct. + If not, add it to mysqld.cc and an declaration in 'mysql_priv.h' - If the variable should be changed from the command line, add a definition of it in the my_option structure list in mysqld.dcc + - Don't forget to initialize new fields in global_system_variables and + max_system_variables! - If the variable should show up in 'show variables' add it to the init_vars[] struct in this file + NOTES: + - Be careful with var->save_result: sys_var::check() only updates + ulonglong_value; so other members of the union are garbage then; to use + them you must first assign a value to them (in specific ::check() for + example). + TODO: - Add full support for the variable character_set (for 4.1) @@ -39,13 +48,13 @@ new attribute. 
*/ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_IMPLEMENTATION #pragma implementation // gcc: Class implementation #endif #include "mysql_priv.h" +#include <mysql.h> #include "slave.h" -#include "sql_acl.h" #include <my_getopt.h> #include <thr_alarm.h> #include <myisam.h> @@ -55,26 +64,39 @@ #ifdef HAVE_INNOBASE_DB #include "ha_innodb.h" #endif +#ifdef HAVE_NDBCLUSTER_DB +#include "ha_ndbcluster.h" +#endif static HASH system_variable_hash; const char *bool_type_names[]= { "OFF", "ON", NullS }; TYPELIB bool_typelib= { - array_elements(bool_type_names)-1, "", bool_type_names + array_elements(bool_type_names)-1, "", bool_type_names, NULL }; const char *delay_key_write_type_names[]= { "OFF", "ON", "ALL", NullS }; TYPELIB delay_key_write_typelib= { - array_elements(delay_key_write_type_names)-1, "", delay_key_write_type_names + array_elements(delay_key_write_type_names)-1, "", + delay_key_write_type_names, NULL }; -static bool sys_check_charset(THD *thd, set_var *var); +static int sys_check_charset(THD *thd, set_var *var); static bool sys_update_charset(THD *thd, set_var *var); static void sys_set_default_charset(THD *thd, enum_var_type type); +static int sys_check_ftb_syntax(THD *thd, set_var *var); +static bool sys_update_ftb_syntax(THD *thd, set_var * var); +static void sys_default_ftb_syntax(THD *thd, enum_var_type type); +static bool sys_update_init_connect(THD*, set_var*); +static void sys_default_init_connect(THD*, enum_var_type type); +static bool sys_update_init_slave(THD*, set_var*); +static void sys_default_init_slave(THD*, enum_var_type type); static bool set_option_bit(THD *thd, set_var *var); static bool set_option_autocommit(THD *thd, set_var *var); +static int check_log_update(THD *thd, set_var *var); static bool set_log_update(THD *thd, set_var *var); +static int check_pseudo_thread_id(THD *thd, set_var *var); static void fix_low_priority_updates(THD *thd, enum_var_type type); static void fix_tx_isolation(THD *thd, enum_var_type type); static void fix_net_read_timeout(THD *thd, enum_var_type type); @@ -82,15 +104,22 @@ static void fix_net_write_timeout(THD *thd, enum_var_type type); static void fix_net_retry_count(THD *thd, enum_var_type type); static void fix_max_join_size(THD *thd, enum_var_type type); static void fix_query_cache_size(THD *thd, enum_var_type type); -static void fix_key_buffer_size(THD *thd, enum_var_type type); +static void fix_query_cache_min_res_unit(THD *thd, enum_var_type type); static void fix_myisam_max_extra_sort_file_size(THD *thd, enum_var_type type); static void fix_myisam_max_sort_file_size(THD *thd, enum_var_type type); static void fix_max_binlog_size(THD *thd, enum_var_type type); static void fix_max_relay_log_size(THD *thd, enum_var_type type); static void fix_max_connections(THD *thd, enum_var_type type); +static int check_max_delayed_threads(THD *thd, set_var *var); static void fix_thd_mem_root(THD *thd, enum_var_type type); static void fix_trans_mem_root(THD *thd, enum_var_type type); static void fix_server_id(THD *thd, enum_var_type type); +static KEY_CACHE *create_key_cache(const char *name, uint length); +void fix_sql_mode_var(THD *thd, enum_var_type type); +static byte *get_error_count(THD *thd); +static byte *get_warning_count(THD *thd); +static byte *get_prepared_stmt_count(THD *thd); +static byte *get_have_innodb(THD *thd); /* Variable definition list @@ -103,11 +132,19 @@ sys_var_long_ptr sys_binlog_cache_size("binlog_cache_size", &binlog_cache_size); sys_var_thd_ulong sys_bulk_insert_buff_size("bulk_insert_buffer_size", 
&SV::bulk_insert_buff_size); -sys_var_str sys_charset("character_set", +sys_var_character_set_server sys_character_set_server("character_set_server"); +sys_var_str sys_charset_system("character_set_system", sys_check_charset, sys_update_charset, - sys_set_default_charset); -sys_var_thd_conv_charset sys_convert_charset("convert_character_set"); + sys_set_default_charset, + (char *)my_charset_utf8_general_ci.name); +sys_var_character_set_database sys_character_set_database("character_set_database"); +sys_var_character_set_client sys_character_set_client("character_set_client"); +sys_var_character_set_connection sys_character_set_connection("character_set_connection"); +sys_var_character_set_results sys_character_set_results("character_set_results"); +sys_var_collation_connection sys_collation_connection("collation_connection"); +sys_var_collation_database sys_collation_database("collation_database"); +sys_var_collation_server sys_collation_server("collation_server"); sys_var_bool_ptr sys_concurrent_insert("concurrent_insert", &myisam_concurrent_insert); sys_var_long_ptr sys_connect_timeout("connect_timeout", @@ -122,15 +159,35 @@ sys_var_long_ptr sys_delayed_insert_timeout("delayed_insert_timeout", &delayed_insert_timeout); sys_var_long_ptr sys_delayed_queue_size("delayed_queue_size", &delayed_queue_size); +sys_var_long_ptr sys_expire_logs_days("expire_logs_days", + &expire_logs_days); sys_var_bool_ptr sys_flush("flush", &myisam_flush); sys_var_long_ptr sys_flush_time("flush_time", &flush_time); +sys_var_str sys_ft_boolean_syntax("ft_boolean_syntax", + sys_check_ftb_syntax, + sys_update_ftb_syntax, + sys_default_ftb_syntax, + ft_boolean_syntax); +sys_var_str sys_init_connect("init_connect", 0, + sys_update_init_connect, + sys_default_init_connect,0); +sys_var_str sys_init_slave("init_slave", 0, + sys_update_init_slave, + sys_default_init_slave,0); sys_var_thd_ulong sys_interactive_timeout("interactive_timeout", &SV::net_interactive_timeout); sys_var_thd_ulong sys_join_buffer_size("join_buffer_size", &SV::join_buff_size); -sys_var_ulonglong_ptr sys_key_buffer_size("key_buffer_size", - &keybuff_size, - fix_key_buffer_size); +sys_var_key_buffer_size sys_key_buffer_size("key_buffer_size"); +sys_var_key_cache_long sys_key_cache_block_size("key_cache_block_size", + offsetof(KEY_CACHE, + param_block_size)); +sys_var_key_cache_long sys_key_cache_division_limit("key_cache_division_limit", + offsetof(KEY_CACHE, + param_division_limit)); +sys_var_key_cache_long sys_key_cache_age_threshold("key_cache_age_threshold", + offsetof(KEY_CACHE, + param_age_threshold)); sys_var_bool_ptr sys_local_infile("local_infile", &opt_local_infile); sys_var_thd_ulong sys_log_warnings("log_warnings", &SV::log_warnings); @@ -157,22 +214,36 @@ sys_var_long_ptr sys_max_connections("max_connections", sys_var_long_ptr sys_max_connect_errors("max_connect_errors", &max_connect_errors); sys_var_thd_ulong sys_max_insert_delayed_threads("max_insert_delayed_threads", - &SV::max_insert_delayed_threads); + &SV::max_insert_delayed_threads, + check_max_delayed_threads, + fix_max_connections); sys_var_thd_ulong sys_max_delayed_threads("max_delayed_threads", &SV::max_insert_delayed_threads, - fix_max_connections); + check_max_delayed_threads, + fix_max_connections); +sys_var_thd_ulong sys_max_error_count("max_error_count", + &SV::max_error_count); sys_var_thd_ulong sys_max_heap_table_size("max_heap_table_size", &SV::max_heap_table_size); +sys_var_thd_ulong sys_pseudo_thread_id("pseudo_thread_id", + &SV::pseudo_thread_id, + 
check_pseudo_thread_id, 0); sys_var_thd_ha_rows sys_max_join_size("max_join_size", &SV::max_join_size, fix_max_join_size); sys_var_thd_ulong sys_max_seeks_for_key("max_seeks_for_key", &SV::max_seeks_for_key); +sys_var_thd_ulong sys_max_length_for_sort_data("max_length_for_sort_data", + &SV::max_length_for_sort_data); #ifndef TO_BE_DELETED /* Alias for max_join_size */ sys_var_thd_ha_rows sys_sql_max_join_size("sql_max_join_size", &SV::max_join_size, fix_max_join_size); #endif +static sys_var_long_ptr_global +sys_max_prepared_stmt_count("max_prepared_stmt_count", + &max_prepared_stmt_count, + &LOCK_prepared_stmt_count); sys_var_long_ptr sys_max_relay_log_size("max_relay_log_size", &max_relay_log_size, fix_max_relay_log_size); @@ -184,27 +255,42 @@ sys_var_thd_ulong sys_max_tmp_tables("max_tmp_tables", &SV::max_tmp_tables); sys_var_long_ptr sys_max_write_lock_count("max_write_lock_count", &max_write_lock_count); +sys_var_long_ptr sys_myisam_data_pointer_size("myisam_data_pointer_size", + &myisam_data_pointer_size); sys_var_thd_ulonglong sys_myisam_max_extra_sort_file_size("myisam_max_extra_sort_file_size", &SV::myisam_max_extra_sort_file_size, fix_myisam_max_extra_sort_file_size, 1); sys_var_thd_ulonglong sys_myisam_max_sort_file_size("myisam_max_sort_file_size", &SV::myisam_max_sort_file_size, fix_myisam_max_sort_file_size, 1); sys_var_thd_ulong sys_myisam_repair_threads("myisam_repair_threads", &SV::myisam_repair_threads); sys_var_thd_ulong sys_myisam_sort_buffer_size("myisam_sort_buffer_size", &SV::myisam_sort_buff_size); + +sys_var_thd_enum sys_myisam_stats_method("myisam_stats_method", + &SV::myisam_stats_method, + &myisam_stats_method_typelib, + NULL); + sys_var_thd_ulong sys_net_buffer_length("net_buffer_length", &SV::net_buffer_length); sys_var_thd_ulong sys_net_read_timeout("net_read_timeout", &SV::net_read_timeout, - fix_net_read_timeout); + 0, fix_net_read_timeout); sys_var_thd_ulong sys_net_write_timeout("net_write_timeout", &SV::net_write_timeout, - fix_net_write_timeout); + 0, fix_net_write_timeout); sys_var_thd_ulong sys_net_retry_count("net_retry_count", &SV::net_retry_count, - fix_net_retry_count); + 0, fix_net_retry_count); sys_var_thd_bool sys_new_mode("new", &SV::new_mode); +sys_var_thd_bool sys_old_passwords("old_passwords", &SV::old_passwords); +sys_var_thd_ulong sys_preload_buff_size("preload_buffer_size", + &SV::preload_buff_size); sys_var_thd_ulong sys_read_buff_size("read_buffer_size", &SV::read_buff_size); sys_var_bool_ptr sys_readonly("read_only", &opt_readonly); sys_var_thd_ulong sys_read_rnd_buff_size("read_rnd_buffer_size", &SV::read_rnd_buff_size); +#ifdef HAVE_REPLICATION +sys_var_bool_ptr sys_relay_log_purge("relay_log_purge", + &relay_log_purge); +#endif sys_var_long_ptr sys_rpl_recovery_rank("rpl_recovery_rank", &rpl_recovery_rank); sys_var_long_ptr sys_query_cache_size("query_cache_size", @@ -215,20 +301,23 @@ sys_var_thd_ulong sys_range_alloc_block_size("range_alloc_block_size", &SV::range_alloc_block_size); sys_var_thd_ulong sys_query_alloc_block_size("query_alloc_block_size", &SV::query_alloc_block_size, - fix_thd_mem_root); + 0, fix_thd_mem_root); sys_var_thd_ulong sys_query_prealloc_size("query_prealloc_size", &SV::query_prealloc_size, - fix_thd_mem_root); + 0, fix_thd_mem_root); sys_var_thd_ulong sys_trans_alloc_block_size("transaction_alloc_block_size", &SV::trans_alloc_block_size, - fix_trans_mem_root); + 0, fix_trans_mem_root); sys_var_thd_ulong sys_trans_prealloc_size("transaction_prealloc_size", &SV::trans_prealloc_size, - fix_trans_mem_root); 
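A pattern worth calling out in the constructor calls above: the extra 0 inserted ahead of the existing fix_* callbacks (fix_net_read_timeout, fix_thd_mem_root, fix_trans_mem_root and friends) is a slot for an optional per-variable check hook, so validation now happens in a separate step that caches the value before update() applies it. The standalone sketch below illustrates that two-phase check/update shape; IntVar and SetRequest are illustrative stand-ins, not the server's sys_var/set_var classes, and the limits used in main() are made up.

// Illustrative sketch only: IntVar/SetRequest are stand-ins for the server's
// sys_var/set_var classes, and the limits used in main() are made up.
#include <cstdio>
#include <functional>
#include <string>
#include <utility>

struct SetRequest {              // roughly plays the role of set_var
  long long     requested;       // raw value coming from the parser
  unsigned long saved;           // roughly plays the role of save_result
};

class IntVar {                   // roughly plays the role of sys_var_thd_ulong
public:
  using CheckFn = std::function<bool(SetRequest &)>;
  using FixFn   = std::function<void(unsigned long)>;

  IntVar(std::string name, unsigned long init,
         CheckFn check = nullptr, FixFn fix = nullptr)
    : name_(std::move(name)), value_(init),
      check_(std::move(check)), fix_(std::move(fix)) {}

  // Phase 1: validate and stash the value; nothing is applied yet.
  bool check(SetRequest &req) const {
    if (req.requested < 0)
      req.requested = 0;                       // generic clamp, as in the patch
    if (check_ && !check_(req))
      return false;                            // optional per-variable hook
    req.saved = static_cast<unsigned long>(req.requested);
    return true;
  }

  // Phase 2: apply the previously validated value, then run the fix_* hook.
  void update(const SetRequest &req) {
    value_ = req.saved;
    if (fix_)
      fix_(value_);
  }

  unsigned long value() const { return value_; }

private:
  std::string   name_;
  unsigned long value_;
  CheckFn       check_;
  FixFn         fix_;
};

int main() {
  IntVar max_delayed("max_delayed_threads", 20,
                     [](SetRequest &r) { return r.requested <= 16384; },
                     [](unsigned long v) { std::printf("resize alarms for %lu\n", v); });
  SetRequest req{100, 0};
  if (max_delayed.check(req))
    max_delayed.update(req);
  std::printf("value=%lu\n", max_delayed.value());
  return 0;
}

Splitting check from update is what lets a multi-assignment SET statement validate every value first and apply updates only if all checks pass.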
+ 0, fix_trans_mem_root); #ifdef HAVE_QUERY_CACHE sys_var_long_ptr sys_query_cache_limit("query_cache_limit", &query_cache.query_cache_limit); +sys_var_long_ptr sys_query_cache_min_res_unit("query_cache_min_res_unit", + &query_cache_min_res_unit, + fix_query_cache_min_res_unit); sys_var_thd_enum sys_query_cache_type("query_cache_type", &SV::query_cache_type, &query_cache_type_typelib); @@ -236,17 +325,38 @@ sys_var_thd_bool sys_query_cache_wlock_invalidate("query_cache_wlock_invalidate", &SV::query_cache_wlock_invalidate); #endif /* HAVE_QUERY_CACHE */ +sys_var_bool_ptr sys_secure_auth("secure_auth", &opt_secure_auth); sys_var_long_ptr sys_server_id("server_id", &server_id, fix_server_id); sys_var_bool_ptr sys_slave_compressed_protocol("slave_compressed_protocol", &opt_slave_compressed_protocol); +#ifdef HAVE_REPLICATION sys_var_long_ptr sys_slave_net_timeout("slave_net_timeout", &slave_net_timeout); +sys_var_long_ptr sys_slave_trans_retries("slave_transaction_retries", + &slave_trans_retries); +#endif sys_var_long_ptr sys_slow_launch_time("slow_launch_time", &slow_launch_time); sys_var_thd_ulong sys_sort_buffer("sort_buffer_size", &SV::sortbuff_size); -sys_var_thd_enum sys_table_type("table_type", &SV::table_type, - &ha_table_typelib); +sys_var_thd_sql_mode sys_sql_mode("sql_mode", + &SV::sql_mode); +sys_var_thd_table_type sys_table_type("table_type", + &SV::table_type); +sys_var_thd_storage_engine sys_storage_engine("storage_engine", + &SV::table_type); +#ifdef HAVE_REPLICATION +sys_var_sync_binlog_period sys_sync_binlog_period("sync_binlog", &sync_binlog_period); +sys_var_thd_ulong sys_sync_replication("sync_replication", + &SV::sync_replication); +sys_var_thd_ulong sys_sync_replication_slave_id( + "sync_replication_slave_id", + &SV::sync_replication_slave_id); +sys_var_thd_ulong sys_sync_replication_timeout( + "sync_replication_timeout", + &SV::sync_replication_timeout); +#endif +sys_var_bool_ptr sys_sync_frm("sync_frm", &opt_sync_frm); sys_var_long_ptr sys_table_cache_size("table_cache", &table_cache_size); sys_var_long_ptr sys_thread_cache_size("thread_cache_size", @@ -259,72 +369,98 @@ sys_var_thd_ulong sys_tmp_table_size("tmp_table_size", &SV::tmp_table_size); sys_var_thd_ulong sys_net_wait_timeout("wait_timeout", &SV::net_wait_timeout); - + #ifdef HAVE_INNOBASE_DB sys_var_long_ptr sys_innodb_max_dirty_pages_pct("innodb_max_dirty_pages_pct", &srv_max_buf_pool_modified_pct); sys_var_long_ptr sys_innodb_max_purge_lag("innodb_max_purge_lag", &srv_max_purge_lag); -sys_var_long_ptr sys_innodb_autoextend_increment("innodb_autoextend_increment", - &srv_auto_extend_increment); sys_var_thd_bool sys_innodb_table_locks("innodb_table_locks", &SV::innodb_table_locks); -#endif +sys_var_long_ptr sys_innodb_autoextend_increment("innodb_autoextend_increment", + &srv_auto_extend_increment); +#endif +#ifdef HAVE_NDBCLUSTER_DB +/* ndb thread specific variable settings */ +sys_var_thd_ulong +sys_ndb_autoincrement_prefetch_sz("ndb_autoincrement_prefetch_sz", + &SV::ndb_autoincrement_prefetch_sz); +sys_var_thd_bool +sys_ndb_force_send("ndb_force_send", &SV::ndb_force_send); +sys_var_thd_bool +sys_ndb_use_exact_count("ndb_use_exact_count", &SV::ndb_use_exact_count); +sys_var_thd_bool +sys_ndb_use_transactions("ndb_use_transactions", &SV::ndb_use_transactions); +#endif -/* - Variables that are bits in THD -*/ +/* Time/date/datetime formats */ + +sys_var_thd_date_time_format sys_time_format("time_format", + &SV::time_format, + MYSQL_TIMESTAMP_TIME); +sys_var_thd_date_time_format 
sys_date_format("date_format", + &SV::date_format, + MYSQL_TIMESTAMP_DATE); +sys_var_thd_date_time_format sys_datetime_format("datetime_format", + &SV::datetime_format, + MYSQL_TIMESTAMP_DATETIME); + +/* Variables that are bits in THD */ -static sys_var_thd_bit sys_autocommit("autocommit", +static sys_var_thd_bit sys_autocommit("autocommit", 0, set_option_autocommit, OPTION_NOT_AUTOCOMMIT, 1); -static sys_var_thd_bit sys_big_tables("big_tables", +static sys_var_thd_bit sys_big_tables("big_tables", 0, set_option_bit, OPTION_BIG_TABLES); #ifndef TO_BE_DELETED /* Alias for big_tables */ -static sys_var_thd_bit sys_sql_big_tables("sql_big_tables", +static sys_var_thd_bit sys_sql_big_tables("sql_big_tables", 0, set_option_bit, OPTION_BIG_TABLES); #endif -static sys_var_thd_bit sys_big_selects("sql_big_selects", +static sys_var_thd_bit sys_big_selects("sql_big_selects", 0, set_option_bit, OPTION_BIG_SELECTS); -static sys_var_thd_bit sys_log_off("sql_log_off", +static sys_var_thd_bit sys_log_off("sql_log_off", 0, set_option_bit, OPTION_LOG_OFF); static sys_var_thd_bit sys_log_update("sql_log_update", + check_log_update, set_log_update, OPTION_UPDATE_LOG); static sys_var_thd_bit sys_log_binlog("sql_log_bin", - set_log_update, - OPTION_BIN_LOG); -static sys_var_thd_bit sys_sql_warnings("sql_warnings", + check_log_update, + set_log_update, + OPTION_BIN_LOG); +static sys_var_thd_bit sys_sql_warnings("sql_warnings", 0, set_option_bit, OPTION_WARNINGS); -static sys_var_thd_bit sys_auto_is_null("sql_auto_is_null", +static sys_var_thd_bit sys_sql_notes("sql_notes", 0, + set_option_bit, + OPTION_SQL_NOTES); +static sys_var_thd_bit sys_auto_is_null("sql_auto_is_null", 0, set_option_bit, OPTION_AUTO_IS_NULL); -static sys_var_thd_bit sys_safe_updates("sql_safe_updates", +static sys_var_thd_bit sys_safe_updates("sql_safe_updates", 0, set_option_bit, OPTION_SAFE_UPDATES); -static sys_var_thd_bit sys_buffer_results("sql_buffer_result", +static sys_var_thd_bit sys_buffer_results("sql_buffer_result", 0, set_option_bit, OPTION_BUFFER_RESULT); -static sys_var_thd_bit sys_quote_show_create("sql_quote_show_create", +static sys_var_thd_bit sys_quote_show_create("sql_quote_show_create", 0, set_option_bit, OPTION_QUOTE_SHOW_CREATE); -static sys_var_thd_bit sys_foreign_key_checks("foreign_key_checks", +static sys_var_thd_bit sys_foreign_key_checks("foreign_key_checks", 0, set_option_bit, OPTION_NO_FOREIGN_KEY_CHECKS, 1); -static sys_var_thd_bit sys_unique_checks("unique_checks", +static sys_var_thd_bit sys_unique_checks("unique_checks", 0, set_option_bit, OPTION_RELAXED_UNIQUE_CHECKS, 1); - /* Local state variables */ static sys_var_thd_ha_rows sys_select_limit("sql_select_limit", @@ -333,18 +469,38 @@ static sys_var_timestamp sys_timestamp("timestamp"); static sys_var_last_insert_id sys_last_insert_id("last_insert_id"); static sys_var_last_insert_id sys_identity("identity"); static sys_var_insert_id sys_insert_id("insert_id"); +static sys_var_readonly sys_error_count("error_count", + OPT_SESSION, + SHOW_LONG, + get_error_count); +static sys_var_readonly sys_warning_count("warning_count", + OPT_SESSION, + SHOW_LONG, + get_warning_count); +static sys_var_readonly sys_prepared_stmt_count("prepared_stmt_count", + OPT_GLOBAL, SHOW_LONG, + get_prepared_stmt_count); + /* alias for last_insert_id() to be compatible with Sybase */ +#ifdef HAVE_REPLICATION static sys_var_slave_skip_counter sys_slave_skip_counter("sql_slave_skip_counter"); +#endif static sys_var_rand_seed1 sys_rand_seed1("rand_seed1"); static sys_var_rand_seed2 
sys_rand_seed2("rand_seed2"); static sys_var_thd_ulong sys_default_week_format("default_week_format", - &SV::default_week_format); + &SV::default_week_format); + +sys_var_thd_ulong sys_group_concat_max_len("group_concat_max_len", + &SV::group_concat_max_len); +sys_var_thd_time_zone sys_time_zone("time_zone"); /* Read only variables */ sys_var_const_str sys_os("version_compile_os", SYSTEM_TYPE); +sys_var_readonly sys_have_innodb("have_innodb", OPT_GLOBAL, + SHOW_CHAR, get_have_innodb); /* Global read-only variable describing server license */ sys_var_const_str sys_license("license", STRINGIFY_ARG(LICENSE)); @@ -366,22 +522,41 @@ sys_var *sys_variables[]= &sys_binlog_cache_size, &sys_buffer_results, &sys_bulk_insert_buff_size, + &sys_character_set_server, + &sys_character_set_database, + &sys_character_set_client, + &sys_character_set_connection, + &sys_character_set_results, + &sys_collation_connection, + &sys_collation_database, + &sys_collation_server, &sys_concurrent_insert, &sys_connect_timeout, + &sys_date_format, + &sys_datetime_format, &sys_default_week_format, - &sys_convert_charset, &sys_delay_key_write, &sys_delayed_insert_limit, &sys_delayed_insert_timeout, &sys_delayed_queue_size, + &sys_error_count, + &sys_expire_logs_days, &sys_flush, &sys_flush_time, + &sys_ft_boolean_syntax, &sys_foreign_key_checks, + &sys_group_concat_max_len, + &sys_have_innodb, &sys_identity, + &sys_init_connect, + &sys_init_slave, &sys_insert_id, &sys_interactive_timeout, &sys_join_buffer_size, &sys_key_buffer_size, + &sys_key_cache_block_size, + &sys_key_cache_division_limit, + &sys_key_cache_age_threshold, &sys_last_insert_id, &sys_license, &sys_local_infile, @@ -397,30 +572,40 @@ sys_var *sys_variables[]= &sys_max_connect_errors, &sys_max_connections, &sys_max_delayed_threads, + &sys_max_error_count, &sys_max_insert_delayed_threads, &sys_max_heap_table_size, &sys_max_join_size, + &sys_max_length_for_sort_data, + &sys_max_prepared_stmt_count, &sys_max_relay_log_size, &sys_max_seeks_for_key, &sys_max_sort_length, &sys_max_tmp_tables, &sys_max_user_connections, &sys_max_write_lock_count, + &sys_myisam_data_pointer_size, &sys_myisam_max_extra_sort_file_size, &sys_myisam_max_sort_file_size, &sys_myisam_repair_threads, &sys_myisam_sort_buffer_size, + &sys_myisam_stats_method, &sys_net_buffer_length, &sys_net_read_timeout, &sys_net_retry_count, &sys_net_wait_timeout, &sys_net_write_timeout, &sys_new_mode, + &sys_old_passwords, + &sys_preload_buff_size, + &sys_prepared_stmt_count, + &sys_pseudo_thread_id, &sys_query_alloc_block_size, &sys_query_cache_size, &sys_query_prealloc_size, #ifdef HAVE_QUERY_CACHE &sys_query_cache_limit, + &sys_query_cache_min_res_unit, &sys_query_cache_type, &sys_query_cache_wlock_invalidate, #endif /* HAVE_QUERY_CACHE */ @@ -428,26 +613,45 @@ sys_var *sys_variables[]= &sys_rand_seed1, &sys_rand_seed2, &sys_range_alloc_block_size, + &sys_readonly, &sys_read_buff_size, &sys_read_rnd_buff_size, +#ifdef HAVE_REPLICATION + &sys_relay_log_purge, +#endif &sys_rpl_recovery_rank, &sys_safe_updates, + &sys_secure_auth, &sys_select_limit, &sys_server_id, +#ifdef HAVE_REPLICATION &sys_slave_compressed_protocol, &sys_slave_net_timeout, + &sys_slave_trans_retries, &sys_slave_skip_counter, - &sys_readonly, +#endif &sys_slow_launch_time, &sys_sort_buffer, &sys_sql_big_tables, &sys_sql_low_priority_updates, &sys_sql_max_join_size, + &sys_sql_mode, &sys_sql_warnings, + &sys_sql_notes, + &sys_storage_engine, +#ifdef HAVE_REPLICATION + &sys_sync_binlog_period, + &sys_sync_replication, + 
&sys_sync_replication_slave_id, + &sys_sync_replication_timeout, +#endif + &sys_sync_frm, &sys_table_cache_size, &sys_table_type, &sys_thread_cache_size, + &sys_time_format, &sys_timestamp, + &sys_time_zone, &sys_tmp_table_size, &sys_trans_alloc_block_size, &sys_trans_prealloc_size, @@ -456,10 +660,18 @@ sys_var *sys_variables[]= #ifdef HAVE_INNOBASE_DB &sys_innodb_max_dirty_pages_pct, &sys_innodb_max_purge_lag, - &sys_innodb_autoextend_increment, &sys_innodb_table_locks, -#endif - &sys_unique_checks + &sys_innodb_max_purge_lag, + &sys_innodb_autoextend_increment, +#endif +#ifdef HAVE_NDBCLUSTER_DB + &sys_ndb_autoincrement_prefetch_sz, + &sys_ndb_force_send, + &sys_ndb_use_exact_count, + &sys_ndb_use_transactions, +#endif + &sys_unique_checks, + &sys_warning_count }; @@ -478,49 +690,72 @@ struct show_var_st init_vars[]= { {"bdb_max_lock", (char*) &berkeley_max_lock, SHOW_LONG}, {"bdb_shared_data", (char*) &berkeley_shared_data, SHOW_BOOL}, {"bdb_tmpdir", (char*) &berkeley_tmpdir, SHOW_CHAR_PTR}, - {"bdb_version", (char*) DB_VERSION_STRING, SHOW_CHAR}, #endif {sys_binlog_cache_size.name,(char*) &sys_binlog_cache_size, SHOW_SYS}, {sys_bulk_insert_buff_size.name,(char*) &sys_bulk_insert_buff_size,SHOW_SYS}, - {sys_charset.name, (char*) &sys_charset, SHOW_SYS}, - {"character_sets", (char*) &charsets_list, SHOW_CHAR_PTR}, + {sys_character_set_client.name,(char*) &sys_character_set_client, SHOW_SYS}, + {sys_character_set_connection.name,(char*) &sys_character_set_connection,SHOW_SYS}, + {sys_character_set_database.name, (char*) &sys_character_set_database,SHOW_SYS}, + {sys_character_set_results.name,(char*) &sys_character_set_results, SHOW_SYS}, + {sys_character_set_server.name, (char*) &sys_character_set_server,SHOW_SYS}, + {sys_charset_system.name, (char*) &sys_charset_system, SHOW_SYS}, + {"character_sets_dir", mysql_charsets_dir, SHOW_CHAR}, + {sys_collation_connection.name,(char*) &sys_collation_connection, SHOW_SYS}, + {sys_collation_database.name,(char*) &sys_collation_database, SHOW_SYS}, + {sys_collation_server.name,(char*) &sys_collation_server, SHOW_SYS}, {sys_concurrent_insert.name,(char*) &sys_concurrent_insert, SHOW_SYS}, {sys_connect_timeout.name, (char*) &sys_connect_timeout, SHOW_SYS}, - {sys_convert_charset.name, (char*) &sys_convert_charset, SHOW_SYS}, {"datadir", mysql_real_data_home, SHOW_CHAR}, - {"default_week_format", (char*) &sys_default_week_format, SHOW_SYS}, + {sys_date_format.name, (char*) &sys_date_format, SHOW_SYS}, + {sys_datetime_format.name, (char*) &sys_datetime_format, SHOW_SYS}, + {sys_default_week_format.name, (char*) &sys_default_week_format, SHOW_SYS}, {sys_delay_key_write.name, (char*) &sys_delay_key_write, SHOW_SYS}, {sys_delayed_insert_limit.name, (char*) &sys_delayed_insert_limit,SHOW_SYS}, {sys_delayed_insert_timeout.name, (char*) &sys_delayed_insert_timeout, SHOW_SYS}, {sys_delayed_queue_size.name,(char*) &sys_delayed_queue_size, SHOW_SYS}, + {sys_expire_logs_days.name, (char*) &sys_expire_logs_days, SHOW_SYS}, {sys_flush.name, (char*) &sys_flush, SHOW_SYS}, {sys_flush_time.name, (char*) &sys_flush_time, SHOW_SYS}, - {"ft_boolean_syntax", (char*) ft_boolean_syntax, SHOW_CHAR}, + {sys_ft_boolean_syntax.name,(char*) &ft_boolean_syntax, SHOW_CHAR}, {"ft_max_word_len", (char*) &ft_max_word_len, SHOW_LONG}, - {"ft_max_word_len_for_sort",(char*) &ft_max_word_len_for_sort, SHOW_LONG}, {"ft_min_word_len", (char*) &ft_min_word_len, SHOW_LONG}, + {"ft_query_expansion_limit",(char*) &ft_query_expansion_limit, SHOW_LONG}, {"ft_stopword_file", (char*) 
&ft_stopword_file, SHOW_CHAR_PTR}, + {sys_group_concat_max_len.name, (char*) &sys_group_concat_max_len, SHOW_SYS}, + {"have_archive", (char*) &have_archive_db, SHOW_HAVE}, {"have_bdb", (char*) &have_berkeley_db, SHOW_HAVE}, + {"have_blackhole_engine", (char*) &have_blackhole_db, SHOW_HAVE}, + {"have_compress", (char*) &have_compress, SHOW_HAVE}, {"have_crypt", (char*) &have_crypt, SHOW_HAVE}, + {"have_csv", (char*) &have_csv_db, SHOW_HAVE}, + {"have_example_engine", (char*) &have_example_db, SHOW_HAVE}, + {"have_geometry", (char*) &have_geometry, SHOW_HAVE}, {"have_innodb", (char*) &have_innodb, SHOW_HAVE}, - {"have_isam", (char*) &have_isam, SHOW_HAVE}, + {"have_isam", (char*) &have_isam, SHOW_HAVE}, + {"have_ndbcluster", (char*) &have_ndbcluster, SHOW_HAVE}, {"have_openssl", (char*) &have_openssl, SHOW_HAVE}, {"have_query_cache", (char*) &have_query_cache, SHOW_HAVE}, {"have_raid", (char*) &have_raid, SHOW_HAVE}, - {"have_symlink", (char*) &have_symlink, SHOW_HAVE}, + {"have_rtree_keys", (char*) &have_rtree_keys, SHOW_HAVE}, + {"have_symlink", (char*) &have_symlink, SHOW_HAVE}, + {"init_connect", (char*) &sys_init_connect, SHOW_SYS}, {"init_file", (char*) &opt_init_file, SHOW_CHAR_PTR}, + {"init_slave", (char*) &sys_init_slave, SHOW_SYS}, #ifdef HAVE_INNOBASE_DB {"innodb_additional_mem_pool_size", (char*) &innobase_additional_mem_pool_size, SHOW_LONG }, {sys_innodb_autoextend_increment.name, (char*) &sys_innodb_autoextend_increment, SHOW_SYS}, + {"innodb_buffer_pool_awe_mem_mb", (char*) &innobase_buffer_pool_awe_mem_mb, SHOW_LONG }, {"innodb_buffer_pool_size", (char*) &innobase_buffer_pool_size, SHOW_LONG }, {"innodb_data_file_path", (char*) &innobase_data_file_path, SHOW_CHAR_PTR}, {"innodb_data_home_dir", (char*) &innobase_data_home_dir, SHOW_CHAR_PTR}, {"innodb_fast_shutdown", (char*) &innobase_fast_shutdown, SHOW_MY_BOOL}, {"innodb_file_io_threads", (char*) &innobase_file_io_threads, SHOW_LONG }, + {"innodb_file_per_table", (char*) &innobase_file_per_table, SHOW_MY_BOOL}, {"innodb_flush_log_at_trx_commit", (char*) &innobase_flush_log_at_trx_commit, SHOW_INT}, {"innodb_flush_method", (char*) &innobase_unix_file_flush_method, SHOW_CHAR_PTR}, {"innodb_force_recovery", (char*) &innobase_force_recovery, SHOW_LONG }, {"innodb_lock_wait_timeout", (char*) &innobase_lock_wait_timeout, SHOW_LONG }, + {"innodb_locks_unsafe_for_binlog", (char*) &innobase_locks_unsafe_for_binlog, SHOW_MY_BOOL}, {"innodb_log_arch_dir", (char*) &innobase_log_arch_dir, SHOW_CHAR_PTR}, {"innodb_log_archive", (char*) &innobase_log_archive, SHOW_MY_BOOL}, {"innodb_log_buffer_size", (char*) &innobase_log_buffer_size, SHOW_LONG }, @@ -530,12 +765,19 @@ struct show_var_st init_vars[]= { {sys_innodb_max_dirty_pages_pct.name, (char*) &sys_innodb_max_dirty_pages_pct, SHOW_SYS}, {sys_innodb_max_purge_lag.name, (char*) &sys_innodb_max_purge_lag, SHOW_SYS}, {"innodb_mirrored_log_groups", (char*) &innobase_mirrored_log_groups, SHOW_LONG}, + {"innodb_open_files", (char*) &innobase_open_files, SHOW_LONG }, {sys_innodb_table_locks.name, (char*) &sys_innodb_table_locks, SHOW_SYS}, {"innodb_thread_concurrency", (char*) &innobase_thread_concurrency, SHOW_LONG }, #endif {sys_interactive_timeout.name,(char*) &sys_interactive_timeout, SHOW_SYS}, {sys_join_buffer_size.name, (char*) &sys_join_buffer_size, SHOW_SYS}, {sys_key_buffer_size.name, (char*) &sys_key_buffer_size, SHOW_SYS}, + {sys_key_cache_age_threshold.name, (char*) &sys_key_cache_age_threshold, + SHOW_SYS}, + {sys_key_cache_block_size.name, (char*) &sys_key_cache_block_size, + 
SHOW_SYS}, + {sys_key_cache_division_limit.name, (char*) &sys_key_cache_division_limit, + SHOW_SYS}, {"language", language, SHOW_CHAR}, {"large_files_support", (char*) &opt_large_files, SHOW_BOOL}, {sys_license.name, (char*) &sys_license, SHOW_SYS}, @@ -546,7 +788,9 @@ struct show_var_st init_vars[]= { {"log", (char*) &opt_log, SHOW_BOOL}, {"log_bin", (char*) &opt_bin_log, SHOW_BOOL}, {"log_error", (char*) log_error_file, SHOW_CHAR}, +#ifdef HAVE_REPLICATION {"log_slave_updates", (char*) &opt_log_slave_updates, SHOW_MY_BOOL}, +#endif {"log_slow_queries", (char*) &opt_slow_log, SHOW_BOOL}, {"log_update", (char*) &opt_update_log, SHOW_BOOL}, {sys_log_warnings.name, (char*) &sys_log_warnings, SHOW_SYS}, @@ -560,16 +804,22 @@ struct show_var_st init_vars[]= { {sys_max_connect_errors.name, (char*) &sys_max_connect_errors, SHOW_SYS}, {sys_max_connections.name, (char*) &sys_max_connections, SHOW_SYS}, {sys_max_delayed_threads.name,(char*) &sys_max_delayed_threads, SHOW_SYS}, + {sys_max_error_count.name, (char*) &sys_max_error_count, SHOW_SYS}, {sys_max_heap_table_size.name,(char*) &sys_max_heap_table_size, SHOW_SYS}, {sys_max_insert_delayed_threads.name, (char*) &sys_max_insert_delayed_threads, SHOW_SYS}, {sys_max_join_size.name, (char*) &sys_max_join_size, SHOW_SYS}, + {sys_max_length_for_sort_data.name, (char*) &sys_max_length_for_sort_data, + SHOW_SYS}, + {sys_max_prepared_stmt_count.name, (char*) &sys_max_prepared_stmt_count, + SHOW_SYS}, {sys_max_relay_log_size.name, (char*) &sys_max_relay_log_size, SHOW_SYS}, {sys_max_seeks_for_key.name, (char*) &sys_max_seeks_for_key, SHOW_SYS}, {sys_max_sort_length.name, (char*) &sys_max_sort_length, SHOW_SYS}, {sys_max_tmp_tables.name, (char*) &sys_max_tmp_tables, SHOW_SYS}, {sys_max_user_connections.name,(char*) &sys_max_user_connections, SHOW_SYS}, {sys_max_write_lock_count.name, (char*) &sys_max_write_lock_count,SHOW_SYS}, + {sys_myisam_data_pointer_size.name, (char*) &sys_myisam_data_pointer_size, SHOW_SYS}, {sys_myisam_max_extra_sort_file_size.name, (char*) &sys_myisam_max_extra_sort_file_size, SHOW_SYS}, @@ -579,26 +829,41 @@ struct show_var_st init_vars[]= { {sys_myisam_repair_threads.name, (char*) &sys_myisam_repair_threads, SHOW_SYS}, {sys_myisam_sort_buffer_size.name, (char*) &sys_myisam_sort_buffer_size, SHOW_SYS}, + + {sys_myisam_stats_method.name, (char*) &sys_myisam_stats_method, SHOW_SYS}, + #ifdef __NT__ {"named_pipe", (char*) &opt_enable_named_pipe, SHOW_MY_BOOL}, #endif +#ifdef HAVE_NDBCLUSTER_DB + {sys_ndb_autoincrement_prefetch_sz.name, + (char*) &sys_ndb_autoincrement_prefetch_sz, SHOW_SYS}, + {sys_ndb_force_send.name, (char*) &sys_ndb_force_send, SHOW_SYS}, + {sys_ndb_use_exact_count.name,(char*) &sys_ndb_use_exact_count, SHOW_SYS}, + {sys_ndb_use_transactions.name,(char*) &sys_ndb_use_transactions, SHOW_SYS}, +#endif {sys_net_buffer_length.name,(char*) &sys_net_buffer_length, SHOW_SYS}, {sys_net_read_timeout.name, (char*) &sys_net_read_timeout, SHOW_SYS}, {sys_net_retry_count.name, (char*) &sys_net_retry_count, SHOW_SYS}, {sys_net_write_timeout.name,(char*) &sys_net_write_timeout, SHOW_SYS}, {sys_new_mode.name, (char*) &sys_new_mode, SHOW_SYS}, + {sys_old_passwords.name, (char*) &sys_old_passwords, SHOW_SYS}, {"open_files_limit", (char*) &open_files_limit, SHOW_LONG}, {"pid_file", (char*) pidfile_name, SHOW_CHAR}, - {"port", (char*) &mysql_port, SHOW_INT}, + {sys_prepared_stmt_count.name, (char*) &sys_prepared_stmt_count, SHOW_SYS}, + {"port", (char*) &mysqld_port, SHOW_INT}, + {sys_preload_buff_size.name, (char*) 
&sys_preload_buff_size, SHOW_SYS}, {"protocol_version", (char*) &protocol_version, SHOW_INT}, {sys_query_alloc_block_size.name, (char*) &sys_query_alloc_block_size, SHOW_SYS}, #ifdef HAVE_QUERY_CACHE {sys_query_cache_limit.name,(char*) &sys_query_cache_limit, SHOW_SYS}, + {sys_query_cache_min_res_unit.name, (char*) &sys_query_cache_min_res_unit, + SHOW_SYS}, {sys_query_cache_size.name, (char*) &sys_query_cache_size, SHOW_SYS}, {sys_query_cache_type.name, (char*) &sys_query_cache_type, SHOW_SYS}, {sys_query_cache_wlock_invalidate.name, - (char*) &sys_query_cache_wlock_invalidate, SHOW_SYS}, + (char *) &sys_query_cache_wlock_invalidate, SHOW_SYS}, #endif /* HAVE_QUERY_CACHE */ {sys_query_prealloc_size.name, (char*) &sys_query_prealloc_size, SHOW_SYS}, {sys_range_alloc_block_size.name, (char*) &sys_range_alloc_block_size, @@ -606,18 +871,45 @@ struct show_var_st init_vars[]= { {sys_read_buff_size.name, (char*) &sys_read_buff_size, SHOW_SYS}, {sys_readonly.name, (char*) &sys_readonly, SHOW_SYS}, {sys_read_rnd_buff_size.name,(char*) &sys_read_rnd_buff_size, SHOW_SYS}, +#ifdef HAVE_REPLICATION + {sys_relay_log_purge.name, (char*) &sys_relay_log_purge, SHOW_SYS}, + {"relay_log_space_limit", (char*) &relay_log_space_limit, SHOW_LONGLONG}, +#endif {sys_rpl_recovery_rank.name,(char*) &sys_rpl_recovery_rank, SHOW_SYS}, + {"secure_auth", (char*) &sys_secure_auth, SHOW_SYS}, +#ifdef HAVE_SMEM + {"shared_memory", (char*) &opt_enable_shared_memory, SHOW_MY_BOOL}, + {"shared_memory_base_name", (char*) &shared_memory_base_name, SHOW_CHAR_PTR}, +#endif {sys_server_id.name, (char*) &sys_server_id, SHOW_SYS}, {"skip_external_locking", (char*) &my_disable_locking, SHOW_MY_BOOL}, {"skip_networking", (char*) &opt_disable_networking, SHOW_BOOL}, {"skip_show_database", (char*) &opt_skip_show_db, SHOW_BOOL}, +#ifdef HAVE_REPLICATION {sys_slave_net_timeout.name,(char*) &sys_slave_net_timeout, SHOW_SYS}, + {sys_slave_trans_retries.name,(char*) &sys_slave_trans_retries, SHOW_SYS}, +#endif {sys_slow_launch_time.name, (char*) &sys_slow_launch_time, SHOW_SYS}, #ifdef HAVE_SYS_UN_H - {"socket", (char*) &mysql_unix_port, SHOW_CHAR_PTR}, + {"socket", (char*) &mysqld_unix_port, SHOW_CHAR_PTR}, #endif {sys_sort_buffer.name, (char*) &sys_sort_buffer, SHOW_SYS}, - {"sql_mode", (char*) &opt_sql_mode, SHOW_LONG}, + {sys_sql_mode.name, (char*) &sys_sql_mode, SHOW_SYS}, + {"sql_notes", (char*) &sys_sql_notes, SHOW_BOOL}, + {"sql_warnings", (char*) &sys_sql_warnings, SHOW_BOOL}, + {sys_storage_engine.name, (char*) &sys_storage_engine, SHOW_SYS}, +#ifdef HAVE_REPLICATION + {sys_sync_binlog_period.name,(char*) &sys_sync_binlog_period, SHOW_SYS}, +#endif + {sys_sync_frm.name, (char*) &sys_sync_frm, SHOW_SYS}, +#ifdef HAVE_REPLICATION + {sys_sync_replication.name, (char*) &sys_sync_replication, SHOW_SYS}, + {sys_sync_replication_slave_id.name, (char*) &sys_sync_replication_slave_id,SHOW_SYS}, + {sys_sync_replication_timeout.name, (char*) &sys_sync_replication_timeout,SHOW_SYS}, +#endif +#ifdef HAVE_TZNAME + {"system_time_zone", system_time_zone, SHOW_CHAR}, +#endif {"table_cache", (char*) &table_cache_size, SHOW_LONG}, {sys_table_type.name, (char*) &sys_table_type, SHOW_SYS}, {sys_thread_cache_size.name,(char*) &sys_thread_cache_size, SHOW_SYS}, @@ -625,32 +917,134 @@ struct show_var_st init_vars[]= { {"thread_concurrency", (char*) &concurrency, SHOW_LONG}, #endif {"thread_stack", (char*) &thread_stack, SHOW_LONG}, -#ifdef HAVE_TZNAME - {"timezone", time_zone, SHOW_CHAR}, -#endif + {sys_time_format.name, (char*) &sys_time_format, 
SHOW_SYS}, + {"time_zone", (char*) &sys_time_zone, SHOW_SYS}, {sys_tmp_table_size.name, (char*) &sys_tmp_table_size, SHOW_SYS}, - {"tmpdir", (char*) &mysql_tmpdir, SHOW_CHAR_PTR}, + {"tmpdir", (char*) &opt_mysql_tmpdir, SHOW_CHAR_PTR}, {sys_trans_alloc_block_size.name, (char*) &sys_trans_alloc_block_size, SHOW_SYS}, {sys_trans_prealloc_size.name, (char*) &sys_trans_prealloc_size, SHOW_SYS}, {sys_tx_isolation.name, (char*) &sys_tx_isolation, SHOW_SYS}, {"version", server_version, SHOW_CHAR}, +#ifdef HAVE_BERKELEY_DB + {"version_bdb", (char*) DB_VERSION_STRING, SHOW_CHAR}, +#endif {"version_comment", (char*) MYSQL_COMPILATION_COMMENT, SHOW_CHAR}, + {"version_compile_machine", (char*) MACHINE_TYPE, SHOW_CHAR}, {sys_os.name, (char*) &sys_os, SHOW_SYS}, {sys_net_wait_timeout.name, (char*) &sys_net_wait_timeout, SHOW_SYS}, {NullS, NullS, SHOW_LONG} }; + +bool sys_var::check(THD *thd, set_var *var) +{ + var->save_result.ulonglong_value= var->value->val_int(); + return 0; +} + +bool sys_var_str::check(THD *thd, set_var *var) +{ + int res; + if (!check_func) + return 0; + + if ((res=(*check_func)(thd, var)) < 0) + my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, + var->value->str_value.ptr()); + return res; +} + /* Functions to check and update variables */ + +/* + Update variables 'init_connect, init_slave'. + + In case of 'DEFAULT' value + (for example: 'set GLOBAL init_connect=DEFAULT') + 'var' parameter is NULL pointer. +*/ + +bool update_sys_var_str(sys_var_str *var_str, rw_lock_t *var_mutex, + set_var *var) +{ + char *res= 0, *old_value=(char *)(var ? var->value->str_value.ptr() : 0); + uint new_length= (var ? var->value->str_value.length() : 0); + if (!old_value) + old_value= (char*) ""; + if (!(res= my_strdup_with_length((byte*)old_value, new_length, MYF(0)))) + return 1; + /* + Replace the old value in such a way that the any thread using + the value will work. + */ + rw_wrlock(var_mutex); + old_value= var_str->value; + var_str->value= res; + var_str->value_length= new_length; + rw_unlock(var_mutex); + my_free(old_value, MYF(MY_ALLOW_ZERO_PTR)); + return 0; +} + + +static bool sys_update_init_connect(THD *thd, set_var *var) +{ + return update_sys_var_str(&sys_init_connect, &LOCK_sys_init_connect, var); +} + + +static void sys_default_init_connect(THD* thd, enum_var_type type) +{ + update_sys_var_str(&sys_init_connect, &LOCK_sys_init_connect, 0); +} + + +static bool sys_update_init_slave(THD *thd, set_var *var) +{ + return update_sys_var_str(&sys_init_slave, &LOCK_sys_init_slave, var); +} + + +static void sys_default_init_slave(THD* thd, enum_var_type type) +{ + update_sys_var_str(&sys_init_slave, &LOCK_sys_init_slave, 0); +} + +static int sys_check_ftb_syntax(THD *thd, set_var *var) +{ + if (thd->master_access & SUPER_ACL) + return ft_boolean_check_syntax_string((byte*) var->value->str_value.c_ptr()) ? 
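The update_sys_var_str() helper above (used by init_connect and init_slave) swaps a shared string under a write lock and frees the old buffer only after the new pointer is published, so readers that copy the value under the same lock never see a half-replaced string. A minimal sketch of that pattern, assuming plain pthreads and strdup as stand-ins for the server's rw_wrlock()/my_strdup_with_length() wrappers, with error handling trimmed:

// Minimal sketch, assuming plain pthreads and strdup as stand-ins for the
// server's rw_wrlock()/my_strdup_with_length() wrappers; error handling trimmed.
#include <pthread.h>
#include <cstdio>
#include <cstdlib>
#include <cstring>

struct StrVar {
  char   *value;
  size_t  length;
};

static pthread_rwlock_t var_lock = PTHREAD_RWLOCK_INITIALIZER;

// Returns true on failure, mirroring the bool convention used in the patch.
bool update_str_var(StrVar *var, const char *new_value)
{
  if (!new_value)
    new_value = "";                       // DEFAULT resets to an empty string
  char *copy = strdup(new_value);         // build the replacement outside the lock
  if (!copy)
    return true;

  pthread_rwlock_wrlock(&var_lock);       // publish the new pointer atomically
  char *old   = var->value;
  var->value  = copy;
  var->length = strlen(copy);
  pthread_rwlock_unlock(&var_lock);

  free(old);   // safe because readers copy the string while holding the lock
  return false;
}

// Readers take the read lock and copy the value before using it.
void read_str_var(const StrVar *var, char *buf, size_t buf_size)
{
  pthread_rwlock_rdlock(&var_lock);
  std::snprintf(buf, buf_size, "%s", var->value ? var->value : "");
  pthread_rwlock_unlock(&var_lock);
}

int main()
{
  StrVar init_connect = { strdup(""), 0 };
  update_str_var(&init_connect, "SET autocommit=0");
  char buf[128];
  read_str_var(&init_connect, buf, sizeof(buf));
  std::printf("init_connect='%s'\n", buf);
  free(init_connect.value);
  return 0;
}

In SQL terms this backs statements like SET GLOBAL init_connect='SET autocommit=0', with DEFAULT (the NULL var case in the helper) resetting the value to an empty string.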
+ -1 : 0; + else + { + my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), "SUPER"); + return 1; + } +} + +static bool sys_update_ftb_syntax(THD *thd, set_var * var) +{ + strmake(ft_boolean_syntax, var->value->str_value.c_ptr(), + sizeof(ft_boolean_syntax)-1); + return 0; +} + +static void sys_default_ftb_syntax(THD *thd, enum_var_type type) +{ + strmake(ft_boolean_syntax, def_ft_boolean_syntax, + sizeof(ft_boolean_syntax)-1); +} + /* The following 3 functions need to be changed in 4.1 when we allow one to change character sets */ -static bool sys_check_charset(THD *thd, set_var *var) +static int sys_check_charset(THD *thd, set_var *var) { return 0; } @@ -728,6 +1122,7 @@ static void fix_tx_isolation(THD *thd, enum_var_type type) If we are changing the thread variable, we have to copy it to NET too */ +#ifdef HAVE_REPLICATION static void fix_net_read_timeout(THD *thd, enum_var_type type) { if (type != OPT_GLOBAL) @@ -746,20 +1141,39 @@ static void fix_net_retry_count(THD *thd, enum_var_type type) if (type != OPT_GLOBAL) thd->net.retry_count=thd->variables.net_retry_count; } +#else /* HAVE_REPLICATION */ +static void fix_net_read_timeout(THD *thd __attribute__(unused), + enum_var_type type __attribute__(unused)) +{} +static void fix_net_write_timeout(THD *thd __attribute__(unused), + enum_var_type type __attribute__(unused)) +{} +static void fix_net_retry_count(THD *thd __attribute__(unused), + enum_var_type type __attribute__(unused)) +{} +#endif /* HAVE_REPLICATION */ static void fix_query_cache_size(THD *thd, enum_var_type type) { #ifdef HAVE_QUERY_CACHE + ulong requested= query_cache_size; query_cache.resize(query_cache_size); + if (requested != query_cache_size) + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_QC_RESIZE, ER(ER_WARN_QC_RESIZE), + requested, query_cache_size); #endif } -static void fix_key_buffer_size(THD *thd, enum_var_type type) +#ifdef HAVE_QUERY_CACHE +static void fix_query_cache_min_res_unit(THD *thd, enum_var_type type) { - ha_resize_key_cache(); + query_cache_min_res_unit= + query_cache.set_min_res_unit(query_cache_min_res_unit); } +#endif extern void fix_delay_key_write(THD *thd, enum_var_type type) @@ -784,8 +1198,10 @@ static void fix_max_binlog_size(THD *thd, enum_var_type type) DBUG_PRINT("info",("max_binlog_size=%lu max_relay_log_size=%lu", max_binlog_size, max_relay_log_size)); mysql_bin_log.set_max_size(max_binlog_size); +#ifdef HAVE_REPLICATION if (!max_relay_log_size) active_mi->rli.relay_log.set_max_size(max_binlog_size); +#endif DBUG_VOID_RETURN; } @@ -794,23 +1210,41 @@ static void fix_max_relay_log_size(THD *thd, enum_var_type type) DBUG_ENTER("fix_max_relay_log_size"); DBUG_PRINT("info",("max_binlog_size=%lu max_relay_log_size=%lu", max_binlog_size, max_relay_log_size)); +#ifdef HAVE_REPLICATION active_mi->rli.relay_log.set_max_size(max_relay_log_size ? 
max_relay_log_size: max_binlog_size); +#endif DBUG_VOID_RETURN; } +static int check_max_delayed_threads(THD *thd, set_var *var) +{ + longlong val= var->value->val_int(); + if (var->type != OPT_GLOBAL && val != 0 && + val != (longlong) global_system_variables.max_insert_delayed_threads) + { + char buf[64]; + my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), var->var->name, llstr(val, buf)); + return 1; + } + return 0; +} + + static void fix_max_connections(THD *thd, enum_var_type type) { +#ifndef EMBEDDED_LIBRARY resize_thr_alarm(max_connections + global_system_variables.max_insert_delayed_threads + 10); +#endif } static void fix_thd_mem_root(THD *thd, enum_var_type type) { if (type != OPT_GLOBAL) - reset_root_defaults(&thd->mem_root, + reset_root_defaults(thd->mem_root, thd->variables.query_alloc_block_size, thd->variables.query_prealloc_size); } @@ -824,33 +1258,52 @@ static void fix_trans_mem_root(THD *thd, enum_var_type type) thd->variables.trans_prealloc_size); } + static void fix_server_id(THD *thd, enum_var_type type) { server_id_supplied = 1; } -bool sys_var_long_ptr::update(THD *thd, set_var *var) + +sys_var_long_ptr:: +sys_var_long_ptr(const char *name_arg, ulong *value_ptr, + sys_after_update_func after_update_arg) + :sys_var_long_ptr_global(name_arg, value_ptr, + &LOCK_global_system_variables, after_update_arg) +{} + + +bool sys_var_long_ptr_global::check(THD *thd, set_var *var) { - ulonglong tmp= var->value->val_int(); - pthread_mutex_lock(&LOCK_global_system_variables); + longlong v= var->value->val_int(); + var->save_result.ulonglong_value= v < 0 ? 0 : v; + return 0; +} + +bool sys_var_long_ptr_global::update(THD *thd, set_var *var) +{ + ulonglong tmp= var->save_result.ulonglong_value; + pthread_mutex_lock(guard); if (option_limits) *value= (ulong) getopt_ull_limit_value(tmp, option_limits); else *value= (ulong) tmp; - pthread_mutex_unlock(&LOCK_global_system_variables); + pthread_mutex_unlock(guard); return 0; } -void sys_var_long_ptr::set_default(THD *thd, enum_var_type type) +void sys_var_long_ptr_global::set_default(THD *thd, enum_var_type type) { + pthread_mutex_lock(guard); *value= (ulong) option_limits->def_value; + pthread_mutex_unlock(guard); } bool sys_var_ulonglong_ptr::update(THD *thd, set_var *var) { - ulonglong tmp= var->value->val_int(); + ulonglong tmp= var->save_result.ulonglong_value; pthread_mutex_lock(&LOCK_global_system_variables); if (option_limits) *value= (ulonglong) getopt_ull_limit_value(tmp, option_limits); @@ -889,15 +1342,20 @@ bool sys_var_enum::update(THD *thd, set_var *var) } -byte *sys_var_enum::value_ptr(THD *thd, enum_var_type type) +byte *sys_var_enum::value_ptr(THD *thd, enum_var_type type, LEX_STRING *base) { return (byte*) enum_names->type_names[*value]; } +bool sys_var_thd_ulong::check(THD *thd, set_var *var) +{ + return (sys_var_thd::check(thd, var) || + (check_func && (*check_func)(thd, var))); +} bool sys_var_thd_ulong::update(THD *thd, set_var *var) { - ulonglong tmp= var->value->val_int(); + ulonglong tmp= var->save_result.ulonglong_value; /* Don't use bigger value than given with --maximum-variable-name=.. 
*/ if ((ulong) tmp > max_system_variables.*offset) @@ -925,7 +1383,8 @@ void sys_var_thd_ulong::set_default(THD *thd, enum_var_type type) } -byte *sys_var_thd_ulong::value_ptr(THD *thd, enum_var_type type) +byte *sys_var_thd_ulong::value_ptr(THD *thd, enum_var_type type, + LEX_STRING *base) { if (type == OPT_GLOBAL) return (byte*) &(global_system_variables.*offset); @@ -935,7 +1394,7 @@ byte *sys_var_thd_ulong::value_ptr(THD *thd, enum_var_type type) bool sys_var_thd_ha_rows::update(THD *thd, set_var *var) { - ulonglong tmp= var->value->val_int(); + ulonglong tmp= var->save_result.ulonglong_value; /* Don't use bigger value than given with --maximum-variable-name=.. */ if ((ha_rows) tmp > max_system_variables.*offset) @@ -970,17 +1429,17 @@ void sys_var_thd_ha_rows::set_default(THD *thd, enum_var_type type) } -byte *sys_var_thd_ha_rows::value_ptr(THD *thd, enum_var_type type) +byte *sys_var_thd_ha_rows::value_ptr(THD *thd, enum_var_type type, + LEX_STRING *base) { if (type == OPT_GLOBAL) return (byte*) &(global_system_variables.*offset); return (byte*) &(thd->variables.*offset); } - bool sys_var_thd_ulonglong::update(THD *thd, set_var *var) { - ulonglong tmp= var->value->val_int(); + ulonglong tmp= var->save_result.ulonglong_value; if (tmp > max_system_variables.*offset) tmp= max_system_variables.*offset; @@ -1005,7 +1464,7 @@ void sys_var_thd_ulonglong::set_default(THD *thd, enum_var_type type) if (type == OPT_GLOBAL) { pthread_mutex_lock(&LOCK_global_system_variables); - global_system_variables.*offset= (ulong) option_limits->def_value; + global_system_variables.*offset= (ulonglong) option_limits->def_value; pthread_mutex_unlock(&LOCK_global_system_variables); } else @@ -1013,7 +1472,8 @@ void sys_var_thd_ulonglong::set_default(THD *thd, enum_var_type type) } -byte *sys_var_thd_ulonglong::value_ptr(THD *thd, enum_var_type type) +byte *sys_var_thd_ulonglong::value_ptr(THD *thd, enum_var_type type, + LEX_STRING *base) { if (type == OPT_GLOBAL) return (byte*) &(global_system_variables.*offset); @@ -1040,7 +1500,8 @@ void sys_var_thd_bool::set_default(THD *thd, enum_var_type type) } -byte *sys_var_thd_bool::value_ptr(THD *thd, enum_var_type type) +byte *sys_var_thd_bool::value_ptr(THD *thd, enum_var_type type, + LEX_STRING *base) { if (type == OPT_GLOBAL) return (byte*) &(global_system_variables.*offset); @@ -1052,14 +1513,14 @@ bool sys_var::check_enum(THD *thd, set_var *var, TYPELIB *enum_names) { char buff[80]; const char *value; - String str(buff,sizeof(buff)), *res; + String str(buff, sizeof(buff), system_charset_info), *res; if (var->value->result_type() == STRING_RESULT) { if (!(res=var->value->val_str(&str)) || ((long) (var->save_result.ulong_value= - (ulong) find_type(res->c_ptr(), enum_names, 3)-1)) - < 0) + (ulong) find_type(enum_names, res->ptr(), + res->length(),1)-1)) < 0) { value= res ? 
res->c_ptr() : "NULL"; goto err; @@ -1083,23 +1544,64 @@ err: return 1; } + +bool sys_var::check_set(THD *thd, set_var *var, TYPELIB *enum_names) +{ + bool not_used; + char buff[80], *error= 0; + uint error_len= 0; + String str(buff, sizeof(buff), system_charset_info), *res; + + if (var->value->result_type() == STRING_RESULT) + { + if (!(res= var->value->val_str(&str))) + { + strmov(buff, "NULL"); + goto err; + } + var->save_result.ulong_value= ((ulong) + find_set(enum_names, res->c_ptr(), + res->length(), + NULL, + &error, &error_len, + ¬_used)); + if (error_len) + { + strmake(buff, error, min(sizeof(buff), error_len)); + goto err; + } + } + else + { + ulonglong tmp= var->value->val_int(); + if (tmp >= enum_names->count) + { + llstr(tmp, buff); + goto err; + } + var->save_result.ulong_value= (ulong) tmp; // Save for update + } + return 0; + +err: + my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, buff); + return 1; +} + + /* Return an Item for a variable. Used with @@[global.]variable_name - If type is not given, return local value if exists, else global - - We have to use netprintf() instead of my_error() here as this is - called on the parsing stage. */ -Item *sys_var::item(THD *thd, enum_var_type var_type) +Item *sys_var::item(THD *thd, enum_var_type var_type, LEX_STRING *base) { if (check_type(var_type)) { if (var_type != OPT_DEFAULT) { - net_printf(&thd->net,ER_INCORRECT_GLOBAL_LOCAL_VAR, - name, var_type == OPT_GLOBAL ? "LOCAL" : "GLOBAL"); + my_error(ER_INCORRECT_GLOBAL_LOCAL_VAR, MYF(0), + name, var_type == OPT_GLOBAL ? "SESSION" : "GLOBAL"); return 0; } /* As there was no local variable, return the global value */ @@ -1107,12 +1609,18 @@ Item *sys_var::item(THD *thd, enum_var_type var_type) } switch (type()) { case SHOW_LONG: - return new Item_uint((int32) *(ulong*) value_ptr(thd, var_type)); + { + ulong value; + pthread_mutex_lock(&LOCK_global_system_variables); + value= *(ulong*) value_ptr(thd, var_type, base); + pthread_mutex_unlock(&LOCK_global_system_variables); + return new Item_uint((int32) value); + } case SHOW_LONGLONG: { longlong value; pthread_mutex_lock(&LOCK_global_system_variables); - value= *(longlong*) value_ptr(thd, var_type); + value= *(longlong*) value_ptr(thd, var_type, base); pthread_mutex_unlock(&LOCK_global_system_variables); return new Item_int(value); } @@ -1120,19 +1628,30 @@ Item *sys_var::item(THD *thd, enum_var_type var_type) { ha_rows value; pthread_mutex_lock(&LOCK_global_system_variables); - value= *(ha_rows*) value_ptr(thd, var_type); + value= *(ha_rows*) value_ptr(thd, var_type, base); pthread_mutex_unlock(&LOCK_global_system_variables); return new Item_int((longlong) value); } case SHOW_MY_BOOL: - return new Item_int((int32) *(my_bool*) value_ptr(thd, var_type),1); + return new Item_int((int32) *(my_bool*) value_ptr(thd, var_type, base),1); case SHOW_CHAR: { - char *str= (char*) value_ptr(thd, var_type); - return new Item_string(str,strlen(str)); + Item *tmp; + pthread_mutex_lock(&LOCK_global_system_variables); + char *str= (char*) value_ptr(thd, var_type, base); + if (str) + tmp= new Item_string(str, strlen(str), + system_charset_info, DERIVATION_SYSCONST); + else + { + tmp= new Item_null(); + tmp->collation.set(system_charset_info, DERIVATION_SYSCONST); + } + pthread_mutex_unlock(&LOCK_global_system_variables); + return tmp; } default: - net_printf(&thd->net, ER_VAR_CANT_BE_READ, name); + my_error(ER_VAR_CANT_BE_READ, MYF(0), name); } return 0; } @@ -1157,7 +1676,8 @@ void sys_var_thd_enum::set_default(THD *thd, enum_var_type type) } -byte 
*sys_var_thd_enum::value_ptr(THD *thd, enum_var_type type) +byte *sys_var_thd_enum::value_ptr(THD *thd, enum_var_type type, + LEX_STRING *base) { ulong tmp= ((type == OPT_GLOBAL) ? global_system_variables.*offset : @@ -1165,16 +1685,21 @@ byte *sys_var_thd_enum::value_ptr(THD *thd, enum_var_type type) return (byte*) enum_names->type_names[tmp]; } +bool sys_var_thd_bit::check(THD *thd, set_var *var) +{ + return (check_enum(thd, var, &bool_typelib) || + (check_func && (*check_func)(thd, var))); +} bool sys_var_thd_bit::update(THD *thd, set_var *var) { int res= (*update_func)(thd, var); - thd->lex.select_lex.options=thd->options; return res; } -byte *sys_var_thd_bit::value_ptr(THD *thd, enum_var_type type) +byte *sys_var_thd_bit::value_ptr(THD *thd, enum_var_type type, + LEX_STRING *base) { /* If reverse is 0 (default) return 1 if bit is set. @@ -1186,61 +1711,663 @@ byte *sys_var_thd_bit::value_ptr(THD *thd, enum_var_type type) } -bool sys_var_thd_conv_charset::check(THD *thd, set_var *var) +/* Update a date_time format variable based on given value */ + +void sys_var_thd_date_time_format::update2(THD *thd, enum_var_type type, + DATE_TIME_FORMAT *new_value) { - CONVERT *tmp; - char buff[80]; - String str(buff,sizeof(buff)), *res; + DATE_TIME_FORMAT *old; + DBUG_ENTER("sys_var_date_time_format::update2"); + DBUG_DUMP("positions",(char*) new_value->positions, + sizeof(new_value->positions)); - if (!var->value) // Default value + if (type == OPT_GLOBAL) { - var->save_result.convert= (var->type != OPT_GLOBAL ? - global_system_variables.convert_set - : (CONVERT*) 0); - return 0; + pthread_mutex_lock(&LOCK_global_system_variables); + old= (global_system_variables.*offset); + (global_system_variables.*offset)= new_value; + pthread_mutex_unlock(&LOCK_global_system_variables); } + else + { + old= (thd->variables.*offset); + (thd->variables.*offset)= new_value; + } + my_free((char*) old, MYF(MY_ALLOW_ZERO_PTR)); + DBUG_VOID_RETURN; +} + + +bool sys_var_thd_date_time_format::update(THD *thd, set_var *var) +{ + DATE_TIME_FORMAT *new_value; + /* We must make a copy of the last value to get it into normal memory */ + new_value= date_time_format_copy((THD*) 0, + var->save_result.date_time_format); + if (!new_value) + return 1; // Out of memory + update2(thd, var->type, new_value); // Can't fail + return 0; +} + + +bool sys_var_thd_date_time_format::check(THD *thd, set_var *var) +{ + char buff[80]; + String str(buff,sizeof(buff), system_charset_info), *res; + DATE_TIME_FORMAT *format; + if (!(res=var->value->val_str(&str))) - res= &empty_string; + res= &my_empty_string; + + if (!(format= date_time_format_make(date_time_type, + res->ptr(), res->length()))) + { + my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, res->c_ptr()); + return 1; + } + + /* + We must copy result to thread space to not get a memory leak if + update is aborted + */ + var->save_result.date_time_format= date_time_format_copy(thd, format); + my_free((char*) format, MYF(0)); + return var->save_result.date_time_format == 0; +} + + +void sys_var_thd_date_time_format::set_default(THD *thd, enum_var_type type) +{ + DATE_TIME_FORMAT *res= 0; + + if (type == OPT_GLOBAL) + { + const char *format; + if ((format= opt_date_time_formats[date_time_type])) + res= date_time_format_make(date_time_type, format, strlen(format)); + } + else + { + /* Make copy with malloc */ + res= date_time_format_copy((THD *) 0, global_system_variables.*offset); + } + + if (res) // Should always be true + update2(thd, type, res); +} + + +byte 
*sys_var_thd_date_time_format::value_ptr(THD *thd, enum_var_type type, + LEX_STRING *base) +{ + if (type == OPT_GLOBAL) + { + char *res; + /* + We do a copy here just to be sure things will work even if someone + is modifying the original string while the copy is accessed + (Can't happen now in SQL SHOW, but this is a good safety for the future) + */ + res= thd->strmake((global_system_variables.*offset)->format.str, + (global_system_variables.*offset)->format.length); + return (byte*) res; + } + return (byte*) (thd->variables.*offset)->format.str; +} + + +typedef struct old_names_map_st +{ + const char *old_name; + const char *new_name; +} my_old_conv; + +static my_old_conv old_conv[]= +{ + { "cp1251_koi8" , "cp1251" }, + { "cp1250_latin2" , "cp1250" }, + { "kam_latin2" , "keybcs2" }, + { "mac_latin2" , "MacRoman" }, + { "macce_latin2" , "MacCE" }, + { "pc2_latin2" , "pclatin2" }, + { "vga_latin2" , "pclatin1" }, + { "koi8_cp1251" , "koi8r" }, + { "win1251ukr_koi8_ukr" , "win1251ukr" }, + { "koi8_ukr_win1251ukr" , "koi8u" }, + { NULL , NULL } +}; + +CHARSET_INFO *get_old_charset_by_name(const char *name) +{ + my_old_conv *conv; + + for (conv= old_conv; conv->old_name; conv++) + { + if (!my_strcasecmp(&my_charset_latin1, name, conv->old_name)) + return get_charset_by_csname(conv->new_name, MY_CS_PRIMARY, MYF(0)); + } + return NULL; +} + + +bool sys_var_collation::check(THD *thd, set_var *var) +{ + CHARSET_INFO *tmp; + + if (var->value->result_type() == STRING_RESULT) + { + char buff[80]; + String str(buff,sizeof(buff), system_charset_info), *res; + if (!(res=var->value->val_str(&str))) + { + my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, "NULL"); + return 1; + } + if (!(tmp=get_charset_by_name(res->c_ptr(),MYF(0)))) + { + my_error(ER_UNKNOWN_COLLATION, MYF(0), res->c_ptr()); + return 1; + } + } + else // INT_RESULT + { + if (!(tmp=get_charset((int) var->value->val_int(),MYF(0)))) + { + char buf[20]; + int10_to_str((int) var->value->val_int(), buf, -10); + my_error(ER_UNKNOWN_COLLATION, MYF(0), buf); + return 1; + } + } + var->save_result.charset= tmp; // Save for update + return 0; +} + + +bool sys_var_character_set::check(THD *thd, set_var *var) +{ + CHARSET_INFO *tmp; + + if (var->value->result_type() == STRING_RESULT) + { + char buff[80]; + String str(buff,sizeof(buff), system_charset_info), *res; + if (!(res=var->value->val_str(&str))) + { + if (!nullable) + { + my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, "NULL"); + return 1; + } + tmp= NULL; + } + else if (!(tmp=get_charset_by_csname(res->c_ptr(),MY_CS_PRIMARY,MYF(0))) && + !(tmp=get_old_charset_by_name(res->c_ptr()))) + { + my_error(ER_UNKNOWN_CHARACTER_SET, MYF(0), res->c_ptr()); + return 1; + } + } + else // INT_RESULT + { + if (!(tmp=get_charset((int) var->value->val_int(),MYF(0)))) + { + char buf[20]; + int10_to_str((int) var->value->val_int(), buf, -10); + my_error(ER_UNKNOWN_CHARACTER_SET, MYF(0), buf); + return 1; + } + } + var->save_result.charset= tmp; // Save for update + return 0; +} + + +bool sys_var_character_set::update(THD *thd, set_var *var) +{ + ci_ptr(thd,var->type)[0]= var->save_result.charset; + thd->update_charset(); + return 0; +} + + +byte *sys_var_character_set::value_ptr(THD *thd, enum_var_type type, + LEX_STRING *base) +{ + CHARSET_INFO *cs= ci_ptr(thd,type)[0]; + return cs ? 
(byte*) cs->csname : (byte*) NULL; +} + - if (!(tmp=get_convert_set(res->c_ptr()))) +CHARSET_INFO ** sys_var_character_set_connection::ci_ptr(THD *thd, + enum_var_type type) +{ + if (type == OPT_GLOBAL) + return &global_system_variables.collation_connection; + else + return &thd->variables.collation_connection; +} + + +void sys_var_character_set_connection::set_default(THD *thd, + enum_var_type type) +{ + if (type == OPT_GLOBAL) + global_system_variables.collation_connection= default_charset_info; + else + { + thd->variables.collation_connection= global_system_variables.collation_connection; + thd->update_charset(); + } +} + + +CHARSET_INFO ** sys_var_character_set_client::ci_ptr(THD *thd, + enum_var_type type) +{ + if (type == OPT_GLOBAL) + return &global_system_variables.character_set_client; + else + return &thd->variables.character_set_client; +} + + +void sys_var_character_set_client::set_default(THD *thd, enum_var_type type) +{ + if (type == OPT_GLOBAL) + global_system_variables.character_set_client= default_charset_info; + else + { + thd->variables.character_set_client= (global_system_variables. + character_set_client); + thd->update_charset(); + } +} + + +CHARSET_INFO ** +sys_var_character_set_results::ci_ptr(THD *thd, enum_var_type type) +{ + if (type == OPT_GLOBAL) + return &global_system_variables.character_set_results; + else + return &thd->variables.character_set_results; +} + + +void sys_var_character_set_results::set_default(THD *thd, enum_var_type type) +{ + if (type == OPT_GLOBAL) + global_system_variables.character_set_results= default_charset_info; + else + { + thd->variables.character_set_results= (global_system_variables. + character_set_results); + thd->update_charset(); + } +} + + +CHARSET_INFO ** +sys_var_character_set_server::ci_ptr(THD *thd, enum_var_type type) +{ + if (type == OPT_GLOBAL) + return &global_system_variables.collation_server; + else + return &thd->variables.collation_server; +} + + +void sys_var_character_set_server::set_default(THD *thd, enum_var_type type) +{ + if (type == OPT_GLOBAL) + global_system_variables.collation_server= default_charset_info; + else + { + thd->variables.collation_server= global_system_variables.collation_server; + thd->update_charset(); + } +} + +#if defined(HAVE_REPLICATION) && (MYSQL_VERSION_ID < 50000) +bool sys_var_character_set_server::check(THD *thd, set_var *var) +{ + if ((var->type == OPT_GLOBAL) && + (mysql_bin_log.is_open() || + active_mi->slave_running || active_mi->rli.slave_running)) { - my_error(ER_UNKNOWN_CHARACTER_SET, MYF(0), res->c_ptr()); + my_printf_error(0, "Binary logging and replication forbid changing \ +the global server character set or collation", MYF(0)); return 1; } - var->save_result.convert=tmp; // Save for update + return sys_var_character_set::check(thd,var); +} +#endif + +CHARSET_INFO ** sys_var_character_set_database::ci_ptr(THD *thd, + enum_var_type type) +{ + if (type == OPT_GLOBAL) + return &global_system_variables.collation_database; + else + return &thd->variables.collation_database; +} + + +void sys_var_character_set_database::set_default(THD *thd, enum_var_type type) +{ + if (type == OPT_GLOBAL) + global_system_variables.collation_database= default_charset_info; + else + { + thd->variables.collation_database= thd->db_charset; + thd->update_charset(); + } +} + + +bool sys_var_collation_connection::update(THD *thd, set_var *var) +{ + if (var->type == OPT_GLOBAL) + global_system_variables.collation_connection= var->save_result.charset; + else + { + thd->variables.collation_connection= 
var->save_result.charset; + thd->update_charset(); + } return 0; } -bool sys_var_thd_conv_charset::update(THD *thd, set_var *var) +byte *sys_var_collation_connection::value_ptr(THD *thd, enum_var_type type, + LEX_STRING *base) +{ + CHARSET_INFO *cs= ((type == OPT_GLOBAL) ? + global_system_variables.collation_connection : + thd->variables.collation_connection); + return cs ? (byte*) cs->name : (byte*) "NULL"; +} + + +void sys_var_collation_connection::set_default(THD *thd, enum_var_type type) +{ + if (type == OPT_GLOBAL) + global_system_variables.collation_connection= default_charset_info; + else + { + thd->variables.collation_connection= (global_system_variables. + collation_connection); + thd->update_charset(); + } +} + +bool sys_var_collation_database::update(THD *thd, set_var *var) { if (var->type == OPT_GLOBAL) - global_system_variables.convert_set= var->save_result.convert; + global_system_variables.collation_database= var->save_result.charset; else - thd->lex.convert_set= thd->variables.convert_set= - var->save_result.convert; + { + thd->variables.collation_database= var->save_result.charset; + thd->update_charset(); + } return 0; } -byte *sys_var_thd_conv_charset::value_ptr(THD *thd, enum_var_type type) +byte *sys_var_collation_database::value_ptr(THD *thd, enum_var_type type, + LEX_STRING *base) { - CONVERT *conv= ((type == OPT_GLOBAL) ? - global_system_variables.convert_set : - thd->variables.convert_set); - return conv ? (byte*) conv->name : (byte*) ""; + CHARSET_INFO *cs= ((type == OPT_GLOBAL) ? + global_system_variables.collation_database : + thd->variables.collation_database); + return cs ? (byte*) cs->name : (byte*) "NULL"; } -void sys_var_thd_conv_charset::set_default(THD *thd, enum_var_type type) +void sys_var_collation_database::set_default(THD *thd, enum_var_type type) +{ + if (type == OPT_GLOBAL) + global_system_variables.collation_database= default_charset_info; + else + { + thd->variables.collation_database= (global_system_variables. + collation_database); + thd->update_charset(); + } +} + +#if defined(HAVE_REPLICATION) && (MYSQL_VERSION_ID < 50000) +bool sys_var_collation_server::check(THD *thd, set_var *var) { - thd->variables.convert_set= global_system_variables.convert_set; + if ((var->type == OPT_GLOBAL) && + (mysql_bin_log.is_open() || + active_mi->slave_running || active_mi->rli.slave_running)) + { + my_printf_error(0, "Binary logging and replication forbid changing \ +the global server character set or collation", MYF(0)); + return 1; + } + return sys_var_collation::check(thd,var); } +#endif +bool sys_var_collation_server::update(THD *thd, set_var *var) +{ + if (var->type == OPT_GLOBAL) + global_system_variables.collation_server= var->save_result.charset; + else + { + thd->variables.collation_server= var->save_result.charset; + thd->update_charset(); + } + return 0; +} + + +byte *sys_var_collation_server::value_ptr(THD *thd, enum_var_type type, + LEX_STRING *base) +{ + CHARSET_INFO *cs= ((type == OPT_GLOBAL) ? + global_system_variables.collation_server : + thd->variables.collation_server); + return cs ? (byte*) cs->name : (byte*) "NULL"; +} + + +void sys_var_collation_server::set_default(THD *thd, enum_var_type type) +{ + if (type == OPT_GLOBAL) + global_system_variables.collation_server= default_charset_info; + else + { + thd->variables.collation_server= (global_system_variables. 
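The set_default() methods above all follow the same shape: SET GLOBAL writes the server-wide value, while SET SESSION ... = DEFAULT re-copies the current global into the session slot (followed by thd->update_charset() to refresh derived state). The sketch below isolates that global-default/per-session relationship; GlobalVars, SessionVars and the literal collation names are illustrative only.

// Simplified stand-ins: GlobalVars/SessionVars model only one variable, and the
// collation names are just example strings, not CHARSET_INFO objects.
#include <cstdio>
#include <string>

using Charset = std::string;

struct GlobalVars  { Charset collation_server = "latin1_swedish_ci"; };
struct SessionVars { Charset collation_server; };

struct Session {
  SessionVars vars;
  // A new session copies the global value at connect time.
  explicit Session(const GlobalVars &g) { vars.collation_server = g.collation_server; }

  // SET SESSION collation_server = <value>
  void set_session(const Charset &cs) { vars.collation_server = cs; }

  // SET SESSION collation_server = DEFAULT re-reads the *current* global value.
  void set_session_default(const GlobalVars &g) { vars.collation_server = g.collation_server; }
};

int main() {
  GlobalVars global_vars;
  Session s1(global_vars);                             // inherits latin1_swedish_ci
  global_vars.collation_server = "utf8_general_ci";    // SET GLOBAL collation_server = ...
  Session s2(global_vars);                             // new session sees the new global
  s1.set_session_default(global_vars);                 // DEFAULT re-copies the global
  std::printf("s1=%s s2=%s\n",
              s1.vars.collation_server.c_str(), s2.vars.collation_server.c_str());
  return 0;
}

New connections pick up whatever the global holds at connect time, which is why changing the global never disturbs sessions that are already running.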
+						  collation_server);
+    thd->update_charset();
+  }
+}
+
+
+LEX_STRING default_key_cache_base= {(char *) "default", 7 };
+
+static KEY_CACHE zero_key_cache;
+
+KEY_CACHE *get_key_cache(LEX_STRING *cache_name)
+{
+  safe_mutex_assert_owner(&LOCK_global_system_variables);
+  if (!cache_name || ! cache_name->length)
+    cache_name= &default_key_cache_base;
+  return ((KEY_CACHE*) find_named(&key_caches,
+                                  cache_name->str, cache_name->length, 0));
+}
+
+
+byte *sys_var_key_cache_param::value_ptr(THD *thd, enum_var_type type,
+					 LEX_STRING *base)
+{
+  KEY_CACHE *key_cache= get_key_cache(base);
+  if (!key_cache)
+    key_cache= &zero_key_cache;
+  return (byte*) key_cache + offset ;
+}
+
+
+bool sys_var_key_buffer_size::update(THD *thd, set_var *var)
+{
+  ulonglong tmp= var->save_result.ulonglong_value;
+  LEX_STRING *base_name= &var->base;
+  KEY_CACHE *key_cache;
+  bool error= 0;
+
+  /* If no basename, assume it's for the key cache named 'default' */
+  if (!base_name->length)
+    base_name= &default_key_cache_base;
+
+  pthread_mutex_lock(&LOCK_global_system_variables);
+  key_cache= get_key_cache(base_name);
+
+  if (!key_cache)
+  {
+    /* Key cache didn't exist */
+    if (!tmp)					// Tried to delete cache
+      goto end;					// Ok, nothing to do
+    if (!(key_cache= create_key_cache(base_name->str, base_name->length)))
+    {
+      error= 1;
+      goto end;
+    }
+  }
+
+  /*
+    Abort if some other thread is changing the key cache
+    TODO: This should be changed so that we wait until the previous
+    assignment is done and then do the new assign
+  */
+  if (key_cache->in_init)
+    goto end;
+
+  if (!tmp)					// Zero size means delete
+  {
+    if (key_cache == dflt_key_cache)
+      goto end;					// Ignore default key cache
+
+    if (key_cache->key_cache_inited)		// If inited
+    {
+      /*
+	Move tables using this key cache to the default key cache
+	and clear the old key cache.
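
	A minimal sketch of this branch's effect (illustration only, using
	names from this function; not additional code in the patch):

	  error= reassign_keycache_tables(thd, key_cache, dflt_key_cache);
	  // tables now use dflt_key_cache; the emptied KEY_CACHE object is
	  // intentionally kept, see the note further down in this function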
+ */ + NAMED_LIST *list; + key_cache= (KEY_CACHE *) find_named(&key_caches, base_name->str, + base_name->length, &list); + key_cache->in_init= 1; + pthread_mutex_unlock(&LOCK_global_system_variables); + error= reassign_keycache_tables(thd, key_cache, dflt_key_cache); + pthread_mutex_lock(&LOCK_global_system_variables); + key_cache->in_init= 0; + } + /* + We don't delete the key cache as some running threads my still be + in the key cache code with a pointer to the deleted (empty) key cache + */ + goto end; + } + + key_cache->param_buff_size= + (ulonglong) getopt_ull_limit_value(tmp, option_limits); + + /* If key cache didn't existed initialize it, else resize it */ + key_cache->in_init= 1; + pthread_mutex_unlock(&LOCK_global_system_variables); + + if (!key_cache->key_cache_inited) + error= (bool) (ha_init_key_cache("", key_cache)); + else + error= (bool)(ha_resize_key_cache(key_cache)); + + pthread_mutex_lock(&LOCK_global_system_variables); + key_cache->in_init= 0; + +end: + pthread_mutex_unlock(&LOCK_global_system_variables); + return error; +} + + +bool sys_var_key_cache_long::update(THD *thd, set_var *var) +{ + ulong tmp= (ulong) var->value->val_int(); + LEX_STRING *base_name= &var->base; + bool error= 0; + + if (!base_name->length) + base_name= &default_key_cache_base; + + pthread_mutex_lock(&LOCK_global_system_variables); + KEY_CACHE *key_cache= get_key_cache(base_name); + + if (!key_cache && !(key_cache= create_key_cache(base_name->str, + base_name->length))) + { + error= 1; + goto end; + } + + /* + Abort if some other thread is changing the key cache + TODO: This should be changed so that we wait until the previous + assignment is done and then do the new assign + */ + if (key_cache->in_init) + goto end; + + *((ulong*) (((char*) key_cache) + offset))= + (ulong) getopt_ull_limit_value(tmp, option_limits); + + /* + Don't create a new key cache if it didn't exist + (key_caches are created only when the user sets block_size) + */ + key_cache->in_init= 1; + + pthread_mutex_unlock(&LOCK_global_system_variables); + + error= (bool) (ha_resize_key_cache(key_cache)); + + pthread_mutex_lock(&LOCK_global_system_variables); + key_cache->in_init= 0; + +end: + pthread_mutex_unlock(&LOCK_global_system_variables); + return error; +} + + +/***************************************************************************** + Functions to handle SET NAMES and SET CHARACTER SET +*****************************************************************************/ + +int set_var_collation_client::check(THD *thd) +{ + return 0; +} + +int set_var_collation_client::update(THD *thd) +{ + thd->variables.character_set_client= character_set_client; + thd->variables.character_set_results= character_set_results; + thd->variables.collation_connection= collation_connection; + thd->update_charset(); + thd->protocol_simple.init(thd); + thd->protocol_prep.init(thd); + return 0; +} + +/****************************************************************************/ bool sys_var_timestamp::update(THD *thd, set_var *var) { - thd->set_time((time_t) var->value->val_int()); + thd->set_time((time_t) var->save_result.ulonglong_value); return 0; } @@ -1251,7 +2378,8 @@ void sys_var_timestamp::set_default(THD *thd, enum_var_type type) } -byte *sys_var_timestamp::value_ptr(THD *thd, enum_var_type type) +byte *sys_var_timestamp::value_ptr(THD *thd, enum_var_type type, + LEX_STRING *base) { thd->sys_var_tmp.long_value= (long) thd->start_time; return (byte*) &thd->sys_var_tmp.long_value; @@ -1260,12 +2388,13 @@ byte 
*sys_var_timestamp::value_ptr(THD *thd, enum_var_type type) bool sys_var_last_insert_id::update(THD *thd, set_var *var) { - thd->insert_id(var->value->val_int()); + thd->insert_id(var->save_result.ulonglong_value); return 0; } -byte *sys_var_last_insert_id::value_ptr(THD *thd, enum_var_type type) +byte *sys_var_last_insert_id::value_ptr(THD *thd, enum_var_type type, + LEX_STRING *base) { thd->sys_var_tmp.long_value= (long) thd->insert_id(); return (byte*) &thd->last_insert_id; @@ -1274,17 +2403,19 @@ byte *sys_var_last_insert_id::value_ptr(THD *thd, enum_var_type type) bool sys_var_insert_id::update(THD *thd, set_var *var) { - thd->next_insert_id=var->value->val_int(); + thd->next_insert_id= var->save_result.ulonglong_value; return 0; } -byte *sys_var_insert_id::value_ptr(THD *thd, enum_var_type type) +byte *sys_var_insert_id::value_ptr(THD *thd, enum_var_type type, + LEX_STRING *base) { return (byte*) &thd->current_insert_id; } +#ifdef HAVE_REPLICATION bool sys_var_slave_skip_counter::check(THD *thd, set_var *var) { int result= 0; @@ -1297,6 +2428,7 @@ bool sys_var_slave_skip_counter::check(THD *thd, set_var *var) } pthread_mutex_unlock(&active_mi->rli.run_lock); pthread_mutex_unlock(&LOCK_active_mi); + var->save_result.ulong_value= (ulong) var->value->val_int(); return result; } @@ -1313,7 +2445,7 @@ bool sys_var_slave_skip_counter::update(THD *thd, set_var *var) if (!active_mi->rli.slave_running) { pthread_mutex_lock(&active_mi->rli.data_lock); - active_mi->rli.slave_skip_counter= (ulong) var->value->val_int(); + active_mi->rli.slave_skip_counter= var->save_result.ulong_value; pthread_mutex_unlock(&active_mi->rli.data_lock); } pthread_mutex_unlock(&active_mi->rli.run_lock); @@ -1322,19 +2454,114 @@ bool sys_var_slave_skip_counter::update(THD *thd, set_var *var) } +bool sys_var_sync_binlog_period::update(THD *thd, set_var *var) +{ + pthread_mutex_t *lock_log= mysql_bin_log.get_log_lock(); + sync_binlog_period= (ulong) var->save_result.ulonglong_value; + /* + Must reset the counter otherwise it may already be beyond the new period + and so the new period will not be taken into account. Need mutex otherwise + might be cancelled by a simultanate ++ in MYSQL_LOG::write(). + */ + pthread_mutex_lock(lock_log); + sync_binlog_counter= 0; + pthread_mutex_unlock(lock_log); + return 0; +} +#endif /* HAVE_REPLICATION */ + bool sys_var_rand_seed1::update(THD *thd, set_var *var) { - thd->rand.seed1= (ulong) var->value->val_int(); + thd->rand.seed1= (ulong) var->save_result.ulonglong_value; return 0; } bool sys_var_rand_seed2::update(THD *thd, set_var *var) { - thd->rand.seed2= (ulong) var->value->val_int(); + thd->rand.seed2= (ulong) var->save_result.ulonglong_value; + return 0; +} + + +bool sys_var_thd_time_zone::check(THD *thd, set_var *var) +{ + char buff[MAX_TIME_ZONE_NAME_LENGTH]; + String str(buff, sizeof(buff), &my_charset_latin1); + String *res= var->value->val_str(&str); + +#if defined(HAVE_REPLICATION) && (MYSQL_VERSION_ID < 50000) + if ((var->type == OPT_GLOBAL) && + (mysql_bin_log.is_open() || + active_mi->slave_running || active_mi->rli.slave_running)) + { + my_printf_error(0, "Binary logging and replication forbid changing " + "of the global server time zone", MYF(0)); + return 1; + } +#endif + + if (!(var->save_result.time_zone= + my_tz_find(res, thd->lex->time_zone_tables_used))) + { + my_error(ER_UNKNOWN_TIME_ZONE, MYF(0), res ? 
res->c_ptr() : "NULL"); + return 1; + } + return 0; +} + + +bool sys_var_thd_time_zone::update(THD *thd, set_var *var) +{ + /* We are using Time_zone object found during check() phase. */ + if (var->type == OPT_GLOBAL) + { + pthread_mutex_lock(&LOCK_global_system_variables); + global_system_variables.time_zone= var->save_result.time_zone; + pthread_mutex_unlock(&LOCK_global_system_variables); + } + else + thd->variables.time_zone= var->save_result.time_zone; return 0; } +byte *sys_var_thd_time_zone::value_ptr(THD *thd, enum_var_type type, + LEX_STRING *base) +{ + /* + We can use ptr() instead of c_ptr() here because String contaning + time zone name is guaranteed to be zero ended. + */ + if (type == OPT_GLOBAL) + return (byte *)(global_system_variables.time_zone->get_name()->ptr()); + else + return (byte *)(thd->variables.time_zone->get_name()->ptr()); +} + + +void sys_var_thd_time_zone::set_default(THD *thd, enum_var_type type) +{ + pthread_mutex_lock(&LOCK_global_system_variables); + if (type == OPT_GLOBAL) + { + if (default_tz_name) + { + String str(default_tz_name, &my_charset_latin1); + /* + We are guaranteed to find this time zone since its existence + is checked during start-up. + */ + global_system_variables.time_zone= + my_tz_find(&str, thd->lex->time_zone_tables_used); + } + else + global_system_variables.time_zone= my_tz_SYSTEM; + } + else + thd->variables.time_zone= global_system_variables.time_zone; + pthread_mutex_unlock(&LOCK_global_system_variables); +} + /* Functions to update thd->options bits */ @@ -1380,6 +2607,17 @@ static bool set_option_autocommit(THD *thd, set_var *var) return 0; } +static int check_log_update(THD *thd, set_var *var) +{ +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (!(thd->master_access & SUPER_ACL)) + { + my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), "SUPER"); + return 1; + } +#endif + return 0; +} static bool set_log_update(THD *thd, set_var *var) { @@ -1390,6 +2628,51 @@ static bool set_log_update(THD *thd, set_var *var) return 0; } +static int check_pseudo_thread_id(THD *thd, set_var *var) +{ + var->save_result.ulonglong_value= var->value->val_int(); +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (thd->master_access & SUPER_ACL) + return 0; + else + { + my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), "SUPER"); + return 1; + } +#else + return 0; +#endif +} + +static byte *get_warning_count(THD *thd) +{ + thd->sys_var_tmp.long_value= + (thd->warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_NOTE] + + thd->warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_WARN]); + return (byte*) &thd->sys_var_tmp.long_value; +} + +static byte *get_error_count(THD *thd) +{ + thd->sys_var_tmp.long_value= + thd->warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_ERROR]; + return (byte*) &thd->sys_var_tmp.long_value; +} + + +static byte *get_have_innodb(THD *thd) +{ + return (byte*) show_comp_option_name[have_innodb]; +} + + +static byte *get_prepared_stmt_count(THD *thd) +{ + pthread_mutex_lock(&LOCK_prepared_stmt_count); + thd->sys_var_tmp.ulong_value= prepared_stmt_count; + pthread_mutex_unlock(&LOCK_prepared_stmt_count); + return (byte*) &thd->sys_var_tmp.ulong_value; +} /**************************************************************************** Main handling of variables: @@ -1449,10 +2732,9 @@ static byte *get_sys_var_length(const sys_var *var, uint *length, void set_var_init() { - extern struct my_option my_long_options[]; // From mysqld - - hash_init(&system_variable_hash,array_elements(sys_variables),0,0, - (hash_get_key) get_sys_var_length,0, HASH_CASE_INSENSITIVE); + 
hash_init(&system_variable_hash, system_charset_info, + array_elements(sys_variables),0,0, + (hash_get_key) get_sys_var_length,0,0); sys_var **var, **end; for (var= sys_variables, end= sys_variables+array_elements(sys_variables) ; var < end; @@ -1460,9 +2742,8 @@ void set_var_init() { (*var)->name_length= strlen((*var)->name); (*var)->option_limits= find_option(my_long_options, (*var)->name); - hash_insert(&system_variable_hash, (byte*) *var); + my_hash_insert(&system_variable_hash, (byte*) *var); } - /* Special cases Needed because MySQL can't find the limits for a variable it it has @@ -1503,7 +2784,7 @@ sys_var *find_sys_var(const char *str, uint length) length ? length : strlen(str)); if (!var) - net_printf(¤t_thd->net, ER_UNKNOWN_SYSTEM_VARIABLE, (char*) str); + net_printf(current_thd, ER_UNKNOWN_SYSTEM_VARIABLE, (char*) str); return var; } @@ -1532,19 +2813,56 @@ sys_var *find_sys_var(const char *str, uint length) int sql_set_variables(THD *thd, List<set_var_base> *var_list) { - int error= 0; - List_iterator<set_var_base> it(*var_list); + int error; + List_iterator_fast<set_var_base> it(*var_list); + DBUG_ENTER("sql_set_variables"); set_var_base *var; while ((var=it++)) { - if ((error=var->check(thd))) - return error; + if ((error= var->check(thd))) + goto err; } - it.rewind(); - while ((var=it++)) - error|= var->update(thd); // Returns 0, -1 or 1 - return error; + if (!(error= test(thd->net.report_error))) + { + it.rewind(); + while ((var= it++)) + error|= var->update(thd); // Returns 0, -1 or 1 + } + +err: + free_underlaid_joins(thd, &thd->lex->select_lex); + DBUG_RETURN(error); +} + + +/* + Say if all variables set by a SET support the ONE_SHOT keyword (currently, + only character set and collation do; later timezones will). + + SYNOPSIS + + not_all_support_one_shot + set_var List of variables to update + + NOTES + It has a "not_" because it makes faster tests (no need to "!") + + RETURN VALUE + 0 all variables of the list support ONE_SHOT + 1 at least one does not support ONE_SHOT +*/ + +bool not_all_support_one_shot(List<set_var_base> *var_list) +{ + List_iterator_fast<set_var_base> it(*var_list); + set_var_base *var; + while ((var= it++)) + { + if (var->no_support_one_shot()) + return 1; + } + return 0; } @@ -1563,7 +2881,6 @@ int set_var::check(THD *thd) } if ((type == OPT_GLOBAL && check_global_access(thd, SUPER_ACL))) return 1; - /* value is a NULL pointer if we are using SET ... = DEFAULT */ if (!value) { @@ -1575,7 +2892,8 @@ int set_var::check(THD *thd) return 0; } - if (value->fix_fields(thd,0)) + if ((!value->fixed && + value->fix_fields(thd, 0, &value)) || value->check_cols(1)) return -1; if (var->check_update_type(value->result_type())) { @@ -1586,6 +2904,37 @@ int set_var::check(THD *thd) } +/* + Check variable, but without assigning value (used by PS) + + SYNOPSIS + set_var::light_check() + thd thread handler + + RETURN VALUE + 0 ok + 1 ERROR, message sent (normally no variables was updated) + -1 ERROR, message not sent +*/ +int set_var::light_check(THD *thd) +{ + if (var->check_type(type)) + { + my_error(type == OPT_GLOBAL ? 
ER_LOCAL_VARIABLE : ER_GLOBAL_VARIABLE, + MYF(0), + var->name); + return -1; + } + if (type == OPT_GLOBAL && check_global_access(thd, SUPER_ACL)) + return 1; + + if (value && ((!value->fixed && value->fix_fields(thd, 0, &value)) || + value->check_cols(1))) + return -1; + return 0; +} + + int set_var::update(THD *thd) { if (!value) @@ -1604,7 +2953,34 @@ int set_var::update(THD *thd) int set_var_user::check(THD *thd) { - return user_var_item->fix_fields(thd,0) ? -1 : 0; + /* + Item_func_set_user_var can't substitute something else on its place => + 0 can be passed as last argument (reference on item) + */ + return (user_var_item->fix_fields(thd, 0, (Item**) 0) || + user_var_item->check()) ? -1 : 0; +} + + +/* + Check variable, but without assigning value (used by PS) + + SYNOPSIS + set_var_user::light_check() + thd thread handler + + RETURN VALUE + 0 ok + 1 ERROR, message sent (normally no variables was updated) + -1 ERROR, message not sent +*/ +int set_var_user::light_check(THD *thd) +{ + /* + Item_func_set_user_var can't substitute something else on its place => + 0 can be passed as last argument (reference on item) + */ + return (user_var_item->fix_fields(thd, 0, (Item**) 0)); } @@ -1626,24 +3002,330 @@ int set_var_user::update(THD *thd) int set_var_password::check(THD *thd) { +#ifndef NO_EMBEDDED_ACCESS_CHECKS if (!user->host.str) - user->host.str= (char*) thd->host_or_ip; + { + if (thd->priv_host != 0) + { + user->host.str= (char *) thd->priv_host; + user->host.length= strlen(thd->priv_host); + } + else + { + user->host.str= (char *)"%"; + user->host.length= 1; + } + } /* Returns 1 as the function sends error to client */ - return check_change_password(thd, user->host.str, user->user.str) ? 1 : 0; + return check_change_password(thd, user->host.str, user->user.str, + password, strlen(password)) ? 1 : 0; +#else + return 0; +#endif } int set_var_password::update(THD *thd) { +#ifndef NO_EMBEDDED_ACCESS_CHECKS /* Returns 1 as the function sends error to client */ - return (change_password(thd, user->host.str, user->user.str, password) ? - 1 : 0); + return change_password(thd, user->host.str, user->user.str, password) ? + 1 : 0; +#else + return 0; +#endif +} + +/**************************************************************************** + Functions to handle table_type +****************************************************************************/ + +/* Based upon sys_var::check_enum() */ + +bool sys_var_thd_storage_engine::check(THD *thd, set_var *var) +{ + char buff[80]; + const char *value; + String str(buff, sizeof(buff), &my_charset_latin1), *res; + + if (var->value->result_type() == STRING_RESULT) + { + enum db_type db_type; + if (!(res=var->value->val_str(&str)) || + !(var->save_result.ulong_value= + (ulong) (db_type= ha_resolve_by_name(res->ptr(), res->length()))) || + ha_checktype(db_type) != db_type) + { + value= res ? res->c_ptr() : "NULL"; + goto err; + } + return 0; + } + value= "unknown"; + +err: + my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), value); + return 1; +} + + +byte *sys_var_thd_storage_engine::value_ptr(THD *thd, enum_var_type type, + LEX_STRING *base) +{ + ulong val; + val= ((type == OPT_GLOBAL) ? 
global_system_variables.*offset : + thd->variables.*offset); + const char *table_type= ha_get_storage_engine((enum db_type)val); + return (byte *) table_type; +} + + +void sys_var_thd_storage_engine::set_default(THD *thd, enum_var_type type) +{ + if (type == OPT_GLOBAL) + global_system_variables.*offset= (ulong) DB_TYPE_MYISAM; + else + thd->variables.*offset= (ulong) (global_system_variables.*offset); } + +bool sys_var_thd_storage_engine::update(THD *thd, set_var *var) +{ + if (var->type == OPT_GLOBAL) + global_system_variables.*offset= var->save_result.ulong_value; + else + thd->variables.*offset= var->save_result.ulong_value; + return 0; +} + +void sys_var_thd_table_type::warn_deprecated(THD *thd) +{ + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DEPRECATED_SYNTAX, + ER(ER_WARN_DEPRECATED_SYNTAX), "table_type", + "storage_engine"); +} + +void sys_var_thd_table_type::set_default(THD *thd, enum_var_type type) +{ + warn_deprecated(thd); + sys_var_thd_storage_engine::set_default(thd, type); +} + +bool sys_var_thd_table_type::update(THD *thd, set_var *var) +{ + warn_deprecated(thd); + return sys_var_thd_storage_engine::update(thd, var); +} + + +/**************************************************************************** + Functions to handle sql_mode +****************************************************************************/ + +byte *sys_var_thd_sql_mode::value_ptr(THD *thd, enum_var_type type, + LEX_STRING *base) +{ + ulong val; + char buff[256]; + String tmp(buff, sizeof(buff), &my_charset_latin1); + + tmp.length(0); + val= ((type == OPT_GLOBAL) ? global_system_variables.*offset : + thd->variables.*offset); + for (uint i= 0; val; val>>= 1, i++) + { + if (val & 1) + { + tmp.append(enum_names->type_names[i]); + tmp.append(','); + } + } + if (tmp.length()) + tmp.length(tmp.length() - 1); + return (byte*) thd->strmake(tmp.ptr(), tmp.length()); +} + + +void sys_var_thd_sql_mode::set_default(THD *thd, enum_var_type type) +{ + if (type == OPT_GLOBAL) + global_system_variables.*offset= 0; + else + thd->variables.*offset= global_system_variables.*offset; +} + +void fix_sql_mode_var(THD *thd, enum_var_type type) +{ + if (type == OPT_GLOBAL) + global_system_variables.sql_mode= + fix_sql_mode(global_system_variables.sql_mode); + else + thd->variables.sql_mode= fix_sql_mode(thd->variables.sql_mode); +} + +/* Map database specific bits to function bits */ + +ulong fix_sql_mode(ulong sql_mode) +{ + /* + Note that we dont set + MODE_NO_KEY_OPTIONS | MODE_NO_TABLE_OPTIONS | MODE_NO_FIELD_OPTIONS + to allow one to get full use of MySQL in this mode. + */ + + if (sql_mode & MODE_ANSI) + { + sql_mode|= (MODE_REAL_AS_FLOAT | MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES | + MODE_IGNORE_SPACE); + /* + MODE_ONLY_FULL_GROUP_BY removed from ANSI mode because it is currently + overly restrictive (see BUG#8510). 
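
    A minimal sketch of the expansion performed here (flag names as above;
    illustration only, not part of the patch):

      ulong mode= MODE_ANSI;
      mode= fix_sql_mode(mode);
      // mode now also has MODE_REAL_AS_FLOAT, MODE_PIPES_AS_CONCAT,
      // MODE_ANSI_QUOTES and MODE_IGNORE_SPACE set, but not
      // MODE_ONLY_FULL_GROUP_BY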
+ */ + } + if (sql_mode & MODE_ORACLE) + sql_mode|= (MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES | + MODE_IGNORE_SPACE | + MODE_NO_KEY_OPTIONS | MODE_NO_TABLE_OPTIONS | + MODE_NO_FIELD_OPTIONS); + if (sql_mode & MODE_MSSQL) + sql_mode|= (MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES | + MODE_IGNORE_SPACE | + MODE_NO_KEY_OPTIONS | MODE_NO_TABLE_OPTIONS | + MODE_NO_FIELD_OPTIONS); + if (sql_mode & MODE_POSTGRESQL) + sql_mode|= (MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES | + MODE_IGNORE_SPACE | + MODE_NO_KEY_OPTIONS | MODE_NO_TABLE_OPTIONS | + MODE_NO_FIELD_OPTIONS); + if (sql_mode & MODE_DB2) + sql_mode|= (MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES | + MODE_IGNORE_SPACE | + MODE_NO_KEY_OPTIONS | MODE_NO_TABLE_OPTIONS | + MODE_NO_FIELD_OPTIONS); + if (sql_mode & MODE_MAXDB) + sql_mode|= (MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES | + MODE_IGNORE_SPACE | + MODE_NO_KEY_OPTIONS | MODE_NO_TABLE_OPTIONS | + MODE_NO_FIELD_OPTIONS); + if (sql_mode & MODE_MYSQL40) + sql_mode|= MODE_NO_FIELD_OPTIONS; + if (sql_mode & MODE_MYSQL323) + sql_mode|= MODE_NO_FIELD_OPTIONS; + return sql_mode; +} + + +/**************************************************************************** + Named list handling +****************************************************************************/ + +gptr find_named(I_List<NAMED_LIST> *list, const char *name, uint length, + NAMED_LIST **found) +{ + I_List_iterator<NAMED_LIST> it(*list); + NAMED_LIST *element; + while ((element= it++)) + { + if (element->cmp(name, length)) + { + if (found) + *found= element; + return element->data; + } + } + return 0; +} + + +void delete_elements(I_List<NAMED_LIST> *list, + void (*free_element)(const char *name, gptr)) +{ + NAMED_LIST *element; + DBUG_ENTER("delete_elements"); + while ((element= list->get())) + { + (*free_element)(element->name, element->data); + delete element; + } + DBUG_VOID_RETURN; +} + + +/* Key cache functions */ + +static KEY_CACHE *create_key_cache(const char *name, uint length) +{ + KEY_CACHE *key_cache; + DBUG_ENTER("create_key_cache"); + DBUG_PRINT("enter",("name: %.*s", length, name)); + + if ((key_cache= (KEY_CACHE*) my_malloc(sizeof(KEY_CACHE), + MYF(MY_ZEROFILL | MY_WME)))) + { + if (!new NAMED_LIST(&key_caches, name, length, (gptr) key_cache)) + { + my_free((char*) key_cache, MYF(0)); + key_cache= 0; + } + else + { + /* + Set default values for a key cache + The values in dflt_key_cache_var is set by my_getopt() at startup + + We don't set 'buff_size' as this is used to enable the key cache + */ + key_cache->param_block_size= dflt_key_cache_var.param_block_size; + key_cache->param_division_limit= dflt_key_cache_var.param_division_limit; + key_cache->param_age_threshold= dflt_key_cache_var.param_age_threshold; + } + } + DBUG_RETURN(key_cache); +} + + +KEY_CACHE *get_or_create_key_cache(const char *name, uint length) +{ + LEX_STRING key_cache_name; + KEY_CACHE *key_cache; + + key_cache_name.str= (char *) name; + key_cache_name.length= length; + pthread_mutex_lock(&LOCK_global_system_variables); + if (!(key_cache= get_key_cache(&key_cache_name))) + key_cache= create_key_cache(name, length); + pthread_mutex_unlock(&LOCK_global_system_variables); + return key_cache; +} + + +void free_key_cache(const char *name, KEY_CACHE *key_cache) +{ + ha_end_key_cache(key_cache); + my_free((char*) key_cache, MYF(0)); +} + + +bool process_key_caches(int (* func) (const char *name, KEY_CACHE *)) +{ + I_List_iterator<NAMED_LIST> it(key_caches); + NAMED_LIST *element; + + while ((element= it++)) + { + KEY_CACHE *key_cache= (KEY_CACHE *) element->data; + 
func(element->name, key_cache); + } + return 0; +} + + /**************************************************************************** Used templates ****************************************************************************/ #ifdef __GNUC__ template class List<set_var_base>; -template class List_iterator<set_var_base>; +template class List_iterator_fast<set_var_base>; +template class I_List_iterator<NAMED_LIST>; #endif diff --git a/sql/set_var.h b/sql/set_var.h index 08e3f20fdb4..c6319a79cf6 100644 --- a/sql/set_var.h +++ b/sql/set_var.h @@ -16,7 +16,7 @@ /* Classes to support the SET command */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_INTERFACE #pragma interface /* gcc class implementation */ #endif @@ -28,17 +28,13 @@ class sys_var; class set_var; typedef struct system_variables SV; -extern TYPELIB bool_typelib, delay_key_write_typelib; +extern TYPELIB bool_typelib, delay_key_write_typelib, sql_mode_typelib; -enum enum_var_type -{ - OPT_DEFAULT, OPT_SESSION, OPT_GLOBAL -}; - -typedef bool (*sys_check_func)(THD *, set_var *); +typedef int (*sys_check_func)(THD *, set_var *); typedef bool (*sys_update_func)(THD *, set_var *); typedef void (*sys_after_update_func)(THD *,enum_var_type); typedef void (*sys_set_default_func)(THD *, enum_var_type); +typedef byte *(*sys_value_ptr_func)(THD *thd); class sys_var { @@ -46,42 +42,84 @@ public: struct my_option *option_limits; /* Updated by by set_var_init() */ uint name_length; /* Updated by by set_var_init() */ const char *name; + sys_after_update_func after_update; - sys_var(const char *name_arg) :name(name_arg),after_update(0) - {} - sys_var(const char *name_arg,sys_after_update_func func) - :name(name_arg),after_update(func) +#if MYSQL_VERSION_ID < 50000 + bool no_support_one_shot; +#endif + sys_var(const char *name_arg, sys_after_update_func func= NULL) + :name(name_arg), after_update(func) +#if MYSQL_VERSION_ID < 50000 + , no_support_one_shot(1) +#endif {} virtual ~sys_var() {} - virtual bool check(THD *thd, set_var *var) { return 0; } + virtual bool check(THD *thd, set_var *var); bool check_enum(THD *thd, set_var *var, TYPELIB *enum_names); + bool check_set(THD *thd, set_var *var, TYPELIB *enum_names); virtual bool update(THD *thd, set_var *var)=0; virtual void set_default(THD *thd, enum_var_type type) {} virtual SHOW_TYPE type() { return SHOW_UNDEF; } - virtual byte *value_ptr(THD *thd, enum_var_type type) { return 0; } + virtual byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base) + { return 0; } virtual bool check_type(enum_var_type type) { return type != OPT_GLOBAL; } /* Error if not GLOBAL */ virtual bool check_update_type(Item_result type) { return type != INT_RESULT; } /* Assume INT */ virtual bool check_default(enum_var_type type) { return option_limits == 0; } - Item *item(THD *thd, enum_var_type type); + Item *item(THD *thd, enum_var_type type, LEX_STRING *base); + virtual bool is_struct() { return 0; } }; -class sys_var_long_ptr :public sys_var +/* + A base class for all variables that require its access to + be guarded with a mutex. +*/ + +class sys_var_global: public sys_var +{ +protected: + pthread_mutex_t *guard; +public: + sys_var_global(const char *name_arg, sys_after_update_func after_update_arg, + pthread_mutex_t *guard_arg) + :sys_var(name_arg, after_update_arg), guard(guard_arg) {} +}; + + +/* + A global-only ulong variable that requires its access to be + protected with a mutex. 
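
  A hypothetical use, matching the constructor below (the variable and
  mutex names are illustrative and do not exist in the patch):

    static ulong my_counter;
    static pthread_mutex_t LOCK_my_counter;
    static sys_var_long_ptr_global
      sys_my_counter("my_counter", &my_counter, &LOCK_my_counter);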
+*/ + +class sys_var_long_ptr_global: public sys_var_global { public: ulong *value; - sys_var_long_ptr(const char *name_arg, ulong *value_ptr) - :sys_var(name_arg),value(value_ptr) {} - sys_var_long_ptr(const char *name_arg, ulong *value_ptr, - sys_after_update_func func) - :sys_var(name_arg,func), value(value_ptr) {} + sys_var_long_ptr_global(const char *name_arg, ulong *value_ptr, + pthread_mutex_t *guard_arg, + sys_after_update_func after_update_arg= NULL) + :sys_var_global(name_arg, after_update_arg, guard_arg), value(value_ptr) {} + bool check(THD *thd, set_var *var); bool update(THD *thd, set_var *var); void set_default(THD *thd, enum_var_type type); SHOW_TYPE type() { return SHOW_LONG; } - byte *value_ptr(THD *thd, enum_var_type type) { return (byte*) value; } + byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base) + { return (byte*) value; } +}; + + +/* + A global ulong variable that is protected by LOCK_global_system_variables +*/ + +class sys_var_long_ptr :public sys_var_long_ptr_global +{ +public: + sys_var_long_ptr(const char *name_arg, ulong *value_ptr, + sys_after_update_func after_update_arg= NULL); }; @@ -97,7 +135,8 @@ public: bool update(THD *thd, set_var *var); void set_default(THD *thd, enum_var_type type); SHOW_TYPE type() { return SHOW_LONGLONG; } - byte *value_ptr(THD *thd, enum_var_type type) { return (byte*) value; } + byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base) + { return (byte*) value; } }; @@ -115,7 +154,8 @@ public: bool update(THD *thd, set_var *var); void set_default(THD *thd, enum_var_type type); SHOW_TYPE type() { return SHOW_MY_BOOL; } - byte *value_ptr(THD *thd, enum_var_type type) { return (byte*) value; } + byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base) + { return (byte*) value; } bool check_update_type(Item_result type) { return 0; } }; @@ -124,20 +164,19 @@ class sys_var_str :public sys_var { public: char *value; // Pointer to allocated string + uint value_length; sys_check_func check_func; sys_update_func update_func; sys_set_default_func set_default_func; sys_var_str(const char *name_arg, sys_check_func check_func_arg, sys_update_func update_func_arg, - sys_set_default_func set_default_func_arg) - :sys_var(name_arg), check_func(check_func_arg), + sys_set_default_func set_default_func_arg, + char *value_arg) + :sys_var(name_arg), value(value_arg), check_func(check_func_arg), update_func(update_func_arg),set_default_func(set_default_func_arg) {} - bool check(THD *thd, set_var *var) - { - return check_func ? 
(*check_func)(thd, var) : 0; - } + bool check(THD *thd, set_var *var); bool update(THD *thd, set_var *var) { return (*update_func)(thd, var); @@ -147,7 +186,8 @@ public: (*set_default_func)(thd, type); } SHOW_TYPE type() { return SHOW_CHAR; } - byte *value_ptr(THD *thd, enum_var_type type) { return (byte*) value; } + byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base) + { return (byte*) value; } bool check_update_type(Item_result type) { return type != STRING_RESULT; /* Only accept strings */ @@ -161,7 +201,7 @@ class sys_var_const_str :public sys_var public: char *value; // Pointer to const value sys_var_const_str(const char *name_arg, const char *value_arg) - :sys_var(name_arg), value((char*) value_arg) + :sys_var(name_arg),value((char*) value_arg) {} bool check(THD *thd, set_var *var) { @@ -172,7 +212,10 @@ public: return 1; } SHOW_TYPE type() { return SHOW_CHAR; } - byte *value_ptr(THD *thd, enum_var_type type) { return (byte*) value; } + byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base) + { + return (byte*) value; + } bool check_update_type(Item_result type) { return 1; @@ -196,7 +239,7 @@ public: } bool update(THD *thd, set_var *var); SHOW_TYPE type() { return SHOW_CHAR; } - byte *value_ptr(THD *thd, enum_var_type type); + byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); bool check_update_type(Item_result type) { return 0; } }; @@ -204,10 +247,7 @@ public: class sys_var_thd :public sys_var { public: - sys_var_thd(const char *name_arg) - :sys_var(name_arg) - {} - sys_var_thd(const char *name_arg, sys_after_update_func func) + sys_var_thd(const char *name_arg, sys_after_update_func func= NULL) :sys_var(name_arg,func) {} bool check_type(enum_var_type type) { return 0; } @@ -220,19 +260,21 @@ public: class sys_var_thd_ulong :public sys_var_thd { + sys_check_func check_func; public: ulong SV::*offset; sys_var_thd_ulong(const char *name_arg, ulong SV::*offset_arg) - :sys_var_thd(name_arg), offset(offset_arg) + :sys_var_thd(name_arg), check_func(0), offset(offset_arg) {} sys_var_thd_ulong(const char *name_arg, ulong SV::*offset_arg, - sys_after_update_func func) - :sys_var_thd(name_arg,func), offset(offset_arg) + sys_check_func c_func, sys_after_update_func au_func) + :sys_var_thd(name_arg,au_func), check_func(c_func), offset(offset_arg) {} + bool check(THD *thd, set_var *var); bool update(THD *thd, set_var *var); void set_default(THD *thd, enum_var_type type); SHOW_TYPE type() { return SHOW_LONG; } - byte *value_ptr(THD *thd, enum_var_type type); + byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); }; @@ -250,7 +292,7 @@ public: bool update(THD *thd, set_var *var); void set_default(THD *thd, enum_var_type type); SHOW_TYPE type() { return SHOW_HA_ROWS; } - byte *value_ptr(THD *thd, enum_var_type type); + byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); }; @@ -270,7 +312,7 @@ public: bool update(THD *thd, set_var *var); void set_default(THD *thd, enum_var_type type); SHOW_TYPE type() { return SHOW_LONGLONG; } - byte *value_ptr(THD *thd, enum_var_type type); + byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); bool check_default(enum_var_type type) { return type == OPT_GLOBAL && !option_limits; @@ -296,7 +338,7 @@ public: bool update(THD *thd, set_var *var); void set_default(THD *thd, enum_var_type type); SHOW_TYPE type() { return SHOW_MY_BOOL; } - byte *value_ptr(THD *thd, enum_var_type type); + byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); bool check(THD *thd, set_var *var) { return 
check_enum(thd, var, &bool_typelib); @@ -307,6 +349,7 @@ public: class sys_var_thd_enum :public sys_var_thd { +protected: ulong SV::*offset; TYPELIB *enum_names; public: @@ -326,31 +369,78 @@ public: bool update(THD *thd, set_var *var); void set_default(THD *thd, enum_var_type type); SHOW_TYPE type() { return SHOW_CHAR; } - byte *value_ptr(THD *thd, enum_var_type type); + byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); bool check_update_type(Item_result type) { return 0; } }; +extern void fix_sql_mode_var(THD *thd, enum_var_type type); + +class sys_var_thd_sql_mode :public sys_var_thd_enum +{ +public: + sys_var_thd_sql_mode(const char *name_arg, ulong SV::*offset_arg) + :sys_var_thd_enum(name_arg, offset_arg, &sql_mode_typelib, + fix_sql_mode_var) + {} + bool check(THD *thd, set_var *var) + { + return check_set(thd, var, enum_names); + } + void set_default(THD *thd, enum_var_type type); + byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); +}; + + +class sys_var_thd_storage_engine :public sys_var_thd +{ +protected: + ulong SV::*offset; +public: + sys_var_thd_storage_engine(const char *name_arg, ulong SV::*offset_arg) + :sys_var_thd(name_arg), offset(offset_arg) + {} + bool check(THD *thd, set_var *var); +SHOW_TYPE type() { return SHOW_CHAR; } + bool check_update_type(Item_result type) + { + return type != STRING_RESULT; /* Only accept strings */ + } + void set_default(THD *thd, enum_var_type type); + bool update(THD *thd, set_var *var); + byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); +}; + +class sys_var_thd_table_type :public sys_var_thd_storage_engine +{ +public: + sys_var_thd_table_type(const char *name_arg, ulong SV::*offset_arg) + :sys_var_thd_storage_engine(name_arg, offset_arg) + {} + void warn_deprecated(THD *thd); + void set_default(THD *thd, enum_var_type type); + bool update(THD *thd, set_var *var); +}; + class sys_var_thd_bit :public sys_var_thd { + sys_check_func check_func; sys_update_func update_func; public: ulong bit_flag; bool reverse; - sys_var_thd_bit(const char *name_arg, sys_update_func func, ulong bit, - bool reverse_arg=0) - :sys_var_thd(name_arg), update_func(func), bit_flag(bit), - reverse(reverse_arg) + sys_var_thd_bit(const char *name_arg, + sys_check_func c_func, sys_update_func u_func, + ulong bit, bool reverse_arg=0) + :sys_var_thd(name_arg), check_func(c_func), update_func(u_func), + bit_flag(bit), reverse(reverse_arg) {} - bool check(THD *thd, set_var *var) - { - return check_enum(thd, var, &bool_typelib); - } + bool check(THD *thd, set_var *var); bool update(THD *thd, set_var *var); bool check_update_type(Item_result type) { return 0; } bool check_type(enum_var_type type) { return type == OPT_GLOBAL; } SHOW_TYPE type() { return SHOW_MY_BOOL; } - byte *value_ptr(THD *thd, enum_var_type type); + byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); }; @@ -365,7 +455,7 @@ public: bool check_type(enum_var_type type) { return type == OPT_GLOBAL; } bool check_default(enum_var_type type) { return 0; } SHOW_TYPE type() { return SHOW_LONG; } - byte *value_ptr(THD *thd, enum_var_type type); + byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); }; @@ -376,7 +466,7 @@ public: bool update(THD *thd, set_var *var); bool check_type(enum_var_type type) { return type == OPT_GLOBAL; } SHOW_TYPE type() { return SHOW_LONGLONG; } - byte *value_ptr(THD *thd, enum_var_type type); + byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); }; @@ -387,10 +477,11 @@ public: bool update(THD *thd, set_var 
*var); bool check_type(enum_var_type type) { return type == OPT_GLOBAL; } SHOW_TYPE type() { return SHOW_LONGLONG; } - byte *value_ptr(THD *thd, enum_var_type type); + byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); }; +#ifdef HAVE_REPLICATION class sys_var_slave_skip_counter :public sys_var { public: @@ -404,6 +495,14 @@ public: */ }; +class sys_var_sync_binlog_period :public sys_var_long_ptr +{ +public: + sys_var_sync_binlog_period(const char *name_arg, ulong *value_ptr) + :sys_var_long_ptr(name_arg,value_ptr) {} + bool update(THD *thd, set_var *var); +}; +#endif class sys_var_rand_seed1 :public sys_var { @@ -422,25 +521,242 @@ public: }; -class sys_var_thd_conv_charset :public sys_var_thd +class sys_var_collation :public sys_var_thd { public: - sys_var_thd_conv_charset(const char *name_arg) - :sys_var_thd(name_arg) - {} + sys_var_collation(const char *name_arg) :sys_var_thd(name_arg) + { +#if MYSQL_VERSION_ID < 50000 + no_support_one_shot= 0; +#endif + } + bool check(THD *thd, set_var *var); +SHOW_TYPE type() { return SHOW_CHAR; } + bool check_update_type(Item_result type) + { + return ((type != STRING_RESULT) && (type != INT_RESULT)); + } + bool check_default(enum_var_type type) { return 0; } + virtual void set_default(THD *thd, enum_var_type type)= 0; +}; + +class sys_var_character_set :public sys_var_thd +{ +public: + bool nullable; + sys_var_character_set(const char *name_arg) : + sys_var_thd(name_arg) + { + nullable= 0; +#if MYSQL_VERSION_ID < 50000 + /* + In fact only almost all variables derived from sys_var_character_set + support ONE_SHOT; character_set_results doesn't. But that's good enough. + */ + no_support_one_shot= 0; +#endif + } + bool check(THD *thd, set_var *var); + SHOW_TYPE type() { return SHOW_CHAR; } + bool check_update_type(Item_result type) + { + return ((type != STRING_RESULT) && (type != INT_RESULT)); + } + bool check_default(enum_var_type type) { return 0; } + bool update(THD *thd, set_var *var); + byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); + virtual void set_default(THD *thd, enum_var_type type)= 0; + virtual CHARSET_INFO **ci_ptr(THD *thd, enum_var_type type)= 0; +}; + +class sys_var_character_set_client :public sys_var_character_set +{ +public: + sys_var_character_set_client(const char *name_arg) : + sys_var_character_set(name_arg) {} + void set_default(THD *thd, enum_var_type type); + CHARSET_INFO **ci_ptr(THD *thd, enum_var_type type); +}; + +class sys_var_character_set_results :public sys_var_character_set +{ +public: + sys_var_character_set_results(const char *name_arg) : + sys_var_character_set(name_arg) + { nullable= 1; } + void set_default(THD *thd, enum_var_type type); + CHARSET_INFO **ci_ptr(THD *thd, enum_var_type type); +}; + +class sys_var_character_set_server :public sys_var_character_set +{ +public: + sys_var_character_set_server(const char *name_arg) : + sys_var_character_set(name_arg) {} +#if defined(HAVE_REPLICATION) && (MYSQL_VERSION_ID < 50000) bool check(THD *thd, set_var *var); +#endif + void set_default(THD *thd, enum_var_type type); + CHARSET_INFO **ci_ptr(THD *thd, enum_var_type type); +}; + +class sys_var_character_set_database :public sys_var_character_set +{ +public: + sys_var_character_set_database(const char *name_arg) : + sys_var_character_set(name_arg) {} + void set_default(THD *thd, enum_var_type type); + CHARSET_INFO **ci_ptr(THD *thd, enum_var_type type); +}; + +class sys_var_character_set_connection :public sys_var_character_set +{ +public: + sys_var_character_set_connection(const 
char *name_arg) : + sys_var_character_set(name_arg) {} + void set_default(THD *thd, enum_var_type type); + CHARSET_INFO **ci_ptr(THD *thd, enum_var_type type); +}; + +class sys_var_collation_connection :public sys_var_collation +{ +public: + sys_var_collation_connection(const char *name_arg) :sys_var_collation(name_arg) {} bool update(THD *thd, set_var *var); + void set_default(THD *thd, enum_var_type type); + byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); +}; + +class sys_var_collation_server :public sys_var_collation +{ +public: + sys_var_collation_server(const char *name_arg) :sys_var_collation(name_arg) {} +#if defined(HAVE_REPLICATION) && (MYSQL_VERSION_ID < 50000) + bool check(THD *thd, set_var *var); +#endif + bool update(THD *thd, set_var *var); + void set_default(THD *thd, enum_var_type type); + byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); +}; + +class sys_var_collation_database :public sys_var_collation +{ +public: + sys_var_collation_database(const char *name_arg) :sys_var_collation(name_arg) {} + bool update(THD *thd, set_var *var); + void set_default(THD *thd, enum_var_type type); + byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); +}; + + +class sys_var_key_cache_param :public sys_var +{ +protected: + size_t offset; +public: + sys_var_key_cache_param(const char *name_arg, size_t offset_arg) + :sys_var(name_arg), offset(offset_arg) + {} + byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); + bool check_default(enum_var_type type) { return 1; } + bool is_struct() { return 1; } +}; + + +class sys_var_key_buffer_size :public sys_var_key_cache_param +{ +public: + sys_var_key_buffer_size(const char *name_arg) + :sys_var_key_cache_param(name_arg, offsetof(KEY_CACHE, param_buff_size)) + {} + bool update(THD *thd, set_var *var); + SHOW_TYPE type() { return SHOW_LONGLONG; } +}; + + +class sys_var_key_cache_long :public sys_var_key_cache_param +{ +public: + sys_var_key_cache_long(const char *name_arg, size_t offset_arg) + :sys_var_key_cache_param(name_arg, offset_arg) + {} + bool update(THD *thd, set_var *var); + SHOW_TYPE type() { return SHOW_LONG; } +}; + + +class sys_var_thd_date_time_format :public sys_var_thd +{ + DATE_TIME_FORMAT *SV::*offset; + timestamp_type date_time_type; +public: + sys_var_thd_date_time_format(const char *name_arg, + DATE_TIME_FORMAT *SV::*offset_arg, + timestamp_type date_time_type_arg) + :sys_var_thd(name_arg), offset(offset_arg), + date_time_type(date_time_type_arg) + {} SHOW_TYPE type() { return SHOW_CHAR; } - byte *value_ptr(THD *thd, enum_var_type type); bool check_update_type(Item_result type) { return type != STRING_RESULT; /* Only accept strings */ } bool check_default(enum_var_type type) { return 0; } + bool check(THD *thd, set_var *var); + bool update(THD *thd, set_var *var); + void update2(THD *thd, enum_var_type type, DATE_TIME_FORMAT *new_value); + byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); void set_default(THD *thd, enum_var_type type); }; +/* Variable that you can only read from */ + +class sys_var_readonly: public sys_var +{ +public: + enum_var_type var_type; + SHOW_TYPE show_type; + sys_value_ptr_func value_ptr_func; + sys_var_readonly(const char *name_arg, enum_var_type type, + SHOW_TYPE show_type_arg, + sys_value_ptr_func value_ptr_func_arg) + :sys_var(name_arg), var_type(type), + show_type(show_type_arg), value_ptr_func(value_ptr_func_arg) + {} + bool update(THD *thd, set_var *var) { return 1; } + bool check_default(enum_var_type type) { return 1; } + 
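  /*
    Assumed wiring, shown only as an illustration of this constructor:
    a read-only variable simply wraps a getter such as the
    get_warning_count() defined in set_var.cc, e.g.

      static sys_var_readonly sys_warning_count("warning_count", OPT_SESSION,
                                                SHOW_LONG, get_warning_count);
  */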
bool check_type(enum_var_type type) { return type != var_type; } + bool check_update_type(Item_result type) { return 1; } + byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base) + { + return (*value_ptr_func)(thd); + } + SHOW_TYPE type() { return show_type; } +}; + +class sys_var_thd_time_zone :public sys_var_thd +{ +public: + sys_var_thd_time_zone(const char *name_arg): + sys_var_thd(name_arg) + { +#if MYSQL_VERSION_ID < 50000 + no_support_one_shot= 0; +#endif + } + bool check(THD *thd, set_var *var); + SHOW_TYPE type() { return SHOW_CHAR; } + bool check_update_type(Item_result type) + { + return type != STRING_RESULT; /* Only accept strings */ + } + bool check_default(enum_var_type type) { return 0; } + bool update(THD *thd, set_var *var); + byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); + virtual void set_default(THD *thd, enum_var_type type); +}; + /**************************************************************************** Classes for parsing of the SET command ****************************************************************************/ @@ -452,6 +768,11 @@ public: virtual ~set_var_base() {} virtual int check(THD *thd)=0; /* To check privileges etc. */ virtual int update(THD *thd)=0; /* To set the value */ + /* light check for PS */ + virtual int light_check(THD *thd) { return check(thd); } +#if MYSQL_VERSION_ID < 50000 + virtual bool no_support_one_shot() { return 1; } +#endif }; @@ -465,12 +786,17 @@ public: enum_var_type type; union { - CONVERT *convert; + CHARSET_INFO *charset; ulong ulong_value; + ulonglong ulonglong_value; + DATE_TIME_FORMAT *date_time_format; + Time_zone *time_zone; } save_result; + LEX_STRING base; /* for structs */ - set_var(enum_var_type type_arg, sys_var *var_arg, Item *value_arg) - :var(var_arg), type(type_arg) + set_var(enum_var_type type_arg, sys_var *var_arg, LEX_STRING *base_name_arg, + Item *value_arg) + :var(var_arg), type(type_arg), base(*base_name_arg) { /* If the set value is a field, change it to a string to allow things like @@ -479,7 +805,8 @@ public: if (value_arg && value_arg->type() == Item::FIELD_ITEM) { Item_field *item= (Item_field*) value_arg; - if (!(value=new Item_string(item->field_name, strlen(item->field_name)))) + if (!(value=new Item_string(item->field_name, strlen(item->field_name), + item->collation.collation))) value=value_arg; /* Give error message later */ } else @@ -487,6 +814,10 @@ public: } int check(THD *thd); int update(THD *thd); + int light_check(THD *thd); +#if MYSQL_VERSION_ID < 50000 + bool no_support_one_shot() { return var->no_support_one_shot; } +#endif }; @@ -501,6 +832,7 @@ public: {} int check(THD *thd); int update(THD *thd); + int light_check(THD *thd); }; /* For SET PASSWORD */ @@ -518,6 +850,68 @@ public: }; +/* For SET NAMES and SET CHARACTER SET */ + +class set_var_collation_client: public set_var_base +{ + CHARSET_INFO *character_set_client; + CHARSET_INFO *character_set_results; + CHARSET_INFO *collation_connection; +public: + set_var_collation_client(CHARSET_INFO *client_coll_arg, + CHARSET_INFO *connection_coll_arg, + CHARSET_INFO *result_coll_arg) + :character_set_client(client_coll_arg), + character_set_results(result_coll_arg), + collation_connection(connection_coll_arg) + {} + int check(THD *thd); + int update(THD *thd); +}; + + +/* Named lists (used for keycaches) */ + +class NAMED_LIST :public ilink +{ + const char *name; + uint name_length; +public: + gptr data; + + NAMED_LIST(I_List<NAMED_LIST> *links, const char *name_arg, + uint name_length_arg, gptr data_arg) + 
:name_length(name_length_arg), data(data_arg) + { + name= my_strdup_with_length((byte*) name_arg, name_length, MYF(MY_WME)); + links->push_back(this); + } + inline bool cmp(const char *name_cmp, uint length) + { + return length == name_length && !memcmp(name, name_cmp, length); + } + ~NAMED_LIST() + { + my_free((char*) name, MYF(0)); + } + friend bool process_key_caches(int (* func) (const char *name, + KEY_CACHE *)); + friend void delete_elements(I_List<NAMED_LIST> *list, + void (*free_element)(const char*, gptr)); +}; + +/* updated in sql_acl.cc */ + +extern sys_var_thd_bool sys_old_passwords; +extern LEX_STRING default_key_cache_base; + +/* For sql_yacc */ +struct sys_var_with_base +{ + sys_var *var; + LEX_STRING base_name; +}; + /* Prototypes for helper functions */ @@ -526,6 +920,21 @@ void set_var_init(); void set_var_free(); sys_var *find_sys_var(const char *str, uint length=0); int sql_set_variables(THD *thd, List<set_var_base> *var_list); +bool not_all_support_one_shot(List<set_var_base> *var_list); void fix_delay_key_write(THD *thd, enum_var_type type); - -extern sys_var_str sys_charset; +ulong fix_sql_mode(ulong sql_mode); +extern sys_var_str sys_charset_system; +extern sys_var_str sys_init_connect; +extern sys_var_str sys_init_slave; +extern sys_var_thd_time_zone sys_time_zone; +CHARSET_INFO *get_old_charset_by_name(const char *old_name); +gptr find_named(I_List<NAMED_LIST> *list, const char *name, uint length, + NAMED_LIST **found); + +/* key_cache functions */ +KEY_CACHE *get_key_cache(LEX_STRING *cache_name); +KEY_CACHE *get_or_create_key_cache(const char *name, uint length); +void free_key_cache(const char *name, KEY_CACHE *key_cache); +bool process_key_caches(int (* func) (const char *name, KEY_CACHE *)); +void delete_elements(I_List<NAMED_LIST> *list, + void (*free_element)(const char*, gptr)); diff --git a/sql/share/Makefile.am b/sql/share/Makefile.am index 33c3f9a7edd..3b13d73e8da 100644 --- a/sql/share/Makefile.am +++ b/sql/share/Makefile.am @@ -23,7 +23,7 @@ dist-hook: done; \ sleep 1 ; touch $(srcdir)/*/errmsg.sys $(INSTALL_DATA) $(srcdir)/charsets/README $(distdir)/charsets - $(INSTALL_DATA) $(srcdir)/charsets/Index $(distdir)/charsets + $(INSTALL_DATA) $(srcdir)/charsets/Index.xml $(distdir)/charsets all-local: @AVAILABLE_LANGUAGES_ERRORS@ @@ -41,13 +41,19 @@ install-data-local: done $(mkinstalldirs) $(DESTDIR)$(pkgdatadir)/charsets $(INSTALL_DATA) $(srcdir)/charsets/README $(DESTDIR)$(pkgdatadir)/charsets/README - $(INSTALL_DATA) $(srcdir)/charsets/Index $(DESTDIR)$(pkgdatadir)/charsets/Index - $(INSTALL_DATA) $(srcdir)/charsets/*.conf $(DESTDIR)$(pkgdatadir)/charsets + $(INSTALL_DATA) $(srcdir)/charsets/*.xml $(DESTDIR)$(pkgdatadir)/charsets + +# FIXME maybe shouldn't remove, could be needed by other installation? +uninstall-local: + @RM@ -f -r $(DESTDIR)$(pkgdatadir) + +# Do nothing +link_sources: fix_errors: for lang in @AVAILABLE_LANGUAGES@; \ do \ - ../../extra/comp_err $(srcdir)/$$lang/errmsg.txt $(srcdir)/$$lang/errmsg.sys; \ + ../../extra/comp_err -C$(srcdir)/charsets/ $(srcdir)/$$lang/errmsg.txt $(srcdir)/$$lang/errmsg.sys; \ done # Don't update the files from bitkeeper diff --git a/sql/share/charsets/Index b/sql/share/charsets/Index deleted file mode 100644 index 5cf30682cc0..00000000000 --- a/sql/share/charsets/Index +++ /dev/null @@ -1,38 +0,0 @@ -# sql/share/charsets/Index -# -# This file lists all of the available character sets. Please keep this -# file sorted by character set number. 
- - -big5 1 -czech 2 -dec8 3 -dos 4 -german1 5 -hp8 6 -koi8_ru 7 -latin1 8 -latin2 9 -swe7 10 -usa7 11 -ujis 12 -sjis 13 -cp1251 14 -danish 15 -hebrew 16 -# The win1251 character set is deprecated. Please use cp1251 instead. -win1251 17 -tis620 18 -euc_kr 19 -estonia 20 -hungarian 21 -koi8_ukr 22 -win1251ukr 23 -gb2312 24 -greek 25 -win1250 26 -croat 27 -gbk 28 -cp1257 29 -latin5 30 -latin1_de 31 diff --git a/sql/share/charsets/Index.xml b/sql/share/charsets/Index.xml new file mode 100644 index 00000000000..97fc27e1431 --- /dev/null +++ b/sql/share/charsets/Index.xml @@ -0,0 +1,583 @@ +<?xml version='1.0' encoding="utf-8"?> + +<charsets max-id="96"> + +<copyright> + Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +</copyright> + +<description> +This file lists all of the available character sets. +To make maintaining easier please: + - keep records sorted by collation number. + - change charsets.max-id when adding a new collation. +</description> + +<charset name="big5"> + <family>Traditional Chinese</family> + <description>Big5 Traditional Chinese</description> + <alias>big-5</alias> + <alias>bigfive</alias> + <alias>big-five</alias> + <alias>cn-big5</alias> + <alias>csbig5</alias> + <collation name="big5_chinese_ci" id="1" order="Chinese"> + <flag>primary</flag> + <flag>compiled</flag> + </collation> + <collation name="big5_bin" id="84" order="Binary"> + <flag>binary</flag> + <flag>compiled</flag> + </collation> +</charset> + +<charset name="latin2"> + <family>Central European</family> + <description>ISO 8859-2 Central European</description> + <alias>csisolatin2</alias> + <alias>iso-8859-2</alias> + <alias>iso-ir-101</alias> + <alias>iso_8859-2</alias> + <alias>iso_8859-2:1987</alias> + <alias>l2</alias> + <collation name="latin2_czech_cs" id="2" order="Czech" flag="compiled"/> + <collation name="latin2_general_ci" id="9" flag="primary"> + <order>Hungarian</order> + <order>Polish</order> + <order>Romanian</order> + <order>Croatian</order> + <order>Slovak</order> + <order>Slovenian</order> + <order>Sorbian</order> + </collation> + <collation name="latin2_hungarian_ci" id="21" order="Hungarian"/> + <collation name="latin2_croatian_ci" id="27" order="Croatian"/> + <collation name="latin2_bin" id="77" order="Binary" flag="binary"/> +</charset> + +<charset name="dec8"> + <family>Western</family> + <description>DEC West European</description> + <collation name="dec8_bin" id="69" order="Binary" flag="binary"/> + <collation name="dec8_swedish_ci" id="3" flag="primary"> + <order>Dutch</order> + <order>English</order> + <order>French</order> + <order>German Duden</order> + <order>Italian</order> + <order>Latin</order> + <order>Portuguese</order> + <order>Spanish</order> + </collation> +</charset> + +<charset name="cp850"> + <family>Western</family> + <description>DOS West European</description> + <alias>850</alias> + 
<alias>cspc850multilingual</alias> + <alias>ibm850</alias> + <collation name="cp850_general_ci" id="4" flag="primary"> + <order>Dutch</order> + <order>English</order> + <order>French</order> + <order>German Duden</order> + <order>Italian</order> + <order>Latin</order> + <order>Portuguese</order> + <order>Spanish</order> + </collation> + <collation name="cp850_bin" id="80" order="Binary" flag="binary"/> +</charset> + +<charset name="latin1"> + <family>Western</family> + <description>cp1252 West European</description> + <alias>csisolatin1</alias> + <alias>iso-8859-1</alias> + <alias>iso-ir-100</alias> + <alias>iso_8859-1</alias> + <alias>iso_8859-1:1987</alias> + <alias>l1</alias> + <alias>latin1</alias> + <collation name="latin1_german1_ci" id="5" order="German Duden"/> + <collation name="latin1_swedish_ci" id="8" order="Finnish, Swedish"> + <flag>primary</flag> + <flag>compiled</flag> + </collation> + <collation name="latin1_danish_ci" id="15" order="Danish"/> + <collation name="latin1_german2_ci" id="31" order="German Phonebook" flag="compiled"/> + <collation name="latin1_spanish_ci" id="94" order="Spanish"/> + <collation name="latin1_bin" id="47" order="Binary"> + <flag>binary</flag> + <flag>compiled</flag> + </collation> + <collation name="latin1_general_ci" id="48"> + <order>Dutch</order> + <order>English</order> + <order>French</order> + <order>German Duden</order> + <order>Italian</order> + <order>Latin</order> + <order>Portuguese</order> + <order>Spanish</order> + </collation> + <collation name="latin1_general_cs" id="49"> + <order>Dutch</order> + <order>English</order> + <order>French</order> + <order>German Duden</order> + <order>Italian</order> + <order>Latin</order> + <order>Portuguese</order> + <order>Spanish</order> + </collation> +</charset> + +<charset name="hp8"> + <family>Western</family> + <description>HP West European</description> + <alias>hproman8</alias> + <collation name="hp8_bin" id="72" order="Binary" flag="binary"/> + <collation name="hp8_english_ci" id="6" flag="primary"> + <order>Dutch</order> + <order>English</order> + <order>French</order> + <order>German Duden</order> + <order>Italian</order> + <order>Latin</order> + <order>Portuguese</order> + <order>Spanish</order> + </collation> +</charset> + +<charset name="koi8r"> + <family>Cyrillic</family> + <description>KOI8-R Relcom Russian</description> + <alias>koi8-r</alias> + <alias>cskoi8r</alias> + <collation name="koi8r_general_ci" id="7" order="Russian" flag="primary"/> + <collation name="koi8r_bin" id="74" order="Binary" flag="binary"/> +</charset> + +<charset name="swe7"> + <family>Western</family> + <description>7bit Swedish</description> + <alias>iso-646-se</alias> + <collation name="swe7_swedish_ci" id="10" order="Swedish" flag="primary"/> + <collation name="swe7_bin" id="82" order="Binary" flag="binary"/> +</charset> + +<charset name="ascii"> + <family>Western</family> + <description>US ASCII</description> + <alias>us</alias> + <alias>us-ascii</alias> + <alias>csascii</alias> + <alias>iso-ir-6</alias> + <alias>iso646-us</alias> + <collation name="ascii_general_ci" id="11" order="English" flag="primary"/> + <collation name="ascii_bin" id="65" order="Binary" flag="binary"/> +</charset> + +<charset name="ujis"> + <family>Japanese</family> + <description>EUC-JP Japanese</description> + <alias>euc-jp</alias> + <collation name="ujis_japanese_ci" id="12" order="Japanese"> + <flag>primary</flag> + <flag>compiled</flag> + </collation> + <collation name="ujis_bin" id="91" order="Japanese"> + <flag>binary</flag> + 
<flag>compiled</flag> + </collation> +</charset> + +<charset name="sjis"> + <family>Japanese</family> + <description>Shift-JIS Japanese</description> + <alias>s-jis</alias> + <alias>shift-jis</alias> + <alias>x-sjis</alias> + <collation name="sjis_japanese_ci" id="13" order="Japanese"> + <flag>primary</flag> + <flag>compiled</flag> + </collation> + <collation name="sjis_bin" id="88" order="Binary"> + <flag>binary</flag> + <flag>compiled</flag> + </collation> +</charset> + +<charset name="cp1251"> + <family>Cyrillic</family> + <description>Windows Cyrillic</description> + <alias>windows-1251</alias> + <alias>ms-cyr</alias> + <alias>ms-cyrillic</alias> + <collation name="cp1251_bulgarian_ci" id="14"> + <order>Belarusian</order> + <order>Bulgarian</order> + <order>Macedonian</order> + <order>Russian</order> + <order>Serbian</order> + <order>Mongolian</order> + <order>Ukrainian</order> + </collation> + <collation name="cp1251_ukrainian_ci" id="23" order="Ukrainian"/> + <collation name="cp1251_bin" id="50" order="Binary" flag="binary"/> + <collation name="cp1251_general_ci" id="51" flag="primary"> + <order>Belarusian</order> + <order>Bulgarian</order> + <order>Macedonian</order> + <order>Russian</order> + <order>Serbian</order> + <order>Mongolian</order> + <order>Ukrainian</order> + </collation> + <collation name="cp1251_general_cs" id="52"> + <order>Belarusian</order> + <order>Bulgarian</order> + <order>Macedonian</order> + <order>Russian</order> + <order>Serbian</order> + <order>Mongolian</order> + <order>Ukrainian</order> + </collation> +</charset> + +<charset name="hebrew"> + <family>Hebrew</family> + <description>ISO 8859-8 Hebrew</description> + <alias>csisolatinhebrew</alias> + <alias>iso-8859-8</alias> + <alias>iso-ir-138</alias> + <collation name="hebrew_general_ci" id="16" order="Hebrew" flag="primary"/> + <collation name="hebrew_bin" id="71" order="Binary" flag="binary"/> +</charset> + +<charset name="tis620"> + <family>Thai</family> + <description>TIS620 Thai</description> + <alias>tis-620</alias> + <collation name="tis620_thai_ci" id="18" order="Thai"> + <flag>primary</flag> + <flag>compiled</flag> + </collation> + <collation name="tis620_bin" id="89" order="Binary"> + <flag>binary</flag> + <flag>compiled</flag> + </collation> +</charset> + +<charset name="euckr"> + <family>Korean</family> + <description>EUC-KR Korean</description> + <alias>euc_kr</alias> + <alias>euc-kr</alias> + <collation name="euckr_korean_ci" id="19" order="Korean"> + <flag>primary</flag> + <flag>compiled</flag> + </collation> + <collation name="euckr_bin" id="85"> + <flag>binary</flag> + <flag>compiled</flag> + </collation> +</charset> + +<charset name="latin7"> + <family>Baltic</family> + <description>ISO 8859-13 Baltic</description> + <alias>BalticRim</alias> + <alias>iso-8859-13</alias> + <alias>l7</alias> + <collation name="latin7_estonian_cs" id="20"> + <order>Estonian</order> + </collation> + <collation name="latin7_general_ci" id="41"> + <order>Latvian</order> + <order>Lithuanian</order> + <flag>primary</flag> + </collation> + <collation name="latin7_general_cs" id="42"> + <order>Latvian</order> + <order>Lithuanian</order> + </collation> + <collation name="latin7_bin" id="79" order="Binary" flag="binary"/> +</charset> + +<charset name="koi8u"> + <family>Cyrillic</family> + <description>KOI8-U Ukrainian</description> + <alias>koi8-u</alias> + <collation name="koi8u_general_ci" id="22" order="Ukranian" flag="primary"/> + <collation name="koi8u_bin" id="75" order="Binary" flag="binary"/> +</charset> + 
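(Aside, not part of the patch.) The charset-index entries above and below only register names, aliases, collation ids and the primary/binary/compiled flags; the per-character behaviour lives in the 256-entry <ctype>, <lower>, <upper>, <unicode> and collation <map> tables carried by the individual files added further down (armscii8.xml, ascii.xml, cp1250.xml, and so on). As a rough illustration of what a simple 8-bit collation <map> encodes, the C sketch below compares two byte strings through a per-byte weight table. It is not code from this patch: the names weight_compare, sort_order and init_identity_map, and the placeholder table contents, are invented for the example; in the server the real table would be generated from the XML map.

    /*
     * Illustrative sketch only -- not part of this patch.  A simple 8-bit
     * collation <map> is a 256-entry weight table indexed by byte value;
     * two strings compare equal under the collation when every byte pair
     * maps to the same weight.  The identity table used here is only a
     * stand-in for a table generated from the XML.
     */
    #include <stddef.h>

    typedef unsigned char uchar;

    static uchar sort_order[256];          /* hypothetical weight table   */

    static void init_identity_map(void)    /* behaves like a *_bin map    */
    {
      int i;
      for (i= 0; i < 256; i++)
        sort_order[i]= (uchar) i;
    }

    static int weight_compare(const uchar *a, const uchar *b, size_t len)
    {
      size_t i;
      for (i= 0; i < len; i++)
      {
        int diff= (int) sort_order[a[i]] - (int) sort_order[b[i]];
        if (diff)
          return diff;                     /* first differing weight wins */
      }
      return 0;                            /* equal under this collation  */
    }

This also suggests why collations declared with flag="binary" (for example armscii8_bin or cp1251_bin in the files below) can omit a map entirely: comparing raw byte values is just the identity-table case of the same loop.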
+<charset name="gb2312"> + <family>Simplified Chinese</family> + <description>GB2312 Simplified Chinese</description> + <alias>chinese</alias> + <alias>iso-ir-58</alias> + <collation name="gb2312_chinese_ci" id="24" order="Chinese"> + <flag>primary</flag> + <flag>compiled</flag> + </collation> + <collation name="gb2312_bin" id="86"> + <flag>binary</flag> + <flag>compiled</flag> + </collation> +</charset> + +<charset name="greek"> + <family>Greek</family> + <description>ISO 8859-7 Greek</description> + <alias>csisolatingreek</alias> + <alias>ecma-118</alias> + <alias>greek8</alias> + <alias>iso-8859-7</alias> + <alias>iso-ir-126</alias> + <collation name="greek_general_ci" id="25" order="Greek" flag="primary"/> + <collation name="greek_bin" id="70" order="Binary" flag="binary"/> +</charset> + +<charset name="cp1250"> + <family>Central European</family> + <description>Windows Central European</description> + <alias>ms-ce</alias> + <alias>windows-1250</alias> + <collation name="cp1250_general_ci" id="26" flag="primary"> + <order>Hungarian</order> + <order>Polish</order> + <order>Romanian</order> + <order>Croatian</order> + <order>Slovak</order> + <order>Slovenian</order> + <order>Sorbian</order> + </collation> + <collation name="cp1250_croatian_ci" id="44"> + <order>Croatian</order> + </collation> + <collation name="cp1250_czech_cs" id="34" order="Czech"> + <flag>compiled</flag> + </collation> + <collation name="cp1250_bin" id="66" order="Binary" flag="binary"/> +</charset> + +<charset name="gbk"> + <family>East Asian</family> + <description>GBK Simplified Chinese</description> + <alias>cp936</alias> + <collation name="gbk_chinese_ci" id="28" order="Chinese"> + <flag>primary</flag> + <flag>compiled</flag> + </collation> + <collation name="gbk_bin" id="87" order="Binary"> + <flag>binary</flag> + <flag>compiled</flag> + </collation> +</charset> + +<charset name="cp1257"> + <family>Baltic</family> + <description>Windows Baltic</description> + <alias>WinBaltRim</alias> + <alias>windows-1257</alias> + <collation name="cp1257_lithuanian_ci" id="29" order="Lithuanian"/> + <collation name="cp1257_bin" id="58" order="Binary" flag="binary"/> + <collation name="cp1257_general_ci" id="59" flag="primary"> + <order>Latvian</order> + <order>Lithuanian</order> + </collation> + <!--collation name="cp1257_ci" id="60"/--> + <!--collation name="cp1257_cs" id="61"/--> +</charset> + +<charset name="latin5"> + <family>South Asian</family> + <description>ISO 8859-9 Turkish</description> + <alias>csisolatin5</alias> + <alias>iso-8859-9</alias> + <alias>iso-ir-148</alias> + <alias>l5</alias> + <alias>latin5</alias> + <alias>turkish</alias> + <collation name="latin5_turkish_ci" id="30" order="Turkish" flag="primary"/> + <collation name="latin5_bin" id="78" order="Binary" flag="binary"/> +</charset> + +<charset name="armscii8"> + <family>South Asian</family> + <description>ARMSCII-8 Armenian</description> + <alias>armscii-8</alias> + <collation name="armscii8_general_ci" id="32" order="Armenian" flag="primary"/> + <collation name="armscii8_bin" id="64" order="Binary" flag="binary"/> +</charset> + +<charset name="utf8"> + <family>Unicode</family> + <description>UTF-8 Unicode</description> + <alias>utf-8</alias> + <collation name="utf8_general_ci" id="33"> + <flag>primary</flag> + <flag>compiled</flag> + </collation> + <collation name="utf8_bin" id="83"> + <flag>binary</flag> + <flag>compiled</flag> + </collation> +</charset> + +<charset name="ucs2"> + <family>Unicode</family> + <description>UCS-2 Unicode</description> + 
<collation name="ucs2_general_ci" id="35"> + <flag>primary</flag> + <flag>compiled</flag> + </collation> + <collation name="ucs2_bin" id="90"> + <flag>binary</flag> + <flag>compiled</flag> + </collation> +</charset> + +<charset name="cp866"> + <family>Cyrillic</family> + <description>DOS Russian</description> + <alias>866</alias> + <alias>csibm866</alias> + <alias>ibm866</alias> + <alias>DOSCyrillicRussian</alias> + <collation name="cp866_general_ci" id="36" order="Russian" flag="primary"/> + <collation name="cp866_bin" id="68" order="Binary" flag="binary"/> +</charset> + +<charset name="keybcs2"> + <family>Central European</family> + <description>DOS Kamenicky Czech-Slovak</description> + <collation name="keybcs2_general_ci" id="37" order="Czech" flag="primary"/> + <collation name="keybcs2_bin" id="73" order="Binary" flag="binary"/> +</charset> + +<charset name="macce"> + <family>Central European</family> + <description>Mac Central European</description> + <alias>MacCentralEurope</alias> + <collation name="macce_general_ci" id="38" flag="primary"> + <order>Hungarian</order> + <order>Polish</order> + <order>Romanian</order> + <order>Croatian</order> + <order>Slovak</order> + <order>Slovenian</order> + <order>Sorbian</order> + </collation> + <collation name="macce_bin" id="43" order="Binary" flag="binary"/> +</charset> + +<charset name="macroman"> + <family>Western</family> + <description>Mac West European</description> + <alias>Mac</alias> + <alias>Macintosh</alias> + <alias>csmacintosh</alias> + <collation name="macroman_general_ci" id="39" flag="primary"> + <order>Dutch</order> + <order>English</order> + <order>French</order> + <order>German Duden</order> + <order>Italian</order> + <order>Latin</order> + <order>Portuguese</order> + <order>Spanish</order> + </collation> + <collation name="macroman_bin" id="53" order="Binary" flag="binary"/> + <!--collation name="macroman_ci" id="54"/--> + <!--collation name="macroman_ci_ai" id="55"/--> + <!--collation name="macroman_cs" id="56"/--> +</charset> + +<charset name="cp852"> + <family>Central European</family> + <description>DOS Central European</description> + <alias>852</alias> + <alias>cp852</alias> + <alias>ibm852</alias> + <collation name="cp852_general_ci" id="40" flag="primary"> + <order>Hungarian</order> + <order>Polish</order> + <order>Romanian</order> + <order>Croatian</order> + <order>Slovak</order> + <order>Slovenian</order> + <order>Sorbian</order> + </collation> + <collation name="cp852_bin" id="81" order="Binary" flag="binary"/> +</charset> + +<charset name="cp1256"> + <family>Arabic</family> + <description>Windows Arabic</description> + <alias>ms-arab</alias> + <alias>windows-1256</alias> + <collation name="cp1256_bin" id="67" order="Binary" flag="binary"/> + <collation name="cp1256_general_ci" id="57" order="Arabic" flag="primary"> + <order>Arabic</order> + <order>Persian</order> + <order>Pakistani</order> + <order>Urdu</order> + </collation> +</charset> + +<charset name="geostd8"> + <family>South Asian</family> + <description>GEOSTD8 Georgian</description> + <collation name="geostd8_general_ci" id="92" order="Georgian" flag="primary"/> + <collation name="geostd8_bin" id="93" order="Binary" flag="binary"/> +</charset> + +<charset name="binary"> + <description>Binary pseudo charset</description> + <collation name="binary" id="63" order="Binary"> + <flag>primary</flag> + <flag>compiled</flag> + </collation> +</charset> + +<charset name="cp932"> + <family>Japanese</family> + <description>SJIS for Windows Japanese</description> + 
<alias>windows-31j</alias> + <alias>cswindows31j</alias> + <alias>sjisms</alias> + <alias>windows-95j</alias> + <alias>x-sjis-cp932</alias> + <alias>ms932</alias> + <alias>sjisms</alias> + <collation name="cp932_japanese_ci" id="95" order="Japanese"> + <flag>primary</flag> + <flag>compiled</flag> + </collation> + <collation name="cp932_bin" id="96" order="Binary"> + <flag>binary</flag> + <flag>compiled</flag> + </collation> +</charset> + +</charsets> + diff --git a/sql/share/charsets/armscii8.xml b/sql/share/charsets/armscii8.xml new file mode 100644 index 00000000000..d0ab428345f --- /dev/null +++ b/sql/share/charsets/armscii8.xml @@ -0,0 +1,139 @@ +<?xml version='1.0' encoding="utf-8"?> + +<charsets> + +<copyright> + Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +</copyright> + +<charset name="armscii8"> + +<ctype> +<map> + 00 + 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 + 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 + 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 + 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 + 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 + 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 + 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 20 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 10 10 01 02 01 02 01 02 01 02 01 02 01 02 01 02 + 01 02 01 02 01 02 01 02 01 02 01 02 01 02 01 02 + 01 02 01 02 01 02 01 02 01 02 01 02 01 02 01 02 + 01 02 01 02 01 02 01 02 01 02 01 02 01 02 01 02 + 01 02 01 02 01 02 01 02 01 02 01 02 01 02 10 10 +</map> +</ctype> + + +<lower> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F + 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 B8 A9 AA AB AC AD AE AF + B0 B1 B3 B3 B5 B5 B7 B7 B9 B9 BB BB BD BD BF BF + C1 C1 C3 C3 C5 C5 C7 C7 C9 C9 CB CB CD CD CF CF + D1 D1 D3 D3 D5 D5 D7 D7 D9 D9 DB DB DD DD DF DF + E1 E1 E3 E3 E5 E5 E7 E7 E9 E9 EB EB ED ED EF EF + F1 F1 F3 F3 F5 F5 F7 F7 F9 F9 FB FB FD FD FE FF +</map> +</lower> + + +<upper> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F + 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 
51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 B2 B4 B4 B6 B6 B8 B8 BA BA BC BC BE BE + C0 C0 C2 C2 C4 C4 C6 C6 C8 C8 CA CA CC CC CE CE + D0 D0 D2 D2 D4 D4 D6 D6 D8 D8 DA DA DC DC DE DE + E0 E0 E2 E2 E4 E4 E6 E6 E8 E8 EA EA EC EC EE EE + F0 F0 F2 F2 F4 F4 F6 F6 F8 F8 FA FA FC FC FE FF +</map> +</upper> + + +<unicode> +<map> +0000 0001 0002 0003 0004 0005 0006 0007 0008 0009 000A 000B 000C 000D 000E 000F +0010 0011 0012 0013 0014 0015 0016 0017 0018 0019 001A 001B 001C 001D 001E 001F +0020 0021 0022 0023 0024 0025 0026 0027 0028 0029 002A 002B 002C 002D 002E 002F +0030 0031 0032 0033 0034 0035 0036 0037 0038 0039 003A 003B 003C 003D 003E 003F +0040 0041 0042 0043 0044 0045 0046 0047 0048 0049 004A 004B 004C 004D 004E 004F +0050 0051 0052 0053 0054 0055 0056 0057 0058 0059 005A 005B 005C 005D 005E 005F +0060 0061 0062 0063 0064 0065 0066 0067 0068 0069 006A 006B 006C 006D 006E 006F +0070 0071 0072 0073 0074 0075 0076 0077 0078 0079 007A 007B 007C 007D 007E 007F +0080 0081 0082 0083 0084 0085 0086 0087 0088 0089 008A 008B 008C 008D 008E 008F +0090 0091 0092 0093 0094 0095 0096 0097 0098 0099 009A 009B 009C 009D 009E 009F +00A0 2741 00A7 0589 0029 0028 00BB 00AB 2014 002E 055D 002C 002D 055F 2026 055C +055B 055E 0531 0561 0532 0562 0533 0563 0534 0564 0535 0565 0536 0566 0537 0567 +0538 0568 0539 0569 053A 056A 053B 056B 053C 056C 053D 056D 053E 056E 053F 056F +0540 0570 0541 0571 0542 0572 0543 0573 0544 0574 0545 0575 0546 0576 0547 0577 +0548 0578 0549 0579 054A 057A 054B 057B 054C 057C 054D 057D 054E 057E 054F 057F +0550 0580 0551 0581 0552 0582 0553 0583 0554 0584 0555 0585 0556 0586 2019 0027 +</map> +</unicode> + + +<collation name="armscii8_general_ci"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F + 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</collation> + +<collation name="armscii8_bin" flag="binary"/> + +</charset> + +</charsets> diff --git a/sql/share/charsets/ascii.xml b/sql/share/charsets/ascii.xml new file mode 100644 index 00000000000..3813bd42601 --- /dev/null +++ b/sql/share/charsets/ascii.xml @@ -0,0 +1,139 @@ +<?xml version='1.0' encoding="utf-8"?> + +<charsets> + +<copyright> + Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +</copyright> + +<charset name="ascii"> + +<ctype> +<map> + 00 + 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 + 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 + 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 + 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 + 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 + 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 + 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 20 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 +</map> +</ctype> + + +<lower> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F + 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</lower> + + +<upper> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F + 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</upper> + + +<unicode> +<map> +0000 0001 0002 0003 0004 0005 0006 0007 0008 0009 000A 000B 000C 000D 000E 000F +0010 0011 0012 0013 0014 0015 0016 0017 0018 0019 001A 001B 001C 001D 001E 001F +0020 0021 0022 0023 0024 0025 0026 0027 0028 0029 002A 002B 002C 002D 002E 002F +0030 0031 0032 0033 0034 0035 0036 0037 0038 0039 003A 003B 003C 003D 003E 003F +0040 0041 0042 0043 0044 0045 0046 0047 0048 0049 004A 004B 004C 004D 004E 004F +0050 0051 0052 0053 0054 0055 0056 0057 0058 0059 005A 005B 005C 005D 005E 005F +0060 0061 0062 0063 0064 0065 0066 0067 0068 0069 006A 006B 006C 006D 006E 006F +0070 0071 0072 0073 0074 0075 0076 0077 0078 0079 007A 007B 007C 007D 007E 0000 +0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 +0000 0000 0000 0000 0000 0000 0000 0000 
0000 0000 0000 0000 0000 0000 0000 0000 +0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 +0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 +0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 +0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 +0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 +0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 +</map> +</unicode> + + +<collation name="ascii_general_ci"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 5C 5D 5B 5E 5F + 45 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 59 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</collation> + +<collation name="ascii_bin" flag="binary"/> + +</charset> + +</charsets> diff --git a/sql/share/charsets/win1250.conf b/sql/share/charsets/cp1250.xml index 31d253d7381..1e62e64ad5a 100644 --- a/sql/share/charsets/win1250.conf +++ b/sql/share/charsets/cp1250.xml @@ -1,6 +1,29 @@ -# Configuration file for the win1250 character set. +<?xml version='1.0' encoding="utf-8"?> -# The ctype array must have 257 elements. +<charsets> + +<copyright> + Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +</copyright> + +<charset name="cp1250"> + +<ctype> +<map> 00 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 @@ -18,8 +41,12 @@ 01 01 01 01 01 01 01 10 01 01 01 01 01 01 01 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 10 02 02 02 02 02 02 02 10 +</map> +</ctype> -# The to_lower array must have 256 elements. 
+ +<lower> +<map> 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F @@ -30,14 +57,18 @@ 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F 80 81 82 83 84 85 86 87 88 89 9A 8B 9C 9D 9E 9F 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 B3 A4 B9 A6 DF A8 A9 BA AB AC AD AE BF + A0 A1 A2 B3 A4 B9 A6 A7 A8 A9 BA AB AC AD AE BF B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BE BD BE BF E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF F0 F1 F2 F3 F4 F5 F6 D7 F8 F9 FA FB FC FD FE DF E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</lower> + -# The to_upper array must have 256 elements. +<upper> +<map> 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F @@ -51,11 +82,37 @@ A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF B0 B1 B2 A3 B4 B5 B6 B7 B8 A5 AA BB BC BD BC AF C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE A7 + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF D0 D1 D2 D3 D4 D5 D6 F7 D8 D9 DA DB DC DD DE FF +</map> +</upper> -# The sort_order array must have 256 elements. + +<unicode> +<map> +0000 0001 0002 0003 0004 0005 0006 0007 0008 0009 000A 000B 000C 000D 000E 000F +0010 0011 0012 0013 0014 0015 0016 0017 0018 0019 001A 001B 001C 001D 001E 001F +0020 0021 0022 0023 0024 0025 0026 0027 0028 0029 002A 002B 002C 002D 002E 002F +0030 0031 0032 0033 0034 0035 0036 0037 0038 0039 003A 003B 003C 003D 003E 003F +0040 0041 0042 0043 0044 0045 0046 0047 0048 0049 004A 004B 004C 004D 004E 004F +0050 0051 0052 0053 0054 0055 0056 0057 0058 0059 005A 005B 005C 005D 005E 005F +0060 0061 0062 0063 0064 0065 0066 0067 0068 0069 006A 006B 006C 006D 006E 006F +0070 0071 0072 0073 0074 0075 0076 0077 0078 0079 007A 007B 007C 007D 007E 007F +20AC 0000 201A 0000 201E 2026 2020 2021 0000 2030 0160 2039 015A 0164 017D 0179 +0000 2018 2019 201C 201D 2022 2013 2014 0000 2122 0161 203A 015B 0165 017E 017A +00A0 02C7 02D8 0141 00A4 0104 00A6 00A7 00A8 00A9 015E 00AB 00AC 00AD 00AE 017B +00B0 00B1 02DB 0142 00B4 00B5 00B6 00B7 00B8 0105 015F 00BB 013D 02DD 013E 017C +0154 00C1 00C2 0102 00C4 0139 0106 00C7 010C 00C9 0118 00CB 011A 00CD 00CE 010E +0110 0143 0147 00D3 00D4 0150 00D6 00D7 0158 016E 00DA 0170 00DC 00DD 0162 00DF +0155 00E1 00E2 0103 00E4 013A 0107 00E7 010D 00E9 0119 00EB 011B 00ED 00EE 010F +0111 0144 0148 00F3 00F4 0151 00F6 00F7 0159 016F 00FA 0171 00FC 00FD 0163 02D9 +</map> +</unicode> + + +<collation name="cp1250_general_ci"> +<map> 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F @@ -72,3 +129,34 @@ 47 53 53 55 55 55 55 D7 58 5C 5C 5C 5C 60 5B 59 58 41 41 41 41 50 45 43 44 49 49 49 49 4D 4D 46 47 53 53 55 55 55 55 F7 58 5C 5C 5C 5C 60 5B FF +</map> +</collation> + +<collation name="cp1250_croatian_ci"> +<map> +00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F +10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F +20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F +30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F +40 41 43 44 48 4B 4D 4E 4F 50 52 53 54 56 57 59 +5B 5C 5D 5F 62 64 66 67 68 69 6B 90 91 92 93 94 +95 41 43 44 48 4B 4D 4E 4F 50 52 53 54 56 57 59 +5B 5C 5D 5F 62 64 66 67 68 69 6B 96 97 98 99 9A +9B 9C 9E 9F A0 A1 A2 A3 A4 A5 60 A6 5F 62 6C 6B +A7 A8 A9 AA AB AC AD AE AF B0 
60 B1 5F 62 6C 6B +B2 B3 B4 54 B5 41 B6 B7 B8 B9 5F BA BB BC BD 6B +BE BF C0 54 C1 C2 C3 C4 C5 41 5F C6 54 C7 54 6B +5D 41 41 41 41 54 47 44 46 4B 4B 4B 4B 50 50 48 +4A 57 57 59 59 59 59 C8 5D 64 64 64 64 69 62 5F +5D 41 41 41 41 54 47 44 46 4B 4B 4B 4B 50 50 48 +4A 57 57 59 59 59 59 C9 5D 64 64 64 64 69 62 FF +</map> +</collation> + +<collation name="cp1250_czech_ci"/> + +<collation name="cp1250_bin" flag="binary"/> + +</charset> + +</charsets> diff --git a/sql/share/charsets/cp1251.conf b/sql/share/charsets/cp1251.conf deleted file mode 100644 index 6af97c891b8..00000000000 --- a/sql/share/charsets/cp1251.conf +++ /dev/null @@ -1,74 +0,0 @@ -# Configuration file for the cp1251 character set - -# ctype array (must have 257 elements) - 00 - 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 - 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 - 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 - 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 - 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 - 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 00 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 00 00 00 00 00 00 00 00 01 00 00 00 00 00 00 00 - 00 00 00 00 00 00 00 00 02 00 00 00 00 00 00 00 - 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 - 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 - 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 - -# to_lower array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F - 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 B8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF - -# to_upper array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F - 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 A8 B9 BA BB BC BD BE BF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF - -# sort_order array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 
55 56 57 58 59 5A 7C 7D 7E 7F 80 - 81 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 82 83 84 85 FF - FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF - FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF - FF FF FF FF FF FF FF FF 61 FF FF FF FF FF FF FF - FF FF FF FF FF FF FF FF 61 FF FF FF FF FF FF FF - 5B 5C 5D 5E 5F 60 62 63 64 65 66 67 68 69 6A 6B - 6C 6D 6E 6F 70 71 72 73 74 75 76 77 78 79 7A 7B - 5B 5C 5D 5E 5F 60 62 63 64 65 66 67 68 69 6A 6B - 6C 6D 6E 6F 70 71 72 73 74 75 76 77 78 79 7A 7B diff --git a/sql/share/charsets/cp1251.xml b/sql/share/charsets/cp1251.xml new file mode 100644 index 00000000000..7f94788c0d0 --- /dev/null +++ b/sql/share/charsets/cp1251.xml @@ -0,0 +1,215 @@ +<?xml version='1.0' encoding="utf-8"?> + +<charsets> + +<copyright> + Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +</copyright> + +<charset name="cp1251"> + +<ctype> +<map> + 00 + 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 + 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 + 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 + 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 + 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 + 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 + 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 00 + 01 01 00 02 00 00 00 00 00 00 01 00 01 01 01 01 + 02 00 00 00 00 00 00 00 00 00 02 00 02 02 02 02 + 00 01 02 01 00 01 00 00 01 00 01 00 00 00 00 01 + 00 00 01 02 02 00 00 00 02 00 02 00 02 01 02 02 + 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 + 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 + 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 + 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 +</map> +</ctype> + + +<lower> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F + 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F + 90 83 82 83 84 85 86 87 88 89 9A 8B 9C 9D 9E 9F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A2 A2 BC A4 B4 A6 A7 B8 A9 BA AB AC AD AE BF + B0 B1 B3 B3 B4 B5 B6 B7 B8 B9 BA BB BC BE BE BF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</lower> + + +<upper> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 5B 
5C 5D 5E 5F + 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F + 80 81 82 81 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 80 91 92 93 94 95 96 97 98 99 8A 9B 8C 9D 8E 8F + A0 A1 A1 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 B2 A5 B5 B6 B7 A8 B9 AA BB A3 BD BD AF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF +</map> +</upper> + + +<unicode> +<map> + 0000 0001 0002 0003 0004 0005 0006 0007 0008 0009 000A 000B 000C 000D 000E 000F + 0010 0011 0012 0013 0014 0015 0016 0017 0018 0019 001A 001B 001C 001D 001E 001F + 0020 0021 0022 0023 0024 0025 0026 0027 0028 0029 002A 002B 002C 002D 002E 002F + 0030 0031 0032 0033 0034 0035 0036 0037 0038 0039 003A 003B 003C 003D 003E 003F + 0040 0041 0042 0043 0044 0045 0046 0047 0048 0049 004A 004B 004C 004D 004E 004F + 0050 0051 0052 0053 0054 0055 0056 0057 0058 0059 005A 005B 005C 005D 005E 005F + 0060 0061 0062 0063 0064 0065 0066 0067 0068 0069 006A 006B 006C 006D 006E 006F + 0070 0071 0072 0073 0074 0075 0076 0077 0078 0079 007A 007B 007C 007D 007E 007F + 0402 0403 201A 0453 201E 2026 2020 2021 0000 2030 0409 2039 040A 040C 040B 040F + 0452 2018 2019 201C 201D 2022 2013 2014 0000 2122 0459 203A 045A 045C 045B 045F + 00A0 040E 045E 0408 00A4 0490 00A6 00A7 0401 00A9 0404 00AB 00AC 00AD 00AE 0407 + 00B0 00B1 0406 0456 0491 00B5 00B6 00B7 0451 2116 0454 00BB 0458 0405 0455 0457 + 0410 0411 0412 0413 0414 0415 0416 0417 0418 0419 041A 041B 041C 041D 041E 041F + 0420 0421 0422 0423 0424 0425 0426 0427 0428 0429 042A 042B 042C 042D 042E 042F + 0430 0431 0432 0433 0434 0435 0436 0437 0438 0439 043A 043B 043C 043D 043E 043F + 0440 0441 0442 0443 0444 0445 0446 0447 0448 0449 044A 044B 044C 044D 044E 044F + +</map> +</unicode> + + +<collation name="cp1251_bulgarian_ci"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 7C 7D 7E 7F 80 + 81 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 82 83 84 85 FF + FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF + FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF + FF FF FF FF FF FF FF FF 61 FF FF FF FF FF FF FF + FF FF FF FF FF FF FF FF 61 FF FF FF FF FF FF FF + 5B 5C 5D 5E 5F 60 62 63 64 65 66 67 68 69 6A 6B + 6C 6D 6E 6F 70 71 72 73 74 75 76 77 78 79 7A 7B + 5B 5C 5D 5E 5F 60 62 63 64 65 66 67 68 69 6A 6B + 6C 6D 6E 6F 70 71 72 73 74 75 76 77 78 79 7A 7B +</map> +</collation> + + +<collation name="cp1251_bin" flag="binary"/> + + +<collation name="cp1251_general_ci"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 43 45 47 49 4B 4D 4F 51 53 55 57 59 5B 5D + 5F 61 63 65 67 69 6B 6D 6F 71 73 D3 D4 D5 D6 D7 + D8 41 43 45 47 49 4B 4D 4F 51 53 55 57 59 5B 5D + 5F 61 63 65 67 69 6B 6D 6F 71 73 D9 DA DB DC DD + 81 83 DE 83 DF E0 E1 E2 E3 E4 A1 E5 A7 9D B3 C1 + 81 E6 E7 E8 E9 EA EB EC ED EE A1 EF A7 9D B3 C1 + F0 B7 B7 99 F1 7D F2 F3 87 F4 89 F5 F6 F7 F8 95 + F9 FA 93 93 7D FB FC FD 87 FE 89 FF 99 8F 8F 95 + 75 77 79 7B 7F 85 8B 8D 91 97 9B 9F A3 A5 A9 AB + AD AF B1 B5 B9 BB BD BF C3 C5 C7 C9 CB CD CF 
D1 + 75 77 79 7B 7F 85 8B 8D 91 97 9B 9F A3 A5 A9 AB + AD AF B1 B5 B9 BB BD BF C3 C5 C7 C9 CB CD CF D1 +</map> +</collation> + + +<collation name="cp1251_general_cs"> +<!-- +# Case insensitive, accent sensitive +# Sort order is correct for Belarusian, Bulgarian, Macedonian, +# Russian, Serbian, Mongolian languages. Almost good for Ukrainian, +# except that "CYRILLIC LETTER SOFT SIGN" is not in the end of alphabet, +# but between YERU and E. +--> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 43 45 47 49 4B 4D 4F 51 53 55 57 59 5B 5D + 5F 61 63 65 67 69 6B 6D 6F 71 73 D3 D4 D5 D6 D7 + D8 42 44 46 48 4A 4C 4E 50 52 54 56 58 5A 5C 5E + 60 62 64 66 68 6A 6C 6E 70 72 74 D9 DA DB DC DD + 81 83 DE 84 DF E0 E1 E2 E3 E4 A1 E5 A7 9D B3 C1 + 82 E6 E7 E8 E9 EA EB EC ED EE A2 EF A8 9E B4 C2 + F0 B7 B8 99 F1 7D F2 F3 87 F4 89 F5 F6 F7 F8 95 + F9 FA 93 94 7E FB FC FD 88 FE 8A FF 9A 8F 90 96 + 75 77 79 7B 7F 85 8B 8D 91 97 9B 9F A3 A5 A9 AB + AD AF B1 B5 B9 BB BD BF C3 C5 C7 C9 CB CD CF D1 + 76 78 7A 7C 80 86 8C 8E 92 98 9C A0 A4 A6 AA AC + AE B0 B2 B6 BA BC BE C0 C4 C6 C8 CA CC CE D0 D2 +</map> +</collation> + + +<collation name="cp1251_ukrainian_ci"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F + 20 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F + A5 A6 A7 A8 A9 AA AB AC AD AE AF B0 B1 B2 B3 B4 + B5 B6 B7 B8 B9 BA BB BC BD BE BF C0 C1 C2 C3 C4 + C5 C6 C7 C8 C9 84 CA CB 88 CC 87 CD CE CF D0 8D + D1 D2 8C 8C 84 D3 D4 D5 88 D6 87 D7 D8 D9 DA 8D + 80 81 82 83 85 86 89 8A 8B 8E 8F 90 91 92 93 94 + 95 96 97 98 99 9A 9B 9C 9D 9E 9F A0 A1 A2 A3 A4 + 80 81 82 83 85 86 89 8A 8B 8E 8F 90 91 92 93 94 + 95 96 97 98 99 9A 9B 9C 9D 9E 9F A0 A1 A2 A3 A4 +</map> +</collation> + + +</charset> + +</charsets> diff --git a/sql/share/charsets/cp1256.xml b/sql/share/charsets/cp1256.xml new file mode 100644 index 00000000000..69eb6a68238 --- /dev/null +++ b/sql/share/charsets/cp1256.xml @@ -0,0 +1,142 @@ +<?xml version='1.0' encoding="utf-8"?> + +<!-- Arabic, Persian, Pakistani, Urdu --> + +<charsets> + +<copyright> + Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +</copyright> + +<charset name="cp1256"> + +<ctype> +<map> + 00 + 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 + 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 + 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 + 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 + 01 01 01 01 01 01 01 01 01 01 01 10 10 10 00 00 + 00 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 + 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 20 + 00 03 00 02 00 00 00 00 00 00 00 00 01 03 03 00 + 03 10 10 10 10 00 00 00 00 00 00 00 02 00 00 00 + 00 10 00 00 00 00 00 00 00 00 00 10 10 10 00 00 + 10 10 00 00 00 00 00 00 00 00 10 10 00 00 00 10 + 00 03 03 03 03 03 03 03 03 03 03 03 03 03 03 03 + 03 03 03 03 03 03 03 00 03 03 03 03 03 03 03 03 + 02 03 02 03 03 03 03 02 02 02 02 02 03 03 02 02 + 03 03 03 03 02 03 03 00 03 02 03 02 02 00 00 00 +</map> +</ctype> + + +<lower> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 54 75 76 77 78 79 7A 5B 5C 5D 5E 5F + 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 54 75 76 77 78 79 7A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 9C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</lower> + + +<upper> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 74 55 56 57 58 59 5A 5B 5C 5F 5E 5F + 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 74 55 56 57 58 59 5A 7B 7C 7F 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 8C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</upper> + + +<unicode> +<map> + 0000 0001 0002 0003 0004 0005 0006 0007 0008 0009 000A 000B 000C 000D 000E 000F + 0010 0011 0012 0013 0014 0015 0016 0017 0018 0019 001A 001B 001C 001D 001E 001F + 0020 0021 0022 0023 0024 0025 0026 0027 0028 0029 002A 002B 002C 002D 002E 002F + 0030 0031 0032 0033 0034 0035 0036 0037 0038 0039 003A 003B 003C 003D 003E 003F + 0040 0041 0042 0043 0044 0045 0046 0047 0048 0049 004A 004B 004C 004D 004E 004F + 0050 0051 0052 0053 0054 0055 0056 0057 0058 0059 005A 005B 005C 005D 005E 005F + 0060 0061 0062 0063 0064 0065 0066 0067 0068 0069 006A 006B 006C 006D 006E 006F + 0070 0071 0072 0073 0074 0075 0076 0077 0078 0079 007A 007B 007C 007D 007E 007F + 20AC 067E 201A 0192 201E 2026 2020 2021 02C6 2030 0000 2039 0152 0686 0698 0000 + 06AF 2018 2019 201C 201D 
2022 2013 2014 0000 2122 0000 203A 0153 200C 200D 0000 + 00A0 060C 00A2 00A3 00A4 00A5 00A6 00A7 00A8 00A9 0000 00AB 00AC 00AD 00AE 00AF + 00B0 00B1 00B2 00B3 00B4 00B5 00B6 00B7 00B8 00B9 061B 00BB 00BC 00BD 00BE 061F + 0000 0621 0622 0623 0624 0625 0626 0627 0628 0629 062A 062B 062C 062D 062E 062F + 0630 0631 0632 0633 0634 0635 0636 00D7 0637 0638 0639 063A 0640 0641 0642 0643 + 00E0 0644 00E2 0645 0646 0647 0648 00E7 00E8 00E9 00EA 00EB 0649 064A 00EE 00EF + 064B 064C 064D 064E 00F4 064F 0650 00F7 0651 00F9 0652 00FB 00FC 200E 200F 0000 +</map> +</unicode> + + +<collation name="cp1256_general_ci"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 45 47 4A 4C 52 55 57 59 5D 5F 61 63 65 67 + 6C 6E 70 72 74 76 7B 7D 7F 81 83 B9 BA BB BC BD + BE 41 45 47 4A 4C 52 55 57 59 5D 5F 61 63 65 67 + 6C 6E 70 72 74 76 7B 7D 7F 81 83 BF C0 C1 C2 C3 + C4 8E C5 54 C6 C7 C8 C9 CA CB CC CD 6A 92 99 CE + A5 CF D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 6A DA DB DC + DD B6 DE DF E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB + EC ED EE EF F0 F1 F2 F3 F4 F5 B7 F6 F7 F8 F9 B8 + FA 85 86 87 88 89 8A 8B 8C 8D 9F 90 91 93 94 95 + 96 97 98 9A 9B 9C 9D FB 9E 9F A0 A1 AD A2 A3 A4 + 43 A6 44 A7 A8 A9 AA 49 4E 4F 50 51 AB AC 5B 5C + AE AF B0 B1 69 B2 B3 FC B4 78 B5 79 7A FD FE FF +</map> +</collation> + +<collation name="cp1256_bin" flag="binary"/> + +</charset> + +</charsets> + diff --git a/sql/share/charsets/cp1257.conf b/sql/share/charsets/cp1257.conf deleted file mode 100644 index 610ed5a646f..00000000000 --- a/sql/share/charsets/cp1257.conf +++ /dev/null @@ -1,74 +0,0 @@ -# Configuration file for the cp1257 character set. - -# The ctype array must have 257 elements. - 00 - 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 - 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 - 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 - 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 - 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 - 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 00 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 01 01 00 00 00 00 01 00 01 00 00 01 00 00 00 00 - 01 00 00 00 00 00 00 00 01 00 00 01 00 00 01 00 - 02 02 00 00 00 00 02 00 02 00 00 02 00 00 00 00 - 02 00 00 00 00 00 00 00 02 00 00 02 00 00 02 00 - -# The to_lower array must have 256 elements. - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F - 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - E0 E1 C2 C3 C4 C5 E6 C7 E8 C9 CA EB CC CD CE CF - F0 D1 D2 D3 D4 D5 D6 D7 F8 D9 DA FB DC DD FE DF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF - -# The to_upper array must have 256 elements. 
- 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F - 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF - C0 C1 E2 E3 E4 E5 C6 E7 C8 E9 EA CB EC ED EE EF - D0 F1 F2 F3 F4 F5 F6 F7 D8 F9 FA DB FC FD DE FF - -# The sort_order array must have 256 elements. - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 43 44 46 47 4A 4B 4C 4D 50 51 52 53 54 55 - 56 57 58 59 5B 5C 5F 60 61 4E FF 62 63 64 65 66 - 67 41 43 44 46 47 4A 4B 4C 4D 50 51 52 53 54 55 - 56 57 58 59 5B 5C 5F 60 61 4E FF 68 69 6A 6B FF - FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF - FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF - FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF - FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF - 42 4F FF FF FF FF 48 FF 45 FF FF 49 FF FF FF FF - 5A FF FF FF FF FF FF FF 5E FF FF 5D FF FF FF FF - FF 4F FF FF FF FF 48 FF 45 FF FF 49 FF FF FF FF - 5A FF FF FF FF FF FF FF 5E FF FF 5D FF FF FF FF diff --git a/sql/share/charsets/cp1257.xml b/sql/share/charsets/cp1257.xml new file mode 100644 index 00000000000..93a1bd47a77 --- /dev/null +++ b/sql/share/charsets/cp1257.xml @@ -0,0 +1,228 @@ +<?xml version='1.0' encoding="utf-8"?> + +<charsets> + +<copyright> + Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +</copyright> + +<charset name="cp1257"> + +<ctype> +<map> + 00 + 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 + 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 + 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 + 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 + 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 + 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 + 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 00 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 00 00 00 00 00 00 00 00 01 00 01 00 00 00 00 01 + 00 00 00 00 00 00 00 00 02 00 02 00 00 00 00 02 + 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 + 01 01 01 01 01 01 01 00 01 01 01 01 01 01 01 02 + 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 + 02 02 02 02 02 02 02 00 02 02 02 02 02 02 02 00 +</map> +</ctype> + + +<lower> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F + 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 B8 A9 BA AB AC AD AE BF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 D7 F8 F9 FA FB FC FD FE DF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</lower> + + +<upper> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F + 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 BA AB AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 A8 B9 BA BB BC BD BE AF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 F7 D8 D9 DA DB DC DD DE FF +</map> +</upper> + + +<unicode> +<map> + 0000 0001 0002 0003 0004 0005 0006 0007 0008 0009 000A 000B 000C 000D 000E 000F + 0010 0011 0012 0013 0014 0015 0016 0017 0018 0019 001A 001B 001C 001D 001E 001F + 0020 0021 0022 0023 0024 0025 0026 0027 0028 0029 002A 002B 002C 002D 002E 002F + 0030 0031 0032 0033 0034 0035 0036 0037 0038 0039 003A 003B 003C 003D 003E 003F + 0040 0041 0042 0043 0044 0045 0046 0047 0048 0049 004A 004B 004C 004D 004E 004F + 0050 0051 0052 0053 0054 0055 0056 0057 0058 0059 005A 005B 005C 005D 005E 005F + 0060 0061 0062 0063 0064 0065 0066 0067 0068 0069 006A 006B 006C 006D 006E 006F + 0070 0071 0072 0073 0074 0075 0076 0077 0078 0079 007A 007B 007C 007D 007E 007F + 20AC 0000 201A 0000 201E 2026 2020 2021 0000 2030 0000 2039 0000 00A8 02C7 00B8 + 0000 2018 2019 201C 201D 
2022 2013 2014 0000 2122 0000 203A 0000 00AF 02DB 0000 + 00A0 0000 00A2 00A3 00A4 0000 00A6 00A7 00D8 00A9 0156 00AB 00AC 00AD 00AE 00C6 + 00B0 00B1 00B2 00B3 00B4 00B5 00B6 00B7 00F8 00B9 0157 00BB 00BC 00BD 00BE 00E6 + 0104 012E 0100 0106 00C4 00C5 0118 0112 010C 00C9 0179 0116 0122 0136 012A 013B + 0160 0143 0145 00D3 014C 00D5 00D6 00D7 0172 0141 015A 016A 00DC 017B 017D 00DF + 0105 012F 0101 0107 00E4 00E5 0119 0113 010D 00E9 017A 0117 0123 0137 012B 013C + 0161 0144 0146 00F3 014D 00F5 00F6 00F7 0173 0142 015B 016B 00FC 017C 017E 02D9 +</map> +</unicode> + + +<collation name="cp1257_lithuanian_ci"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 43 44 46 47 4A 4B 4C 4D 50 51 52 53 54 55 + 56 57 58 59 5B 5C 5F 60 61 4E FF 62 63 64 65 66 + 67 41 43 44 46 47 4A 4B 4C 4D 50 51 52 53 54 55 + 56 57 58 59 5B 5C 5F 60 61 4E FF 68 69 6A 6B FF + FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF + FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF + FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF + FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF + 42 4F FF FF FF FF 48 FF 45 FF FF 49 FF FF FF FF + 5A FF FF FF FF FF FF FF 5E FF FF 5D FF FF FF FF + FF 4F FF FF FF FF 48 FF 45 FF FF 49 FF FF FF FF + 5A FF FF FF FF FF FF FF 5E FF FF 5D FF FF FF FF +</map> +</collation> + + +<collation name="cp1257_bin" flag="binary"/> + + +<collation name="cp1257_general_ci"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 4D 4F 55 57 61 63 67 69 6F 71 75 7B 7D 83 + 8F 91 93 97 9E A0 A8 AA AC AE B0 B8 B9 BA BB BC + BD 41 4D 4F 55 57 61 63 67 69 6F 71 75 7B 7D 83 + 8F 91 93 97 9E A0 A8 AA AC AE B0 BE BF C0 C1 C4 + C5 C6 C7 C8 C9 CA CB CC CD CE CF D0 D1 D2 D3 D4 + D5 D6 D7 D8 D9 DA DB DC DD DE DF E0 E1 E2 E3 E4 + E5 E6 E7 E8 E9 EA EB EC 83 ED 93 EE EF F0 F1 41 + F2 F3 F4 F5 F6 F7 F8 F9 83 FA 93 FB FC FD FE 41 + 41 69 41 4F 41 41 57 57 4F 57 B0 57 63 71 69 75 + 97 7D 7D 83 83 83 83 C2 A0 75 97 A0 A0 B0 B0 97 + 41 69 41 4F 41 41 57 57 4F 57 B0 57 63 71 69 75 + 97 7D 7D 83 83 83 83 C3 A0 75 97 A0 A0 B0 B0 FF +</map> +</collation> + + +<collation name="cp1257_ci"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 4D 4F 55 57 61 63 67 69 6F 71 75 7B 7D 83 + 8F 91 93 97 9E A0 A8 AA AC AE B0 B8 B9 BA BB BC + BD 41 4D 4F 55 57 61 63 67 69 6F 71 75 7B 7D 83 + 8F 91 93 97 9E A0 A8 AA AC AE B0 BE BF C0 C1 C4 + C5 C6 C7 C8 C9 CA CB CC CD CE CF D0 D1 D2 D3 D4 + D5 D6 D7 D8 D9 DA DB DC DD DE DF E0 E1 E2 E3 E4 + E5 E6 E7 E8 E9 EA EB EC 85 ED 95 EE EF F0 F1 4B + F2 F3 F4 F5 F6 F7 F8 F9 85 FA 95 FB FC FD FE 4B + 43 6B 45 51 47 49 59 5B 53 5D B2 5F 65 73 6D 77 + 99 7F 81 87 89 8B 8D C2 A2 79 9B A4 A6 B4 B6 9D + 43 6B 45 51 47 49 59 5B 53 5D B2 5F 65 73 6D 77 + 99 7F 81 87 89 8B 8D C3 A2 79 9B A4 A6 B4 B6 FF +</map> +</collation> + + +<collation name="cp1257_cs"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 4D 4F 55 57 61 63 67 69 6F 71 75 7B 7D 83 + 8F 91 93 97 9E A0 A8 AA AC AE B0 B8 
B9 BA BB BC + BD 42 4E 50 56 58 62 64 68 6A 70 72 76 7C 7E 84 + 90 92 94 98 9F A1 A9 AB AD AF B1 BE BF C0 C1 C4 + C5 C6 C7 C8 C9 CA CB CC CD CE CF D0 D1 D2 D3 D4 + D5 D6 D7 D8 D9 DA DB DC DD DE DF E0 E1 E2 E3 E4 + E5 E6 E7 E8 E9 EA EB EC 85 ED 95 EE EF F0 F1 4B + F2 F3 F4 F5 F6 F7 F8 F9 86 FA 96 FB FC FD FE 4C + 43 6B 45 51 47 49 59 5B 53 5D B2 5F 65 73 6D 77 + 99 7F 81 87 89 8B 8D C2 A2 79 9B A4 A6 B4 B6 9D + 44 6C 46 52 48 4A 5A 5C 54 5E B3 60 66 74 6E 78 + 9A 80 82 88 8A 8C 8E C3 A3 7A 9C A5 A7 B5 B7 FF +</map> +</collation> + + +<collation name="cp1257ltlv"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 47 49 4D 4F 57 59 5D 5F 65 67 6B 6F 71 75 + 79 7B 7D 81 85 87 8D 8F 91 93 95 FF FF FF FF FF + FF 42 48 4A 4E 50 58 5A 5E 60 66 68 6C 70 72 76 + 7A 7C 7E 82 86 88 8E 90 92 94 96 FF FF FF FF FF + FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF + FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF + FF FF FF FF FF FF FF FF FF FF 7F FF FF FF FF FF + FF FF FF FF FF FF FF FF FF FF 80 FF FF FF FF FF + 45 63 43 FF FF FF 53 51 4B FF FF 55 5B 69 61 6D + 83 FF 73 FF 77 FF FF FF 8B FF FF 89 FF 99 97 FF + 46 64 44 FF FF FF 54 52 4C FF FF 56 5C 6A 62 6E + 84 FF 74 FF 78 FF FF FF 8C FF FF 8A FF 9A 98 FF +</map> +</collation> + +</charset> + +</charsets> diff --git a/sql/share/charsets/cp850.xml b/sql/share/charsets/cp850.xml new file mode 100644 index 00000000000..79497aa17f1 --- /dev/null +++ b/sql/share/charsets/cp850.xml @@ -0,0 +1,139 @@ +<?xml version='1.0' encoding="utf-8"?> + +<charsets> + +<copyright> + Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +</copyright> + +<charset name="cp850"> + +<ctype> +<map> + 00 + 20 30 30 30 30 30 30 20 20 28 28 28 28 28 30 30 + 30 30 30 30 30 30 30 30 30 30 20 30 30 30 30 30 + 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 + 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 + 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 + 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 + 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 30 + 01 02 02 02 02 02 02 02 02 02 02 02 02 02 01 01 + 01 02 01 02 02 02 02 02 02 01 01 02 10 01 10 10 + 02 02 02 02 02 01 10 10 10 10 10 10 10 10 10 10 + 10 10 10 10 10 01 01 01 10 10 10 10 10 10 10 10 + 10 10 10 10 10 10 02 01 10 10 10 10 10 10 10 10 + 02 01 01 01 01 02 01 01 01 10 10 10 10 10 01 10 + 01 02 01 01 02 01 10 02 01 01 01 01 02 01 10 10 + 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 20 +</map> +</ctype> + + +<lower> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F + 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F + 87 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 84 86 + 82 91 91 93 94 95 96 97 98 94 81 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A4 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</lower> + + +<upper> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F + 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F + 80 9A 90 41 8E 41 8F 80 45 45 45 49 49 49 8E 8F + 90 92 92 4F 99 4F 55 55 59 99 9A 9B 9C 9D 9E 9F + 41 49 4F 55 A5 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</upper> + + +<unicode> +<map> +0000 0001 0002 0003 0004 0005 0006 0007 0008 0009 000a 000b 000c 000d 000e 000f +0010 0011 0012 0013 0014 0015 0016 0017 0018 0019 001a 001b 001c 001d 001e 001f +0020 0021 0022 0023 0024 0025 0026 0027 0028 0029 002a 002b 002c 002d 002e 002f +0030 0031 0032 0033 0034 0035 0036 0037 0038 0039 003a 003b 003c 003d 003e 003f +0040 0041 0042 0043 0044 0045 0046 0047 0048 0049 004a 004b 004c 004d 004e 004f +0050 0051 0052 0053 0054 0055 0056 0057 0058 0059 005a 005b 005c 005d 005e 005f +0060 0061 0062 0063 0064 0065 0066 0067 0068 0069 006a 006b 006c 006d 006e 006f +0070 0071 0072 0073 0074 0075 0076 0077 0078 0079 007a 007b 007c 007d 007e 007f +00c7 00fc 00e9 00e2 00e4 00e0 00e5 00e7 00ea 00eb 00e8 00ef 00ee 00ec 00c4 00c5 +00c9 00e6 00c6 00f4 00f6 00f2 00fb 00f9 
00ff 00d6 00dc 00f8 00a3 00d8 00d7 0192 +00e1 00ed 00f3 00fa 00f1 00d1 00aa 00ba 00bf 00ae 00ac 00bd 00bc 00a1 00ab 00bb +2591 2592 2593 2502 2524 00c1 00c2 00c0 00a9 2563 2551 2557 255d 00a2 00a5 2510 +2514 2534 252c 251c 2500 253c 00e3 00c3 255a 2554 2569 2566 2560 2550 256c 00a4 +00f0 00d0 00ca 00cb 00c8 0131 00cd 00ce 00cf 2518 250c 2588 2584 00a6 00cc 2580 +00d3 00df 00d4 00d2 00f5 00d5 00b5 00fe 00de 00da 00db 00d9 00fd 00dd 00af 00b4 +00ad 00b1 2017 00be 00b6 00a7 00f7 00b8 00b0 00a8 00b7 00b9 00b3 00b2 25a0 00a0 +</map> +</unicode> + + +<collation name="cp850_general_ci"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 51 53 55 59 63 65 67 69 74 76 78 7A 7C 80 + 8E 90 92 94 97 99 A3 A5 A7 A9 AE B1 B2 B3 B4 B5 + B6 41 51 53 55 59 63 65 67 69 74 76 78 7A 7C 80 + 8E 90 92 94 97 99 A3 A5 A7 A9 AE B7 B8 B9 BA BB + 54 A1 5D 47 4B 43 4D 54 5F 61 5B 71 6F 6B 4B 4D + 5D 4F 4F 86 8A 82 9F 9B AD 8A A1 8C E3 8C BD BE + 45 6D 84 9D 7E 7E EA FA FF EE EC FD FC CE EB FB + DC DD DE C3 C9 45 47 43 E9 D5 CF D1 D3 E2 E5 C5 + C6 CB CA C8 C2 CC 49 49 D2 D0 D7 D6 D4 CD D8 E4 + 57 57 5F 61 5B 73 6D 6F 71 C7 C4 DB DA E6 6B D9 + 84 96 86 82 88 88 F5 B0 B0 9D 9F 9B AB AB EF F4 + ED F1 C1 FE F6 E7 BF BC F0 E8 F7 F9 F3 F2 DF E0 +</map> +</collation> + +<collation name="cp850_bin" flag="binary"/> + +</charset> + +</charsets> diff --git a/sql/share/charsets/cp852.xml b/sql/share/charsets/cp852.xml new file mode 100644 index 00000000000..73a81e54b02 --- /dev/null +++ b/sql/share/charsets/cp852.xml @@ -0,0 +1,139 @@ +<?xml version='1.0' encoding="utf-8"?> + +<charsets> + +<copyright> + Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +</copyright> + +<charset name="cp852"> + +<ctype> +<map> + 00 + 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 + 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 + 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 + 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 + 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 + 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 + 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 00 + 01 02 02 02 02 02 02 02 02 02 01 02 02 01 01 01 + 01 01 02 02 02 01 02 01 02 01 01 01 02 01 00 02 + 02 02 02 02 01 02 01 02 01 02 00 02 01 01 00 00 + 00 00 00 00 00 01 01 01 02 00 00 00 00 01 02 00 + 00 00 00 00 00 00 01 02 00 00 00 00 00 00 00 00 + 02 01 01 01 02 01 01 01 02 00 00 00 00 01 01 00 + 01 02 01 01 02 02 01 02 01 01 02 01 02 01 02 00 + 00 00 00 00 00 00 00 00 00 00 00 02 01 02 00 48 +</map> +</ctype> + + +<lower> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F + 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F + 87 81 82 83 84 85 86 87 88 89 8B 8B 8C AB 84 86 + 82 92 92 93 94 96 96 98 98 94 81 9C 9C 88 9E 9F + A0 A1 A2 A3 A5 A5 A7 A7 A9 A9 AA AB 9F B8 AE AF + B0 B1 B2 B3 B4 A0 83 D8 B8 B9 BA BB BC BE BE BF + C0 C1 C2 C3 C4 C5 C7 C7 C8 C9 CA CB CC CD CE CF + D0 D0 D4 89 D4 E5 A1 8C D8 D9 DA DB DC EE 85 DF + A2 E1 93 E4 E4 E5 E7 E7 EA A3 E8 FB EC EC EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</lower> + + +<upper> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F + 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F + 80 9A 90 B6 8E DE 8F 80 9D D3 8A 8A D7 8D 8E 8F + 90 91 91 E2 99 95 95 97 97 99 9A 9B 9B 9D 9E AC + B5 D6 E0 E9 A4 A4 A6 A6 A8 A8 AA 8D AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 AD B9 BA BB BC BE BD BF + C0 C1 C2 C3 C4 C5 C6 C6 C8 C9 CA CB CC CD CE CF + D1 D1 D2 D3 D2 D5 D6 D7 B7 D9 DA DB DC DD DE DF + E0 E1 E2 E3 E3 D5 E6 E6 E8 E9 E8 EB ED ED DD EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA EB FC FC FE FF +</map> +</upper> + + +<unicode> +<map> + 0000 0001 0002 0003 0004 0005 0006 0007 0008 0009 000A 000B 000C 000D 000E 000F + 0010 0011 0012 0013 0014 0015 0016 0017 0018 0019 001A 001B 001C 001D 001E 001F + 0020 0021 0022 0023 0024 0025 0026 0027 0028 0029 002A 002B 002C 002D 002E 002F + 0030 0031 0032 0033 0034 0035 0036 0037 0038 0039 003A 003B 003C 003D 003E 003F + 0040 0041 0042 0043 0044 0045 0046 0047 0048 0049 004A 004B 004C 004D 004E 004F + 0050 0051 0052 0053 0054 0055 0056 0057 0058 0059 005A 005B 005C 005D 005E 005F + 0060 0061 0062 0063 0064 0065 0066 0067 0068 0069 006A 006B 006C 006D 006E 006F + 0070 0071 0072 0073 0074 0075 0076 0077 0078 0079 007A 007B 007C 007D 007E 007F + 00C7 00FC 00E9 00E2 00E4 016F 0107 00E7 0142 00EB 0150 0151 00EE 0179 00C4 0106 + 00C9 0139 013A 00F4 00F6 013D 
013E 015A 015B 00D6 00DC 0164 0165 0141 00D7 010D + 00E1 00ED 00F3 00FA 0104 0105 017D 017E 0118 0119 00AC 017A 010C 015F 00AB 00BB + 2591 2592 2593 2502 2524 00C1 00C2 011A 015E 2563 2551 2557 255D 017B 017C 2510 + 2514 2534 252C 251C 2500 253C 0102 0103 255A 2554 2569 2566 2560 2550 256C 00A4 + 0111 0110 010E 00CB 010F 0147 00CD 00CE 011B 2518 250C 2588 2584 0162 016E 2580 + 00D3 00DF 00D4 0143 0144 0148 0160 0161 0154 00DA 0155 0170 00FD 00DD 0163 00B4 + 00AD 02DD 02DB 02C7 02D8 00A7 00F7 00B8 00B0 00A8 02D9 0171 0158 0159 25A0 00A0 +</map> +</unicode> + + +<collation name="cp852_general_ci"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 47 48 4C 4F 54 55 56 57 5A 5B 5C 5E 5F 62 + 67 68 69 6C 71 74 75 76 77 78 7B 90 91 92 93 94 + 95 41 47 48 4C 4F 54 55 56 57 5A 5B 5C 5E 5F 62 + 67 68 69 6C 71 74 75 76 77 78 7B 96 97 98 99 9A + 48 74 4F 41 41 74 48 48 5C 4F 62 62 57 7B 41 48 + 4F 5C 5C 62 62 5C 5C 6C 6C 62 74 71 71 5C 9E 48 + 41 57 62 74 41 41 7B 7B 4F 4F AA 7B 48 6C AE AF + B0 B1 B2 B3 B4 41 41 4F 6C B5 BA BB BC 7B 7B BF + C0 C1 C2 C3 C4 C5 41 41 C8 C9 CA CB CC CD CE CF + 4C 4C 4C 4F 4C 60 57 57 4F D9 DA DB DC 71 74 DF + 62 70 62 60 60 60 6C 6C 69 74 69 74 78 78 71 EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA 74 69 69 FE FF +</map> +</collation> + +<collation name="cp852_bin" flag="binary"/> + +</charset> + +</charsets> diff --git a/sql/share/charsets/cp866.xml b/sql/share/charsets/cp866.xml new file mode 100644 index 00000000000..1a72b396c7c --- /dev/null +++ b/sql/share/charsets/cp866.xml @@ -0,0 +1,142 @@ +<?xml version='1.0' encoding="utf-8"?> + +<charsets> + +<copyright> + Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +</copyright> + +<charset name="cp866"> +<!-- cp866_DOSCyrillicRussian --> + +<ctype> +<map> + 00 + 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 + 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 + 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 + 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 + 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 + 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 + 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 00 + 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 + 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 + 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 + 01 02 01 02 01 02 01 02 00 00 00 00 00 00 00 48 +</map> +</ctype> + + +<lower> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F + 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F + A0 A1 A2 A3 A4 A5 86 87 88 89 AA AB AC AD AE AF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + A0 A1 A2 A3 A4 A5 86 87 88 89 AA AB AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F1 F1 F3 F3 F5 F5 F7 F7 F8 F9 FA FB FC FD FE FF +</map> +</lower> + + +<upper> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F + 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + F0 F0 F2 F2 F4 F4 F6 F6 F8 F9 FA FB FC FD FE FF +</map> +</upper> + + +<unicode> +<map> + 0000 0001 0002 0003 0004 0005 0006 0007 0008 0009 000A 000B 000C 000D 000E 000F + 0010 0011 0012 0013 0014 0015 0016 0017 0018 0019 001A 001B 001C 001D 001E 001F + 0020 0021 0022 0023 0024 0025 0026 0027 0028 0029 002A 002B 002C 002D 002E 002F + 0030 0031 0032 0033 0034 0035 0036 0037 0038 0039 003A 003B 003C 003D 003E 003F + 0040 0041 0042 0043 0044 0045 0046 0047 0048 0049 004A 004B 004C 004D 004E 004F + 0050 0051 0052 0053 0054 0055 0056 0057 0058 0059 005A 005B 005C 005D 005E 005F + 0060 0061 0062 0063 0064 0065 0066 0067 0068 0069 006A 006B 006C 006D 006E 006F + 0070 0071 0072 0073 0074 0075 0076 0077 0078 0079 007A 007B 007C 007D 007E 007F + 0410 0411 0412 0413 0414 0415 0416 0417 0418 0419 041A 041B 041C 041D 041E 
041F + 0420 0421 0422 0423 0424 0425 0426 0427 0428 0429 042A 042B 042C 042D 042E 042F + 0430 0431 0432 0433 0434 0435 0436 0437 0438 0439 043A 043B 043C 043D 043E 043F + 2591 2592 2593 2502 2524 2561 2562 2556 2555 2563 2551 2557 255D 255C 255B 2510 + 2514 2534 252C 251C 2500 253C 255E 255F 255A 2554 2569 2566 2560 2550 256C 2567 + 2568 2564 2565 2559 2558 2552 2553 256B 256A 2518 250C 2588 2584 258C 2590 2580 + 0440 0441 0442 0443 0444 0445 0446 0447 0448 0449 044A 044B 044C 044D 044E 044F + 0401 0451 0404 0454 0407 0457 040E 045E 00B0 2219 00B7 221A 207F 00B2 25A0 00A0 +</map> +</unicode> + + +<collation name="cp866_general_ci"> +<!-- Case insensitive, accent sensitive --> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 43 45 47 49 4B 4D 4F 51 53 55 57 59 5B 5D + 5F 61 63 65 67 69 6B 6D 6F 71 73 BD BE BF C0 C1 + C2 41 43 45 47 49 4B 4D 4F 51 54 55 57 59 5B 5D + 5F 61 63 65 67 69 6B 6D 6F 71 73 C3 C4 C5 C6 C7 + 75 77 79 7B 7D 7F 85 87 89 8D 8F 91 93 95 97 99 + 9B 9D 9F A1 A5 A7 A9 AB AD AF B1 B3 B5 B7 B9 BB + 75 77 79 7B 7D 7F 85 87 89 8D 8F 91 93 95 97 99 + C8 C9 CA D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF + 9B 9D 9F A1 A5 A7 A9 AB AD AF B1 B3 B5 B7 B9 BB + 81 81 83 83 8B 8B A3 A3 CB CC CD CE CF D0 D1 D2 +</map> +</collation> + +<collation name="cp866_bin" flag="binary"/> + +</charset> + +</charsets> + diff --git a/sql/share/charsets/croat.conf b/sql/share/charsets/croat.conf deleted file mode 100644 index fbbe3328547..00000000000 --- a/sql/share/charsets/croat.conf +++ /dev/null @@ -1,74 +0,0 @@ -# Configuration file for the croat character set - -# ctype array (must have 257 elements) - 00 - 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 - 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 - 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 - 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 - 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 - 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 20 - 00 00 00 00 00 00 00 00 00 01 00 00 00 01 00 00 - 00 00 00 00 00 00 00 00 00 02 00 00 00 02 00 00 - 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 - 01 01 01 01 01 01 01 10 01 01 01 01 01 01 01 02 - 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 10 02 02 02 02 02 02 02 02 - -# to_lower array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F - 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 9A 8B 8C 8D 9E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 D7 F8 F9 FA FB FC FD FE DF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF - -# to_upper array 
(must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F - 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 8A 9B 9C 9D 8E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 F7 D8 D9 DA DB DC DD DE FF - -# sort_order array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 46 48 49 4A 4B 4C 4D 4E 4F 50 51 52 - 53 54 55 56 58 59 5A 5B 5C 5D 5E 5B 5C 5D 5E 5F - 60 41 42 43 46 48 49 4A 4B 4C 4D 4E 4F 50 51 52 - 53 54 55 56 58 59 5A 5B 5C 5D 5E 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 57 8B 8C 8D 5F 8F - 90 91 92 93 94 95 96 97 98 99 57 9B 9C 9D 5F 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - 41 41 41 41 5C 5B 45 43 44 45 45 45 49 49 49 49 - 47 4E 4F 4F 4F 4F 5D D7 D8 55 55 55 59 59 DE DF - 41 41 41 41 5C 5B 45 43 44 45 45 45 49 49 49 49 - 47 4E 4F 4F 4F 4F 5D F7 D8 55 55 55 59 59 DE FF diff --git a/sql/share/charsets/danish.conf b/sql/share/charsets/danish.conf deleted file mode 100644 index f99590ed6f3..00000000000 --- a/sql/share/charsets/danish.conf +++ /dev/null @@ -1,74 +0,0 @@ -# Configuration file for the danish character set - -# ctype array (must have 257 elements) - 00 - 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 - 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 - 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 - 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 - 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 - 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 20 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 - 01 01 01 01 01 01 01 10 01 01 01 01 01 01 01 02 - 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 10 02 02 02 02 02 02 02 02 - -# to_lower array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F - 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 D7 F8 F9 FA FB FC FD FE DF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB 
EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF - -# to_upper array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F - 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 F7 D8 D9 DA DB DC DD DE FF - -# sort_order array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F - 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - 41 41 41 41 5B 5D 5B 43 45 45 45 45 49 49 49 49 - 44 4E 4F 4F 4F 4F 5C D7 5C 55 55 55 59 59 DE DF - 41 41 41 41 5B 5D 5B 43 45 45 45 45 49 49 49 49 - 44 4E 4F 4F 4F 4F 5C F7 5C 55 55 55 59 59 DE FF diff --git a/sql/share/charsets/dec8.conf b/sql/share/charsets/dec8.xml index a4849aaa04c..2cb28cb0f4f 100644 --- a/sql/share/charsets/dec8.conf +++ b/sql/share/charsets/dec8.xml @@ -1,6 +1,29 @@ -# Configuration file for the dec8 character set +<?xml version='1.0' encoding="utf-8"?> -# ctype array (must have 257 elements) +<charsets> + +<copyright> + Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +</copyright> + +<charset name="dec8"> + +<ctype> +<map> 00 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 @@ -18,8 +41,12 @@ 01 01 01 01 01 01 01 10 01 01 01 01 01 01 01 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 10 02 02 02 02 02 02 02 02 +</map> +</ctype> -# to_lower array (must have 256 elements) + +<lower> +<map> 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F @@ -36,8 +63,12 @@ F0 F1 F2 F3 F4 F5 F6 D7 F8 F9 FA FB FC FD FE DF E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</lower> + -# to_upper array (must have 256 elements) +<upper> +<map> 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F @@ -54,8 +85,34 @@ D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF D0 D1 D2 D3 D4 D5 D6 F7 D8 D9 DA DB DC DD DE FF +</map> +</upper> -# sort_order array (must have 256 elements) + +<unicode> +<map> +0000 0001 0002 0003 0004 0005 0006 0007 0008 0009 000A 000B 000C 000D 000E 000F +0010 0011 0012 0013 0014 0015 0016 0017 0018 0019 001A 001B 001C 001D 001E 001F +0020 0021 0022 0023 0024 0025 0026 0027 0028 0029 002A 002B 002C 002D 002E 002F +0030 0031 0032 0033 0034 0035 0036 0037 0038 0039 003A 003B 003C 003D 003E 003F +0040 0041 0042 0043 0044 0045 0046 0047 0048 0049 004A 004B 004C 004D 004E 004F +0050 0051 0052 0053 0054 0055 0056 0057 0058 0059 005A 005B 005C 005D 005E 005F +0060 0061 0062 0063 0064 0065 0066 0067 0068 0069 006A 006B 006C 006D 006E 006F +0070 0071 0072 0073 0074 0075 0076 0077 0078 0079 007A 007B 007C 007D 007E 007F +0080 0081 0082 0083 0084 0085 0086 0087 0088 0089 008A 008B 008C 008D 008E 008F +0090 0091 0092 0093 0094 0095 0096 0097 0098 0099 009A 009B 009C 009D 009E 009F +00A0 00A1 00A2 00A3 0000 00A5 0000 00A7 00A4 00A9 00AA 00AB 0000 0000 0000 0000 +00B0 00B1 00B2 00B3 0000 00B5 00B6 00B7 0000 00B9 00BA 00BB 00BC 00BD 0000 00BF +00C0 00C1 00C2 00C3 00C4 00C5 00C6 00C7 00C8 00C9 00CA 00CB 00CC 00CD 00CE 00CF +0000 00D1 00D2 00D3 00D4 00D5 00D6 0152 00D8 00D9 00DA 00DB 00DC 0178 0000 00DF +00E0 00E1 00E2 00E3 00E4 00E5 00E6 00E7 00E8 00E9 00EA 00EB 00EC 00ED 00EE 00EF +0000 00F1 00F2 00F3 00F4 00F5 00F6 0153 00F8 00F9 00FA 00FB 00FC 00FF 0000 0000 +</map> +</unicode> + + +<collation name="dec8_swedish_ci"> +<map> 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F @@ -72,3 +129,12 @@ 44 4E 4F 4F 4F 4F 5D D7 D8 55 55 55 59 59 DE DF 41 41 41 41 5C 5B 5C 43 45 45 45 45 49 49 49 49 44 4E 4F 4F 4F 4F 5D F7 D8 55 55 55 59 59 DE FF +</map> +</collation> + +<collation name="dec8_bin" flag="binary"/> + +</charset> + + +</charsets> diff --git a/sql/share/charsets/dos.conf b/sql/share/charsets/dos.conf deleted file mode 100644 index dda86d0f3e8..00000000000 --- a/sql/share/charsets/dos.conf +++ /dev/null @@ -1,74 +0,0 @@ -# Configuration file for the dos character set - -# ctype array (must have 257 elements) - 00 - 20 30 30 30 30 30 30 20 20 28 28 28 28 28 30 30 - 30 30 30 30 30 30 30 30 30 30 20 30 30 30 30 30 - 48 10 10 10 10 
10 10 10 10 10 10 10 10 10 10 10 - 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 - 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 - 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 - 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 30 - 02 02 02 02 02 02 02 02 02 02 02 02 02 02 01 01 - 01 02 01 02 02 02 02 02 02 01 01 10 10 10 10 10 - 02 02 02 02 02 01 10 10 10 10 10 10 10 10 10 10 - 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 20 - -# to_lower array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F - 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F - 87 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 84 86 - 82 91 91 93 94 95 96 97 98 94 81 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A4 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF - -# to_upper array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F - 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F - 80 9A 90 41 8E 41 8F 80 45 45 45 49 49 49 8E 8F - 90 92 92 4F 99 4F 55 55 59 99 9A 9B 9C 9D 9E 9F - 41 49 4F 55 A5 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF - -# sort_order array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F - 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F - 43 59 45 41 5C 41 5B 43 45 45 45 49 49 49 5C 5B - 45 5C 5C 4F 5D 4F 55 55 59 5D 59 24 24 24 24 24 - 41 49 4F 55 4E 4E A6 A7 3F A9 AA AB AC 21 22 22 - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF diff --git a/sql/share/charsets/estonia.conf b/sql/share/charsets/estonia.conf deleted file mode 100644 index 76bbc021b0c..00000000000 --- a/sql/share/charsets/estonia.conf +++ /dev/null @@ -1,74 +0,0 @@ -# Configuration file for the estonia character set. - -# The ctype array must have 257 elements. 
- 00 - 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 - 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 - 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 - 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 - 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 - 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 20 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 - 01 01 01 01 01 01 01 10 01 01 01 01 01 01 01 02 - 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 10 02 02 02 02 02 02 02 02 - -# The to_lower array must have 256 elements. - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F - 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 B8 A9 BA AB AC AD AE BF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 D7 F8 F9 FA FB FC FD FE DF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF - -# The to_upper array must have 256 elements. - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F - 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 A8 B9 AA BB BC BD BE AF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 F7 D8 D9 DA DB DC DD DE FF - -# The sort_order array must have 256 elements. 
- 00 02 03 04 05 06 07 08 09 2E 2F 30 31 32 0A 0B - 0C 0D 0E 0F 10 11 12 13 14 15 16 17 18 19 1A 1B - 2C 33 34 35 36 37 38 27 39 3A 3B 5D 3C 28 3D 3E - 76 7A 7C 7E 80 81 82 83 84 85 3F 40 5E 5F 60 41 - 42 86 90 92 98 9A A4 A6 AA AC B2 B4 B8 BE C0 C6 - CE D0 D2 D6 E5 E8 EE F0 FA FC DD 43 44 45 46 47 - 48 87 91 93 99 9B A5 A7 AB AD B3 B5 B9 BF C1 C7 - CF D1 D3 D7 E6 E9 EF F1 FB FD DE 49 4A 4B 4C 1C - 01 1D 57 1E 5A 74 71 72 1F 75 20 5B 21 4E 52 51 - 22 55 56 58 59 73 2A 2B 23 E7 24 5C 25 4F 54 26 - 2D FE 66 67 68 FF 4D 69 CC 6A D4 62 6B 29 6C 8E - 6D 61 7D 7F 50 6E 6F 70 CD 7B D5 63 77 78 79 8F - 8C B0 88 94 F4 8A A2 A0 96 9C DF 9E A8 B6 AE BA - DB C2 C4 C8 CA F2 F6 64 EC BC D8 EA F8 E1 E3 DA - 8D B1 89 95 F5 8B A3 A1 97 9D E0 9F A9 B7 AF BB - DC C3 C5 C9 CB F3 F7 65 ED BD D9 EB F9 E2 E4 53 diff --git a/sql/share/charsets/geostd8.xml b/sql/share/charsets/geostd8.xml new file mode 100644 index 00000000000..c09aa078fb7 --- /dev/null +++ b/sql/share/charsets/geostd8.xml @@ -0,0 +1,139 @@ +<?xml version='1.0' encoding="utf-8"?> + +<charsets> + +<copyright> + Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +</copyright> + +<charset name="geostd8"> + +<ctype> +<map> + 00 + 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 + 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 + 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 + 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 + 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 + 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 + 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 20 + 00 00 10 00 10 10 10 10 00 10 00 10 00 00 00 00 + 00 10 10 10 10 10 10 10 00 00 00 10 00 00 00 00 + 48 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 03 03 03 03 03 03 03 03 03 03 03 03 03 03 03 03 + 03 03 03 03 03 03 03 03 03 03 03 03 03 03 03 03 + 03 03 03 03 03 03 00 00 00 00 00 00 00 00 00 00 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 +</map> +</ctype> + + +<lower> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F + 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</lower> + + +<upper> +<map> + 00 01 02 03 04 
05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F + 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</upper> + + +<unicode> +<map> +0000 0001 0002 0003 0004 0005 0006 0007 0008 0009 000A 000B 000C 000D 000E 000F +0010 0011 0012 0013 0014 0015 0016 0017 0018 0019 001A 001B 001C 001D 001E 001F +0020 0021 0022 0023 0024 0025 0026 0027 0028 0029 002A 002B 002C 002D 002E 002F +0030 0031 0032 0033 0034 0035 0036 0037 0038 0039 003A 003B 003C 003D 003E 003F +0040 0041 0042 0043 0044 0045 0046 0047 0048 0049 004A 004B 004C 004D 004E 004F +0050 0051 0052 0053 0054 0055 0056 0057 0058 0059 005A 005B 005C 005D 005E 005F +0060 0061 0062 0063 0064 0065 0066 0067 0068 0069 006A 006B 006C 006D 006E 006F +0070 0071 0072 0073 0074 0075 0076 0077 0078 0079 007A 007B 007C 007D 007E 007F +20AC 0000 201A 0000 201E 2026 2020 2021 0000 2030 0000 2039 0000 0000 0000 0000 +0000 2018 2019 201C 201D 2022 2013 2014 0000 0000 0000 203A 0000 0000 0000 0000 +00A0 00A1 00A2 00A3 00A4 00A5 00A6 00A7 00A8 00A9 00AA 00AB 00AC 00AD 00AE 00AF +00B0 00B1 00B2 00B3 00B4 00B5 00B6 00B7 00B8 00B9 00BA 00BB 00BC 00BD 00BE 00BF +10D0 10D1 10D2 10D3 10D4 10D5 10D6 10F1 10D7 10D8 10D9 10DA 10DB 10DC 10F2 10DD +10DE 10DF 10E0 10E1 10E2 10F3 10E3 10E4 10E5 10E6 10E7 10E8 10E9 10EA 10EB 10EC +10ED 10EE 10F4 10EF 10F0 10F5 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 +0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 2116 0000 0000 +</map> +</unicode> + + +<collation name="geostd8_general_ci"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</collation> + +<collation name="geostd8_bin" flag="binary"/> + +</charset> + +</charsets> diff --git a/sql/share/charsets/german1.conf b/sql/share/charsets/german1.conf deleted file mode 100644 index 3090c921ebe..00000000000 --- a/sql/share/charsets/german1.conf +++ /dev/null @@ -1,74 +0,0 @@ -# Configuration file for the german1 character set - -# ctype array (must have 257 elements) - 00 - 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 - 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 - 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 84 84 84 
84 84 84 84 84 84 84 10 10 10 10 10 10 - 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 - 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 - 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 20 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 - 01 01 01 01 01 01 01 10 01 01 01 01 01 01 01 02 - 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 10 02 02 02 02 02 02 02 02 - -# to_lower array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F - 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 D7 F8 F9 FA FB FC FD FE DF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF - -# to_upper array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F - 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 F7 D8 D9 DA DB DC DD DE FF - -# sort_order array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F - 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - 41 41 41 41 41 41 41 43 45 45 45 45 49 49 49 49 - D0 4E 4F 4F 4F 4F 4F D7 4F 55 55 55 55 59 DE 53 - 41 41 41 41 41 41 41 43 45 45 45 45 49 49 49 49 - D0 4E 4F 4F 4F 4F 4F F7 4F 55 55 55 55 59 DE FF diff --git a/sql/share/charsets/greek.conf b/sql/share/charsets/greek.conf deleted file mode 100644 index 73d67d6ee71..00000000000 --- a/sql/share/charsets/greek.conf +++ /dev/null @@ -1,74 +0,0 @@ -# Configuration file for the greek character set - -# ctype array (must have 257 elements) - 00 - 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 - 20 20 20 20 20 20 20 20 20 20 
20 20 20 20 20 20 - 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 - 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 - 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 - 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 20 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 48 10 10 10 00 00 10 10 10 10 00 10 10 10 00 10 - 10 10 10 10 10 10 01 10 01 01 01 10 01 10 01 01 - 02 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 - 01 01 00 01 01 01 01 01 01 01 01 01 02 02 02 02 - 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 00 - -# to_lower array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F - 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 DC B7 DD DE DF BB FC BD FD FE - C0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 D2 F3 F4 F5 F6 F7 F8 F9 FA FB DC DD DE DF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF - -# to_upper array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F - 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - DA C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB C1 C5 C7 C9 - DB C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D3 D3 D4 D5 D6 D7 D8 D9 DA DB CF D5 D9 FF - -# sort_order array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F - 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 C1 B7 C5 C7 C9 BB CF BD D5 D9 - C9 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 C9 D5 C1 C5 C7 C9 - D5 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D3 D3 D4 D5 D6 D7 D8 D9 C9 D5 CF D5 D9 FF diff --git a/sql/share/charsets/greek.xml b/sql/share/charsets/greek.xml new file mode 100644 index 00000000000..1cfe6b49610 --- /dev/null +++ b/sql/share/charsets/greek.xml @@ -0,0 +1,144 @@ +<?xml version='1.0' encoding="utf-8"?> + +<charsets> + +<copyright> + Copyright (C) 2003 MySQL AB + + This 
program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +</copyright> + +<charset name="greek"> + +<!-- It's ISO Greek rather than WIN Greek because --> +<!-- 0xB6 is marked as an upper letter, which is true for the ISO Greek version --> +<!-- In the Windows version this character is PILCROW SIGN --> + +<ctype> +<map> + 00 + 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 + 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 + 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 + 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 + 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 + 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 + 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 20 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 48 10 10 10 00 00 10 10 10 10 00 10 10 10 00 10 + 10 10 10 10 10 10 01 10 01 01 01 10 01 10 01 01 + 02 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 + 01 01 00 01 01 01 01 01 01 01 01 01 02 02 02 02 + 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 + 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 00 + </map> +</ctype> + + +<lower> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F + 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 B3 B4 B5 DC B7 DD DE DF BB FC BD FD FE + C0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 D2 F3 F4 F5 F6 F7 F8 F9 FA FB DC DD DE DF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</lower> + + +<upper> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F + 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + DA C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB C1 C5 C7 C9 + DB C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D3 D3 D4 D5 D6 D7 D8 D9 DA DB CF D5 D9 FF +</map> +</upper> + + +<unicode> +<map> +0000 0001 0002 0003 0004 0005 0006 0007 0008 0009 000A 000B 000C 000D 000E 000F +0010 0011 
0012 0013 0014 0015 0016 0017 0018 0019 001A 001B 001C 001D 001E 001F +0020 0021 0022 0023 0024 0025 0026 0027 0028 0029 002A 002B 002C 002D 002E 002F +0030 0031 0032 0033 0034 0035 0036 0037 0038 0039 003A 003B 003C 003D 003E 003F +0040 0041 0042 0043 0044 0045 0046 0047 0048 0049 004A 004B 004C 004D 004E 004F +0050 0051 0052 0053 0054 0055 0056 0057 0058 0059 005A 005B 005C 005D 005E 005F +0060 0061 0062 0063 0064 0065 0066 0067 0068 0069 006A 006B 006C 006D 006E 006F +0070 0071 0072 0073 0074 0075 0076 0077 0078 0079 007A 007B 007C 007D 007E 007F +0080 0081 0082 0083 0084 0085 0086 0087 0088 0089 008A 008B 008C 008D 008E 008F +0090 0091 0092 0093 0094 0095 0096 0097 0098 0099 009A 009B 009C 009D 009E 009F +00A0 02BD 02BC 00A3 0000 0000 00A6 00A7 00A8 00A9 0000 00AB 00AC 00AD 0000 2015 +00B0 00B1 00B2 00B3 0384 0385 0386 00B7 0388 0389 038A 00BB 038C 00BD 038E 038F +0390 0391 0392 0393 0394 0395 0396 0397 0398 0399 039A 039B 039C 039D 039E 039F +03A0 03A1 0000 03A3 03A4 03A5 03A6 03A7 03A8 03A9 03AA 03AB 03AC 03AD 03AE 03AF +03B0 03B1 03B2 03B3 03B4 03B5 03B6 03B7 03B8 03B9 03BA 03BB 03BC 03BD 03BE 03BF +03C0 03C1 03C2 03C3 03C4 03C5 03C6 03C7 03C8 03C9 03CA 03CB 03CC 03CD 03CE 0000 +</map> +</unicode> + + +<collation name="greek_general_ci"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F + 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 B3 B4 B5 C1 B7 C5 C7 C9 BB CF BD D5 D9 + C9 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 C9 D5 C1 C5 C7 C9 + D5 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D3 D3 D4 D5 D6 D7 D8 D9 C9 D5 CF D5 D9 FF +</map> +</collation> + +<collation name="greek_bin" flag="binary"/> + +</charset> + +</charsets> + diff --git a/sql/share/charsets/hebrew.conf b/sql/share/charsets/hebrew.conf deleted file mode 100644 index 6a5f88eb228..00000000000 --- a/sql/share/charsets/hebrew.conf +++ /dev/null @@ -1,74 +0,0 @@ -# Configuration file for the hebrew character set - -# ctype array (must have 257 elements) - 00 - 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 - 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 - 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 - 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 - 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 - 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 20 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 02 02 02 02 00 00 00 00 00 - -# to_lower array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 61 62 63 64 65 66 67 68 69 6A 6B 
6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F - 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF - -# to_upper array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F - 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF - -# sort_order array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F - 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 F7 D8 D9 DA DB DC DD DE FF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF diff --git a/sql/share/charsets/hebrew.xml b/sql/share/charsets/hebrew.xml new file mode 100644 index 00000000000..5bcf222a728 --- /dev/null +++ b/sql/share/charsets/hebrew.xml @@ -0,0 +1,140 @@ +<?xml version='1.0' encoding="utf-8"?> + +<charsets> + +<copyright> + Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +</copyright> + +<charset name="hebrew"> + +<ctype> +<map> + 00 + 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 + 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 + 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 + 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 + 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 + 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 + 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 20 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 + 02 02 02 02 02 02 02 02 02 02 02 00 00 00 00 00 + </map> +</ctype> + + +<lower> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F + 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</lower> + + +<upper> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F + 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</upper> + + +<unicode> +<map> +0000 0001 0002 0003 0004 0005 0006 0007 0008 0009 000A 000B 000C 000D 000E 000F +0010 0011 0012 0013 0014 0015 0016 0017 0018 0019 001A 001B 001C 001D 001E 001F +0020 0021 0022 0023 0024 0025 0026 0027 0028 0029 002A 002B 002C 002D 002E 002F +0030 0031 0032 0033 0034 0035 0036 0037 0038 0039 003A 003B 003C 003D 003E 003F +0040 0041 0042 0043 0044 0045 0046 0047 0048 0049 004A 004B 004C 004D 004E 004F +0050 0051 0052 0053 0054 0055 0056 0057 0058 0059 005A 005B 005C 005D 005E 005F +0060 0061 0062 0063 0064 0065 0066 0067 0068 0069 006A 006B 006C 006D 006E 006F +0070 0071 0072 0073 0074 0075 0076 0077 0078 0079 007A 007B 007C 007D 007E 007F +0080 0081 0082 0083 0084 0085 0086 0087 0088 0089 008A 008B 008C 008D 008E 008F +0090 0091 0092 0093 0094 0095 0096 
0097 0098 0099 009A 009B 009C 009D 009E 009F +00A0 0000 00A2 00A3 00A4 00A5 00A6 00A7 00A8 00A9 00D7 00AB 00AC 00AD 00AE 203E +00B0 00B1 00B2 00B3 00B4 00B5 00B6 00B7 00B8 00B9 00F7 00BB 00BC 00BD 00BE 0000 +0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 +0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 2017 +05D0 05D1 05D2 05D3 05D4 05D5 05D6 05D7 05D8 05D9 05DA 05DB 05DC 05DD 05DE 05DF +05E0 05E1 05E2 05E3 05E4 05E5 05E6 05E7 05E8 05E9 05EA 0000 0000 0000 0000 0000 +</map> +</unicode> + + +<collation name="hebrew_general_ci"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F + 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 F7 D8 D9 DA DB DC DD DE FF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</collation> + +<collation name="hebrew_bin" flag="binary"/> + +</charset> + +</charsets> + diff --git a/sql/share/charsets/hp8.conf b/sql/share/charsets/hp8.conf deleted file mode 100644 index e9fadacbf76..00000000000 --- a/sql/share/charsets/hp8.conf +++ /dev/null @@ -1,74 +0,0 @@ -# Configuration file for the hp8 character set - -# ctype array (must have 257 elements) - 00 - 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 - 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 - 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 - 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 - 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 - 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 20 - 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 - 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 - 20 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 10 20 20 10 10 10 10 10 10 10 10 10 10 10 10 10 - 10 10 10 10 10 02 10 10 10 10 10 10 02 10 02 02 - 01 10 10 01 02 10 10 02 01 10 01 01 01 10 10 10 - 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 10 10 20 20 20 20 10 10 10 10 10 10 10 10 10 20 - -# to_lower array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F - 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 C8 C0 C9 C1 CD D1 DD A8 A9 AA AB AC CB C3 AF - B0 B2 B2 B3 B5 B5 B7 B7 B8 B9 BA BB BC BD BE BF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D4 D1 D6 D7 D4 D5 D6 D7 CC D9 CE CF C5 DD DE C2 - C4 E2 E2 E4 E4 D5 D9 C6 CA EA EA EC EC C7 EF EF - F1 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF - -# to_upper array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 
18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F - 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B1 B3 B4 B4 B6 B6 B8 B9 BA BB BC BD BE BF - A2 A4 DF AE E0 DC E7 ED A1 A3 E8 AD D8 A5 DA DB - D0 A6 D2 D3 D0 E5 D2 D3 D8 E6 DA DB DC A7 DE DF - E0 E1 E1 E3 E3 E5 E6 E7 E8 E9 E9 EB EB ED EE EE - F0 F0 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF - -# sort_order array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 5C 5D 5B 5E 5F - 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF diff --git a/sql/share/charsets/hp8.xml b/sql/share/charsets/hp8.xml new file mode 100644 index 00000000000..35224f8c544 --- /dev/null +++ b/sql/share/charsets/hp8.xml @@ -0,0 +1,140 @@ +<?xml version='1.0' encoding="utf-8"?> + +<charsets> + +<copyright> + Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +</copyright> + +<charset name="hp8"> + +<ctype> +<map> + 00 + 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 + 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 + 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 + 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 + 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 + 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 + 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 20 + 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 + 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 + 20 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 10 20 20 10 10 10 10 10 10 10 10 10 10 10 10 10 + 10 10 10 10 10 02 10 10 10 10 10 10 02 10 02 02 + 01 10 10 01 02 10 10 02 01 10 01 01 01 10 10 10 + 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 10 10 20 20 20 20 10 10 10 10 10 10 10 10 10 20 +</map> +</ctype> + + +<lower> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F + 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 C8 C0 C9 C1 CD D1 DD A8 A9 AA AB AC CB C3 AF + B0 B2 B2 B3 B5 B5 B7 B7 B8 B9 BA BB BC BD BE BF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D4 D1 D6 D7 D4 D5 D6 D7 CC D9 CE CF C5 DD DE C2 + C4 E2 E2 E4 E4 D5 D9 C6 CA EA EA EC EC C7 EF EF + F1 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</lower> + + +<upper> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F + 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B1 B3 B4 B4 B6 B6 B8 B9 BA BB BC BD BE BF + A2 A4 DF AE E0 DC E7 ED A1 A3 E8 AD D8 A5 DA DB + D0 A6 D2 D3 D0 E5 D2 D3 D8 E6 DA DB DC A7 DE DF + E0 E1 E1 E3 E3 E5 E6 E7 E8 E9 E9 EB EB ED EE EE + F0 F0 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</upper> + + +<unicode> +<map> +0000 0001 0002 0003 0004 0005 0006 0007 0008 0009 000A 000B 000C 000D 000E 000F +0010 0011 0012 0013 0014 0015 0016 0017 0018 0019 001A 001B 001C 001D 001E 001F +0020 0021 0022 0023 0024 0025 0026 0027 0028 0029 002A 002B 002C 002D 002E 002F +0030 0031 0032 0033 0034 0035 0036 0037 0038 0039 003A 003B 003C 003D 003E 003F +0040 0041 0042 0043 0044 0045 0046 0047 0048 0049 004A 004B 004C 004D 004E 004F +0050 0051 0052 0053 0054 0055 0056 0057 0058 0059 005A 005B 005C 005D 005E 005F +0060 0061 0062 0063 0064 0065 0066 0067 0068 0069 006A 006B 006C 006D 006E 006F +0070 0071 0072 0073 0074 0075 0076 0077 0078 0079 007A 007B 007C 007D 007E 007F +0080 0081 0082 0083 0084 0085 0086 0087 0088 0089 008A 008B 008C 008D 008E 008F +0090 0091 0092 0093 0094 0095 0096 0097 
0098 0099 009A 009B 009C 009D 009E 009F +00A0 00C0 00C2 00C8 00CA 00CB 00CE 00CF 00B4 02CB 02C6 00A8 02DC 00D9 00DB 20A4 +00AF 00DD 00FD 00B0 00C7 00E7 00D1 00F1 00A1 00BF 00A4 00A3 00A5 00A7 0192 00A2 +00E2 00EA 00F4 00FB 00E1 00E9 00F3 00FA 00E0 00E8 00F2 00F9 00E4 00EB 00F6 00FC +00C5 00EE 00D8 00C6 00E5 00ED 00F8 00E6 00C4 00EC 00D6 00DC 00C9 00EF 00DF 00D4 +00C1 00C3 00E3 00D0 00F0 00CD 00CC 00D3 00D2 00D5 00F5 0160 0161 00DA 0178 00FF +00DE 00FE 00B7 00B5 00B6 00BE 2014 00BC 00BD 00AA 00BA 00AB 25A0 00BB 00B1 0000 + +</map> +</unicode> + + +<collation name="hp8_english_ci"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 5C 5D 5B 5E 5F + 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</collation> + +<collation name="hp8_bin" flag="binary"/> + +</charset> + +</charsets> diff --git a/sql/share/charsets/hungarian.conf b/sql/share/charsets/hungarian.conf deleted file mode 100644 index db58d62575f..00000000000 --- a/sql/share/charsets/hungarian.conf +++ /dev/null @@ -1,74 +0,0 @@ -# Configuration file for the hungarian character set - -# ctype array (must have 257 elements) - 00 - 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 - 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 - 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 - 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 - 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 - 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 20 - 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 20 - 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 48 - 01 10 01 10 01 01 10 00 00 01 01 01 01 10 01 01 - 10 02 10 02 10 02 02 10 10 02 02 02 02 10 02 02 - 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 - 10 01 01 01 01 01 01 10 01 01 01 01 01 01 01 10 - 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 10 02 02 02 02 02 02 02 10 - -# to_lower array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F - 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8B 8B A1 A1 8E A0 - 82 91 92 93 94 A2 96 A3 96 94 81 9B 9C 9D 9E 9F - A0 A1 A2 A3 B5 B6 A6 93 A8 B9 BA BB BC AD BE BF - B0 B1 B2 B3 B4 E1 B6 B7 B8 B9 BA BB BC BD BE BF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - D0 F1 F2 F3 F4 F5 F6 D7 F8 F9 FA FB FC FD FE DF - A2 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA 96 EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF - -# to_upper array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 
14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F - 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F - 80 9A 90 83 84 85 86 87 88 89 8A 8A 8C 8D 8E 8F - 90 91 92 A7 99 95 98 97 98 99 9A 9B 9C 9D 9E 9F - 8F 8D 95 97 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 A0 B2 A2 B4 B5 A5 B7 B8 A9 AA AB AC BD AE AF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF - E0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA EB CC CD CE CF - F0 D1 D2 D3 D4 D5 D6 F7 D8 D9 DA DB DC DD DE FF - -# sort_order array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 47 48 4C 4E 53 54 55 56 5A 5B 5C 60 61 64 - 69 6A 6B 6E 72 75 7A 7B 7C 7D 7F 83 84 85 86 87 - 88 41 47 48 4C 4E 53 54 55 56 5A 5B 5C 60 61 64 - 69 6A 6B 6E 72 75 7A 7B 7C 7D 7F 89 8A 8B 8C 00 - 01 78 4E 04 05 06 07 08 09 0A 67 67 56 56 0F 41 - 4E 12 13 67 67 64 78 75 78 67 78 1C 1D 1E 1F FF - 41 56 64 75 5E 6F FF 67 FF 70 71 73 80 FF 81 82 - FF 42 FF 5D FF 41 6F FF FF 70 71 73 80 FF 81 82 - 6C 41 44 45 46 5F 49 4B 4A 4E 51 52 50 56 57 4D - FF 62 63 64 66 67 67 FF 6D 77 75 78 78 7E 74 FF - 64 41 44 45 46 5F 49 4B 4A 4E 51 78 50 56 58 4D - FF 62 63 64 66 67 67 FF 6D 77 75 78 78 7E 74 FF diff --git a/sql/share/charsets/keybcs2.xml b/sql/share/charsets/keybcs2.xml new file mode 100644 index 00000000000..6332891ef23 --- /dev/null +++ b/sql/share/charsets/keybcs2.xml @@ -0,0 +1,140 @@ +<?xml version='1.0' encoding="utf-8"?> + +<charsets> + +<copyright> + Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +</copyright> + +<charset name="keybcs2"> + +<ctype> +<map> + 00 + 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 + 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 + 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 + 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 + 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 + 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 + 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 00 + 01 02 82 02 02 01 01 02 82 81 01 01 02 02 01 01 + 81 02 01 02 02 01 02 01 02 01 01 01 01 01 01 02 + 02 02 02 02 02 01 01 01 02 02 02 01 00 00 00 00 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 02 02 01 02 01 02 00 02 01 01 01 02 00 02 02 00 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 48 +</map> +</ctype> + + +<lower> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F + 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F + 87 81 82 83 84 83 86 87 88 88 8D A1 8C 8D 84 A0 + 82 91 91 93 94 A2 96 A3 98 94 81 9B 8C 98 A9 9F + A0 A1 A2 A3 A4 A4 96 93 9B A9 AA AA AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF + E0 E1 E2 E3 E4 E5 E6 E7 ED E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</lower> + + +<upper> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F + 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 68 59 5A 7B 7C 7D 7E 7F + 87 9A 90 85 8E 85 86 80 89 89 8A 8B 9C 8A 8E 8F + 90 92 92 A7 99 95 A6 97 9D 99 9A A8 9C 9D 9E 9F + 8F 8B 95 97 A5 A5 A6 A7 A8 9E AB AB AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC E8 EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</upper> + + +<unicode> +<map> + 0000 0001 0002 0003 0004 0005 0006 0007 0008 0009 000A 000B 000C 000D 000E 000F + 0010 0011 0012 0013 0014 0015 0016 0017 0018 0019 001A 001B 001C 001D 001E 001F + 0020 0021 0022 0023 0024 0025 0026 0027 0028 0029 002A 002B 002C 002D 002E 002F + 0030 0031 0032 0033 0034 0035 0036 0037 0038 0039 003A 003B 003C 003D 003E 003F + 0040 0041 0042 0043 0044 0045 0046 0047 0048 0049 004A 004B 004C 004D 004E 004F + 0050 0051 0052 0053 0054 0055 0056 0057 0058 0059 005A 005B 005C 005D 005E 005F + 0060 0061 0062 0063 0064 0065 0066 0067 0068 0069 006A 006B 006C 006D 006E 006F + 0070 0071 0072 0073 0074 0075 0076 0077 0078 0079 007A 007B 007C 007D 007E 007F + 010C 00FC 00E9 010F 00E4 010E 0164 010D 011B 011A 0139 00CD 013E 013A 00C4 00C1 + 00C9 017E 017D 00F4 00F6 
00D3 016F 00DA 00FD 00D6 00DC 0160 013D 00DD 0158 0165 + 00E1 00ED 00F3 00FA 0148 0147 016E 00D4 0161 0159 0155 0154 00BC 00A1 00AB 00BB + 2591 2592 2593 2502 2524 2561 2562 2556 2555 2563 2551 2557 255D 255C 255B 2510 + 2514 2534 252C 251C 2500 253C 255E 255F 255A 2554 2569 2566 2560 2550 256C 2567 + 2568 2564 2565 2559 2558 2552 2553 256B 256A 2518 250C 2588 2584 258C 2590 2580 + 03B1 00DF 0393 03C0 03A3 03C3 00B5 03C4 03A6 0398 03A9 03B4 221E 03C6 03B5 2229 + 2261 00B1 2265 2264 2320 2321 00F7 2248 00B0 2219 00B7 221A 207F 00B2 25A0 00A0 +</map> +</unicode> + + +<collation name="keybcs2_general_ci"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 44 45 47 49 50 51 52 53 54 55 56 57 58 5A + 5E 5F 60 63 66 68 6C 6D 6E 6F 72 90 91 92 93 94 + 95 41 44 45 47 49 50 51 52 53 54 55 56 57 58 5A + 5E 5F 60 63 66 68 6C 6D 6E 6F 72 96 97 98 99 9A + 45 68 49 47 41 47 66 45 49 49 56 53 56 56 41 41 + 49 72 72 5A 5A 5A 68 68 6F 5A 68 63 56 6F 60 66 + 41 53 5A 68 58 58 68 5A 63 60 60 60 A0 A1 A2 A3 + A4 A5 A6 B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC + BD BE BF C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC + CD CE CF D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC + 80 65 83 87 88 89 DD 8A 85 8B 84 81 DE 85 82 DF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</collation> + +<collation name="keybcs2_bin" flag="binary"/> + +</charset> + +</charsets> + diff --git a/sql/share/charsets/koi8_ru.conf b/sql/share/charsets/koi8_ru.conf deleted file mode 100644 index 4cfee67a236..00000000000 --- a/sql/share/charsets/koi8_ru.conf +++ /dev/null @@ -1,74 +0,0 @@ -# Configuration file for the koi8_ru character set - -# ctype array (must have 257 elements) - 00 - 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 - 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 - 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 - 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 - 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 - 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 20 - 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 10 10 10 02 10 10 10 10 10 10 10 10 10 10 10 10 - 10 10 10 01 10 10 10 10 10 10 10 10 10 10 10 10 - 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 - 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 - 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 - -# to_lower array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F - 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 A3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF - -# to_upper array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 
0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F - 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 B3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF - -# sort_order array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F - 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 E5 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE - AF B0 B1 E5 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD - FE DF E0 F6 E3 E4 F4 E2 F5 E8 E9 EA EB EC ED EE - EF FF F0 F1 F2 F3 E6 E1 FC FB E7 F8 FD F9 F7 FA - FE DF E0 F6 E3 E4 F4 E2 F5 E8 E9 EA EB EC ED EE - EF FF F0 F1 F2 F3 E6 E1 FC FB E7 F8 FD F9 F7 FA diff --git a/sql/share/charsets/koi8r.xml b/sql/share/charsets/koi8r.xml new file mode 100644 index 00000000000..033597e9bfc --- /dev/null +++ b/sql/share/charsets/koi8r.xml @@ -0,0 +1,139 @@ +<?xml version='1.0' encoding="utf-8"?> + +<charsets> + +<copyright> + Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +</copyright> + +<charset name="koi8r"> + +<ctype> +<map> + 00 + 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 + 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 + 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 + 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 + 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 + 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 + 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 20 + 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 10 10 10 02 10 10 10 10 10 10 10 10 10 10 10 10 + 10 10 10 01 10 10 10 10 10 10 10 10 10 10 10 10 + 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 + 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 + 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 + 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 +</map> +</ctype> + + +<lower> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F + 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 A3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF +</map> +</lower> + + +<upper> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F + 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 B3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</upper> + + +<unicode> +<map> +0000 0001 0002 0003 0004 0005 0006 0007 0008 0009 000a 000b 000c 000d 000e 000f +0010 0011 0012 0013 0014 0015 0016 0017 0018 0019 001a 001b 001c 001d 001e 001f +0020 0021 0022 0023 0024 0025 0026 0027 0028 0029 002a 002b 002c 002d 002e 002f +0030 0031 0032 0033 0034 0035 0036 0037 0038 0039 003a 003b 003c 003d 003e 003f +0040 0041 0042 0043 0044 0045 0046 0047 0048 0049 004a 004b 004c 004d 004e 004f +0050 0051 0052 0053 0054 0055 0056 0057 0058 0059 005a 005b 005c 005d 005e 005f +0060 0061 0062 0063 0064 0065 0066 0067 0068 0069 006a 006b 006c 006d 006e 006f +0070 0071 0072 0073 0074 0075 0076 0077 0078 0079 007a 007b 007c 007d 007e 007f +2500 2502 250c 2510 2514 2518 251c 2524 252c 2534 253c 2580 2584 2588 258c 2590 +2591 2592 2593 2320 25a0 2219 221a 2248 
2264 2265 00a0 2321 00b0 00b2 00b7 00f7 +2550 2551 2552 0451 2553 2554 2555 2556 2557 2558 2559 255a 255b 255c 255d 255e +255f 2560 2561 0401 2562 2563 2564 2565 2566 2567 2568 2569 256a 256b 256c 00a9 +044e 0430 0431 0446 0434 0435 0444 0433 0445 0438 0439 043a 043b 043c 043d 043e +043f 044f 0440 0441 0442 0443 0436 0432 044c 044b 0437 0448 044d 0449 0447 044a +042e 0410 0411 0426 0414 0415 0424 0413 0425 0418 0419 041a 041b 041c 041d 041e +041f 042f 0420 0421 0422 0423 0416 0412 042c 042b 0417 0428 042d 0429 0427 042a +</map> +</unicode> + + +<collation name="koi8r_general_ci"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F + 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 E5 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE + AF B0 B1 E5 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD + FE DF E0 F6 E3 E4 F4 E2 F5 E8 E9 EA EB EC ED EE + EF FF F0 F1 F2 F3 E6 E1 FC FB E7 F8 FD F9 F7 FA + FE DF E0 F6 E3 E4 F4 E2 F5 E8 E9 EA EB EC ED EE + EF FF F0 F1 F2 F3 E6 E1 FC FB E7 F8 FD F9 F7 FA +</map> +</collation> + +<collation name="koi8r_bin" flag="binary"/> + +</charset> + +</charsets> diff --git a/sql/share/charsets/koi8_ukr.conf b/sql/share/charsets/koi8u.xml index 3e2c8e27325..4f5fa35af3d 100644 --- a/sql/share/charsets/koi8_ukr.conf +++ b/sql/share/charsets/koi8u.xml @@ -1,6 +1,29 @@ -# Configuration file for the koi8_ukr character set +<?xml version='1.0' encoding="utf-8"?> -# ctype array (must have 257 elements) +<charsets> + +<copyright> + Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +</copyright> + +<charset name="koi8u"> + +<ctype> +<map> 00 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 @@ -18,8 +41,12 @@ 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 +</map> +</ctype> -# to_lower array (must have 256 elements) + +<lower> +<map> 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F @@ -36,8 +63,12 @@ D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF +</map> +</lower> + -# to_upper array (must have 256 elements) +<upper> +<map> 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F @@ -54,8 +85,34 @@ F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</upper> -# sort_order array (must have 256 elements) + +<unicode> +<map> +0000 0001 0002 0003 0004 0005 0006 0007 0008 0009 000A 000B 000C 000D 000E 000F +0010 0011 0012 0013 0014 0015 0016 0017 0018 0019 001A 001B 001C 001D 001E 001F +0020 0021 0022 0023 0024 0025 0026 0027 0028 0029 002A 002B 002C 002D 002E 002F +0030 0031 0032 0033 0034 0035 0036 0037 0038 0039 003A 003B 003C 003D 003E 003F +0040 0041 0042 0043 0044 0045 0046 0047 0048 0049 004A 004B 004C 004D 004E 004F +0050 0051 0052 0053 0054 0055 0056 0057 0058 0059 005A 005B 005C 005D 005E 005F +0060 0061 0062 0063 0064 0065 0066 0067 0068 0069 006A 006B 006C 006D 006E 006F +0070 0071 0072 0073 0074 0075 0076 0077 0078 0079 007A 007B 007C 007D 007E 007F +2500 2502 250C 2510 2514 2518 251C 2524 252C 2534 253C 2580 2584 2588 258C 2590 +2591 2592 2593 2320 25A0 2022 221A 2248 2264 2265 00A0 2321 00B0 00B2 00B7 00F7 +2550 2551 2552 0451 0454 2554 0456 0457 2557 2558 2559 255A 255B 0491 255D 255E +255F 2560 2561 0401 0404 2563 0406 0407 2566 2567 2568 2569 256A 0490 256C 00A9 +044E 0430 0431 0446 0434 0435 0444 0433 0445 0438 0439 043A 043B 043C 043D 043E +043F 044F 0440 0441 0442 0443 0436 0432 044C 044B 0437 0448 044D 0449 0447 044A +042E 0410 0411 0426 0414 0415 0424 0413 0425 0418 0419 041A 041B 041C 041D 041E +041F 042F 0420 0421 0422 0423 0416 0412 042C 042B 0417 0428 042D 0429 0427 042A +</map> +</unicode> + + +<collation name="koi8u_general_ci"> +<map> 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F @@ -72,3 +129,12 @@ 94 A4 95 96 97 98 89 82 A1 A0 8A 9D A2 9E 9C 9F A3 80 81 9B 85 86 99 83 9A 8B 8E 8F 90 91 92 93 94 A4 95 96 97 98 89 82 A1 A0 8A 9D A2 9E 9C 9F +</map> +</collation> + +<collation name="koi8u_bin" flag="binary"/> + +</charset> + +</charsets> + diff --git a/sql/share/charsets/languages.html b/sql/share/charsets/languages.html new file mode 100644 index 00000000000..6d1a8aafc5c --- /dev/null +++ b/sql/share/charsets/languages.html @@ -0,0 +1,257 @@ +#!/bin/sh +#<pre> +( +echo "DROP TABLE lang;" +echo "CREATE TABLE lang (lang varchar(128), letters text character set utf8);" +( +grep -v "^#" << END +# +Greenlandic ÁÂÃÊÍÎÔÚÛáâãêíîôúûĨĩĸŨũ 
+#Use of these letters was abolished in a spelling reform in 1973: +#Greenlandic ÅÆØåæø +#Characters not found in the UCS: +# K LATIN CAPITAL LETTER KRA +############################################################# +#Basque ÑÜñüŔŕ +#Characters not found in the UCS: +# D LATIN CAPITAL LETTER D WITH MACRON +# d LATIN SMALL LETTER D WITH MACRON +# L LATIN CAPITAL LETTER L WITH MACRON +# l LATIN SMALL LETTER L WITH MACRON +# T LATIN CAPITAL LETTER T WITH MACRON +# t LATIN SMALL LETTER T WITH MACRON +############################################################# +#Maltese #ÀÁÂÈÉÊÌÍÎÒÓÔÙÚÛ#àáâèéêìíîòÓôùúû#ĊċĠġĦħŻżʼ +#BosnianCyr ЂЈЉЊЋЏАБВГДЕЖЗИКЛМНОПРСТУФХЦЧШабвгдежзиклмнопрстуфхцчшђјљњћџ +#Scots A +#Scots1 ƷȜȝʒ +########################################### +#### Hiragana 3040-309F +Hiragana ぁあぃいぅうぇえぉおかがきぎくぐけげこごさざしじすずせぜそぞただちぢっつづてでとどなにぬねのはばぱひびぴふぶぷへべぺほぼぽまみむめもゃやゅゆょよらりるれろゎわゐゑをん +Hiragana1 ゔ゙゚ +Hiragana2 ゛゜ゝゞ +#### Katakana 30A0-30FF +Katakana ァアィイゥウェエォオカガキギクグケゲコゴサザシジスズセゼソゾタダチヂッツヅテデトドナニヌネノハバパヒビピフブプヘベペホボポマミムメモャヤュユョヨラリルレロヮワヰヱヲンヴヵヶ +Katakana1 ヷヸヹヺ +Katakana2 ・ーヽヾ +############################################ +Albanian ÂÇËâçë +Bosnian ĆćČčĐ𩹮ž +Breton ÂÊÑÙÜâêñùü +Catalan ÀÇÈÉÍÏÒÓÚÜàçèéíïòóúü +#Catalan1 ·Ŀŀ +Croatian ĆćČčĐ𩹮ž +CroatianLig DZDzdzDŽDždžLJLjljNJNjnj +Czech ÁÉÍÓÚÝáéíóúýČčĎďĚěŇňŘřŠšŤťŮůŽž +Danish ÁÄÅÆÉÓÖØÜáäåæéóöøü +Dutch ÀÂÄÆÇÈÉÊËÎÏÑÒÓÔÖÙÚÛÜàâäæçèéêëîïñòóôöùúûü +Esperanto ĈĉĜĝĤĥĴĵŜŝŬŭ +Estonian ÄÕÖÜäõöüŠšŽž +Faroese ÅÆÐÓÖØÚÝåæðóöøúý +Finnish ÄÅÖÜäåöü +#Finnish1 ŠšŽž +French(limited) ÀÂÆÇÈÉÊËÎÏÑÔÙÛàâæçèéêëîïñôùûÿ +French ŒœŸ +German ÄÖÜßäöü +Hungarian ÁÉÍÓÖÚÜáéíóöúüŐőŰű +Icelandic ÁÆÉÍÐÓÖÚÝÞáæéíðóöúýþ +Italian ÀÈÉÌÍÏÒÓÙÚàèéìíïòóùú +#Latin A +Latvian ĀāČčĒēĢģĪīĶķĻļŅņŠšŪūŽž +Lithuanian ĄąČčĖėĘęĮįŠšŪūŲųŽž +Norwegian ÅÆØåæø +Polish ÓóĄąĆćĘꣳŃńŚśŹźŻż +Portuguese ÀÁÂÃÇÉÊÍÓÔÕÚÜàáâãçéêíóôõúü +#http://en.wikipedia.org/wiki/Special_Romanian_Unicode_characters +Romanian ÂÎâîĂăȘșȚț +Romanian(ErrorST) ÂÎâîĂ㪺Ţţ +Slovak ÁÄÉÍÓÔÚÝáäéíóôúýČčĎďĹ弾ŇňŔ੹ŤťŽž +Slovene ČčŠšŽž +Sorbian-Lower ĆćČčĚ죳ŃńŘřŚśŠšŹźŽž +Sorbian-Upper ÓóĆćČčĚ죳ŃńŘřŠšŽž +Spanish ÁÉÍÑÓÚÜáéíñóúü +Swedish ÄÅÖäåö +Turkish ÂÇÖÛÜâçöûüĞğİı +Welsh ÀÁÂÄÈÉÊËÌÍÎÏÒÓÔÖÙÚÛÜÝàáâäèéêëìíîïòóôöùúûüýÿŴŵŶŷŸẀẁẂẃẄẅỲỳ +################################## +Belarusian ЁІЎАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяёіў +Bulgarian АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯабвгдежзийклмнопрстуфхцчшщъьюя +Bulgarian1 ЀҭѐѝѢѣѪѫ +Macedonian ЃЅЈЉЊЌЏАБВГДЕЖЗИКЛМНОПРСТУФХЦЧШабвгдежзиклмнопрстуфхцчшѓѕјљњќџ +Russian ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё +RussianOLD ІіѢѣѲѳѴѵ +Serbian ЂЈЉЊЋЏАБВГДЕЖЗИКЛМНОПРСТУФХЦЧШабвгдежзиклмнопрстуфхцчшђјљњћџ +Ukrainian ЄІЇАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЬЭЮЯабвгдежзийклмнопрстуфхцчшщьэюяєіїҐґ +################################## +Armenian ԱԲԳԴԵԶԷԸԹԺԻԼԽԾԿՁՂՃՄՅՆՇՈՉՊՋՌՍՎՏՑՒՓՔՕՖ՛՜՝՞՟աբգդեֆ։ +#Armenian1 ՚֊ +#Characters not found in the UCS: +# ARMENIAN ETERNITY SIGN +# +GeorgianOld ႠႡႢႣႤႥႦႧႨႩႪႫႬႭႮႯႰႱႲႳႴႵႶႷႸႹႺႻႼႽႾႿჀჁჂჃჄჅ +Georgian აბგდევზთიკლმნოპჟრსტუფქღყშჩცძწჭხჯჰ +GeorgianArc ჱჲჳჴჵჶ +GeorgianPunc ჻ +# +GreekExt1 ΄΅Ά·ΈΉΊ»Ό½ΎΏΐ +Greek ΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩαβγδεζηθικλμνξοπρστυφχψω +GreekExt2 ΪΫάέήίΰϊϋόύώ +GreekExt4 ς +# +Hebrew אבגדהוזחטיךכלםמןנסעףפץצקרשת +################################## +#Abaza +#Abkhaz +#Adyghe +#Agul * +#(Aisor) +#Akhvakh * +#(?lvdalska) +#(Andi) * +#(Aragonese) +#Archi * +#Arumanian +#(Arvanite) +#Asturian +#Avar +#Azerbaijani +#(Bagulal) * +#Balkar +#Bashkir +#Basque ! +#Bats * +#Bezhta * +#(Botlikh) * +#Budukh * +#(Chamalal) +#Chechen +#Chuvash +#Cornish ! 
+#(Corsican) +#Dargwa +#Erzya +#(Franco-Proven?al) +#(Frisian, East) +#(Frisian, North) +#Frisian, West +#Friulian +#Gagauz +#Gaelic, Irish ! +#Gaelic, Manx ! +#Gaelic, Scottish ! +#Galician ! +#(German, Low) ! +#(German, Swiss) ! +#Godoberi * +#(Hinukh) * +#(Hunzib) * +#Ingrian +#Ingush +#Istro-Romanian +#(Judeo-Georgian) +#(Judeo-Kurdish) +#(Judeo-Tati) +#Kabardian +#Kalmyk +#Karachay +#(Karaim) +#(Karata) * +#Karelian +#Kashubian +#Kazakh +#Khinalug +#(Khvarshi) * +#(Kirmanji) +#Komi +#Komi-Permyak +#(Kryts) +#Kumyk +#(Kurdish) +#(Ladin) +#(Ladino) +#Lak +#Laz +#Lezgian +#Livonian +#(Ludian) +#Luxemburgish ! +#Mari, Hill +#Mari, Meadow +#Megleno-Romanian +#(Mingrelian) +#Moksha +#Moldavian +#Nenets, Tundra +#Nogai +#Occitan +#Old Church Slavonic +#(Olonets) +#Ossetian +#(Romani) +#Romansch +#(Rusyn) +#Rutul +#Sami, Inari +#Sami, Kildin +#Sami, Lule +#Sami, Northern +#Sami, Skolt +#Sami, Southern +#(Sami, Ter) * +#(Sami, Ume) * +#(Sardinian) * +#Scots ! +#Svan +#Tabasaran +#(Talysh) +#Tatar, Crimean +#Tatar, Kazan +#Tati +#(Tindi) * +#(Tsakonian) * +#Tsakhur * +#(Tsez) * +#(Turkish, Crimean) +#Ubykh * +#Udi +#Udmurt +#(V?mhusm?l) +#Vepsian +#Votic +#(Walloon) +#(Yiddish) +################################ +# 4 Gaelic-new-orthography +# 4 Frisian +# 3 Rhaeto-Romanic +# 2 S&AACUTEmi-with-restrictions +# 1 Rhjaeto-Romanic +# 1 Gaelic-old-and-new-orthographies +END +) | + +while read a b +do + c=`echo $b | replace "&#x" "" ";" ""` + printf "INSERT INTO lang VALUES ('$a',_ucs2 X'$c');\n" +done +) | mysql -f test + +#mysql test << END +#SELECT * FROM lang WHERE CONVERT(letters USING latin1) NOT LIKE _binary'%?%'; +#SELECT * FROM lang WHERE CONVERT(letters USING latin2) NOT LIKE _binary'%?%'; +#END + + + +list="big5 dec8 cp850 hp8 koi8r latin1 latin2 swe7 ascii ujis sjis hebrew euckr koi8u gb2312 greek cp1250 gbk latin5 armscii8 cp866 keybcs2 macce macroman cp852 latin7 cp1251 cp1256 cp1257 geostd8" + +for p in $list +do +echo "-----------------" +echo $p: +mysql -sss test << END +SELECT lang FROM lang WHERE CONVERT(letters USING $p) NOT LIKE _binary'%?%' ORDER BY lang; +END +done + diff --git a/sql/share/charsets/latin1.conf b/sql/share/charsets/latin1.conf deleted file mode 100644 index cf974aefa14..00000000000 --- a/sql/share/charsets/latin1.conf +++ /dev/null @@ -1,74 +0,0 @@ -# Configuration file for the latin1 character set - -# ctype array (must have 257 elements) - 00 - 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 - 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 - 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 - 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 - 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 - 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 20 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 - 01 01 01 01 01 01 01 10 01 01 01 01 01 01 01 02 - 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 10 02 02 02 02 02 02 02 02 - -# to_lower array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 
5F - 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 D7 F8 F9 FA FB FC FD FE DF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF - -# to_upper array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F - 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 F7 D8 D9 DA DB DC DD DE FF - -# sort_order array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F - 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - 41 41 41 41 5C 5B 5C 43 45 45 45 45 49 49 49 49 - 44 4E 4F 4F 4F 4F 5D D7 D8 55 55 55 59 59 DE DF - 41 41 41 41 5C 5B 5C 43 45 45 45 45 49 49 49 49 - 44 4E 4F 4F 4F 4F 5D F7 D8 55 55 55 59 59 DE FF diff --git a/sql/share/charsets/latin1.xml b/sql/share/charsets/latin1.xml new file mode 100644 index 00000000000..5814a17b0e1 --- /dev/null +++ b/sql/share/charsets/latin1.xml @@ -0,0 +1,253 @@ +<?xml version='1.0' encoding="utf-8"?> + +<charsets> + +<copyright> + Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +</copyright> + +<charset name="latin1"> + +<ctype> +<map> + 00 + 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 + 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 + 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 + 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 + 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 + 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 + 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 20 + 10 00 10 02 10 10 10 10 10 10 01 10 01 00 01 00 + 00 10 10 10 10 10 10 10 10 10 02 10 02 00 02 01 + 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 + 01 01 01 01 01 01 01 10 01 01 01 01 01 01 01 02 + 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 + 02 02 02 02 02 02 02 10 02 02 02 02 02 02 02 02 +</map> +</ctype> + + +<lower> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F + 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 D7 F8 F9 FA FB FC FD FE DF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</lower> + + +<upper> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F + 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 F7 D8 D9 DA DB DC DD DE FF +</map> +</upper> + + +<unicode> +<map> + 0000 0001 0002 0003 0004 0005 0006 0007 0008 0009 000A 000B 000C 000D 000E 000F + 0010 0011 0012 0013 0014 0015 0016 0017 0018 0019 001A 001B 001C 001D 001E 001F + 0020 0021 0022 0023 0024 0025 0026 0027 0028 0029 002A 002B 002C 002D 002E 002F + 0030 0031 0032 0033 0034 0035 0036 0037 0038 0039 003A 003B 003C 003D 003E 003F + 0040 0041 0042 0043 0044 0045 0046 0047 0048 0049 004A 004B 004C 004D 004E 004F + 0050 0051 0052 0053 0054 0055 0056 0057 0058 0059 005A 005B 005C 005D 005E 005F + 0060 0061 0062 0063 0064 0065 0066 0067 0068 0069 006A 006B 006C 006D 006E 006F + 0070 0071 0072 0073 0074 0075 0076 0077 0078 0079 007A 007B 007C 007D 007E 007F + 20AC 0081 201A 0192 201E 2026 2020 2021 02C6 2030 0160 2039 0152 008D 017D 008F + 0090 2018 2019 201C 201D 
2022 2013 2014 02DC 2122 0161 203A 0153 009D 017E 0178 + 00A0 00A1 00A2 00A3 00A4 00A5 00A6 00A7 00A8 00A9 00AA 00AB 00AC 00AD 00AE 00AF + 00B0 00B1 00B2 00B3 00B4 00B5 00B6 00B7 00B8 00B9 00BA 00BB 00BC 00BD 00BE 00BF + 00C0 00C1 00C2 00C3 00C4 00C5 00C6 00C7 00C8 00C9 00CA 00CB 00CC 00CD 00CE 00CF + 00D0 00D1 00D2 00D3 00D4 00D5 00D6 00D7 00D8 00D9 00DA 00DB 00DC 00DD 00DE 00DF + 00E0 00E1 00E2 00E3 00E4 00E5 00E6 00E7 00E8 00E9 00EA 00EB 00EC 00ED 00EE 00EF + 00F0 00F1 00F2 00F3 00F4 00F5 00F6 00F7 00F8 00F9 00FA 00FB 00FC 00FD 00FE 00FF +</map> +</unicode> + + +<collation name="latin1_swedish_ci"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F + 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + 41 41 41 41 5C 5B 5C 43 45 45 45 45 49 49 49 49 + 44 4E 4F 4F 4F 4F 5D D7 D8 55 55 55 59 59 DE DF + 41 41 41 41 5C 5B 5C 43 45 45 45 45 49 49 49 49 + 44 4E 4F 4F 4F 4F 5D F7 D8 55 55 55 59 59 DE FF +</map> +</collation> + + +<collation name="latin1_german1_ci"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F + 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + 41 41 41 41 41 41 41 43 45 45 45 45 49 49 49 49 + D0 4E 4F 4F 4F 4F 4F D7 4F 55 55 55 55 59 DE 53 + 41 41 41 41 41 41 41 43 45 45 45 45 49 49 49 49 + D0 4E 4F 4F 4F 4F 4F F7 4F 55 55 55 55 59 DE FF +</map> +</collation> + + +<collation name="latin1_danish_ci"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F + 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + 41 41 41 41 5B 5D 5B 43 45 45 45 45 49 49 49 49 + 44 4E 4F 4F 4F 4F 5C D7 5C 55 55 55 59 59 DE DF + 41 41 41 41 5B 5D 5B 43 45 45 45 45 49 49 49 49 + 44 4E 4F 4F 4F 4F 5C F7 5C 55 55 55 59 59 DE FF +</map> +</collation> + + +<collation name="latin1_german2_ci"/> + + +<collation name="latin1_bin" flag="binary"/> + + +<collation name="latin1_general_ci"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 51 53 57 5B 65 67 69 6B 
75 77 79 7B 7D 81 + 8F 91 93 95 98 9A A4 A6 A8 AA AF B3 B4 B5 B6 B7 + B8 41 51 53 57 5B 65 67 69 6B 75 77 79 7B 7D 81 + 8F 91 93 95 98 9A A4 A6 A8 AA AF B9 BA BB BC BF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF + 43 45 47 49 4B 4D 4F 55 5D 5F 61 63 6D 6F 71 73 + 59 7F 83 85 87 89 8B BD 8D 9C 9E A0 A2 AC B1 97 + 43 45 47 49 4B 4D 4F 55 5D 5F 61 63 6D 6F 71 73 + 59 7F 83 85 87 89 8B BE 8D 9C 9E A0 A2 AC B1 AE +</map> +</collation> + + +<collation name="latin1_general_cs"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 51 53 57 5B 65 67 69 6B 75 77 79 7B 7D 81 + 8F 91 93 95 98 9A A4 A6 A8 AA AF B3 B4 B5 B6 B7 + B8 42 52 54 58 5C 66 68 6A 6C 76 78 7A 7C 7E 82 + 90 92 94 96 99 9B A5 A7 A9 AB B0 B9 BA BB BC BF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF + 43 45 47 49 4B 4D 4F 55 5D 5F 61 63 6D 6F 71 73 + 59 7F 83 85 87 89 8B BD 8D 9C 9E A0 A2 AC B1 97 + 44 46 48 4A 4C 4E 50 56 5E 60 62 64 6E 70 72 74 + 5A 80 84 86 88 8A 8C BE 8E 9D 9F A1 A3 AD B2 AE +</map> +</collation> + + +<collation name="latin1_spanish_ci"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 51 53 57 5B 65 67 69 6B 75 77 79 7B 7D 81 + 8F 91 93 95 98 9A A4 A6 A8 AA AF B3 B4 B5 B6 B7 + B8 41 51 53 57 5B 65 67 69 6B 75 77 79 7B 7D 81 + 8F 91 93 95 98 9A A4 A6 A8 AA AF B9 BA BB BC BF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF + 41 41 41 41 41 41 41 53 5B 5B 5B 5B 6B 6B 6B 6B + 57 7F 81 81 81 81 81 BD 81 9A 9A 9A 9A AA B1 97 + 41 41 41 41 41 41 41 53 5B 5B 5B 5B 6B 6B 6B 6B + 57 7F 81 81 81 81 81 BE 81 9A 9A 9A 9A AA B1 AA +</map> +</collation> + +</charset> + +</charsets> diff --git a/sql/share/charsets/latin2.conf b/sql/share/charsets/latin2.conf deleted file mode 100644 index cc18c22c0a2..00000000000 --- a/sql/share/charsets/latin2.conf +++ /dev/null @@ -1,74 +0,0 @@ -# Configuration file for the latin2 character set - -# ctype array (must have 257 elements) - 00 - 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 - 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 - 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 - 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 - 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 - 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 00 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 48 01 10 01 10 01 01 10 10 01 01 01 01 10 01 01 - 10 02 10 02 10 02 02 10 10 02 02 02 02 10 02 02 - 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 - 10 01 01 01 01 01 01 10 01 01 01 01 01 01 01 10 - 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 10 02 02 02 02 02 02 02 10 - -# to_lower array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F 
- 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F - 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 B1 A2 B3 A4 B5 B6 A7 A8 B9 BA BB BC AD BE BF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - D0 F1 F2 F3 F4 F5 F6 D7 F8 F9 FA FB FC FD FE DF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF - -# to_upper array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F - 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 A1 B2 A3 B4 A5 A6 B7 B8 A9 AA AB AC BD AE AF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - F0 D1 D2 D3 D4 D5 D6 F7 D8 D9 DA DB DC DD DE FF - -# sort_order array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 44 45 48 49 4B 4C 4D 4E 4F 50 51 53 54 56 - 58 59 5A 5B 5E 5F 60 61 62 63 64 68 69 6A 6B 6C - 6D 41 44 45 48 49 4B 4C 4D 4E 4F 50 51 53 54 56 - 58 59 5A 5B 5E 5F 60 61 62 63 64 6E 6F 70 71 FF - FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF - FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF - FF 42 FF 52 FF 51 5C FF FF 5D 5B 5E 65 FF 67 66 - FF 42 FF 52 FF 51 5C FF FF 5D 5B 5E 65 FF 67 66 - 5A 43 43 43 43 51 46 45 47 49 4A 49 49 4E 4E 48 - FF 55 54 57 56 56 56 FF 5A 5F 5F 5F 5F 63 5E FF - 5A 43 43 43 43 51 46 45 47 49 4A 49 49 4E 4E 48 - FF 55 54 57 56 56 56 FF 5A 5F 5F 5F 5F 63 5E FF diff --git a/sql/share/charsets/latin2.xml b/sql/share/charsets/latin2.xml new file mode 100644 index 00000000000..7f00148a1df --- /dev/null +++ b/sql/share/charsets/latin2.xml @@ -0,0 +1,186 @@ +<?xml version='1.0' encoding="utf-8"?> + +<charsets> + +<copyright> + Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +</copyright> + +<charset name="latin2"> + +<ctype> +<map> + 00 + 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 + 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 + 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 + 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 + 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 + 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 + 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 00 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 48 01 10 01 10 01 01 10 10 01 01 01 01 10 01 01 + 10 02 10 02 10 02 02 10 10 02 02 02 02 10 02 02 + 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 + 10 01 01 01 01 01 01 10 01 01 01 01 01 01 01 10 + 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 + 02 02 02 02 02 02 02 10 02 02 02 02 02 02 02 10 +</map> +</ctype> + + +<lower> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F + 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 B1 A2 B3 A4 B5 B6 A7 A8 B9 BA BB BC AD BE BF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 D7 F8 F9 FA FB FC FD FE DF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</lower> + + +<upper> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F + 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 A1 B2 A3 B4 A5 A6 B7 B8 A9 AA AB AC BD AE AF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 F7 D8 D9 DA DB DC DD DE FF +</map> +</upper> + + +<unicode> +<map> +0000 0001 0002 0003 0004 0005 0006 0007 0008 0009 000A 000B 000C 000D 000E 000F +0010 0011 0012 0013 0014 0015 0016 0017 0018 0019 001A 001B 001C 001D 001E 001F +0020 0021 0022 0023 0024 0025 0026 0027 0028 0029 002A 002B 002C 002D 002E 002F +0030 0031 0032 0033 0034 0035 0036 0037 0038 0039 003A 003B 003C 003D 003E 003F +0040 0041 0042 0043 0044 0045 0046 0047 0048 0049 004A 004B 004C 004D 004E 004F +0050 0051 0052 0053 0054 0055 0056 0057 0058 0059 005A 005B 005C 005D 005E 005F +0060 0061 0062 0063 0064 0065 0066 0067 0068 0069 006A 006B 006C 006D 006E 006F +0070 0071 0072 0073 0074 0075 0076 0077 0078 0079 007A 007B 007C 007D 007E 007F +0080 0081 0082 0083 0084 0085 0086 0087 0088 0089 008A 008B 008C 008D 008E 008F +0090 0091 0092 0093 0094 0095 0096 
0097 0098 0099 009A 009B 009C 009D 009E 009F +00A0 0104 02D8 0141 00A4 013D 015A 00A7 00A8 0160 015E 0164 0179 00AD 017D 017B +00B0 0105 02DB 0142 00B4 013E 015B 02C7 00B8 0161 015F 0165 017A 02DD 017E 017C +0154 00C1 00C2 0102 00C4 0139 0106 00C7 010C 00C9 0118 00CB 011A 00CD 00CE 010E +0110 0143 0147 00D3 00D4 0150 00D6 00D7 0158 016E 00DA 0170 00DC 00DD 0162 00DF +0155 00E1 00E2 0103 00E4 013A 0107 00E7 010D 00E9 0119 00EB 011B 00ED 00EE 010F +0111 0144 0148 00F3 00F4 0151 00F6 00F7 0159 016F 00FA 0171 00FC 00FD 0163 02D9 +</map> +</unicode> + + +<collation name="latin2_general_ci"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 44 45 48 49 4B 4C 4D 4E 4F 50 51 53 54 56 + 58 59 5A 5B 5E 5F 60 61 62 63 64 68 69 6A 6B 6C + 6D 41 44 45 48 49 4B 4C 4D 4E 4F 50 51 53 54 56 + 58 59 5A 5B 5E 5F 60 61 62 63 64 6E 6F 70 71 FF + FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF + FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF + FF 42 FF 52 FF 51 5C FF FF 5D 5B 5E 65 FF 67 66 + FF 42 FF 52 FF 51 5C FF FF 5D 5B 5E 65 FF 67 66 + 5A 43 43 43 43 51 46 45 47 49 4A 49 49 4E 4E 48 + FF 55 54 57 56 56 56 FF 5A 5F 5F 5F 5F 63 5E FF + 5A 43 43 43 43 51 46 45 47 49 4A 49 49 4E 4E 48 + FF 55 54 57 56 56 56 FF 5A 5F 5F 5F 5F 63 5E FF +</map> +</collation> + + +<collation name="latin2_croatian_ci"> +<map> +00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F +10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F +20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F +30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F +40 41 43 44 48 4B 4D 4E 4F 50 52 53 54 56 57 59 +5B 5C 5D 5F 62 64 66 67 68 69 6B C6 C7 C8 C9 CA +CB 41 43 44 48 4B 4D 4E 4F 50 52 53 54 56 57 59 +5B 5C 5D 5F 62 64 66 67 68 69 6B CC CD CE CF D0 +D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF E0 +E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF F0 +F1 41 F2 54 F3 54 5F F4 F5 61 5F 62 6B F6 8E 6B +F7 41 F8 54 F9 54 5F FA FB 61 5F 62 6B FC 8E 6B +5D 41 41 41 41 54 47 44 46 4B 4B 4B 4B 50 50 48 +4A 57 57 59 59 59 59 FD 5D 64 64 64 64 69 62 5F +5D 41 41 41 41 54 47 44 46 4B 4B 4B 4B 50 50 48 +4A 57 57 59 59 59 59 FE 5D 64 64 64 64 69 62 FF +</map> +</collation> + + +<collation name="latin2_czech_ci"/> + + +<collation name="latin2_hungarian_ci"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 47 48 4C 4E 53 54 55 56 5A 5B 5C 60 61 64 + 69 6A 6B 6E 72 75 7A 7B 7C 7D 7F 83 84 85 86 87 + 88 41 47 48 4C 4E 53 54 55 56 5A 5B 5C 60 61 64 + 69 6A 6B 6E 72 75 7A 7B 7C 7D 7F 89 8A 8B 8C 00 + 01 78 4E 04 05 06 07 08 09 0A 67 67 56 56 0F 41 + 4E 12 13 67 67 64 78 75 78 67 78 1C 1D 1E 1F FF + 41 56 64 75 5E 6F FF 67 FF 70 71 73 80 FF 81 82 + FF 42 FF 5D FF 41 6F FF FF 70 71 73 80 FF 81 82 + 6C 41 44 45 46 5F 49 4B 4A 4E 51 52 50 56 57 4D + FF 62 63 64 66 67 67 FF 6D 77 75 78 78 7E 74 FF + 64 41 44 45 46 5F 49 4B 4A 4E 51 78 50 56 58 4D + FF 62 63 64 66 67 67 FF 6D 77 75 78 78 7E 74 FF +</map> +</collation> + +<collation name="latin2_bin" flag="binary"/> + +</charset> + +</charsets> diff --git a/sql/share/charsets/latin5.conf b/sql/share/charsets/latin5.conf deleted file mode 100644 index 92fbd2299bb..00000000000 --- a/sql/share/charsets/latin5.conf +++ /dev/null @@ -1,78 +0,0 @@ -# Configuration file for the latin5 (turkish) character set - -# Note: all accented 
characters are compared separately (this -# is different from the default latin1 character set, where -# e.g. a = ä = á, etc.). - -# ctype array (must have 257 elements) - 00 - 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 - 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 - 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 - 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 - 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 - 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 20 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 - 01 01 01 01 01 01 01 10 01 01 01 01 01 01 01 02 - 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 10 02 02 02 02 02 02 02 02 - -# to_lower array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 61 62 63 64 65 66 67 68 FD 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F - 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 D7 F8 F9 FA FB FC 69 FE DF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF - -# to_upper array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F - 60 41 42 43 44 45 46 47 48 DD 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 F7 D8 D9 DA DB DC 49 DE FF - -# sort_order array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 45 46 47 48 4A 4B 4D 4E 4F 50 51 52 - 54 55 56 57 59 5A 5C 5D 5E 5F 60 61 62 63 64 65 - 66 41 42 43 45 46 47 48 4A 4C 4D 4E 4F 50 51 52 - 54 55 56 57 59 5A 5C 5D 5E 5F 60 87 88 89 8A 8B - 8C 8D 8E 8F 90 91 92 93 94 95 96 97 98 99 9A 9B - 9C 9D 9E 9F A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB - AC AD AE AF B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB - BC BD BE BF C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB - CC CD CE CF D0 D1 D2 44 D3 D4 D5 D6 D7 D8 D9 DA - 49 DB DC DD DE DF 53 E0 E1 E2 E3 E4 5B 4C 58 E5 - CC CD CE CF D0 D1 D2 44 D3 D4 D5 D6 D7 D8 D9 DA - 49 DB DC DD DE DF 53 FA E1 E2 E3 E4 5B 4B 58 FF diff --git a/sql/share/charsets/latin5.xml 
b/sql/share/charsets/latin5.xml new file mode 100644 index 00000000000..5004f045889 --- /dev/null +++ b/sql/share/charsets/latin5.xml @@ -0,0 +1,139 @@ +<?xml version='1.0' encoding="utf-8"?> + +<charsets> + +<copyright> + Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +</copyright> + +<charset name="latin5"> + +<ctype> +<map> + 00 + 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 + 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 + 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 + 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 + 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 + 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 + 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 20 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 + 01 01 01 01 01 01 01 10 01 01 01 01 01 01 01 02 + 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 + 02 02 02 02 02 02 02 10 02 02 02 02 02 02 02 02 +</map> +</ctype> + + +<lower> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 61 62 63 64 65 66 67 68 FD 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F + 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 D7 F8 F9 FA FB FC 69 FE DF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</lower> + + +<upper> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F + 60 41 42 43 44 45 46 47 48 DD 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 F7 D8 D9 DA DB DC 49 DE FF +</map> +</upper> + + +<unicode> +<map> +0000 0001 0002 0003 
0004 0005 0006 0007 0008 0009 000A 000B 000C 000D 000E 000F +0010 0011 0012 0013 0014 0015 0016 0017 0018 0019 001A 001B 001C 001D 001E 001F +0020 0021 0022 0023 0024 0025 0026 0027 0028 0029 002A 002B 002C 002D 002E 002F +0030 0031 0032 0033 0034 0035 0036 0037 0038 0039 003A 003B 003C 003D 003E 003F +0040 0041 0042 0043 0044 0045 0046 0047 0048 0049 004A 004B 004C 004D 004E 004F +0050 0051 0052 0053 0054 0055 0056 0057 0058 0059 005A 005B 005C 005D 005E 005F +0060 0061 0062 0063 0064 0065 0066 0067 0068 0069 006A 006B 006C 006D 006E 006F +0070 0071 0072 0073 0074 0075 0076 0077 0078 0079 007A 007B 007C 007D 007E 007F +0080 0081 0082 0083 0084 0085 0086 0087 0088 0089 008A 008B 008C 008D 008E 008F +0090 0091 0092 0093 0094 0095 0096 0097 0098 0099 009A 009B 009C 009D 009E 009F +00A0 00A1 00A2 00A3 00A4 00A5 00A6 00A7 00A8 00A9 00AA 00AB 00AC 00AD 00AE 00AF +00B0 00B1 00B2 00B3 00B4 00B5 00B6 00B7 00B8 00B9 00BA 00BB 00BC 00BD 00BE 00BF +00C0 00C1 00C2 00C3 00C4 00C5 00C6 00C7 00C8 00C9 00CA 00CB 00CC 00CD 00CE 00CF +011E 00D1 00D2 00D3 00D4 00D5 00D6 00D7 00D8 00D9 00DA 00DB 00DC 0130 015E 00DF +00E0 00E1 00E2 00E3 00E4 00E5 00E6 00E7 00E8 00E9 00EA 00EB 00EC 00ED 00EE 00EF +011F 00F1 00F2 00F3 00F4 00F5 00F6 00F7 00F8 00F9 00FA 00FB 00FC 0131 015F 00FF +</map> +</unicode> + + +<collation name="latin5_turkish_ci"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 45 46 47 48 4A 4B 4D 4E 4F 50 51 52 + 54 55 56 57 59 5A 5C 5D 5E 5F 60 61 62 63 64 65 + 66 41 42 43 45 46 47 48 4A 4C 4D 4E 4F 50 51 52 + 54 55 56 57 59 5A 5C 5D 5E 5F 60 87 88 89 8A 8B + 8C 8D 8E 8F 90 91 92 93 94 95 96 97 98 99 9A 9B + 9C 9D 9E 9F A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB + AC AD AE AF B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB + BC BD BE BF C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB + 41 41 41 41 41 41 41 44 46 46 46 46 4C 4C 4C 4C + 49 51 52 52 52 52 53 E0 52 5A 5A 5A 5B 4C 58 57 + 41 41 41 41 41 41 41 44 46 46 46 46 4C 4C 4C 4C + 49 51 52 52 52 52 53 FA 52 5A 5A 5A 5B 4B 58 5F +</map> +</collation> + +<collation name="latin5_bin" flag="binary"/> + +</charset> + +</charsets> diff --git a/sql/share/charsets/latin7.xml b/sql/share/charsets/latin7.xml new file mode 100644 index 00000000000..dd87a1a2d89 --- /dev/null +++ b/sql/share/charsets/latin7.xml @@ -0,0 +1,187 @@ +<?xml version='1.0' encoding="utf-8"?> + +<charsets> + +<copyright> + Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +</copyright> + +<charset name="latin7"> + +<ctype> +<map> + 00 + 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 + 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 + 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 + 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 + 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 + 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 + 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 20 + 01 20 10 20 10 10 00 00 20 10 20 10 20 10 10 10 + 20 10 10 10 10 10 10 10 20 00 20 10 20 10 10 20 + 48 20 10 10 10 20 10 10 10 10 01 10 10 10 10 01 + 10 10 10 10 10 10 10 10 10 10 02 10 10 10 10 02 + 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 + 01 01 01 01 01 01 01 10 01 01 01 01 01 01 01 02 + 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 + 02 02 02 02 02 02 02 10 02 02 02 02 02 02 02 10 +</map> +</ctype> + + +<lower> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F + 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 B8 A9 BA AB AC AD AE BF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 D7 F8 F9 FA FB FC FD FE DF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</lower> + + +<upper> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F + 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 A8 B9 AA BB BC BD BE AF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 F7 D8 D9 DA DB DC DD DE FF +</map> +</upper> + + +<unicode> +<map> +0000 0001 0002 0003 0004 0005 0006 0007 0008 0009 000A 000B 000C 000D 000E 000F +0010 0011 0012 0013 0014 0015 0016 0017 0018 0019 001A 001B 001C 001D 001E 001F +0020 0021 0022 0023 0024 0025 0026 0027 0028 0029 002A 002B 002C 002D 002E 002F +0030 0031 0032 0033 0034 0035 0036 0037 0038 0039 003A 003B 003C 003D 003E 003F +0040 0041 0042 0043 0044 0045 0046 0047 0048 0049 004A 004B 004C 004D 004E 004F +0050 0051 0052 0053 0054 0055 0056 0057 0058 0059 005A 005B 005C 005D 005E 005F +0060 0061 0062 0063 0064 0065 0066 0067 0068 0069 006A 006B 006C 006D 006E 006F +0070 0071 0072 0073 0074 0075 0076 0077 0078 0079 007A 007B 007C 007D 007E 007F +0080 0081 0082 0083 0084 0085 0086 0087 0088 0089 008A 008B 008C 008D 008E 008F +0090 0091 0092 0093 0094 0095 0096 
0097 0098 0099 009A 009B 009C 009D 009E 009F +00A0 201D 00A2 00A3 00A4 201E 00A6 00A7 00D8 00A9 0156 00AB 00AC 00AD 00AE 00C6 +00B0 00B1 00B2 00B3 201C 00B5 00B6 00B7 00F8 00B9 0157 00BB 00BC 00BD 00BE 00E6 +0104 012E 0100 0106 00C4 00C5 0118 0112 010C 00C9 0179 0116 0122 0136 012A 013B +0160 0143 0145 00D3 014C 00D5 00D6 00D7 0172 0141 015A 016A 00DC 017B 017D 00DF +0105 012F 0101 0107 00E4 00E5 0119 0113 010D 00E9 017A 0117 0123 0137 012B 013C +0161 0144 0146 00F3 014D 00F5 00F6 00F7 0173 0142 015B 016B 00FC 017C 017E 2019 +</map> +</unicode> + + +<collation name="latin7_estonian_cs"> +<map> + 00 02 03 04 05 06 07 08 09 2E 2F 30 31 32 0A 0B + 0C 0D 0E 0F 10 11 12 13 14 15 16 17 18 19 1A 1B + 2C 33 34 35 36 37 38 27 39 3A 3B 5D 3C 28 3D 3E + 76 7A 7C 7E 80 81 82 83 84 85 3F 40 5E 5F 60 41 + 42 86 90 92 98 9A A4 A6 AA AC B2 B4 B8 BE C0 C6 + CE D0 D2 D6 E5 E8 EE F0 FA FC DD 43 44 45 46 47 + 48 87 91 93 99 9B A5 A7 AB AD B3 B5 B9 BF C1 C7 + CF D1 D3 D7 E6 E9 EF F1 FB FD DE 49 4A 4B 4C 1C + 01 1D 57 1E 5A 74 71 72 1F 75 20 5B 21 4E 52 51 + 22 55 56 58 59 73 2A 2B 23 E7 24 5C 25 4F 54 26 + 2D FE 66 67 68 FF 4D 69 CC 6A D4 62 6B 29 6C 8E + 6D 61 7D 7F 50 6E 6F 70 CD 7B D5 63 77 78 79 8F + 8C B0 88 94 F4 8A A2 A0 96 9C DF 9E A8 B6 AE BA + DB C2 C4 C8 CA F2 F6 64 EC BC D8 EA F8 E1 E3 DA + 8D B1 89 95 F5 8B A3 A1 97 9D E0 9F A9 B7 AF BB + DC C3 C5 C9 CB F3 F7 65 ED BD D9 EB F9 E2 E4 53 +</map> +</collation> + + +<collation name="latin7_general_cs"> +<!-- Created for case-sensitive record search --> +<!-- by Andis Grasis & Rihards Grasis e-mail:andis@cata.lv --> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 30 32 33 34 35 36 37 2B 38 39 3A 5C 3B 2C 3C 3D + 76 7A 7C 7E 80 81 82 83 84 85 3E 3F 5D 5E 5F 40 + 41 86 92 94 9A 9C A6 A8 AC AE B4 B6 BA C0 C2 C8 + D4 D6 D8 DC E3 E6 EE F0 F2 F4 F6 42 43 44 45 46 + 47 87 93 95 9B 9D A7 A9 AD AF B5 B7 BB C1 C3 C9 + D5 D7 D9 DD E4 E7 EF F1 F3 F5 F7 48 49 4A 4B 20 + 75 21 56 22 59 73 70 71 23 74 24 5A 25 4D 51 50 + 26 54 55 57 58 72 2E 2F 27 E5 28 5B 29 4E 53 2A + 31 FE 65 66 67 FF 4C 68 D3 69 DA 61 6A 2D 6B 90 + 6C 60 7D 7F 4F 6D 6E 6F D2 7B DB 62 77 78 79 91 + 8E B2 8A 96 88 8C A4 A2 98 9E F8 A0 AA B8 B0 BE + E1 C4 C6 CA CE D0 CC 63 EC BC DE EA E8 FA FC E0 + 8F B3 8B 97 89 8D A5 A3 99 9F F9 A1 AB B9 B1 BF + E2 C5 C7 CB CF D1 CD 64 ED BD DF EB E9 FB FD 52 +</map> +</collation> + + +<collation name="latin7_general_ci"> +<!-- Created for case-insensitive record search --> +<!-- Created by Andis & Rihards --> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 30 32 33 34 35 36 37 2B 38 39 3A 5C 3B 2C 3C 3D + 76 7A 7C 7E 80 81 82 83 84 85 3E 3F 5D 5E 5F 40 + 41 86 92 94 9A 9C A6 A8 AC AE B4 B6 BA C0 C2 C8 + D4 D6 D8 DC E3 E6 EE F0 F2 F4 F6 42 43 44 45 46 + 47 86 92 94 9A 9C A6 A8 AC AE B4 B6 BA C0 C2 C8 + D4 D6 D8 DC E2 E6 EE F0 F2 F4 F6 48 49 4A 4B 20 + 75 21 56 22 59 73 70 71 23 74 24 5A 25 4D 51 50 + 26 54 55 57 58 72 2E 2F 27 E5 28 5B 29 4E 53 2A + 31 FE 65 66 67 FF 4C 68 2D 69 DA 61 6A 2D 6B 90 + 6C 60 7D 7F 4F 6D 6E 6F D3 7B DB 62 77 78 79 90 + 8E B2 8A 96 88 8C A4 A2 98 9E F8 A0 AA B8 B0 BE + E1 C4 C6 CA CE D0 CC 63 EC BC DE EA E8 FA FC E0 + 8E B2 8A 96 88 8C A4 A2 98 9E F8 A0 AA B8 B0 BE + E1 C4 C6 CA CE D0 CC 64 EC BC DE EA E8 FA FC 52 +</map> +</collation> + +<collation name="latin7_bin" flag="binary"/> + +</charset> + +</charsets> diff --git a/sql/share/charsets/macce.xml b/sql/share/charsets/macce.xml new file mode 100644 index 
00000000000..61f6d79b34f --- /dev/null +++ b/sql/share/charsets/macce.xml @@ -0,0 +1,207 @@ +<?xml version='1.0' encoding="utf-8"?> + +<charsets> + +<copyright> + Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +</copyright> + +<charset name="macce"> + +<ctype> +<map> + 00 + 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 + 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 + 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 + 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 + 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 + 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 + 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 00 + 01 01 02 01 01 01 01 02 02 01 02 02 01 02 02 01 + 02 01 02 02 01 02 01 02 02 02 02 02 02 01 02 02 + 00 00 01 00 00 00 00 02 00 00 00 02 00 00 02 01 + 02 01 00 00 02 01 00 00 02 01 02 01 02 01 02 01 + 02 01 00 00 02 01 00 00 00 00 00 02 01 01 02 01 + 00 00 00 00 00 00 00 00 02 01 02 01 00 00 02 01 + 02 01 00 00 02 01 02 01 01 02 01 01 02 01 01 01 + 02 01 01 02 01 02 01 02 01 02 02 01 01 02 01 00 +</map> +</ctype> + + +<lower> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 54 75 76 77 78 79 7A 5B 5C 5D 5E 5F + 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 54 75 76 77 78 79 7A 7B 7C 7D 7E 7F + 8A 82 82 8E 88 9A 9F 87 88 8B 8A 8B 8D 8D 8E 90 + 90 93 92 93 95 95 98 97 98 99 9A 9B 9C 9E 9E 9F + A0 A1 AB A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE B0 + B0 B4 B2 B3 B4 FA B6 B7 B8 BA BA BC BC BE BE C0 + C0 C4 C2 C3 C4 CB C6 C7 C8 C9 CA CB CE 9B CE D8 + D0 D1 D2 D3 D4 D5 D6 D7 D8 DA DA DE DC DD DE E0 + E0 E4 E2 E3 E4 E6 E6 87 E9 E9 92 EC EC F0 97 99 + F0 F3 9C F3 F5 F5 F7 F7 F9 F9 FA FD B8 FD AE FF +</map> +</lower> + + +<upper> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 74 55 56 57 58 59 5A 5B 5C 5D 5E 5F + 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 74 55 56 57 58 59 5A 7B 7C 7D 7E 7F + 80 81 81 83 84 85 86 E7 84 89 80 89 8C 8C 83 8F + 8F 91 EA 91 94 94 96 EE 96 EF 85 CD F2 9D 9D 86 + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA A2 AC AD FE AF + AF B1 B2 B3 B1 B5 B6 B7 FC B9 B9 BB BB BD BD BF + BF C1 C2 C3 C1 C5 C6 C7 C8 C9 CA C5 CC CD CC CF + D0 D1 D2 D3 D4 D5 D6 D7 CF D9 D9 DB DC DD DB DF + DF E1 E2 E3 E1 E5 E5 E7 E8 E8 EA EB EB ED EE EF + ED F1 F2 F1 F4 F4 F6 F6 F8 F8 B5 FB FC FB FE FF +</map> +</upper> + + +<unicode> +<map> + 0000 0001 0002 0003 0004 0005 0006 0007 0008 0009 000A 000B 000C 000D 000E 000F + 
0010 0011 0012 0013 0014 0015 0016 0017 0018 0019 001A 001B 001C 001D 001E 001F + 0020 0021 0022 0023 0024 0025 0026 0027 0028 0029 002A 002B 002C 002D 002E 002F + 0030 0031 0032 0033 0034 0035 0036 0037 0038 0039 003A 003B 003C 003D 003E 003F + 0040 0041 0042 0043 0044 0045 0046 0047 0048 0049 004A 004B 004C 004D 004E 004F + 0050 0051 0052 0053 0054 0055 0056 0057 0058 0059 005A 005B 005C 005D 005E 005F + 0060 0061 0062 0063 0064 0065 0066 0067 0068 0069 006A 006B 006C 006D 006E 006F + 0070 0071 0072 0073 0074 0075 0076 0077 0078 0079 007A 007B 007C 007D 007E 007F + 00C4 0100 0101 00C9 0104 00D6 00DC 00E1 0105 010C 00E4 010D 0106 0107 00E9 0179 + 017A 010E 00ED 010F 0112 0113 0116 00F3 0117 00F4 00F6 00F5 00FA 011A 011B 00FC + 2020 00B0 0118 00A3 00A7 2022 00B6 00DF 00AE 00A9 2122 0119 00A8 2260 0123 012E + 012F 012A 2264 2265 012B 0136 2202 2211 0142 013B 013C 013D 013E 0139 013A 0145 + 0146 0143 00AC 221A 0144 0147 2206 00AB 00BB 2026 00A0 0148 0150 00D5 0151 014C + 2013 2014 201C 201D 2018 2019 00F7 25CA 014D 0154 0155 0158 2039 203A 0159 0156 + 0157 0160 201A 201E 0161 015A 015B 00C1 0164 0165 00CD 017D 017E 016A 00D3 00D4 + 016B 016E 00DA 016F 0170 0171 0172 0173 00DD 00FD 0137 017B 0141 017C 0122 02C7 +</map> +</unicode> + + +<collation name="macce_general_ci"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 46 47 4A 4C 52 53 55 56 5A 5B 5D 62 62 67 + 6F 70 71 75 79 81 88 89 8A 8B 8D 90 91 92 93 94 + 95 41 46 47 4A 4C 52 53 55 56 5A 5B 5D 62 62 67 + 6F 70 71 75 79 81 88 89 8A 8B 8D 96 97 98 99 9A + 41 41 41 4C 41 67 81 41 41 47 41 47 47 47 4C 8D + 8D 4A 56 4A 4C 4C 4C 67 4C 67 67 67 81 4C 4C 81 + A0 A1 4C A3 A4 A5 A6 75 A8 A9 AA 4C AC AD 53 56 + 56 56 B2 B3 56 5B B6 B7 5D 5D 5D 5D 5D 5D 5D 62 + 62 62 C2 C3 62 62 C6 C7 C8 C9 CA 62 67 67 67 67 + D0 D1 D2 D3 D4 D5 D6 D7 67 71 71 71 DC DD 71 71 + 71 75 E2 E3 75 75 75 41 79 79 56 8D 8D 81 67 67 + 81 81 81 81 81 81 81 81 8B 8B 5B 8D 5D 8D 53 FF +</map> +</collation> + + +<collation name="macce_bin" flag="binary"/> + +<collation name="macce_ci_ai"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 4B 4D 53 57 63 65 69 6B 73 75 79 83 85 8D + 9B 9D 9F A7 AE B2 C0 C2 C4 C6 CA D2 D3 D4 D5 D6 + D7 41 4B 4D 53 57 63 65 69 6B 73 75 79 83 85 8D + 9B 9D 9F A7 AE B2 C0 C2 C4 C6 CA D8 D9 DA DB DC + 41 41 41 57 41 8D B2 41 41 4D 41 4D 4D 4D 57 CA + CA 53 6B 53 57 57 57 8D 57 8D 8D 8D B2 57 57 B2 + DD DE 57 DF E0 E1 E2 A7 E3 E4 E5 57 E6 E7 65 6B + 6B 6B E8 E9 6B 75 EA EB 79 79 79 79 79 79 79 85 + 85 85 EC ED 85 85 EE EF F0 F1 F2 85 8D 8D 8D 8D + F3 F4 F5 F6 F7 F8 F9 FA 8D 9F 9F 9F FB FC 9F 9F + 9F A7 FD FE A7 A7 A7 41 BE BE 6B CA CA B2 8D 8D + B2 B2 B2 B2 B2 B2 B2 B2 C6 C6 75 CA 79 CA 65 FF +</map> +</collation> + + +<collation name="macce_ci"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 4B 4D 53 57 63 65 69 6B 73 75 79 83 85 8D + 9B 9D 9F A7 AE B2 C0 C2 C4 C6 CA D2 D3 D4 D5 D6 + D7 41 4B 4D 53 57 63 65 69 6B 73 75 79 83 85 8D + 9B 9D 9F A7 AE B2 C0 C2 C4 C6 CA D8 D9 DA DB DC + 45 47 47 59 49 91 B6 43 49 4F 45 4F 51 51 59 CE + CE 55 71 55 5B 5B 5D 8F 5D 99 91 
97 B8 5F 5F B6 + DD DE 61 DF E0 E1 E2 AD E3 E4 E5 61 E6 E7 67 6F + 6F 6D E8 E9 6D 77 EA EB 7B 81 82 7F 7F 7D 7D 8B + 8B 87 EC ED 87 89 EE EF F0 F1 F2 89 93 97 93 95 + F3 F4 F5 F6 F7 F8 F9 FA 95 A1 A1 A3 FB FC A3 A5 + A5 A9 FD FE A9 AB AB 43 B0 B0 71 CC CC BC 8F 99 + BC B4 B8 B4 BA BA BE BE C8 C8 77 D0 7B D0 67 FF + +</map> +</collation> + + +<collation name="macce_cs"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 4B 4D 53 57 63 65 69 6B 73 75 79 83 85 8D + 9B 9D 9F A7 AE B2 C0 C2 C4 C6 CA D2 D3 D4 D5 D6 + D7 42 4C 4E 54 58 64 66 6A 6C 74 76 7A 84 86 8E + 9C 9E A0 A8 AF B3 C1 C3 C5 C7 CB D8 D9 DA DB DC + 45 47 48 59 49 91 B6 44 4A 4F 46 50 51 52 5A CE + CF 55 72 56 5B 5C 5D 90 5E 9A 92 98 B8 5F 60 B7 + DD DE 61 DF E0 E1 E2 AD E3 E4 E5 62 E6 E7 68 6F + 70 6D E8 E9 6E 77 EA EB 7C 81 82 7F 80 7D 7E 8B + 8C 87 EC ED 88 89 EE EF F0 F1 F2 8A 93 97 94 95 + F3 F4 F5 F6 F7 F8 F9 FA 96 A1 A2 A3 FB FC A4 A5 + A6 A9 FD FE AA AB AC 43 B0 B1 71 CC CD BC 8F 99 + BD B4 B9 B5 BA BB BE BF C8 C9 78 D0 7B D1 67 FF +</map> +</collation> + + +</charset> + +</charsets> diff --git a/sql/share/charsets/macroman.xml b/sql/share/charsets/macroman.xml new file mode 100644 index 00000000000..36c8e8cf13a --- /dev/null +++ b/sql/share/charsets/macroman.xml @@ -0,0 +1,200 @@ +<?xml version='1.0' encoding="utf-8"?> + +<charsets> + +<copyright> + Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +</copyright> + +<charset name="macroman"> + +<ctype> +<map> + 00 + 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 + 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 + 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 + 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 + 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 + 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 + 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 + 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 10 + 20 01 01 01 01 01 01 02 02 02 02 02 02 02 02 02 + 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 + 00 00 00 00 00 00 00 02 00 00 00 00 00 00 01 01 + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 02 02 + 00 00 00 00 02 00 00 00 00 00 00 20 01 01 00 00 + 00 00 00 00 00 00 00 00 02 01 00 00 00 00 00 00 + 00 00 00 00 00 20 01 01 01 01 01 01 01 01 01 01 + 00 01 01 01 01 02 00 00 00 00 00 00 00 00 00 00 +</map> +</ctype> + +<lower> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F + 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F + 8A 8C 8D 8E 96 9A 9F 87 88 89 8A 8B 8C 8D 8E 8F + 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD BE BF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA 88 8B 9B CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D8 D8 DA DB DC DD DE DF + E0 E1 E2 E3 E4 89 90 87 91 8F 92 94 95 93 97 99 + F0 98 9C 9E 9D F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</lower> + +<upper> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F + 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F + 80 81 82 83 84 85 86 E7 CB E5 80 CC 81 82 83 E9 + E6 E8 EA ED EB EC 84 EE F1 EF 85 CD F2 F4 F3 86 + A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD AE AF + C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + D0 D1 D2 D3 D4 D5 D6 D7 D9 D9 DA DB DC DD DE DF + E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</upper> + + +<unicode> +<map> + 0000 0001 0002 0003 0004 0005 0006 0007 0008 0009 000A 000B 000C 000D 000E 000F + 0010 0011 0012 0013 0014 0015 0016 0017 0018 0019 001A 001B 001C 001D 001E 001F + 0020 0021 0022 0023 0024 0025 0026 0027 0028 0029 002A 002B 002C 002D 002E 002F + 0030 0031 0032 0033 0034 0035 0036 0037 0038 0039 003A 003B 003C 003D 003E 003F + 0040 0041 0042 0043 0044 0045 0046 0047 0048 0049 004A 004B 004C 004D 004E 004F + 0050 0051 0052 0053 0054 0055 0056 0057 0058 0059 005A 005B 005C 005D 005E 005F + 0060 0061 0062 0063 0064 0065 0066 0067 0068 0069 006A 006B 006C 006D 006E 006F + 0070 0071 0072 0073 0074 0075 0076 0077 0078 0079 007A 007B 007C 007D 007E 007F + 00C4 00C5 00C7 00C9 00D1 00D6 00DC 00E1 00E0 00E2 00E4 00E3 00E5 00E7 00E9 00E8 + 00EA 00EB 00ED 00EC 00EE 00EF 
00F1 00F3 00F2 00F4 00F6 00F5 00FA 00F9 00FB 00FC + 2020 00B0 00A2 00A3 00A7 2022 00B6 00DF 00AE 00A9 2122 00B4 00A8 2260 00C6 00D8 + 221E 00B1 2264 2265 00A5 00B5 2202 2211 220F 03C0 222B 00AA 00BA 03A9 00E6 00F8 + 00BF 00A1 00AC 221A 0192 2248 2206 00AB 00BB 2026 00A0 00C0 00C3 00D5 0152 0153 + 2013 2014 201C 201D 2018 2019 00F7 25CA 00FF 0178 2044 20AC 2039 203A FB01 FB02 + 2021 00B7 201A 201E 2030 00C2 00CA 00C1 00CB 00C8 00CD 00CE 00CF 00CC 00D3 00D4 + F8FF 00D2 00DA 00DB 00D9 0131 02C6 02DC 00AF 02D8 02D9 02DA 00B8 02DD 02DB 02C7 +</map> +</unicode> + +<collation name="macroman_general_ci"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 49 50 52 53 57 59 60 61 67 68 69 70 71 72 + 79 80 81 82 84 85 90 91 92 93 95 A0 A1 A2 A3 A4 + A5 41 49 50 52 53 57 59 60 61 67 68 69 70 71 72 + 79 80 81 82 84 85 90 91 92 93 95 A6 A7 A8 A9 AA + 41 41 50 53 71 72 85 41 41 41 41 41 41 50 53 53 + 53 53 61 61 61 61 71 72 72 72 72 72 85 85 85 85 + AB AC AD AE AF B0 B1 82 B2 B3 B4 B5 B6 B7 48 72 + B8 B9 BA BB BC BD BE BF C0 C1 C2 C3 C4 C5 48 72 + C6 C7 C8 C9 57 CA CB CC CD CE CF 41 41 72 D0 D1 + D2 D3 D4 D5 D6 D7 D8 D9 93 93 DA DB DC DD DE DF + E0 E1 E2 E3 E4 41 53 41 53 53 61 61 61 61 72 72 + F0 72 85 85 85 61 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</collation> + +<collation name="macroman_bin" flag="binary"/> + +<collation name="macroman_ci_ai"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 51 53 57 59 63 66 68 6A 75 77 79 7B 7D 81 + 91 93 95 97 9A 9C A6 A8 AA AC B0 B2 B3 B4 B5 B6 + B7 41 51 53 57 59 63 66 68 6A 75 77 79 7B 7D 81 + 91 93 95 97 9A 9C A6 A8 AA AC B0 B8 B9 BA BB BC + 41 41 53 59 7D 81 9C 41 41 41 41 41 41 53 59 59 + 59 59 6A 6A 6A 6A 7D 81 81 81 81 81 9C 9C 9C 9C + BD BE BF C0 C1 C2 C3 97 C4 C5 C6 C7 C8 C9 41 81 + CA CB CC CD CE CF D0 D1 D2 D3 D4 D5 D6 D7 41 81 + D8 D9 DA DB 63 DC DD DE DF E0 E1 41 41 81 81 81 + E2 E3 E4 E5 E6 E7 E8 E9 AC AC EA EB EC ED EE EF + F0 F1 F2 F3 F4 41 59 41 59 59 6A 6A 6A 6A 81 81 + F0 81 9C 9C 9C 6A F6 F7 F8 F9 FA FB FC FD FE FF + +</map> +</collation> + +<collation name="macroman_ci"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 51 53 57 59 63 66 68 6A 75 77 79 7B 7D 81 + 91 93 95 97 9A 9C A6 A8 AA AC B0 B2 B3 B4 B5 B6 + B7 41 51 53 57 59 63 66 68 6A 75 77 79 7B 7D 81 + 91 93 95 97 9A 9C A6 A8 AA AC B0 B8 B9 BA BB BC + 4B 4D 55 5D 7F 8B A4 45 43 47 4B 49 4D 55 5D 5B + 5F 61 6E 6C 70 72 7F 85 83 87 8B 89 A0 9E A2 A4 + BD BE BF C0 C1 C2 C3 99 C4 C5 C6 C7 C8 C9 4F 8D + CA CB CC CD CE CF D0 D1 D2 D3 D4 D5 D6 D7 4F 8D + D8 D9 DA DB 65 DC DD DE DF E0 E1 43 49 89 8F 8F + E2 E3 E4 E5 E6 E7 E8 E9 AE AE EA EB EC ED EE EF + F0 F1 F2 F3 F4 47 5F 45 61 5B 6E 70 70 6C 85 87 + F0 83 A0 A2 9E 72 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</collation> + +<collation name="macroman_cs"> +<map> + 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + 40 41 51 53 57 59 63 66 68 6A 75 77 79 7B 7D 81 + 91 93 95 97 9A 9C A6 A8 AA AC B0 B2 B3 B4 B5 B6 
+ B7 42 52 54 58 5A 64 67 69 6B 76 78 7A 7C 7E 82 + 92 94 96 98 9B 9D A7 A9 AB AD B1 B8 B9 BA BB BC + 4B 4D 55 5D 7F 8B A4 46 44 48 4C 4A 4E 56 5E 5C + 60 62 6F 6D 71 73 80 86 84 88 8C 8A A1 9F A3 A5 + BD BE BF C0 C1 C2 C3 99 C4 C5 C6 C7 C8 C9 4F 8D + CA CB CC CD CE CF D0 D1 D2 D3 D4 D5 D6 D7 50 8E + D8 D9 DA DB 65 DC DD DE DF E0 E1 43 49 89 8F 90 + E2 E3 E4 E5 E6 E7 E8 E9 AF AE EA EB EC ED EE EF + F0 F1 F2 F3 F4 47 5F 45 61 5B 6E 70 72 6C 85 87 + F0 83 A0 A2 9E 74 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</collation> + +</charset> + +</charsets> diff --git a/sql/share/charsets/swe7.conf b/sql/share/charsets/swe7.xml index d2de48b4d1c..2b8ff4edcce 100644 --- a/sql/share/charsets/swe7.conf +++ b/sql/share/charsets/swe7.xml @@ -1,6 +1,29 @@ -# Configuration file for the swe7 character set +<?xml version='1.0' encoding="utf-8"?> -# ctype array (must have 257 elements) +<charsets> + +<copyright> + Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +</copyright> + +<charset name="swe7"> + +<ctype> +<map> 00 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 @@ -18,8 +41,12 @@ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 +</map> +</ctype> -# to_lower array (must have 256 elements) + +<lower> +<map> 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F @@ -36,8 +63,12 @@ D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</lower> + -# to_upper array (must have 256 elements) +<upper> +<map> 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F @@ -54,8 +85,34 @@ D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</upper> -# sort_order array (must have 256 elements) + +<unicode> +<map> +0000 0001 0002 0003 0004 0005 0006 0007 0008 0009 000A 000B 000C 000D 000E 000F +0010 0011 0012 0013 0014 0015 0016 0017 0018 0019 001A 001B 001C 001D 001E 001F +0020 0021 0022 0023 0024 0025 0026 0027 0028 0029 002A 002B 002C 002D 002E 002F +0030 0031 0032 0033 0034 0035 0036 0037 0038 0039 003A 003B 003C 003D 003E 003F +00C9 0041 0042 0043 0044 0045 0046 0047 0048 0049 004A 004B 004C 004D 004E 004F +0050 0051 0052 0053 0054 0055 0056 0057 0058 0059 005A 00C4 00D6 00C5 00DC 005F +00E9 0061 0062 0063 0064 0065 0066 0067 0068 0069 006A 006B 006C 006D 006E 006F +0070 0071 0072 0073 0074 0075 0076 0077 0078 0079 007A 00E4 00F6 00E5 00FC 0000 +0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 +0000 0000 0000 
0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 +0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 +0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 +0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 +0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 +0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 +0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 +</map> +</unicode> + + +<collation name="swe7_swedish_ci"> +<map> 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F @@ -72,3 +129,13 @@ D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF +</map> +</collation> + +<collation name="swe7_bin" flag="binary"/> + +</charset> + +</charsets> + + diff --git a/sql/share/charsets/usa7.conf b/sql/share/charsets/usa7.conf deleted file mode 100644 index b9e7a44c894..00000000000 --- a/sql/share/charsets/usa7.conf +++ /dev/null @@ -1,74 +0,0 @@ -# Configuration file for the usa7 character set - -# ctype array (must have 257 elements) - 00 - 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 - 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 - 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 - 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 - 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 - 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 20 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - -# to_lower array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F - 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF - -# to_upper array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F - 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE 
AF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF - -# sort_order array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 5C 5D 5B 5E 5F - 45 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 59 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF diff --git a/sql/share/charsets/win1251.conf b/sql/share/charsets/win1251.conf deleted file mode 100644 index e05568323b4..00000000000 --- a/sql/share/charsets/win1251.conf +++ /dev/null @@ -1,82 +0,0 @@ -# Configuration file for the win1251 character set - -# NOTE: this character set is deprecated. Please don't use it -# unless you must because of old tables. -# -# If you want to convert your files to charset cp1251, you can do: -# -# myisamchk -rq --set-character-set-name=cp1251 *.MYI - - -# ctype array (must have 257 elements) - 00 - 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 - 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 - 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 - 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 - 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 - 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 20 - 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 10 10 10 10 10 10 10 10 01 10 10 10 10 10 10 10 - 10 10 10 10 10 10 10 10 02 10 10 10 10 10 10 10 - 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 - 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 - 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 - -# to_lower array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F - 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 B8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF - -# to_upper array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 44 45 46 
47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F - 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 A8 B9 BA BB BC BD BE BF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF - -# sort_order array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F - 60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F - 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F - 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F - A0 A1 A2 A3 A4 A5 A6 A7 C6 A9 AA AB AC AD AE AF - B0 B1 B2 B3 B4 B5 B6 B7 C6 B9 BA BB BC BD BE BF - C0 C1 C2 C3 C4 C5 C7 C8 C9 CA CB CC CD CE CF D0 - D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF E0 - C0 C1 C2 C3 C4 C5 C7 C8 C9 CA CB CC CD CE CF D0 - D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF E0 diff --git a/sql/share/charsets/win1251ukr.conf b/sql/share/charsets/win1251ukr.conf deleted file mode 100644 index e693958910e..00000000000 --- a/sql/share/charsets/win1251ukr.conf +++ /dev/null @@ -1,77 +0,0 @@ -# Configuration file for the win1251ukr character set - -# it's really, just a cp1251 charset with Ukranian letter -# marked as "letters" - -# ctype array (must have 257 elements) - 00 - 20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20 - 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 - 48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10 - 10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01 - 01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10 - 10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 20 - 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 - 10 10 10 10 10 10 10 10 01 10 01 10 10 10 10 01 - 10 10 01 02 10 10 10 10 02 10 02 10 10 10 10 02 - 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 - 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 - 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 - 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 - -# to_lower array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F - 20 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F - 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F - 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 - 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 - 20 20 20 20 20 A5 20 20 A8 20 AA 20 20 20 20 AF - 20 20 B2 B2 A5 20 20 20 A8 20 AA 20 20 20 20 AF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF - C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF - D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF - -# to_upper array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 
0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F - 20 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F - 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 - 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 - 20 20 20 20 20 B4 20 20 B8 20 BA 20 20 20 20 BF - 20 20 B3 B3 B4 20 20 20 B8 20 BA 20 20 20 20 BF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF - E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF - F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF - -# sort_order array (must have 256 elements) - 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F - 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F - 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F - 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F - 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F - 20 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F - 50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F - A5 A6 A7 A8 A9 AA AB AC AD AE AF B0 B1 B2 B3 B4 - B5 B6 B7 B8 B9 BA BB BC BD BE BF C0 C1 C2 C3 C4 - C5 C6 C7 C8 C9 84 CA CB 88 CC 87 CD CE CF D0 8D - D1 D2 8C 8C 84 D3 D4 D5 88 D6 87 D7 D8 D9 DA 8D - 80 81 82 83 85 86 89 8A 8B 8E 8F 90 91 92 93 94 - 95 96 97 98 99 9A 9B 9C 9D 9E 9F A0 A1 A2 A3 A4 - 80 81 82 83 85 86 89 8A 8B 8E 8F 90 91 92 93 94 - 95 96 97 98 99 9A 9B 9C 9D 9E 9F A0 A1 A2 A3 A4 diff --git a/sql/share/czech/errmsg.txt b/sql/share/czech/errmsg.txt index 70aea3fdf62..04f8fcc8dd4 100644 --- a/sql/share/czech/errmsg.txt +++ b/sql/share/czech/errmsg.txt @@ -26,14 +26,16 @@ Thu Nov 30 14:02:52 MET 2000 podle 3.23.28 */ +character-set=latin2 + "hashchk", "isamchk", "NE", "ANO", "Nemohu vytvo-Bøit soubor '%-.64s' (chybový kód: %d)", "Nemohu vytvo-Bøit tabulku '%-.64s' (chybový kód: %d)", -"Nemohu vytvo-Bøit databázi '%-.64s', chyba %d", -"Nemohu vytvo-Bøit databázi '%-.64s', databáze ji¾ existuje", +"Nemohu vytvo-Bøit databázi '%-.64s' (chybový kód: %d)", +"Nemohu vytvo-Bøit databázi '%-.64s'; databáze ji¾ existuje", "Nemohu zru-B¹it databázi '%-.64s', databáze neexistuje", "Chyba p-Bøi ru¹ení databáze (nemohu vymazat '%-.64s', chyba %d)", "Chyba p-Bøi ru¹ení databáze (nemohu vymazat adresáø '%-.64s', chyba %d)", @@ -60,8 +62,8 @@ "Obsluha tabulky '%-.64s' nem-Bá tento parametr", "Nemohu naj-Bít záznam v '%-.64s'", "Nespr-Bávná informace v souboru '%-.64s'", -"Nespr-Bávný klíè pro tabulku '%-.64s'. Pokuste se ho opravit", -"Star-Bý klíèový soubor pro '%-.64s'. Opravte ho.", +"Nespr-Bávný klíè pro tabulku '%-.64s'; pokuste se ho opravit", +"Star-Bý klíèový soubor pro '%-.64s'; opravte ho.", "'%-.64s' je jen pro -Bètení", "M-Bálo pamìti. Pøestartujte daemona a zkuste znovu (je potøeba %d bytù)", "M-Bálo pamìti pro tøídìní. 
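The win1251 and win1251ukr files removed above both follow the old charsets/*.conf layout: a 257-element ctype array followed by 256-element to_lower, to_upper and sort_order arrays, each written as hex bytes indexed by character code. As a rough illustration of how such byte-indexed tables work (a sketch only, not the server's actual charset loader; the to_upper.txt input file is hypothetical), the following C program reads one 256-entry table and applies it:

/* Illustrative sketch only: parse a 256-entry hex table (e.g. the to_upper
   array from a charsets/*.conf file) and use it to upcase a byte string.
   This is not MySQL's real charset loader; the input file name and layout
   beyond what the deleted .conf files show are assumptions. */
#include <stdio.h>
#include <stdlib.h>

/* Read exactly 256 whitespace-separated hex bytes from fp into map[]. */
static int read_map(FILE *fp, unsigned char map[256])
{
  for (int i = 0; i < 256; i++) {
    unsigned int v;
    if (fscanf(fp, " %x", &v) != 1 || v > 0xFF)
      return -1;                        /* malformed or truncated table */
    map[i] = (unsigned char) v;
  }
  return 0;
}

/* Apply a byte-to-byte mapping in place (e.g. to_upper). */
static void apply_map(unsigned char *s, const unsigned char map[256])
{
  for (; *s; s++)
    *s = map[*s];
}

int main(void)
{
  unsigned char to_upper[256];
  FILE *fp = fopen("to_upper.txt", "r");  /* hypothetical: just the hex table */
  if (!fp || read_map(fp, to_upper) != 0) {
    fprintf(stderr, "cannot read mapping table\n");
    return EXIT_FAILURE;
  }
  fclose(fp);

  unsigned char word[] = "abc";
  apply_map(word, to_upper);
  printf("%s\n", (char *) word);          /* "ABC" with the tables shown above */
  return EXIT_SUCCESS;
}

The sort_order table is applied the same way before bytes are compared, which is why the Cyrillic rows in both deleted files map upper- and lower-case letters to identical codes: comparisons become case-insensitive.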
Zvy¹te velikost tøídícího bufferu", @@ -70,8 +72,8 @@ "M-Bálo prostoru/pamìti pro thread", "Nemohu zjistit jm-Béno stroje pro Va¹i adresu", "Chyba p-Bøi ustavování spojení", -"P-Bøístup pro u¾ivatele '%-.32s@%-.64s' k databázi '%-.64s' není povolen", -"P-Bøístup pro u¾ivatele '%-.32s@%-.64s' (s heslem %s)", +"P-Bøístup pro u¾ivatele '%-.32s'@'%-.64s' k databázi '%-.64s' není povolen", +"P-Bøístup pro u¾ivatele '%-.32s'@'%-.64s' (s heslem %s)", "Nebyla vybr-Bána ¾ádná databáze", "Nezn-Bámý pøíkaz", "Sloupec '%-.64s' nem-Bù¾e být null", @@ -119,7 +121,7 @@ "Nen-Bí mo¾né vymazat v¹echny polo¾ky s ALTER TABLE. Pou¾ijte DROP TABLE", "Nemohu zru-B¹it '%-.64s' (provést DROP). Zkontrolujte, zda neexistují záznamy/klíèe", "Z-Báznamù: %ld Zdvojených: %ld Varování: %ld", -"INSERT TABLE '%-.64s' nen-Bí dovoleno v seznamu tabulek FROM", +"You can't specify target table '%-.64s' for update in FROM clause", "Nezn-Bámá identifikace threadu: %lu", "Nejste vlastn-Bíkem threadu %lu", "Nejsou pou-B¾ity ¾ádné tabulky", @@ -168,8 +170,8 @@ "Regul-Bární výraz vrátil chybu '%-.64s'", "Pokud nen-Bí ¾ádná GROUP BY klauzule, není dovoleno souèasné pou¾ití GROUP polo¾ek (MIN(),MAX(),COUNT()...) s ne GROUP polo¾kami", "Neexistuje odpov-Bídající grant pro u¾ivatele '%-.32s' na stroji '%-.64s'", -"%-.16s p-Bøíkaz nepøístupný pro u¾ivatele: '%-.32s@%-.64s' pro tabulku '%-.64s'", -"%-.16s p-Bøíkaz nepøístupný pro u¾ivatele: '%-.32s@%-.64s' pro sloupec '%-.64s' v tabulce '%-.64s'", +"%-.16s p-Bøíkaz nepøístupný pro u¾ivatele: '%-.32s'@'%-.64s' pro tabulku '%-.64s'", +"%-.16s p-Bøíkaz nepøístupný pro u¾ivatele: '%-.32s'@'%-.64s' pro sloupec '%-.64s' v tabulce '%-.64s'", "Neplatn-Bý pøíkaz GRANT/REVOKE. Prosím, pøeètìte si v manuálu, jaká privilegia je mo¾né pou¾ít.", "Argument p-Bøíkazu GRANT u¾ivatel nebo stroj je pøíli¹ dlouhý", "Tabulka '%-.64s.%s' neexistuje", @@ -188,7 +190,7 @@ "Zji-B¹tìn timeout pøi ètení komunikaèního packetu", "Zji-B¹tìna chyba pøi zápisu komunikaèního packetu", "Zji-B¹tìn timeout pøi zápisu komunikaèního packetu", -"V-Býsledný øetìzec je del¹í ne¾ max_allowed_packet", +"V-Býsledný øetìzec je del¹í ne¾ 'max_allowed_packet'", "Typ pou-B¾ité tabulky nepodporuje BLOB/TEXT sloupce", "Typ pou-B¾ité tabulky nepodporuje AUTO_INCREMENT sloupce", "INSERT DELAYED nen-Bí mo¾no s tabulkou '%-.64s' pou¾ít, proto¾e je zamèená pomocí LOCK TABLES", @@ -222,24 +224,24 @@ "Nezn-Bámá systémová promìnná '%-.64s'", "Tabulka '%-.64s' je ozna-Bèena jako poru¹ená a mìla by být opravena", "Tabulka '%-.64s' je ozna-Bèena jako poru¹ená a poslední (automatická?) oprava se nezdaøila", -"Warning: Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage. 
Increase this mysqld variable and try again", -"This operation cannot be performed with a running slave, run SLAVE STOP first", -"This operation requires a running slave, configure slave and do SLAVE START", -"The server is not configured as slave, fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure, more error messages can be found in the MySQL error log", -"Could not create slave thread, check system resources", +"Some non-transactional changed tables couldn't be rolled back", +"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", +"This operation cannot be performed with a running slave; run STOP SLAVE first", +"This operation requires a running slave; configure slave and do START SLAVE", +"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", +"Could not initialize master info structure; more error messages can be found in the MySQL error log", +"Could not create slave thread; check system resources", "User %-.64s has already more than 'max_user_connections' active connections", "You may only use constant expressions with SET", -"Lock wait timeout exceeded", +"Lock wait timeout exceeded; try restarting transaction", "The total number of locks exceeds the lock table size", "Update locks cannot be acquired during a READ UNCOMMITTED transaction", "DROP DATABASE not allowed while thread is holding global read lock", "CREATE DATABASE not allowed while thread is holding global read lock", -"Wrong arguments to %s", -"%-.32s@%-.64s is not allowed to create new users", -"Incorrect table definition; All MERGE tables must be in the same database", -"Deadlock found when trying to get lock; Try restarting transaction", +"Incorrect arguments to %s", +"'%-.32s'@'%-.64s' is not allowed to create new users", +"Incorrect table definition; all MERGE tables must be in the same database", +"Deadlock found when trying to get lock; try restarting transaction", "The used table type doesn't support FULLTEXT indexes", "Cannot add foreign key constraint", "Cannot add a child row: a foreign key constraint fails", @@ -247,22 +249,86 @@ "Error connecting to master: %-.128s", "Error running query on master: %-.128s", "Error when executing command %s: %-.128s", -"Wrong usage of %s and %s", +"Incorrect usage of %s and %s", "The used SELECT statements have a different number of columns", "Can't execute the query because you have a conflicting read lock", "Mixing of transactional and non-transactional tables is disabled", "Option '%s' used twice in statement", "User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied. 
You need the %-.128s privilege for this operation", -"Variable '%-.64s' is a LOCAL variable and can't be used with SET GLOBAL", +"Access denied; you need the %-.128s privilege for this operation", +"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", "Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", "Variable '%-.64s' doesn't have a default value", "Variable '%-.64s' can't be set to the value of '%-.64s'", -"Wrong argument type to variable '%-.64s'", +"Incorrect argument type to variable '%-.64s'", "Variable '%-.64s' can only be set, not read", -"Wrong usage/placement of '%s'", +"Incorrect usage/placement of '%s'", "This version of MySQL doesn't yet support '%s'", "Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules" -"Variable '%-.64s' is a %s variable" +"Slave SQL thread ignored the query because of replicate-*-table rules", +"Variable '%-.64s' is a %s variable", +"Incorrect foreign key definition for '%-.64s': %s", +"Key reference and table reference don't match", +"Operand should contain %d column(s)", +"Subquery returns more than 1 row", +"Unknown prepared statement handler (%.*s) given to %s", +"Help database is corrupt or does not exist", +"Cyclic reference on subqueries", +"Converting column '%s' from %s to %s", +"Reference '%-.64s' not supported (%s)", +"Every derived table must have its own alias", +"Select %u was reduced during optimization", +"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", +"Client does not support authentication protocol requested by server; consider upgrading MySQL client", +"All parts of a SPATIAL index must be NOT NULL", +"COLLATION '%s' is not valid for CHARACTER SET '%s'", +"Slave is already running", +"Slave has already been stopped", +"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", +"ZLIB: Not enough memory", +"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", +"ZLIB: Input data corrupted", +"%d line(s) were cut by GROUP_CONCAT()", +"Row %ld doesn't contain data for all columns", +"Row %ld was truncated; it contained more data than there were input columns", +"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", +"Data truncated; out of range for column '%s' at row %ld", +"Data truncated for column '%s' at row %ld", +"Using storage engine %s for table '%s'", +"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", +"Can't drop one or more of the requested users", +"Can't revoke all privileges, grant for one or more of the requested users", +"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", +"Illegal mix of collations for operation '%s'", +"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", +"Unknown collation: '%-.64s'", +"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", +"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", +"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", +"Incorrect parameter or combination of parameters for START SLAVE UNTIL", +"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; 
otherwise, you are not safe in case of unexpected slave's mysqld restart", +"SQL thread is not to be started so UNTIL options are ignored", +"Incorrect index name '%-.100s'", +"Incorrect catalog name '%-.100s'", +"Query cache failed to set size %lu, new query cache size is %lu", +"Column '%-.64s' cannot be part of FULLTEXT index", +"Unknown key cache '%-.100s'", +"MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work", +"Unknown table engine '%s'", +"'%s' is deprecated, use '%s' instead", +"The target table %-.100s of the %s is not updatable", +"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", +"The MySQL server is running with the %s option so it cannot execute this statement", +"Column '%-.100s' has duplicated value '%-.64s' in %s" +"Truncated wrong %-.32s value: '%-.128s'" +"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" +"Invalid ON UPDATE clause for '%-.64s' column", +"This command is not supported in the prepared statement protocol yet", +"Got error %d '%-.100s' from %s", +"Got temporary error %d '%-.100s' from %s", +"Unknown or incorrect time zone: '%-.64s'", +"Invalid TIMESTAMP value in column '%s' at row %ld", +"Invalid %s character string: '%.64s'", +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" +"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/danish/errmsg.txt b/sql/share/danish/errmsg.txt index 8c0bf688311..4983d39714a 100644 --- a/sql/share/danish/errmsg.txt +++ b/sql/share/danish/errmsg.txt @@ -17,28 +17,30 @@ /* Knud Riishøjgård knudriis@post.tele.dk 99 && Carsten H. Pedersen, carsten.pedersen@bitbybit.dk oct. 1999 / aug. 2001. */ +character-set=latin1 + "hashchk", "isamchk", "NEJ", "JA", "Kan ikke oprette filen '%-.64s' (Fejlkode: %d)", "Kan ikke oprette tabellen '%-.64s' (Fejlkode: %d)", -"Kan ikke oprette databasen '%-.64s'. Fejl %d", -"Kan ikke oprette databasen '%-.64s'. Databasen eksisterer", -"Kan ikke slette (droppe) '%-.64s'. Databasen eksisterer ikke", -"Fejl ved sletning (drop) af databasen (kan ikke slette '%-.64s', Fejl %d)", -"Fejl ved sletting af database (kan ikke slette folderen '%-.64s', Fejl %d)", +"Kan ikke oprette databasen '%-.64s' (Fejlkode: %d)", +"Kan ikke oprette databasen '%-.64s'; databasen eksisterer", +"Kan ikke slette (droppe) '%-.64s'; databasen eksisterer ikke", +"Fejl ved sletning (drop) af databasen (kan ikke slette '%-.64s', Fejlkode %d)", +"Fejl ved sletting af database (kan ikke slette folderen '%-.64s', Fejlkode %d)", "Fejl ved sletning af '%-.64s' (Fejlkode: %d)", "Kan ikke læse posten i systemfolderen", "Kan ikke læse status af '%-.64s' (Fejlkode: %d)", "Kan ikke læse aktive folder (Fejlkode: %d)", "Kan ikke låse fil (Fejlkode: %d)", -"Kan ikke åbne fil: '%-.64s'. (Fejlkode: %d)", +"Kan ikke åbne fil: '%-.64s' (Fejlkode: %d)", "Kan ikke finde fila: '%-.64s' (Fejlkode: %d)", "Kan ikke læse folder '%-.64s' (Fejlkode: %d)", "Kan ikke skifte folder til '%-.64s' (Fejlkode: %d)", "Posten er ændret siden sidste læsning '%-.64s'", -"Ikke mere diskplads (%s). Venter på at få frigjort plads....", +"Ikke mere diskplads (%s). 
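The message strings in these errmsg.txt files use ordinary printf conversion specifiers: "%-.64s" means left-justified, at most 64 characters of the argument, and the user@host rewrites in this diff only add quote characters around the two parts. A small standalone C demonstration of the truncation behaviour (illustrative only, not server code; the sample user and host values are made up):

/* Demonstration of the printf precision specifiers used throughout
   errmsg.txt, e.g. "%-.64s" (take at most 64 chars of the argument) and the
   quoted '%-.32s'@'%-.64s' form introduced in this change. */
#include <stdio.h>

int main(void)
{
  const char *user = "averyveryveryverylongusernamethatkeepsgoing";
  const char *host = "client.example.com";

  /* At most 32 chars of user and 64 of host end up in the message. */
  printf("Access denied for user '%-.32s'@'%-.64s' (using password: %s)\n",
         user, host, "YES");

  /* Precision also truncates plain identifiers, e.g. "%-.100s". */
  printf("Unknown table '%-.100s'\n", "t1");
  return 0;
}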
Venter på at få frigjort plads...", "Kan ikke skrive, flere ens nøgler i tabellen '%-.64s'", "Fejl ved lukning af '%-.64s' (Fejlkode: %d)", "Fejl ved læsning af '%-.64s' (Fejlkode: %d)", @@ -51,8 +53,8 @@ "Denne mulighed eksisterer ikke for tabeltypen '%-.64s'", "Kan ikke finde posten i '%-.64s'", "Forkert indhold i: '%-.64s'", -"Fejl i indeksfilen til tabellen '%-.64s', prøv at reparere den", -"Gammel indeksfil for tabellen '%-.64s'; Reparer den", +"Fejl i indeksfilen til tabellen '%-.64s'; prøv at reparere den", +"Gammel indeksfil for tabellen '%-.64s'; reparer den", "'%-.64s' er skrivebeskyttet", "Ikke mere hukommelse. Genstart serveren og prøv igen (mangler %d bytes)", "Ikke mere sorteringshukommelse. Øg sorteringshukommelse (sort buffer size) for serveren", @@ -61,8 +63,8 @@ "Udgået for tråde/hukommelse", "Kan ikke få værtsnavn for din adresse", "Forkert håndtryk (handshake)", -"Adgang nægtet bruger: '%-.32s@%-.64s' til databasen '%-.64s'", -"Adgang nægtet bruger: '%-.32s@%-.64s' (Bruger adgangskode: %s)", +"Adgang nægtet bruger: '%-.32s'@'%-.64s' til databasen '%-.64s'", +"Adgang nægtet bruger: '%-.32s'@'%-.64s' (Bruger adgangskode: %s)", "Ingen database valgt", "Ukendt kommando", "Kolonne '%-.64s' kan ikke være NULL", @@ -110,7 +112,7 @@ "Man kan ikke slette alle felter med ALTER TABLE. Brug DROP TABLE i stedet.", "Kan ikke udføre DROP '%-.64s'. Undersøg om feltet/nøglen eksisterer.", "Poster: %ld Ens: %ld Advarsler: %ld", -"INSERT TABLE '%-.64s' er ikke tilladt i FROM tabel liste", +"You can't specify target table '%-.64s' for update in FROM clause", "Ukendt tråd id: %lu", "Du er ikke ejer af tråden %lu", "Ingen tabeller i brug", @@ -137,7 +139,7 @@ "For mange felter", "For store poster. Max post størrelse, uden BLOB's, er %d. Du må lave nogle felter til BLOB's", "Thread stack brugt: Brugt: %ld af en %ld stak. Brug 'mysqld -O thread_stack=#' for at allokere en større stak om nødvendigt", -"Krydsreferencer fundet i OUTER JOIN. Check dine ON conditions", +"Krydsreferencer fundet i OUTER JOIN; check dine ON conditions", "Kolonne '%-.32s' bruges som UNIQUE eller INDEX men er ikke defineret som NOT NULL", "Kan ikke læse funktionen '%-.64s'", "Kan ikke starte funktionen '%-.64s'; %-.80s", @@ -159,8 +161,8 @@ "Fik fejl '%-.64s' fra regexp", "Sammenblanding af GROUP kolonner (MIN(),MAX(),COUNT()...) uden GROUP kolonner er ikke tilladt, hvis der ikke er noget GROUP BY prædikat", "Denne tilladelse findes ikke for brugeren '%-.32s' på vært '%-.64s'", -"%-.16s-kommandoen er ikke tilladt for brugeren '%-.32s@%-.64s' for tabellen '%-.64s'", -"%-.16s-kommandoen er ikke tilladt for brugeren '%-.32s@%-.64s' for kolonne '%-.64s' in tabellen '%-.64s'", +"%-.16s-kommandoen er ikke tilladt for brugeren '%-.32s'@'%-.64s' for tabellen '%-.64s'", +"%-.16s-kommandoen er ikke tilladt for brugeren '%-.32s'@'%-.64s' for kolonne '%-.64s' in tabellen '%-.64s'", "Forkert GRANT/REVOKE kommando. 
Se i brugervejledningen hvilke privilegier der kan specificeres.", "Værts- eller brugernavn for langt til GRANT", "Tabellen '%-.64s.%-.64s' eksisterer ikke", @@ -179,7 +181,7 @@ "Timeout-fejl ved læsning af kommunukations-pakker (communication packets)", "Fik fejlmeddelelse ved skrivning af kommunukations-pakker (communication packets)", "Timeout-fejl ved skrivning af kommunukations-pakker (communication packets)", -"Strengen med resultater er større end max_allowed_packet", +"Strengen med resultater er større end 'max_allowed_packet'", "Denne tabeltype understøtter ikke brug af BLOB og TEXT kolonner", "Denne tabeltype understøtter ikke brug af AUTO_INCREMENT kolonner", "INSERT DELAYED kan ikke bruges med tabellen '%-.64s', fordi tabellen er låst med LOCK TABLES", @@ -215,11 +217,11 @@ "Tabellen '%-.64s' er markeret med fejl og sidste (automatiske?) REPAIR fejlede", "Advarsel: Visse data i tabeller der ikke understøtter transaktioner kunne ikke tilbagestilles", "Fler-udtryks transaktion krævede mere plads en 'max_binlog_cache_size' bytes. Forhøj værdien af denne variabel og prøv igen", -"Denne handling kunne ikke udføres med kørende slave, brug først kommandoen SLAVE STOP", -"Denne handling kræver en kørende slave. Konfigurer en slave og brug kommandoen SLAVE START", +"Denne handling kunne ikke udføres med kørende slave, brug først kommandoen STOP SLAVE", +"Denne handling kræver en kørende slave. Konfigurer en slave og brug kommandoen START SLAVE", "Denne server er ikke konfigureret som slave. Ret in config-filen eller brug kommandoen CHANGE MASTER TO", "Could not initialize master info structure, more error messages can be found in the MySQL error log", -"Kunne ikke danne en slave-tråd. Check systemressourcerne", +"Kunne ikke danne en slave-tråd; check systemressourcerne", "Brugeren %-.64s har allerede mere end 'max_user_connections' aktive forbindelser", "Du må kun bruge konstantudtryk med SET", "Lock wait timeout overskredet", @@ -227,10 +229,10 @@ "Update lås kan ikke opnås under en READ UNCOMMITTED transaktion", "DROP DATABASE er ikke tilladt mens en tråd holder på globalt read lock", "CREATE DATABASE er ikke tilladt mens en tråd holder på globalt read lock", -"Wrong arguments to %s", -"%-.32s@%-.64s is not allowed to create new users", -"Incorrect table definition; All MERGE tables must be in the same database", -"Deadlock found when trying to get lock; Try restarting transaction", +"Incorrect arguments to %s", +"'%-.32s'@'%-.64s' is not allowed to create new users", +"Incorrect table definition; all MERGE tables must be in the same database", +"Deadlock found when trying to get lock; try restarting transaction", "The used table type doesn't support FULLTEXT indexes", "Cannot add foreign key constraint", "Cannot add a child row: a foreign key constraint fails", @@ -238,22 +240,86 @@ "Error connecting to master: %-.128s", "Error running query on master: %-.128s", "Error when executing command %s: %-.128s", -"Wrong usage of %s and %s", +"Incorrect usage of %s and %s", "The used SELECT statements have a different number of columns", "Can't execute the query because you have a conflicting read lock", "Mixing of transactional and non-transactional tables is disabled", "Option '%s' used twice in statement", "User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied. 
You need the %-.128s privilege for this operation", -"Variable '%-.64s' is a LOCAL variable and can't be used with SET GLOBAL", +"Access denied; you need the %-.128s privilege for this operation", +"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", "Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", "Variable '%-.64s' doesn't have a default value", "Variable '%-.64s' can't be set to the value of '%-.64s'", -"Wrong argument type to variable '%-.64s'", +"Incorrect argument type to variable '%-.64s'", "Variable '%-.64s' can only be set, not read", -"Wrong usage/placement of '%s'", +"Incorrect usage/placement of '%s'", "This version of MySQL doesn't yet support '%s'", "Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules" -"Variable '%-.64s' is a %s variable" +"Slave SQL thread ignored the query because of replicate-*-table rules", +"Variable '%-.64s' is a %s variable", +"Incorrect foreign key definition for '%-.64s': %s", +"Key reference and table reference don't match", +"Operand should contain %d column(s)", +"Subquery returns more than 1 row", +"Unknown prepared statement handler (%.*s) given to %s", +"Help database is corrupt or does not exist", +"Cyclic reference on subqueries", +"Converting column '%s' from %s to %s", +"Reference '%-.64s' not supported (%s)", +"Every derived table must have its own alias", +"Select %u was reduced during optimization", +"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", +"Client does not support authentication protocol requested by server; consider upgrading MySQL client", +"All parts of a SPATIAL index must be NOT NULL", +"COLLATION '%s' is not valid for CHARACTER SET '%s'", +"Slave is already running", +"Slave has already been stopped", +"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", +"ZLIB: Not enough memory", +"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", +"ZLIB: Input data corrupted", +"%d line(s) were cut by GROUP_CONCAT()", +"Row %ld doesn't contain data for all columns", +"Row %ld was truncated; it contained more data than there were input columns", +"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", +"Data truncated; out of range for column '%s' at row %ld", +"Data truncated for column '%s' at row %ld", +"Using storage engine %s for table '%s'", +"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", +"Can't drop one or more of the requested users", +"Can't revoke all privileges, grant for one or more of the requested users", +"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", +"Illegal mix of collations for operation '%s'", +"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", +"Unknown collation: '%-.64s'", +"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", +"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", +"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", +"Incorrect parameter or combination of parameters for START SLAVE UNTIL", +"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; 
otherwise, you are not safe in case of unexpected slave's mysqld restart", +"SQL thread is not to be started so UNTIL options are ignored", +"Incorrect index name '%-.100s'", +"Incorrect catalog name '%-.100s'", +"Query cache failed to set size %lu, new query cache size is %lu", +"Column '%-.64s' cannot be part of FULLTEXT index", +"Unknown key cache '%-.100s'", +"MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work", +"Unknown table engine '%s'", +"'%s' is deprecated, use '%s' instead", +"The target table %-.100s of the %s is not updateable", +"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", +"The MySQL server is running with the %s option so it cannot execute this statement", +"Column '%-.100s' has duplicated value '%-.64s' in %s" +"Truncated wrong %-.32s value: '%-.128s'" +"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" +"Invalid ON UPDATE clause for '%-.64s' column", +"This command is not supported in the prepared statement protocol yet", +"Modtog fejl %d '%-.100s' fra %s", +"Modtog temporary fejl %d '%-.100s' fra %s", +"Unknown or incorrect time zone: '%-.64s'", +"Invalid TIMESTAMP value in column '%s' at row %ld", +"Invalid %s character string: '%.64s'", +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" +"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/dutch/errmsg.txt b/sql/share/dutch/errmsg.txt index c2baf0240af..3f320dca750 100644 --- a/sql/share/dutch/errmsg.txt +++ b/sql/share/dutch/errmsg.txt @@ -26,6 +26,8 @@ Translated new error messages. */ +character-set=latin1 + "hashchk", "isamchk", "NEE", @@ -33,8 +35,8 @@ "Kan file '%-.64s' niet aanmaken (Errcode: %d)", "Kan tabel '%-.64s' niet aanmaken (Errcode: %d)", "Kan database '%-.64s' niet aanmaken (Errcode: %d)", -"Kan database '%-.64s' niet aanmaken. Database bestaat reeds", -"Kan database '%-.64s' niet verwijderen. Database bestaat niet", +"Kan database '%-.64s' niet aanmaken; database bestaat reeds", +"Kan database '%-.64s' niet verwijderen; database bestaat niet", "Fout bij verwijderen database (kan '%-.64s' niet verwijderen, Errcode: %d)", "Fout bij verwijderen database (kan rmdir '%-.64s' niet uitvoeren, Errcode: %d)", "Fout bij het verwijderen van '%-.64s' (Errcode: %d)", @@ -42,8 +44,8 @@ "Kan de status niet krijgen van '%-.64s' (Errcode: %d)", "Kan de werkdirectory niet krijgen (Errcode: %d)", "Kan de file niet blokeren (Errcode: %d)", -"Kan de file '%-.64s' niet openen. (Errcode: %d)", -"Kan de file: '%-.64s' niet vinden. (Errcode: %d)", +"Kan de file '%-.64s' niet openen (Errcode: %d)", +"Kan de file: '%-.64s' niet vinden (Errcode: %d)", "Kan de directory niet lezen van '%-.64s' (Errcode: %d)", "Kan de directory niet veranderen naar '%-.64s' (Errcode: %d)", "Record is veranderd sinds de laatste lees activiteit in de tabel '%-.64s'", @@ -60,8 +62,8 @@ "Tabel handler voor '%-.64s' heeft deze optie niet", "Kan record niet vinden in '%-.64s'", "Verkeerde info in file: '%-.64s'", -"Verkeerde zoeksleutel file voor tabel: '%-.64s'. Probeer het te repareren", -"Oude zoeksleutel file voor tabel '%-.64s'; Repareer het!", +"Verkeerde zoeksleutel file voor tabel: '%-.64s'; probeer het te repareren", +"Oude zoeksleutel file voor tabel '%-.64s'; repareer het!", "'%-.64s' is alleen leesbaar", "Geen geheugen meer. Herstart server en probeer opnieuw (%d bytes nodig)", "Geen geheugen om te sorteren. 
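Each per-language errmsg.txt now begins with a character-set=<name> line, followed by one double-quoted message per line in an order shared across all languages; at build time these files are compiled into the binary errmsg.sys that the server loads. The sketch below is a simplified stand-in, not the real comp_err build tool, and only shows how the new header and the quoted lines can be picked apart:

/* Minimal sketch of reading one share/<lang>/errmsg.txt file: pick up the
   new "character-set=" header and count the quoted message lines.  Only an
   illustration of the layout visible in this diff, not the comp_err tool. */
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
  const char *path = argc > 1 ? argv[1] : "errmsg.txt";
  FILE *fp = fopen(path, "r");
  if (!fp) { perror(path); return 1; }

  char line[1024];
  char charset[64] = "latin1";       /* assumed fallback, only for this sketch */
  long messages = 0;

  while (fgets(line, sizeof(line), fp)) {
    if (strncmp(line, "character-set=", 14) == 0) {
      sscanf(line + 14, "%63s", charset);
    } else if (line[0] == '"') {
      messages++;                    /* one error message per quoted line */
    }                                /* comments and blank lines are skipped */
  }
  fclose(fp);

  printf("%s: charset %s, %ld messages\n", path, charset, messages);
  return 0;
}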
Verhoog de server sort buffer size", @@ -70,8 +72,8 @@ "Geen thread geheugen meer; controleer of mysqld of andere processen al het beschikbare geheugen gebruikt. Zo niet, dan moet u wellicht 'ulimit' gebruiken om mysqld toe te laten meer geheugen te benutten, of u kunt extra swap ruimte toevoegen", "Kan de hostname niet krijgen van uw adres", "Verkeerde handshake", -"Toegang geweigerd voor gebruiker: '%-.32s@%-.64s' naar database '%-.64s'", -"Toegang geweigerd voor gebruiker: '%-.32s@%-.64s' (Wachtwoord gebruikt: %s)", +"Toegang geweigerd voor gebruiker: '%-.32s'@'%-.64s' naar database '%-.64s'", +"Toegang geweigerd voor gebruiker: '%-.32s'@'%-.64s' (Wachtwoord gebruikt: %s)", "Geen database geselecteerd", "Onbekend commando", "Kolom '%-.64s' kan niet null zijn", @@ -119,7 +121,7 @@ "Het is niet mogelijk alle velden te verwijderen met ALTER TABLE. Gebruik a.u.b. DROP TABLE hiervoor!", "Kan '%-.64s' niet weggooien. Controleer of het veld of de zoeksleutel daadwerkelijk bestaat.", "Records: %ld Dubbel: %ld Waarschuwing: %ld", -"INSERT TABLE '%-.64s' is niet toegestaan in de FROM tabel-lijst", +"You can't specify target table '%-.64s' for update in FROM clause", "Onbekend thread id: %lu", "U bent geen bezitter van thread %lu", "Geen tabellen gebruikt.", @@ -168,8 +170,8 @@ "Fout '%-.64s' ontvangen van regexp", "Het mixen van GROUP kolommen (MIN(),MAX(),COUNT()...) met no-GROUP kolommen is foutief indien er geen GROUP BY clausule is", "Deze toegang (GRANT) is niet toegekend voor gebruiker '%-.32s' op host '%-.64s'", -"%-.16s commando geweigerd voor gebruiker: '%-.32s@%-.64s' voor tabel '%-.64s'", -"%-.16s commando geweigerd voor gebruiker: '%-.32s@%-.64s' voor kolom '%-.64s' in tabel '%-.64s'", +"%-.16s commando geweigerd voor gebruiker: '%-.32s'@'%-.64s' voor tabel '%-.64s'", +"%-.16s commando geweigerd voor gebruiker: '%-.32s'@'%-.64s' voor kolom '%-.64s' in tabel '%-.64s'", "Foutief GRANT/REVOKE commando. Raadpleeg de handleiding welke priveleges gebruikt kunnen worden.", "De host of gebruiker parameter voor GRANT is te lang", "Tabel '%-.64s.%s' bestaat niet", @@ -188,7 +190,7 @@ "Timeout bij het lezen van communicatiepakketten", "Fout bij het schrijven van communicatiepakketten", "Timeout bij het schrijven van communicatiepakketten", -"Resultaat string is langer dan max_allowed_packet", +"Resultaat string is langer dan 'max_allowed_packet'", "Het gebruikte tabel type ondersteunt geen BLOB/TEXT kolommen", "Het gebruikte tabel type ondersteunt geen AUTO_INCREMENT kolommen", "INSERT DELAYED kan niet worden gebruikt bij table '%-.64s', vanwege een 'lock met LOCK TABLES", @@ -224,8 +226,8 @@ "Tabel '%-.64s' staat als gecrashed gemarkeerd en de laatste (automatische?) reparatie poging mislukte", "Waarschuwing: Roll back mislukt voor sommige buiten transacties gewijzigde tabellen", "Multi-statement transactie vereist meer dan 'max_binlog_cache_size' bytes opslag. 
Verhoog deze mysqld variabele en probeer opnieuw", -"Deze operatie kan niet worden uitgevoerd met een actieve slave, doe eerst SLAVE STOP", -"Deze operatie vereist een actieve slave, configureer slave en doe dan SLAVE START", +"Deze operatie kan niet worden uitgevoerd met een actieve slave, doe eerst STOP SLAVE", +"Deze operatie vereist een actieve slave, configureer slave en doe dan START SLAVE", "De server is niet geconfigureerd als slave, fix in configuratie bestand of met CHANGE MASTER TO", "Could not initialize master info structure, more error messages can be found in the MySQL error log", "Kon slave thread niet aanmaken, controleer systeem resources", @@ -237,8 +239,8 @@ "DROP DATABASE niet toegestaan terwijl thread een globale 'read lock' bezit", "CREATE DATABASE niet toegestaan terwijl thread een globale 'read lock' bezit", "Foutieve parameters voor %s", -"%-.32s@%-.64s mag geen nieuwe gebruikers creeren", -"Incorrecte tabel definitie; Alle MERGE tabellen moeten tot dezelfde database behoren", +"'%-.32s'@'%-.64s' mag geen nieuwe gebruikers creeren", +"Incorrecte tabel definitie; alle MERGE tabellen moeten tot dezelfde database behoren", "Deadlock gevonden tijdens lock-aanvraag poging; Probeer herstart van de transactie", "Het gebruikte tabel type ondersteund geen FULLTEXT indexen", "Kan foreign key beperking niet toevoegen", @@ -254,7 +256,7 @@ "Optie '%s' tweemaal gebruikt in opdracht", "Gebruiker '%-.64s' heeft het maximale gebruik van de '%s' faciliteit overschreden (huidige waarde: %ld)", "Toegang geweigerd. U moet het %-.128s privilege hebben voor deze operatie", -"Variabele '%-.64s' is LOCAL en kan niet worden gebruikt met SET GLOBAL", +"Variabele '%-.64s' is SESSION en kan niet worden gebruikt met SET GLOBAL", "Variabele '%-.64s' is GLOBAL en dient te worden gewijzigd met SET GLOBAL", "Variabele '%-.64s' heeft geen standaard waarde", "Variabele '%-.64s' kan niet worden gewijzigd naar de waarde '%-.64s'", @@ -263,6 +265,70 @@ "Foutieve toepassing/plaatsing van '%s'", "Deze versie van MySQL ondersteunt nog geen '%s'", "Kreeg fatale fout %d: '%-.128s' van master tijdens lezen van data uit binaire log", -"Slave SQL thread ignored the query because of replicate-*-table rules" -"Variable '%-.64s' is a %s variable" +"Slave SQL thread ignored the query because of replicate-*-table rules", +"Variable '%-.64s' is a %s variable", +"Incorrect foreign key definition for '%-.64s': %s", +"Key reference and table reference don't match", +"Operand should contain %d column(s)", +"Subquery returns more than 1 row", +"Unknown prepared statement handler (%.*s) given to %s", +"Help database is corrupt or does not exist", +"Cyclic reference on subqueries", +"Converting column '%s' from %s to %s", +"Reference '%-.64s' not supported (%s)", +"Every derived table must have its own alias", +"Select %u was reduced during optimization", +"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", +"Client does not support authentication protocol requested by server; consider upgrading MySQL client", +"All parts of a SPATIAL index must be NOT NULL", +"COLLATION '%s' is not valid for CHARACTER SET '%s'", +"Slave is already running", +"Slave has already been stopped", +"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", +"ZLIB: Not enough memory", +"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", +"ZLIB: Input data corrupted", +"%d line(s) were cut by GROUP_CONCAT()", +"Row %ld doesn't 
contain data for all columns", +"Row %ld was truncated; it contained more data than there were input columns", +"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", +"Data truncated; out of range for column '%s' at row %ld", +"Data truncated for column '%s' at row %ld", +"Using storage engine %s for table '%s'", +"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", +"Can't drop one or more of the requested users", +"Can't revoke all privileges, grant for one or more of the requested users", +"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", +"Illegal mix of collations for operation '%s'", +"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", +"Unknown collation: '%-.64s'", +"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", +"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", +"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", +"Incorrect parameter or combination of parameters for START SLAVE UNTIL", +"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", +"SQL thread is not to be started so UNTIL options are ignored", +"Incorrect index name '%-.100s'", +"Incorrect catalog name '%-.100s'", +"Query cache failed to set size %lu, new query cache size is %lu", +"Column '%-.64s' cannot be part of FULLTEXT index", +"Unknown key cache '%-.100s'", +"MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work", +"Unknown table engine '%s'", +"'%s' is deprecated, use '%s' instead", +"The target table %-.100s of the %s is not updateable", +"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", +"The MySQL server is running with the %s option so it cannot execute this statement", +"Column '%-.100s' has duplicated value '%-.64s' in %s" +"Truncated wrong %-.32s value: '%-.128s'" +"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" +"Invalid ON UPDATE clause for '%-.64s' column", +"This command is not supported in the prepared statement protocol yet", +"Got error %d '%-.100s' from %s", +"Got temporary error %d '%-.100s' from %s", +"Unknown or incorrect time zone: '%-.64s'", +"Invalid TIMESTAMP value in column '%s' at row %ld", +"Invalid %s character string: '%.64s'", +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" +"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/english/errmsg.txt b/sql/share/english/errmsg.txt index ed5977fd78c..763c984866b 100644 --- a/sql/share/english/errmsg.txt +++ b/sql/share/english/errmsg.txt @@ -14,15 +14,17 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +character-set=latin1 + "hashchk", "isamchk", "NO", "YES", "Can't create file '%-.64s' (errno: %d)", "Can't create table '%-.64s' (errno: %d)", -"Can't create database '%-.64s'. (errno: %d)", -"Can't create database '%-.64s'. Database exists", -"Can't drop database '%-.64s'. 
Database doesn't exist", +"Can't create database '%-.64s' (errno: %d)", +"Can't create database '%-.64s'; database exists", +"Can't drop database '%-.64s'; database doesn't exist", "Error dropping database (can't delete '%-.64s', errno: %d)", "Error dropping database (can't rmdir '%-.64s', errno: %d)", "Error on delete of '%-.64s' (errno: %d)", @@ -30,13 +32,13 @@ "Can't get status of '%-.64s' (errno: %d)", "Can't get working directory (errno: %d)", "Can't lock file (errno: %d)", -"Can't open file: '%-.64s'. (errno: %d)", +"Can't open file: '%-.64s' (errno: %d)", "Can't find file: '%-.64s' (errno: %d)", "Can't read dir of '%-.64s' (errno: %d)", "Can't change dir to '%-.64s' (errno: %d)", "Record has changed since last read in table '%-.64s'", -"Disk full (%s). Waiting for someone to free some space....", -"Can't write, duplicate key in table '%-.64s'", +"Disk full (%s); waiting for someone to free some space...", +"Can't write; duplicate key in table '%-.64s'", "Error on close of '%-.64s' (errno: %d)", "Error reading file '%-.64s' (errno: %d)", "Error on rename of '%-.64s' to '%-.64s' (errno: %d)", @@ -44,29 +46,29 @@ "'%-.64s' is locked against change", "Sort aborted", "View '%-.64s' doesn't exist for '%-.64s'", -"Got error %d from table handler", -"Table handler for '%-.64s' doesn't have this option", +"Got error %d from storage engine", +"Table storage engine for '%-.64s' doesn't have this option", "Can't find record in '%-.64s'", "Incorrect information in file: '%-.64s'", -"Incorrect key file for table: '%-.64s'. Try to repair it", -"Old key file for table '%-.64s'; Repair it!", +"Incorrect key file for table '%-.64s'; try to repair it", +"Old key file for table '%-.64s'; repair it!", "Table '%-.64s' is read only", -"Out of memory. Restart daemon and try again (needed %d bytes)", -"Out of sort memory. Increase daemon sort buffer size", -"Unexpected eof found when reading file '%-.64s' (errno: %d)", +"Out of memory; restart server and try again (needed %d bytes)", +"Out of sort memory; increase server sort buffer size", +"Unexpected EOF found when reading file '%-.64s' (errno: %d)", "Too many connections", -"Out of memory; Check if mysqld or some other process uses all available memory. If not you may have to use 'ulimit' to allow mysqld to use more memory or you can add more swap space", +"Out of memory; check if mysqld or some other process uses all available memory; if not, you may have to use 'ulimit' to allow mysqld to use more memory or you can add more swap space", "Can't get hostname for your address", "Bad handshake", -"Access denied for user: '%-.32s@%-.64s' to database '%-.64s'", -"Access denied for user: '%-.32s@%-.64s' (Using password: %s)", -"No Database Selected", +"Access denied for user '%-.32s'@'%-.64s' to database '%-.64s'", +"Access denied for user '%-.32s'@'%-.64s' (using password: %s)", +"No database selected", "Unknown command", "Column '%-.64s' cannot be null", "Unknown database '%-.64s'", "Table '%-.64s' already exists", -"Unknown table '%-.180s'", -"Column: '%-.64s' in %-.64s is ambiguous", +"Unknown table '%-.100s'", +"Column '%-.64s' in %-.64s is ambiguous", "Server shutdown in progress", "Unknown column '%-.64s' in '%-.64s'", "'%-.64s' isn't in GROUP BY", @@ -83,31 +85,31 @@ "Not unique table/alias: '%-.64s'", "Invalid default value for '%-.64s'", "Multiple primary key defined", -"Too many keys specified. Max %d keys allowed", -"Too many key parts specified. Max %d parts allowed", -"Specified key was too long. 
Max key length is %d", +"Too many keys specified; max %d keys allowed", +"Too many key parts specified; max %d parts allowed", +"Specified key was too long; max key length is %d bytes", "Key column '%-.64s' doesn't exist in table", "BLOB column '%-.64s' can't be used in key specification with the used table type", -"Too big column length for column '%-.64s' (max = %d). Use BLOB instead", -"Incorrect table definition; There can only be one auto column and it must be defined as a key", +"Column length too big for column '%-.64s' (max = %d); use BLOB or TEXT instead", +"Incorrect table definition; there can be only one auto column and it must be defined as a key", "%s: ready for connections.\nVersion: '%s' socket: '%s' port: %d", "%s: Normal shutdown\n", "%s: Got signal %d. Aborting!\n", -"%s: Shutdown Complete\n", +"%s: Shutdown complete\n", "%s: Forcing close of thread %ld user: '%-.32s'\n", "Can't create IP socket", -"Table '%-.64s' has no index like the one used in CREATE INDEX. Recreate the table", -"Field separator argument is not what is expected. Check the manual", -"You can't use fixed rowlength with BLOBs. Please use 'fields terminated by'.", +"Table '%-.64s' has no index like the one used in CREATE INDEX; recreate the table", +"Field separator argument is not what is expected; check the manual", +"You can't use fixed rowlength with BLOBs; please use 'fields terminated by'", "The file '%-.64s' must be in the database directory or be readable by all", "File '%-.80s' already exists", "Records: %ld Deleted: %ld Skipped: %ld Warnings: %ld", "Records: %ld Duplicates: %ld", -"Incorrect sub part key. The used key part isn't a string, the used length is longer than the key part or the table handler doesn't support unique sub keys", -"You can't delete all columns with ALTER TABLE. Use DROP TABLE instead", -"Can't DROP '%-.64s'. Check that column/key exists", +"Incorrect sub part key; the used key part isn't a string, the used length is longer than the key part, or the storage engine doesn't support unique sub keys", +"You can't delete all columns with ALTER TABLE; use DROP TABLE instead", +"Can't DROP '%-.64s'; check that column/key exists", "Records: %ld Duplicates: %ld Warnings: %ld", -"INSERT TABLE '%-.64s' isn't allowed in FROM table list", +"You can't specify target table '%-.64s' for update in FROM clause", "Unknown thread id: %lu", "You are not owner of thread %lu", "No tables used", @@ -115,10 +117,10 @@ "Can't generate a unique log-filename %-.64s.(1-999)\n", "Table '%-.64s' was locked with a READ lock and can't be updated", "Table '%-.64s' was not locked with LOCK TABLES", -"BLOB column '%-.64s' can't have a default value", +"BLOB/TEXT column '%-.64s' can't have a default value", "Incorrect database name '%-.100s'", "Incorrect table name '%-.100s'", -"The SELECT would examine more rows than MAX_JOIN_SIZE. Check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is ok", +"The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is okay", "Unknown error", "Unknown procedure '%-.64s'", "Incorrect parameter count to procedure '%-.64s'", @@ -130,44 +132,44 @@ "A table must have at least 1 column", "The table '%-.64s' is full", "Unknown character set: '%-.64s'", -"Too many tables. MySQL can only use %d tables in a join", +"Too many tables; MySQL can only use %d tables in a join", "Too many columns", -"Too big row size. 
The maximum row size, not counting BLOBs, is %d (can be lower for some table types). You have to change some fields to BLOBs", +"Row size too large. The maximum row size for the used table type, not counting BLOBs, is %ld. You have to change some columns to TEXT or BLOBs", "Thread stack overrun: Used: %ld of a %ld stack. Use 'mysqld -O thread_stack=#' to specify a bigger stack if needed", -"Cross dependency found in OUTER JOIN. Examine your ON conditions", +"Cross dependency found in OUTER JOIN; examine your ON conditions", "Column '%-.64s' is used with UNIQUE or INDEX but is not defined as NOT NULL", "Can't load function '%-.64s'", "Can't initialize function '%-.64s'; %-.80s", "No paths allowed for shared library", -"Function '%-.64s' already exist", +"Function '%-.64s' already exists", "Can't open shared library '%-.64s' (errno: %d %-.64s)", "Can't find function '%-.64s' in library", "Function '%-.64s' is not defined", -"Host '%-.64s' is blocked because of many connection errors. Unblock with 'mysqladmin flush-hosts'", +"Host '%-.64s' is blocked because of many connection errors; unblock with 'mysqladmin flush-hosts'", "Host '%-.64s' is not allowed to connect to this MySQL server", -"You are using MySQL as an anonymous users and anonymous users are not allowed to change passwords", +"You are using MySQL as an anonymous user and anonymous users are not allowed to change passwords", "You must have privileges to update tables in the mysql database to be able to change passwords for others", "Can't find any matching row in the user table", "Rows matched: %ld Changed: %ld Warnings: %ld", -"Can't create a new thread (errno %d). If you are not out of available memory, you can consult the manual for a possible OS-dependent bug", +"Can't create a new thread (errno %d); if you are not out of available memory, you can consult the manual for a possible OS-dependent bug", "Column count doesn't match value count at row %ld", "Can't reopen table: '%-.64s'", "Invalid use of NULL value", "Got error '%-.64s' from regexp", -"Mixing of GROUP columns (MIN(),MAX(),COUNT()...) with no GROUP columns is illegal if there is no GROUP BY clause", +"Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) with no GROUP columns is illegal if there is no GROUP BY clause", "There is no such grant defined for user '%-.32s' on host '%-.64s'", -"%-.16s command denied to user: '%-.32s@%-.64s' for table '%-.64s'", -"%-.16s command denied to user: '%-.32s@%-.64s' for column '%-.64s' in table '%-.64s'", -"Illegal GRANT/REVOKE command. Please consult the manual which privileges can be used.", +"%-.16s command denied to user '%-.32s'@'%-.64s' for table '%-.64s'", +"%-.16s command denied to user '%-.32s'@'%-.64s' for column '%-.64s' in table '%-.64s'", +"Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used", "The host or user argument to GRANT is too long", "Table '%-.64s.%-.64s' doesn't exist", "There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'", "The used command is not allowed with this MySQL version", -"You have an error in your SQL syntax. 
Check the manual that corresponds to your MySQL server version for the right syntax to use", +"You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use", "Delayed insert thread couldn't get requested lock for table %-.64s", "Too many delayed threads in use", "Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)", -"Got a packet bigger than 'max_allowed_packet'", +"Got a packet bigger than 'max_allowed_packet' bytes", "Got a read error from the connection pipe", "Got an error from fcntl()", "Got packets out of order", @@ -176,30 +178,30 @@ "Got timeout reading communication packets", "Got an error writing communication packets", "Got timeout writing communication packets", -"Result string is longer than max_allowed_packet", +"Result string is longer than 'max_allowed_packet' bytes", "The used table type doesn't support BLOB/TEXT columns", "The used table type doesn't support AUTO_INCREMENT columns", "INSERT DELAYED can't be used with table '%-.64s' because it is locked with LOCK TABLES", "Incorrect column name '%-.100s'", -"The used table handler can't index column '%-.64s'", +"The used storage engine can't index column '%-.64s'", "All tables in the MERGE table are not identically defined", "Can't write, because of unique constraint, to table '%-.64s'", -"BLOB column '%-.64s' used in key specification without a key length", -"All parts of a PRIMARY KEY must be NOT NULL; If you need NULL in a key, use UNIQUE instead", +"BLOB/TEXT column '%-.64s' used in key specification without a key length", +"All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead", "Result consisted of more than one row", "This table type requires a primary key", "This version of MySQL is not compiled with RAID support", "You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column", "Key '%-.64s' doesn't exist in table '%-.64s'", "Can't open table", -"The handler for the table doesn't support %s", +"The storage engine for the table doesn't support %s", "You are not allowed to execute this command in a transaction", "Got error %d during COMMIT", "Got error %d during ROLLBACK", "Got error %d during FLUSH_LOGS", "Got error %d during CHECKPOINT", "Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)", -"The handler for the table does not support binary table dump", +"The storage engine for the table does not support binary table dump", "Binlog closed, cannot RESET MASTER", "Failed rebuilding the index of dumped table '%-.64s'", "Error from master: '%-.64s'", @@ -210,24 +212,24 @@ "Unknown system variable '%-.64s'", "Table '%-.64s' is marked as crashed and should be repaired", "Table '%-.64s' is marked as crashed and last (automatic?) repair failed", -"Warning: Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage. 
Increase this mysqld variable and try again", -"This operation cannot be performed with a running slave, run SLAVE STOP first", -"This operation requires a running slave, configure slave and do SLAVE START", -"The server is not configured as slave, fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure, more error messages can be found in the MySQL error log", -"Could not create slave thread, check system resources", +"Some non-transactional changed tables couldn't be rolled back", +"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", +"This operation cannot be performed with a running slave; run STOP SLAVE first", +"This operation requires a running slave; configure slave and do START SLAVE", +"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", +"Could not initialize master info structure; more error messages can be found in the MySQL error log", +"Could not create slave thread; check system resources", "User %-.64s has already more than 'max_user_connections' active connections", "You may only use constant expressions with SET", -"Lock wait timeout exceeded; Try restarting transaction", +"Lock wait timeout exceeded; try restarting transaction", "The total number of locks exceeds the lock table size", "Update locks cannot be acquired during a READ UNCOMMITTED transaction", "DROP DATABASE not allowed while thread is holding global read lock", "CREATE DATABASE not allowed while thread is holding global read lock", -"Wrong arguments to %s", -"%-.32s@%-.64s is not allowed to create new users", -"Incorrect table definition; All MERGE tables must be in the same database", -"Deadlock found when trying to get lock; Try restarting transaction", +"Incorrect arguments to %s", +"'%-.32s'@'%-.64s' is not allowed to create new users", +"Incorrect table definition; all MERGE tables must be in the same database", +"Deadlock found when trying to get lock; try restarting transaction", "The used table type doesn't support FULLTEXT indexes", "Cannot add foreign key constraint", "Cannot add or update a child row: a foreign key constraint fails", @@ -235,24 +237,86 @@ "Error connecting to master: %-.128s", "Error running query on master: %-.128s", "Error when executing command %s: %-.128s", -"Wrong usage of %s and %s", +"Incorrect usage of %s and %s", "The used SELECT statements have a different number of columns", "Can't execute the query because you have a conflicting read lock", "Mixing of transactional and non-transactional tables is disabled", "Option '%s' used twice in statement", "User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied. 
You need the %-.128s privilege for this operation", -"Variable '%-.64s' is a LOCAL variable and can't be used with SET GLOBAL", +"Access denied; you need the %-.128s privilege for this operation", +"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", "Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", "Variable '%-.64s' doesn't have a default value", "Variable '%-.64s' can't be set to the value of '%-.64s'", -"Wrong argument type to variable '%-.64s'", +"Incorrect argument type to variable '%-.64s'", "Variable '%-.64s' can only be set, not read", -"Wrong usage/placement of '%s'", +"Incorrect usage/placement of '%s'", "This version of MySQL doesn't yet support '%s'", "Got fatal error %d: '%-.128s' from master when reading data from binary log", "Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable" -] # End of error messages -# Do NOT add new error messages here as this makes merges to 4.1 too hard! +"Variable '%-.64s' is a %s variable", +"Incorrect foreign key definition for '%-.64s': %s", +"Key reference and table reference don't match", +"Operand should contain %d column(s)", +"Subquery returns more than 1 row", +"Unknown prepared statement handler (%.*s) given to %s", +"Help database is corrupt or does not exist", +"Cyclic reference on subqueries", +"Converting column '%s' from %s to %s", +"Reference '%-.64s' not supported (%s)", +"Every derived table must have its own alias", +"Select %u was reduced during optimization", +"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", +"Client does not support authentication protocol requested by server; consider upgrading MySQL client", +"All parts of a SPATIAL index must be NOT NULL", +"COLLATION '%s' is not valid for CHARACTER SET '%s'", +"Slave is already running", +"Slave has already been stopped", +"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", +"ZLIB: Not enough memory", +"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", +"ZLIB: Input data corrupted", +"%d line(s) were cut by GROUP_CONCAT()", +"Row %ld doesn't contain data for all columns", +"Row %ld was truncated; it contained more data than there were input columns", +"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", +"Data truncated; out of range for column '%s' at row %ld", +"Data truncated for column '%s' at row %ld", +"Using storage engine %s for table '%s'", +"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", +"Can't drop one or more of the requested users", +"Can't revoke all privileges, grant for one or more of the requested users", +"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", +"Illegal mix of collations for operation '%s'", +"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", +"Unknown collation: '%-.64s'", +"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", +"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", +"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", +"Incorrect parameter or combination of parameters for START SLAVE UNTIL", +"It is recommended to use --skip-slave-start when doing step-by-step replication with 
START SLAVE UNTIL; otherwise, you will get problems if you get an unexpected slave's mysqld restart", +"SQL thread is not to be started so UNTIL options are ignored", +"Incorrect index name '%-.100s'", +"Incorrect catalog name '%-.100s'", +"Query cache failed to set size %lu; new query cache size is %lu", +"Column '%-.64s' cannot be part of FULLTEXT index", +"Unknown key cache '%-.100s'", +"MySQL is started in --skip-name-resolve mode; you must restart it without this switch for this grant to work", +"Unknown table engine '%s'", +"'%s' is deprecated; use '%s' instead", +"The target table %-.100s of the %s is not updatable", +"The '%s' feature is disabled; you need MySQL built with '%s' to have it working", +"The MySQL server is running with the %s option so it cannot execute this statement", +"Column '%-.100s' has duplicated value '%-.64s' in %s" +"Truncated incorrect %-.32s value: '%-.128s'" +"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" +"Invalid ON UPDATE clause for '%-.64s' column", +"This command is not supported in the prepared statement protocol yet", +"Got error %d '%-.100s' from %s", +"Got temporary error %d '%-.100s' from %s", +"Unknown or incorrect time zone: '%-.64s'", +"Invalid TIMESTAMP value in column '%s' at row %ld", +"Invalid %s character string: '%.64s'", +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" +"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/estonian/errmsg.txt b/sql/share/estonian/errmsg.txt index 581baf24e09..b7557a37670 100644 --- a/sql/share/estonian/errmsg.txt +++ b/sql/share/estonian/errmsg.txt @@ -19,13 +19,15 @@ Parandanud ja täiendanud: Indrek Siitan (tfr@mysql.com) */ +character-set=latin7 + "hashchk", "isamchk", "EI", "JAH", "Ei suuda luua faili '%-.64s' (veakood: %d)", "Ei suuda luua tabelit '%-.64s' (veakood: %d)", -"Ei suuda luua andmebaasi '%-.64s'. (veakood: %d)", +"Ei suuda luua andmebaasi '%-.64s' (veakood: %d)", "Ei suuda luua andmebaasi '%-.64s': andmebaas juba eksisteerib", "Ei suuda kustutada andmebaasi '%-.64s': andmebaasi ei eksisteeri", "Viga andmebaasi kustutamisel (ei suuda kustutada faili '%-.64s', veakood: %d)", @@ -35,7 +37,7 @@ "Ei suuda lugeda '%-.64s' olekut (veakood: %d)", "Ei suuda identifitseerida jooksvat kataloogi (veakood: %d)", "Ei suuda lukustada faili (veakood: %d)", -"Ei suuda avada faili '%-.64s'. (veakood: %d)", +"Ei suuda avada faili '%-.64s' (veakood: %d)", "Ei suuda leida faili '%-.64s' (veakood: %d)", "Ei suuda lugeda kataloogi '%-.64s' (veakood: %d)", "Ei suuda siseneda kataloogi '%-.64s' (veakood: %d)", @@ -53,8 +55,8 @@ "Tabeli '%-.64s' handler ei toeta antud operatsiooni", "Ei suuda leida kirjet '%-.64s'-s", "Vigane informatsioon failis '%-.64s'", -"Tabeli '%-.64s' võtmefail on vigane; Proovi seda parandada", -"Tabeli '%-.64s' võtmefail on aegunud; Paranda see!", +"Tabeli '%-.64s' võtmefail on vigane; proovi seda parandada", +"Tabeli '%-.64s' võtmefail on aegunud; paranda see!", "Tabel '%-.64s' on ainult lugemiseks", "Mälu sai otsa. Proovi MySQL uuesti käivitada (puudu jäi %d baiti)", "Mälu sai sorteerimisel otsa. Suurenda MySQL-i sorteerimispuhvrit", @@ -63,8 +65,8 @@ "Mälu sai otsa. 
Võimalik, et aitab swap-i lisamine või käsu 'ulimit' abil MySQL-le rohkema mälu kasutamise lubamine", "Ei suuda lahendada IP aadressi masina nimeks", "Väär handshake", -"Ligipääs keelatud kasutajale '%-.32s@%-.64s' andmebaasile '%-.64s'", -"Ligipääs keelatud kasutajale '%-.32s@%-.64s' (kasutab parooli: %s)", +"Ligipääs keelatud kasutajale '%-.32s'@'%-.64s' andmebaasile '%-.64s'", +"Ligipääs keelatud kasutajale '%-.32s'@'%-.64s' (kasutab parooli: %s)", "Andmebaasi ei ole valitud", "Tundmatu käsk", "Tulp '%-.64s' ei saa omada nullväärtust", @@ -112,7 +114,7 @@ "ALTER TABLE kasutades ei saa kustutada kõiki tulpasid. Kustuta tabel DROP TABLE abil", "Ei suuda kustutada '%-.64s'. Kontrolli kas tulp/võti eksisteerib", "Kirjeid: %ld Kattuvaid: %ld Hoiatusi: %ld", -"INSERT TABLE '%-.64s' ei ole lubatud FROM tabelite nimekirjas", +"You can't specify target table '%-.64s' for update in FROM clause", "Tundmatu lõim: %lu", "Ei ole lõime %lu omanik", "Ühtegi tabelit pole kasutusel", @@ -161,8 +163,8 @@ "regexp tagastas vea '%-.64s'", "GROUP tulpade (MIN(),MAX(),COUNT()...) kooskasutamine tavaliste tulpadega ilma GROUP BY klauslita ei ole lubatud", "Sellist õigust ei ole defineeritud kasutajale '%-.32s' masinast '%-.64s'", -"%-.16s käsk ei ole lubatud kasutajale '%-.32s@%-.64s' tabelis '%-.64s'", -"%-.16s käsk ei ole lubatud kasutajale '%-.32s@%-.64s' tulbale '%-.64s' tabelis '%-.64s'", +"%-.16s käsk ei ole lubatud kasutajale '%-.32s'@'%-.64s' tabelis '%-.64s'", +"%-.16s käsk ei ole lubatud kasutajale '%-.32s'@'%-.64s' tulbale '%-.64s' tabelis '%-.64s'", "Vigane GRANT/REVOKE käsk. Tutvu kasutajajuhendiga", "Masina või kasutaja nimi GRANT lauses on liiga pikk", "Tabelit '%-.64s.%-.64s' ei eksisteeri", @@ -217,11 +219,11 @@ "Tabel '%-.64s' on märgitud vigaseks ja viimane (automaatne?) parandus ebaõnnestus", "Hoiatus: mõnesid transaktsioone mittetoetavaid tabeleid ei suudetud tagasi kerida", "Mitme lausendiga transaktsioon nõudis rohkem ruumi kui lubatud 'max_binlog_cache_size' muutujaga. 
Suurenda muutuja väärtust ja proovi uuesti", -"This operation cannot be performed with a running slave, run SLAVE STOP first", -"This operation requires a running slave, configure slave and do SLAVE START", -"The server is not configured as slave, fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure, more error messages can be found in the MySQL error log", -"Could not create slave thread, check system resources", +"This operation cannot be performed with a running slave; run STOP SLAVE first", +"This operation requires a running slave; configure slave and do START SLAVE", +"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", +"Could not initialize master info structure; more error messages can be found in the MySQL error log", +"Could not create slave thread; check system resources", "Kasutajal %-.64s on juba rohkem ühendusi kui lubatud 'max_user_connections' muutujaga", "Ainult konstantsed suurused on lubatud SET klauslis", "Kontrollaeg ületatud luku järel ootamisel; Proovi transaktsiooni otsast alata", @@ -230,7 +232,7 @@ "DROP DATABASE ei ole lubatud kui lõim omab globaalset READ lukku", "CREATE DATABASE ei ole lubatud kui lõim omab globaalset READ lukku", "Vigased parameetrid %s-le", -"Kasutajal %-.32s@%-.64s ei ole lubatud luua uusi kasutajaid", +"Kasutajal '%-.32s'@'%-.64s' ei ole lubatud luua uusi kasutajaid", "Vigane tabelimääratlus; kõik MERGE tabeli liikmed peavad asuma samas andmebaasis", "Lukustamisel tekkis tupik (deadlock); alusta transaktsiooni otsast", "Antud tabelitüüp ei toeta FULLTEXT indekseid", @@ -246,16 +248,80 @@ "Transaktsioone toetavate ning mittetoetavate tabelite kooskasutamine ei ole lubatud", "Määrangut '%s' on lauses kasutatud topelt", "User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied. 
You need the %-.128s privilege for this operation", -"Variable '%-.64s' is a LOCAL variable and can't be used with SET GLOBAL", +"Access denied; you need the %-.128s privilege for this operation", +"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", "Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", "Variable '%-.64s' doesn't have a default value", "Variable '%-.64s' can't be set to the value of '%-.64s'", -"Wrong argument type to variable '%-.64s'", +"Incorrect argument type to variable '%-.64s'", "Variable '%-.64s' can only be set, not read", -"Wrong usage/placement of '%s'", +"Incorrect usage/placement of '%s'", "This version of MySQL doesn't yet support '%s'", "Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules" -"Variable '%-.64s' is a %s variable" +"Slave SQL thread ignored the query because of replicate-*-table rules", +"Variable '%-.64s' is a %s variable", +"Incorrect foreign key definition for '%-.64s': %s", +"Key reference and table reference don't match", +"Operand should contain %d column(s)", +"Subquery returns more than 1 row", +"Unknown prepared statement handler (%.*s) given to %s", +"Help database is corrupt or does not exist", +"Cyclic reference on subqueries", +"Converting column '%s' from %s to %s", +"Reference '%-.64s' not supported (%s)", +"Every derived table must have its own alias", +"Select %u was reduced during optimization", +"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", +"Client does not support authentication protocol requested by server; consider upgrading MySQL client", +"All parts of a SPATIAL index must be NOT NULL", +"COLLATION '%s' is not valid for CHARACTER SET '%s'", +"Slave is already running", +"Slave has already been stopped", +"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", +"ZLIB: Not enough memory", +"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", +"ZLIB: Input data corrupted", +"%d line(s) were cut by GROUP_CONCAT()", +"Row %ld doesn't contain data for all columns", +"Row %ld was truncated; it contained more data than there were input columns", +"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", +"Data truncated; out of range for column '%s' at row %ld", +"Data truncated for column '%s' at row %ld", +"Using storage engine %s for table '%s'", +"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", +"Can't drop one or more of the requested users", +"Can't revoke all privileges, grant for one or more of the requested users", +"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", +"Illegal mix of collations for operation '%s'", +"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", +"Unknown collation: '%-.64s'", +"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", +"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", +"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", +"Incorrect parameter or combination of parameters for START SLAVE UNTIL", +"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; 
otherwise, you are not safe in case of unexpected slave's mysqld restart", +"SQL thread is not to be started so UNTIL options are ignored", +"Incorrect index name '%-.100s'", +"Incorrect catalog name '%-.100s'", +"Query cache failed to set size %lu, new query cache size is %lu", +"Column '%-.64s' cannot be part of FULLTEXT index", +"Unknown key cache '%-.100s'", +"MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work", +"Unknown table engine '%s'", +"'%s' is deprecated, use '%s' instead", +"The target table %-.100s of the %s is not updateable", +"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", +"The MySQL server is running with the %s option so it cannot execute this statement", +"Column '%-.100s' has duplicated value '%-.64s' in %s" +"Truncated wrong %-.32s value: '%-.128s'" +"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" +"Invalid ON UPDATE clause for '%-.64s' column", +"This command is not supported in the prepared statement protocol yet", +"Got error %d '%-.100s' from %s", +"Got temporary error %d '%-.100s' from %s", +"Unknown or incorrect time zone: '%-.64s'", +"Invalid TIMESTAMP value in column '%s' at row %ld", +"Invalid %s character string: '%.64s'", +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" +"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/french/errmsg.txt b/sql/share/french/errmsg.txt index 9aa430aa718..6f10a468e26 100644 --- a/sql/share/french/errmsg.txt +++ b/sql/share/french/errmsg.txt @@ -14,15 +14,17 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +character-set=latin1 + "hashchk", "isamchk", "NON", "OUI", "Ne peut créer le fichier '%-.64s' (Errcode: %d)", "Ne peut créer la table '%-.64s' (Errcode: %d)", -"Ne peut créer la base '%-.64s'. Erreur %d", -"Ne peut créer la base '%-.64s'. Elle existe déjà", -"Ne peut effacer la base '%-.64s'. Elle n'existe pas", +"Ne peut créer la base '%-.64s' (Erreur %d)", +"Ne peut créer la base '%-.64s'; elle existe déjà", +"Ne peut effacer la base '%-.64s'; elle n'existe pas", "Ne peut effacer la base '%-.64s' (erreur %d)", "Erreur en effaçant la base (rmdir '%-.64s', erreur %d)", "Erreur en effaçant '%-.64s' (Errcode: %d)", @@ -30,7 +32,7 @@ "Ne peut obtenir le status de '%-.64s' (Errcode: %d)", "Ne peut obtenir le répertoire de travail (Errcode: %d)", "Ne peut verrouiller le fichier (Errcode: %d)", -"Ne peut ouvrir le fichier: '%-.64s'. (Errcode: %d)", +"Ne peut ouvrir le fichier: '%-.64s' (Errcode: %d)", "Ne peut trouver le fichier: '%-.64s' (Errcode: %d)", "Ne peut lire le répertoire de '%-.64s' (Errcode: %d)", "Ne peut changer le répertoire pour '%-.64s' (Errcode: %d)", @@ -48,8 +50,8 @@ "Le handler de la table '%-.64s' n'a pas cette option", "Ne peut trouver l'enregistrement dans '%-.64s'", "Information erronnée dans le fichier: '%-.64s'", -"Index corrompu dans la table: '%-.64s'. Essayez de le réparer", -"Vieux fichier d'index pour la table '%-.64s'; Réparez le!", +"Index corrompu dans la table: '%-.64s'; essayez de le réparer", +"Vieux fichier d'index pour la table '%-.64s'; réparez le!", "'%-.64s' est en lecture seulement", "Manque de mémoire. Redémarrez le démon et ré-essayez (%d octets nécessaires)", "Manque de mémoire pour le tri. 
Augmentez-la.", @@ -58,8 +60,8 @@ "Manque de 'threads'/mémoire", "Ne peut obtenir de hostname pour votre adresse", "Mauvais 'handshake'", -"Accès refusé pour l'utilisateur: '%-.32s@%-.64s'. Base '%-.64s'", -"Accès refusé pour l'utilisateur: '%-.32s@%-.64s' (mot de passe: %s)", +"Accès refusé pour l'utilisateur: '%-.32s'@'@%-.64s'. Base '%-.64s'", +"Accès refusé pour l'utilisateur: '%-.32s'@'@%-.64s' (mot de passe: %s)", "Aucune base n'a été sélectionnée", "Commande inconnue", "Le champ '%-.64s' ne peut être vide (null)", @@ -107,7 +109,7 @@ "Vous ne pouvez effacer tous les champs avec ALTER TABLE. Utilisez DROP TABLE", "Ne peut effacer (DROP) '%-.64s'. Vérifiez s'il existe", "Enregistrements: %ld Doublons: %ld Avertissements: %ld", -"INSERT TABLE '%-.64s' n'est pas permis dans FROM liste des tables", +"You can't specify target table '%-.64s' for update in FROM clause", "Numéro de tâche inconnu: %lu", "Vous n'êtes pas propriétaire de la tâche no: %lu", "Aucune table utilisée", @@ -156,8 +158,8 @@ "Erreur '%-.64s' provenant de regexp", "Mélanger les colonnes GROUP (MIN(),MAX(),COUNT()...) avec des colonnes normales est interdit s'il n'y a pas de clause GROUP BY", "Un tel droit n'est pas défini pour l'utilisateur '%-.32s' sur l'hôte '%-.64s'", -"La commande '%-.16s' est interdite à l'utilisateur: '%-.32s@%-.64s' sur la table '%-.64s'", -"La commande '%-.16s' est interdite à l'utilisateur: '%-.32s@%-.64s' sur la colonne '%-.64s' de la table '%-.64s'", +"La commande '%-.16s' est interdite à l'utilisateur: '%-.32s'@'@%-.64s' sur la table '%-.64s'", +"La commande '%-.16s' est interdite à l'utilisateur: '%-.32s'@'@%-.64s' sur la colonne '%-.64s' de la table '%-.64s'", "Commande GRANT/REVOKE incorrecte. Consultez le manuel.", "L'hôte ou l'utilisateur donné en argument à GRANT est trop long", "La table '%-.64s.%s' n'existe pas", @@ -176,7 +178,7 @@ "Timeout en lecture des paquets reçus", "Erreur d'écriture des paquets envoyés", "Timeout d'écriture des paquets envoyés", -"La chaîne résultat est plus grande que max_allowed_packet", +"La chaîne résultat est plus grande que 'max_allowed_packet'", "Ce type de table ne supporte pas les colonnes BLOB/TEXT", "Ce type de table ne supporte pas les colonnes AUTO_INCREMENT", "INSERT DELAYED ne peut être utilisé avec la table '%-.64s', car elle est verrouée avec LOCK TABLES", @@ -207,13 +209,13 @@ "Erreur d'écriture réseau reçue du maître", "Impossible de trouver un index FULLTEXT correspondant à cette liste de colonnes", "Impossible d'exécuter la commande car vous avez des tables verrouillées ou une transaction active", -"Variable système '%-.64' inconnue", +"Variable système '%-.64s' inconnue", "La table '%-.64s' est marquée 'crashed' et devrait être réparée", "La table '%-.64s' est marquée 'crashed' et le dernier 'repair' a échoué", "Attention: certaines tables ne supportant pas les transactions ont été changées et elles ne pourront pas être restituées", "Cette transaction à commandes multiples nécessite plus de 'max_binlog_cache_size' octets de stockage, augmentez cette variable de mysqld et réessayez", -"Cette opération ne peut être réalisée avec un esclave actif, faites SLAVE STOP d'abord", -"Cette opération nécessite un esclave actif, configurez les esclaves et faites SLAVE START", +"Cette opération ne peut être réalisée avec un esclave actif, faites STOP SLAVE d'abord", +"Cette opération nécessite un esclave actif, configurez les esclaves et faites START SLAVE", "Le server n'est pas configuré comme un esclave, changez le fichier de configuration ou 
utilisez CHANGE MASTER TO", "Impossible d'initialiser les structures d'information de maître, vous trouverez des messages d'erreur supplémentaires dans le journal des erreurs de MySQL", "Impossible de créer une tâche esclave, vérifiez les ressources système", @@ -225,8 +227,8 @@ "DROP DATABASE n'est pas autorisée pendant qu'une tâche possède un verrou global en lecture", "CREATE DATABASE n'est pas autorisée pendant qu'une tâche possède un verrou global en lecture", "Mauvais arguments à %s", -"%-.32s@%-.64s n'est pas autorisé à créer de nouveaux utilisateurs", -"Définition de table incorrecte : toutes les tables MERGE doivent être dans la même base de donnée", +"'%-.32s'@'%-.64s' n'est pas autorisé à créer de nouveaux utilisateurs", +"Définition de table incorrecte; toutes les tables MERGE doivent être dans la même base de donnée", "Deadlock découvert en essayant d'obtenir les verrous : essayez de redémarrer la transaction", "Le type de table utilisé ne supporte pas les index FULLTEXT", "Impossible d'ajouter des contraintes d'index externe", @@ -235,22 +237,86 @@ "Error connecting to master: %-.128s", "Error running query on master: %-.128s", "Error when executing command %s: %-.128s", -"Wrong usage of %s and %s", +"Incorrect usage of %s and %s", "The used SELECT statements have a different number of columns", "Can't execute the query because you have a conflicting read lock", "Mixing of transactional and non-transactional tables is disabled", "Option '%s' used twice in statement", "User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied. You need the %-.128s privilege for this operation", -"Variable '%-.64s' is a LOCAL variable and can't be used with SET GLOBAL", +"Access denied; you need the %-.128s privilege for this operation", +"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", "Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", "Variable '%-.64s' doesn't have a default value", "Variable '%-.64s' can't be set to the value of '%-.64s'", -"Wrong argument type to variable '%-.64s'", +"Incorrect argument type to variable '%-.64s'", "Variable '%-.64s' can only be set, not read", -"Wrong usage/placement of '%s'", +"Incorrect usage/placement of '%s'", "This version of MySQL doesn't yet support '%s'", "Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules" -"Variable '%-.64s' is a %s variable" +"Slave SQL thread ignored the query because of replicate-*-table rules", +"Variable '%-.64s' is a %s variable", +"Incorrect foreign key definition for '%-.64s': %s", +"Key reference and table reference don't match", +"Operand should contain %d column(s)", +"Subquery returns more than 1 row", +"Unknown prepared statement handler (%.*s) given to %s", +"Help database is corrupt or does not exist", +"Cyclic reference on subqueries", +"Converting column '%s' from %s to %s", +"Reference '%-.64s' not supported (%s)", +"Every derived table must have its own alias", +"Select %u was reduced during optimization", +"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", +"Client does not support authentication protocol requested by server; consider upgrading MySQL client", +"All parts of a SPATIAL index must be NOT NULL", +"COLLATION '%s' is not valid for CHARACTER SET '%s'", +"Slave is already running", +"Slave has already been stopped", +"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed 
data was corrupted)", +"ZLIB: Not enough memory", +"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", +"ZLIB: Input data corrupted", +"%d line(s) were cut by GROUP_CONCAT()", +"Row %ld doesn't contain data for all columns", +"Row %ld was truncated; it contained more data than there were input columns", +"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", +"Data truncated; out of range for column '%s' at row %ld", +"Data truncated for column '%s' at row %ld", +"Using storage engine %s for table '%s'", +"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", +"Can't drop one or more of the requested users", +"Can't revoke all privileges, grant for one or more of the requested users", +"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", +"Illegal mix of collations for operation '%s'", +"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", +"Unknown collation: '%-.64s'", +"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", +"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", +"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", +"Incorrect parameter or combination of parameters for START SLAVE UNTIL", +"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", +"SQL thread is not to be started so UNTIL options are ignored", +"Incorrect index name '%-.100s'", +"Incorrect catalog name '%-.100s'", +"Query cache failed to set size %lu, new query cache size is %lu", +"Column '%-.64s' cannot be part of FULLTEXT index", +"Unknown key cache '%-.100s'", +"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", +"Unknown table engine '%s'", +"'%s' is deprecated, use '%s' instead", +"The target table %-.100s of the %s is not updateable", +"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", +"The MySQL server is running with the %s option so it cannot execute this statement", +"Column '%-.100s' has duplicated value '%-.64s' in %s" +"Truncated wrong %-.32s value: '%-.128s'" +"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" +"Invalid ON UPDATE clause for '%-.64s' column", +"This command is not supported in the prepared statement protocol yet", +"Got error %d '%-.100s' from %s", +"Got temporary error %d '%-.100s' from %s", +"Unknown or incorrect time zone: '%-.64s'", +"Invalid TIMESTAMP value in column '%s' at row %ld", +"Invalid %s character string: '%.64s'", +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" +"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/german/errmsg.txt b/sql/share/german/errmsg.txt index 7fa896bcc34..c3d00ae06b4 100644 --- a/sql/share/german/errmsg.txt +++ b/sql/share/german/errmsg.txt @@ -15,246 +15,321 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* - Dirk Munzinger (dmun@4t2.com) - Version: 07.06.2001 */ + Dirk Munzinger (dmun@4t2.com) + 2001-06-07 + + Georg Richter (georg@php.net) + fixed typos and translation + translated new error messages + 2002-12-11 + + Stefan Hinz (stefan@mysql.com) + 2003-10-01 +*/ + +character-set=latin1 "hashchk", "isamchk", "Nein", "Ja", -"Kann Datei '%-.64s' nicht erzeugen. (Fehler: %d)", -"Kann Tabelle '%-.64s' nicht erzeugen. (Fehler: %d)", -"Kann Datenbank '%-.64s' nicht erzeugen. (Fehler: %d)", -"Kann Datenbank '%-.64s' nicht erzeugen. Datenbank '%-.64s' existiert bereits.", -"Kann Datenbank '%-.64s' nicht löschen. Keine Datenbank '%-.64s' vorhanden.", -"Fehler beim Löschen der Datenbank. ('%-.64s' kann nicht gelöscht werden, Fehler %d)", -"Fehler beim Löschen der Datenbank. (Verzeichnis '%-.64s' kann nicht gelöscht werden, Fehler %d)", -"Fehler beim Löschen von '%-.64s'. (Fehler: %d)", -"Datensatz in der Systemtabelle nicht lesbar.", -"Kann Status von '%-.64s' nicht erhalten. (Fehler: %d)", -"Kann Arbeitsverzeichnis nicht erhalten. (Fehler: %d)", -"File nicht sperrbar. (Fehler: %d)", -"Kann Datei '%-.64s' nicht öffnen. (Fehler: %d)", -"Kann Datei '%-.64s' nicht finden. (Fehler: %d)", -"Verzeichnis von '%-.64s' nicht lesbar. (Fehler: %d)", -"Verzeichnis kann nicht nach '%-.64s' gewechselt werden. (Fehler: %d)", -"Datensatz hat sich seit dem letzten Zugriff auf Tabelle '%-.64s' geändert.", -"Festplatte voll (%-.64s). Warte bis jemand Platz schafft ...", -"Kann nicht speichern, doppelter Schlüssel in Tabelle '%-.64s'.", -"Fehler beim Schließen von '%-.64s'. (Fehler: %d)", -"Fehler beim Lesen der Datei '%-.64s'. (Fehler: %d)", -"Fehler beim Umbennenen von '%-.64s' nach '%-.64s'. (Fehler: %d)", -"Fehler beim Speichern der Datei '%-.64s'. (Fehler: %d)", -"'%-.64s' ist für Veränderungen gesperrt.", -"Sortieren abgebrochen.", -"View '%-.64s' existiert für '%-.64s' nicht.", -"Fehler %d. (table handler)", -"Diese Option gibt es nicht. (table handler)", -"Kann Datensatz nicht finden.", -"Falsche Information in Datei: '%-.64s'", -"Falsche Schlüssel-Datei für Tabelle '%-.64s'. Versuche zu reparieren!", -"Alte Schlüssel-Datei für Tabelle '%-.64s'. 
Repariere!", -"'%-.64s' ist nur lesbar.", -"Kein Speicher (benötigt %d bytes). Server neu starten.", -"Kein Speicher zum Sortieren. Server Sortier-Buffer erhöhen.", -"Unerwartetes EOF beim Lesen der Datei '%-.64s'. (Fehler: %d)", -"Zu viele Verbindungen.", -"Zuwenig Speicher.", -"Kann Hostname für diese Adresse nicht erhalten.", -"Schlechter handshake.", -"Keine Zugriffsberechtigung für Benutzer: '%-.32s@%-.64s' für Datenbank '%-.64s'.", -"Keine Zugriffsberechtigung für Benutzer: '%-.32s@%-.64s'. (Verwendetes Passwort: %-.64s)", -"Keine Datenbank ausgewählt.", -"Unbekannter Befehl.", -"Feld '%-.64s' kann nicht NULL sein.", -"Unbekannte Datenbank '%-.64s'.", -"Tabelle '%-.64s' bereits vorhanden.", -"Unbekannte Tabelle '%-.64s'.", -"Spalte: '%-.64s' in %-.64s ist mißverständlich.", -"Der Server wird herunter gefahren...", -"Unbekanntes Tabellenfeld '%-.64s' in %-.64s.", -"'%-.64s' ist nicht in GROUP BY.", -"Gruppierung nicht möglich bei '%-.64s'.", -"Im Statement wurden sowohl sum-Funktionen als auch Spalten verwendet. Nicht möglich.", -"Spaltenzähler entspricht nicht dem Wertzähler.", -"Name des Identifizierers '%-.64s' ist zu lang.", +"Kann Datei '%-.64s' nicht erzeugen (Fehler: %d)", +"Kann Tabelle '%-.64s' nicht erzeugen (Fehler: %d)", +"Kann Datenbank '%-.64s' nicht erzeugen (Fehler: %d)", +"Kann Datenbank '%-.64s' nicht erzeugen. Datenbank '%-.64s' existiert bereits", +"Kann Datenbank '%-.64s' nicht löschen. Keine Datenbank '%-.64s' vorhanden", +"Fehler beim Löschen der Datenbank ('%-.64s' kann nicht gelöscht werden, Fehlernuumer: %d)", +"Fehler beim Löschen der Datenbank (Verzeichnis '%-.64s' kann nicht gelöscht werden, Fehlernummer: %d)", +"Fehler beim Löschen von '%-.64s' (Fehler: %d)", +"Datensatz in der Systemtabelle nicht lesbar", +"Kann Status von '%-.64s' nicht ermitteln (Fehler: %d)", +"Kann Arbeitsverzeichnis nicht ermitteln (Fehler: %d)", +"Datei kann nicht gesperrt werden (Fehler: %d)", +"Datei '%-.64s' nicht öffnen (Fehler: %d)", +"Kann Datei '%-.64s' nicht finden (Fehler: %d)", +"Verzeichnis von '%-.64s' nicht lesbar (Fehler: %d)", +"Kann nicht in das Verzeichnis '%-.64s' wechseln (Fehler: %d)", +"Datensatz hat sich seit dem letzten Zugriff auf Tabelle '%-.64s' geändert", +"Festplatte voll (%-.64s). Warte, bis jemand Platz schafft ...", +"Kann nicht speichern, Grund: doppelter Schlüssel in Tabelle '%-.64s'", +"Fehler beim Schließen von '%-.64s' (Fehler: %d)", +"Fehler beim Lesen der Datei '%-.64s' (Fehler: %d)", +"Fehler beim Umbenennen von '%-.64s' in '%-.64s' (Fehler: %d)", +"Fehler beim Speichern der Datei '%-.64s' (Fehler: %d)", +"'%-.64s' ist für Änderungen gesperrt", +"Sortiervorgang abgebrochen", +"View '%-.64s' existiert für '%-.64s' nicht", +"Fehler %d (Tabellenhandler)", +"Diese Option gibt es nicht (Tabellenhandler)", +"Kann Datensatz nicht finden", +"Falsche Information in Datei '%-.64s'", +"Falsche Schlüssel-Datei für Tabelle '%-.64s'. versuche zu reparieren", +"Alte Schlüssel-Datei für Tabelle '%-.64s'. Bitte reparieren", +"'%-.64s' ist nur lesbar", +"Kein Speicher vorhanden (%d Bytes benötigt). Bitte Server neu starten", +"Kein Speicher zum Sortieren vorhanden. sort_buffer_size sollte erhöht werden", +"Unerwartetes Ende beim Lesen der Datei '%-.64s' (Fehler: %d)", +"Zu viele Verbindungen", +"Kein Speicher mehr vorhanden. Prüfen Sie, ob mysqld oder ein anderer Prozess allen Speicher verbraucht. 
Wenn nicht, sollten Sie mit 'ulimit' dafür sorgen, dass mysqld mehr Speicher benutzen darf, oder mehr Swap-Speicher einrichten", +"Kann Hostnamen für diese Adresse nicht erhalten", +"Schlechter Handshake", +"Benutzer '%-.32s'@'%-.64s' hat keine Zugriffsberechtigung für Datenbank '%-.64s'", +"Benutzer '%-.32s'@'%-.64s' hat keine Zugriffsberechtigung (verwendetes Passwort: %-.64s)", +"Keine Datenbank ausgewählt", +"Unbekannter Befehl", +"Feld '%-.64s' darf nicht NULL sein", +"Unbekannte Datenbank '%-.64s'", +"Tabelle '%-.64s' bereits vorhanden", +"Unbekannte Tabelle '%-.64s'", +"Spalte '%-.64s' in %-.64s ist nicht eindeutig", +"Der Server wird heruntergefahren", +"Unbekanntes Tabellenfeld '%-.64s' in %-.64s", +"'%-.64s' ist nicht in GROUP BY vorhanden", +"Gruppierung über '%-.64s' nicht möglich", +"Die Verwendung von Summierungsfunktionen und Spalten im selben Befehl ist nicht erlaubt", +"Die Anzahl der Spalten entspricht nicht der Anzahl der Werte", +"Name des Bezeichners '%-.64s' ist zu lang", "Doppelter Spaltenname vorhanden: '%-.64s'", "Doppelter Name für Schlüssel (Key) vorhanden: '%-.64s'", -"Doppelter Eintrag '%-.64s' für Schlüssel %d.", -"Falsche Spalten-Spezifizierung für Spalte '%-.64s'.", -"%-.64s bei '%-.64s' in Zeile %d.", -"Leere Abfrage.", -"Keine eindeutige(n) Tabelle/Alias: '%-.64s'", -"Fehlerhafter Vorgabewert (Default-Wert): '%-.64s'", -"Mehrfacher Primärschlüssel (Primary Key) definiert.", -"Zuviele Schlüssel definiert. Maximal %d Schlüssel erlaubt.", -"Zuviele Teilschlüssel definiert. Maximal sind %d Teilschlüssel erlaubt.", -"Schlüssel ist zu lang. Die maximale Schlüssellänge beträgt %d.", -"In der Tabelle gibt es keine Schlüsselspalte '%-.64s'.", -"BLOB-Feld '%-.64s' kann nicht als Schlüssel verwendet werden.", -"Feldlänge für Feld '%-.64s' zu groß (max = %d). BLOB-Feld verwenden!", -"Nur ein Auto-Feld möglich, welches als Schlüssel definiert werden muß.", -"%-.64s: Warten auf Verbindungen", -"%-.64s: Normal beendet.\n", +"Doppelter Eintrag '%-.64s' für Schlüssel %d", +"Falsche Spaltenangaben für Spalte '%-.64s'", +"%s bei '%-.80s' in Zeile %d", +"Leere Abfrage", +"Tabellenname/Alias '%-.64s' nicht eindeutig", +"Fehlerhafter Vorgabewert (DEFAULT): '%-.64s'", +"Mehrfacher Primärschlüssel (PRIMARY KEY) definiert", +"Zu viele Schlüssel definiert. Maximal %d Schlüssel erlaubt", +"Zu viele Teilschlüssel definiert. Maximal sind %d Teilschlüssel erlaubt", +"Schlüssel ist zu lang. Die maximale Schlüssellänge beträgt %d", +"In der Tabelle gibt es keine Schlüsselspalte '%-.64s'", +"BLOB-Feld '%-.64s' kann beim verwendeten Tabellentyp nicht als Schlüssel verwendet werden", +"Feldlänge für Feld '%-.64s' zu groß (maximal %d). BLOB-Feld verwenden!", +"Falsche Tabellendefinition. Es darf nur ein Auto-Feld geben und dieses muss als Schlüssel definiert werden", +"%-.64s: Bereit für Verbindungen", +"%-.64s: Normal heruntergefahren\n", "%-.64s: Signal %d erhalten. Abbruch!\n", -"%-.64s: Shutdown ausgeführt.\n", -"%-.64s: Beendigung des Thread %ld veranlaßt. Benutzer: '%-.64s'\n", -"Kann IP-Socket nicht erstellen.", -"Tabelle '%-.64s' hat keinen solchen Index wie in CREATE INDEX verwendet. Index neu anlegen.", -"Feld-Separator Argument ist nicht in der Form wie erwartet. Bitte im Manual nachlesen.", -"Eine feste Reihenlänge kann für BLOBs nicht verwendet werden. 
Verwende 'fields terminated by' stattdessen.", -"Feld '%-.64s' muß im Datenbank-Directory vorhanden und lesbar für alle sein.", -"File '%-.64s' bereits vorhanden.", +"%-.64s: Heruntergefahren (shutdown)\n", +"%s: Thread %ld zwangsweise beendet. Benutzer: '%-.32s'\n", +"Kann IP-Socket nicht erzeugen", +"Tabelle '%-.64s' besitzt keinen wie den in CREATE INDEX verwendeten Index. Index neu anlegen", +"Feldbegrenzer-Argument ist nicht in der erwarteten Form. Bitte im Handbuch nachlesen", +"Eine feste Zeilenlänge kann für BLOB-Felder nicht verwendet werden. Bitte 'fields terminated by' verwenden", +"Datei '%-.64s' muss im Datenbank-Verzeichnis vorhanden und lesbar für alle sein", +"Datei '%-.64s' bereits vorhanden", "Datensätze: %ld Gelöscht: %ld Ausgelassen: %ld Warnungen: %ld", "Datensätze: %ld Duplikate: %ld", -"Falscher Subteilschlüssel. Der verwendete Schlüsselteil ist entweder kein String oder die verwendete Länge ist länger als der Teilschlüssel.", -"Mit ALTER TABLE können nicht alle Felder auf einmal gelöscht werden. Verwende DROP TABLE stattdessen.", -"Kann '%-.64s' nicht löschen (DROP). Existiert das Feld/der Schlüssel?", +"Falscher Unterteilschlüssel. Der verwendete Schlüsselteil ist entweder kein String, die verwendete Länge ist länger als der Teilschlüssel oder der Tabellenhandler unterstützt keine Unterteilschlüssel", +"Mit ALTER TABLE können nicht alle Felder auf einmal gelöscht werden. Dafür DROP TABLE verwenden", +"Kann '%-.64s' nicht löschen. Existiert das Feld / der Schlüssel?", "Datensätze: %ld Duplikate: %ld Warnungen: %ld", -"INSERT TABLE '%-.64s' nicht erlaubt im FROM Abschnitt.", +"Die Verwendung der zu aktualisierenden Zieltabelle '%-.64s' ist in der FROM-Klausel nicht zulässig.", "Unbekannte Thread-ID: %lu", -"Nicht Besitzer des Threads %lu.", -"Keine Tabellen in Verwendung.", -"Zuviele Strings für Spalte %-.64s und SET.", -"Kann keinen eindeutigen Log-Filenamen erstellen %-.64s.(1-999)\n", -"Tabelle '%-.64s' mit Lese-Sperre versehen und kann nicht upgedated werden.", -"Tabelle '%-.64s' wurde nicht mittels LOCK TABLES gesperrt.", -"BLOB-Feld '%-.64s' kann keinen Vorgabewert (Default-Value) besitzen.", -"Unerlaubter Datenbankname '%-.64s'.", -"Unerlaubter Tabellenname '%-.64s'.", -"Die Ausführung des SELECT würde zu viele Datensätze untersuchen und wahrscheinlich sehr lange daueren. Bitte WHERE überprüfen und SET SQL_BIG_SELECTS=1 verwenden, sofern SELECT ok ist.", -"Unbekannter Fehler.", -"Unbekannte Procedure %-.64s.", -"Falsche Parameterzahl für Procedure %-.64s.", -"Falsche Parameter in Procedure %-.64s.", -"Unbekannte Tabelle '%-.64s' in %-.64s.", -"Feld '%-.64s' wurde zweimal spezifiziert.", -"Falsche Verwendung der GROUP-Funktion.", -"Tabelle '%-.64s' verwendet eine Extension, die in dieser MySQL Version nicht verfügbar ist.", -"Eine Tabelle muß mindestens eine Spalte besitzen.", -"Tabelle '%-.64s' voll.", -"Unbekannter Zeichensatz: '%-.64s'.", -"Zuviele Tabellen. MySQL kann maximal %d Tabellen in einem Join verwenden.", -"Zuviele Felder.", -"Zuviele Spalten. Maximal sind %d Spalten erlaubt (Ohne BLOBs). Einige Felder sollten in BLOBs umgewandelt werden.", -"Thread Stack Überlauf : Verwendet: %ld von %ld Stack. Verwende 'mysqld -O thread_stack=#' um ggf. einen größeren Stack anzulegen.", -"OUTER JOIN enthält fehlerhafte Abhängigkeiten. 
Prüfen Sie Ihre ON Bedingungen.", -"Spalte '%-.64s' wurde mit UNIQUE oder INDEX benutzt ohne als NOT NULL definiert zu sein.", -"Kann Funktion '%-.64s' nicht laden.", -"Kann Funktion '%-.64s' nicht initialisieren; %-.80s.", -"Keine Pfade gestattet für Shared Library.", -"Funktion '%-.64s' existiert schon.", -"Kann Shared Library '%-.64s' nicht öffnen. (Fehler: %d %-.64s)", -"Kann Funktion '%-.64s' in der Library nicht finden.", -"Funktion '%-.64s' ist nicht definiert.", -"Host blockiert wegen zu vieler Verbindungsfehler. Aufheben der Blockierung mit 'mysqladmin flush-hosts'.", -"Host hat keine Berechtigung, eine Verbindung zu diesem MySQL Server herzustellen.", -"Sie benutzen MySQL als anonymer User; diese User dürfen keine Passwörter ändern.", -"Sie müssen autorisiert sein zum UPDATE von Tabellen in der mysql Datenbank, um für andere Benutzer Passwörter ändern zu können.", -"Kann keinen passenden Datensatz in der User-Tabelle finden.", +"Sie sind nicht Eigentümer von Thread %lu", +"Keine Tabellen verwendet", +"Zu viele Strings für SET-Spalte %-.64s angegeben", +"Kann keinen eindeutigen Dateinamen für die Logdatei %-.64s erzeugen (1-999)\n", +"Tabelle '%-.64s' ist mit Lesesperre versehen und kann nicht aktualisiert werden", +"Tabelle '%-.64s' wurde nicht mit LOCK TABLES gesperrt", +"BLOB-Feld '%-.64s' darf keinen Vorgabewert (DEFAULT) haben", +"Unerlaubter Datenbankname '%-.64s'", +"Unerlaubter Tabellenname '%-.64s'", +"Die Ausführung des SELECT würde zu viele Datensätze untersuchen und wahrscheinlich sehr lange dauern. Bitte WHERE-Klausel überprüfen oder gegebenenfalls SET SQL_BIG_SELECTS=1 oder SET SQL_MAX_JOIN_SIZE=# verwenden", +"Unbekannter Fehler", +"Unbekannte Prozedur '%-.64s'", +"Falsche Parameterzahl für Prozedur '%-.64s'", +"Falsche Parameter für Prozedur '%-.64s'", +"Unbekannte Tabelle '%-.64s' in '%-.64s'", +"Feld '%-.64s' wurde zweimal angegeben", +"Falsche Verwendung einer Gruppierungsfunktion", +"Tabelle '%-.64s' verwendet eine Extension, die in dieser MySQL-Version nicht verfügbar ist", +"Eine Tabelle muß mindestens 1 Spalte besitzen", +"Tabelle '%-.64s' ist voll", +"Unbekannter Zeichensatz: '%-.64s'", +"Zu viele Tabellen. MySQL kann in einem Join maximal %d Tabellen verwenden", +"Zu viele Spalten", +"Zeilenlänge zu groß. Die maximale Spaltenlänge für den verwendeten Tabellentyp (ohne BLOB-Felder) beträgt %d. Einige Felder müssen in BLOB oder TEXT umgewandelt werden", +"Thread-Stack-Überlauf. Benutzt: %ld von %ld Stack. 'mysqld -O thread_stack=#' verwenen, um notfalls einen größeren Stack anzulegen", +"OUTER JOIN enthält fehlerhafte Abhängigkeiten. In ON verwendete Bedingungen überprüfen", +"Spalte '%-.64s' wurde mit UNIQUE oder INDEX benutzt, ist aber nicht als NOT NULL definiert", +"Kann Funktion '%-.64s' nicht laden", +"Kann Funktion '%-.64s' nicht initialisieren: %-.80s", +"Keine Pfade gestattet für Shared Library", +"Funktion '%-.64s' existiert schon", +"Kann Shared Library '%-.64s' nicht öffnen (Fehler: %d %-.64s)", +"Kann Funktion '%-.64s' in der Library nicht finden", +"Funktion '%-.64s' ist nicht definiert", +"Host '%-.64s' blockiert wegen zu vieler Verbindungsfehler. 
Aufheben der Blockierung mit 'mysqladmin flush-hosts'", +"Host '%-.64s' hat keine Berechtigung, sich mit diesem MySQL-Server zu verbinden", +"Sie benutzen MySQL als anonymer Benutzer und dürfen daher keine Passwörter ändern", +"Sie benötigen die Berechtigung zum Aktualisieren von Tabellen in der Datenbank 'mysql', um die Passwörter anderer Benutzer ändern zu können", +"Kann keinen passenden Datensatz in Tabelle 'user' finden", "Datensätze gefunden: %ld Geändert: %ld Warnungen: %ld", -"Kann keinen neuen Thread erzeugen (errno %d). Sollte nicht die Speichergrenze erreicht sein, bitte im Manual nach vorhanden OS-Abhängigen Fehlern nachschauen.", -"Spaltenzahl stimmt nicht mit der Anzahl der Werte überein in Reihe%ld", -"Kann Tabelle'%-.64s' nicht wieder öffnen", -"Unerlaubte Verwendung eines NULL-Wertes", -"Fehler '%-.64s' von regexp", -"Das Vermischen von GROUP Spalten (MIN(),MAX(),COUNT()...) mit Nicht-GROUP Spalten ist nicht erlaubt, sofern keine GROUP BY Klausel vorhanden ist.", -"Keine solche Berechtigung für User '%-.32s' auf Host '%-.64s'", -"%-.16s Kommando abgelehnt für User: '%-.32s@%-.64s' für Tabelle '%-.64s'", -"%-.16s Kommando abgelehnt für User: '%-.32s@%-.64s' in Spalte '%-.64s' in Tabelle '%-.64s'", -"Unzulässiges GRANT/REVOKE Kommando. Weiteres zum Thema Berechtigungen im Manual.", -"Das Host oder User Argument für GRANT ist zu lang", +"Kann keinen neuen Thread erzeugen (Fehler: %d). Sollte noch Speicher verfügbar sein, bitte im Handbuch wegen möglicher Fehler im Betriebssystem nachschlagen", +"Anzahl der Spalten stimmt nicht mit der Anzahl der Werte in Zeile %ld überein", +"Kann Tabelle'%-.64s' nicht erneut öffnen", +"Unerlaubte Verwendung eines NULL-Werts", +"regexp lieferte Fehler '%-.64s'", +"Das Vermischen von GROUP-Spalten (MIN(),MAX(),COUNT()...) mit Nicht-GROUP-Spalten ist nicht zulässig, wenn keine GROUP BY-Klausel vorhanden ist", +"Für Benutzer '%-.32s' auf Host '%-.64s' gibt es keine solche Berechtigung", +"%-.16s Befehl nicht erlaubt für Benutzer '%-.32s'@'%-.64s' und für Tabelle '%-.64s'", +"%-.16s Befehl nicht erlaubt für Benutzer '%-.32s'@'%-.64s' und Spalte '%-.64s' in Tabelle '%-.64s'", +"Unzulässiger GRANT- oder REVOKE-Befehl. Verfügbare Berechtigungen sind im Handbuch aufgeführt", +"Das Host- oder User-Argument für GRANT ist zu lang", "Tabelle '%-.64s.%-.64s' existiert nicht", "Keine solche Berechtigung für User '%-.32s' auf Host '%-.64s' an Tabelle '%-.64s'", -"Das used Kommando ist mit dieser MySQL Version nicht erlaubt", -"Fehler in der Syntax", -"Verzögerter Einfüge-Thread konnte den angeforderten Lock für Tabelle %-.64s nicht bekommen", -"Zu viele Delayed Threads in Verwendung", -"Abbruch der Verbindung %ld zur Datenbank: '%-.64s' User: '%-.64s' (%-.64s)", +"Der verwendete Befehl ist in dieser MySQL-Version nicht zulässig", +"Fehler in der SQL-Syntax. Bitte die korrekte Syntax im Handbuch nachschlagen (diese kann für verschiedene Server-Versionen unterschiedlich sein)", +"Verzögerter (DELAYED) Einfüge-Thread konnte die angeforderte Sperre für Tabelle '%-.64s' nicht erhalten", +"Zu viele verzögerte (DELAYED) Threads in Verwendung", +"Abbruch der Verbindung %ld zur Datenbank '%-.64s'. 
Benutzer: '%-.64s' (%-.64s)", "Empfangenes Paket ist größer als 'max_allowed_packet'", "Lese-Fehler bei einer Kommunikations-Pipe", -"Fehler von fcntl()", -"Empfangenes Paket ist nicht in Reihenfolge", -"Communikation-Packet läßt sich nicht entpacken", -"Fehler beim Lesen eines Communication-Packets", -"Timeout beim Lesen eines Communication-Packets", -"Fehler beim Schreiben eines Communication-Packets", -"Timeout beim Schreiben eines Communication-Packets", -"Ergebnisstring ist länger als max_allowed_packet", -"Der verwendete Tabellentyp unterstützt keine BLOB/TEXT Spalten", -"Der verwendete Tabellentyp unterstützt keine AUTO_INCREMENT Spalte", +"fcntl() lieferte einen Fehler", +"Pakete nicht in der richtigen Reihenfolge empfangen", +"Kommunikationspaket lässt sich nicht entpacken", +"Fehler beim Lesen eines Kommunikationspakets", +"Zeitüberschreitung beim Lesen eines Kommunikationspakets", +"Fehler beim Schreiben eines Kommunikationspakets", +"Zeitüberschreitung beim Schreiben eines Kommunikationspakets", +"Ergebnis ist länger als 'max_allowed_packet'", +"Der verwendete Tabellentyp unterstützt keine BLOB- und TEXT-Spalten", +"Der verwendete Tabellentyp unterstützt keine AUTO_INCREMENT-Spalten", "INSERT DELAYED kann nicht auf Tabelle '%-.64s' angewendet werden, da diese mit LOCK TABLES gesperrt ist", "Falscher Spaltenname '%-.100s'", "Der verwendete Tabellen-Handler kann die Spalte '%-.64s' nicht indizieren", -"Alle Tabelle in der MERGE-Tabelle sind nicht gleich definiert", -"Schreiben in Tabelle '%-.64s' nicht möglich wegen eines Unique Constraint", -"BLOB Spalte '%-.64s' wird in der Key-Definition ohne Längenangabe verwendet", -"Alle Teile eines PRIMARY KEY müssen als NOT NULL definiert sein; Wenn NULL benötigt wird sollte ein UNIQUE Key verwendet werden", -"Ergebnis besteht aus mehr als einer Reihe", -"Dieser Tabellentyp verlangt nach einem PRIMARY KEY", +"Nicht alle Tabellen in der MERGE-Tabelle sind gleich definiert", +"Schreiben in Tabelle '%-.64s' nicht möglich wegen einer eindeutigen Beschränkung (unique constraint)", +"BLOB- oder TEXT-Spalte '%-.64s' wird in der Schlüsseldefinition ohne Schlüssellängenangabe verwendet", +"Alle Teile eines PRIMARY KEY müssen als NOT NULL definiert sein. Wenn NULL in einem Schlüssel verwendet wird, muss ein UNIQUE-Schlüssel verwendet werden", +"Ergebnis besteht aus mehr als einer Zeile", +"Dieser Tabellentyp benötigt einen PRIMARY KEY", "Diese MySQL-Version ist nicht mit RAID-Unterstützung kompiliert", -"Unter Verwendung des Sicheren Updatemodes wurde versucht eine Tabelle zu updaten ohne eine KEY-Spalte in der WHERE-Klausel", -"Schlüssel '%-.64s' existiert nicht in der Tabelle '%-.64s'", +"MySQL läuft im sicheren Aktualisierungsmodus (safe update mode). 
Sie haben versucht, eine Tabelle zu aktualisieren, ohne in der WHERE-Klausel eine KEY-Spalte anzugeben", +"Schlüssel '%-.64s' existiert in der Tabelle '%-.64s' nicht", "Kann Tabelle nicht öffnen", -"Der Tabellen-Handler für diese Tabelle unterstützt kein %s", -"Keine Berechtigung dieses Kommando in einer Transaktion auszuführen", -"Fehler %d wärend COMMIT", -"Fehler %d wärend ROLLBACK", -"Fehler %d wärend FLUSH_LOGS", -"Fehler %d wärend CHECKPOINT", -"Verbindungsabbruch %ld zu db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)", -"Der Tabellenhandler für die Tabelle unterstützt kein Binary Tabellendump", -"Binlog wurde beendet wärend FLUSH MASTER", -"Neubau des Index der gedumpten Tabelle '%-.64s' fehlgeschlagen", +"Die Speicher-Engine für diese Tabelle unterstützt kein %s", +"Sie dürfen diesen Befehl nicht in einer Transaktion ausführen", +"Fehler %d beim COMMIT", +"Fehler %d beim ROLLBACK", +"Fehler %d bei FLUSH_LOGS", +"Fehler %d bei CHECKPOINT", +"Verbindungsabbruch %ld zur Datenbank '%-.64s'. Benutzer: '%-.32s', Host: `%-.64s' (%-.64s)", +"Die Speicher-Engine für die Tabelle unterstützt keinen binären Tabellen-Dump", +"Binlog geschlossen. Kann RESET MASTER nicht ausführen", +"Neuerstellung des Indizes der Dump-Tabelle '%-.64s' fehlgeschlagen", "Fehler vom Master: '%-.64s'", "Netzfehler beim Lesen vom Master", "Netzfehler beim Schreiben zum Master", -"Kann keinen FULLTEXT-Index finden der der Spaltenliste entspricht", -"Kann das aktuelle Kommando wegen aktiver Tabellensperre oder aktiver Transaktion nicht ausführen", -"Unbekannte System-Variabel '%-.64s'", -"Tabelle '%-.64s' ist als defekt makiert und sollte repariert werden", -"Tabelle '%-.64s' ist als defekt makiert und der letzte (automatische) Reparaturversuch schlug fehl.", -"Warnung: Das Rollback konnte bei einigen Tabellen, die nicht mittels Transaktionen geändert wurden, nicht ausgeführt werden.", -"Multi-Statement Transaktionen benötigen mehr als 'max_binlog_cache_size' Bytes An Speicher. Diese mysqld-Variabel vergrössern und nochmal versuchen.", -"Diese Operation kann nicht bei einem aktiven Slave durchgeführt werden. Das Kommand SLAVE STOP muss zuerst ausgeführt werden.", -"Diese Operationbenötigt einen aktiven Slave. Slave konfigurieren und mittels SLAVE START aktivieren.", -"Der Server ist nicht als Slave konfigiriert. Im Konfigurations-File oder mittel CHANGE MASTER TO beheben.", +"Kann keinen FULLTEXT-Index finden, der der Spaltenliste entspricht", +"Kann den angegebenen Befehl wegen einer aktiven Tabellensperre oder einer aktiven Transaktion nicht ausführen", +"Unbekannte Systemvariable '%-.64s'", +"Tabelle '%-.64s' ist als defekt markiert und sollte repariert werden", +"Tabelle '%-.64s' ist als defekt markiert und der letzte (automatische?) Reparaturversuch schlug fehl", +"Änderungen an einigen nicht transaktionalen Tabellen konnten nicht zurückgerollt werden", +"Transaktionen, die aus mehreren Befehlen bestehen, benötigen mehr als 'max_binlog_cache_size' Bytes an Speicher. Diese mysqld-Variable bitte vergrössern und erneut versuchen", +"Diese Operation kann nicht bei einem aktiven Slave durchgeführt werden. Bitte zuerst STOP SLAVE ausführen", +"Diese Operation benötigt einen aktiven Slave. Bitte Slave konfigurieren und mittels START SLAVE aktivieren", +"Der Server ist nicht als Slave konfiguriert. Bitte in der Konfigurationsdatei oder mittels CHANGE MASTER TO beheben", "Could not initialize master info structure, more error messages can be found in the MySQL error log", -"Konnte keinen Slave-Thread starten. 
System-Resourcen überprüfen.", -"Benutzer %-.64s hat mehr als 'max_user_connections' aktive Verbindungen", -"Bei der Verwendung mit SET dürfen nur Constante Ausdrücke verwendet werden", -"Lock wait timeout exceeded", -"The total number of locks exceeds the lock table size", -"Update locks cannot be acquired during a READ UNCOMMITTED transaction", -"DROP DATABASE not allowed while thread is holding global read lock", -"CREATE DATABASE not allowed while thread is holding global read lock", -"Wrong arguments to %s", -"%-.32s@%-.64s is not allowed to create new users", -"Incorrect table definition; All MERGE tables must be in the same database", -"Deadlock found when trying to get lock; Try restarting transaction", -"The used table type doesn't support FULLTEXT indexes", -"Cannot add foreign key constraint", -"Cannot add a child row: a foreign key constraint fails", -"Cannot delete a parent row: a foreign key constraint fails", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Error when executing command %s: %-.128s", -"Wrong usage of %s and %s", -"The used SELECT statements have a different number of columns", -"Can't execute the query because you have a conflicting read lock", -"Mixing of transactional and non-transactional tables is disabled", -"Option '%s' used twice in statement", -"User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied. You need the %-.128s privilege for this operation", -"Variable '%-.64s' is a LOCAL variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Wrong argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Wrong usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules" -"Variable '%-.64s' is a %s variable" +"Konnte keinen Slave-Thread starten. Bitte System-Ressourcen überprüfen", +"Benutzer '%-.64s' hat mehr als max_user_connections aktive Verbindungen", +"Bei SET dürfen nur konstante Ausdrücke verwendet werden", +"Beim Warten auf eine Sperre wurde die zulässige Wartezeit überschritten. Bitte versuchen Sie, die Transaktion neu zu starten", +"Die Gesamtzahl der Sperren überschreitet die Größe der Sperrtabelle", +"Während einer READ UNCOMMITED-Transaktion können keine UPDATE-Sperren angefordert werden", +"DROP DATABASE ist nicht erlaubt, solange der Thread eine globale Lesesperre hält", +"CREATE DATABASE ist nicht erlaubt, solange der Thread eine globale Lesesperre hält", +"Falsche Argumente für %s", +"'%-.32s'@'%-.64s' is nicht berechtigt, neue Benutzer hinzuzufügen", +"Falsche Tabellendefinition. Alle MERGE-Tabellen müssen sich in derselben Datenbank befinden", +"Beim Versuch, eine Sperre anzufordern, ist ein Deadlock aufgetreten. 
Versuchen Sie, die Transaktion erneut zu starten", +"Der verwendete Tabellentyp unterstützt keine FULLTEXT-Indizes", +"Fremdschlüssel-Beschränkung konnte nicht hinzugefügt werden", +"Hinzufügen eines Kind-Datensatzes schlug aufgrund einer Fremdschlüssel-Beschränkung fehl", +"Löschen eines Eltern-Datensatzes schlug aufgrund einer Fremdschlüssel-Beschränkung fehl", +"Fehler bei der Verbindung zum Master: %-.128s", +"Beim Ausführen einer Abfrage auf dem Master trat ein Fehler auf: %-.128s", +"Fehler beim Ausführen des Befehls %s: %-.128s", +"Falsche Verwendung von %s und %s", +"Die verwendeten SELECT-Befehle liefern eine unterschiedliche Anzahl von Spalten zurück", +"Augrund eines READ LOCK-Konflikts kann die Abfrage nicht ausgeführt werden", +"Die gleichzeitige Verwendung von Tabellen mit und ohne Transaktionsunterstützung ist deaktiviert", +"Option '%s' wird im Befehl zweimal verwendet", +"Benutzer '%-.64s' hat die Ressourcenbeschränkung '%s' überschritten (aktueller Wert: %ld)", +"Befehl nicht zulässig. Hierfür wird die Berechtigung %-.128s benötigt", +"Variable '%-.64s' ist eine lokale Variable und kann nicht mit SET GLOBAL verändert werden", +"Variable '%-.64s' ist eine globale Variable und muss mit SET GLOBAL verändert werden", +"Variable '%-.64s' hat keinen Vorgabewert", +"Variable '%-.64s' kann nicht auf '%-.64s' gesetzt werden", +"Falscher Argumenttyp für Variable '%-.64s'", +"Variable '%-.64s' kann nur verändert, nicht gelesen werden", +"Falsche Verwendung oder Platzierung von '%s'", +"Diese MySQL-Version unterstützt '%s' nicht", +"Schwerer Fehler %d: '%-.128s vom Master beim Lesen des binären Logs aufgetreten", +"Slave-SQL-Thread hat die Abfrage aufgrund von replicate-*-table-Regeln ignoriert", +"Variable '%-.64s' is a %s variable", +"Falsche Fremdschlüssel-Definition für '%-64s': %s", +"Schlüssel- und Tabellenverweis passen nicht zusammen", +"Operand solle %d Spalte(n) enthalten", +"Unterabfrage lieferte mehr als einen Datensatz zurück", +"Unbekannter Prepared-Statement-Handler (%.*s) für %s angegeben", +"Die Hilfe-Datenbank ist beschädigt oder existiert nicht", +"Zyklischer Verweis in Unterabfragen", +"Spalte '%s' wird von %s nach %s umgewandelt", +"Verweis '%-.64s' wird nicht unterstützt (%s)", +"Für jede abgeleitete Tabelle muss ein eigener Alias angegeben werden", +"Select %u wurde während der Optimierung reduziert", +"Tabelle '%-.64s', die in einem der SELECT-Befehle verwendet wurde, kann nicht in %-.32s verwendet werden", +"Client unterstützt das vom Server erwartete Authentifizierungsprotokoll nicht. Bitte aktualisieren Sie Ihren MySQL-Client", +"Alle Teile eines SPATIAL KEY müssen als NOT NULL deklariert sein", +"COLLATION '%s' ist für CHARACTER SET '%s' ungültig", +"Slave läuft bereits", +"Slave wurde bereits angehalten", +"Unkomprimierte Daten sind zu groß. 
Die maximale Größe beträgt %d",
+"ZLIB: Steht nicht genug Speicher zur Verfügung",
+"ZLIB: Im Ausgabepuffer ist nicht genug Platz vorhanden (wahrscheinlich wurde die Länge der unkomprimierten Daten beschädigt)",
+"ZLIB: Eingabedaten beschädigt",
+"%d Zeile(n) durch GROUP_CONCAT() abgeschnitten",
+"Anzahl der Datensätze in Zeile %ld geringer als Anzahl der Spalten",
+"Anzahl der Datensätze in Zeile %ld größer als Anzahl der Spalten",
+"Daten abgeschnitten, NULL für NOT NULL-Spalte '%s' in Zeile %ld angegeben",
+"Daten abgeschnitten, außerhalb des Wertebereichs für Spalte '%s' in Zeile %ld",
+"Daten abgeschnitten für Spalte '%s' in Zeile %ld",
+"Speicher-Engine %s wird für Tabelle '%s' benutzt",
+"Unerlaubte Vermischung der Kollationen (%s,%s) und (%s,%s) für die Operation '%s'",
+"Kann einen oder mehrere der angegebenen Benutzer nicht löschen",
+"Kann nicht alle Berechtigungen widerrufen, die einem oder mehreren der angegebenen Benutzer gewährt wurden",
+"Unerlaubte Vermischung der Kollationen (%s,%s), (%s,%s), (%s,%s) für die Operation '%s'",
+"Unerlaubte Vermischung der Kollationen für die Operation '%s'",
+"Variable '%-.64s' ist keine Variablen-Komponente (kann nicht als XXXX.variablen_name verwendet werden)",
+"Unbekannte Kollation: '%-.64s'",
+"SSL-Parameter in CHANGE MASTER werden ignoriert, weil dieser MySQL-Slave ohne SSL-Unterstützung kompiliert wurde. Sie können aber später verwendet werden, wenn der MySQL-Slave mit SSL gestartet wird",
+"Server läuft im Modus --secure-auth, aber '%s'@'%s' hat ein Passwort im alten Format. Bitte Passwort ins neue Format ändern",
+"Feld oder Verweis '%-.64s%s%-.64s%s%-.64s' im SELECT-Befehl Nr. %d wurde im SELECT-Befehl Nr. %d aufgelöst",
+"Falscher Parameter oder falsche Kombination von Parametern für START SLAVE UNTIL",
+"Es wird empfohlen, mit --skip-slave-start zu starten, wenn mit START SLAVE UNTIL eine Schritt-für-Schritt-Replikation ausgeführt wird. Ansonsten gibt es Probleme, wenn der Slave-Server unerwartet neu startet",
+"SQL-Thread soll nicht gestartet werden. Daher werden UNTIL-Optionen ignoriert",
+"Incorrect index name '%-.100s'",
+"Incorrect catalog name '%-.100s'",
+"Query cache failed to set size %lu, new query cache size is %lu",
+"Column '%-.64s' cannot be part of FULLTEXT index",
+"Unknown key cache '%-.100s'",
+"MySQL is started in --skip-name-resolve mode.
You need to restart it without this switch for this grant to work", +"Unknown table engine '%s'", +"'%s' is deprecated, use '%s' instead", +"The target table %-.100s of the %s is not updateable", +"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", +"The MySQL server is running with the %s option so it cannot execute this statement", +"Column '%-.100s' has duplicated value '%-.64s' in %s" +"Truncated wrong %-.32s value: '%-.128s'" +"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" +"Invalid ON UPDATE clause for '%-.64s' column", +"This command is not supported in the prepared statement protocol yet", +"Got error %d '%-.100s' from %s", +"Got temporary error %d '%-.100s' from %s", +"Unknown or incorrect time zone: '%-.64s'", +"Invalid TIMESTAMP value in column '%s' at row %ld", +"Invalid %s character string: '%.64s'", +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" +"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/greek/errmsg.txt b/sql/share/greek/errmsg.txt index 2a2bee5d69a..979091a566c 100644 --- a/sql/share/greek/errmsg.txt +++ b/sql/share/greek/errmsg.txt @@ -14,14 +14,16 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +character-set=greek + "hashchk", "isamchk", "Ï×É", "ÍÁÉ", "Áäýíáôç ç äçìéïõñãßá ôïõ áñ÷åßïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)", "Áäýíáôç ç äçìéïõñãßá ôïõ ðßíáêá '%-.64s' (êùäéêüò ëÜèïõò: %d)", -"Áäýíáôç ç äçìéïõñãßá ôçò âÜóçò äåäïìÝíùí '%-.64s'. (êùäéêüò ëÜèïõò: %d)", -"Áäýíáôç ç äçìéïõñãßá ôçò âÜóçò äåäïìÝíùí '%-.64s'. Ç âÜóç äåäïìÝíùí õðÜñ÷åé Þäç", +"Áäýíáôç ç äçìéïõñãßá ôçò âÜóçò äåäïìÝíùí '%-.64s' (êùäéêüò ëÜèïõò: %d)", +"Áäýíáôç ç äçìéïõñãßá ôçò âÜóçò äåäïìÝíùí '%-.64s'; Ç âÜóç äåäïìÝíùí õðÜñ÷åé Þäç", "Áäýíáôç ç äéáãñáöÞ ôçò âÜóçò äåäïìÝíùí '%-.64s'. Ç âÜóç äåäïìÝíùí äåí õðÜñ÷åé", "ÐáñïõóéÜóôçêå ðñüâëçìá êáôÜ ôç äéáãñáöÞ ôçò âÜóçò äåäïìÝíùí (áäýíáôç ç äéáãñáöÞ '%-.64s', êùäéêüò ëÜèïõò: %d)", "ÐáñïõóéÜóôçêå ðñüâëçìá êáôÜ ôç äéáãñáöÞ ôçò âÜóçò äåäïìÝíùí (áäýíáôç ç äéáãñáöÞ ôïõ öáêÝëëïõ '%-.64s', êùäéêüò ëÜèïõò: %d)", @@ -30,12 +32,12 @@ "Áäýíáôç ç ëÞøç ðëçñïöïñéþí ãéá ôçí êáôÜóôáóç ôïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)", "Ï öÜêåëëïò åñãáóßáò äåí âñÝèçêå (êùäéêüò ëÜèïõò: %d)", "Ôï áñ÷åßï äåí ìðïñåß íá êëåéäùèåß (êùäéêüò ëÜèïõò: %d)", -"Äåí åßíáé äõíáôü íá áíïé÷ôåß ôï áñ÷åßï: '%-.64s'. (êùäéêüò ëÜèïõò: %d)", +"Äåí åßíáé äõíáôü íá áíïé÷ôåß ôï áñ÷åßï: '%-.64s' (êùäéêüò ëÜèïõò: %d)", "Äåí âñÝèçêå ôï áñ÷åßï: '%-.64s' (êùäéêüò ëÜèïõò: %d)", "Äåí åßíáé äõíáôü íá äéáâáóôåß ï öÜêåëëïò ôïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)", "Áäýíáôç ç áëëáãÞ ôïõ ôñÝ÷ïíôïò êáôáëüãïõ óå '%-.64s' (êùäéêüò ëÜèïõò: %d)", "Ç åããñáöÞ Ý÷åé áëëÜîåé áðü ôçí ôåëåõôáßá öïñÜ ðïõ áíáóýñèçêå áðü ôïí ðßíáêá '%-.64s'", -"Äåí õðÜñ÷åé ÷þñïò óôï äßóêï (%s). Ðáñáêáëþ, ðåñéìÝíåôå íá åëåõèåñùèåß ÷þñïò....", +"Äåí õðÜñ÷åé ÷þñïò óôï äßóêï (%s). Ðáñáêáëþ, ðåñéìÝíåôå íá åëåõèåñùèåß ÷þñïò...", "Äåí åßíáé äõíáôÞ ç êáôá÷þñçóç, ç ôéìÞ õðÜñ÷åé Þäç óôïí ðßíáêá '%-.64s'", "ÐáñïõóéÜóôçêå ðñüâëçìá êëåßíïíôáò ôï '%-.64s' (êùäéêüò ëÜèïõò: %d)", "Ðñüâëçìá êáôÜ ôçí áíÜãíùóç ôïõ áñ÷åßïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)", @@ -48,7 +50,7 @@ "Ï ÷åéñéóôÞò ðßíáêá (table handler) ãéá '%-.64s' äåí äéáèÝôåé áõôÞ ôçí åðéëïãÞ", "Áäýíáôç ç áíåýñåóç åããñáöÞò óôï '%-.64s'", "ËÜèïò ðëçñïöïñßåò óôï áñ÷åßï: '%-.64s'", -"ËÜèïò áñ÷åßï ôáîéíüìéóçò (key file) ãéá ôïí ðßíáêá: '%-.64s'. 
Ðáñáêáëþ, äéïñèþóôå ôï!", +"ËÜèïò áñ÷åßï ôáîéíüìéóçò (key file) ãéá ôïí ðßíáêá: '%-.64s'; Ðáñáêáëþ, äéïñèþóôå ôï!", "Ðáëáéü áñ÷åßï ôáîéíüìéóçò (key file) ãéá ôïí ðßíáêá '%-.64s'; Ðáñáêáëþ, äéïñèþóôå ôï!", "'%-.64s' åðéôñÝðåôáé ìüíï ç áíÜãíùóç", "Äåí õðÜñ÷åé äéáèÝóéìç ìíÞìç. ÐñïóðáèÞóôå ðÜëé, åðáíåêéíþíôáò ôç äéáäéêáóßá (demon) (÷ñåéÜæïíôáé %d bytes)", @@ -58,8 +60,8 @@ "Ðñüâëçìá ìå ôç äéáèÝóéìç ìíÞìç (Out of thread space/memory)", "Äåí Ýãéíå ãíùóôü ôï hostname ãéá ôçí address óáò", "Ç áíáãíþñéóç (handshake) äåí Ýãéíå óùóôÜ", -"Äåí åðéôÝñåôáé ç ðñüóâáóç óôï ÷ñÞóôç: '%-.32s@%-.64s' óôç âÜóç äåäïìÝíùí '%-.64s'", -"Äåí åðéôÝñåôáé ç ðñüóâáóç óôï ÷ñÞóôç: '%-.32s@%-.64s' (÷ñÞóç password: %s)", +"Äåí åðéôÝñåôáé ç ðñüóâáóç óôï ÷ñÞóôç: '%-.32s'@'%-.64s' óôç âÜóç äåäïìÝíùí '%-.64s'", +"Äåí åðéôÝñåôáé ç ðñüóâáóç óôï ÷ñÞóôç: '%-.32s'@'%-.64s' (÷ñÞóç password: %s)", "Äåí åðéëÝ÷èçêå âÜóç äåäïìÝíùí", "Áãíùóôç åíôïëÞ", "Ôï ðåäßï '%-.64s' äåí ìðïñåß íá åßíáé êåíü (null)", @@ -107,7 +109,7 @@ "Äåí åßíáé äõíáôÞ ç äéáãñáöÞ üëùí ôùí ðåäßùí ìå ALTER TABLE. Ðáñáêáëþ ÷ñçóéìïðïéåßóôå DROP TABLE", "Áäýíáôç ç äéáãñáöÞ (DROP) '%-.64s'. Ðáñáêáëþ åëÝãîôå áí ôï ðåäßï/êëåéäß õðÜñ÷åé", "ÅããñáöÝò: %ld ÅðáíáëÞøåéò: %ld ÐñïåéäïðïéÞóåéò: %ld", -"INSERT TABLE '%-.64s' äåí åðéôñÝðåôáé óôï FROM table list", +"You can't specify target table '%-.64s' for update in FROM clause", "Áãíùóôï thread id: %lu", "Äåí åßóèå owner ôïõ thread %lu", "Äåí ÷ñçóéìïðïéÞèçêáí ðßíáêåò", @@ -149,16 +151,16 @@ "ÐñÝðåé íá Ý÷åôå äéêáßùìá äéüñèùóçò ðéíÜêùí (update) óôç âÜóç äåäïìÝíùí mysql ãéá íá ìðïñåßôå íá áëëÜîåôå ôá passwords Üëëùí ÷ñçóôþí", "Äåí åßíáé äõíáôÞ ç áíåýñåóç ôçò áíôßóôïé÷çò åããñáöÞò óôïí ðßíáêá ôùí ÷ñçóôþí", "Rows matched: %ld Changed: %ld Warnings: %ld", -"Can't create a new thread (errno %d). If you are not out of available memory, you can consult the manual for a possible OS-dependent bug", +"Can't create a new thread (errno %d); if you are not out of available memory, you can consult the manual for a possible OS-dependent bug", "Column count doesn't match value count at row %ld", "Can't reopen table: '%-.64s'", "Invalid use of NULL value", "Got error '%-.64s' from regexp", -"Mixing of GROUP columns (MIN(),MAX(),COUNT()...) with no GROUP columns is illegal if there is no GROUP BY clause", +"Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) with no GROUP columns is illegal if there is no GROUP BY clause", "There is no such grant defined for user '%-.32s' on host '%-.64s'", -"%-.16s command denied to user: '%-.32s@%-.64s' for table '%-.64s'", -"%-.16s command denied to user: '%-.32s@%-.64s' for column '%-.64s' in table '%-.64s'", -"Illegal GRANT/REVOKE command. 
Please consult the manual which privileges can be used.", +"%-.16s command denied to user '%-.32s'@'%-.64s' for table '%-.64s'", +"%-.16s command denied to user '%-.32s'@'%-.64s' for column '%-.64s' in table '%-.64s'", +"Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used.", "The host or user argument to GRANT is too long", "Table '%-.64s.%-.64s' doesn't exist", "There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'", @@ -166,8 +168,8 @@ "You have an error in your SQL syntax", "Delayed insert thread couldn't get requested lock for table %-.64s", "Too many delayed threads in use", -"Aborted connection %ld to db: '%-.64s' user: '%-32s' (%-.64s)", -"Got a packet bigger than 'max_allowed_packet'", +"Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)", +"Got a packet bigger than 'max_allowed_packet' bytes", "Got a read error from the connection pipe", "Got an error from fcntl()", "Got packets out of order", @@ -176,7 +178,7 @@ "Got timeout reading communication packets", "Got an error writing communication packets", "Got timeout writing communication packets", -"Result string is longer than max_allowed_packet", +"Result string is longer than 'max_allowed_packet' bytes", "The used table type doesn't support BLOB/TEXT columns", "The used table type doesn't support AUTO_INCREMENT columns", "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES", @@ -185,7 +187,7 @@ "All tables in the MERGE table are not identically defined", "Can't write, because of unique constraint, to table '%-.64s'", "BLOB column '%-.64s' used in key specification without a key length", -"All parts of a PRIMARY KEY must be NOT NULL; If you need NULL in a key, use UNIQUE instead", +"All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead", "Result consisted of more than one row", "This table type requires a primary key", "This version of MySQL is not compiled with RAID support", @@ -210,24 +212,24 @@ "Unknown system variable '%-.64s'", "Table '%-.64s' is marked as crashed and should be repaired", "Table '%-.64s' is marked as crashed and last (automatic?) repair failed", -"Warning: Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage. 
Increase this mysqld variable and try again", -"This operation cannot be performed with a running slave, run SLAVE STOP first", -"This operation requires a running slave, configure slave and do SLAVE START", -"The server is not configured as slave, fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure, more error messages can be found in the MySQL error log", -"Could not create slave thread, check system resources", +"Some non-transactional changed tables couldn't be rolled back", +"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", +"This operation cannot be performed with a running slave; run STOP SLAVE first", +"This operation requires a running slave; configure slave and do START SLAVE", +"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", +"Could not initialize master info structure; more error messages can be found in the MySQL error log", +"Could not create slave thread; check system resources", "User %-.64s has already more than 'max_user_connections' active connections", "You may only use constant expressions with SET", -"Lock wait timeout exceeded", +"Lock wait timeout exceeded; try restarting transaction", "The total number of locks exceeds the lock table size", "Update locks cannot be acquired during a READ UNCOMMITTED transaction", "DROP DATABASE not allowed while thread is holding global read lock", "CREATE DATABASE not allowed while thread is holding global read lock", -"Wrong arguments to %s", -"%-.32s@%-.64s is not allowed to create new users", -"Incorrect table definition; All MERGE tables must be in the same database", -"Deadlock found when trying to get lock; Try restarting transaction", +"Incorrect arguments to %s", +"'%-.32s'@'%-.64s' is not allowed to create new users", +"Incorrect table definition; all MERGE tables must be in the same database", +"Deadlock found when trying to get lock; try restarting transaction", "The used table type doesn't support FULLTEXT indexes", "Cannot add foreign key constraint", "Cannot add a child row: a foreign key constraint fails", @@ -235,22 +237,86 @@ "Error connecting to master: %-.128s", "Error running query on master: %-.128s", "Error when executing command %s: %-.128s", -"Wrong usage of %s and %s", +"Incorrect usage of %s and %s", "The used SELECT statements have a different number of columns", "Can't execute the query because you have a conflicting read lock", "Mixing of transactional and non-transactional tables is disabled", "Option '%s' used twice in statement", "User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied. 
You need the %-.128s privilege for this operation", -"Variable '%-.64s' is a LOCAL variable and can't be used with SET GLOBAL", +"Access denied; you need the %-.128s privilege for this operation", +"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", "Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", "Variable '%-.64s' doesn't have a default value", "Variable '%-.64s' can't be set to the value of '%-.64s'", -"Wrong argument type to variable '%-.64s'", +"Incorrect argument type to variable '%-.64s'", "Variable '%-.64s' can only be set, not read", -"Wrong usage/placement of '%s'", +"Incorrect usage/placement of '%s'", "This version of MySQL doesn't yet support '%s'", "Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules" -"Variable '%-.64s' is a %s variable" +"Slave SQL thread ignored the query because of replicate-*-table rules", +"Variable '%-.64s' is a %s variable", +"Incorrect foreign key definition for '%-.64s': %s", +"Key reference and table reference don't match", +"Operand should contain %d column(s)", +"Subquery returns more than 1 row", +"Unknown prepared statement handler (%.*s) given to %s", +"Help database is corrupt or does not exist", +"Cyclic reference on subqueries", +"Converting column '%s' from %s to %s", +"Reference '%-.64s' not supported (%s)", +"Every derived table must have its own alias", +"Select %u was reduced during optimization", +"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", +"Client does not support authentication protocol requested by server; consider upgrading MySQL client", +"All parts of a SPATIAL index must be NOT NULL", +"COLLATION '%s' is not valid for CHARACTER SET '%s'", +"Slave is already running", +"Slave has already been stopped", +"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", +"ZLIB: Not enough memory", +"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", +"ZLIB: Input data corrupted", +"%d line(s) were cut by GROUP_CONCAT()", +"Row %ld doesn't contain data for all columns", +"Row %ld was truncated; it contained more data than there were input columns", +"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", +"Data truncated; out of range for column '%s' at row %ld", +"Data truncated for column '%s' at row %ld", +"Using storage engine %s for table '%s'", +"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", +"Can't drop one or more of the requested users", +"Can't revoke all privileges, grant for one or more of the requested users", +"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", +"Illegal mix of collations for operation '%s'", +"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", +"Unknown collation: '%-.64s'", +"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", +"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", +"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", +"Incorrect parameter or combination of parameters for START SLAVE UNTIL", +"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; 
otherwise, you are not safe in case of unexpected slave's mysqld restart", +"SQL thread is not to be started so UNTIL options are ignored", +"Incorrect index name '%-.100s'", +"Incorrect catalog name '%-.100s'", +"Query cache failed to set size %lu, new query cache size is %lu", +"Column '%-.64s' cannot be part of FULLTEXT index", +"Unknown key cache '%-.100s'", +"MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work", +"Unknown table engine '%s'", +"'%s' is deprecated, use '%s' instead", +"The target table %-.100s of the %s is not updateable", +"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", +"The MySQL server is running with the %s option so it cannot execute this statement", +"Column '%-.100s' has duplicated value '%-.64s' in %s" +"Truncated wrong %-.32s value: '%-.128s'" +"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" +"Invalid ON UPDATE clause for '%-.64s' column", +"This command is not supported in the prepared statement protocol yet", +"Got error %d '%-.100s' from %s", +"Got temporary error %d '%-.100s' from %s", +"Unknown or incorrect time zone: '%-.64s'", +"Invalid TIMESTAMP value in column '%s' at row %ld", +"Invalid %s character string: '%.64s'", +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" +"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/hungarian/errmsg.txt b/sql/share/hungarian/errmsg.txt index f998116410f..5d32c5b9cc2 100644 --- a/sql/share/hungarian/errmsg.txt +++ b/sql/share/hungarian/errmsg.txt @@ -19,6 +19,8 @@ Updated May, 2000 */ +character-set=latin2 + "hashchk", "isamchk", "NEM", @@ -35,8 +37,8 @@ "A(z) '%-.64s' statusza nem allapithato meg (hibakod: %d)", "A munkakonyvtar nem allapithato meg (hibakod: %d)", "A file nem zarolhato. (hibakod: %d)", -"A '%-.64s' file nem nyithato meg. (hibakod: %d)", -"A(z) '%-.64s' file nem talalhato. (hibakod: %d)", +"A '%-.64s' file nem nyithato meg (hibakod: %d)", +"A(z) '%-.64s' file nem talalhato (hibakod: %d)", "A(z) '%-.64s' konyvtar nem olvashato. (hibakod: %d)", "Konyvtarvaltas nem lehetseges a(z) '%-.64s'-ba. (hibakod: %d)", "A(z) '%-.64s' tablaban talalhato rekord megvaltozott az utolso olvasas ota", @@ -53,8 +55,8 @@ "A(z) '%-.64s' tablakezelonek nincs ilyen opcioja", "Nem talalhato a rekord '%-.64s'-ben", "Ervenytelen info a file-ban: '%-.64s'", -"Ervenytelen kulcsfile a tablahoz: '%-.64s'. Probalja kijavitani!", -"Regi kulcsfile a '%-.64s'tablahoz; Probalja kijavitani!", +"Ervenytelen kulcsfile a tablahoz: '%-.64s'; probalja kijavitani!", +"Regi kulcsfile a '%-.64s'tablahoz; probalja kijavitani!", "'%-.64s' irasvedett", "Nincs eleg memoria. Inditsa ujra a demont, es probalja ismet. (%d byte szukseges.)", "Nincs eleg memoria a rendezeshez. Novelje a rendezo demon puffermeretet", @@ -63,8 +65,8 @@ "Elfogyott a thread-memoria", "A gepnev nem allapithato meg a cimbol", "A kapcsolatfelvetel nem sikerult (Bad handshake)", -"A(z) '%-.32s@%-.64s' felhasznalo szamara tiltott eleres az '%-.64s' adabazishoz.", -"A(z) '%-.32s@%-.64s' felhasznalo szamara tiltott eleres. (Hasznalja a jelszot: %s)", +"A(z) '%-.32s'@'%-.64s' felhasznalo szamara tiltott eleres az '%-.64s' adabazishoz.", +"A(z) '%-.32s'@'%-.64s' felhasznalo szamara tiltott eleres. 
(Hasznalja a jelszot: %s)", "Nincs kivalasztott adatbazis", "Ervenytelen parancs", "A(z) '%-.64s' oszlop erteke nem lehet nulla", @@ -112,7 +114,7 @@ "Az osszes mezo nem torolheto az ALTER TABLE-lel. Hasznalja a DROP TABLE-t helyette", "A DROP '%-.64s' nem lehetseges. Ellenorizze, hogy a mezo/kulcs letezik-e", "Rekordok: %ld Duplikalva: %ld Warnings: %ld", -"INSERT TABLE '%-.64s' nem engedelyezett a FROM table listabol", +"You can't specify target table '%-.64s' for update in FROM clause", "Ervenytelen szal (thread) id: %lu", "A %lu thread-nek mas a tulajdonosa", "Nincs hasznalt tabla", @@ -123,7 +125,7 @@ "A(z) '%-.64s' blob objektumnak nem lehet alapertelmezett erteke", "Hibas adatbazisnev: '%-.100s'", "Hibas tablanev: '%-.100s'", -"A SELECT tul sok rekordot fog megvizsgalni es nagyon sokaig fog tartani. Ellenorizze a WHERE-t es hasznalja a SET SQL_BIG_SELECTS=1 beallitast, ha a SELECT ok", +"A SELECT tul sok rekordot fog megvizsgalni es nagyon sokaig fog tartani. Ellenorizze a WHERE-t es hasznalja a SET SQL_BIG_SELECTS=1 beallitast, ha a SELECT okay", "Ismeretlen hiba", "Ismeretlen eljaras: '%-.64s'", "Rossz parameter a(z) '%-.64s'eljaras szamitasanal", @@ -161,8 +163,8 @@ "'%-.64s' hiba a regularis kifejezes hasznalata soran (regexp)", "A GROUP mezok (MIN(),MAX(),COUNT()...) kevert hasznalata nem lehetseges GROUP BY hivatkozas nelkul", "A '%-.32s' felhasznalonak nincs ilyen joga a '%-.64s' host-on", -"%-.16s parancs a '%-.32s@%-.64s' felhasznalo szamara nem engedelyezett a '%-.64s' tablaban", -"%-.16s parancs a '%-.32s@%-.64s' felhasznalo szamara nem engedelyezett a '%-.64s' mezo eseten a '%-.64s' tablaban", +"%-.16s parancs a '%-.32s'@'%-.64s' felhasznalo szamara nem engedelyezett a '%-.64s' tablaban", +"%-.16s parancs a '%-.32s'@'%-.64s' felhasznalo szamara nem engedelyezett a '%-.64s' mezo eseten a '%-.64s' tablaban", "Ervenytelen GRANT/REVOKE parancs. Kerem, nezze meg a kezikonyvben, milyen jogok lehetsegesek", "A host vagy felhasznalo argumentuma tul hosszu a GRANT parancsban", "A '%-.64s.%s' tabla nem letezik", @@ -181,7 +183,7 @@ "Idotullepes a kommunikacios adatcsomagok olvasasa soran", "Hiba a kommunikacios csomagok irasa soran", "Idotullepes a kommunikacios csomagok irasa soran", -"Ez eredmeny sztring nagyobb, mint a lehetseges maximum: max_allowed_packet", +"Ez eredmeny sztring nagyobb, mint a lehetseges maximum: 'max_allowed_packet'", "A hasznalt tabla tipus nem tamogatja a BLOB/TEXT mezoket", "A hasznalt tabla tipus nem tamogatja az AUTO_INCREMENT tipusu mezoket", "Az INSERT DELAYED nem hasznalhato a '%-.64s' tablahoz, mert a tabla zarolt (LOCK TABLES)", @@ -215,24 +217,24 @@ "Unknown system variable '%-.64s'", "Table '%-.64s' is marked as crashed and should be repaired", "Table '%-.64s' is marked as crashed and last (automatic?) repair failed", -"Warning: Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage. 
Increase this mysqld variable and try again", -"This operation cannot be performed with a running slave, run SLAVE STOP first", -"This operation requires a running slave, configure slave and do SLAVE START", -"The server is not configured as slave, fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure, more error messages can be found in the MySQL error log", -"Could not create slave thread, check system resources", +"Some non-transactional changed tables couldn't be rolled back", +"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", +"This operation cannot be performed with a running slave; run STOP SLAVE first", +"This operation requires a running slave; configure slave and do START SLAVE", +"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", +"Could not initialize master info structure; more error messages can be found in the MySQL error log", +"Could not create slave thread; check system resources", "User %-.64s has already more than 'max_user_connections' active connections", "You may only use constant expressions with SET", -"Lock wait timeout exceeded", +"Lock wait timeout exceeded; try restarting transaction", "The total number of locks exceeds the lock table size", "Update locks cannot be acquired during a READ UNCOMMITTED transaction", "DROP DATABASE not allowed while thread is holding global read lock", "CREATE DATABASE not allowed while thread is holding global read lock", -"Wrong arguments to %s", -"%-.32s@%-.64s is not allowed to create new users", -"Incorrect table definition; All MERGE tables must be in the same database", -"Deadlock found when trying to get lock; Try restarting transaction", +"Incorrect arguments to %s", +"'%-.32s'@'%-.64s' is not allowed to create new users", +"Incorrect table definition; all MERGE tables must be in the same database", +"Deadlock found when trying to get lock; try restarting transaction", "The used table type doesn't support FULLTEXT indexes", "Cannot add foreign key constraint", "Cannot add a child row: a foreign key constraint fails", @@ -240,22 +242,86 @@ "Error connecting to master: %-.128s", "Error running query on master: %-.128s", "Error when executing command %s: %-.128s", -"Wrong usage of %s and %s", +"Incorrect usage of %s and %s", "The used SELECT statements have a different number of columns", "Can't execute the query because you have a conflicting read lock", "Mixing of transactional and non-transactional tables is disabled", "Option '%s' used twice in statement", "User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied. 
You need the %-.128s privilege for this operation", -"Variable '%-.64s' is a LOCAL variable and can't be used with SET GLOBAL", +"Access denied; you need the %-.128s privilege for this operation", +"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", "Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", "Variable '%-.64s' doesn't have a default value", "Variable '%-.64s' can't be set to the value of '%-.64s'", -"Wrong argument type to variable '%-.64s'", +"Incorrect argument type to variable '%-.64s'", "Variable '%-.64s' can only be set, not read", -"Wrong usage/placement of '%s'", +"Incorrect usage/placement of '%s'", "This version of MySQL doesn't yet support '%s'", "Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules" -"Variable '%-.64s' is a %s variable" +"Slave SQL thread ignored the query because of replicate-*-table rules", +"Variable '%-.64s' is a %s variable", +"Incorrect foreign key definition for '%-.64s': %s", +"Key reference and table reference don't match", +"Operand should contain %d column(s)", +"Subquery returns more than 1 row", +"Unknown prepared statement handler (%.*s) given to %s", +"Help database is corrupt or does not exist", +"Cyclic reference on subqueries", +"Converting column '%s' from %s to %s", +"Reference '%-.64s' not supported (%s)", +"Every derived table must have its own alias", +"Select %u was reduced during optimization", +"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", +"Client does not support authentication protocol requested by server; consider upgrading MySQL client", +"All parts of a SPATIAL index must be NOT NULL", +"COLLATION '%s' is not valid for CHARACTER SET '%s'", +"Slave is already running", +"Slave has already been stopped", +"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", +"ZLIB: Not enough memory", +"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", +"ZLIB: Input data corrupted", +"%d line(s) were cut by GROUP_CONCAT()", +"Row %ld doesn't contain data for all columns", +"Row %ld was truncated; it contained more data than there were input columns", +"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", +"Data truncated; out of range for column '%s' at row %ld", +"Data truncated for column '%s' at row %ld", +"Using storage engine %s for table '%s'", +"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", +"Can't drop one or more of the requested users", +"Can't revoke all privileges, grant for one or more of the requested users", +"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", +"Illegal mix of collations for operation '%s'", +"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", +"Unknown collation: '%-.64s'", +"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", +"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", +"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", +"Incorrect parameter or combination of parameters for START SLAVE UNTIL", +"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; 
otherwise, you are not safe in case of unexpected slave's mysqld restart", +"SQL thread is not to be started so UNTIL options are ignored", +"Incorrect index name '%-.100s'", +"Incorrect catalog name '%-.100s'", +"Query cache failed to set size %lu, new query cache size is %lu", +"Column '%-.64s' cannot be part of FULLTEXT index", +"Unknown key cache '%-.100s'", +"MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work", +"Unknown table engine '%s'", +"'%s' is deprecated, use '%s' instead", +"The target table %-.100s of the %s is not updateable", +"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", +"The MySQL server is running with the %s option so it cannot execute this statement", +"Column '%-.100s' has duplicated value '%-.64s' in %s" +"Truncated wrong %-.32s value: '%-.128s'" +"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" +"Invalid ON UPDATE clause for '%-.64s' column", +"This command is not supported in the prepared statement protocol yet", +"Got error %d '%-.100s' from %s", +"Got temporary error %d '%-.100s' from %s", +"Unknown or incorrect time zone: '%-.64s'", +"Invalid TIMESTAMP value in column '%s' at row %ld", +"Invalid %s character string: '%.64s'", +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" +"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/italian/errmsg.txt b/sql/share/italian/errmsg.txt index 2ab360dff4e..556b90511b0 100644 --- a/sql/share/italian/errmsg.txt +++ b/sql/share/italian/errmsg.txt @@ -14,15 +14,17 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +character-set=latin1 + "hashchk", "isamchk", "NO", "SI", "Impossibile creare il file '%-.64s' (errno: %d)", "Impossibile creare la tabella '%-.64s' (errno: %d)", -"Impossibile creare il database '%-.64s'. (errno: %d)", -"Impossibile creare il database '%-.64s'. Il database esiste", -"Impossibile cancellare '%-.64s'. Il database non esiste", +"Impossibile creare il database '%-.64s' (errno: %d)", +"Impossibile creare il database '%-.64s'; il database esiste", +"Impossibile cancellare '%-.64s'; il database non esiste", "Errore durante la cancellazione del database (impossibile cancellare '%-.64s', errno: %d)", "Errore durante la cancellazione del database (impossibile rmdir '%-.64s', errno: %d)", "Errore durante la cancellazione di '%-.64s' (errno: %d)", @@ -30,12 +32,12 @@ "Impossibile leggere lo stato di '%-.64s' (errno: %d)", "Impossibile leggere la directory di lavoro (errno: %d)", "Impossibile il locking il file (errno: %d)", -"Impossibile aprire il file: '%-.64s'. (errno: %d)", +"Impossibile aprire il file: '%-.64s' (errno: %d)", "Impossibile trovare il file: '%-.64s' (errno: %d)", "Impossibile leggere la directory di '%-.64s' (errno: %d)", "Impossibile cambiare la directory in '%-.64s' (errno: %d)", "Il record e` cambiato dall'ultima lettura della tabella '%-.64s'", -"Disco pieno (%s). In attesa che qualcuno liberi un po' di spazio....", +"Disco pieno (%s). 
In attesa che qualcuno liberi un po' di spazio...", "Scrittura impossibile: chiave duplicata nella tabella '%-.64s'", "Errore durante la chiusura di '%-.64s' (errno: %d)", "Errore durante la lettura del file '%-.64s' (errno: %d)", @@ -48,8 +50,8 @@ "Il gestore delle tabelle per '%-.64s' non ha questa opzione", "Impossibile trovare il record in '%-.64s'", "Informazione errata nel file: '%-.64s'", -"File chiave errato per la tabella : '%-.64s'. Prova a riparalo", -"File chiave vecchio per la tabella '%-.64s'; Riparalo!", +"File chiave errato per la tabella : '%-.64s'; prova a riparalo", +"File chiave vecchio per la tabella '%-.64s'; riparalo!", "'%-.64s' e` di sola lettura", "Memoria esaurita. Fai ripartire il demone e riprova (richiesti %d bytes)", "Memoria per gli ordinamenti esaurita. Incrementare il 'sort_buffer' al demone", @@ -58,8 +60,8 @@ "Fine dello spazio/memoria per i thread", "Impossibile risalire al nome dell'host dall'indirizzo (risoluzione inversa)", "Negoziazione impossibile", -"Accesso non consentito per l'utente: '%-.32s@%-.64s' al database '%-.64s'", -"Accesso non consentito per l'utente: '%-.32s@%-.64s' (Password: %s)", +"Accesso non consentito per l'utente: '%-.32s'@'%-.64s' al database '%-.64s'", +"Accesso non consentito per l'utente: '%-.32s'@'%-.64s' (Password: %s)", "Nessun database selezionato", "Comando sconosciuto", "La colonna '%-.64s' non puo` essere nulla", @@ -107,7 +109,7 @@ "Non si possono cancellare tutti i campi con una ALTER TABLE. Utilizzare DROP TABLE", "Impossibile cancellare '%-.64s'. Controllare che il campo chiave esista", "Records: %ld Duplicati: %ld Avvertimenti: %ld", -"INSERT TABLE '%-.64s' non e` permesso nella FROM table list", +"You can't specify target table '%-.64s' for update in FROM clause", "Thread id: %lu sconosciuto", "Utente non proprietario del thread %lu", "Nessuna tabella usata", @@ -156,8 +158,8 @@ "Errore '%-.64s' da regexp", "Il mescolare funzioni di aggregazione (MIN(),MAX(),COUNT()...) e non e` illegale se non c'e` una clausula GROUP BY", "GRANT non definita per l'utente '%-.32s' dalla macchina '%-.64s'", -"Comando %-.16s negato per l'utente: '%-.32s@%-.64s' sulla tabella '%-.64s'", -"Comando %-.16s negato per l'utente: '%-.32s@%-.64s' sulla colonna '%-.64s' della tabella '%-.64s'", +"Comando %-.16s negato per l'utente: '%-.32s'@'%-.64s' sulla tabella '%-.64s'", +"Comando %-.16s negato per l'utente: '%-.32s'@'%-.64s' sulla colonna '%-.64s' della tabella '%-.64s'", "Comando GRANT/REVOKE illegale. Prego consultare il manuale per sapere quali privilegi possono essere usati.", "L'argomento host o utente per la GRANT e` troppo lungo", "La tabella '%-.64s.%s' non esiste", @@ -176,7 +178,7 @@ "Rilevato un timeout ricevendo i pacchetti di comunicazione", "Rilevato un errore inviando i pacchetti di comunicazione", "Rilevato un timeout inviando i pacchetti di comunicazione", -"La stringa di risposta e` piu` lunga di max_allowed_packet", +"La stringa di risposta e` piu` lunga di 'max_allowed_packet'", "Il tipo di tabella usata non supporta colonne di tipo BLOB/TEXT", "Il tipo di tabella usata non supporta colonne di tipo AUTO_INCREMENT", "L'inserimento ritardato (INSERT DELAYED) non puo` essere usato con la tabella '%-.64s', perche` soggetta a lock da 'LOCK TABLES'", @@ -212,8 +214,8 @@ "La tabella '%-.64s' e` segnalata come corrotta e l'ultima ricostruzione (automatica?) 
e` fallita", "Attenzione: Alcune delle modifiche alle tabelle non transazionali non possono essere ripristinate (roll back impossibile)", "La transazione a comandi multipli (multi-statement) ha richiesto piu` di 'max_binlog_cache_size' bytes di disco: aumentare questa variabile di mysqld e riprovare", -"Questa operazione non puo' essere eseguita con un database 'slave' che gira, lanciare prima SLAVE STOP", -"Questa operaione richiede un database 'slave', configurarlo ed eseguire SLAVE START", +"Questa operazione non puo' essere eseguita con un database 'slave' che gira, lanciare prima STOP SLAVE", +"Questa operaione richiede un database 'slave', configurarlo ed eseguire START SLAVE", "Il server non e' configurato come 'slave', correggere il file di configurazione cambiando CHANGE MASTER TO", "Could not initialize master info structure, more error messages can be found in the MySQL error log", "Impossibile creare il thread 'slave', controllare le risorse di sistema", @@ -225,7 +227,7 @@ "DROP DATABASE non e' permesso mentre il thread ha un lock globale di lettura", "CREATE DATABASE non e' permesso mentre il thread ha un lock globale di lettura", "Argomenti errati a %s", -"A %-.32s@%-.64s non e' permesso creare nuovi utenti", +"A '%-.32s'@'%-.64s' non e' permesso creare nuovi utenti", "Definizione della tabella errata; tutte le tabelle di tipo MERGE devono essere nello stesso database", "Trovato deadlock durante il lock; Provare a far ripartire la transazione", "La tabella usata non supporta gli indici FULLTEXT", @@ -242,7 +244,7 @@ "L'opzione '%s' e' stata usata due volte nel comando", "L'utente '%-.64s' ha ecceduto la risorsa '%s' (valore corrente: %ld)", "Accesso non consentito. Serve il privilegio %-.128s per questa operazione", -"La variabile '%-.64s' e' una variabile locale ( LOCAL ) e non puo' essere cambiata usando SET GLOBAL", +"La variabile '%-.64s' e' una variabile locale ( SESSION ) e non puo' essere cambiata usando SET GLOBAL", "La variabile '%-.64s' e' una variabile globale ( GLOBAL ) e deve essere cambiata usando SET GLOBAL", "La variabile '%-.64s' non ha un valore di default", "Alla variabile '%-.64s' non puo' essere assegato il valore '%-.64s'", @@ -251,6 +253,70 @@ "Uso/posizione di '%s' sbagliato", "Questa versione di MySQL non supporta ancora '%s'", "Errore fatale %d: '%-.128s' dal master leggendo i dati dal log binario", -"Slave SQL thread ignored the query because of replicate-*-table rules" -"Variable '%-.64s' is a %s variable" +"Slave SQL thread ignored the query because of replicate-*-table rules", +"Variable '%-.64s' is a %s variable", +"Incorrect foreign key definition for '%-.64s': %s", +"Key reference and table reference don't match", +"Operand should contain %d column(s)", +"Subquery returns more than 1 row", +"Unknown prepared statement handler (%.*s) given to %s", +"Help database is corrupt or does not exist", +"Cyclic reference on subqueries", +"Converting column '%s' from %s to %s", +"Reference '%-.64s' not supported (%s)", +"Every derived table must have its own alias", +"Select %u was reduced during optimization", +"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", +"Client does not support authentication protocol requested by server; consider upgrading MySQL client", +"All parts of a SPATIAL index must be NOT NULL", +"COLLATION '%s' is not valid for CHARACTER SET '%s'", +"Slave is already running", +"Slave has already been stopped", +"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was 
corrupted)", +"ZLIB: Not enough memory", +"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", +"ZLIB: Input data corrupted", +"%d line(s) were cut by GROUP_CONCAT()", +"Row %ld doesn't contain data for all columns", +"Row %ld was truncated; it contained more data than there were input columns", +"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", +"Data truncated; out of range for column '%s' at row %ld", +"Data truncated for column '%s' at row %ld", +"Using storage engine %s for table '%s'", +"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", +"Can't drop one or more of the requested users", +"Can't revoke all privileges, grant for one or more of the requested users", +"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", +"Illegal mix of collations for operation '%s'", +"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", +"Unknown collation: '%-.64s'", +"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", +"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", +"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", +"Incorrect parameter or combination of parameters for START SLAVE UNTIL", +"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", +"SQL thread is not to be started so UNTIL options are ignored", +"Incorrect index name '%-.100s'", +"Incorrect catalog name '%-.100s'", +"Query cache failed to set size %lu, new query cache size is %lu", +"Column '%-.64s' cannot be part of FULLTEXT index", +"Unknown key cache '%-.100s'", +"MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work", +"Unknown table engine '%s'", +"'%s' is deprecated, use '%s' instead", +"The target table %-.100s of the %s is not updateable", +"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", +"The MySQL server is running with the %s option so it cannot execute this statement", +"Column '%-.100s' has duplicated value '%-.64s' in %s" +"Truncated wrong %-.32s value: '%-.128s'" +"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" +"Invalid ON UPDATE clause for '%-.64s' column", +"This command is not supported in the prepared statement protocol yet", +"Got error %d '%-.100s' from %s", +"Got temporary error %d '%-.100s' from %s", +"Unknown or incorrect time zone: '%-.64s'", +"Invalid TIMESTAMP value in column '%s' at row %ld", +"Invalid %s character string: '%.64s'", +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" +"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/japanese-sjis/errmsg.txt b/sql/share/japanese-sjis/errmsg.txt new file mode 100644 index 00000000000..1aa9ef74d5f --- /dev/null +++ b/sql/share/japanese-sjis/errmsg.txt @@ -0,0 +1,326 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* + Shift-JIS Japanese +*/ + +character-set=sjis + +"hashchk", +"isamchk", +"NO", +"YES", +"'%-.64s' ƒtƒ@ƒCƒ‹‚ªì‚ê‚Ü‚¹‚ñ (errno: %d)", +"'%-.64s' ƒe[ƒuƒ‹‚ªì‚ê‚Ü‚¹‚ñ.(errno: %d)", +"'%-.64s' ƒf[ƒ^ƒx[ƒX‚ªì‚ê‚Ü‚¹‚ñ (errno: %d)", +"'%-.64s' ƒf[ƒ^ƒx[ƒX‚ªì‚ê‚Ü‚¹‚ñ.Šù‚É‚»‚̃f[ƒ^ƒx[ƒX‚ª‘¶Ý‚µ‚Ü‚·", +"'%-.64s' ƒf[ƒ^ƒx[ƒX‚ð”jŠü‚Å‚«‚Ü‚¹‚ñ. ‚»‚̃f[ƒ^ƒx[ƒX‚ª‚È‚¢‚̂ł·.", +"ƒf[ƒ^ƒx[ƒX”jŠüƒGƒ‰[ ('%-.64s' ‚ð휂ł«‚Ü‚¹‚ñ, errno: %d)", +"ƒf[ƒ^ƒx[ƒX”jŠüƒGƒ‰[ ('%-.64s' ‚ð rmdir ‚Å‚«‚Ü‚¹‚ñ, errno: %d)", +"'%-.64s' ‚Ì휂ªƒGƒ‰[ (errno: %d)", +"system table ‚̃ŒƒR[ƒh‚ð“ǂގ–‚ª‚Å‚«‚Ü‚¹‚ñ‚Å‚µ‚½", +"'%-.64s' ‚̃XƒeƒCƒ^ƒX‚ª“¾‚ç‚ê‚Ü‚¹‚ñ. (errno: %d)", +"working directory ‚𓾂鎖‚ª‚Å‚«‚Ü‚¹‚ñ‚Å‚µ‚½ (errno: %d)", +"ƒtƒ@ƒCƒ‹‚ðƒƒbƒN‚Å‚«‚Ü‚¹‚ñ (errno: %d)", +"'%-.64s' ƒtƒ@ƒCƒ‹‚ðŠJ‚Ž–‚ª‚Å‚«‚Ü‚¹‚ñ (errno: %d)", +"'%-.64s' ƒtƒ@ƒCƒ‹‚ðŒ©•t‚¯‚鎖‚ª‚Å‚«‚Ü‚¹‚ñ.(errno: %d)", +"'%-.64s' ƒfƒBƒŒƒNƒgƒŠ‚ª“ǂ߂܂¹‚ñ.(errno: %d)", +"'%-.64s' ƒfƒBƒŒƒNƒgƒŠ‚É chdir ‚Å‚«‚Ü‚¹‚ñ.(errno: %d)", +"Record has changed since last read in table '%-.64s'", +"Disk full (%s). ’N‚©‚ª‰½‚©‚ðŒ¸‚ç‚·‚܂ł܂Á‚Ä‚‚¾‚³‚¢...", +"table '%-.64s' ‚É key ‚ªd•¡‚µ‚Ä‚¢‚Ä‘‚«‚±‚߂܂¹‚ñ", +"Error on close of '%-.64s' (errno: %d)", +"'%-.64s' ƒtƒ@ƒCƒ‹‚̓ǂݞ‚݃Gƒ‰[ (errno: %d)", +"'%-.64s' ‚ð '%-.64s' ‚É rename ‚Å‚«‚Ü‚¹‚ñ (errno: %d)", +"'%-.64s' ƒtƒ@ƒCƒ‹‚ð‘‚Ž–‚ª‚Å‚«‚Ü‚¹‚ñ (errno: %d)", +"'%-.64s' ‚̓ƒbƒN‚³‚ê‚Ä‚¢‚Ü‚·", +"Sort ’†’f", +"View '%-.64s' ‚ª '%-.64s' ‚É’è‹`‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", +"Got error %d from table handler", +"Table handler for '%-.64s' doesn't have this option", +"'%-.64s'‚̂Ȃ©‚ɃŒƒR[ƒh‚ªŒ©•t‚©‚è‚Ü‚¹‚ñ", +"ƒtƒ@ƒCƒ‹ '%-.64s' ‚Ì info ‚ªŠÔˆá‚Á‚Ä‚¢‚邿‚¤‚Å‚·", +"'%-.64s' ƒe[ƒuƒ‹‚Ì key file ‚ªŠÔˆá‚Á‚Ä‚¢‚邿‚¤‚Å‚·. C•œ‚ð‚µ‚Ä‚‚¾‚³‚¢", +"'%-.64s' ƒe[ƒuƒ‹‚͌¢Œ`Ž®‚Ì key file ‚̂悤‚Å‚·; C•œ‚ð‚µ‚Ä‚‚¾‚³‚¢", +"'%-.64s' ‚͓ǂݞ‚Ýê—p‚Å‚·", +"Out of memory. ƒf[ƒ‚ƒ“‚ðƒŠƒXƒ^[ƒg‚µ‚Ă݂Ă‚¾‚³‚¢ (%d bytes •K—v)", +"Out of sort memory. sort buffer size ‚ª‘«‚è‚È‚¢‚悤‚Å‚·.", +"'%-.64s' ƒtƒ@ƒCƒ‹‚ð“ǂݞ‚Ý’†‚É EOF ‚ª—\Šú‚¹‚ÊŠ‚ÅŒ»‚ê‚Ü‚µ‚½. (errno: %d)", +"Ú‘±‚ª‘½‚·‚¬‚Ü‚·", +"Out of memory; mysqld ‚©‚»‚Ì‘¼‚̃vƒƒZƒX‚ªƒƒ‚ƒŠ[‚ð‘S‚ÄŽg‚Á‚Ä‚¢‚é‚©Šm”F‚µ‚Ä‚‚¾‚³‚¢. 
ƒƒ‚ƒŠ[‚ðŽg‚¢Ø‚Á‚Ä‚¢‚È‚¢ê‡A'ulimit' ‚ðݒ肵‚Ä mysqld ‚̃ƒ‚ƒŠ[Žg—pŒÀŠE—ʂ𑽂‚·‚é‚©Aswap space ‚ð‘‚₵‚Ă݂Ă‚¾‚³‚¢", +"‚»‚Ì address ‚Ì hostname ‚ªˆø‚¯‚Ü‚¹‚ñ.", +"Bad handshake", +"ƒ†[ƒU[ '%-.32s'@'%-.64s' ‚Ì '%-.64s' ƒf[ƒ^ƒx[ƒX‚ւ̃AƒNƒZƒX‚ð‹‘”Û‚µ‚Ü‚·", +"ƒ†[ƒU[ '%-.32s'@'%-.64s' ‚ð‹‘”Û‚µ‚Ü‚·.uUsing password: %s)", +"ƒf[ƒ^ƒx[ƒX‚ª‘I‘ð‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ.", +"‚»‚̃Rƒ}ƒ“ƒh‚͉½H", +"Column '%-.64s' ‚Í null ‚ɂ͂ł«‚È‚¢‚̂ł·", +"'%-.64s' ‚È‚ñ‚ăf[ƒ^ƒx[ƒX‚Í’m‚è‚Ü‚¹‚ñ.", +"Table '%-.64s' ‚ÍŠù‚É‚ ‚è‚Ü‚·", +"table '%-.64s' ‚Í‚ ‚è‚Ü‚¹‚ñ.", +"Column: '%-.64s' in %-.64s is ambiguous", +"Server ‚ð shutdown ’†...", +"'%-.64s' column ‚Í '%-.64s' ‚ɂ͂ ‚è‚Ü‚¹‚ñ.", +"'%-.64s' isn't in GROUP BY", +"Can't group on '%-.64s'", +"Statement has sum functions and columns in same statement", +"Column count doesn't match value count", +"Identifier name '%-.100s' ‚Í’·‚·‚¬‚Ü‚·", +"'%-.64s' ‚Æ‚¢‚¤ column –¼‚Íd•¡‚µ‚Ă܂·", +"'%-.64s' ‚Æ‚¢‚¤ key ‚Ì–¼‘O‚Íd•¡‚µ‚Ä‚¢‚Ü‚·", +"'%-.64s' ‚Í key %d ‚É‚¨‚¢‚Äd•¡‚µ‚Ä‚¢‚Ü‚·", +"Incorrect column specifier for column '%-.64s'", +"%s : '%-.80s' •t‹ß : %d s–Ú", +"Query ‚ª‹ó‚Å‚·.", +"'%-.64s' ‚͈êˆÓ‚Ì table/alias –¼‚ł͂ ‚è‚Ü‚¹‚ñ", +"Invalid default value for '%-.64s'", +"•¡”‚Ì primary key ‚ª’è‹`‚³‚ê‚Ü‚µ‚½", +"key ‚ÌŽw’肪‘½‚·‚¬‚Ü‚·. key ‚ÍÅ‘å %d ‚܂łł·", +"Too many key parts specified; max %d parts allowed", +"key ‚ª’·‚·‚¬‚Ü‚·. key ‚Ì’·‚³‚ÍÅ‘å %d ‚Å‚·", +"Key column '%-.64s' ‚ªƒe[ƒuƒ‹‚É‚ ‚è‚Ü‚¹‚ñ.", +"BLOB column '%-.64s' can't be used in key specification with the used table type", +"column '%-.64s' ‚Í,Šm•Û‚·‚é column ‚̑傫‚³‚ª‘½‚·‚¬‚Ü‚·. (Å‘å %d ‚Ü‚Å). BLOB ‚ð‚©‚í‚è‚ÉŽg—p‚µ‚Ä‚‚¾‚³‚¢.", +"ƒe[ƒuƒ‹‚Ì’è‹`‚ªˆá‚¢‚Ü‚·; there can be only one auto column and it must be defined as a key", +"%s: €”õŠ®—¹", +"%s: Normal shutdown\n", +"%s: Got signal %d. ’†’f!\n", +"%s: Shutdown Š®—¹\n", +"%s: ƒXƒŒƒbƒh %ld ‹§I—¹ user: '%-.64s'\n", +"IP socket ‚ªì‚ê‚Ü‚¹‚ñ", +"Table '%-.64s' ‚Í‚»‚̂悤‚È index ‚ðŽ‚Á‚Ä‚¢‚Ü‚¹‚ñ(CREATE INDEX ŽÀsŽž‚ÉŽw’肳‚ê‚Ä‚¢‚Ü‚¹‚ñ). ƒe[ƒuƒ‹‚ðì‚è’¼‚µ‚Ä‚‚¾‚³‚¢", +"Field separator argument is not what is expected; check the manual", +"You can't use fixed rowlength with BLOBs; please use 'fields terminated by'.", +"ƒtƒ@ƒCƒ‹ '%-.64s' ‚Í databse ‚Ì directory ‚É‚ ‚é‚©‘S‚Ẵ†[ƒU[‚ª“Ç‚ß‚é‚æ‚¤‚É‹–‰Â‚³‚ê‚Ä‚¢‚È‚¯‚ê‚΂Ȃè‚Ü‚¹‚ñ.", +"File '%-.64s' ‚ÍŠù‚É‘¶Ý‚µ‚Ü‚·", +"ƒŒƒR[ƒh”: %ld íœ: %ld Skipped: %ld Warnings: %ld", +"ƒŒƒR[ƒh”: %ld d•¡: %ld", +"Incorrect sub part key; the used key part isn't a string or the used length is longer than the key part", +"ALTER TABLE ‚Å‘S‚Ä‚Ì column ‚Í휂ł«‚Ü‚¹‚ñ. 
DROP TABLE ‚ðŽg—p‚µ‚Ä‚‚¾‚³‚¢", +"'%-.64s' ‚ð”jŠü‚Å‚«‚Ü‚¹‚ñ‚Å‚µ‚½; check that column/key exists", +"ƒŒƒR[ƒh”: %ld d•¡”: %ld Warnings: %ld", +"You can't specify target table '%-.64s' for update in FROM clause", +"thread id: %lu ‚Í‚ ‚è‚Ü‚¹‚ñ", +"thread %lu ‚̃I[ƒi[‚ł͂ ‚è‚Ü‚¹‚ñ", +"No tables used", +"Too many strings for column %-.64s and SET", +"Can't generate a unique log-filename %-.64s.(1-999)\n", +"Table '%-.64s' ‚Í READ lock ‚ɂȂÁ‚Ä‚¢‚ÄAXV‚͂ł«‚Ü‚¹‚ñ", +"Table '%-.64s' ‚Í LOCK TABLES ‚É‚æ‚Á‚ăƒbƒN‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", +"BLOB column '%-.64s' can't have a default value", +"Žw’肵‚½ database –¼ '%-.100s' ‚ªŠÔˆá‚Á‚Ä‚¢‚Ü‚·", +"Žw’肵‚½ table –¼ '%-.100s' ‚͂܂¿‚ª‚Á‚Ä‚¢‚Ü‚·", +"The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is okay", +"Unknown error", +"Unknown procedure '%-.64s'", +"Incorrect parameter count to procedure '%-.64s'", +"Incorrect parameters to procedure '%-.64s'", +"Unknown table '%-.64s' in %s", +"Column '%-.64s' specified twice", +"Invalid use of group function", +"Table '%-.64s' uses an extension that doesn't exist in this MySQL version", +"ƒe[ƒuƒ‹‚ÍÅ’á 1 ŒÂ‚Ì column ‚ª•K—v‚Å‚·", +"table '%-.64s' ‚Í‚¢‚Á‚Ï‚¢‚Å‚·", +"character set '%-.64s' ‚̓Tƒ|[ƒg‚µ‚Ä‚¢‚Ü‚¹‚ñ", +"ƒe[ƒuƒ‹‚ª‘½‚·‚¬‚Ü‚·; MySQL can only use %d tables in a join", +"column ‚ª‘½‚·‚¬‚Ü‚·", +"row size ‚ª‘å‚«‚·‚¬‚Ü‚·. BLOB ‚ðŠÜ‚܂Ȃ¢ê‡‚Ì row size ‚ÌÅ‘å‚Í %d ‚Å‚·. ‚¢‚‚‚©‚Ì field ‚ð BLOB ‚ɕς¦‚Ä‚‚¾‚³‚¢.", +"Thread stack overrun: Used: %ld of a %ld stack. ƒXƒ^ƒbƒN—̈æ‚𑽂‚Ƃ肽‚¢ê‡A'mysqld -O thread_stack=#' ‚ÆŽw’肵‚Ä‚‚¾‚³‚¢", +"Cross dependency found in OUTER JOIN; examine your ON conditions", +"Column '%-.64s' ‚ª UNIQUE ‚© INDEX ‚ÅŽg—p‚³‚ê‚Ü‚µ‚½. ‚±‚̃Jƒ‰ƒ€‚Í NOT NULL ‚Æ’è‹`‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ.", +"function '%-.64s' ‚ð ƒ[ƒh‚Å‚«‚Ü‚¹‚ñ", +"function '%-.64s' ‚ð‰Šú‰»‚Å‚«‚Ü‚¹‚ñ; %-.80s", +"shared library ‚ւ̃pƒX‚ª’Ê‚Á‚Ä‚¢‚Ü‚¹‚ñ", +"Function '%-.64s' ‚ÍŠù‚É’è‹`‚³‚ê‚Ä‚¢‚Ü‚·", +"shared library '%-.64s' ‚ðŠJ‚Ž–‚ª‚Å‚«‚Ü‚¹‚ñ (errno: %d %s)", +"function '%-.64s' ‚ðƒ‰ƒCƒuƒ‰ƒŠ[’†‚ÉŒ©•t‚¯‚鎖‚ª‚Å‚«‚Ü‚¹‚ñ", +"Function '%-.64s' ‚Í’è‹`‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", +"Host '%-.64s' ‚Í many connection error ‚Ì‚½‚ßA‹‘”Û‚³‚ê‚Ü‚µ‚½. 'mysqladmin flush-hosts' ‚ʼn𜂵‚Ä‚‚¾‚³‚¢", +"Host '%-.64s' ‚Í MySQL server ‚ÉÚ‘±‚ð‹–‰Â‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", +"MySQL ‚ð anonymous users ‚ÅŽg—p‚µ‚Ä‚¢‚éó‘Ô‚Å‚ÍAƒpƒXƒ[ƒh‚Ì•ÏX‚͂ł«‚Ü‚¹‚ñ", +"‘¼‚̃†[ƒU[‚̃pƒXƒ[ƒh‚ð•ÏX‚·‚邽‚߂ɂÍ, mysql ƒf[ƒ^ƒx[ƒX‚ɑ΂µ‚Ä update ‚Ì‹–‰Â‚ª‚È‚¯‚ê‚΂Ȃè‚Ü‚¹‚ñ.", +"Can't find any matching row in the user table", +"ˆê’v”(Rows matched): %ld •ÏX: %ld Warnings: %ld", +"V‹K‚ɃXƒŒƒbƒh‚ªì‚ê‚Ü‚¹‚ñ‚Å‚µ‚½ (errno %d). ‚à‚µÅ‘åŽg—p‹–‰Âƒƒ‚ƒŠ[”‚ð‰z‚¦‚Ä‚¢‚È‚¢‚̂ɃGƒ‰[‚ª”¶‚µ‚Ä‚¢‚é‚È‚ç, ƒ}ƒjƒ…ƒAƒ‹‚Ì’†‚©‚ç 'possible OS-dependent bug' ‚Æ‚¢‚¤•¶Žš‚ð’T‚µ‚Ä‚‚݂Ă¾‚³‚¢.", +"Column count doesn't match value count at row %ld", +"Can't reopen table: '%-.64s'", +"NULL ’l‚ÌŽg—p•û–@‚ª•s“K؂ł·", +"Got error '%-.64s' from regexp", +"Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) 
with no GROUP columns is illegal if there is no GROUP BY clause", +"ƒ†[ƒU[ '%-.32s' (ƒzƒXƒg '%-.64s' ‚̃†[ƒU[) ‚Í‹–‰Â‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", +"ƒRƒ}ƒ“ƒh %-.16s ‚Í ƒ†[ƒU[ '%-.32s'@'%-.64s' ,ƒe[ƒuƒ‹ '%-.64s' ‚ɑ΂µ‚Ä‹–‰Â‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", +"ƒRƒ}ƒ“ƒh %-.16s ‚Í ƒ†[ƒU[ '%-.32s'@'%-.64s'\n ƒJƒ‰ƒ€ '%-.64s' ƒe[ƒuƒ‹ '%-.64s' ‚ɑ΂µ‚Ä‹–‰Â‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", +"Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used.", +"The host or user argument to GRANT is too long", +"Table '%-.64s.%s' doesn't exist", +"There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'", +"The used command is not allowed with this MySQL version", +"Something is wrong in your syntax", +"Delayed insert thread couldn't get requested lock for table %-.64s", +"Too many delayed threads in use", +"Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)", +"Got a packet bigger than 'max_allowed_packet' bytes", +"Got a read error from the connection pipe", +"Got an error from fcntl()", +"Got packets out of order", +"Couldn't uncompress communication packet", +"Got an error reading communication packets", +"Got timeout reading communication packets", +"Got an error writing communication packets", +"Got timeout writing communication packets", +"Result string is longer than 'max_allowed_packet' bytes", +"The used table type doesn't support BLOB/TEXT columns", +"The used table type doesn't support AUTO_INCREMENT columns", +"INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES", +"Incorrect column name '%-.100s'", +"The used table handler can't index column '%-.64s'", +"All tables in the MERGE table are not defined identically", +"Can't write, because of unique constraint, to table '%-.64s'", +"BLOB column '%-.64s' used in key specification without a key length", +"All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead", +"Result consisted of more than one row", +"This table type requires a primary key", +"This version of MySQL is not compiled with RAID support", +"You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column", +"Key '%-.64s' doesn't exist in table '%-.64s'", +"Can't open table", +"The handler for the table doesn't support %s", +"You are not allowed to execute this command in a transaction", +"Got error %d during COMMIT", +"Got error %d during ROLLBACK", +"Got error %d during FLUSH_LOGS", +"Got error %d during CHECKPOINT", +"Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)", +"The handler for the table does not support binary table dump", +"Binlog closed while trying to FLUSH MASTER", +"Failed rebuilding the index of dumped table '%-.64s'", +"Error from master: '%-.64s'", +"Net error reading from master", +"Net error writing to master", +"Can't find FULLTEXT index matching the column list", +"Can't execute the given command because you have active locked tables or an active transaction", +"Unknown system variable '%-.64s'", +"Table '%-.64s' is marked as crashed and should be repaired", +"Table '%-.64s' is marked as crashed and last (automatic?) 
repair failed", +"Some non-transactional changed tables couldn't be rolled back", +"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", +"This operation cannot be performed with a running slave; run STOP SLAVE first", +"This operation requires a running slave; configure slave and do START SLAVE", +"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", +"Could not initialize master info structure; more error messages can be found in the MySQL error log", +"Could not create slave thread; check system resources", +"User %-.64s has already more than 'max_user_connections' active connections", +"You may only use constant expressions with SET", +"Lock wait timeout exceeded; try restarting transaction", +"The total number of locks exceeds the lock table size", +"Update locks cannot be acquired during a READ UNCOMMITTED transaction", +"DROP DATABASE not allowed while thread is holding global read lock", +"CREATE DATABASE not allowed while thread is holding global read lock", +"Incorrect arguments to %s", +"'%-.32s'@'%-.64s' is not allowed to create new users", +"Incorrect table definition; all MERGE tables must be in the same database", +"Deadlock found when trying to get lock; try restarting transaction", +"The used table type doesn't support FULLTEXT indexes", +"Cannot add foreign key constraint", +"Cannot add a child row: a foreign key constraint fails", +"Cannot delete a parent row: a foreign key constraint fails", +"Error connecting to master: %-.128s", +"Error running query on master: %-.128s", +"Error when executing command %s: %-.128s", +"Incorrect usage of %s and %s", +"The used SELECT statements have a different number of columns", +"Can't execute the query because you have a conflicting read lock", +"Mixing of transactional and non-transactional tables is disabled", +"Option '%s' used twice in statement", +"User '%-.64s' has exceeded the '%s' resource (current value: %ld)", +"Access denied; you need the %-.128s privilege for this operation", +"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", +"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", +"Variable '%-.64s' doesn't have a default value", +"Variable '%-.64s' can't be set to the value of '%-.64s'", +"Incorrect argument type to variable '%-.64s'", +"Variable '%-.64s' can only be set, not read", +"Incorrect usage/placement of '%s'", +"This version of MySQL doesn't yet support '%s'", +"Got fatal error %d: '%-.128s' from master when reading data from binary log", +"Slave SQL thread ignored the query because of replicate-*-table rules", +"Variable '%-.64s' is a %s variable", +"Incorrect foreign key definition for '%-.64s': %s", +"Key reference and table reference don't match", +"Operand should contain %d column(s)", +"Subquery returns more than 1 row", +"Unknown prepared statement handler (%.*s) given to %s", +"Help database is corrupt or does not exist", +"Cyclic reference on subqueries", +"Converting column '%s' from %s to %s", +"Reference '%-.64s' not supported (%s)", +"Every derived table must have its own alias", +"Select %u was reduced during optimization", +"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", +"Client does not support authentication protocol requested by server; consider upgrading MySQL client", +"All parts of a SPATIAL index must be NOT NULL", +"COLLATION '%s' is not valid for CHARACTER SET '%s'", +"Slave is already running", +"Slave has 
already been stopped", +"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", +"ZLIB: Not enough memory", +"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", +"ZLIB: Input data corrupted", +"%d line(s) were cut by GROUP_CONCAT()", +"Row %ld doesn't contain data for all columns", +"Row %ld was truncated; it contained more data than there were input columns", +"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", +"Data truncated; out of range for column '%s' at row %ld", +"Data truncated for column '%s' at row %ld", +"Using storage engine %s for table '%s'", +"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", +"Can't drop one or more of the requested users", +"Can't revoke all privileges, grant for one or more of the requested users", +"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", +"Illegal mix of collations for operation '%s'", +"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", +"Unknown collation: '%-.64s'", +"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", +"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", +"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", +"Incorrect parameter or combination of parameters for START SLAVE UNTIL", +"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", +"SQL thread is not to be started so UNTIL options are ignored", +"Incorrect index name '%-.100s'", +"Incorrect catalog name '%-.100s'", +"Query cache failed to set size %lu, new query cache size is %lu", +"Column '%-.64s' cannot be part of FULLTEXT index", +"Unknown key cache '%-.100s'", +"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", +"Unknown table engine '%s'", +"'%s' is deprecated, use '%s' instead", +"The target table %-.100s of the %s is not updateable", +"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", +"The MySQL server is running with the %s option so it cannot execute this statement", +"Column '%-.100s' has duplicated value '%-.64s' in %s" +"Truncated wrong %-.32s value: '%-.128s'" +"Incorrect table definition; There can only be one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" +"Invalid ON UPDATE clause for '%-.64s' column", +"This command is not supported in the prepared statement protocol yet", +"Got NDB error %d '%-.100s'", +"Got temporary NDB error %d '%-.100s'", +"Unknown or incorrect time zone: '%-.64s'", +"Invalid TIMESTAMP value in column '%s' at row %ld", +"Invalid %s character string: '%.64s'", +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" +"Conflicting declarations: '%s%s' and '%s%s'" + diff --git a/sql/share/japanese/errmsg.txt b/sql/share/japanese/errmsg.txt index 4ad7cc3ec37..47adbf74b86 100644 --- a/sql/share/japanese/errmsg.txt +++ b/sql/share/japanese/errmsg.txt @@ -18,6 +18,8 @@ 3.22.10-beta euc-japanese (ujis) text */ +character-set=ujis + "hashchk", "isamchk", "NO", @@ -32,9 +34,9 @@ "'%-.64s' ¤Îºï½ü¤¬¥¨¥é¡¼ (errno: %d)", "system table ¤Î¥ì¥³¡¼¥É¤òÆÉ¤à»ö¤¬¤Ç¤¤Þ¤»¤ó¤Ç¤·¤¿", "'%-.64s' ¤Î¥¹¥Æ¥¤¥¿¥¹¤¬ÆÀ¤é¤ì¤Þ¤»¤ó. (errno: %d)", -"working directory ¤òÆÀ¤ë»ö¤¬¤Ç¤¤Þ¤»¤ó¤Ç¤·¤¿. (errno: %d)", -"¥Õ¥¡¥¤¥ë¤ò¥í¥Ã¥¯¤Ç¤¤Þ¤»¤ó.(errno: %d)", -"'%-.64s' ¥Õ¥¡¥¤¥ë¤ò³«¤¯»ö¤¬¤Ç¤¤Þ¤»¤ó.(errno: %d)", +"working directory ¤òÆÀ¤ë»ö¤¬¤Ç¤¤Þ¤»¤ó¤Ç¤·¤¿ (errno: %d)", +"¥Õ¥¡¥¤¥ë¤ò¥í¥Ã¥¯¤Ç¤¤Þ¤»¤ó (errno: %d)", +"'%-.64s' ¥Õ¥¡¥¤¥ë¤ò³«¤¯»ö¤¬¤Ç¤¤Þ¤»¤ó (errno: %d)", "'%-.64s' ¥Õ¥¡¥¤¥ë¤ò¸«ÉÕ¤±¤ë»ö¤¬¤Ç¤¤Þ¤»¤ó.(errno: %d)", "'%-.64s' ¥Ç¥£¥ì¥¯¥È¥ê¤¬ÆÉ¤á¤Þ¤»¤ó.(errno: %d)", "'%-.64s' ¥Ç¥£¥ì¥¯¥È¥ê¤Ë chdir ¤Ç¤¤Þ¤»¤ó.(errno: %d)", @@ -62,8 +64,8 @@ "Out of memory; mysqld ¤«¤½¤Î¾¤Î¥×¥í¥»¥¹¤¬¥á¥â¥ê¡¼¤òÁ´¤Æ»È¤Ã¤Æ¤¤¤ë¤«³Îǧ¤·¤Æ¤¯¤À¤µ¤¤. ¥á¥â¥ê¡¼¤ò»È¤¤ÀڤäƤ¤¤Ê¤¤¾ì¹ç¡¢'ulimit' ¤òÀßÄꤷ¤Æ mysqld ¤Î¥á¥â¥ê¡¼»ÈÍѸ³¦Î̤ò¿¤¯¤¹¤ë¤«¡¢swap space ¤òÁý¤ä¤·¤Æ¤ß¤Æ¤¯¤À¤µ¤¤", "¤½¤Î address ¤Î hostname ¤¬°ú¤±¤Þ¤»¤ó.", "Bad handshake", -"¥æ¡¼¥¶¡¼ '%-.32s@%-.64s' ¤Î '%-.64s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤Ø¤Î¥¢¥¯¥»¥¹¤òµñÈݤ·¤Þ¤¹", -"¥æ¡¼¥¶¡¼ '%-.32s@%-.64s' ¤òµñÈݤ·¤Þ¤¹.(Using password: %s)", +"¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s' ¤Î '%-.64s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤Ø¤Î¥¢¥¯¥»¥¹¤òµñÈݤ·¤Þ¤¹", +"¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s' ¤òµñÈݤ·¤Þ¤¹.uUsing password: %s)", "¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬ÁªÂò¤µ¤ì¤Æ¤¤¤Þ¤»¤ó.", "¤½¤Î¥³¥Þ¥ó¥É¤Ï²¿¡©", "Column '%-.64s' ¤Ï null ¤Ë¤Ï¤Ç¤¤Ê¤¤¤Î¤Ç¤¹", @@ -88,12 +90,12 @@ "Invalid default value for '%-.64s'", "Ê£¿ô¤Î primary key ¤¬ÄêµÁ¤µ¤ì¤Þ¤·¤¿", "key ¤Î»ØÄ꤬¿¤¹¤®¤Þ¤¹. key ¤ÏºÇÂç %d ¤Þ¤Ç¤Ç¤¹", -"Too many key parts specified. Max %d parts allowed", +"Too many key parts specified; max %d parts allowed", "key ¤¬Ä¹¤¹¤®¤Þ¤¹. key ¤ÎŤµ¤ÏºÇÂç %d ¤Ç¤¹", "Key column '%-.64s' ¤¬¥Æ¡¼¥Ö¥ë¤Ë¤¢¤ê¤Þ¤»¤ó.", "BLOB column '%-.64s' can't be used in key specification with the used table type", "column '%-.64s' ¤Ï,³ÎÊݤ¹¤ë column ¤ÎÂ礤µ¤¬Â¿¤¹¤®¤Þ¤¹. (ºÇÂç %d ¤Þ¤Ç). BLOB ¤ò¤«¤ï¤ê¤Ë»ÈÍѤ·¤Æ¤¯¤À¤µ¤¤.", -"¥Æ¡¼¥Ö¥ë¤ÎÄêµÁ¤¬°ã¤¤¤Þ¤¹; There can only be one auto column and it must be defined as a key", +"¥Æ¡¼¥Ö¥ë¤ÎÄêµÁ¤¬°ã¤¤¤Þ¤¹; there can be only one auto column and it must be defined as a key", "%s: ½àÈ÷´°Î»", "%s: Normal shutdown\n", "%s: Got signal %d. 
ÃæÃÇ!\n", @@ -101,17 +103,17 @@ "%s: ¥¹¥ì¥Ã¥É %ld ¶¯À©½ªÎ» user: '%-.64s'\n", "IP socket ¤¬ºî¤ì¤Þ¤»¤ó", "Table '%-.64s' ¤Ï¤½¤Î¤è¤¦¤Ê index ¤ò»ý¤Ã¤Æ¤¤¤Þ¤»¤ó(CREATE INDEX ¼Â¹Ô»þ¤Ë»ØÄꤵ¤ì¤Æ¤¤¤Þ¤»¤ó). ¥Æ¡¼¥Ö¥ë¤òºî¤êľ¤·¤Æ¤¯¤À¤µ¤¤", -"Field separator argument is not what is expected. Check the manual", -"You can't use fixed rowlength with BLOBs. Please use 'fields terminated by'.", +"Field separator argument is not what is expected; check the manual", +"You can't use fixed rowlength with BLOBs; please use 'fields terminated by'.", "¥Õ¥¡¥¤¥ë '%-.64s' ¤Ï databse ¤Î directory ¤Ë¤¢¤ë¤«Á´¤Æ¤Î¥æ¡¼¥¶¡¼¤¬ÆÉ¤á¤ë¤è¤¦¤Ëµö²Ä¤µ¤ì¤Æ¤¤¤Ê¤±¤ì¤Ð¤Ê¤ê¤Þ¤»¤ó.", "File '%-.64s' ¤Ï´û¤Ë¸ºß¤·¤Þ¤¹", "¥ì¥³¡¼¥É¿ô: %ld ºï½ü: %ld Skipped: %ld Warnings: %ld", "¥ì¥³¡¼¥É¿ô: %ld ½ÅÊ£: %ld", -"Incorrect sub part key. The used key part isn't a string or the used length is longer than the key part", +"Incorrect sub part key; the used key part isn't a string or the used length is longer than the key part", "ALTER TABLE ¤ÇÁ´¤Æ¤Î column ¤Ïºï½ü¤Ç¤¤Þ¤»¤ó. DROP TABLE ¤ò»ÈÍѤ·¤Æ¤¯¤À¤µ¤¤", -"'%-.64s' ¤òÇË´þ¤Ç¤¤Þ¤»¤ó¤Ç¤·¤¿. Check that column/key exists", +"'%-.64s' ¤òÇË´þ¤Ç¤¤Þ¤»¤ó¤Ç¤·¤¿; check that column/key exists", "¥ì¥³¡¼¥É¿ô: %ld ½ÅÊ£¿ô: %ld Warnings: %ld", -"INSERT TABLE '%-.64s' isn't allowed in FROM table list", +"You can't specify target table '%-.64s' for update in FROM clause", "thread id: %lu ¤Ï¤¢¤ê¤Þ¤»¤ó", "thread %lu ¤Î¥ª¡¼¥Ê¡¼¤Ç¤Ï¤¢¤ê¤Þ¤»¤ó", "No tables used", @@ -122,7 +124,7 @@ "BLOB column '%-.64s' can't have a default value", "»ØÄꤷ¤¿ database ̾ '%-.100s' ¤¬´Ö°ã¤Ã¤Æ¤¤¤Þ¤¹", "»ØÄꤷ¤¿ table ̾ '%-.100s' ¤Ï¤Þ¤Á¤¬¤Ã¤Æ¤¤¤Þ¤¹", -"The SELECT would examine more rows than MAX_JOIN_SIZE. Check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is ok", +"The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is okay", "Unknown error", "Unknown procedure '%-.64s'", "Incorrect parameter count to procedure '%-.64s'", @@ -134,11 +136,11 @@ "¥Æ¡¼¥Ö¥ë¤ÏºÇÄã 1 ¸Ä¤Î column ¤¬É¬ÍפǤ¹", "table '%-.64s' ¤Ï¤¤¤Ã¤Ñ¤¤¤Ç¤¹", "character set '%-.64s' ¤Ï¥µ¥Ý¡¼¥È¤·¤Æ¤¤¤Þ¤»¤ó", -"¥Æ¡¼¥Ö¥ë¤¬Â¿¤¹¤®¤Þ¤¹. MySQL can only use %d tables in a join", +"¥Æ¡¼¥Ö¥ë¤¬Â¿¤¹¤®¤Þ¤¹; MySQL can only use %d tables in a join", "column ¤¬Â¿¤¹¤®¤Þ¤¹", "row size ¤¬Â礤¹¤®¤Þ¤¹. BLOB ¤ò´Þ¤Þ¤Ê¤¤¾ì¹ç¤Î row size ¤ÎºÇÂç¤Ï %d ¤Ç¤¹. ¤¤¤¯¤Ä¤«¤Î field ¤ò BLOB ¤ËÊѤ¨¤Æ¤¯¤À¤µ¤¤.", "Thread stack overrun: Used: %ld of a %ld stack. ¥¹¥¿¥Ã¥¯Îΰè¤ò¿¤¯¤È¤ê¤¿¤¤¾ì¹ç¡¢'mysqld -O thread_stack=#' ¤È»ØÄꤷ¤Æ¤¯¤À¤µ¤¤", -"Cross dependency found in OUTER JOIN. Examine your ON conditions", +"Cross dependency found in OUTER JOIN; examine your ON conditions", "Column '%-.64s' ¤¬ UNIQUE ¤« INDEX ¤Ç»ÈÍѤµ¤ì¤Þ¤·¤¿. ¤³¤Î¥«¥é¥à¤Ï NOT NULL ¤ÈÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤»¤ó.", "function '%-.64s' ¤ò ¥í¡¼¥É¤Ç¤¤Þ¤»¤ó", "function '%-.64s' ¤ò½é´ü²½¤Ç¤¤Þ¤»¤ó; %-.80s", @@ -158,11 +160,11 @@ "Can't reopen table: '%-.64s'", "NULL ÃͤλÈÍÑÊýË¡¤¬ÉÔŬÀڤǤ¹", "Got error '%-.64s' from regexp", -"Mixing of GROUP columns (MIN(),MAX(),COUNT()...) with no GROUP columns is illegal if there is no GROUP BY clause", +"Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) with no GROUP columns is illegal if there is no GROUP BY clause", "¥æ¡¼¥¶¡¼ '%-.32s' (¥Û¥¹¥È '%-.64s' ¤Î¥æ¡¼¥¶¡¼) ¤Ïµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó", -"¥³¥Þ¥ó¥É %-.16s ¤Ï ¥æ¡¼¥¶¡¼ '%-.32s@%-.64s' ,¥Æ¡¼¥Ö¥ë '%-.64s' ¤ËÂФ·¤Æµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó", -"¥³¥Þ¥ó¥É %-.16s ¤Ï ¥æ¡¼¥¶¡¼ '%-.32s@%-.64s'\n ¥«¥é¥à '%-.64s' ¥Æ¡¼¥Ö¥ë '%-.64s' ¤ËÂФ·¤Æµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó", -"Illegal GRANT/REVOKE command. 
Please consult the manual which privleges can be used.", +"¥³¥Þ¥ó¥É %-.16s ¤Ï ¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s' ,¥Æ¡¼¥Ö¥ë '%-.64s' ¤ËÂФ·¤Æµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó", +"¥³¥Þ¥ó¥É %-.16s ¤Ï ¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s'\n ¥«¥é¥à '%-.64s' ¥Æ¡¼¥Ö¥ë '%-.64s' ¤ËÂФ·¤Æµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó", +"Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used.", "The host or user argument to GRANT is too long", "Table '%-.64s.%s' doesn't exist", "There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'", @@ -171,7 +173,7 @@ "Delayed insert thread couldn't get requested lock for table %-.64s", "Too many delayed threads in use", "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)", -"Got a packet bigger than 'max_allowed_packet'", +"Got a packet bigger than 'max_allowed_packet' bytes", "Got a read error from the connection pipe", "Got an error from fcntl()", "Got packets out of order", @@ -180,7 +182,7 @@ "Got timeout reading communication packets", "Got an error writing communication packets", "Got timeout writing communication packets", -"Result string is longer than max_allowed_packet", +"Result string is longer than 'max_allowed_packet' bytes", "The used table type doesn't support BLOB/TEXT columns", "The used table type doesn't support AUTO_INCREMENT columns", "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES", @@ -189,7 +191,7 @@ "All tables in the MERGE table are not defined identically", "Can't write, because of unique constraint, to table '%-.64s'", "BLOB column '%-.64s' used in key specification without a key length", -"All parts of a PRIMARY KEY must be NOT NULL; If you need NULL in a key, use UNIQUE instead", +"All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead", "Result consisted of more than one row", "This table type requires a primary key", "This version of MySQL is not compiled with RAID support", @@ -214,24 +216,24 @@ "Unknown system variable '%-.64s'", "Table '%-.64s' is marked as crashed and should be repaired", "Table '%-.64s' is marked as crashed and last (automatic?) repair failed", -"Warning: Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage. 
Increase this mysqld variable and try again", -"This operation cannot be performed with a running slave, run SLAVE STOP first", -"This operation requires a running slave, configure slave and do SLAVE START", -"The server is not configured as slave, fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure, more error messages can be found in the MySQL error log", -"Could not create slave thread, check system resources", +"Some non-transactional changed tables couldn't be rolled back", +"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", +"This operation cannot be performed with a running slave; run STOP SLAVE first", +"This operation requires a running slave; configure slave and do START SLAVE", +"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", +"Could not initialize master info structure; more error messages can be found in the MySQL error log", +"Could not create slave thread; check system resources", "User %-.64s has already more than 'max_user_connections' active connections", "You may only use constant expressions with SET", -"Lock wait timeout exceeded", +"Lock wait timeout exceeded; try restarting transaction", "The total number of locks exceeds the lock table size", "Update locks cannot be acquired during a READ UNCOMMITTED transaction", "DROP DATABASE not allowed while thread is holding global read lock", "CREATE DATABASE not allowed while thread is holding global read lock", -"Wrong arguments to %s", -"%-.32s@%-.64s is not allowed to create new users", -"Incorrect table definition; All MERGE tables must be in the same database", -"Deadlock found when trying to get lock; Try restarting transaction", +"Incorrect arguments to %s", +"'%-.32s'@'%-.64s' is not allowed to create new users", +"Incorrect table definition; all MERGE tables must be in the same database", +"Deadlock found when trying to get lock; try restarting transaction", "The used table type doesn't support FULLTEXT indexes", "Cannot add foreign key constraint", "Cannot add a child row: a foreign key constraint fails", @@ -239,22 +241,86 @@ "Error connecting to master: %-.128s", "Error running query on master: %-.128s", "Error when executing command %s: %-.128s", -"Wrong usage of %s and %s", +"Incorrect usage of %s and %s", "The used SELECT statements have a different number of columns", "Can't execute the query because you have a conflicting read lock", "Mixing of transactional and non-transactional tables is disabled", "Option '%s' used twice in statement", "User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied. 
You need the %-.128s privilege for this operation", -"Variable '%-.64s' is a LOCAL variable and can't be used with SET GLOBAL", +"Access denied; you need the %-.128s privilege for this operation", +"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", "Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", "Variable '%-.64s' doesn't have a default value", "Variable '%-.64s' can't be set to the value of '%-.64s'", -"Wrong argument type to variable '%-.64s'", +"Incorrect argument type to variable '%-.64s'", "Variable '%-.64s' can only be set, not read", -"Wrong usage/placement of '%s'", +"Incorrect usage/placement of '%s'", "This version of MySQL doesn't yet support '%s'", "Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules" -"Variable '%-.64s' is a %s variable" +"Slave SQL thread ignored the query because of replicate-*-table rules", +"Variable '%-.64s' is a %s variable", +"Incorrect foreign key definition for '%-.64s': %s", +"Key reference and table reference don't match", +"Operand should contain %d column(s)", +"Subquery returns more than 1 row", +"Unknown prepared statement handler (%.*s) given to %s", +"Help database is corrupt or does not exist", +"Cyclic reference on subqueries", +"Converting column '%s' from %s to %s", +"Reference '%-.64s' not supported (%s)", +"Every derived table must have its own alias", +"Select %u was reduced during optimization", +"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", +"Client does not support authentication protocol requested by server; consider upgrading MySQL client", +"All parts of a SPATIAL index must be NOT NULL", +"COLLATION '%s' is not valid for CHARACTER SET '%s'", +"Slave is already running", +"Slave has already been stopped", +"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", +"ZLIB: Not enough memory", +"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", +"ZLIB: Input data corrupted", +"%d line(s) were cut by GROUP_CONCAT()", +"Row %ld doesn't contain data for all columns", +"Row %ld was truncated; it contained more data than there were input columns", +"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", +"Data truncated; out of range for column '%s' at row %ld", +"Data truncated for column '%s' at row %ld", +"Using storage engine %s for table '%s'", +"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", +"Can't drop one or more of the requested users", +"Can't revoke all privileges, grant for one or more of the requested users", +"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", +"Illegal mix of collations for operation '%s'", +"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", +"Unknown collation: '%-.64s'", +"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", +"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", +"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", +"Incorrect parameter or combination of parameters for START SLAVE UNTIL", +"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; 
otherwise, you are not safe in case of unexpected slave's mysqld restart", +"SQL thread is not to be started so UNTIL options are ignored", +"Incorrect index name '%-.100s'", +"Incorrect catalog name '%-.100s'", +"Query cache failed to set size %lu, new query cache size is %lu", +"Column '%-.64s' cannot be part of FULLTEXT index", +"Unknown key cache '%-.100s'", +"MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work", +"Unknown table engine '%s'", +"'%s' is deprecated, use '%s' instead", +"The target table %-.100s of the %s is not updateable", +"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", +"The MySQL server is running with the %s option so it cannot execute this statement", +"Column '%-.100s' has duplicated value '%-.64s' in %s" +"Truncated wrong %-.32s value: '%-.128s'" +"Incorrect table definition; There can only be one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" +"Invalid ON UPDATE clause for '%-.64s' column", +"This command is not supported in the prepared statement protocol yet", +"Got NDB error %d '%-.100s'", +"Got temporary NDB error %d '%-.100s'", +"Unknown or incorrect time zone: '%-.64s'", +"Invalid TIMESTAMP value in column '%s' at row %ld", +"Invalid %s character string: '%.64s'", +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" +"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/korean/errmsg.txt b/sql/share/korean/errmsg.txt index 3b027796c3b..aeafef9d159 100644 --- a/sql/share/korean/errmsg.txt +++ b/sql/share/korean/errmsg.txt @@ -14,6 +14,8 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +character-set=euckr + "hashchk", "isamchk", "¾Æ´Ï¿À", @@ -30,12 +32,12 @@ "'%-.64s'ÀÇ »óŸ¦ ¾òÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)", "¼öÇà µð·ºÅ丮¸¦ ãÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)", "ÈÀÏÀ» Àá±×Áö(lock) ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)", -"ÈÀÏÀ» ¿Áö ¸øÇß½À´Ï´Ù.: '%-.64s'. (¿¡·¯¹øÈ£: %d)", +"ÈÀÏÀ» ¿Áö ¸øÇß½À´Ï´Ù.: '%-.64s' (¿¡·¯¹øÈ£: %d)", "ÈÀÏÀ» ãÁö ¸øÇß½À´Ï´Ù.: '%-.64s' (¿¡·¯¹øÈ£: %d)", "'%-.64s'µð·ºÅ丮¸¦ ÀÐÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)", "'%-.64s'µð·ºÅ丮·Î À̵¿ÇÒ ¼ö ¾ø¾ú½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)", "Å×À̺í '%-.64s'¿¡¼ ¸¶Áö¸·À¸·Î ÀÐÀº ÈÄ Record°¡ º¯°æµÇ¾ú½À´Ï´Ù.", -"Disk full (%s). ´Ù¸¥ »ç¶÷ÀÌ Áö¿ï¶§±îÁö ±â´Ù¸³´Ï´Ù.....", +"Disk full (%s). ´Ù¸¥ »ç¶÷ÀÌ Áö¿ï¶§±îÁö ±â´Ù¸³´Ï´Ù...", "±â·ÏÇÒ ¼ö ¾øÀ¾´Ï´Ù., Å×À̺í '%-.64s'¿¡¼ Áߺ¹ Ű", "'%-.64s'´Ý´Â Áß ¿¡·¯ (¿¡·¯¹øÈ£: %d)", "'%-.64s'ÈÀÏ Àб⠿¡·¯ (¿¡·¯¹øÈ£: %d)", @@ -58,10 +60,10 @@ "Out of memory; mysqld³ª ¶Ç´Ù¸¥ ÇÁ·Î¼¼¼¿¡¼ »ç¿ë°¡´ÉÇÑ ¸Þ¸ð¸®¸¦ »ç¿ëÇÑÁö äũÇϽÿÀ. ¸¸¾à ±×·¸Áö ¾Ê´Ù¸é ulimit ¸í·ÉÀ» ÀÌ¿¿ëÇÏ¿© ´õ¸¹Àº ¸Þ¸ð¸®¸¦ »ç¿ëÇÒ ¼ö ÀÖµµ·Ï Çϰųª ½º¿Ò ½ºÆÐÀ̽º¸¦ Áõ°¡½ÃŰ½Ã¿À", "´ç½ÅÀÇ ÄÄÇ»ÅÍÀÇ È£½ºÆ®À̸§À» ¾òÀ» ¼ö ¾øÀ¾´Ï´Ù.", "Bad handshake", -"'%-.32s@%-.64s' »ç¿ëÀÚ´Â '%-.64s' µ¥ÀÌŸº£À̽º¿¡ Á¢±ÙÀÌ °ÅºÎ µÇ¾ú½À´Ï´Ù.", -"'%-.32s@%-.64s' »ç¿ëÀÚ´Â Á¢±ÙÀÌ °ÅºÎ µÇ¾ú½À´Ï´Ù. (Using password: %s)", +"'%-.32s'@'%-.64s' »ç¿ëÀÚ´Â '%-.64s' µ¥ÀÌŸº£À̽º¿¡ Á¢±ÙÀÌ °ÅºÎ µÇ¾ú½À´Ï´Ù.", +"'%-.32s'@'%-.64s' »ç¿ëÀÚ´Â Á¢±ÙÀÌ °ÅºÎ µÇ¾ú½À´Ï´Ù. (using password: %s)", "¼±ÅÃµÈ µ¥ÀÌŸº£À̽º°¡ ¾ø½À´Ï´Ù.", -"¸í·É¾î°¡ ¹ºÁö ¸ð¸£°Ú¾î¿ä....", +"¸í·É¾î°¡ ¹ºÁö ¸ð¸£°Ú¾î¿ä...", "Ä®·³ '%-.64s'´Â ³Î(Null)ÀÌ µÇ¸é ¾ÈµË´Ï´Ù. ", "µ¥ÀÌŸº£À̽º '%-.64s'´Â ¾Ë¼ö ¾øÀ½", "Å×À̺í '%-.64s'´Â ÀÌ¹Ì Á¸ÀçÇÔ", @@ -107,7 +109,7 @@ "ALTER TABLE ¸í·ÉÀ¸·Î´Â ¸ðµç Ä®·³À» Áö¿ï ¼ö ¾ø½À´Ï´Ù. DROP TABLE ¸í·ÉÀ» ÀÌ¿ëÇϼ¼¿ä.", "'%-.64s'¸¦ DROPÇÒ ¼ö ¾ø½À´Ï´Ù. 
Ä®·³À̳ª ۰¡ Á¸ÀçÇÏ´ÂÁö äũÇϼ¼¿ä.", "·¹ÄÚµå: %ld°³ Áߺ¹: %ld°³ °æ°í: %ld°³", -"INSERT TABLE '%-.64s' ´Â FROM Å×À̺í list¿¡¼ Çã°¡µÇÁö ¾Ê¾Ò½À´Ï´Ù.", +"You can't specify target table '%-.64s' for update in FROM clause", "¾Ë¼ö ¾ø´Â ¾²·¹µå id: %lu", "¾²·¹µå(Thread) %luÀÇ ¼ÒÀ¯ÀÚ°¡ ¾Æ´Õ´Ï´Ù.", "¾î¶² Å×ÀÌºíµµ »ç¿ëµÇÁö ¾Ê¾Ò½À´Ï´Ù.", @@ -134,7 +136,7 @@ "Ä®·³ÀÌ ³Ê¹« ¸¹½À´Ï´Ù.", "³Ê¹« Å« row »çÀÌÁîÀÔ´Ï´Ù. BLOB¸¦ °è»êÇÏÁö ¾Ê°í ÃÖ´ë row »çÀÌÁî´Â %dÀÔ´Ï´Ù. ¾ó¸¶°£ÀÇ ÇʵåµéÀ» BLOB·Î ¹Ù²Ù¼Å¾ß °Ú±º¿ä..", "¾²·¹µå ½ºÅÃÀÌ ³ÑÃÆ½À´Ï´Ù. »ç¿ë: %ld°³ ½ºÅÃ: %ld°³. ¸¸¾à ÇÊ¿ä½Ã ´õÅ« ½ºÅÃÀ» ¿øÇÒ¶§¿¡´Â 'mysqld -O thread_stack=#' ¸¦ Á¤ÀÇÇϼ¼¿ä", -"Cross dependency found in OUTER JOIN. Examine your ON conditions", +"Cross dependency found in OUTER JOIN; examine your ON conditions", "'%-.64s' Ä®·³ÀÌ UNIQUE³ª INDEX¸¦ »ç¿ëÇÏ¿´Áö¸¸ NOT NULLÀÌ Á¤ÀǵÇÁö ¾Ê¾Ò±º¿ä...", "'%-.64s' ÇÔ¼ö¸¦ ·ÎµåÇÏÁö ¸øÇß½À´Ï´Ù.", "'%-.64s' ÇÔ¼ö¸¦ ÃʱâÈ ÇÏÁö ¸øÇß½À´Ï´Ù.; %-.80s", @@ -154,10 +156,10 @@ "Å×À̺íÀ» ´Ù½Ã ¿¼ö ¾ø±º¿ä: '%-.64s", "NULL °ªÀ» À߸ø »ç¿ëÇϼ̱º¿ä...", "regexp¿¡¼ '%-.64s'°¡ ³µ½À´Ï´Ù.", -"Mixing of GROUP Ä®·³s (MIN(),MAX(),COUNT()...) with no GROUP Ä®·³s is illegal if there is no GROUP BY clause", +"Mixing of GROUP Ä®·³s (MIN(),MAX(),COUNT(),...) with no GROUP Ä®·³s is illegal if there is no GROUP BY clause", "»ç¿ëÀÚ '%-.32s' (È£½ºÆ® '%-.64s')¸¦ À§ÇÏ¿© Á¤ÀÇµÈ ±×·± ½ÂÀÎÀº ¾ø½À´Ï´Ù.", -"'%-.16s' ¸í·ÉÀº ´ÙÀ½ »ç¿ëÀÚ¿¡°Ô °ÅºÎµÇ¾ú½À´Ï´Ù. : '%-.32s@%-.64s' for Å×À̺í '%-.64s'", -"'%-.16s' ¸í·ÉÀº ´ÙÀ½ »ç¿ëÀÚ¿¡°Ô °ÅºÎµÇ¾ú½À´Ï´Ù. : '%-.32s@%-.64s' for Ä®·³ '%-.64s' in Å×À̺í '%-.64s'", +"'%-.16s' ¸í·ÉÀº ´ÙÀ½ »ç¿ëÀÚ¿¡°Ô °ÅºÎµÇ¾ú½À´Ï´Ù. : '%-.32s'@'%-.64s' for Å×À̺í '%-.64s'", +"'%-.16s' ¸í·ÉÀº ´ÙÀ½ »ç¿ëÀÚ¿¡°Ô °ÅºÎµÇ¾ú½À´Ï´Ù. : '%-.32s'@'%-.64s' for Ä®·³ '%-.64s' in Å×À̺í '%-.64s'", "À߸øµÈ GRANT/REVOKE ¸í·É. ¾î¶² ±Ç¸®¿Í ½ÂÀÎÀÌ »ç¿ëµÇ¾î Áú ¼ö ÀÖ´ÂÁö ¸Þ´º¾óÀ» º¸½Ã¿À.", "½ÂÀÎ(GRANT)À» À§ÇÏ¿© »ç¿ëÇÑ »ç¿ëÀÚ³ª È£½ºÆ®ÀÇ °ªµéÀÌ ³Ê¹« ±é´Ï´Ù.", "Å×À̺í '%-.64s.%s' ´Â Á¸ÀçÇÏÁö ¾Ê½À´Ï´Ù.", @@ -176,7 +178,7 @@ "Åë½Å ÆÐŶÀ» Àд Áß timeoutÀÌ ¹ß»ýÇÏ¿´½À´Ï´Ù.", "Åë½Å ÆÐŶÀ» ±â·ÏÇÏ´Â Áß ¿À·ù°¡ ¹ß»ýÇÏ¿´½À´Ï´Ù.", "Åë½Å ÆÐÆÂÀ» ±â·ÏÇÏ´Â Áß timeoutÀÌ ¹ß»ýÇÏ¿´½À´Ï´Ù.", -"Result string is longer than max_allowed_packet", +"Result string is longer than 'max_allowed_packet' bytes", "The used table type doesn't support BLOB/TEXT columns", "The used table type doesn't support AUTO_INCREMENT columns", "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES", @@ -185,7 +187,7 @@ "All tables in the MERGE table are not defined identically", "Can't write, because of unique constraint, to table '%-.64s'", "BLOB column '%-.64s' used in key specification without a key length", -"All parts of a PRIMARY KEY must be NOT NULL; If you need NULL in a key, use UNIQUE instead", +"All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead", "Result consisted of more than one row", "This table type requires a primary key", "This version of MySQL is not compiled with RAID support", @@ -210,24 +212,24 @@ "Unknown system variable '%-.64s'", "Table '%-.64s' is marked as crashed and should be repaired", "Table '%-.64s' is marked as crashed and last (automatic?) repair failed", -"Warning: Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage. 
Increase this mysqld variable and try again", -"This operation cannot be performed with a running slave, run SLAVE STOP first", -"This operation requires a running slave, configure slave and do SLAVE START", -"The server is not configured as slave, fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure, more error messages can be found in the MySQL error log", -"Could not create slave thread, check system resources", +"Some non-transactional changed tables couldn't be rolled back", +"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", +"This operation cannot be performed with a running slave; run STOP SLAVE first", +"This operation requires a running slave; configure slave and do START SLAVE", +"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", +"Could not initialize master info structure; more error messages can be found in the MySQL error log", +"Could not create slave thread; check system resources", "User %-.64s has already more than 'max_user_connections' active connections", "You may only use constant expressions with SET", -"Lock wait timeout exceeded", +"Lock wait timeout exceeded; try restarting transaction", "The total number of locks exceeds the lock table size", "Update locks cannot be acquired during a READ UNCOMMITTED transaction", "DROP DATABASE not allowed while thread is holding global read lock", "CREATE DATABASE not allowed while thread is holding global read lock", -"Wrong arguments to %s", -"%-.32s@%-.64s is not allowed to create new users", -"Incorrect table definition; All MERGE tables must be in the same database", -"Deadlock found when trying to get lock; Try restarting transaction", +"Incorrect arguments to %s", +"'%-.32s'@'%-.64s' is not allowed to create new users", +"Incorrect table definition; all MERGE tables must be in the same database", +"Deadlock found when trying to get lock; try restarting transaction", "The used table type doesn't support FULLTEXT indexes", "Cannot add foreign key constraint", "Cannot add a child row: a foreign key constraint fails", @@ -235,22 +237,86 @@ "Error connecting to master: %-.128s", "Error running query on master: %-.128s", "Error when executing command %s: %-.128s", -"Wrong usage of %s and %s", +"Incorrect usage of %s and %s", "The used SELECT statements have a different number of columns", "Can't execute the query because you have a conflicting read lock", "Mixing of transactional and non-transactional tables is disabled", "Option '%s' used twice in statement", "User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied. 
You need the %-.128s privilege for this operation", -"Variable '%-.64s' is a LOCAL variable and can't be used with SET GLOBAL", +"Access denied; you need the %-.128s privilege for this operation", +"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", "Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", "Variable '%-.64s' doesn't have a default value", "Variable '%-.64s' can't be set to the value of '%-.64s'", -"Wrong argument type to variable '%-.64s'", +"Incorrect argument type to variable '%-.64s'", "Variable '%-.64s' can only be set, not read", -"Wrong usage/placement of '%s'", +"Incorrect usage/placement of '%s'", "This version of MySQL doesn't yet support '%s'", "Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules" -"Variable '%-.64s' is a %s variable" +"Slave SQL thread ignored the query because of replicate-*-table rules", +"Variable '%-.64s' is a %s variable", +"Incorrect foreign key definition for '%-.64s': %s", +"Key reference and table reference don't match", +"Operand should contain %d column(s)", +"Subquery returns more than 1 row", +"Unknown prepared statement handler (%.*s) given to %s", +"Help database is corrupt or does not exist", +"Cyclic reference on subqueries", +"Converting column '%s' from %s to %s", +"Reference '%-.64s' not supported (%s)", +"Every derived table must have its own alias", +"Select %u was reduced during optimization", +"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", +"Client does not support authentication protocol requested by server; consider upgrading MySQL client", +"All parts of a SPATIAL index must be NOT NULL", +"COLLATION '%s' is not valid for CHARACTER SET '%s'", +"Slave is already running", +"Slave has already been stopped", +"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", +"ZLIB: Not enough memory", +"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", +"ZLIB: Input data corrupted", +"%d line(s) were cut by GROUP_CONCAT()", +"Row %ld doesn't contain data for all columns", +"Row %ld was truncated; it contained more data than there were input columns", +"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", +"Data truncated; out of range for column '%s' at row %ld", +"Data truncated for column '%s' at row %ld", +"Using storage engine %s for table '%s'", +"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", +"Can't drop one or more of the requested users", +"Can't revoke all privileges, grant for one or more of the requested users", +"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", +"Illegal mix of collations for operation '%s'", +"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", +"Unknown collation: '%-.64s'", +"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", +"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", +"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", +"Incorrect parameter or combination of parameters for START SLAVE UNTIL", +"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; 
otherwise, you are not safe in case of unexpected slave's mysqld restart", +"SQL thread is not to be started so UNTIL options are ignored", +"Incorrect index name '%-.100s'", +"Incorrect catalog name '%-.100s'", +"Query cache failed to set size %lu, new query cache size is %lu", +"Column '%-.64s' cannot be part of FULLTEXT index", +"Unknown key cache '%-.100s'", +"MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work", +"Unknown table engine '%s'", +"'%s' is deprecated, use '%s' instead", +"The target table %-.100s of the %s is not updateable", +"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", +"The MySQL server is running with the %s option so it cannot execute this statement", +"Column '%-.100s' has duplicated value '%-.64s' in %s" +"Truncated wrong %-.32s value: '%-.128s'" +"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" +"Invalid ON UPDATE clause for '%-.64s' column", +"This command is not supported in the prepared statement protocol yet", +"Got error %d '%-.100s' from %s", +"Got temporary error %d '%-.100s' from %s", +"Unknown or incorrect time zone: '%-.64s'", +"Invalid TIMESTAMP value in column '%s' at row %ld", +"Invalid %s character string: '%.64s'", +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" +"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/norwegian-ny/errmsg.txt b/sql/share/norwegian-ny/errmsg.txt index 5b994d2dc5b..3f60876348f 100644 --- a/sql/share/norwegian-ny/errmsg.txt +++ b/sql/share/norwegian-ny/errmsg.txt @@ -16,15 +16,17 @@ /* Roy-Magne Mo rmo@www.hivolda.no 97 */ +character-set=latin1 + "hashchk", "isamchk", "NEI", "JA", "Kan ikkje opprette fila '%-.64s' (Feilkode: %d)", "Kan ikkje opprette tabellen '%-.64s' (Feilkode: %d)", -"Kan ikkje opprette databasen '%-.64s'. Feil %d", -"Kan ikkje opprette databasen '%-.64s'. Databasen eksisterer", -"Kan ikkje fjerne (drop) '%-.64s'. Databasen eksisterer ikkje", +"Kan ikkje opprette databasen '%-.64s' (Feilkode: %d)", +"Kan ikkje opprette databasen '%-.64s'; databasen eksisterer", +"Kan ikkje fjerne (drop) '%-.64s'; databasen eksisterer ikkje", "Feil ved fjerning (drop) av databasen (kan ikkje slette '%-.64s', feil %d)", "Feil ved sletting av database (kan ikkje slette katalogen '%-.64s', feil %d)", "Feil ved sletting av '%-.64s' (Feilkode: %d)", @@ -32,12 +34,12 @@ "Kan ikkje lese statusen til '%-.64s' (Feilkode: %d)", "Kan ikkje lese aktiv katalog(Feilkode: %d)", "Kan ikkje låse fila (Feilkode: %d)", -"Kan ikkje åpne fila: '%-.64s'. (Feilkode: %d)", +"Kan ikkje åpne fila: '%-.64s' (Feilkode: %d)", "Kan ikkje finne fila: '%-.64s' (Feilkode: %d)", "Kan ikkje lese katalogen '%-.64s' (Feilkode: %d)", "Kan ikkje skifte katalog til '%-.64s' (Feilkode: %d)", "Posten har vorte endra sidan den sist vart lesen '%-.64s'", -"Ikkje meir diskplass (%s). Ventar på å få frigjort plass....", +"Ikkje meir diskplass (%s). 
Ventar på å få frigjort plass...", "Kan ikkje skrive, flere like nyklar i tabellen '%-.64s'", "Feil ved lukking av '%-.64s' (Feilkode: %d)", "Feil ved lesing av '%-.64s' (Feilkode: %d)", @@ -50,8 +52,8 @@ "Tabell håndteraren for '%-.64s' har ikkje denne moglegheita", "Kan ikkje finne posten i '%-.64s'", "Feil informasjon i fila: '%-.64s'", -"Tabellen '%-.64s' har feil i nykkelfila, Prøv å reparere den", -"Gammel nykkelfil for tabellen '%-.64s'; Reparer den!", +"Tabellen '%-.64s' har feil i nykkelfila; prøv å reparere den", +"Gammel nykkelfil for tabellen '%-.64s'; reparer den!", "'%-.64s' er skrivetryggja", "Ikkje meir minne. Start på nytt tenesten og prøv igjen (trengte %d bytar)", "Ikkje meir sorteringsminne. Auk sorteringsminnet (sorteringsbffer storleik) for tenesten", @@ -60,8 +62,8 @@ "Tomt for tråd plass/minne", "Kan ikkje få tak i vertsnavn for di adresse", "Feil handtrykk (handshake)", -"Tilgang ikkje tillate for brukar: '%-.32s@%-.64s' til databasen '%-.64s' nekta", -"Tilgang ikke tillate for brukar: '%-.32s@%-.64s' (Brukar passord: %s)", +"Tilgang ikkje tillate for brukar: '%-.32s'@'%-.64s' til databasen '%-.64s' nekta", +"Tilgang ikke tillate for brukar: '%-.32s'@'%-.64s' (Brukar passord: %s)", "Ingen database vald", "Ukjent kommando", "Kolonne '%-.64s' kan ikkje vere null", @@ -109,7 +111,7 @@ "Ein kan ikkje slette alle felt med ALTER TABLE. Bruk DROP TABLE istadenfor.", "Kan ikkje DROP '%-.64s'. Undersøk om felt/nøkkel eksisterar.", "Postar: %ld Like: %ld Åtvaringar: %ld", -"INSERT TABLE '%-.64s' er ikkje tillate i FROM tabell liste", +"You can't specify target table '%-.64s' for update in FROM clause", "Ukjent tråd id: %lu", "Du er ikkje eigar av tråd %lu", "Ingen tabellar i bruk", @@ -132,35 +134,35 @@ "A table must have at least 1 column", "The table '%-.64s' is full", "Unknown character set: '%-.64s'", -"Too many tables. MySQL can only use %d tables in a join", -"Too many fields", -"Too big row size. The maximum row size, not counting blobs, is %d. You have to change some fields to blobs", +"Too many tables; MySQL can only use %d tables in a join", +"Too many columns", +"Row size too large. The maximum row size for the used table type, not counting BLOBs, is %ld. You have to change some columns to TEXT or BLOBs", "Thread stack overrun: Used: %ld of a %ld stack. Use 'mysqld -O thread_stack=#' to specify a bigger stack if needed", -"Cross dependency found in OUTER JOIN. Examine your ON conditions", +"Cross dependency found in OUTER JOIN; examine your ON conditions", "Column '%-.32s' is used with UNIQUE or INDEX but is not defined as NOT NULL", "Can't load function '%-.64s'", "Can't initialize function '%-.64s'; %-.80s", "No paths allowed for shared library", -"Function '%-.64s' already exist", +"Function '%-.64s' already exists", "Can't open shared library '%-.64s' (errno: %d %s)", "Can't find function '%-.64s' in library'", "Function '%-.64s' is not defined", -"Host '%-.64s' is blocked because of many connection errors. 
Unblock with 'mysqladmin flush-hosts'", +"Host '%-.64s' is blocked because of many connection errors; unblock with 'mysqladmin flush-hosts'", "Host '%-.64s' is not allowed to connect to this MySQL server", -"You are using MySQL as an anonymous users and anonymous users are not allowed to change passwords", +"You are using MySQL as an anonymous user and anonymous users are not allowed to change passwords", "You must have privileges to update tables in the mysql database to be able to change passwords for others", "Can't find any matching row in the user table", "Rows matched: %ld Changed: %ld Warnings: %ld", -"Can't create a new thread (errno %d). If you are not out of available memory you can consult the manual for any possible OS dependent bug", +"Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug", "Column count doesn't match value count at row %ld", "Can't reopen table: '%-.64s", "Invalid use of NULL value", "Got error '%-.64s' from regexp", -"Mixing of GROUP columns (MIN(),MAX(),COUNT()...) with no GROUP columns is illegal if there is no GROUP BY clause", +"Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) with no GROUP columns is illegal if there is no GROUP BY clause", "There is no such grant defined for user '%-.32s' on host '%-.64s'", -"%-.16s command denied to user: '%-.32s@%-.64s' for table '%-.64s'", -"%-.16s command denied to user: '%-.32s@%-.64s' for column '%-.64s' in table '%-.64s'", -"Illegal GRANT/REVOKE command. Please consult the manual which privleges can be used.", +"%-.16s command denied to user '%-.32s'@'%-.64s' for table '%-.64s'", +"%-.16s command denied to user '%-.32s'@'%-.64s' for column '%-.64s' in table '%-.64s'", +"Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used.", "The host or user argument to GRANT is too long", "Table '%-.64s.%s' doesn't exist", "There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'", @@ -169,7 +171,7 @@ "Delayed insert thread couldn't get requested lock for table %-.64s", "Too many delayed threads in use", "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)", -"Got a packet bigger than 'max_allowed_packet'", +"Got a packet bigger than 'max_allowed_packet' bytes", "Got a read error from the connection pipe", "Got an error from fcntl()", "Got packets out of order", @@ -178,7 +180,7 @@ "Got timeout reading communication packets", "Got an error writing communication packets", "Got timeout writing communication packets", -"Result string is longer than max_allowed_packet", +"Result string is longer than 'max_allowed_packet' bytes", "The used table type doesn't support BLOB/TEXT columns", "The used table type doesn't support AUTO_INCREMENT columns", "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES", @@ -187,7 +189,7 @@ "All tables in the MERGE table are not defined identically", "Can't write, because of unique constraint, to table '%-.64s'", "BLOB column '%-.64s' used in key specification without a key length", -"All parts of a PRIMARY KEY must be NOT NULL; If you need NULL in a key, use UNIQUE instead", +"All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead", "Result consisted of more than one row", "This table type requires a primary key", "This version of MySQL is not compiled with RAID support", @@ -212,24 +214,24 @@ "Unknown system variable '%-.64s'", "Table '%-.64s' is marked as crashed and should 
be repaired", "Table '%-.64s' is marked as crashed and last (automatic?) repair failed", -"Warning: Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage. Increase this mysqld variable and try again", -"This operation cannot be performed with a running slave, run SLAVE STOP first", -"This operation requires a running slave, configure slave and do SLAVE START", -"The server is not configured as slave, fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure, more error messages can be found in the MySQL error log", -"Could not create slave thread, check system resources", +"Some non-transactional changed tables couldn't be rolled back", +"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", +"This operation cannot be performed with a running slave; run STOP SLAVE first", +"This operation requires a running slave; configure slave and do START SLAVE", +"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", +"Could not initialize master info structure; more error messages can be found in the MySQL error log", +"Could not create slave thread; check system resources", "User %-.64s has already more than 'max_user_connections' active connections", "You may only use constant expressions with SET", -"Lock wait timeout exceeded", +"Lock wait timeout exceeded; try restarting transaction", "The total number of locks exceeds the lock table size", "Update locks cannot be acquired during a READ UNCOMMITTED transaction", "DROP DATABASE not allowed while thread is holding global read lock", "CREATE DATABASE not allowed while thread is holding global read lock", -"Wrong arguments to %s", -"%-.32s@%-.64s is not allowed to create new users", -"Incorrect table definition; All MERGE tables must be in the same database", -"Deadlock found when trying to get lock; Try restarting transaction", +"Incorrect arguments to %s", +"'%-.32s'@'%-.64s' is not allowed to create new users", +"Incorrect table definition; all MERGE tables must be in the same database", +"Deadlock found when trying to get lock; try restarting transaction", "The used table type doesn't support FULLTEXT indexes", "Cannot add foreign key constraint", "Cannot add a child row: a foreign key constraint fails", @@ -237,22 +239,86 @@ "Error connecting to master: %-.128s", "Error running query on master: %-.128s", "Error when executing command %s: %-.128s", -"Wrong usage of %s and %s", +"Incorrect usage of %s and %s", "The used SELECT statements have a different number of columns", "Can't execute the query because you have a conflicting read lock", "Mixing of transactional and non-transactional tables is disabled", "Option '%s' used twice in statement", "User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied. 
You need the %-.128s privilege for this operation", -"Variable '%-.64s' is a LOCAL variable and can't be used with SET GLOBAL", +"Access denied; you need the %-.128s privilege for this operation", +"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", "Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", "Variable '%-.64s' doesn't have a default value", "Variable '%-.64s' can't be set to the value of '%-.64s'", -"Wrong argument type to variable '%-.64s'", +"Incorrect argument type to variable '%-.64s'", "Variable '%-.64s' can only be set, not read", -"Wrong usage/placement of '%s'", +"Incorrect usage/placement of '%s'", "This version of MySQL doesn't yet support '%s'", "Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules" -"Variable '%-.64s' is a %s variable" +"Slave SQL thread ignored the query because of replicate-*-table rules", +"Variable '%-.64s' is a %s variable", +"Incorrect foreign key definition for '%-.64s': %s", +"Key reference and table reference don't match", +"Operand should contain %d column(s)", +"Subquery returns more than 1 row", +"Unknown prepared statement handler (%.*s) given to %s", +"Help database is corrupt or does not exist", +"Cyclic reference on subqueries", +"Converting column '%s' from %s to %s", +"Reference '%-.64s' not supported (%s)", +"Every derived table must have its own alias", +"Select %u was reduced during optimization", +"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", +"Client does not support authentication protocol requested by server; consider upgrading MySQL client", +"All parts of a SPATIAL index must be NOT NULL", +"COLLATION '%s' is not valid for CHARACTER SET '%s'", +"Slave is already running", +"Slave has already been stopped", +"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", +"ZLIB: Not enough memory", +"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", +"ZLIB: Input data corrupted", +"%d line(s) were cut by GROUP_CONCAT()", +"Row %ld doesn't contain data for all columns", +"Row %ld was truncated; it contained more data than there were input columns", +"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", +"Data truncated; out of range for column '%s' at row %ld", +"Data truncated for column '%s' at row %ld", +"Using storage engine %s for table '%s'", +"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", +"Can't drop one or more of the requested users", +"Can't revoke all privileges, grant for one or more of the requested users", +"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", +"Illegal mix of collations for operation '%s'", +"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", +"Unknown collation: '%-.64s'", +"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", +"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", +"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", +"Incorrect parameter or combination of parameters for START SLAVE UNTIL", +"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; 
otherwise, you are not safe in case of unexpected slave's mysqld restart", +"SQL thread is not to be started so UNTIL options are ignored", +"Incorrect index name '%-.100s'", +"Incorrect catalog name '%-.100s'", +"Query cache failed to set size %lu, new query cache size is %lu", +"Column '%-.64s' cannot be part of FULLTEXT index", +"Unknown key cache '%-.100s'", +"MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work", +"Unknown table engine '%s'", +"'%s' is deprecated, use '%s' instead", +"The target table %-.100s of the %s is not updateable", +"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", +"The MySQL server is running with the %s option so it cannot execute this statement", +"Column '%-.100s' has duplicated value '%-.64s' in %s" +"Truncated wrong %-.32s value: '%-.128s'" +"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" +"Invalid ON UPDATE clause for '%-.64s' column", +"This command is not supported in the prepared statement protocol yet", +"Mottok feil %d '%-.100s' fra %s", +"Mottok temporary feil %d '%-.100s' fra %s", +"Unknown or incorrect time zone: '%-.64s'", +"Invalid TIMESTAMP value in column '%s' at row %ld", +"Invalid %s character string: '%.64s'", +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" +"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/norwegian/errmsg.txt b/sql/share/norwegian/errmsg.txt index 305bf10899b..badeed1c0dd 100644 --- a/sql/share/norwegian/errmsg.txt +++ b/sql/share/norwegian/errmsg.txt @@ -16,15 +16,17 @@ /* Roy-Magne Mo rmo@www.hivolda.no 97 */ +character-set=latin1 + "hashchk", "isamchk", "NEI", "JA", "Kan ikke opprette fila '%-.64s' (Feilkode: %d)", "Kan ikke opprette tabellen '%-.64s' (Feilkode: %d)", -"Kan ikke opprette databasen '%-.64s'. Feil %d", -"Kan ikke opprette databasen '%-.64s'. Databasen eksisterer", -"Kan ikke fjerne (drop) '%-.64s'. Databasen eksisterer ikke", +"Kan ikke opprette databasen '%-.64s' (Feilkode: %d)", +"Kan ikke opprette databasen '%-.64s'; databasen eksisterer", +"Kan ikke fjerne (drop) '%-.64s'; databasen eksisterer ikke", "Feil ved fjerning (drop) av databasen (kan ikke slette '%-.64s', feil %d)", "Feil ved sletting av database (kan ikke slette katalogen '%-.64s', feil %d)", "Feil ved sletting av '%-.64s' (Feilkode: %d)", @@ -32,12 +34,12 @@ "Kan ikke lese statusen til '%-.64s' (Feilkode: %d)", "Kan ikke lese aktiv katalog(Feilkode: %d)", "Kan ikke låse fila (Feilkode: %d)", -"Kan ikke åpne fila: '%-.64s'. (Feilkode: %d)", +"Kan ikke åpne fila: '%-.64s' (Feilkode: %d)", "Kan ikke finne fila: '%-.64s' (Feilkode: %d)", "Kan ikke lese katalogen '%-.64s' (Feilkode: %d)", "Kan ikke skifte katalog til '%-.64s' (Feilkode: %d)", "Posten har blitt endret siden den ble lest '%-.64s'", -"Ikke mer diskplass (%s). Venter på å få frigjort plass....", +"Ikke mer diskplass (%s). 
Venter på å få frigjort plass...", "Kan ikke skrive, flere like nøkler i tabellen '%-.64s'", "Feil ved lukking av '%-.64s' (Feilkode: %d)", "Feil ved lesing av '%-.64s' (Feilkode: %d)", @@ -50,8 +52,8 @@ "Tabell håndtereren for '%-.64s' har ikke denne muligheten", "Kan ikke finne posten i '%-.64s'", "Feil informasjon i filen: '%-.64s'", -"Tabellen '%-.64s' har feil i nøkkelfilen, forsøk å reparer den", -"Gammel nøkkelfil for tabellen '%-.64s'; Reparer den!", +"Tabellen '%-.64s' har feil i nøkkelfilen; forsøk å reparer den", +"Gammel nøkkelfil for tabellen '%-.64s'; reparer den!", "'%-.64s' er skrivebeskyttet", "Ikke mer minne. Star på nytt tjenesten og prøv igjen (trengte %d byter)", "Ikke mer sorteringsminne. Øk sorteringsminnet (sort buffer size) for tjenesten", @@ -60,8 +62,8 @@ "Tomt for tråd plass/minne", "Kan ikke få tak i vertsnavn for din adresse", "Feil håndtrykk (handshake)", -"Tilgang nektet for bruker: '%-.32s@%-.64s' til databasen '%-.64s' nektet", -"Tilgang nektet for bruker: '%-.32s@%-.64s' (Bruker passord: %s)", +"Tilgang nektet for bruker: '%-.32s'@'%-.64s' til databasen '%-.64s' nektet", +"Tilgang nektet for bruker: '%-.32s'@'%-.64s' (Bruker passord: %s)", "Ingen database valgt", "Ukjent kommando", "Kolonne '%-.64s' kan ikke vere null", @@ -109,7 +111,7 @@ "En kan ikke slette alle felt med ALTER TABLE. Bruk DROP TABLE isteden.", "Kan ikke DROP '%-.64s'. Undersøk om felt/nøkkel eksisterer.", "Poster: %ld Like: %ld Advarsler: %ld", -"INSERT TABLE '%-.64s' er ikke tillatt i FROM tabell liste", +"You can't specify target table '%-.64s' for update in FROM clause", "Ukjent tråd id: %lu", "Du er ikke eier av tråden %lu", "Ingen tabeller i bruk", @@ -132,35 +134,35 @@ "A table must have at least 1 column", "The table '%-.64s' is full", "Unknown character set: '%-.64s'", -"Too many tables. MySQL can only use %d tables in a join", -"Too many fields", -"Too big row size. The maximum row size, not counting blobs, is %d. You have to change some fields to blobs", +"Too many tables; MySQL can only use %d tables in a join", +"Too many columns", +"Row size too large. The maximum row size for the used table type, not counting BLOBs, is %ld. You have to change some columns to TEXT or BLOBs", "Thread stack overrun: Used: %ld of a %ld stack. Use 'mysqld -O thread_stack=#' to specify a bigger stack if needed", -"Cross dependency found in OUTER JOIN. Examine your ON conditions", +"Cross dependency found in OUTER JOIN; examine your ON conditions", "Column '%-.32s' is used with UNIQUE or INDEX but is not defined as NOT NULL", "Can't load function '%-.64s'", "Can't initialize function '%-.64s'; %-.80s", "No paths allowed for shared library", -"Function '%-.64s' already exist", +"Function '%-.64s' already exists", "Can't open shared library '%-.64s' (errno: %d %s)", "Can't find function '%-.64s' in library'", "Function '%-.64s' is not defined", -"Host '%-.64s' is blocked because of many connection errors. 
Unblock with 'mysqladmin flush-hosts'", +"Host '%-.64s' is blocked because of many connection errors; unblock with 'mysqladmin flush-hosts'", "Host '%-.64s' is not allowed to connect to this MySQL server", -"You are using MySQL as an anonymous users and anonymous users are not allowed to change passwords", +"You are using MySQL as an anonymous user and anonymous users are not allowed to change passwords", "You must have privileges to update tables in the mysql database to be able to change passwords for others", "Can't find any matching row in the user table", "Rows matched: %ld Changed: %ld Warnings: %ld", -"Can't create a new thread (errno %d). If you are not out of available memory you can consult the manual for any possible OS dependent bug", +"Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug", "Column count doesn't match value count at row %ld", "Can't reopen table: '%-.64s", "Invalid use of NULL value", "Got error '%-.64s' from regexp", -"Mixing of GROUP columns (MIN(),MAX(),COUNT()...) with no GROUP columns is illegal if there is no GROUP BY clause", +"Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) with no GROUP columns is illegal if there is no GROUP BY clause", "There is no such grant defined for user '%-.32s' on host '%-.64s'", -"%-.16s command denied to user: '%-.32s@%-.64s' for table '%-.64s'", -"%-.16s command denied to user: '%-.32s@%-.64s' for column '%-.64s' in table '%-.64s'", -"Illegal GRANT/REVOKE command. Please consult the manual which privleges can be used.", +"%-.16s command denied to user '%-.32s'@'%-.64s' for table '%-.64s'", +"%-.16s command denied to user '%-.32s'@'%-.64s' for column '%-.64s' in table '%-.64s'", +"Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used.", "The host or user argument to GRANT is too long", "Table '%-.64s.%s' doesn't exist", "There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'", @@ -169,7 +171,7 @@ "Delayed insert thread couldn't get requested lock for table %-.64s", "Too many delayed threads in use", "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)", -"Got a packet bigger than 'max_allowed_packet'", +"Got a packet bigger than 'max_allowed_packet' bytes", "Got a read error from the connection pipe", "Got an error from fcntl()", "Got packets out of order", @@ -178,7 +180,7 @@ "Got timeout reading communication packets", "Got an error writing communication packets", "Got timeout writing communication packets", -"Result string is longer than max_allowed_packet", +"Result string is longer than 'max_allowed_packet' bytes", "The used table type doesn't support BLOB/TEXT columns", "The used table type doesn't support AUTO_INCREMENT columns", "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES", @@ -187,7 +189,7 @@ "All tables in the MERGE table are not defined identically", "Can't write, because of unique constraint, to table '%-.64s'", "BLOB column '%-.64s' used in key specification without a key length", -"All parts of a PRIMARY KEY must be NOT NULL; If you need NULL in a key, use UNIQUE instead", +"All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead", "Result consisted of more than one row", "This table type requires a primary key", "This version of MySQL is not compiled with RAID support", @@ -212,24 +214,24 @@ "Unknown system variable '%-.64s'", "Table '%-.64s' is marked as crashed and should 
be repaired", "Table '%-.64s' is marked as crashed and last (automatic?) repair failed", -"Warning: Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage. Increase this mysqld variable and try again", -"This operation cannot be performed with a running slave, run SLAVE STOP first", -"This operation requires a running slave, configure slave and do SLAVE START", -"The server is not configured as slave, fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure, more error messages can be found in the MySQL error log", -"Could not create slave thread, check system resources", +"Some non-transactional changed tables couldn't be rolled back", +"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", +"This operation cannot be performed with a running slave; run STOP SLAVE first", +"This operation requires a running slave; configure slave and do START SLAVE", +"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", +"Could not initialize master info structure; more error messages can be found in the MySQL error log", +"Could not create slave thread; check system resources", "User %-.64s has already more than 'max_user_connections' active connections", "You may only use constant expressions with SET", -"Lock wait timeout exceeded", +"Lock wait timeout exceeded; try restarting transaction", "The total number of locks exceeds the lock table size", "Update locks cannot be acquired during a READ UNCOMMITTED transaction", "DROP DATABASE not allowed while thread is holding global read lock", "CREATE DATABASE not allowed while thread is holding global read lock", -"Wrong arguments to %s", -"%-.32s@%-.64s is not allowed to create new users", -"Incorrect table definition; All MERGE tables must be in the same database", -"Deadlock found when trying to get lock; Try restarting transaction", +"Incorrect arguments to %s", +"'%-.32s'@'%-.64s' is not allowed to create new users", +"Incorrect table definition; all MERGE tables must be in the same database", +"Deadlock found when trying to get lock; try restarting transaction", "The used table type doesn't support FULLTEXT indexes", "Cannot add foreign key constraint", "Cannot add a child row: a foreign key constraint fails", @@ -237,22 +239,86 @@ "Error connecting to master: %-.128s", "Error running query on master: %-.128s", "Error when executing command %s: %-.128s", -"Wrong usage of %s and %s", +"Incorrect usage of %s and %s", "The used SELECT statements have a different number of columns", "Can't execute the query because you have a conflicting read lock", "Mixing of transactional and non-transactional tables is disabled", "Option '%s' used twice in statement", "User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied. 
You need the %-.128s privilege for this operation", -"Variable '%-.64s' is a LOCAL variable and can't be used with SET GLOBAL", +"Access denied; you need the %-.128s privilege for this operation", +"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", "Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", "Variable '%-.64s' doesn't have a default value", "Variable '%-.64s' can't be set to the value of '%-.64s'", -"Wrong argument type to variable '%-.64s'", +"Incorrect argument type to variable '%-.64s'", "Variable '%-.64s' can only be set, not read", -"Wrong usage/placement of '%s'", +"Incorrect usage/placement of '%s'", "This version of MySQL doesn't yet support '%s'", "Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules" -"Variable '%-.64s' is a %s variable" +"Slave SQL thread ignored the query because of replicate-*-table rules", +"Variable '%-.64s' is a %s variable", +"Incorrect foreign key definition for '%-.64s': %s", +"Key reference and table reference don't match", +"Operand should contain %d column(s)", +"Subquery returns more than 1 row", +"Unknown prepared statement handler (%.*s) given to %s", +"Help database is corrupt or does not exist", +"Cyclic reference on subqueries", +"Converting column '%s' from %s to %s", +"Reference '%-.64s' not supported (%s)", +"Every derived table must have its own alias", +"Select %u was reduced during optimization", +"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", +"Client does not support authentication protocol requested by server; consider upgrading MySQL client", +"All parts of a SPATIAL index must be NOT NULL", +"COLLATION '%s' is not valid for CHARACTER SET '%s'", +"Slave is already running", +"Slave has already been stopped", +"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", +"ZLIB: Not enough memory", +"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", +"ZLIB: Input data corrupted", +"%d line(s) were cut by GROUP_CONCAT()", +"Row %ld doesn't contain data for all columns", +"Row %ld was truncated; it contained more data than there were input columns", +"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", +"Data truncated; out of range for column '%s' at row %ld", +"Data truncated for column '%s' at row %ld", +"Using storage engine %s for table '%s'", +"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", +"Can't drop one or more of the requested users", +"Can't revoke all privileges, grant for one or more of the requested users", +"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", +"Illegal mix of collations for operation '%s'", +"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", +"Unknown collation: '%-.64s'", +"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", +"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", +"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", +"Incorrect parameter or combination of parameters for START SLAVE UNTIL", +"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; 
otherwise, you are not safe in case of unexpected slave's mysqld restart", +"SQL thread is not to be started so UNTIL options are ignored", +"Incorrect index name '%-.100s'", +"Incorrect catalog name '%-.100s'", +"Query cache failed to set size %lu, new query cache size is %lu", +"Column '%-.64s' cannot be part of FULLTEXT index", +"Unknown key cache '%-.100s'", +"MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work", +"Unknown table engine '%s'", +"'%s' is deprecated, use '%s' instead", +"The target table %-.100s of the %s is not updateable", +"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", +"The MySQL server is running with the %s option so it cannot execute this statement", +"Column '%-.100s' has duplicated value '%-.64s' in %s" +"Truncated wrong %-.32s value: '%-.128s'" +"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" +"Invalid ON UPDATE clause for '%-.64s' column", +"This command is not supported in the prepared statement protocol yet", +"Mottok feil %d '%-.100s' fa %s", +"Mottok temporary feil %d '%-.100s' fra %s", +"Unknown or incorrect time zone: '%-.64s'", +"Invalid TIMESTAMP value in column '%s' at row %ld", +"Invalid %s character string: '%.64s'", +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" +"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/polish/errmsg.txt b/sql/share/polish/errmsg.txt index 8d6b2fa542e..664a8e8e539 100644 --- a/sql/share/polish/errmsg.txt +++ b/sql/share/polish/errmsg.txt @@ -19,15 +19,17 @@ Charset ISO-8859-2 */ +character-set=latin2 + "hashchk", "isamchk", "NIE", "TAK", "Nie mo¿na stworzyæ pliku '%-.64s' (Kod b³êdu: %d)", "Nie mo¿na stworzyæ tabeli '%-.64s' (Kod b³êdu: %d)", -"Nie mo¿na stworzyæ bazy danych '%-.64s'. B³?d %d", -"Nie mo¿na stworzyæ bazy danych '%-.64s'. Baza danych ju¿ istnieje", -"Nie mo¿na usun?æ bazy danych '%-.64s'. Baza danych nie istnieje", +"Nie mo¿na stworzyæ bazy danych '%-.64s' (Kod b³êdu: %d)", +"Nie mo¿na stworzyæ bazy danych '%-.64s'; baza danych ju¿ istnieje", +"Nie mo¿na usun?æ bazy danych '%-.64s'; baza danych nie istnieje", "B³?d podczas usuwania bazy danych (nie mo¿na usun?æ '%-.64s', b³?d %d)", "B³?d podczas usuwania bazy danych (nie mo¿na wykonaæ rmdir '%-.64s', b³?d %d)", "B³?d podczas usuwania '%-.64s' (Kod b³êdu: %d)", @@ -35,12 +37,12 @@ "Nie mo¿na otrzymaæ statusu '%-.64s' (Kod b³êdu: %d)", "Nie mo¿na rozpoznaæ aktualnego katalogu (Kod b³êdu: %d)", "Nie mo¿na zablokowaæ pliku (Kod b³êdu: %d)", -"Nie mo¿na otworzyæ pliku: '%-.64s'. (Kod b³êdu: %d)", +"Nie mo¿na otworzyæ pliku: '%-.64s' (Kod b³êdu: %d)", "Nie mo¿na znale¥æ pliku: '%-.64s' (Kod b³êdu: %d)", "Nie mo¿na odczytaæ katalogu '%-.64s' (Kod b³êdu: %d)", "Nie mo¿na zmieniæ katalogu na '%-.64s' (Kod b³êdu: %d)", "Rekord zosta³ zmieniony od ostaniego odczytania z tabeli '%-.64s'", -"Dysk pe³ny (%s). Oczekiwanie na zwolnienie miejsca....", +"Dysk pe³ny (%s). Oczekiwanie na zwolnienie miejsca...", "Nie mo¿na zapisaæ, powtórzone klucze w tabeli '%-.64s'", "B³?d podczas zamykania '%-.64s' (Kod b³êdu: %d)", "B³?d podczas odczytu pliku '%-.64s' (Kod b³êdu: %d)", @@ -53,8 +55,8 @@ "Obs³uga tabeli '%-.64s' nie posiada tej opcji", "Nie mo¿na znale¥æ rekordu w '%-.64s'", "Niew³a?ciwa informacja w pliku: '%-.64s'", -"Niew³a?ciwy plik kluczy dla tabeli: '%-.64s'. 
Spróbuj go naprawiæ", -"Plik kluczy dla tabeli '%-.64s' jest starego typu; Napraw go!", +"Niew³a?ciwy plik kluczy dla tabeli: '%-.64s'; spróbuj go naprawiæ", +"Plik kluczy dla tabeli '%-.64s' jest starego typu; napraw go!", "'%-.64s' jest tylko do odczytu", "Zbyt ma³o pamiêci. Uruchom ponownie demona i spróbuj ponownie (potrzeba %d bajtów)", "Zbyt ma³o pamiêci dla sortowania. Zwiêksz wielko?æ bufora demona dla sortowania", @@ -63,8 +65,8 @@ "Zbyt ma³o miejsca/pamiêci dla w?tku", "Nie mo¿na otrzymaæ nazwy hosta dla twojego adresu", "Z³y uchwyt(handshake)", -"Access denied for user: '%-.32s@%-.64s' to database '%-.64s'", -"Access denied for user: '%-.32s@%-.64s' (Using password: %s)", +"Access denied for user '%-.32s'@'%-.64s' to database '%-.64s'", +"Access denied for user '%-.32s'@'%-.64s' (using password: %s)", "Nie wybrano ¿adnej bazy danych", "Nieznana komenda", "Kolumna '%-.64s' nie mo¿e byæ null", @@ -112,7 +114,7 @@ "Nie mo¿na usun?æ wszystkich pól wykorzystuj?c ALTER TABLE. W zamian u¿yj DROP TABLE", "Nie mo¿na wykonaæ operacji DROP '%-.64s'. Sprawd¥, czy to pole/klucz istnieje", "Rekordów: %ld Duplikatów: %ld Ostrze¿eñ: %ld", -"Operacja INSERT TABLE '%-.64s' nie jest dozwolona w li?cie tabel w FROM", +"You can't specify target table '%-.64s' for update in FROM clause", "Nieznany identyfikator w?tku: %lu", "Nie jeste? w³a?cicielem w?tku %lu", "Nie ma ¿adej u¿ytej tabeli", @@ -126,8 +128,8 @@ "Operacja SELECT bêdzie dotyczy³a zbyt wielu rekordów i prawdopodobnie zajmie bardzo du¿o czasu. Sprawd¥ warunek WHERE i u¿yj SQL_OPTION BIG_SELECTS=1 je?li operacja SELECT jest poprawna", "Unknown error", "Unkown procedure %s", -"Wrong parameter count to procedure %s", -"Wrong parameters to procedure %s", +"Incorrect parameter count to procedure %s", +"Incorrect parameters to procedure %s", "Unknown table '%-.64s' in %s", "Field '%-.64s' specified twice", "Invalid use of group function", @@ -135,35 +137,35 @@ "A table must have at least 1 column", "The table '%-.64s' is full", "Unknown character set: '%-.64s'", -"Too many tables. MySQL can only use %d tables in a join", -"Too many fields", -"Too big row size. The maximum row size, not counting blobs, is %d. You have to change some fields to blobs", +"Too many tables; MySQL can only use %d tables in a join", +"Too many columns", +"Row size too large. The maximum row size for the used table type, not counting BLOBs, is %ld. You have to change some columns to TEXT or BLOBs", "Thread stack overrun: Used: %ld of a %ld stack. Use 'mysqld -O thread_stack=#' to specify a bigger stack if needed", -"Cross dependency found in OUTER JOIN. Examine your ON conditions", +"Cross dependency found in OUTER JOIN; examine your ON conditions", "Column '%-.32s' is used with UNIQUE or INDEX but is not defined as NOT NULL", "Can't load function '%-.64s'", "Can't initialize function '%-.64s'; %-.80s", "No paths allowed for shared library", -"Function '%-.64s' already exist", +"Function '%-.64s' already exists", "Can't open shared library '%-.64s' (errno: %d %s)", "Can't find function '%-.64s' in library'", "Function '%-.64s' is not defined", -"Host '%-.64s' is blocked because of many connection errors. 
Unblock with 'mysqladmin flush-hosts'",
+"Host '%-.64s' is blocked because of many connection errors; unblock with 'mysqladmin flush-hosts'",
 "Host '%-.64s' is not allowed to connect to this MySQL server",
-"You are using MySQL as an anonymous users and anonymous users are not allowed to change passwords",
+"You are using MySQL as an anonymous user and anonymous users are not allowed to change passwords",
 "You must have privileges to update tables in the mysql database to be able to change passwords for others",
 "Can't find any matching row in the user table",
 "Rows matched: %ld Changed: %ld Warnings: %ld",
-"Can't create a new thread (errno %d). If you are not out of available memory you can consult the manual for any possible OS dependent bug",
+"Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug",
 "Column count doesn't match value count at row %ld",
 "Can't reopen table: '%-.64s",
 "Invalid use of NULL value",
 "Got error '%-.64s' from regexp",
-"Mixing of GROUP columns (MIN(),MAX(),COUNT()...) with no GROUP columns is illegal if there is no GROUP BY clause",
+"Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) with no GROUP columns is illegal if there is no GROUP BY clause",
 "There is no such grant defined for user '%-.32s' on host '%-.64s'",
-"%-.16s command denied to user: '%-.32s@%-.64s' for table '%-.64s'",
-"%-.16s command denied to user: '%-.32s@%-.64s' for column '%-.64s' in table '%-.64s'",
-"Illegal GRANT/REVOKE command. Please consult the manual which privleges can be used.",
+"%-.16s command denied to user '%-.32s'@'%-.64s' for table '%-.64s'",
+"%-.16s command denied to user '%-.32s'@'%-.64s' for column '%-.64s' in table '%-.64s'",
+"Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used.",
 "The host or user argument to GRANT is too long",
 "Table '%-.64s.%s' doesn't exist",
 "There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'",
@@ -172,7 +174,7 @@
 "Delayed insert thread couldn't get requested lock for table %-.64s",
 "Too many delayed threads in use",
 "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)",
-"Got a packet bigger than 'max_allowed_packet'",
+"Got a packet bigger than 'max_allowed_packet' bytes",
 "Got a read error from the connection pipe",
 "Got an error from fcntl()",
 "Got packets out of order",
@@ -181,7 +183,7 @@
 "Got timeout reading communication packets",
 "Got an error writing communication packets",
 "Got timeout writing communication packets",
-"Result string is longer than max_allowed_packet",
+"Result string is longer than 'max_allowed_packet' bytes",
 "The used table type doesn't support BLOB/TEXT columns",
 "The used table type doesn't support AUTO_INCREMENT columns",
 "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES",
@@ -190,7 +192,7 @@
 "All tables in the MERGE table are not defined identically",
 "Can't write, because of unique constraint, to table '%-.64s'",
 "BLOB column '%-.64s' used in key specification without a key length",
-"All parts of a PRIMARY KEY must be NOT NULL; If you need NULL in a key, use UNIQUE instead",
+"All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead",
 "Result consisted of more than one row",
 "This table type requires a primary key",
 "This version of MySQL is not compiled with RAID support",
@@ -215,24 +217,24 @@
 "Unknown system variable '%-.64s'",
 "Table '%-.64s' is marked as crashed and should 
be repaired", "Table '%-.64s' is marked as crashed and last (automatic?) repair failed", -"Warning: Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage. Increase this mysqld variable and try again", -"This operation cannot be performed with a running slave, run SLAVE STOP first", -"This operation requires a running slave, configure slave and do SLAVE START", -"The server is not configured as slave, fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure, more error messages can be found in the MySQL error log", -"Could not create slave thread, check system resources", +"Some non-transactional changed tables couldn't be rolled back", +"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", +"This operation cannot be performed with a running slave; run STOP SLAVE first", +"This operation requires a running slave; configure slave and do START SLAVE", +"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", +"Could not initialize master info structure; more error messages can be found in the MySQL error log", +"Could not create slave thread; check system resources", "User %-.64s has already more than 'max_user_connections' active connections", "You may only use constant expressions with SET", -"Lock wait timeout exceeded", +"Lock wait timeout exceeded; try restarting transaction", "The total number of locks exceeds the lock table size", "Update locks cannot be acquired during a READ UNCOMMITTED transaction", "DROP DATABASE not allowed while thread is holding global read lock", "CREATE DATABASE not allowed while thread is holding global read lock", -"Wrong arguments to %s", -"%-.32s@%-.64s is not allowed to create new users", -"Incorrect table definition; All MERGE tables must be in the same database", -"Deadlock found when trying to get lock; Try restarting transaction", +"Incorrect arguments to %s", +"'%-.32s'@'%-.64s' is not allowed to create new users", +"Incorrect table definition; all MERGE tables must be in the same database", +"Deadlock found when trying to get lock; try restarting transaction", "The used table type doesn't support FULLTEXT indexes", "Cannot add foreign key constraint", "Cannot add a child row: a foreign key constraint fails", @@ -240,22 +242,86 @@ "Error connecting to master: %-.128s", "Error running query on master: %-.128s", "Error when executing command %s: %-.128s", -"Wrong usage of %s and %s", +"Incorrect usage of %s and %s", "The used SELECT statements have a different number of columns", "Can't execute the query because you have a conflicting read lock", "Mixing of transactional and non-transactional tables is disabled", "Option '%s' used twice in statement", "User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied. 
You need the %-.128s privilege for this operation", -"Variable '%-.64s' is a LOCAL variable and can't be used with SET GLOBAL", +"Access denied; you need the %-.128s privilege for this operation", +"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", "Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", "Variable '%-.64s' doesn't have a default value", "Variable '%-.64s' can't be set to the value of '%-.64s'", -"Wrong argument type to variable '%-.64s'", +"Incorrect argument type to variable '%-.64s'", "Variable '%-.64s' can only be set, not read", -"Wrong usage/placement of '%s'", +"Incorrect usage/placement of '%s'", "This version of MySQL doesn't yet support '%s'", "Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules" -"Variable '%-.64s' is a %s variable" +"Slave SQL thread ignored the query because of replicate-*-table rules", +"Variable '%-.64s' is a %s variable", +"Incorrect foreign key definition for '%-.64s': %s", +"Key reference and table reference don't match", +"Operand should contain %d column(s)", +"Subquery returns more than 1 row", +"Unknown prepared statement handler (%.*s) given to %s", +"Help database is corrupt or does not exist", +"Cyclic reference on subqueries", +"Converting column '%s' from %s to %s", +"Reference '%-.64s' not supported (%s)", +"Every derived table must have its own alias", +"Select %u was reduced during optimization", +"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", +"Client does not support authentication protocol requested by server; consider upgrading MySQL client", +"All parts of a SPATIAL index must be NOT NULL", +"COLLATION '%s' is not valid for CHARACTER SET '%s'", +"Slave is already running", +"Slave has already been stopped", +"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", +"ZLIB: Not enough memory", +"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", +"ZLIB: Input data corrupted", +"%d line(s) were cut by GROUP_CONCAT()", +"Row %ld doesn't contain data for all columns", +"Row %ld was truncated; it contained more data than there were input columns", +"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", +"Data truncated; out of range for column '%s' at row %ld", +"Data truncated for column '%s' at row %ld", +"Using storage engine %s for table '%s'", +"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", +"Can't drop one or more of the requested users", +"Can't revoke all privileges, grant for one or more of the requested users", +"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", +"Illegal mix of collations for operation '%s'", +"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", +"Unknown collation: '%-.64s'", +"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", +"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", +"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", +"Incorrect parameter or combination of parameters for START SLAVE UNTIL", +"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; 
otherwise, you are not safe in case of unexpected slave's mysqld restart", +"SQL thread is not to be started so UNTIL options are ignored", +"Incorrect index name '%-.100s'", +"Incorrect catalog name '%-.100s'", +"Query cache failed to set size %lu, new query cache size is %lu", +"Column '%-.64s' cannot be part of FULLTEXT index", +"Unknown key cache '%-.100s'", +"MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work", +"Unknown table engine '%s'", +"'%s' is deprecated, use '%s' instead", +"The target table %-.100s of the %s is not updateable", +"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", +"The MySQL server is running with the %s option so it cannot execute this statement", +"Column '%-.100s' has duplicated value '%-.64s' in %s" +"Truncated wrong %-.32s value: '%-.128s'" +"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" +"Invalid ON UPDATE clause for '%-.64s' column", +"This command is not supported in the prepared statement protocol yet", +"Got error %d '%-.100s' from %s", +"Got temporary error %d '%-.100s' from %s", +"Unknown or incorrect time zone: '%-.64s'", +"Invalid TIMESTAMP value in column '%s' at row %ld", +"Invalid %s character string: '%.64s'", +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" +"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/portuguese/errmsg.txt b/sql/share/portuguese/errmsg.txt index 6fcc044c6b6..453c9dd5c18 100644 --- a/sql/share/portuguese/errmsg.txt +++ b/sql/share/portuguese/errmsg.txt @@ -16,6 +16,8 @@ /* Updated by Thiago Delgado Pinto - thiagodp@ieg.com.br - 06.07.2002 */ +character-set=latin1 + "hashchk", "isamchk", "NÃO", @@ -23,8 +25,8 @@ "Não pode criar o arquivo '%-.64s' (erro no. %d)", "Não pode criar a tabela '%-.64s' (erro no. %d)", "Não pode criar o banco de dados '%-.64s' (erro no. %d)", -"Não pode criar o banco de dados '%-.64s'. Este banco de dados já existe", -"Não pode eliminar o banco de dados '%-.64s'. Este banco de dados não existe", +"Não pode criar o banco de dados '%-.64s'; este banco de dados já existe", +"Não pode eliminar o banco de dados '%-.64s'; este banco de dados não existe", "Erro ao eliminar banco de dados (não pode eliminar '%-.64s' - erro no. %d)", "Erro ao eliminar banco de dados (não pode remover diretório '%-.64s' - erro no. %d)", "Erro na remoção de '%-.64s' (erro no. %d)", @@ -50,8 +52,8 @@ "Manipulador de tabela para '%-.64s' não tem esta opção", "Não pode encontrar registro em '%-.64s'", "Informação incorreta no arquivo '%-.64s'", -"Arquivo de índice incorreto para tabela '%-.64s'. Tente repará-lo", -"Arquivo de índice desatualizado para tabela '%-.64s'. Repare-o!", +"Arquivo de índice incorreto para tabela '%-.64s'; tente repará-lo", +"Arquivo de índice desatualizado para tabela '%-.64s'; repare-o!", "Tabela '%-.64s' é somente para leitura", "Sem memória. Reinicie o programa e tente novamente (necessita de %d bytes)", "Sem memória para ordenação. Aumente tamanho do 'buffer' de ordenação", @@ -60,8 +62,8 @@ "Sem memória. Verifique se o mysqld ou algum outro processo está usando toda memória disponível. 
Se não, você pode ter que usar 'ulimit' para permitir ao mysqld usar mais memória ou você pode adicionar mais área de 'swap'", "Não pode obter nome do 'host' para seu endereço", "Negociação de acesso falhou", -"Acesso negado para o usuário '%-.32s@%-.64s' ao banco de dados '%-.64s'", -"Acesso negado para o usuário '%-.32s@%-.64s' (senha usada: %s)", +"Acesso negado para o usuário '%-.32s'@'%-.64s' ao banco de dados '%-.64s'", +"Acesso negado para o usuário '%-.32s'@'%-.64s' (senha usada: %s)", "Nenhum banco de dados foi selecionado", "Comando desconhecido", "Coluna '%-.64s' não pode ser vazia", @@ -90,7 +92,7 @@ "Chave especificada longa demais. O comprimento de chave máximo permitido é %d", "Coluna chave '%-.64s' não existe na tabela", "Coluna BLOB '%-.64s' não pode ser utilizada na especificação de chave para o tipo de tabela usado", -"Comprimento da coluna '%-.64s' grande demais (max = %d). Use BLOB em seu lugar", +"Comprimento da coluna '%-.64s' grande demais (max = %d); use BLOB em seu lugar", "Definição incorreta de tabela. Somente é permitido um único campo auto-incrementado e ele tem que ser definido como chave", "%s: Pronto para conexões", "%s: 'Shutdown' normal\n", @@ -106,10 +108,10 @@ "Registros: %ld - Deletados: %ld - Ignorados: %ld - Avisos: %ld", "Registros: %ld - Duplicados: %ld", "Sub parte da chave incorreta. A parte da chave usada não é uma 'string' ou o comprimento usado é maior que parte da chave ou o manipulador de tabelas não suporta sub chaves únicas", -"Você não pode deletar todas as colunas com ALTER TABLE. Use DROP TABLE em seu lugar", +"Você não pode deletar todas as colunas com ALTER TABLE; use DROP TABLE em seu lugar", "Não se pode fazer DROP '%-.64s'. Confira se esta coluna/chave existe", "Registros: %ld - Duplicados: %ld - Avisos: %ld", -"INSERT TABLE '%-.64s' não é permitido na lista de tabelas contidas em FROM", +"You can't specify target table '%-.64s' for update in FROM clause", "'Id' de 'thread' %lu desconhecido", "Você não é proprietário da 'thread' %lu", "Nenhuma tabela usada", @@ -135,8 +137,8 @@ "Tabelas demais. O MySQL pode usar somente %d tabelas em uma junção (JOIN)", "Colunas demais", "Tamanho de linha grande demais. O máximo tamanho de linha, não contando BLOBs, é %d. Você tem que mudar alguns campos para BLOBs", -"Estouro da pilha do 'thread'. Usados %ld de uma pilha de %ld . Use 'mysqld -O thread_stack=#' para especificar uma pilha maior, se necessário", -"Dependência cruzada encontrada em junção externa (OUTER JOIN). Examine as condições utilizadas nas cláusulas 'ON'", +"Estouro da pilha do 'thread'. Usados %ld de uma pilha de %ld. Use 'mysqld -O thread_stack=#' para especificar uma pilha maior, se necessário", +"Dependência cruzada encontrada em junção externa (OUTER JOIN); examine as condições utilizadas nas cláusulas 'ON'", "Coluna '%-.64s' é usada com única (UNIQUE) ou índice (INDEX), mas não está definida como não-nula (NOT NULL)", "Não pode carregar a função '%-.64s'", "Não pode inicializar a função '%-.64s' - '%-.80s'", @@ -158,8 +160,8 @@ "Obteve erro '%-.64s' em regexp", "Mistura de colunas agrupadas (com MIN(), MAX(), COUNT(), ...) 
com colunas não agrupadas é ilegal, se não existir uma cláusula de agrupamento (cláusula GROUP BY)", "Não existe tal permissão (grant) definida para o usuário '%-.32s' no 'host' '%-.64s'", -"Comando '%-.16s' negado para o usuário '%-.32s@%-.64s' na tabela '%-.64s'", -"Comando '%-.16s' negado para o usuário '%-.32s@%-.64s' na coluna '%-.64s', na tabela '%-.64s'", +"Comando '%-.16s' negado para o usuário '%-.32s'@'%-.64s' na tabela '%-.64s'", +"Comando '%-.16s' negado para o usuário '%-.32s'@'%-.64s' na coluna '%-.64s', na tabela '%-.64s'", "Comando GRANT/REVOKE ilegal. Por favor consulte no manual quais privilégios podem ser usados.", "Argumento de 'host' ou de usuário para o GRANT é longo demais", "Tabela '%-.64s.%-.64s' não existe", @@ -209,13 +211,13 @@ "Erro de rede gravando no 'master'", "Não pode encontrar um índice para o texto todo que combine com a lista de colunas", "Não pode executar o comando dado porque você tem tabelas ativas travadas ou uma transação ativa", -"Variável de sistema '%-.64' desconhecida", +"Variável de sistema '%-.64s' desconhecida", "Tabela '%-.64s' está marcada como danificada e deve ser reparada", "Tabela '%-.64s' está marcada como danificada e a última reparação (automática?) falhou", "Aviso: Algumas tabelas não-transacionais alteradas não puderam ser reconstituídas (rolled back)", "Transações multi-declaradas (multi-statement transactions) requeriram mais do que o valor limite (max_binlog_cache_size) de bytes para armazenagem. Aumente o valor desta variável do mysqld e tente novamente", -"Esta operação não pode ser realizada com um 'slave' em execução. Execute SLAVE STOP primeiro", -"Esta operação requer um 'slave' em execução. Configure o 'slave' e execute SLAVE START", +"Esta operação não pode ser realizada com um 'slave' em execução. Execute STOP SLAVE primeiro", +"Esta operação requer um 'slave' em execução. Configure o 'slave' e execute START SLAVE", "O servidor não está configurado como 'slave'. Acerte o arquivo de configuração ou use CHANGE MASTER TO", "Could not initialize master info structure, more error messages can be found in the MySQL error log", "Não conseguiu criar 'thread' de 'slave'. Verifique os recursos do sistema", @@ -227,7 +229,7 @@ "DROP DATABASE não permitido enquanto uma 'thread' está mantendo um travamento global de leitura", "CREATE DATABASE não permitido enquanto uma 'thread' está mantendo um travamento global de leitura", "Argumentos errados para %s", -"Não é permitido a %-.32s@%-.64s criar novos usuários", +"Não é permitido a '%-.32s'@'%-.64s' criar novos usuários", "Definição incorreta da tabela. Todas as tabelas contidas na junção devem estar no mesmo banco de dados.", "Encontrado um travamento fatal (deadlock) quando tentava obter uma trava. Tente reiniciar a transação.", "O tipo de tabela utilizado não suporta índices de texto completo (fulltext indexes)", @@ -244,7 +246,7 @@ "Opção '%s' usada duas vezes no comando", "Usuário '%-.64s' tem excedido o '%s' recurso (atual valor: %ld)", "Acesso negado. 
Você precisa o privilégio %-.128s para essa operação", -"Variável '%-.64s' é uma LOCAL variável e não pode ser usada com SET GLOBAL", +"Variável '%-.64s' é uma SESSION variável e não pode ser usada com SET GLOBAL", "Variável '%-.64s' é uma GLOBAL variável e deve ser configurada com SET GLOBAL", "Variável '%-.64s' não tem um valor padrão", "Variável '%-.64s' não pode ser configurada para o valor de '%-.64s'", @@ -253,6 +255,70 @@ "Errado uso/colocação de '%s'", "Esta versão de MySQL não suporta ainda '%s'", "Obteve fatal erro %d: '%-.128s' do master quando lendo dados do binary log", -"Slave SQL thread ignorado a consulta devido às normas de replicação-*-tabela" -"Variable '%-.64s' is a %s variable" +"Slave SQL thread ignorado a consulta devido às normas de replicação-*-tabela", +"Variable '%-.64s' is a %s variable", +"Definição errada da chave estrangeira para '%-.64s': %s", +"Referência da chave e referência da tabela não coincidem", +"Operand should contain %d column(s)", +"Subconsulta retorna mais que 1 registro", +"Desconhecido manipulador de declaração preparado (%.*s) determinado para %s", +"Banco de dado de ajuda corrupto ou não existente", +"Referência cíclica em subconsultas", +"Convertendo coluna '%s' de %s para %s", +"Referência '%-.64s' não suportada (%s)", +"Cada tabela derivada deve ter seu próprio alias", +"Select %u foi reduzido durante otimização", +"Tabela '%-.64s' de um dos SELECTs não pode ser usada em %-.32s", +"Cliente não suporta o protocolo de autenticação exigido pelo servidor; considere a atualização do cliente MySQL", +"Todas as partes de uma SPATIAL KEY devem ser NOT NULL", +"COLLATION '%s' não é válida para CHARACTER SET '%s'", +"O slave já está rodando", +"O slave já está parado", +"Tamanho muito grande dos dados des comprimidos. O máximo tamanho é %d. (provavelmente, o comprimento dos dados descomprimidos está corrupto)", +"ZLIB: Não suficiente memória disponível", +"ZLIB: Não suficiente espaço no buffer emissor (provavelmente, o comprimento dos dados descomprimidos está corrupto)", +"ZLIB: Dados de entrada está corrupto", +"%d linha(s) foram cortada(s) por GROUP_CONCAT()", +"Conta de registro é menor que a conta de coluna na linha %ld", +"Conta de registro é maior que a conta de coluna na linha %ld", +"Dado truncado, NULL fornecido para NOT NULL coluna '%s' na linha %ld", +"Dado truncado, fora de alcance para coluna '%s' na linha %ld", +"Dado truncado para coluna '%s' na linha %ld", +"Usando engine de armazenamento %s para tabela '%s'", +"Combinação ilegal de collations (%s,%s) e (%s,%s) para operação '%s'", +"Não pode remover um ou mais dos usuários pedidos", +"Não pode revocar todos os privilégios, grant para um ou mais dos usuários pedidos", +"Ilegal combinação de collations (%s,%s), (%s,%s), (%s,%s) para operação '%s'", +"Ilegal combinação de collations para operação '%s'", +"Variável '%-.64s' não é uma variável componente (Não pode ser usada como XXXX.variável_nome)", +"Collation desconhecida: '%-.64s'", +"SSL parâmetros em CHANGE MASTER são ignorados porque este escravo MySQL foi compilado sem o SSL suporte. 
Os mesmos podem ser usados mais tarde quando o escravo MySQL com SSL seja iniciado.", +"Servidor está rodando em --secure-auth modo, porêm '%s'@'%s' tem senha no formato antigo; por favor troque a senha para o novo formato", +"Campo ou referência '%-.64s%s%-.64s%s%-.64s' de SELECT #%d foi resolvido em SELECT #%d", +"Parâmetro ou combinação de parâmetros errado para START SLAVE UNTIL", +"É recomendado para rodar com --skip-slave-start quando fazendo replicação passo-por-passo com START SLAVE UNTIL, de outra forma você não está seguro em caso de inesperada reinicialição do mysqld escravo", +"Thread SQL não pode ser inicializado tal que opções UNTIL são ignoradas", +"Incorreto nome de índice '%-.100s'", +"Incorreto nome de catálogo '%-.100s'", +"Falha em Query cache para configurar tamanho %lu, novo tamanho de query cache é %lu", +"Coluna '%-.64s' não pode ser parte de índice FULLTEXT", +"Key cache desconhecida '%-.100s'", +"MySQL foi inicializado em modo --skip-name-resolve. Você necesita reincializá-lo sem esta opção para este grant funcionar", +"Motor de tabela desconhecido '%s'", +"'%s' é desatualizado. Use '%s' em seu lugar", +"A tabela destino %-.100s do %s não é atualizável", +"O recurso '%s' foi desativado; você necessita MySQL construído com '%s' para ter isto funcionando", +"O servidor MySQL está rodando com a opção %s razão pela qual não pode executar esse commando", +"Coluna '%-.100s' tem valor duplicado '%-.64s' em %s" +"Truncado errado %-.32s valor: '%-.128s'" +"Incorreta definição de tabela; Pode ter somente uma coluna TIMESTAMP com CURRENT_TIMESTAMP em DEFAULT ou ON UPDATE cláusula" +"Inválida cláusula ON UPDATE para campo '%-.64s'", +"This command is not supported in the prepared statement protocol yet", +"Got error %d '%-.100s' from %s", +"Got temporary error %d '%-.100s' from %s", +"Unknown or incorrect time zone: '%-.64s'", +"Invalid TIMESTAMP value in column '%s' at row %ld", +"Invalid %s character string: '%.64s'", +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" +"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/romanian/errmsg.txt b/sql/share/romanian/errmsg.txt index c218f19e212..0fcc2804326 100644 --- a/sql/share/romanian/errmsg.txt +++ b/sql/share/romanian/errmsg.txt @@ -19,15 +19,17 @@ e-mail: tzoompy@cs.washington.edu */ +character-set=latin2 + "hashchk", "isamchk", "NU", "DA", "Nu pot sa creez fisierul '%-.64s' (Eroare: %d)", "Nu pot sa creez tabla '%-.64s' (Eroare: %d)", -"Nu pot sa creez baza de date '%-.64s'. (Eroare: %d)", -"Nu pot sa creez baza de date '%-.64s'. Baza de date exista deja", -"Nu pot sa drop baza de date '%-.64s'. Baza da date este inexistenta", +"Nu pot sa creez baza de date '%-.64s' (Eroare: %d)", +"Nu pot sa creez baza de date '%-.64s'; baza de date exista deja", +"Nu pot sa drop baza de date '%-.64s'; baza da date este inexistenta", "Eroare dropuind baza de date (nu pot sa sterg '%-.64s', Eroare: %d)", "Eroare dropuind baza de date (nu pot sa rmdir '%-.64s', Eroare: %d)", "Eroare incercind sa delete '%-.64s' (Eroare: %d)", @@ -35,12 +37,12 @@ "Nu pot sa obtin statusul lui '%-.64s' (Eroare: %d)", "Nu pot sa obtin directorul current (working directory) (Eroare: %d)", "Nu pot sa lock fisierul (Eroare: %d)", -"Nu pot sa deschid fisierul: '%-.64s'. 
(Eroare: %d)", +"Nu pot sa deschid fisierul: '%-.64s' (Eroare: %d)", "Nu pot sa gasesc fisierul: '%-.64s' (Eroare: %d)", "Nu pot sa citesc directorul '%-.64s' (Eroare: %d)", "Nu pot sa schimb directorul '%-.64s' (Eroare: %d)", "Cimpul a fost schimbat de la ultima citire a tabelei '%-.64s'", -"Hard-disk-ul este plin (%s). Astept sa se elibereze ceva spatiu....", +"Hard-disk-ul este plin (%s). Astept sa se elibereze ceva spatiu...", "Nu pot sa scriu (can't write), cheie duplicata in tabela '%-.64s'", "Eroare inchizind '%-.64s' (errno: %d)", "Eroare citind fisierul '%-.64s' (errno: %d)", @@ -53,8 +55,8 @@ "Handlerul tabelei pentru '%-.64s' nu are aceasta optiune", "Nu pot sa gasesc recordul in '%-.64s'", "Informatie incorecta in fisierul: '%-.64s'", -"Cheia fisierului incorecta pentru tabela: '%-.64s'. Incearca s-o repari", -"Cheia fisierului e veche pentru tabela '%-.64s'; Repar-o!", +"Cheia fisierului incorecta pentru tabela: '%-.64s'; incearca s-o repari", +"Cheia fisierului e veche pentru tabela '%-.64s'; repar-o!", "Tabela '%-.64s' e read-only", "Out of memory. Porneste daemon-ul din nou si incearca inca o data (e nevoie de %d bytes)", "Out of memory pentru sortare. Largeste marimea buffer-ului pentru sortare in daemon (sort buffer size)", @@ -63,8 +65,8 @@ "Out of memory; Verifica daca mysqld sau vreun alt proces foloseste toate memoria disponbila. Altfel, trebuie sa folosesi 'ulimit' ca sa permiti lui memoria disponbila. Altfel, trebuie sa folosesi 'ulimit' ca sa permiti lui mysqld sa foloseasca mai multa memorie ori adauga mai mult spatiu pentru swap (swap space)", "Nu pot sa obtin hostname-ul adresei tale", "Prost inceput de conectie (bad handshake)", -"Acces interzis pentru utilizatorul: '%-.32s@%-.64s' la baza de date '%-.64s'", -"Acces interzis pentru utilizatorul: '%-.32s@%-.64s' (Folosind parola: %s)", +"Acces interzis pentru utilizatorul: '%-.32s'@'%-.64s' la baza de date '%-.64s'", +"Acces interzis pentru utilizatorul: '%-.32s'@'%-.64s' (Folosind parola: %s)", "Nici o baza de data nu a fost selectata inca", "Comanda invalida", "Coloana '%-.64s' nu poate sa fie null", @@ -112,7 +114,7 @@ "Nu poti sterge toate coloanele cu ALTER TABLE. Foloseste DROP TABLE in schimb", "Nu pot sa DROP '%-.64s'. Verifica daca coloana/cheia exista", "Recorduri: %ld Duplicate: %ld Atentionari (warnings): %ld", -"INSERT TABLE '%-.64s' nu este permis in lista FROM de tabele", +"You can't specify target table '%-.64s' for update in FROM clause", "Id-ul: %lu thread-ului este necunoscut", "Nu sinteti proprietarul threadului %lu", "Nici o tabela folosita", @@ -123,7 +125,7 @@ "Coloana BLOB '%-.64s' nu poate avea o valoare default", "Numele bazei de date este incorect '%-.100s'", "Numele tabelei este incorect '%-.100s'", -"SELECT-ul ar examina prea multe cimpuri si probabil ar lua prea mult timp. Verifica clauza WHERE si foloseste SET SQL_BIG_SELECTS=1 daca SELECT-ul e ok", +"SELECT-ul ar examina prea multe cimpuri si probabil ar lua prea mult timp; verifica clauza WHERE si foloseste SET SQL_BIG_SELECTS=1 daca SELECT-ul e okay", "Eroare unknown", "Procedura unknown '%-.64s'", "Procedura '%-.64s' are un numar incorect de parametri", @@ -161,8 +163,8 @@ "Eroarea '%-.64s' obtinuta din expresia regulara (regexp)", "Amestecarea de coloane GROUP (MIN(),MAX(),COUNT()...) 
fara coloane GROUP este ilegala daca nu exista o clauza GROUP BY", "Nu exista un astfel de grant definit pentru utilzatorul '%-.32s' de pe host-ul '%-.64s'", -"Comanda %-.16s interzisa utilizatorului: '%-.32s@%-.64s' pentru tabela '%-.64s'", -"Comanda %-.16s interzisa utilizatorului: '%-.32s@%-.64s' pentru coloana '%-.64s' in tabela '%-.64s'", +"Comanda %-.16s interzisa utilizatorului: '%-.32s'@'%-.64s' pentru tabela '%-.64s'", +"Comanda %-.16s interzisa utilizatorului: '%-.32s'@'%-.64s' pentru coloana '%-.64s' in tabela '%-.64s'", "Comanda GRANT/REVOKE ilegala. Consultati manualul in privinta privilegiilor ce pot fi folosite.", "Argumentul host-ului sau utilizatorului pentru GRANT e prea lung", "Tabela '%-.64s.%-.64s' nu exista", @@ -171,7 +173,7 @@ "Aveti o eroare in sintaxa RSQL", "Thread-ul pentru inserarea aminata nu a putut obtine lacatul (lock) pentru tabela %-.64s", "Prea multe threaduri aminate care sint in uz", -"Conectie terminata %ld la baza de date: '%-.64s' utilizator: '%-32s' (%-.64s)", +"Conectie terminata %ld la baza de date: '%-.64s' utilizator: '%-.32s' (%-.64s)", "Un packet mai mare decit 'max_allowed_packet' a fost primit", "Eroare la citire din cauza lui 'connection pipe'", "Eroare obtinuta de la fcntl()", @@ -181,7 +183,7 @@ "Timeout obtinut citind pachetele de comunicatie (communication packets)", "Eroare in scrierea pachetelor de comunicatie (communication packets)", "Timeout obtinut scriind pachetele de comunicatie (communication packets)", -"Sirul rezultat este mai lung decit max_allowed_packet", +"Sirul rezultat este mai lung decit 'max_allowed_packet'", "Tipul de tabela folosit nu suporta coloane de tip BLOB/TEXT", "Tipul de tabela folosit nu suporta coloane de tip AUTO_INCREMENT", "INSERT DELAYED nu poate fi folosit cu tabela '%-.64s', deoarece este locked folosing LOCK TABLES", @@ -204,7 +206,7 @@ "Got error %d during FLUSH_LOGS", "Got error %d during CHECKPOINT", "Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)", -"The handler for the table does not support binary table dump","Binlog closed while trying to FLUSH MASTER", +"The handler for the table does not support binary table dump", "Binlog closed while trying to FLUSH MASTER", "Failed rebuilding the index of dumped table '%-.64s'", "Error from master: '%-.64s'", @@ -215,24 +217,24 @@ "Unknown system variable '%-.64s'", "Table '%-.64s' is marked as crashed and should be repaired", "Table '%-.64s' is marked as crashed and last (automatic?) repair failed", -"Warning: Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage. 
Increase this mysqld variable and try again", -"This operation cannot be performed with a running slave, run SLAVE STOP first", -"This operation requires a running slave, configure slave and do SLAVE START", -"The server is not configured as slave, fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure, more error messages can be found in the MySQL error log", -"Could not create slave thread, check system resources", +"Some non-transactional changed tables couldn't be rolled back", +"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", +"This operation cannot be performed with a running slave; run STOP SLAVE first", +"This operation requires a running slave; configure slave and do START SLAVE", +"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", +"Could not initialize master info structure; more error messages can be found in the MySQL error log", +"Could not create slave thread; check system resources", "User %-.64s has already more than 'max_user_connections' active connections", "You may only use constant expressions with SET", -"Lock wait timeout exceeded", +"Lock wait timeout exceeded; try restarting transaction", "The total number of locks exceeds the lock table size", "Update locks cannot be acquired during a READ UNCOMMITTED transaction", "DROP DATABASE not allowed while thread is holding global read lock", "CREATE DATABASE not allowed while thread is holding global read lock", -"Wrong arguments to %s", -"%-.32s@%-.64s is not allowed to create new users", -"Incorrect table definition; All MERGE tables must be in the same database", -"Deadlock found when trying to get lock; Try restarting transaction", +"Incorrect arguments to %s", +"'%-.32s'@'%-.64s' is not allowed to create new users", +"Incorrect table definition; all MERGE tables must be in the same database", +"Deadlock found when trying to get lock; try restarting transaction", "The used table type doesn't support FULLTEXT indexes", "Cannot add foreign key constraint", "Cannot add a child row: a foreign key constraint fails", @@ -240,22 +242,86 @@ "Error connecting to master: %-.128s", "Error running query on master: %-.128s", "Error when executing command %s: %-.128s", -"Wrong usage of %s and %s", +"Incorrect usage of %s and %s", "The used SELECT statements have a different number of columns", "Can't execute the query because you have a conflicting read lock", "Mixing of transactional and non-transactional tables is disabled", "Option '%s' used twice in statement", "User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied. 
You need the %-.128s privilege for this operation", -"Variable '%-.64s' is a LOCAL variable and can't be used with SET GLOBAL", +"Access denied; you need the %-.128s privilege for this operation", +"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", "Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", "Variable '%-.64s' doesn't have a default value", "Variable '%-.64s' can't be set to the value of '%-.64s'", -"Wrong argument type to variable '%-.64s'", +"Incorrect argument type to variable '%-.64s'", "Variable '%-.64s' can only be set, not read", -"Wrong usage/placement of '%s'", +"Incorrect usage/placement of '%s'", "This version of MySQL doesn't yet support '%s'", "Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules" -"Variable '%-.64s' is a %s variable" +"Slave SQL thread ignored the query because of replicate-*-table rules", +"Variable '%-.64s' is a %s variable", +"Incorrect foreign key definition for '%-.64s': %s", +"Key reference and table reference don't match", +"Operand should contain %d column(s)", +"Subquery returns more than 1 row", +"Unknown prepared statement handler (%.*s) given to %s", +"Help database is corrupt or does not exist", +"Cyclic reference on subqueries", +"Converting column '%s' from %s to %s", +"Reference '%-.64s' not supported (%s)", +"Every derived table must have its own alias", +"Select %u was reduced during optimization", +"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", +"Client does not support authentication protocol requested by server; consider upgrading MySQL client", +"All parts of a SPATIAL index must be NOT NULL", +"COLLATION '%s' is not valid for CHARACTER SET '%s'", +"Slave is already running", +"Slave has already been stopped", +"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", +"ZLIB: Not enough memory", +"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", +"ZLIB: Input data corrupted", +"%d line(s) were cut by GROUP_CONCAT()", +"Row %ld doesn't contain data for all columns", +"Row %ld was truncated; it contained more data than there were input columns", +"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", +"Data truncated; out of range for column '%s' at row %ld", +"Data truncated for column '%s' at row %ld", +"Using storage engine %s for table '%s'", +"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", +"Can't drop one or more of the requested users", +"Can't revoke all privileges, grant for one or more of the requested users", +"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", +"Illegal mix of collations for operation '%s'", +"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", +"Unknown collation: '%-.64s'", +"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", +"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", +"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", +"Incorrect parameter or combination of parameters for START SLAVE UNTIL", +"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; 
otherwise, you are not safe in case of unexpected slave's mysqld restart", +"SQL thread is not to be started so UNTIL options are ignored", +"Incorrect index name '%-.100s'", +"Incorrect catalog name '%-.100s'", +"Query cache failed to set size %lu, new query cache size is %lu", +"Column '%-.64s' cannot be part of FULLTEXT index", +"Unknown key cache '%-.100s'", +"MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work", +"Unknown table engine '%s'", +"'%s' is deprecated, use '%s' instead", +"The target table %-.100s of the %s is not updateable", +"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", +"The MySQL server is running with the %s option so it cannot execute this statement", +"Column '%-.100s' has duplicated value '%-.64s' in %s" +"Truncated wrong %-.32s value: '%-.128s'" +"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" +"Invalid ON UPDATE clause for '%-.64s' column", +"This command is not supported in the prepared statement protocol yet", +"Got error %d '%-.100s' from %s", +"Got temporary error %d '%-.100s' from %s", +"Unknown or incorrect time zone: '%-.64s'", +"Invalid TIMESTAMP value in column '%s' at row %ld", +"Invalid %s character string: '%.64s'", +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" +"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/russian/errmsg.txt b/sql/share/russian/errmsg.txt index 4419da08051..1913fd3f1c1 100644 --- a/sql/share/russian/errmsg.txt +++ b/sql/share/russian/errmsg.txt @@ -19,13 +19,15 @@ */ /* charset: KOI8-R */ +character-set=koi8r + "hashchk", "isamchk", "îåô", "äá", "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÆÁÊÌ '%-.64s' (ÏÛÉÂËÁ: %d)", "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÔÁÂÌÉÃÕ '%-.64s' (ÏÛÉÂËÁ: %d)", -"îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÂÁÚÕ ÄÁÎÎÙÈ '%-.64s'. (ÏÛÉÂËÁ: %d)", +"îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÂÁÚÕ ÄÁÎÎÙÈ '%-.64s' (ÏÛÉÂËÁ: %d)", "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÂÁÚÕ ÄÁÎÎÙÈ '%-.64s'. âÁÚÁ ÄÁÎÎÙÈ ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ", "îÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ ÂÁÚÕ ÄÁÎÎÙÈ '%-.64s'. ôÁËÏÊ ÂÁÚÙ ÄÁÎÎÙÈ ÎÅÔ", "ïÛÉÂËÁ ÐÒÉ ÕÄÁÌÅÎÉÉ ÂÁÚÙ ÄÁÎÎÙÈ (ÎÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ '%-.64s', ÏÛÉÂËÁ: %d)", @@ -35,12 +37,12 @@ "îÅ×ÏÚÍÏÖÎÏ ÐÏÌÕÞÉÔØ ÓÔÁÔÕÓÎÕÀ ÉÎÆÏÒÍÁÃÉÀ Ï '%-.64s' (ÏÛÉÂËÁ: %d)", "îÅ×ÏÚÍÏÖÎÏ ÏÐÒÅÄÅÌÉÔØ ÒÁÂÏÞÉÊ ËÁÔÁÌÏÇ (ÏÛÉÂËÁ: %d)", "îÅ×ÏÚÍÏÖÎÏ ÐÏÓÔÁ×ÉÔØ ÂÌÏËÉÒÏ×ËÕ ÎÁ ÆÁÊÌÅ (ÏÛÉÂËÁ: %d)", -"îÅ×ÏÚÍÏÖÎÏ ÏÔËÒÙÔØ ÆÁÊÌ: '%-.64s'. (ÏÛÉÂËÁ: %d)", +"îÅ×ÏÚÍÏÖÎÏ ÏÔËÒÙÔØ ÆÁÊÌ: '%-.64s' (ÏÛÉÂËÁ: %d)", "îÅ×ÏÚÍÏÖÎÏ ÎÁÊÔÉ ÆÁÊÌ: '%-.64s' (ÏÛÉÂËÁ: %d)", "îÅ×ÏÚÍÏÖÎÏ ÐÒÏÞÉÔÁÔØ ËÁÔÁÌÏÇ '%-.64s' (ÏÛÉÂËÁ: %d)", "îÅ×ÏÚÍÏÖÎÏ ÐÅÒÅÊÔÉ × ËÁÔÁÌÏÇ '%-.64s' (ÏÛÉÂËÁ: %d)", "úÁÐÉÓØ ÉÚÍÅÎÉÌÁÓØ Ó ÍÏÍÅÎÔÁ ÐÏÓÌÅÄÎÅÊ ×ÙÂÏÒËÉ × ÔÁÂÌÉÃÅ '%-.64s'", -"äÉÓË ÚÁÐÏÌÎÅÎ. (%s). ïÖÉÄÁÅÍ, ÐÏËÁ ËÔÏ-ÔÏ ÎÅ ÕÂÅÒÅÔ ÐÏÓÌÅ ÓÅÂÑ ÍÕÓÏÒ....", +"äÉÓË ÚÁÐÏÌÎÅÎ. (%s). ïÖÉÄÁÅÍ, ÐÏËÁ ËÔÏ-ÔÏ ÎÅ ÕÂÅÒÅÔ ÐÏÓÌÅ ÓÅÂÑ ÍÕÓÏÒ...", "îÅ×ÏÚÍÏÖÎÏ ÐÒÏÉÚ×ÅÓÔÉ ÚÁÐÉÓØ, ÄÕÂÌÉÒÕÀÝÉÊÓÑ ËÌÀÞ × ÔÁÂÌÉÃÅ '%-.64s'", "ïÛÉÂËÁ ÐÒÉ ÚÁËÒÙÔÉÉ '%-.64s' (ÏÛÉÂËÁ: %d)", "ïÛÉÂËÁ ÞÔÅÎÉÑ ÆÁÊÌÁ '%-.64s' (ÏÛÉÂËÁ: %d)", @@ -63,8 +65,8 @@ "îÅÄÏÓÔÁÔÏÞÎÏ ÐÁÍÑÔÉ; ÕÄÏÓÔÏ×ÅÒØÔÅÓØ, ÞÔÏ mysqld ÉÌÉ ËÁËÏÊ-ÌÉÂÏ ÄÒÕÇÏÊ ÐÒÏÃÅÓÓ ÎÅ ÚÁÎÉÍÁÅÔ ×ÓÀ ÄÏÓÔÕÐÎÕÀ ÐÁÍÑÔØ. 
åÓÌÉ ÎÅÔ, ÔÏ ×Ù ÍÏÖÅÔÅ ÉÓÐÏÌØÚÏ×ÁÔØ ulimit, ÞÔÏÂÙ ×ÙÄÅÌÉÔØ ÄÌÑ mysqld ÂÏÌØÛÅ ÐÁÍÑÔÉ, ÉÌÉ Õ×ÅÌÉÞÉÔØ ÏÂßÅÍ ÆÁÊÌÁ ÐÏÄËÁÞËÉ", "îÅ×ÏÚÍÏÖÎÏ ÐÏÌÕÞÉÔØ ÉÍÑ ÈÏÓÔÁ ÄÌÑ ×ÁÛÅÇÏ ÁÄÒÅÓÁ", "îÅËÏÒÒÅËÔÎÏÅ ÐÒÉ×ÅÔÓÔ×ÉÅ", -"äÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s@%-.64s' ÄÏÓÔÕÐ Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.64s' ÚÁËÒÙÔ", -"äÏÓÔÕÐ ÚÁËÒÙÔ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s@%-.64s' (ÂÙÌ ÉÓÐÏÌØÚÏ×ÁÎ ÐÁÒÏÌØ: %s)", +"äÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s'@'%-.64s' ÄÏÓÔÕÐ Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.64s' ÚÁËÒÙÔ", +"äÏÓÔÕÐ ÚÁËÒÙÔ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s'@'%-.64s' (ÂÙÌ ÉÓÐÏÌØÚÏ×ÁÎ ÐÁÒÏÌØ: %s)", "âÁÚÁ ÄÁÎÎÙÈ ÎÅ ×ÙÂÒÁÎÁ", "îÅÉÚ×ÅÓÔÎÁÑ ËÏÍÁÎÄÁ ËÏÍÍÕÎÉËÁÃÉÏÎÎÏÇÏ ÐÒÏÔÏËÏÌÁ", "óÔÏÌÂÅà '%-.64s' ÎÅ ÍÏÖÅÔ ÐÒÉÎÉÍÁÔØ ×ÅÌÉÞÉÎÕ NULL", @@ -90,7 +92,7 @@ "õËÁÚÁÎÏ ÎÅÓËÏÌØËÏ ÐÅÒ×ÉÞÎÙÈ ËÌÀÞÅÊ", "õËÁÚÁÎÏ ÓÌÉÛËÏÍ ÍÎÏÇÏ ËÌÀÞÅÊ. òÁÚÒÅÛÁÅÔÓÑ ÕËÁÚÙ×ÁÔØ ÎÅ ÂÏÌÅÅ %d ËÌÀÞÅÊ", "õËÁÚÁÎÏ ÓÌÉÛËÏÍ ÍÎÏÇÏ ÞÁÓÔÅÊ ÓÏÓÔÁ×ÎÏÇÏ ËÌÀÞÁ. òÁÚÒÅÛÁÅÔÓÑ ÕËÁÚÙ×ÁÔØ ÎÅ ÂÏÌÅÅ %d ÞÁÓÔÅÊ", -"õËÁÚÁÎ ÓÌÉÛËÏÍ ÄÌÉÎÎÙÊ ËÌÀÞ. íÁËÓÉÍÁÌØÎÁÑ ÄÌÉÎÁ ËÌÀÞÁ ÓÏÓÔÁ×ÌÑÅÔ %d", +"õËÁÚÁÎ ÓÌÉÛËÏÍ ÄÌÉÎÎÙÊ ËÌÀÞ. íÁËÓÉÍÁÌØÎÁÑ ÄÌÉÎÁ ËÌÀÞÁ ÓÏÓÔÁ×ÌÑÅÔ %d ÂÁÊÔ", "ëÌÀÞÅ×ÏÊ ÓÔÏÌÂÅà '%-.64s' × ÔÁÂÌÉÃÅ ÎÅ ÓÕÝÅÓÔ×ÕÅÔ", "óÔÏÌÂÅà ÔÉÐÁ BLOB '%-.64s' ÎÅ ÍÏÖÅÔ ÂÙÔØ ÉÓÐÏÌØÚÏ×ÁÎ ËÁË ÚÎÁÞÅÎÉÅ ËÌÀÞÁ × ÔÁÂÌÉÃÅ ÔÁËÏÇÏ ÔÉÐÁ", "óÌÉÛËÏÍ ÂÏÌØÛÁÑ ÄÌÉÎÁ ÓÔÏÌÂÃÁ '%-.64s' (ÍÁËÓÉÍÕÍ = %d). éÓÐÏÌØÚÕÊÔÅ ÔÉÐ BLOB ×ÍÅÓÔÏ ÔÅËÕÝÅÇÏ", @@ -103,7 +105,7 @@ "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ IP-ÓÏËÅÔ", "÷ ÔÁÂÌÉÃÅ '%-.64s' ÎÅÔ ÔÁËÏÇÏ ÉÎÄÅËÓÁ, ËÁË × CREATE INDEX. óÏÚÄÁÊÔÅ ÔÁÂÌÉÃÕ ÚÁÎÏ×Ï", "áÒÇÕÍÅÎÔ ÒÁÚÄÅÌÉÔÅÌÑ ÐÏÌÅÊ - ÎÅ ÔÏÔ, ËÏÔÏÒÙÊ ÏÖÉÄÁÌÓÑ. ïÂÒÁÝÁÊÔÅÓØ Ë ÄÏËÕÍÅÎÔÁÃÉÉ", -"æÉËÓÉÒÏ×ÁÎÎÙÊ ÒÁÚÍÅÒ ÚÁÐÉÓÉ Ó ÐÏÌÑÍÉ ÔÉÐÁ BLOB ÉÓÐÏÌØÚÏ×ÁÔØ ÎÅÌØÚÑ. ðÒÉÍÅÎÑÊÔÅ 'fields terminated by'.", +"æÉËÓÉÒÏ×ÁÎÎÙÊ ÒÁÚÍÅÒ ÚÁÐÉÓÉ Ó ÐÏÌÑÍÉ ÔÉÐÁ BLOB ÉÓÐÏÌØÚÏ×ÁÔØ ÎÅÌØÚÑ, ÐÒÉÍÅÎÑÊÔÅ 'fields terminated by'", "æÁÊÌ '%-.64s' ÄÏÌÖÅÎ ÎÁÈÏÄÉÔØÓÑ × ÔÏÍ ÖÅ ËÁÔÁÌÏÇÅ, ÞÔÏ É ÂÁÚÁ ÄÁÎÎÙÈ, ÉÌÉ ÂÙÔØ ÏÂÝÅÄÏÓÔÕÐÎÙÍ ÄÌÑ ÞÔÅÎÉÑ", "æÁÊÌ '%-.80s' ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ", "úÁÐÉÓÅÊ: %ld õÄÁÌÅÎÏ: %ld ðÒÏÐÕÝÅÎÏ: %ld ðÒÅÄÕÐÒÅÖÄÅÎÉÊ: %ld", @@ -161,8 +163,8 @@ "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ '%-.64s' ÏÔ ÒÅÇÕÌÑÒÎÏÇÏ ×ÙÒÁÖÅÎÉÑ", "ïÄÎÏ×ÒÅÍÅÎÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ÓÇÒÕÐÐÉÒÏ×ÁÎÎÙÈ (GROUP) ÓÔÏÌÂÃÏ× (MIN(),MAX(),COUNT(),...) Ó ÎÅÓÇÒÕÐÐÉÒÏ×ÁÎÎÙÍÉ ÓÔÏÌÂÃÁÍÉ Ñ×ÌÑÅÔÓÑ ÎÅËÏÒÒÅËÔÎÙÍ, ÅÓÌÉ × ×ÙÒÁÖÅÎÉÉ ÅÓÔØ GROUP BY", "ôÁËÉÅ ÐÒÁ×Á ÎÅ ÏÐÒÅÄÅÌÅÎÙ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' ÎÁ ÈÏÓÔÅ '%-.64s'", -"ëÏÍÁÎÄÁ %-.16s ÚÁÐÒÅÝÅÎÁ ÐÏÌØÚÏ×ÁÔÅÌÀ '%-.32s@%-.64s' ÄÌÑ ÔÁÂÌÉÃÙ '%-.64s'", -"ëÏÍÁÎÄÁ %-.16s ÚÁÐÒÅÝÅÎÁ ÐÏÌØÚÏ×ÁÔÅÌÀ '%-.32s@%-.64s' ÄÌÑ ÓÔÏÌÂÃÁ '%-.64s' × ÔÁÂÌÉÃÅ '%-.64s'", +"ëÏÍÁÎÄÁ %-.16s ÚÁÐÒÅÝÅÎÁ ÐÏÌØÚÏ×ÁÔÅÌÀ '%-.32s'@'%-.64s' ÄÌÑ ÔÁÂÌÉÃÙ '%-.64s'", +"ëÏÍÁÎÄÁ %-.16s ÚÁÐÒÅÝÅÎÁ ÐÏÌØÚÏ×ÁÔÅÌÀ '%-.32s'@'%-.64s' ÄÌÑ ÓÔÏÌÂÃÁ '%-.64s' × ÔÁÂÌÉÃÅ '%-.64s'", "îÅ×ÅÒÎÁÑ ËÏÍÁÎÄÁ GRANT ÉÌÉ REVOKE. 
ïÂÒÁÔÉÔÅÓØ Ë ÄÏËÕÍÅÎÔÁÃÉÉ, ÞÔÏÂÙ ×ÙÑÓÎÉÔØ, ËÁËÉÅ ÐÒÉ×ÉÌÅÇÉÉ ÍÏÖÎÏ ÉÓÐÏÌØÚÏ×ÁÔØ", "óÌÉÛËÏÍ ÄÌÉÎÎÏÅ ÉÍÑ ÐÏÌØÚÏ×ÁÔÅÌÑ/ÈÏÓÔÁ ÄÌÑ GRANT", "ôÁÂÌÉÃÁ '%-.64s.%-.64s' ÎÅ ÓÕÝÅÓÔ×ÕÅÔ", @@ -230,7 +232,7 @@ "îÅ ÄÏÐÕÓËÁÅÔÓÑ DROP DATABASE, ÐÏËÁ ÐÏÔÏË ÄÅÒÖÉÔ ÇÌÏÂÁÌØÎÕÀ ÂÌÏËÉÒÏ×ËÕ ÞÔÅÎÉÑ", "îÅ ÄÏÐÕÓËÁÅÔÓÑ CREATE DATABASE, ÐÏËÁ ÐÏÔÏË ÄÅÒÖÉÔ ÇÌÏÂÁÌØÎÕÀ ÂÌÏËÉÒÏ×ËÕ ÞÔÅÎÉÑ", "îÅ×ÅÒÎÙÅ ÐÁÒÁÍÅÔÒÙ ÄÌÑ %s", -"%-.32s@%-.64s ÎÅ ÒÁÚÒÅÛÁÅÔÓÑ ÓÏÚÄÁ×ÁÔØ ÎÏ×ÙÈ ÐÏÌØÚÏ×ÁÔÅÌÅÊ", +"'%-.32s'@'%-.64s' ÎÅ ÒÁÚÒÅÛÁÅÔÓÑ ÓÏÚÄÁ×ÁÔØ ÎÏ×ÙÈ ÐÏÌØÚÏ×ÁÔÅÌÅÊ", "îÅ×ÅÒÎÏÅ ÏÐÒÅÄÅÌÅÎÉÅ ÔÁÂÌÉÃÙ; ÷ÓÅ ÔÁÂÌÉÃÙ × MERGE ÄÏÌÖÎÙ ÐÒÉÎÁÄÌÅÖÁÔØ ÏÄÎÏÊ É ÔÏÊ ÖÅ ÂÁÚÅ ÄÁÎÎÙÈ", "÷ÏÚÎÉËÌÁ ÔÕÐÉËÏ×ÁÑ ÓÉÔÕÁÃÉÑ × ÐÒÏÃÅÓÓÅ ÐÏÌÕÞÅÎÉÑ ÂÌÏËÉÒÏ×ËÉ; ðÏÐÒÏÂÕÊÔÅ ÐÅÒÅÚÁÐÕÓÔÉÔØ ÔÒÁÎÚÁËÃÉÀ", "éÓÐÏÌØÚÕÅÍÙÊ ÔÉÐ ÔÁÂÌÉà ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÐÏÌÎÏÔÅËÓÔÏ×ÙÈ ÉÎÄÅËÓÏ×", @@ -247,15 +249,79 @@ "ïÐÃÉÑ '%s' Ä×ÁÖÄÙ ÉÓÐÏÌØÚÏ×ÁÎÁ × ×ÙÒÁÖÅÎÉÉ", "ðÏÌØÚÏ×ÁÔÅÌØ '%-.64s' ÐÒÅ×ÙÓÉÌ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ÒÅÓÕÒÓÁ '%s' (ÔÅËÕÝÅÅ ÚÎÁÞÅÎÉÅ: %ld)", "÷ ÄÏÓÔÕÐÅ ÏÔËÁÚÁÎÏ. ÷ÁÍ ÎÕÖÎÙ ÐÒÉ×ÉÌÅÇÉÉ %-.128s ÄÌÑ ÜÔÏÊ ÏÐÅÒÁÃÉÉ", -"ðÅÒÅÍÅÎÎÁÑ '%-.64s' Ñ×ÌÑÅÔÓÑ ÐÏÔÏËÏ×ÏÊ (LOCAL) ÐÅÒÅÍÅÎÎÏÊ É ÎÅ ÍÏÖÅÔ ÂÙÔØ ÉÚÍÅÎÅÎÁ Ó ÐÏÍÏÝØÀ SET GLOBAL", +"ðÅÒÅÍÅÎÎÁÑ '%-.64s' Ñ×ÌÑÅÔÓÑ ÐÏÔÏËÏ×ÏÊ (SESSION) ÐÅÒÅÍÅÎÎÏÊ É ÎÅ ÍÏÖÅÔ ÂÙÔØ ÉÚÍÅÎÅÎÁ Ó ÐÏÍÏÝØÀ SET GLOBAL", "ðÅÒÅÍÅÎÎÁÑ '%-.64s' Ñ×ÌÑÅÔÓÑ ÇÌÏÂÁÌØÎÏÊ (GLOBAL) ÐÅÒÅÍÅÎÎÏÊ, É ÅÅ ÓÌÅÄÕÅÔ ÉÚÍÅÎÑÔØ Ó ÐÏÍÏÝØÀ SET GLOBAL", "ðÅÒÅÍÅÎÎÁÑ '%-.64s' ÎÅ ÉÍÅÅÔ ÚÎÁÞÅÎÉÑ ÐÏ ÕÍÏÌÞÁÎÉÀ", "ðÅÒÅÍÅÎÎÁÑ '%-.64s' ÎÅ ÍÏÖÅÔ ÂÙÔØ ÕÓÔÁÎÏ×ÌÅÎÁ × ÚÎÁÞÅÎÉÅ '%-.64s'", "îÅ×ÅÒÎÙÊ ÔÉÐ ÁÒÇÕÍÅÎÔÁ ÄÌÑ ÐÅÒÅÍÅÎÎÏÊ '%-.64s'", "ðÅÒÅÍÅÎÎÁÑ '%-.64s' ÍÏÖÅÔ ÂÙÔØ ÔÏÌØËÏ ÕÓÔÁÎÏ×ÌÅÎÁ, ÎÏ ÎÅ ÓÞÉÔÁÎÁ", -"îÅ×ÅÒÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ÉÌÉ × ÎÅ×ÅÒÎÏÍ ÍÅÓÔÅ ÕËÁÚÁÎ '%s'", +"îÅ×ÅÒÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ÉÌÉ × ÎÅ×ÅÒÎÏÍ ÍÅÓÔÅ ÕËÁÚÁÎ '%s'", "üÔÁ ×ÅÒÓÉÑ MySQL ÐÏËÁ ÅÝÅ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ '%s'", "ðÏÌÕÞÅÎÁ ÎÅÉÓÐÒÁ×ÉÍÁÑ ÏÛÉÂËÁ %d: '%-.128s' ÏÔ ÇÏÌÏ×ÎÏÇÏ ÓÅÒ×ÅÒÁ × ÐÒÏÃÅÓÓÅ ×ÙÂÏÒËÉ ÄÁÎÎÙÈ ÉÚ Ä×ÏÉÞÎÏÇÏ ÖÕÒÎÁÌÁ", -"Slave SQL thread ignored the query because of replicate-*-table rules" -"Variable '%-.64s' is a %s variable" +"Slave SQL thread ignored the query because of replicate-*-table rules", +"Variable '%-.64s' is a %s variable", +"Incorrect foreign key definition for '%-.64s': %s", +"Key reference and table reference don't match", +"ïÐÅÒÁÎÄ ÄÏÌÖÅÎ ÓÏÄÅÒÖÁÔØ %d ËÏÌÏÎÏË", +"ðÏÄÚÁÐÒÏÓ ×ÏÚ×ÒÁÝÁÅÔ ÂÏÌÅÅ ÏÄÎÏÊ ÚÁÐÉÓÉ", +"Unknown prepared statement handler (%.*s) given to %s", +"Help database is corrupt or does not exist", +"ãÉËÌÉÞÅÓËÁÑ ÓÓÙÌËÁ ÎÁ ÐÏÄÚÁÐÒÏÓ", +"ðÒÅÏÂÒÁÚÏ×ÁÎÉÅ ÐÏÌÑ '%s' ÉÚ %s × %s", +"óÓÙÌËÁ '%-.64s' ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔÓÑ (%s)", +"Every derived table must have its own alias", +"Select %u ÂÙÌ ÕÐÒÁÚÄÎÅÎ × ÐÒÏÃÅÓÓÅ ÏÐÔÉÍÉÚÁÃÉÉ", +"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", +"Client does not support authentication protocol requested by server; consider upgrading MySQL client", +"All parts of a SPATIAL index must be NOT NULL", +"COLLATION '%s' is not valid for CHARACTER SET '%s'", +"Slave is already running", +"Slave has already been stopped", +"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", +"ZLIB: Not enough memory", +"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", +"ZLIB: Input data corrupted", +"%d line(s) were cut by GROUP_CONCAT()", +"Row %ld doesn't contain data for all columns", +"Row %ld was truncated; it contained more data than there were input columns", +"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", +"Data truncated; out of range for column '%s' at row %ld", +"Data truncated for column '%s' at row %ld", 
+"Using storage engine %s for table '%s'", +"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", +"Can't drop one or more of the requested users", +"Can't revoke all privileges, grant for one or more of the requested users", +"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", +"Illegal mix of collations for operation '%s'", +"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", +"Unknown collation: '%-.64s'", +"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", +"óÅÒ×ÅÒ ÚÁÐÕÝÅÎ × ÒÅÖÉÍÅ --secure-auth (ÂÅÚÏÐÁÓÎÏÊ Á×ÔÏÒÉÚÁÃÉÉ), ÎÏ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%s'@'%s' ÐÁÒÏÌØ ÓÏÈÒÁÎ£Î × ÓÔÁÒÏÍ ÆÏÒÍÁÔÅ; ÎÅÏÂÈÏÄÉÍÏ ÏÂÎÏ×ÉÔØ ÆÏÒÍÁÔ ÐÁÒÏÌÑ", +"ðÏÌÅ ÉÌÉ ÓÓÙÌËÁ '%-.64s%s%-.64s%s%-.64s' ÉÚ SELECTÁ #%d ÂÙÌÁ ÎÁÊÄÅÎÁ × SELECTÅ #%d", +"Incorrect parameter or combination of parameters for START SLAVE UNTIL", +"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", +"SQL thread is not to be started so UNTIL options are ignored", +"Incorrect index name '%-.100s'", +"Incorrect catalog name '%-.100s'", +"ëÅÛ ÚÁÐÒÏÓÏ× ÎÅ ÍÏÖÅÔ ÕÓÔÁÎÏ×ÉÔØ ÒÁÚÍÅÒ %lu, ÎÏ×ÙÊ ÒÁÚÍÅÒ ËÅÛÁ ÚÐÒÏÓÏ× - %lu", +"Column '%-.64s' cannot be part of FULLTEXT index", +"Unknown key cache '%-.100s'", +"MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work", +"Unknown table engine '%s'", +"'%s' is deprecated, use '%s' instead", +"ôÁÂÌÉÃÁ %-.100s × %s ÎÅ ÍÏÖÅÔ ÉÚÍÅÎÑÔÓÑ", +"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", +"The MySQL server is running with the %s option so it cannot execute this statement", +"Column '%-.100s' has duplicated value '%-.64s' in %s" +"Truncated wrong %-.32s value: '%-.128s'" +"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" +"Invalid ON UPDATE clause for '%-.64s' column", +"This command is not supported in the prepared statement protocol yet", +"Got error %d '%-.100s' from %s", +"Got temporary error %d '%-.100s' from %s", +"Unknown or incorrect time zone: '%-.64s'", +"Invalid TIMESTAMP value in column '%s' at row %ld", +"Invalid %s character string: '%.64s'", +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" +"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/serbian/errmsg.txt b/sql/share/serbian/errmsg.txt new file mode 100644 index 00000000000..adda7d7cf53 --- /dev/null +++ b/sql/share/serbian/errmsg.txt @@ -0,0 +1,315 @@ +/* Copyright Abandoned 1997 TCX DataKonsult AB & Monty Program KB & Detron HB + This file is public domain and comes with NO WARRANTY of any kind */ + +/* Serbian Translation, version 1.0: + Copyright 2002 Vladimir Kraljevic, vladimir_kraljevic@yahoo.com + This file is public domain and comes with NO WARRANTY of any kind. 
+ Charset: cp1250 +*/ + +character-set=cp1250 + +"hashchk", +"isamchk", +"NE", +"DA", +"Ne mogu da kreiram file '%-.64s' (errno: %d)", +"Ne mogu da kreiram tabelu '%-.64s' (errno: %d)", +"Ne mogu da kreiram bazu '%-.64s' (errno: %d)", +"Ne mogu da kreiram bazu '%-.64s'; baza veæ postoji.", +"Ne mogu da izbrišem bazu '%-.64s'; baza ne postoji.", +"Ne mogu da izbrišem bazu (ne mogu da izbrišem '%-.64s', errno: %d)", +"Ne mogu da izbrišem bazu (ne mogu da izbrišem direktorijum '%-.64s', errno: %d)", +"Greška pri brisanju '%-.64s' (errno: %d)", +"Ne mogu da proèitam slog iz sistemske tabele", +"Ne mogu da dobijem stanje file-a '%-.64s' (errno: %d)", +"Ne mogu da dobijem trenutni direktorijum (errno: %d)", +"Ne mogu da zakljuèam file (errno: %d)", +"Ne mogu da otvorim file: '%-.64s' (errno: %d)", +"Ne mogu da pronaðem file: '%-.64s' (errno: %d)", +"Ne mogu da proèitam direktorijum '%-.64s' (errno: %d)", +"Ne mogu da promenim direktorijum na '%-.64s' (errno: %d)", +"Slog je promenjen od zadnjeg èitanja tabele '%-.64s'", +"Disk je pun (%s). Èekam nekoga da doðe i oslobodi nešto mesta...", +"Ne mogu da pišem pošto postoji duplirani kljuè u tabeli '%-.64s'", +"Greška pri zatvaranju '%-.64s' (errno: %d)", +"Greška pri èitanju file-a '%-.64s' (errno: %d)", +"Greška pri promeni imena '%-.64s' na '%-.64s' (errno: %d)", +"Greška pri upisu '%-.64s' (errno: %d)", +"'%-.64s' je zakljuèan za upis", +"Sortiranje je prekinuto", +"View '%-.64s' ne postoji za '%-.64s'", +"Handler tabela je vratio grešku %d", +"Handler tabela za '%-.64s' nema ovu opciju", +"Ne mogu da pronaðem slog u '%-.64s'", +"Pogrešna informacija u file-u: '%-.64s'", +"Pogrešan key file za tabelu: '%-.64s'; probajte da ga ispravite", +"Zastareo key file za tabelu '%-.64s'; ispravite ga", +"Tabelu '%-.64s' je dozvoljeno samo èitati", +"Nema memorije. Restartujte MySQL server i probajte ponovo (potrebno je %d byte-ova)", +"Nema memorije za sortiranje. Poveæajte velièinu sort buffer-a MySQL server-u", +"Neoèekivani kraj pri èitanju file-a '%-.64s' (errno: %d)", +"Previše konekcija", +"Nema memorije; Proverite da li MySQL server ili neki drugi proces koristi svu slobodnu memoriju. (UNIX: Ako ne, probajte da upotrebite 'ulimit' komandu da biste dozvolili daemon-u da koristi više memorije ili probajte da dodate više swap memorije)", +"Ne mogu da dobijem ime host-a za vašu IP adresu", +"Loš poèetak komunikacije (handshake)", +"Pristup je zabranjen korisniku '%-.32s'@'%-.64s' za bazu '%-.64s'", +"Pristup je zabranjen korisniku '%-.32s'@'%-.64s' (koristi lozinku: '%s')", +"Ni jedna baza nije selektovana", +"Nepoznata komanda", +"Kolona '%-.64s' ne može biti NULL", +"Nepoznata baza '%-.64s'", +"Tabela '%-.64s' veæ postoji", +"Nepoznata tabela '%-.64s'", +"Kolona '%-.64s' u %-.64s nije jedinstvena u kontekstu", +"Gašenje servera je u toku", +"Nepoznata kolona '%-.64s' u '%-.64s'", +"Entitet '%-.64s' nije naveden u komandi 'GROUP BY'", +"Ne mogu da grupišem po '%-.64s'", +"Izraz ima 'SUM' agregatnu funkciju i kolone u isto vreme", +"Broj kolona ne odgovara broju vrednosti", +"Ime '%-.100s' je predugaèko", +"Duplirano ime kolone '%-.64s'", +"Duplirano ime kljuèa '%-.64s'", +"Dupliran unos '%-.64s' za kljuè '%d'", +"Pogrešan naziv kolone za kolonu '%-.64s'", +"'%s' u iskazu '%-.80s' na liniji %d", +"Upit je bio prazan", +"Tabela ili alias nisu bili jedinstveni: '%-.64s'", +"Loša default vrednost za '%-.64s'", +"Definisani višestruki primarni kljuèevi", +"Navedeno je previše kljuèeva. 
Maksimum %d kljuèeva je dozvoljeno", +"Navedeno je previše delova kljuèa. Maksimum %d delova je dozvoljeno", +"Navedeni kljuè je predug. Maksimalna dužina kljuèa je %d", +"Kljuèna kolona '%-.64s' ne postoji u tabeli", +"BLOB kolona '%-.64s' ne može biti upotrebljena za navoðenje kljuèa sa tipom tabele koji se trenutno koristi", +"Previše podataka za kolonu '%-.64s' (maksimum je %d). Upotrebite BLOB polje", +"Pogrešna definicija tabele; U tabeli može postojati samo jedna 'AUTO' kolona i ona mora biti istovremeno definisana kao kolona kljuèa", +"%s: Spreman za konekcije\n", +"%s: Normalno gašenje\n", +"%s: Dobio signal %d. Prekidam!\n", +"%s: Gašenje završeno\n", +"%s: Usiljeno gašenje thread-a %ld koji pripada korisniku: '%-.32s'\n", +"Ne mogu da kreiram IP socket", +"Tabela '%-.64s' nema isti indeks kao onaj upotrebljen pri komandi 'CREATE INDEX'. Napravite tabelu ponovo", +"Argument separatora polja nije ono što se oèekivalo. Proverite uputstvo MySQL server-a", +"Ne možete koristiti fiksnu velièinu sloga kada imate BLOB polja. Molim koristite 'fields terminated by' opciju.", +"File '%-.64s' mora biti u direktorijumu gde su file-ovi baze i mora imati odgovarajuæa prava pristupa", +"File '%-.80s' veæ postoji", +"Slogova: %ld Izbrisano: %ld Preskoèeno: %ld Upozorenja: %ld", +"Slogova: %ld Duplikata: %ld", +"Pogrešan pod-kljuè dela kljuèa. Upotrebljeni deo kljuèa nije string, upotrebljena dužina je veæa od dela kljuèa ili handler tabela ne podržava jedinstvene pod-kljuèeve", +"Ne možete da izbrišete sve kolone pomoæu komande 'ALTER TABLE'. Upotrebite komandu 'DROP TABLE' ako želite to da uradite", +"Ne mogu da izvršim komandu drop 'DROP' na '%-.64s'. Proverite da li ta kolona (odnosno kljuè) postoji", +"Slogova: %ld Duplikata: %ld Upozorenja: %ld", +"You can't specify target table '%-.64s' for update in FROM clause", +"Nepoznat thread identifikator: %lu", +"Vi niste vlasnik thread-a %lu", +"Nema upotrebljenih tabela", +"Previše string-ova za kolonu '%-.64s' i komandu 'SET'", +"Ne mogu da generišem jedinstveno ime log-file-a: '%-.64s.(1-999)'\n", +"Tabela '%-.64s' je zakljuèana READ lock-om; iz nje se može samo èitati ali u nju se ne može pisati", +"Tabela '%-.64s' nije bila zakljuèana komandom 'LOCK TABLES'", +"BLOB kolona '%-.64s' ne može imati default vrednost", +"Pogrešno ime baze '%-.100s'", +"Pogrešno ime tabele '%-.100s'", +"Komanda 'SELECT' æe ispitati previše slogova i potrošiti previše vremena. Proverite vaš 'WHERE' filter i upotrebite 'SET OPTION SQL_BIG_SELECTS=1' ako želite baš ovakvu komandu", +"Nepoznata greška", +"Nepoznata procedura '%-.64s'", +"Pogrešan broj parametara za proceduru '%-.64s'", +"Pogrešni parametri prosleðeni proceduri '%-.64s'", +"Nepoznata tabela '%-.64s' u '%-.32s'", +"Kolona '%-.64s' je navedena dva puta", +"Pogrešna upotreba 'GROUP' funkcije", +"Tabela '%-.64s' koristi ekstenziju koje ne postoji u ovoj verziji MySQL-a", +"Tabela mora imati najmanje jednu kolonu", +"Tabela '%-.64s' je popunjena do kraja", +"Nepoznati karakter-set: '%-.64s'", +"Previše tabela. MySQL može upotrebiti maksimum %d tabela pri 'JOIN' operaciji", +"Previše kolona", +"Prevelik slog. Maksimalna velièina sloga, ne raèunajuæi BLOB polja, je %d. Trebali bi da promenite tip nekih polja u BLOB", +"Prepisivanje thread stack-a: Upotrebljeno: %ld od %ld stack memorije. Upotrebite 'mysqld -O thread_stack=#' da navedete veæi stack ako je potrebno", +"Unakrsna zavisnost pronaðena u komandi 'OUTER JOIN'. 
Istražite vaše 'ON' uslove", +"Kolona '%-.64s' je upotrebljena kao 'UNIQUE' ili 'INDEX' ali nije definisana kao 'NOT NULL'", +"Ne mogu da uèitam funkciju '%-.64s'", +"Ne mogu da inicijalizujem funkciju '%-.64s'; %-.80s", +"Ne postoje dozvoljene putanje do share-ovane biblioteke", +"Funkcija '%-.64s' veæ postoji", +"Ne mogu da otvorim share-ovanu biblioteku '%-.64s' (errno: %d %-.64s)", +"Ne mogu da pronadjem funkciju '%-.64s' u biblioteci", +"Funkcija '%-.64s' nije definisana", +"Host '%-.64s' je blokiran zbog previše grešaka u konekciji. Možete ga odblokirati pomoæu komande 'mysqladmin flush-hosts'", +"Host-u '%-.64s' nije dozvoljeno da se konektuje na ovaj MySQL server", +"Vi koristite MySQL kao anonimni korisnik a anonimnim korisnicima nije dozvoljeno da menjaju lozinke", +"Morate imati privilegije da možete da update-ujete odreðene tabele ako želite da menjate lozinke za druge korisnike", +"Ne mogu da pronaðem odgovarajuæi slog u 'user' tabeli", +"Odgovarajuæih slogova: %ld Promenjeno: %ld Upozorenja: %ld", +"Ne mogu da kreiram novi thread (errno %d). Ako imate još slobodne memorije, trebali biste da pogledate u priruèniku da li je ovo specifièna greška vašeg operativnog sistema", +"Broj kolona ne odgovara broju vrednosti u slogu %ld", +"Ne mogu da ponovo otvorim tabelu '%-.64s'", +"Pogrešna upotreba vrednosti NULL", +"Funkcija regexp je vratila grešku '%-.64s'", +"Upotreba agregatnih funkcija (MIN(),MAX(),COUNT()...) bez 'GROUP' kolona je pogrešna ako ne postoji 'GROUP BY' iskaz", +"Ne postoji odobrenje za pristup korisniku '%-.32s' na host-u '%-.64s'", +"%-.16s komanda zabranjena za korisnika '%-.32s'@'%-.64s' za tabelu '%-.64s'", +"%-.16s komanda zabranjena za korisnika '%-.32s'@'%-.64s' za kolonu '%-.64s' iz tabele '%-.64s'", +"Pogrešna 'GRANT' odnosno 'REVOKE' komanda. 
Molim Vas pogledajte u priruèniku koje vrednosti mogu biti upotrebljene.", +"Argument 'host' ili 'korisnik' prosleðen komandi 'GRANT' je predugaèak", +"Tabela '%-.64s.%-.64s' ne postoji", +"Ne postoji odobrenje za pristup korisniku '%-.32s' na host-u '%-.64s' tabeli '%-.64s'", +"Upotrebljena komanda nije dozvoljena sa ovom verzijom MySQL servera", +"Imate grešku u vašoj SQL sintaksi", +"Prolongirani 'INSERT' thread nije mogao da dobije traženo zakljuèavanje tabele '%-.64s'", +"Previše prolongiranih thread-ova je u upotrebi", +"Prekinuta konekcija broj %ld ka bazi: '%-.64s' korisnik je bio: '%-.32s' (%-.64s)", +"Primio sam mrežni paket veæi od definisane vrednosti 'max_allowed_packet'", +"Greška pri èitanju podataka sa pipe-a", +"Greška pri izvršavanju funkcije fcntl()", +"Primio sam mrežne pakete van reda", +"Ne mogu da dekompresujem mrežne pakete", +"Greška pri primanju mrežnih paketa", +"Vremenski limit za èitanje mrežnih paketa je istekao", +"Greška pri slanju mrežnih paketa", +"Vremenski limit za slanje mrežnih paketa je istekao", +"Rezultujuèi string je duži nego što to dozvoljava parametar servera 'max_allowed_packet'", +"Iskorišteni tip tabele ne podržava kolone tipa 'BLOB' odnosno 'TEXT'", +"Iskorišteni tip tabele ne podržava kolone tipa 'AUTO_INCREMENT'", +"Komanda 'INSERT DELAYED' ne može biti iskorištena u tabeli '%-.64s', zbog toga što je zakljuèana komandom 'LOCK TABLES'", +"Pogrešno ime kolone '%-.100s'", +"Handler tabele ne može da indeksira kolonu '%-.64s'", +"Tabele iskorištene u 'MERGE' tabeli nisu definisane na isti naèin", +"Zbog provere jedinstvenosti ne mogu da upišem podatke u tabelu '%-.64s'", +"BLOB kolona '%-.64s' je upotrebljena u specifikaciji kljuèa bez navoðenja dužine kljuèa", +"Svi delovi primarnog kljuèa moraju biti razlièiti od NULL; Ako Vam ipak treba NULL vrednost u kljuèu, upotrebite 'UNIQUE'", +"Rezultat je saèinjen od više slogova", +"Ovaj tip tabele zahteva da imate definisan primarni kljuè", +"Ova verzija MySQL servera nije kompajlirana sa podrškom za RAID ureðaje", +"Vi koristite safe update mod servera, a probali ste da promenite podatke bez 'WHERE' komande koja koristi kolonu kljuèa", +"Kljuè '%-.64s' ne postoji u tabeli '%-.64s'", +"Ne mogu da otvorim tabelu", +"Handler za ovu tabelu ne dozvoljava 'check' odnosno 'repair' komande", +"Nije Vam dozvoljeno da izvršite ovu komandu u transakciji", +"Greška %d za vreme izvršavanja komande 'COMMIT'", +"Greška %d za vreme izvršavanja komande 'ROLLBACK'", +"Greška %d za vreme izvršavanja komande 'FLUSH_LOGS'", +"Greška %d za vreme izvršavanja komande 'CHECKPOINT'", +"Prekinuta konekcija broj %ld ka bazi: '%-.64s' korisnik je bio: '%-.32s' a host: `%-.64s' (%-.64s)", +"Handler tabele ne podržava binarni dump tabele", +"Binarni log file zatvoren, ne mogu da izvršim komandu 'RESET MASTER'", +"Izgradnja indeksa dump-ovane tabele '%-.64s' nije uspela", +"Greška iz glavnog servera '%-.64s' u klasteru", +"Greška u primanju mrežnih paketa sa glavnog servera u klasteru", +"Greška u slanju mrežnih paketa na glavni server u klasteru", +"Ne mogu da pronaðem 'FULLTEXT' indeks koli odgovara listi kolona", +"Ne mogu da izvršim datu komandu zbog toga što su tabele zakljuèane ili je transakcija u toku", +"Nepoznata sistemska promenljiva '%-.64s'", +"Tabela '%-.64s' je markirana kao ošteæena i trebala bi biti popravljena", +"Tabela '%-.64s' je markirana kao ošteæena, a zadnja (automatska?) 
popravka je bila neuspela", +"Upozorenje: Neke izmenjene tabele ne podržavaju komandu 'ROLLBACK'", +"Transakcija sa više stavki zahtevala je više od 'max_binlog_cache_size' bajtova skladišnog prostora. Uveæajte ovu promenljivu servera i pokušajte ponovo", +"Ova operacija ne može biti izvršena dok je aktivan podreðeni server. Zadajte prvo komandu 'STOP SLAVE' da zaustavite podreðeni server.", +"Ova operacija zahteva da je aktivan podreðeni server. Konfigurišite prvo podreðeni server i onda izvršite komandu 'START SLAVE'", +"Server nije konfigurisan kao podreðeni server, ispravite konfiguracioni file ili na njemu izvršite komandu 'CHANGE MASTER TO'", +"Nisam mogao da inicijalizujem informacionu strukturu glavnog servera, proverite da li imam privilegije potrebne za pristup file-u 'master.info'", +"Nisam mogao da startujem thread za podreðeni server, proverite sistemske resurse", +"Korisnik %-.64s veæ ima više aktivnih konekcija nego što je to odreðeno 'max_user_connections' promenljivom", +"Možete upotrebiti samo konstantan iskaz sa komandom 'SET'", +"Vremenski limit za zakljuèavanje tabele je istekao; Probajte da ponovo startujete transakciju", +"Broj totalnih zakljuèavanja tabele premašuje velièinu tabele zakljuèavanja", +"Zakljuèavanja izmena ne mogu biti realizovana sve dok traje 'READ UNCOMMITTED' transakcija", +"Komanda 'DROP DATABASE' nije dozvoljena dok thread globalno zakljuèava èitanje podataka", +"Komanda 'CREATE DATABASE' nije dozvoljena dok thread globalno zakljuèava èitanje podataka", +"Pogrešni argumenti prosleðeni na %s", +"Korisniku '%-.32s'@'%-.64s' nije dozvoljeno da kreira nove korisnike", +"Pogrešna definicija tabele; sve 'MERGE' tabele moraju biti u istoj bazi podataka", +"Unakrsno zakljuèavanje pronaðeno kada sam pokušao da dobijem pravo na zakljuèavanje; Probajte da restartujete transakciju", +"Upotrebljeni tip tabele ne podržava 'FULLTEXT' indekse", +"Ne mogu da dodam proveru spoljnog kljuèa", +"Ne mogu da dodam slog: provera spoljnog kljuèa je neuspela", +"Ne mogu da izbrišem roditeljski slog: provera spoljnog kljuèa je neuspela", +"Greška pri povezivanju sa glavnim serverom u klasteru: %-.128s", +"Greška pri izvršavanju upita na glavnom serveru u klasteru: %-.128s", +"Greška pri izvršavanju komande %s: %-.128s", +"Pogrešna upotreba %s i %s", +"Upotrebljene 'SELECT' komande adresiraju razlièit broj kolona", +"Ne mogu da izvršim upit zbog toga što imate zakljuèavanja èitanja podataka u konfliktu", +"Mešanje tabela koje podržavaju transakcije i onih koje ne podržavaju transakcije je iskljuèeno", +"Opcija '%s' je upotrebljena dva puta u istom iskazu", +"User '%-.64s' has exceeded the '%s' resource (current value: %ld)", +"Access denied; you need the %-.128s privilege for this operation", +"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", +"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", +"Variable '%-.64s' doesn't have a default value", +"Variable '%-.64s' can't be set to the value of '%-.64s'", +"Incorrect argument type to variable '%-.64s'", +"Variable '%-.64s' can only be set, not read", +"Incorrect usage/placement of '%s'", +"This version of MySQL doesn't yet support '%s'", +"Got fatal error %d: '%-.128s' from master when reading data from binary log", +"Slave SQL thread ignored the query because of replicate-*-table rules", +"Variable '%-.64s' is a %s variable", +"Incorrect foreign key definition for '%-.64s': %s", +"Key reference and table reference don't match", +"Operand should contain %d column(s)", 
+"Subquery returns more than 1 row", +"Unknown prepared statement handler (%.*s) given to %s", +"Help database is corrupt or does not exist", +"Cyclic reference on subqueries", +"Converting column '%s' from %s to %s", +"Reference '%-.64s' not supported (%s)", +"Every derived table must have its own alias", +"Select %u was reduced during optimization", +"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", +"Client does not support authentication protocol requested by server; consider upgrading MySQL client", +"All parts of a SPATIAL index must be NOT NULL", +"COLLATION '%s' is not valid for CHARACTER SET '%s'", +"Slave is already running", +"Slave has already been stopped", +"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", +"ZLIB: Not enough memory", +"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", +"ZLIB: Input data corrupted", +"%d line(s) were cut by GROUP_CONCAT()", +"Row %ld doesn't contain data for all columns", +"Row %ld was truncated; it contained more data than there were input columns", +"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", +"Data truncated; out of range for column '%s' at row %ld", +"Data truncated for column '%s' at row %ld", +"Using storage engine %s for table '%s'", +"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", +"Can't drop one or more of the requested users", +"Can't revoke all privileges, grant for one or more of the requested users", +"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", +"Illegal mix of collations for operation '%s'", +"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", +"Unknown collation: '%-.64s'", +"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", +"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", +"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", +"Incorrect parameter or combination of parameters for START SLAVE UNTIL", +"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", +"SQL thread is not to be started so UNTIL options are ignored", +"Incorrect index name '%-.100s'", +"Incorrect catalog name '%-.100s'", +"Query cache failed to set size %lu, new query cache size is %lu", +"Column '%-.64s' cannot be part of FULLTEXT index", +"Unknown key cache '%-.100s'", +"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", +"Unknown table engine '%s'", +"'%s' is deprecated, use '%s' instead", +"The target table %-.100s of the %s is not updatable", +"The '%s' feature was disabled; you need MySQL built with '%s' to have it working" +"The MySQL server is running with the %s option so it cannot execute this statement" +"Column '%-.100s' has duplicated value '%-.64s' in %s" +"Truncated wrong %-.32s value: '%-.128s'" +"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" +"Invalid ON UPDATE clause for '%-.64s' column", +"This command is not supported in the prepared statement protocol yet", +"Got error %d '%-.100s' from %s", +"Got temporary error %d '%-.100s' from %s", +"Unknown or incorrect time zone: '%-.64s'", +"Invalid TIMESTAMP value in column '%s' at row %ld", +"Invalid %s character string: '%.64s'", +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" +"Conflicting declarations: '%s%s' and '%s%s'" + diff --git a/sql/share/slovak/errmsg.txt b/sql/share/slovak/errmsg.txt index bb241673706..fafab0c2716 100644 --- a/sql/share/slovak/errmsg.txt +++ b/sql/share/slovak/errmsg.txt @@ -22,15 +22,17 @@ Date: Streda 11. November 1998 20:58:15 */ +character-set=latin2 + "hashchk", "isamchk", "NIE", "Áno", "Nemô¾em vytvori» súbor '%-.64s' (chybový kód: %d)", "Nemô¾em vytvori» tabuµku '%-.64s' (chybový kód: %d)", -"Nemô¾em vytvori» databázu '%-.64s'. (chybový kód: %d)", -"Nemô¾em vytvori» databázu '%-.64s'. Databáza existuje", -"Nemô¾em zmaza» databázu '%-.64s'. Databáza neexistuje", +"Nemô¾em vytvori» databázu '%-.64s' (chybový kód: %d)", +"Nemô¾em vytvori» databázu '%-.64s'; databáza existuje", +"Nemô¾em zmaza» databázu '%-.64s'; databáza neexistuje", "Chyba pri mazaní databázy (nemô¾em zmaza» '%-.64s', chybový kód: %d)", "Chyba pri mazaní databázy (nemô¾em vymaza» adresár '%-.64s', chybový kód: %d)", "Chyba pri mazaní '%-.64s' (chybový kód: %d)", @@ -38,12 +40,12 @@ "Nemô¾em zisti» stav '%-.64s' (chybový kód: %d)", "Nemô¾em zisti» pracovný adresár (chybový kód: %d)", "Nemô¾em zamknú» súbor (chybový kód: %d)", -"Nemô¾em otvori» súbor: '%-.64s'. (chybový kód: %d)", +"Nemô¾em otvori» súbor: '%-.64s' (chybový kód: %d)", "Nemô¾em nájs» súbor: '%-.64s' (chybový kód: %d)", "Nemô¾em èíta» adresár '%-.64s' (chybový kód: %d)", "Nemô¾em vojs» do adresára '%-.64s' (chybový kód: %d)", "Záznam bol zmenený od posledného èítania v tabuµke '%-.64s'", -"Disk je plný (%s), èakám na uvoµnenie miesta....", +"Disk je plný (%s), èakám na uvoµnenie miesta...", "Nemô¾em zapísa», duplikát kµúèa v tabuµke '%-.64s'", "Chyba pri zatváraní '%-.64s' (chybový kód: %d)", "Chyba pri èítaní súboru '%-.64s' (chybový kód: %d)", @@ -56,8 +58,8 @@ "Obsluha tabuµky '%-.64s' nemá tento parameter", "Nemô¾em nájs» záznam v '%-.64s'", "Nesprávna informácia v súbore: '%-.64s'", -"Nesprávny kµúè pre tabuµku '%-.64s'. Pokúste sa ho opravi»", -"Starý kµúèový súbor pre '%-.64s'; Opravte ho!", +"Nesprávny kµúè pre tabuµku '%-.64s'; pokúste sa ho opravi»", +"Starý kµúèový súbor pre '%-.64s'; opravte ho!", "'%-.64s' is èíta» only", "Málo pamäti. 
Re¹tartujte daemona a skúste znova (je potrebných %d bytov)", "Málo pamäti pre triedenie, zvý¹te veµkos» triediaceho bufferu", @@ -66,8 +68,8 @@ "Málo miesta-pamäti pre vlákno", "Nemô¾em zisti» meno hostiteµa pre va¹u adresu", "Chyba pri nadväzovaní spojenia", -"Zakázaný prístup pre u¾ívateµa: '%-.32s@%-.64s' k databázi '%-.64s'", -"Zakázaný prístup pre u¾ívateµa: '%-.32s@%-.64s' (pou¾itie hesla: %s)", +"Zakázaný prístup pre u¾ívateµa: '%-.32s'@'%-.64s' k databázi '%-.64s'", +"Zakázaný prístup pre u¾ívateµa: '%-.32s'@'%-.64s' (pou¾itie hesla: %s)", "Nebola vybraná databáza", "Neznámy príkaz", "Pole '%-.64s' nemô¾e by» null", @@ -111,11 +113,11 @@ "Súbor '%-.64s' u¾ existuje", "Záznamov: %ld Zmazaných: %ld Preskoèených: %ld Varovania: %ld", "Záznamov: %ld Opakovaných: %ld", -"Wrong sub part key. The used key part isn't a string or the used length is longer than the key part", -"One nemô¾em zmaza» all fields with ALTER TABLE. Use DROP TABLE instead", +"Incorrect sub part key; the used key part isn't a string or the used length is longer than the key part", +"One nemô¾em zmaza» all fields with ALTER TABLE; use DROP TABLE instead", "Nemô¾em zru¹i» (DROP) '%-.64s'. Skontrolujte, èi neexistujú záznamy/kµúèe", "Záznamov: %ld Opakovaných: %ld Varovania: %ld", -"INSERT TABLE '%-.64s' nie je dovolené v zozname tabuliek FROM", +"You can't specify target table '%-.64s' for update in FROM clause", "Neznáma identifikácia vlákna: %lu", "Nie ste vlastníkom vlákna %lu", "Nie je pou¾itá ¾iadna tabuµka", @@ -151,22 +153,22 @@ "Nemô¾em otvori» zdieµanú kni¾nicu '%-.64s' (chybový kód: %d %s)", "Nemô¾em nájs» funkciu '%-.64s' v kni¾nici'", "Funkcia '%-.64s' nie je definovaná", -"Host '%-.64s' is blocked because of many connection errors. Unblock with 'mysqladmin flush-hosts'", +"Host '%-.64s' is blocked because of many connection errors; unblock with 'mysqladmin flush-hosts'", "Host '%-.64s' is not allowed to connect to this MySQL server", -"You are using MySQL as an anonymous users and anonymous users are not allowed to change passwords", +"You are using MySQL as an anonymous user and anonymous users are not allowed to change passwords", "You must have privileges to update tables in the mysql database to be able to change passwords for others", "Can't find any matching row in the user table", "Rows matched: %ld Changed: %ld Warnings: %ld", -"Can't create a new thread (errno %d). If you are not out of available memory, you can consult the manual for a possible OS-dependent bug", +"Can't create a new thread (errno %d); if you are not out of available memory, you can consult the manual for a possible OS-dependent bug", "Column count doesn't match value count at row %ld", "Can't reopen table: '%-.64s", "Invalid use of NULL value", "Got error '%-.64s' from regexp", -"Mixing of GROUP columns (MIN(),MAX(),COUNT()...) with no GROUP columns is illegal if there is no GROUP BY clause", +"Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) with no GROUP columns is illegal if there is no GROUP BY clause", "There is no such grant defined for user '%-.32s' on host '%-.64s'", -"%-.16s command denied to user: '%-.32s@%-.64s' for table '%-.64s'", -"%-.16s command denied to user: '%-.32s@%-.64s' for column '%-.64s' in table '%-.64s'", -"Illegal GRANT/REVOKE command. 
Please consult the manual which privleges can be used.", +"%-.16s command denied to user '%-.32s'@'%-.64s' for table '%-.64s'", +"%-.16s command denied to user '%-.32s'@'%-.64s' for column '%-.64s' in table '%-.64s'", +"Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used.", "The host or user argument to GRANT is too long", "Table '%-.64s.%s' doesn't exist", "There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'", @@ -175,7 +177,7 @@ "Delayed insert thread couldn't get requested lock for table %-.64s", "Too many delayed threads in use", "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)", -"Got a packet bigger than 'max_allowed_packet'", +"Got a packet bigger than 'max_allowed_packet' bytes", "Got a read error from the connection pipe", "Got an error from fcntl()", "Got packets out of order", @@ -184,7 +186,7 @@ "Got timeout reading communication packets", "Got an error writing communication packets", "Got timeout writing communication packets", -"Result string is longer than max_allowed_packet", +"Result string is longer than 'max_allowed_packet' bytes", "The used table type doesn't support BLOB/TEXT columns", "The used table type doesn't support AUTO_INCREMENT columns", "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES", @@ -193,7 +195,7 @@ "All tables in the MERGE table are not defined identically", "Can't write, because of unique constraint, to table '%-.64s'", "BLOB column '%-.64s' used in key specification without a key length", -"All parts of a PRIMARY KEY must be NOT NULL; If you need NULL in a key, use UNIQUE instead", +"All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead", "Result consisted of more than one row", "This table type requires a primary key", "This version of MySQL is not compiled with RAID support", @@ -218,24 +220,24 @@ "Unknown system variable '%-.64s'", "Table '%-.64s' is marked as crashed and should be repaired", "Table '%-.64s' is marked as crashed and last (automatic?) repair failed", -"Warning: Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage. 
Increase this mysqld variable and try again", -"This operation cannot be performed with a running slave, run SLAVE STOP first", -"This operation requires a running slave, configure slave and do SLAVE START", +"Some non-transactional changed tables couldn't be rolled back", +"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", +"This operation cannot be performed with a running slave, run STOP SLAVE first", +"This operation requires a running slave, configure slave and do START SLAVE", "The server is not configured as slave, fix in config file or with CHANGE MASTER TO", "Could not initialize master info structure, more error messages can be found in the MySQL error log", "Could not create slave thread, check system resources", "User %-.64s has already more than 'max_user_connections' active connections", "You may only use constant expressions with SET", -"Lock wait timeout exceeded", +"Lock wait timeout exceeded; try restarting transaction", "The total number of locks exceeds the lock table size", "Update locks cannot be acquired during a READ UNCOMMITTED transaction", "DROP DATABASE not allowed while thread is holding global read lock", "CREATE DATABASE not allowed while thread is holding global read lock", -"Wrong arguments to %s", -"%-.32s@%-.64s is not allowed to create new users", -"Incorrect table definition; All MERGE tables must be in the same database", -"Deadlock found when trying to get lock; Try restarting transaction", +"Incorrect arguments to %s", +"'%-.32s'@'%-.64s' is not allowed to create new users", +"Incorrect table definition; all MERGE tables must be in the same database", +"Deadlock found when trying to get lock; try restarting transaction", "The used table type doesn't support FULLTEXT indexes", "Cannot add foreign key constraint", "Cannot add a child row: a foreign key constraint fails", @@ -243,22 +245,86 @@ "Error connecting to master: %-.128s", "Error running query on master: %-.128s", "Error when executing command %s: %-.128s", -"Wrong usage of %s and %s", +"Incorrect usage of %s and %s", "The used SELECT statements have a different number of columns", "Can't execute the query because you have a conflicting read lock", "Mixing of transactional and non-transactional tables is disabled", "Option '%s' used twice in statement", "User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied. 
You need the %-.128s privilege for this operation", -"Variable '%-.64s' is a LOCAL variable and can't be used with SET GLOBAL", +"Access denied; you need the %-.128s privilege for this operation", +"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", "Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", "Variable '%-.64s' doesn't have a default value", "Variable '%-.64s' can't be set to the value of '%-.64s'", -"Wrong argument type to variable '%-.64s'", +"Incorrect argument type to variable '%-.64s'", "Variable '%-.64s' can only be set, not read", -"Wrong usage/placement of '%s'", +"Incorrect usage/placement of '%s'", "This version of MySQL doesn't yet support '%s'", "Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules" -"Variable '%-.64s' is a %s variable" +"Slave SQL thread ignored the query because of replicate-*-table rules", +"Variable '%-.64s' is a %s variable", +"Incorrect foreign key definition for '%-.64s': %s", +"Key reference and table reference don't match", +"Operand should contain %d column(s)", +"Subquery returns more than 1 row", +"Unknown prepared statement handler (%.*s) given to %s", +"Help database is corrupt or does not exist", +"Cyclic reference on subqueries", +"Converting column '%s' from %s to %s", +"Reference '%-.64s' not supported (%s)", +"Every derived table must have its own alias", +"Select %u was reduced during optimization", +"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", +"Client does not support authentication protocol requested by server; consider upgrading MySQL client", +"All parts of a SPATIAL index must be NOT NULL", +"COLLATION '%s' is not valid for CHARACTER SET '%s'", +"Slave is already running", +"Slave has already been stopped", +"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", +"ZLIB: Not enough memory", +"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", +"ZLIB: Input data corrupted", +"%d line(s) were cut by GROUP_CONCAT()", +"Row %ld doesn't contain data for all columns", +"Row %ld was truncated; it contained more data than there were input columns", +"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", +"Data truncated; out of range for column '%s' at row %ld", +"Data truncated for column '%s' at row %ld", +"Using storage engine %s for table '%s'", +"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", +"Can't drop one or more of the requested users", +"Can't revoke all privileges, grant for one or more of the requested users", +"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", +"Illegal mix of collations for operation '%s'", +"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", +"Unknown collation: '%-.64s'", +"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", +"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", +"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", +"Incorrect parameter or combination of parameters for START SLAVE UNTIL", +"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; 
otherwise, you are not safe in case of unexpected slave's mysqld restart", +"SQL thread is not to be started so UNTIL options are ignored", +"Incorrect index name '%-.100s'", +"Incorrect catalog name '%-.100s'", +"Query cache failed to set size %lu, new query cache size is %lu", +"Column '%-.64s' cannot be part of FULLTEXT index", +"Unknown key cache '%-.100s'", +"MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work", +"Unknown table engine '%s'", +"'%s' is deprecated, use '%s' instead", +"The target table %-.100s of the %s is not updateable", +"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", +"The MySQL server is running with the %s option so it cannot execute this statement", +"Column '%-.100s' has duplicated value '%-.64s' in %s" +"Truncated wrong %-.32s value: '%-.128s'" +"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" +"Invalid ON UPDATE clause for '%-.64s' column", +"This command is not supported in the prepared statement protocol yet", +"Got error %d '%-.100s' from %s", +"Got temporary error %d '%-.100s' from %s", +"Unknown or incorrect time zone: '%-.64s'", +"Invalid TIMESTAMP value in column '%s' at row %ld", +"Invalid %s character string: '%.64s'", +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" +"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/spanish/errmsg.txt b/sql/share/spanish/errmsg.txt index 2daeebc8eb8..3af8e7b97d1 100644 --- a/sql/share/spanish/errmsg.txt +++ b/sql/share/spanish/errmsg.txt @@ -17,15 +17,18 @@ /* Traduccion por Miguel Angel Fernandez Roiz -- LoboCom Sistemas, s.l. From June 28, 2001 translated by Miguel Solorzano miguel@mysql.com */ + +character-set=latin1 + "hashchk", "isamchk", "NO", "SI", "No puedo crear archivo '%-.64s' (Error: %d)", "No puedo crear tabla '%-.64s' (Error: %d)", -"No puedo crear base de datos '%-.64s'. Error %d", -"No puedo crear base de datos '%-.64s'. La base de datos ya existe", -"No puedo eliminar base de datos '%-.64s'. La base de datos no existe", +"No puedo crear base de datos '%-.64s' (Error: %d)", +"No puedo crear base de datos '%-.64s'; la base de datos ya existe", +"No puedo eliminar base de datos '%-.64s'; la base de datos no existe", "Error eliminando la base de datos(no puedo borrar '%-.64s', error %d)", "Error eliminando la base de datos (No puedo borrar directorio '%-.64s', error %d)", "Error en el borrado de '%-.64s' (Error: %d)", @@ -33,12 +36,12 @@ "No puedo obtener el estado de '%-.64s' (Error: %d)", "No puedo acceder al directorio (Error: %d)", "No puedo bloquear archivo: (Error: %d)", -"No puedo abrir archivo: '%-.64s'. (Error: %d)", +"No puedo abrir archivo: '%-.64s' (Error: %d)", "No puedo encontrar archivo: '%-.64s' (Error: %d)", "No puedo leer el directorio de '%-.64s' (Error: %d)", "No puedo cambiar al directorio de '%-.64s' (Error: %d)", "El registro ha cambiado desde la ultima lectura de la tabla '%-.64s'", -"Disco lleno (%s). Esperando para que se libere algo de espacio....", +"Disco lleno (%s). 
Esperando para que se libere algo de espacio...", "No puedo escribir, clave duplicada en la tabla '%-.64s'", "Error en el cierre de '%-.64s' (Error: %d)", "Error leyendo el fichero '%-.64s' (Error: %d)", @@ -51,8 +54,8 @@ "El manejador de la tabla de '%-.64s' no tiene esta opcion", "No puedo encontrar el registro en '%-.64s'", "Informacion erronea en el archivo: '%-.64s'", -"Clave de archivo erronea para la tabla: '%-.64s'. Intente repararlo", -"Clave de archivo antigua para la tabla '%-.64s'; Reparelo!", +"Clave de archivo erronea para la tabla: '%-.64s'; intente repararlo", +"Clave de archivo antigua para la tabla '%-.64s'; reparelo!", "'%-.64s' es de solo lectura", "Memoria insuficiente. Reinicie el demonio e intentelo otra vez (necesita %d bytes)", "Memoria de ordenacion insuficiente. Incremente el tamano del buffer de ordenacion", @@ -61,8 +64,8 @@ "Memoria/espacio de tranpaso insuficiente", "No puedo obtener el nombre de maquina de tu direccion", "Protocolo erroneo", -"Acceso negado para usuario: '%-.32s@%-.64s' para la base de datos '%-.64s'", -"Acceso negado para usuario: '%-.32s@%-.64s' (Usando clave: %s)", +"Acceso negado para usuario: '%-.32s'@'%-.64s' para la base de datos '%-.64s'", +"Acceso negado para usuario: '%-.32s'@'%-.64s' (Usando clave: %s)", "Base de datos no seleccionada", "Comando desconocido", "La columna '%-.64s' no puede ser nula", @@ -110,7 +113,7 @@ "No puede borrar todos los campos con ALTER TABLE. Usa DROP TABLE para hacerlo", "No puedo ELIMINAR '%-.64s'. compuebe que el campo/clave existe", "Registros: %ld Duplicados: %ld Peligros: %ld", -"INSERT TABLE '%-.64s' no esta permitido en FROM tabla lista", +"You can't specify target table '%-.64s' for update in FROM clause", "Identificador del thread: %lu desconocido", "Tu no eres el propietario del thread%lu", "No ha tablas usadas", @@ -137,7 +140,7 @@ "Muchos campos", "Tamaño de línea muy grande. Máximo tamaño de línea, no contando blob, es %d. Tu tienes que cambiar algunos campos para blob", "Sobrecarga de la pila de thread: Usada: %ld de una %ld pila. Use 'mysqld -O thread_stack=#' para especificar una mayor pila si necesario", -"Dependencia cruzada encontrada en OUTER JOIN. Examine su condición ON", +"Dependencia cruzada encontrada en OUTER JOIN; examine su condición ON", "Columna '%-.32s' es usada con UNIQUE o INDEX pero no está definida como NOT NULL", "No puedo cargar función '%-.64s'", "No puedo inicializar función '%-.64s'; %-.80s", @@ -159,8 +162,8 @@ "Obtenido error '%-.64s' de regexp", "Mezcla de columnas GROUP (MIN(),MAX(),COUNT()...) con no GROUP columnas es ilegal si no hat la clausula GROUP BY", "No existe permiso definido para usuario '%-.32s' en el servidor '%-.64s'", -"%-.16s comando negado para usuario: '%-.32s@%-.64s' para tabla '%-.64s'", -"%-.16s comando negado para usuario: '%-.32s@%-.64s' para columna '%-.64s' en la tabla '%-.64s'", +"%-.16s comando negado para usuario: '%-.32s'@'%-.64s' para tabla '%-.64s'", +"%-.16s comando negado para usuario: '%-.32s'@'%-.64s' para columna '%-.64s' en la tabla '%-.64s'", "Ilegal comando GRANT/REVOKE. 
Por favor consulte el manual para cuales permisos pueden ser usados.", "El argumento para servidor o usuario para GRANT es demasiado grande", "Tabla '%-.64s.%s' no existe", @@ -179,7 +182,7 @@ "Obtenido timeout leyendo paquetes de comunicación", "Obtenido un error de escribiendo paquetes de comunicación", "Obtenido timeout escribiendo paquetes de comunicación", -"La string resultante es mayor que max_allowed_packet", +"La string resultante es mayor que 'max_allowed_packet'", "El tipo de tabla usada no permite soporte para columnas BLOB/TEXT", "El tipo de tabla usada no permite soporte para columnas AUTO_INCREMENT", "INSERT DELAYED no puede ser usado con tablas '%-.64s', porque esta bloqueada con LOCK TABLES", @@ -215,8 +218,8 @@ "Tabla '%-.64s' está marcada como crashed y la última reparación (automactica?) falló", "Aviso: Algunas tablas no transancionales no pueden tener rolled back", "Multipla transición necesita mas que 'max_binlog_cache_size' bytes de almacenamiento. Aumente esta variable mysqld y tente de nuevo", -"Esta operación no puede ser hecha con el esclavo funcionando, primero use SLAVE STOP", -"Esta operación necesita el esclavo funcionando, configure esclavo y haga el SLAVE START", +"Esta operación no puede ser hecha con el esclavo funcionando, primero use STOP SLAVE", +"Esta operación necesita el esclavo funcionando, configure esclavo y haga el START SLAVE", "El servidor no está configurado como esclavo, edite el archivo config file o con CHANGE MASTER TO", "Could not initialize master info structure, more error messages can be found in the MySQL error log", "No puedo crear el thread esclavo, verifique recursos del sistema", @@ -228,16 +231,16 @@ "DROP DATABASE no permitido mientras un thread está ejerciendo un bloqueo de lectura global", "CREATE DATABASE no permitido mientras un thread está ejerciendo un bloqueo de lectura global", "Argumentos errados para %s", -"%-.32s@%-.64s no es permitido para crear nuevos usuarios", +"'%-.32s`@`%-.64s` no es permitido para crear nuevos usuarios", "Incorrecta definición de la tabla; Todas las tablas MERGE deben estar en el mismo banco de datos", "Encontrado deadlock cuando tentando obtener el bloqueo; Tente recomenzar la transición", "El tipo de tabla usada no soporta índices FULLTEXT", "No puede adicionar clave extranjera constraint", "No puede adicionar una línea hijo: falla de clave extranjera constraint", "No puede deletar una línea padre: falla de clave extranjera constraint", -"Error de coneccion a master: %-128s", -"Error executando el query en master: %-128%", -"Error de %s: %-128%", +"Error de coneccion a master: %-.128s", +"Error executando el query en master: %-.128s", +"Error de %s: %-.128s", "Equivocado uso de %s y %s", "El comando SELECT usado tiene diferente número de columnas", "No puedo ejecutar el query porque usted tiene conflicto de traba de lectura", @@ -245,7 +248,7 @@ "Opción '%s' usada dos veces en el comando", "Usuario '%-.64s' ha excedido el recurso '%s' (actual valor: %ld)", "Acceso negado. 
Usted necesita el privilegio %-.128s para esta operación", -"Variable '%-.64s' es una LOCAL variable y no puede ser usada con SET GLOBAL", +"Variable '%-.64s' es una SESSION variable y no puede ser usada con SET GLOBAL", "Variable '%-.64s' es una GLOBAL variable y no puede ser configurada con SET GLOBAL", "Variable '%-.64s' no tiene un valor patrón", "Variable '%-.64s' no puede ser configurada para el valor de '%-.64s'", @@ -254,6 +257,70 @@ "Equivocado uso/colocación de '%s'", "Esta versión de MySQL no soporta todavia '%s'", "Recibió fatal error %d: '%-.128s' del master cuando leyendo datos del binary log", -"Slave SQL thread ignorado el query debido a las reglas de replicación-*-tabla" -"Variable '%-.64s' is a %s variable" +"Slave SQL thread ignorado el query debido a las reglas de replicación-*-tabla", +"Variable '%-.64s' es una %s variable", +"Equivocada definición de llave extranjera para '%-.64s': %s", +"Referencia de llave y referencia de tabla no coinciden", +"Operando debe tener %d columna(s)", +"Subconsulta retorna mas que 1 línea", +"Desconocido preparado comando handler (%.*s) dado para %s", +"Base de datos Help está corrupto o no existe", +"Cíclica referencia en subconsultas", +"Convirtiendo columna '%s' de %s para %s", +"Referencia '%-.64s' no soportada (%s)", +"Cada tabla derivada debe tener su propio alias", +"Select %u fué reducido durante optimización", +"Tabla '%-.64s' de uno de los SELECT no puede ser usada en %-.32s", +"Cliente no soporta protocolo de autenticación solicitado por el servidor; considere actualizar el cliente MySQL", +"Todas las partes de una SPATIAL index deben ser NOT NULL", +"COLLATION '%s' no es válido para CHARACTER SET '%s'", +"Slave ya está funcionando", +"Slave ya fué parado", +"Tamaño demasiado grande para datos descomprimidos. El máximo tamaño es %d. 
(probablemente, extensión de datos descomprimidos fué corrompida)", +"ZLIB: No suficiente memoria", +"ZLIB: No suficiente espacio en el búfer de salida (probablemente, extensión de datos descomprimidos fué corrompida)", +"ZLIB: Dato de entrada fué corrompido", +"%d línea(s) fueron cortadas por GROUP_CONCAT()", +"Línea %ld no contiene datos para todas las columnas", +"Línea %ld fué truncada; La misma contine mas datos que las que existen en las columnas de entrada", +"Datos truncado, NULL suministrado para NOT NULL columna '%s' en la línea %ld", +"Datos truncados, fuera de gama para columna '%s' en la línea %ld", +"Datos truncados para columna '%s' en la línea %ld", +"Usando motor de almacenamiento %s para tabla '%s'", +"Ilegal mezcla de collations (%s,%s) y (%s,%s) para operación '%s'", +"No puede remover uno o mas de los usuarios solicitados", +"No puede revocar todos los privilegios, derecho para uno o mas de los usuarios solicitados", +"Ilegal mezcla de collations (%s,%s), (%s,%s), (%s,%s) para operación '%s'", +"Ilegal mezcla de collations para operación '%s'", +"Variable '%-.64s' no es una variable componente (No puede ser usada como XXXX.variable_name)", +"Collation desconocida: '%-.64s'", +"Parametros SSL en CHANGE MASTER son ignorados porque este slave MySQL fue compilado sin soporte SSL; pueden ser usados despues cuando el slave MySQL con SSL sea inicializado", +"Servidor está rodando en modo --secure-auth, pero '%s'@'%s' tiene clave en el antiguo formato; por favor cambie la clave para el nuevo formato", +"Campo o referencia '%-.64s%s%-.64s%s%-.64s' de SELECT #%d fue resolvido en SELECT #%d", +"Parametro equivocado o combinación de parametros para START SLAVE UNTIL", +"Es recomendado rodar con --skip-slave-start cuando haciendo replicación step-by-step con START SLAVE UNTIL, a menos que usted no esté seguro en caso de inesperada reinicialización del mysqld slave", +"SQL thread no es inicializado tal que opciones UNTIL son ignoradas", +"Nombre de índice incorrecto '%-.100s'", +"Nombre de catalog incorrecto '%-.100s'", +"Query cache fallada para configurar tamaño %lu, nuevo tamaño de query cache es %lu", +"Columna '%-.64s' no puede ser parte de FULLTEXT index", +"Desconocida key cache '%-.100s'", +"MySQL esta inicializado en modo --skip-name-resolve. 
Usted necesita reinicializarlo sin esta opción para este derecho funcionar", +"Desconocido motor de tabla '%s'", +"'%s' está desaprobado, use '%s' en su lugar", +"La tabla destino %-.100s del %s no es actualizable", +"El recurso '%s' fue deshabilitado; usted necesita construir MySQL con '%s' para tener eso funcionando", +"El servidor MySQL está rodando con la opción %s tal que no puede ejecutar este comando", +"Columna '%-.100s' tiene valor doblado '%-.64s' en %s" +"Equivocado truncado %-.32s valor: '%-.128s'" +"Incorrecta definición de tabla; Solamente debe haber una columna TIMESTAMP con CURRENT_TIMESTAMP en DEFAULT o ON UPDATE cláusula" +"Inválido ON UPDATE cláusula para campo '%-.64s'", +"This command is not supported in the prepared statement protocol yet", +"Got error %d '%-.100s' from %s", +"Got temporary error %d '%-.100s' from %s", +"Unknown or incorrect time zone: '%-.64s'", +"Invalid TIMESTAMP value in column '%s' at row %ld", +"Invalid %s character string: '%.64s'", +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" +"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/swedish/errmsg.txt b/sql/share/swedish/errmsg.txt index a9273f6222c..b552df08bf3 100644 --- a/sql/share/swedish/errmsg.txt +++ b/sql/share/swedish/errmsg.txt @@ -14,15 +14,17 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +character-set=latin1 + "hashchk", "isamchk", "NO", "YES", "Kan inte skapa filen '%-.64s' (Felkod: %d)", "Kan inte skapa tabellen '%-.64s' (Felkod: %d)", -"Kan inte skapa databasen '%-.64s'. (Felkod: %d)", +"Kan inte skapa databasen '%-.64s' (Felkod: %d)", "Databasen '%-.64s' existerar redan", -"Kan inte radera databasen '%-.64s'. Databasen finns inte", +"Kan inte radera databasen '%-.64s'; databasen finns inte", "Fel vid radering av databasen (Kan inte radera '%-.64s'. Felkod: %d)", "Fel vid radering av databasen (Kan inte radera biblioteket '%-.64s'. Felkod: %d)", "Kan inte radera filen '%-.64s' (Felkod: %d)", @@ -30,12 +32,12 @@ "Kan inte läsa filinformationen (stat) från '%-.64s' (Felkod: %d)", "Kan inte inte läsa aktivt bibliotek. (Felkod: %d)", "Kan inte låsa filen. (Felkod: %d)", -"Kan inte använda '%-.64s'. (Felkod: %d)", -"Hittar inte filen '%-.64s'. (Felkod: %d)", -"Kan inte läsa från bibliotek '%-.64s'. (Felkod: %d)", -"Kan inte byta till '%-.64s'. (Felkod: %d)", +"Kan inte använda '%-.64s' (Felkod: %d)", +"Hittar inte filen '%-.64s' (Felkod: %d)", +"Kan inte läsa från bibliotek '%-.64s' (Felkod: %d)", +"Kan inte byta till '%-.64s' (Felkod: %d)", "Posten har förändrats sedan den lästes i register '%-.64s'", -"Disken är full (%s). Väntar tills det finns ledigt utrymme....", +"Disken är full (%s). Väntar tills det finns ledigt utrymme...", "Kan inte skriva, dubbel söknyckel i register '%-.64s'", "Fick fel vid stängning av '%-.64s' (Felkod: %d)", "Fick fel vid läsning av '%-.64s' (Felkod %d)", @@ -48,8 +50,8 @@ "Registrets databas har inte denna facilitet", "Hittar inte posten", "Felaktig fil: '%-.64s'", -"Fatalt fel vid hantering av register '%-.64s'. Kör en reparation", -"Gammal nyckelfil '%-.64s'. Reparera registret", +"Fatalt fel vid hantering av register '%-.64s'; kör en reparation", +"Gammal nyckelfil '%-.64s'; reparera registret", "'%-.64s' är skyddad mot förändring", "Oväntat slut på minnet, starta om programmet och försök på nytt (Behövde %d bytes)", "Sorteringsbufferten räcker inte till. 
Kontrollera startparametrarna", @@ -58,14 +60,14 @@ "Fick slut på minnet. Kontrollera om mysqld eller någon annan process använder allt tillgängligt minne. Om inte, försök använda 'ulimit' eller allokera mera swap", "Kan inte hitta 'hostname' för din adress", "Fel vid initiering av kommunikationen med klienten", -"Användare '%-.32s@%-.64s' är ej berättigad att använda databasen %-.64s", -"Användare '%-.32s@%-.64s' är ej berättigad att logga in (Använder lösen: %s)", +"Användare '%-.32s'@'%-.64s' är ej berättigad att använda databasen %-.64s", +"Användare '%-.32s'@'%-.64s' är ej berättigad att logga in (Använder lösen: %s)", "Ingen databas i användning", "Okänt commando", "Kolumn '%-.64s' får inte vara NULL", "Okänd databas: '%-.64s'", "Tabellen '%-.64s' finns redan", -"Okänd tabell '%-.64s'", +"Okänd tabell '%-.180s'", "Kolumn '%-.64s' i %s är inte unik", "Servern går nu ned", "Okänd kolumn '%-.64s' i %s", @@ -99,7 +101,7 @@ "Tabellen '%-.64s' har inget index som motsvarar det angivna i CREATE INDEX. Skapa om tabellen", "Fältseparatorerna är vad som förväntades. Kontrollera mot manualen", "Man kan inte använda fast radlängd med blobs. Använd 'fields terminated by'", -"Textfilen '%' måste finnas i databasbiblioteket eller vara läsbar för alla", +"Textfilen '%.64s' måste finnas i databasbiblioteket eller vara läsbar för alla", "Filen '%-.64s' existerar redan", "Rader: %ld Bortagna: %ld Dubletter: %ld Varningar: %ld", "Rader: %ld Dubletter: %ld", @@ -156,8 +158,8 @@ "Fick fel '%-.64s' från REGEXP", "Man får ha både GROUP-kolumner (MIN(),MAX(),COUNT()...) och fält i en fråga om man inte har en GROUP BY-del", "Det finns inget privilegium definierat för användare '%-.32s' på '%-.64s'", -"%-.16s ej tillåtet för '%-.32s@%-.64s' för tabell '%-.64s'", -"%-.16s ej tillåtet för '%-.32s@%-.64s' för kolumn '%-.64s' i tabell '%-.64s'", +"%-.16s ej tillåtet för '%-.32s'@'%-.64s' för tabell '%-.64s'", +"%-.16s ej tillåtet för '%-.32s'@'%-.64s' för kolumn '%-.64s' i tabell '%-.64s'", "Felaktigt GRANT-privilegium använt", "Felaktigt maskinnamn eller användarnamn använt med GRANT", "Det finns ingen tabell som heter '%-.64s.%s'", @@ -176,7 +178,7 @@ "Fick 'timeout' vid läsning från klienten", "Fick ett fel vid skrivning till klienten", "Fick 'timeout' vid skrivning till klienten", -"Resultatsträngen är längre än max_allowed_packet", +"Resultatsträngen är längre än 'max_allowed_packet'", "Den använda tabelltypen kan inte hantera BLOB/TEXT-kolumner", "Den använda tabelltypen kan inte hantera AUTO_INCREMENT-kolumner", "INSERT DELAYED kan inte användas med tabell '%-.64s', emedan den är låst med LOCK TABLES", @@ -198,7 +200,7 @@ "Fick fel %d vid ROLLBACK", "Fick fel %d vid FLUSH_LOGS", "Fick fel %d vid CHECKPOINT", -"Avbröt länken för tråd %ld till db '%-.64s', användare '%-.32s', host '%-.64s' (%.-64s)", +"Avbröt länken för tråd %ld till db '%-.64s', användare '%-.32s', host '%-.64s' (%-.64s)", "Tabellhanteraren klarar inte en binär kopiering av tabellen", "Binärloggen stängdes medan FLUSH MASTER utfördes", "Failed rebuilding the index of dumped table '%-.64s'", @@ -212,10 +214,10 @@ "Tabell '%-.64s' är trasig och senast (automatiska?) reparation misslyckades", "Warning: Några icke transaktionella tabeller kunde inte återställas vid ROLLBACK", "Transaktionen krävde mera än 'max_binlog_cache_size' minne. 
Öka denna mysqld-variabel och försök på nytt", -"Denna operation kan inte göras under replikering; Gör SLAVE STOP först", -"Denna operation kan endast göras under replikering; Konfigurera slaven och gör SLAVE START", +"Denna operation kan inte göras under replikering; Gör STOP SLAVE först", +"Denna operation kan endast göras under replikering; Konfigurera slaven och gör START SLAVE", "Servern är inte konfigurerade som en replikationsslav. Ändra konfigurationsfilen eller gör CHANGE MASTER TO", -"Could not initialize master info structure, more error messages can be found in the MySQL error log", +"Kunde inte initialisera replikationsstrukturerna. See MySQL fel fil för mera information", "Kunde inte starta en tråd för replikering", "Användare '%-.64s' har redan 'max_user_connections' aktiva inloggningar", "Man kan endast använda konstantuttryck med SET", @@ -225,8 +227,8 @@ "DROP DATABASE är inte tillåtet när man har ett globalt läslås", "CREATE DATABASE är inte tillåtet när man har ett globalt läslås", "Felaktiga argument till %s", -"%-.32s@%-.64s har inte rättighet att skapa nya användare", -"Felaktig tabelldefinition. Alla tabeller i en MERGE-tabell måste vara i samma databas", +"'%-.32s'@'%-.64s' har inte rättighet att skapa nya användare", +"Felaktig tabelldefinition; alla tabeller i en MERGE-tabell måste vara i samma databas", "Fick 'DEADLOCK' vid låsförsök av block/rad. Försök att starta om transaktionen", "Tabelltypen har inte hantering av FULLTEXT-index", "Kan inte lägga till 'FOREIGN KEY constraint'", @@ -236,13 +238,13 @@ "Fick fel vid utförande av command på mastern: %-.128s", "Fick fel vid utförande av %s: %-.128s", "Felaktig använding av %s and %s", -"SELECT-kommandona har olika antal kolumner" +"SELECT-kommandona har olika antal kolumner", "Kan inte utföra kommandot emedan du har ett READ-lås", "Blandning av transaktionella och icke-transaktionella tabeller är inaktiverat", "Option '%s' användes två gånger", "Användare '%-.64s' har överskridit '%s' (nuvarande värde: %ld)", "Du har inte privlegiet '%-.128s' som behövs för denna operation", -"Variabel '%-.64s' är en LOCAL variabel och kan inte ändrad med SET GLOBAL", +"Variabel '%-.64s' är en SESSION variabel och kan inte ändrad med SET GLOBAL", "Variabel '%-.64s' är en GLOBAL variabel och bör sättas med SET GLOBAL", "Variabel '%-.64s' har inte ett DEFAULT-värde", "Variabel '%-.64s' kan inte sättas till '%-.64s'", @@ -250,7 +252,71 @@ "Variabeln '%-.64s' kan endast sättas, inte läsas", "Fel använding/placering av '%s'", "Denna version av MySQL kan ännu inte utföra '%s'", -"Fick fatalt fel %d: '%-.128s' från master vid läsning av binärloggen" -"Slave SQL thread ignored the query because of replicate-*-table rules" -"Variable '%-.64s' is a %s variable" +"Fick fatalt fel %d: '%-.128s' från master vid läsning av binärloggen", +"Slav SQL tråden ignorerade frågan pga en replicate-*-table regel", +"Variabel '%-.64s' är av typ %s", +"Felaktig FOREIGN KEY-definition för '%-.64s': %s", +"Nyckelreferensen och tabellreferensen stämmer inte överens", +"Operand should contain %d column(s)", +"Subquery returnerade mer än 1 rad", +"Okänd PREPARED STATEMENT id (%.*s) var given till %s", +"Hjälpdatabasen finns inte eller är skadad", +"Cyklisk referens i subqueries", +"Konvertar kolumn '%s' från %s till %s", +"Referens '%-.64s' stöds inte (%s)", +"Varje 'derived table' måste ha sitt eget alias", +"Select %u reducerades vid optimiering", +"Tabell '%-.64s' från en SELECT kan inte användas i %-.32s", +"Klienten stöder inte autentiseringsprotokollet 
som begärts av servern; överväg uppgradering av klientprogrammet.", +"Alla delar av en SPATIAL index måste vara NOT NULL", +"COLLATION '%s' är inte tillåtet för CHARACTER SET '%s'", +"Slaven har redan startat", +"Slaven har redan stoppat", +"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", +"ZLIB: Not enough memory", +"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", +"ZLIB: Input data corrupted", +"%d rad(er) kapades av GROUP_CONCAT()", +"Row %ld doesn't contain data for all columns", +"Row %ld was truncated; it contained more data than there were input columns", +"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", +"Data truncated; out of range for column '%s' at row %ld", +"Data truncated for column '%s' at row %ld", +"Använder handler %s för tabell '%s'", +"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", +"Can't drop one or more of the requested users", +"Can't revoke all privileges, grant for one or more of the requested users", +"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", +"Illegal mix of collations for operation '%s'", +"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", +"Unknown collation: '%-.64s'", +"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", +"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", +"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", +"Incorrect parameter or combination of parameters for START SLAVE UNTIL", +"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", +"SQL thread is not to be started so UNTIL options are ignored", +"Felaktigt index namn '%-.100s'", +"Felaktigt katalog namn '%-.100s'", +"Storleken av "Query cache" kunde inte sättas till %lu, ny storlek är %lu", +"Kolumn '%-.64s' kan inte vara del av ett FULLTEXT index", +"Okänd nyckel cache '%-.100s'", +"MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work", +"Unknown table engine '%s'", +"'%s' is deprecated, use '%s' instead", +"Tabel %-.100s använd med '%s' är inte uppdateringsbar", +"'%s' är inte aktiverad; För att aktivera detta måste du bygga om MySQL med '%s' definerad", +"MySQL är startad med --skip-grant-tables. 
Pga av detta kan du inte använda detta kommando", +"Column '%-.100s' has duplicated value '%-.64s' in %s" +"Truncated wrong %-.32s value: '%-.128s'" +"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" +"Invalid ON UPDATE clause for '%-.64s' column", +"This command is not supported in the prepared statement protocol yet", +"Fick felkod %d '%-.100s' från %s", +"Fick tilfällig felkod %d '%-.100s' från %s", +"Unknown or incorrect time zone: '%-.64s'", +"Invalid TIMESTAMP value in column '%s' at row %ld", +"Invalid %s character string: '%.64s'", +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" +"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/ukrainian/errmsg.txt b/sql/share/ukrainian/errmsg.txt index 371dd16d737..9914846b1f8 100644 --- a/sql/share/ukrainian/errmsg.txt +++ b/sql/share/ukrainian/errmsg.txt @@ -20,13 +20,15 @@ * Version: 13/09/2001 mysql-3.23.41 */ +character-set=koi8u + "hashchk", "isamchk", "î¶", "ôáë", "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÆÁÊÌ '%-.64s' (ÐÏÍÉÌËÁ: %d)", "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÔÁÂÌÉÃÀ '%-.64s' (ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ '%-.64s'. (ÐÏÍÉÌËÁ: %d)", +"îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ '%-.64s' (ÐÏÍÉÌËÁ: %d)", "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ '%-.64s'. âÁÚÁ ÄÁÎÎÉÈ ¦ÓÎÕ¤", "îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ '%-.64s'. âÁÚÁ ÄÁÎÎÉÈ ÎÅ ¦ÓÎÕ¤", "îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ (îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ '%-.64s', ÐÏÍÉÌËÁ: %d)", @@ -54,7 +56,7 @@ "äÅÓËÒÉÐÔÏÒ ÔÁÂÌÉæ '%-.64s' ÎÅ ÍÁ¤ 椧 ×ÌÁÓÔÉ×ÏÓÔ¦", "îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ Õ '%-.64s'", "èÉÂÎÁ ¦ÎÆÏÒÍÁÃ¦Ñ Õ ÆÁÊ̦: '%-.64s'", -"èÉÂÎÉÊ ÆÁÊÌ ËÌÀÞÅÊ ÄÌÑ ÔÁÂÌÉæ: '%-.64s'. óÐÒÏÂÕÊÔÅ ÊÏÇÏ ×¦ÄÎÏ×ÉÔÉ", +"èÉÂÎÉÊ ÆÁÊÌ ËÌÀÞÅÊ ÄÌÑ ÔÁÂÌÉæ: '%-.64s'; óÐÒÏÂÕÊÔÅ ÊÏÇÏ ×¦ÄÎÏ×ÉÔÉ", "óÔÁÒÉÊ ÆÁÊÌ ËÌÀÞÅÊ ÄÌÑ ÔÁÂÌÉæ '%-.64s'; ÷¦ÄÎÏ×¦ÔØ ÊÏÇÏ!", "ôÁÂÌÉÃÑ '%-.64s' Ô¦ÌØËÉ ÄÌÑ ÞÉÔÁÎÎÑ", "âÒÁË ÐÁÍ'ÑÔ¦. òÅÓÔÁÒÔÕÊÔÅ ÓÅÒ×ÅÒ ÔÁ ÓÐÒÏÂÕÊÔÅ ÚÎÏ×Õ (ÐÏÔÒ¦ÂÎÏ %d ÂÁÊÔ¦×)", @@ -64,8 +66,8 @@ "âÒÁË ÐÁÍ'ÑÔ¦; ðÅÒÅצÒÔÅ ÞÉ mysqld ÁÂÏ ÑË¦ÓØ ¦ÎÛ¦ ÐÒÏÃÅÓÉ ×ÉËÏÒÉÓÔÏ×ÕÀÔØ ÕÓÀ ÄÏÓÔÕÐÎÕ ÐÁÍ'ÑÔØ. ñË Î¦, ÔÏ ×É ÍÏÖÅÔÅ ÓËÏÒÉÓÔÁÔÉÓÑ 'ulimit', ÁÂÉ ÄÏÚ×ÏÌÉÔÉ mysqld ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ Â¦ÌØÛÅ ÐÁÍ'ÑÔ¦ ÁÂÏ ×É ÍÏÖÅÔÅ ÄÏÄÁÔÉ Â¦ÌØÛŠͦÓÃÑ Ð¦Ä Ó×ÁÐ", "îÅ ÍÏÖÕ ×ÉÚÎÁÞÉÔÉ ¦Í'Ñ ÈÏÓÔÕ ÄÌÑ ×ÁÛϧ ÁÄÒÅÓÉ", "îÅצÒÎÁ ÕÓÔÁÎÏ×ËÁ Ú×'ÑÚËÕ", -"äÏÓÔÕÐ ÚÁÂÏÒÏÎÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s@%-.64s' ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ '%-.64s'", -"äÏÓÔÕÐ ÚÁÂÏÒÏÎÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s@%-.64s' (÷ÉËÏÒÉÓÔÁÎÏ ÐÁÒÏÌØ: %s)", +"äÏÓÔÕÐ ÚÁÂÏÒÏÎÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s'@'%-.64s' ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ '%-.64s'", +"äÏÓÔÕÐ ÚÁÂÏÒÏÎÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s'@'%-.64s' (÷ÉËÏÒÉÓÔÁÎÏ ÐÁÒÏÌØ: %s)", "âÁÚÕ ÄÁÎÎÉÈ ÎÅ ×ÉÂÒÁÎÏ", "îÅצÄÏÍÁ ËÏÍÁÎÄÁ", "óÔÏ×ÂÅÃØ '%-.64s' ÎÅ ÍÏÖÅ ÂÕÔÉ ÎÕÌØÏ×ÉÍ", @@ -91,7 +93,7 @@ "ðÅÒ×ÉÎÎÏÇÏ ËÌÀÞÁ ×ÉÚÎÁÞÅÎÏ ÎÅÏÄÎÏÒÁÚÏ×Ï", "úÁÂÁÇÁÔÏ ËÌÀÞ¦× ÚÁÚÎÁÞÅÎÏ. äÏÚ×ÏÌÅÎÏ ÎÅ Â¦ÌØÛÅ %d ËÌÀÞ¦×", "úÁÂÁÇÁÔÏ ÞÁÓÔÉÎ ËÌÀÞÁ ÚÁÚÎÁÞÅÎÏ. äÏÚ×ÏÌÅÎÏ ÎÅ Â¦ÌØÛÅ %d ÞÁÓÔÉÎ", -"úÁÚÎÁÞÅÎÉÊ ËÌÀÞ ÚÁÄÏ×ÇÉÊ. îÁÊÂ¦ÌØÛÁ ÄÏ×ÖÉÎÁ ËÌÀÞÁ %d", +"úÁÚÎÁÞÅÎÉÊ ËÌÀÞ ÚÁÄÏ×ÇÉÊ. îÁÊÂ¦ÌØÛÁ ÄÏ×ÖÉÎÁ ËÌÀÞÁ %d ÂÁÊÔ¦×", "ëÌÀÞÏ×ÉÊ ÓÔÏ×ÂÅÃØ '%-.64s' ÎÅ ¦ÓÎÕ¤ Õ ÔÁÂÌÉæ", "BLOB ÓÔÏ×ÂÅÃØ '%-.64s' ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÒÉÓÔÁÎÉÊ Õ ×ÉÚÎÁÞÅÎΦ ËÌÀÞÁ × ÃØÏÍÕ ÔÉЦ ÔÁÂÌÉæ", "úÁÄÏ×ÇÁ ÄÏ×ÖÉÎÁ ÓÔÏ×ÂÃÑ '%-.64s' (max = %d). ÷ÉËÏÒÉÓÔÁÊÔÅ ÔÉÐ BLOB", @@ -113,7 +115,7 @@ "îÅ ÍÏÖÌÉ×Ï ×ÉÄÁÌÉÔÉ ×Ó¦ ÓÔÏ×Âæ ÚÁ ÄÏÐÏÍÏÇÏÀ ALTER TABLE. äÌÑ ÃØÏÇÏ ÓËÏÒÉÓÔÁÊÔÅÓÑ DROP TABLE", "îÅ ÍÏÖÕ DROP '%-.64s'. 
ðÅÒÅצÒÔÅ, ÞÉ ÃÅÊ ÓÔÏ×ÂÅÃØ/ËÌÀÞ ¦ÓÎÕ¤", "úÁÐÉÓ¦×: %ld äÕÂ̦ËÁÔ¦×: %ld úÁÓÔÅÒÅÖÅÎØ: %ld", -"INSERT TABLE '%-.64s' ÎÅ ÄÏÚ×ÏÌÅÎÏ Õ ÐÅÒÅ̦ËÕ FROM TABLE", +"ôÁÂÌÉÃÑ '%-.64s' ÝÏ ÚͦÎÀ¤ÔØÓÑ ÎÅ ÄÏÚ×ÏÌÅÎÁ Õ ÐÅÒÅ̦ËÕ ÔÁÂÌÉÃØ FROM", "îÅצÄÏÍÉÊ ¦ÄÅÎÔÉÆ¦ËÁÔÏÒ Ç¦ÌËÉ: %lu", "÷É ÎÅ ×ÏÌÏÄÁÒ Ç¦ÌËÉ %lu", "îÅ ×ÉËÏÒÉÓÔÁÎÏ ÔÁÂÌÉÃØ", @@ -162,9 +164,9 @@ "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ '%-.64s' ×¦Ä ÒÅÇÕÌÑÒÎÏÇÏ ×ÉÒÁÚÕ", "úͦÛÕ×ÁÎÎÑ GROUP ÓÔÏ×ÂÃ¦× (MIN(),MAX(),COUNT()...) Ú ÎÅ GROUP ÓÔÏ×ÂÃÑÍÉ ¤ ÚÁÂÏÒÏÎÅÎÉÍ, ÑËÝÏ ÎÅ ÍÁ¤ GROUP BY", "ðÏ×ÎÏ×ÁÖÅÎØ ÎÅ ×ÉÚÎÁÞÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ '%-.32s' Ú ÈÏÓÔÕ '%-.64s'", -"%-.16s ËÏÍÁÎÄÁ ÚÁÂÏÒÏÎÅÎÁ ËÏÒÉÓÔÕ×ÁÞÕ: '%-.32s@%-.64s' Õ ÔÁÂÌÉæ '%-.64s'", -"%-.16s ËÏÍÁÎÄÁ ÚÁÂÏÒÏÎÅÎÁ ËÏÒÉÓÔÕ×ÁÞÕ: '%-.32s@%-.64s' ÄÌÑ ÓÔÏ×ÂÃÑ '%-.64s' Õ ÔÁÂÌÉæ '%-.64s'", -"èÉÂÎÁ GRANT/REVOKE ËÏÍÁÎÄÁ. ðÒÏÞÉÔÁÊÔÅ ÄÏËÕÍÅÎÔÁæÀ ÓÔÏÓÏ×ÎÏ ÔÏÇÏ, Ñ˦ ÐÒÁ×Á ÍÏÖÎÁ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ.", +"%-.16s ËÏÍÁÎÄÁ ÚÁÂÏÒÏÎÅÎÁ ËÏÒÉÓÔÕ×ÁÞÕ: '%-.32s'@'%-.64s' Õ ÔÁÂÌÉæ '%-.64s'", +"%-.16s ËÏÍÁÎÄÁ ÚÁÂÏÒÏÎÅÎÁ ËÏÒÉÓÔÕ×ÁÞÕ: '%-.32s'@'%-.64s' ÄÌÑ ÓÔÏ×ÂÃÑ '%-.64s' Õ ÔÁÂÌÉæ '%-.64s'", +"èÉÂÎÁ GRANT/REVOKE ËÏÍÁÎÄÁ; ÐÒÏÞÉÔÁÊÔÅ ÄÏËÕÍÅÎÔÁæÀ ÓÔÏÓÏ×ÎÏ ÔÏÇÏ, Ñ˦ ÐÒÁ×Á ÍÏÖÎÁ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ", "áÒÇÕÍÅÎÔ host ÁÂÏ user ÄÌÑ GRANT ÚÁÄÏ×ÇÉÊ", "ôÁÂÌÉÃÑ '%-.64s.%-.64s' ÎÅ ¦ÓÎÕ¤", "ðÏ×ÎÏ×ÁÖÅÎØ ÎÅ ×ÉÚÎÁÞÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ '%-.32s' Ú ÈÏÓÔÕ '%-.64s' ÄÌÑ ÔÁÂÌÉæ '%-.64s'", @@ -173,7 +175,7 @@ "ç¦ÌËÁ ÄÌÑ INSERT DELAYED ÎÅ ÍÏÖÅ ÏÔÒÉÍÁÔÉ ÂÌÏËÕ×ÁÎÎÑ ÄÌÑ ÔÁÂÌÉæ %-.64s", "úÁÂÁÇÁÔÏ ÚÁÔÒÉÍÁÎÉÈ Ç¦ÌÏË ×ÉËÏÒÉÓÔÏ×Õ¤ÔØÓÑ", "ðÅÒÅÒ×ÁÎÏ Ú'¤ÄÎÁÎÎÑ %ld ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ: '%-.64s' ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s' (%-.64s)", -"ïÔÒÉÍÁÎÏ ÐÁËÅÔ Â¦ÌØÛÉÊ Î¦Ö max_allowed_packet", +"ïÔÒÉÍÁÎÏ ÐÁËÅÔ Â¦ÌØÛÉÊ Î¦Ö 'max_allowed_packet'", "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ ÞÉÔÁÎÎÑ Ú ËÏÍÕΦËÁæÊÎÏÇÏ ËÁÎÁÌÕ", "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËËÕ ×¦Ä fcntl()", "ïÔÒÉÍÁÎÏ ÐÁËÅÔÉ Õ ÎÅÎÁÌÅÖÎÏÍÕ ÐÏÒÑÄËÕ", @@ -182,7 +184,7 @@ "ïÔÒÉÍÁÎÏ ÚÁÔÒÉÍËÕ ÞÉÔÁÎÎÑ ËÏÍÕΦËÁæÊÎÉÈ ÐÁËÅÔ¦×", "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ ÚÁÐÉÓÕ ËÏÍÕΦËÁæÊÎÉÈ ÐÁËÅÔ¦×", "ïÔÒÉÍÁÎÏ ÚÁÔÒÉÍËÕ ÚÁÐÉÓÕ ËÏÍÕΦËÁæÊÎÉÈ ÐÁËÅÔ¦×", -"óÔÒÏËÁ ÒÅÚÕÌØÔÁÔÕ ÄÏ×ÛÁ Î¦Ö max_allowed_packet", +"óÔÒÏËÁ ÒÅÚÕÌØÔÁÔÕ ÄÏ×ÛÁ Î¦Ö 'max_allowed_packet'", "÷ÉËÏÒÉÓÔÁÎÉÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ BLOB/TEXT ÓÔÏ×Âæ", "÷ÉËÏÒÉÓÔÁÎÉÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ AUTO_INCREMENT ÓÔÏ×Âæ", "INSERT DELAYED ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÒÉÓÔÁÎÏ Ú ÔÁÂÌÉÃÅÀ '%-.64s', ÔÏÍÕ ÝÏ §§ ÚÁÂÌÏËÏ×ÁÎÏ Ú LOCK TABLES", @@ -218,8 +220,8 @@ "ôÁÂÌÉÃÀ '%-.64s' ÍÁÒËÏ×ÁÎÏ ÑË Ú¦ÐÓÏ×ÁÎÕ ÔÁ ÏÓÔÁÎΤ (Á×ÔÏÍÁÔÉÞÎÅ?) צÄÎÏ×ÌÅÎÎÑ ÎÅ ×ÄÁÌÏÓÑ", "úÁÓÔÅÒÅÖÅÎÎÑ: äÅÑ˦ ÎÅÔÒÁÎÚÁËæÊΦ ÚͦÎÉ ÔÁÂÌÉÃØ ÎÅ ÍÏÖÎÁ ÂÕÄÅ ÐÏ×ÅÒÎÕÔÉ", "ôÒÁÎÚÁËÃ¦Ñ Ú ÂÁÇÁÔØÍÁ ×ÉÒÁÚÁÍÉ ×ÉÍÁÇÁ¤ Â¦ÌØÛÅ Î¦Ö 'max_binlog_cache_size' ÂÁÊÔ¦× ÄÌÑ ÚÂÅÒ¦ÇÁÎÎÑ. 
úÂ¦ÌØÛÔÅ ÃÀ ÚͦÎÎÕ mysqld ÔÁ ÓÐÒÏÂÕÊÔÅ ÚÎÏ×Õ", -"ïÐÅÒÁÃ¦Ñ ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÎÁÎÁ Ú ÚÁÐÕÝÅÎÉÍ Ð¦ÄÌÅÇÌÉÍ, ÓÐÏÞÁÔËÕ ×ÉËÏÎÁÊÔÅ SLAVE STOP", -"ïÐÅÒÁÃ¦Ñ ×ÉÍÁÇÁ¤ ÚÁÐÕÝÅÎÏÇÏ Ð¦ÄÌÅÇÌÏÇÏ, ÚËÏÎÆ¦ÇÕÒÕÊÔŠЦÄÌÅÇÌÏÇÏ ÔÁ ×ÉËÏÎÁÊÔÅ SLAVE START", +"ïÐÅÒÁÃ¦Ñ ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÎÁÎÁ Ú ÚÁÐÕÝÅÎÉÍ Ð¦ÄÌÅÇÌÉÍ, ÓÐÏÞÁÔËÕ ×ÉËÏÎÁÊÔÅ STOP SLAVE", +"ïÐÅÒÁÃ¦Ñ ×ÉÍÁÇÁ¤ ÚÁÐÕÝÅÎÏÇÏ Ð¦ÄÌÅÇÌÏÇÏ, ÚËÏÎÆ¦ÇÕÒÕÊÔŠЦÄÌÅÇÌÏÇÏ ÔÁ ×ÉËÏÎÁÊÔÅ START SLAVE", "óÅÒ×ÅÒ ÎÅ ÚËÏÎÆ¦ÇÕÒÏ×ÁÎÏ ÑË Ð¦ÄÌÅÇÌÉÊ, ×ÉÐÒÁ×ÔÅ ÃÅ Õ ÆÁÊ̦ ËÏÎÆ¦ÇÕÒÁæ§ ÁÂÏ Ú CHANGE MASTER TO", "Could not initialize master info structure, more error messages can be found in the MySQL error log", "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ Ð¦ÄÌÅÇÌÕ Ç¦ÌËÕ, ÐÅÒÅצÒÔÅ ÓÉÓÔÅÍΦ ÒÅÓÕÒÓÉ", @@ -231,9 +233,9 @@ "DROP DATABASE ÎÅ ÄÏÚ×ÏÌÅÎÏ ÄÏËÉ Ç¦ÌËÁ ÐÅÒÅÂÕ×Á¤ Ð¦Ä ÚÁÇÁÌØÎÉÍ ÂÌÏËÕ×ÁÎÎÑÍ ÞÉÔÁÎÎÑ", "CREATE DATABASE ÎÅ ÄÏÚ×ÏÌÅÎÏ ÄÏËÉ Ç¦ÌËÁ ÐÅÒÅÂÕ×Á¤ Ð¦Ä ÚÁÇÁÌØÎÉÍ ÂÌÏËÕ×ÁÎÎÑÍ ÞÉÔÁÎÎÑ", "èÉÂÎÉÊ ÁÒÇÕÍÅÎÔ ÄÌÑ %s", -"ëÏÒÉÓÔÕ×ÁÞÕ %-.32s@%-.64s ÎÅ ÄÏÚ×ÏÌÅÎÏ ÓÔ×ÏÒÀ×ÁÔÉ ÎÏ×ÉÈ ËÏÒÉÓÔÕ×ÁÞ¦×", -"Incorrect table definition; All MERGE tables must be in the same database", -"Deadlock found when trying to get lock; Try restarting transaction", +"ëÏÒÉÓÔÕ×ÁÞÕ '%-.32s'@'%-.64s' ÎÅ ÄÏÚ×ÏÌÅÎÏ ÓÔ×ÏÒÀ×ÁÔÉ ÎÏ×ÉÈ ËÏÒÉÓÔÕ×ÁÞ¦×", +"Incorrect table definition; all MERGE tables must be in the same database", +"Deadlock found when trying to get lock; try restarting transaction", "÷ÉËÏÒÉÓÔÁÎÉÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ FULLTEXT ¦ÎÄÅËÓ¦×", "Cannot add foreign key constraint", "Cannot add a child row: a foreign key constraint fails", @@ -241,22 +243,86 @@ "Error connecting to master: %-.128s", "Error running query on master: %-.128s", "Error when executing command %s: %-.128s", -"Wrong usage of %s and %s", +"Incorrect usage of %s and %s", "The used SELECT statements have a different number of columns", "Can't execute the query because you have a conflicting read lock", "Mixing of transactional and non-transactional tables is disabled", "Option '%s' used twice in statement", "User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied. 
You need the %-.128s privilege for this operation", -"Variable '%-.64s' is a LOCAL variable and can't be used with SET GLOBAL", +"Access denied; you need the %-.128s privilege for this operation", +"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", "Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", "Variable '%-.64s' doesn't have a default value", "Variable '%-.64s' can't be set to the value of '%-.64s'", -"Wrong argument type to variable '%-.64s'", +"Incorrect argument type to variable '%-.64s'", "Variable '%-.64s' can only be set, not read", -"Wrong usage/placement of '%s'", +"Incorrect usage/placement of '%s'", "This version of MySQL doesn't yet support '%s'", "Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules" -"Variable '%-.64s' is a %s variable" +"Slave SQL thread ignored the query because of replicate-*-table rules", +"Variable '%-.64s' is a %s variable", +"Incorrect foreign key definition for '%-.64s': %s", +"Key reference and table reference don't match", +"ïÐÅÒÁÎÄ ÍÁ¤ ÓËÌÁÄÁÔÉÓÑ Ú %d ÓÔÏ×Âæ×", +"ð¦ÄÚÁÐÉÔ ÐÏ×ÅÒÔÁ¤ Â¦ÌØÛ ÎiÖ 1 ÚÁÐÉÓ", +"Unknown prepared statement handler (%.*s) given to %s", +"Help database is corrupt or does not exist", +"ãÉË̦ÞÎÅ ÐÏÓÉÌÁÎÎÑ ÎÁ ЦÄÚÁÐÉÔ", +"ðÅÒÅÔ×ÏÒÅÎÎÑ ÓÔÏ×ÂÃÁ '%s' Ú %s Õ %s", +"ðÏÓÉÌÁÎÎÑ '%-.64s' ÎÅ ÐiÄÔÒÉÍÕÅÔÓÑ (%s)", +"Every derived table must have its own alias", +"Select %u was ÓËÁÓÏ×ÁÎÏ ÐÒÉ ÏÐÔÉÍiÚÁÃii", +"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", +"Client does not support authentication protocol requested by server; consider upgrading MySQL client", +"All parts of a SPATIAL index must be NOT NULL", +"COLLATION '%s' is not valid for CHARACTER SET '%s'", +"Slave is already running", +"Slave has already been stopped", +"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", +"ZLIB: Not enough memory", +"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", +"ZLIB: Input data corrupted", +"%d line(s) were cut by GROUP_CONCAT()", +"Row %ld doesn't contain data for all columns", +"Row %ld was truncated; it contained more data than there were input columns", +"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", +"Data truncated; out of range for column '%s' at row %ld", +"Data truncated for column '%s' at row %ld", +"Using storage engine %s for table '%s'", +"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", +"Can't drop one or more of the requested users", +"Can't revoke all privileges, grant for one or more of the requested users", +"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", +"Illegal mix of collations for operation '%s'", +"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", +"Unknown collation: '%-.64s'", +"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", +"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", +"óÔÏ×ÂÅÃØ ÁÂÏ ÐÏÓÉÌÁÎÎÑ '%-.64s%s%-.64s%s%-.64s' ¦Ú SELECTÕ #%d ÂÕÌÏ ÚÎÁÊÄÅÎÅ Õ SELECT¦ #%d", +"Incorrect parameter or combination of parameters for START SLAVE UNTIL", +"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; 
otherwise, you are not safe in case of unexpected slave's mysqld restart", +"SQL thread is not to be started so UNTIL options are ignored", +"Incorrect index name '%-.100s'", +"Incorrect catalog name '%-.100s'", +"ëÅÛ ÚÁÐÉÔ¦× ÎÅÓÐÒÏÍÏÖÅÎ ×ÓÔÁÎÏ×ÉÔÉ ÒÏÚÍ¦Ò %lu, ÎÏ×ÉÊ ÒÏÚÍ¦Ò ËÅÛÁ ÚÁÐÉÔ¦× - %lu", +"Column '%-.64s' cannot be part of FULLTEXT index", +"Unknown key cache '%-.100s'", +"MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work", +"Unknown table engine '%s'", +"'%s' is deprecated, use '%s' instead", +"ôÁÂÌÉÃÑ %-.100s Õ %s ÎÅ ÍÏÖÅ ÏÎÏ×ÌÀ×ÁÔÉÓØ", +"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", +"The MySQL server is running with the %s option so it cannot execute this statement", +"Column '%-.100s' has duplicated value '%-.64s' in %s" +"Truncated wrong %-.32s value: '%-.128s'" +"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" +"Invalid ON UPDATE clause for '%-.64s' column", +"This command is not supported in the prepared statement protocol yet", +"Got error %d '%-.100s' from %s", +"Got temporary error %d '%-.100s' from %s", +"Unknown or incorrect time zone: '%-.64s'", +"Invalid TIMESTAMP value in column '%s' at row %ld", +"Invalid %s character string: '%.64s'", +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" +"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/slave.cc b/sql/slave.cc index 5e3d073b38a..b2862a437bb 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -14,18 +14,20 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - #include "mysql_priv.h" + +#ifdef HAVE_REPLICATION + #include <mysql.h> #include <myisam.h> -#include "mini_client.h" #include "slave.h" #include "sql_repl.h" #include "repl_failsafe.h" #include <thr_alarm.h> #include <my_dir.h> -#include <assert.h> +#include <sql_common.h> +#define MAX_SLAVE_RETRY_PAUSE 5 bool use_slave_mask = 0; MY_BITMAP slave_error_mask; @@ -54,7 +56,6 @@ static int events_till_disconnect = -1; typedef enum { SLAVE_THD_IO, SLAVE_THD_SQL} SLAVE_THD_TYPE; -void skip_load_data_infile(NET* net); static int process_io_rotate(MASTER_INFO* mi, Rotate_log_event* rev); static int process_io_create_file(MASTER_INFO* mi, Create_file_log_event* cev); static bool wait_for_relay_log_space(RELAY_LOG_INFO* rli); @@ -70,14 +71,27 @@ static int connect_to_master(THD* thd, MYSQL* mysql, MASTER_INFO* mi, static int safe_sleep(THD* thd, int sec, CHECK_KILLED_FUNC thread_killed, void* thread_killed_arg); static int request_table_dump(MYSQL* mysql, const char* db, const char* table); -static int create_table_from_dump(THD* thd, NET* net, const char* db, +static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, const char* table_name, bool overwrite); -static int check_master_version(MYSQL* mysql, MASTER_INFO* mi); +static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi); /* - Get a bit mask for which threads are running so that we later can - restart these threads + Find out which replications threads are running + + SYNOPSIS + init_thread_mask() + mask Return value here + mi master_info for slave + inverse If set, returns which threads are not running + + IMPLEMENTATION + Get a bit mask for which threads are running so that we can later restart + these threads. 
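In other words, the mask described here is just one bit per slave thread. A self-contained sketch of the idea (the SKETCH_* values and the helper are illustrative stand-ins, not the server's actual SLAVE_IO/SLAVE_SQL constants from slave.h):

#include <cstdio>

// Illustrative bit values only; the real SLAVE_IO/SLAVE_SQL defines live in slave.h.
enum { SKETCH_SLAVE_IO= 1, SKETCH_SLAVE_SQL= 2 };

// Build a mask of running threads; with inverse set, a mask of stopped threads.
static int sketch_thread_mask(bool io_running, bool sql_running, bool inverse)
{
  int mask= 0;
  if (io_running)
    mask|= SKETCH_SLAVE_IO;
  if (sql_running)
    mask|= SKETCH_SLAVE_SQL;
  return inverse ? (~mask & (SKETCH_SLAVE_IO | SKETCH_SLAVE_SQL)) : mask;
}

int main()
{
  // I/O thread running, SQL thread stopped:
  std::printf("running=%d stopped=%d\n",
              sketch_thread_mask(true, false, false),   // 1 == SKETCH_SLAVE_IO
              sketch_thread_mask(true, false, true));   // 2 == SKETCH_SLAVE_SQL
  return 0;
}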
+ + RETURN + mask If inverse == 0, running threads + If inverse == 1, stopped threads */ void init_thread_mask(int* mask,MASTER_INFO* mi,bool inverse) @@ -94,6 +108,10 @@ void init_thread_mask(int* mask,MASTER_INFO* mi,bool inverse) } +/* + lock_slave_threads() +*/ + void lock_slave_threads(MASTER_INFO* mi) { //TODO: see if we can do this without dual mutex @@ -101,6 +119,11 @@ void lock_slave_threads(MASTER_INFO* mi) pthread_mutex_lock(&mi->rli.run_lock); } + +/* + unlock_slave_threads() +*/ + void unlock_slave_threads(MASTER_INFO* mi) { //TODO: see if we can do this without dual mutex @@ -109,6 +132,8 @@ void unlock_slave_threads(MASTER_INFO* mi) } +/* Initialize slave structures */ + int init_slave() { DBUG_ENTER("init_slave"); @@ -175,6 +200,7 @@ static void free_table_ent(TABLE_RULE_ENT* e) my_free((gptr) e, MYF(0)); } + static byte* get_table_key(TABLE_RULE_ENT* e, uint* len, my_bool not_used __attribute__((unused))) { @@ -201,11 +227,7 @@ static byte* get_table_key(TABLE_RULE_ENT* e, uint* len, - If not, open the 'log' binary file. TODO - - check proper initialization of master_log_name/master_log_pos - - We may always want to delete all logs before 'log'. - Currently if we are not calling this with 'log' as NULL or the first - log we will never delete relay logs. - If we want this we should not set skip_log_purge to 1. + - check proper initialization of group_master_log_name/group_master_log_pos RETURN VALUES 0 ok @@ -220,10 +242,12 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log, *errmsg=0; pthread_mutex_t *log_lock=rli->relay_log.get_log_lock(); - pthread_mutex_lock(log_lock); + if (need_data_lock) pthread_mutex_lock(&rli->data_lock); + pthread_mutex_lock(log_lock); + /* Close log file and free buffers if it's already open */ if (rli->cur_log_fd >= 0) { @@ -232,7 +256,7 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log, rli->cur_log_fd = -1; } - rli->relay_log_pos = pos; + rli->group_relay_log_pos = rli->event_relay_log_pos = pos; /* Test to see if the previous run was with the skip of purging @@ -244,18 +268,15 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log, goto err; } - if (log) // If not first log + if (log && rli->relay_log.find_log_pos(&rli->linfo, log, 1)) { - if (strcmp(log, rli->linfo.log_file_name)) - rli->skip_log_purge= 1; // Different name; Don't purge - if (rli->relay_log.find_log_pos(&rli->linfo, log, 1)) - { - *errmsg="Could not find target log during relay log initialization"; - goto err; - } + *errmsg="Could not find target log during relay log initialization"; + goto err; } - strmake(rli->relay_log_name,rli->linfo.log_file_name, - sizeof(rli->relay_log_name)-1); + strmake(rli->group_relay_log_name,rli->linfo.log_file_name, + sizeof(rli->group_relay_log_name)-1); + strmake(rli->event_relay_log_name,rli->linfo.log_file_name, + sizeof(rli->event_relay_log_name)-1); if (rli->relay_log.is_active(rli->linfo.log_file_name)) { /* @@ -263,8 +284,9 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log, In this case, we will use the same IO_CACHE pointer to read data as the IO thread is using to write data. 
*/ - if (my_b_tell((rli->cur_log=rli->relay_log.get_log_file())) == 0 && - check_binlog_magic(rli->cur_log,errmsg)) + rli->cur_log= rli->relay_log.get_log_file(); + if (my_b_tell(rli->cur_log) == 0 && + check_binlog_magic(rli->cur_log, errmsg)) goto err; rli->cur_log_old_open_count=rli->relay_log.get_open_count(); } @@ -286,31 +308,42 @@ err: If we don't purge, we can't honour relay_log_space_limit ; silently discard it */ - if (rli->skip_log_purge) + if (!relay_log_purge) rli->log_space_limit= 0; pthread_cond_broadcast(&rli->data_cond); + + pthread_mutex_unlock(log_lock); + if (need_data_lock) pthread_mutex_unlock(&rli->data_lock); - pthread_mutex_unlock(log_lock); DBUG_RETURN ((*errmsg) ? 1 : 0); } -/* called from get_options() in mysqld.cc on start-up */ +/* + Init functio to set up array for errors that should be skipped for slave + + SYNOPSIS + init_slave_skip_errors() + arg List of errors numbers to skip, separated with ',' + + NOTES + Called from get_options() in mysqld.cc on start-up +*/ void init_slave_skip_errors(const char* arg) { const char *p; - if (bitmap_init(&slave_error_mask,MAX_SLAVE_ERROR,0)) + if (bitmap_init(&slave_error_mask,0,MAX_SLAVE_ERROR,0)) { fprintf(stderr, "Badly out of memory, please check your system status\n"); exit(1); } use_slave_mask = 1; - for (;isspace(*arg);++arg) + for (;my_isspace(system_charset_info,*arg);++arg) /* empty */; - if (!my_casecmp(arg,"all",3)) + if (!my_strnncoll(system_charset_info,(uchar*)arg,4,(const uchar*)"all",4)) { bitmap_set_all(&slave_error_mask); return; @@ -322,24 +355,37 @@ void init_slave_skip_errors(const char* arg) break; if (err_code < MAX_SLAVE_ERROR) bitmap_set_bit(&slave_error_mask,(uint)err_code); - while (!isdigit(*p) && *p) + while (!my_isdigit(system_charset_info,*p) && *p) p++; } } -void st_relay_log_info::inc_pending(ulonglong val) -{ - pending += val; -} -/* TODO: this probably needs to be fixed */ -void st_relay_log_info::inc_pos(ulonglong val, ulonglong log_pos, bool skip_lock) +void st_relay_log_info::inc_group_relay_log_pos(ulonglong val, + ulonglong log_pos, + bool skip_lock) { if (!skip_lock) pthread_mutex_lock(&data_lock); - relay_log_pos += val+pending; - pending = 0; - if (log_pos) + inc_event_relay_log_pos(val); + group_relay_log_pos= event_relay_log_pos; + strmake(group_relay_log_name,event_relay_log_name, + sizeof(group_relay_log_name)-1); + + notify_group_relay_log_name_update(); + + /* + If the slave does not support transactions and replicates a transaction, + users should not trust group_master_log_pos (which they can display with + SHOW SLAVE STATUS or read from relay-log.info), because to compute + group_master_log_pos the slave relies on log_pos stored in the master's + binlog, but if we are in a master's transaction these positions are always + the BEGIN's one (excepted for the COMMIT), so group_master_log_pos does + not advance as it should on the non-transactional slave (it advances by + big leaps, whereas it should advance by small leaps). + */ + if (log_pos) // 3.23 binlogs don't have log_posx + { #if MYSQL_VERSION_ID < 50000 /* If the event was converted from a 3.23 format, get_event_len() has @@ -350,28 +396,18 @@ void st_relay_log_info::inc_pos(ulonglong val, ulonglong log_pos, bool skip_lock mi->old_format will not help if the I/O thread has not started yet. Yes this is a hack but it's just to make 3.23->4.x replication work; 3.23->5.0 replication is working much better. - - The line "mi->old_format ? : " below should NOT BE MERGED to 5.0 which - already works. 
But it SHOULD be merged to 4.1. */ - master_log_pos= log_pos + val - + group_master_log_pos= log_pos + val - (mi->old_format ? (LOG_EVENT_HEADER_LEN - OLD_HEADER_LEN) : 0); -#endif +#else + group_master_log_pos= log_pos+ val; +#endif /* MYSQL_VERSION_ID < 5000 */ + } pthread_cond_broadcast(&data_cond); if (!skip_lock) pthread_mutex_unlock(&data_lock); } -/* - thread safe read of position - not needed if we are in the slave thread, - but required otherwise as var is a longlong -*/ -void st_relay_log_info::read_pos(ulonglong& var) -{ - pthread_mutex_lock(&data_lock); - var = relay_log_pos; - pthread_mutex_unlock(&data_lock); -} void st_relay_log_info::close_temporary_tables() { @@ -391,8 +427,10 @@ void st_relay_log_info::close_temporary_tables() } /* - We assume we have a run lock on rli and that both slave thread - are not running + purge_relay_logs() + + NOTES + Assumes to have a run lock on rli and that no slave thread are running. */ int purge_relay_logs(RELAY_LOG_INFO* rli, THD *thd, bool just_reset, @@ -419,9 +457,8 @@ int purge_relay_logs(RELAY_LOG_INFO* rli, THD *thd, bool just_reset, to display fine in any case. */ - rli->master_log_name[0]= 0; - rli->master_log_pos= 0; - rli->pending= 0; + rli->group_master_log_name[0]= 0; + rli->group_master_log_pos= 0; if (!rli->inited) { @@ -441,16 +478,18 @@ int purge_relay_logs(RELAY_LOG_INFO* rli, THD *thd, bool just_reset, goto err; } /* Save name of used relay log file */ - strmake(rli->relay_log_name, rli->relay_log.get_log_fname(), - sizeof(rli->relay_log_name)-1); + strmake(rli->group_relay_log_name, rli->relay_log.get_log_fname(), + sizeof(rli->group_relay_log_name)-1); + strmake(rli->event_relay_log_name, rli->relay_log.get_log_fname(), + sizeof(rli->event_relay_log_name)-1); // Just first log with magic number and nothing else rli->log_space_total= BIN_LOG_HEADER_SIZE; - rli->relay_log_pos= BIN_LOG_HEADER_SIZE; + rli->group_relay_log_pos= rli->event_relay_log_pos= BIN_LOG_HEADER_SIZE; rli->relay_log.reset_bytes_written(); if (!just_reset) - error= init_relay_log_pos(rli, rli->relay_log_name, rli->relay_log_pos, - 0 /* do not need data lock */, errmsg); - + error= init_relay_log_pos(rli, rli->group_relay_log_name, rli->group_relay_log_pos, + 0 /* do not need data lock */, errmsg); + err: #ifndef DBUG_OFF char buf[22]; @@ -507,7 +546,7 @@ int terminate_slave_threads(MASTER_INFO* mi,int thread_mask,bool skip_lock) int terminate_slave_thread(THD* thd, pthread_mutex_t* term_lock, pthread_mutex_t *cond_lock, pthread_cond_t* term_cond, - volatile bool* slave_running) + volatile uint *slave_running) { if (term_lock) { @@ -545,9 +584,10 @@ int terminate_slave_thread(THD* thd, pthread_mutex_t* term_lock, int start_slave_thread(pthread_handler h_func, pthread_mutex_t *start_lock, pthread_mutex_t *cond_lock, pthread_cond_t *start_cond, - volatile bool *slave_running, + volatile uint *slave_running, volatile ulong *slave_run_id, - MASTER_INFO* mi) + MASTER_INFO* mi, + bool high_priority) { pthread_t th; ulong start_id; @@ -576,6 +616,8 @@ int start_slave_thread(pthread_handler h_func, pthread_mutex_t *start_lock, } start_id= *slave_run_id; DBUG_PRINT("info",("Creating new slave thread")); + if (high_priority) + my_pthread_attr_setprio(&connection_attrib,CONNECT_PRIOR); if (pthread_create(&th, &connection_attrib, h_func, (void*)mi)) { if (start_lock) @@ -604,9 +646,12 @@ int start_slave_thread(pthread_handler h_func, pthread_mutex_t *start_lock, /* - SLAVE_FORCE_ALL is not implemented here on purpose since it does not make - sense to do that for 
starting a slave - we always care if it actually - started the threads that were not previously running + start_slave_threads() + + NOTES + SLAVE_FORCE_ALL is not implemented here on purpose since it does not make + sense to do that for starting a slave--we always care if it actually + started the threads that were not previously running */ int start_slave_threads(bool need_slave_mutex, bool wait_for_start, @@ -635,13 +680,13 @@ int start_slave_threads(bool need_slave_mutex, bool wait_for_start, error=start_slave_thread(handle_slave_io,lock_io,lock_cond_io, cond_io, &mi->slave_running, &mi->slave_run_id, - mi); + mi, 1); //high priority, to read the most possible if (!error && (thread_mask & SLAVE_SQL)) { error=start_slave_thread(handle_slave_sql,lock_sql,lock_cond_sql, cond_sql, &mi->rli.slave_running, &mi->rli.slave_run_id, - mi); + mi, 0); if (error) terminate_slave_threads(mi, thread_mask & SLAVE_IO, 0); } @@ -651,12 +696,13 @@ int start_slave_threads(bool need_slave_mutex, bool wait_for_start, void init_table_rule_hash(HASH* h, bool* h_inited) { - hash_init(h, TABLE_RULE_HASH_SIZE,0,0, + hash_init(h, system_charset_info,TABLE_RULE_HASH_SIZE,0,0, (hash_get_key) get_table_key, (hash_free_key) free_table_ent, 0); *h_inited = 1; } + void init_table_rule_array(DYNAMIC_ARRAY* a, bool* a_inited) { my_init_dynamic_array(a, sizeof(TABLE_RULE_ENT*), TABLE_RULE_ARR_SIZE, @@ -664,6 +710,7 @@ void init_table_rule_array(DYNAMIC_ARRAY* a, bool* a_inited) *a_inited = 1; } + static TABLE_RULE_ENT* find_wild(DYNAMIC_ARRAY *a, const char* key, int len) { uint i; @@ -673,8 +720,10 @@ static TABLE_RULE_ENT* find_wild(DYNAMIC_ARRAY *a, const char* key, int len) { TABLE_RULE_ENT* e ; get_dynamic(a, (gptr)&e, i); - if (!wild_case_compare(key, key_end, (const char*)e->db, - (const char*)(e->db + e->key_len),'\\')) + if (!my_wildcmp(system_charset_info, key, key_end, + (const char*)e->db, + (const char*)(e->db + e->key_len), + '\\',wild_one,wild_many)) return e; } @@ -697,21 +746,14 @@ static TABLE_RULE_ENT* find_wild(DYNAMIC_ARRAY *a, const char* key, int len) rules (see code below). For that reason, users should not set conflicting rules because they may get unpredicted results (precedence order is explained in the manual). - If no table of the list is marked "updating" (so far this can only happen - if the statement is a multi-delete (SQLCOM_DELETE_MULTI) and the "tables" - is the tables in the FROM): then we always return 0, because there is no - reason we play this statement on this slave if it updates nothing. In the - case of SQLCOM_DELETE_MULTI, there will be a second call to tables_ok(), - with tables having "updating==TRUE" (those after the DELETE), so this - second call will make the decision (because - all_tables_not_ok() = !tables_ok(1st_list) && !tables_ok(2nd_list)). 
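These rules are stored as patterns and compared against keys of the form "db.table". A minimal stand-in for that comparison (match_wild is a hypothetical, simplified replacement for my_wildcmp, leaving out the '\\' escape and charset-aware comparison):

#include <cstdio>

// Simplified SQL-style pattern match: '%' matches any run of characters and
// '_' matches exactly one character.
static bool match_wild(const char *pat, const char *str)
{
  if (*pat == '\0')
    return *str == '\0';
  if (*pat == '%')
  {
    for (const char *s= str; ; s++)
    {
      if (match_wild(pat + 1, s))
        return true;
      if (*s == '\0')
        return false;
    }
  }
  if (*str == '\0')
    return false;
  if (*pat == '_' || *pat == *str)
    return match_wild(pat + 1, str + 1);
  return false;
}

int main()
{
  // A rule such as --replicate-wild-do-table=foo%.bar% is stored as the
  // pattern "foo%.bar%" and compared against the key "db.table":
  std::printf("%d\n", match_wild("foo%.bar%", "foodb.bartab"));  // 1: replicate
  std::printf("%d\n", match_wild("foo%.bar%", "foodb.other"));   // 0: skip
  return 0;
}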
+ RETURN VALUES 0 should not be logged/replicated 1 should be logged/replicated */ -int tables_ok(THD* thd, TABLE_LIST* tables) +bool tables_ok(THD* thd, TABLE_LIST* tables) { bool some_tables_updating= 0; DBUG_ENTER("tables_ok"); @@ -823,10 +865,15 @@ int add_table_rule(HASH* h, const char* table_spec) e->tbl_name = e->db + (dot - table_spec) + 1; e->key_len = len; memcpy(e->db, table_spec, len); - (void)hash_insert(h, (byte*)e); + (void)my_hash_insert(h, (byte*)e); return 0; } + +/* + Add table expression with wildcards to dynamic array +*/ + int add_wild_table_rule(DYNAMIC_ARRAY* a, const char* table_spec) { const char* dot = strchr(table_spec, '.'); @@ -843,6 +890,7 @@ int add_wild_table_rule(DYNAMIC_ARRAY* a, const char* table_spec) return 0; } + static void free_string_array(DYNAMIC_ARRAY *a) { uint i; @@ -855,8 +903,8 @@ static void free_string_array(DYNAMIC_ARRAY *a) delete_dynamic(a); } -#ifdef NOT_USED_YET +#ifdef NOT_USED_YET static int end_slave_on_walk(MASTER_INFO* mi, gptr /*unused*/) { end_master_info(mi); @@ -865,6 +913,13 @@ static int end_slave_on_walk(MASTER_INFO* mi, gptr /*unused*/) #endif +/* + Free all resources used by slave + + SYNOPSIS + end_slave() +*/ + void end_slave() { /* @@ -902,7 +957,7 @@ void end_slave() static bool io_slave_killed(THD* thd, MASTER_INFO* mi) { DBUG_ASSERT(mi->io_thd == thd); - DBUG_ASSERT(mi->slave_running == 1); // tracking buffer overrun + DBUG_ASSERT(mi->slave_running); // tracking buffer overrun return mi->abort_slave || abort_loop || thd->killed; } @@ -949,17 +1004,29 @@ void slave_print_error(RELAY_LOG_INFO* rli, int err_code, const char* msg, ...) } +/* + skip_load_data_infile() + + NOTES + This is used to tell a 3.23 master to break send_file() +*/ -void skip_load_data_infile(NET* net) +void skip_load_data_infile(NET *net) { - (void)my_net_write(net, "\xfb/dev/null", 10); - (void)net_flush(net); - (void)my_net_read(net); // discard response - send_ok(net); // the master expects it + (void)net_request_file(net, "/dev/null"); + (void)my_net_read(net); // discard response + (void)net_write_command(net, 0, "", 0, "", 0); // Send ok } -const char *rewrite_db(const char* db) +bool net_request_file(NET* net, const char* fname) +{ + DBUG_ENTER("net_request_file"); + DBUG_RETURN(net_write_command(net, 251, fname, strlen(fname), "", 0)); +} + + +const char *rewrite_db(const char* db, uint32 *new_len) { if (replicate_rewrite_db.is_empty() || !db) return db; @@ -969,7 +1036,10 @@ const char *rewrite_db(const char* db) while ((tmp=it++)) { if (!strcmp(tmp->key, db)) + { + *new_len= (uint32)strlen(tmp->val); return tmp->val; + } } return db; } @@ -983,7 +1053,7 @@ const char *rewrite_db(const char* db) const char *print_slave_db_safe(const char* db) { - return (db ? rewrite_db(db) : ""); + return (db ? db : ""); } /* @@ -1088,7 +1158,7 @@ static int init_intvar_from_file(int* var, IO_CACHE* f, int default_val) } -static int check_master_version(MYSQL* mysql, MASTER_INFO* mi) +static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi) { const char* errmsg= 0; @@ -1107,16 +1177,114 @@ static int check_master_version(MYSQL* mysql, MASTER_INFO* mi) break; default: /* 5.0 is not supported */ - errmsg = "Master reported an unrecognized MySQL version. Note that 4.0 \ + errmsg = "Master reported an unrecognized MySQL version. Note that 4.1 \ slaves can't replicate a 5.0 or newer master."; break; } + /* + Compare the master and slave's clock. Do not die if master's clock is + unavailable (very old master not supporting UNIX_TIMESTAMP()?). 
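The offset measured by this comparison is stored in mi->clock_diff_with_master and later subtracted when SHOW SLAVE STATUS estimates Seconds_Behind_Master. A rough, self-contained illustration of that arithmetic (hard-coded timestamps; the real formula also depends on the SQL thread's state):

#include <cstdio>
#include <ctime>

int main()
{
  time_t master_now= 1000000000;   // master's SELECT UNIX_TIMESTAMP() at connect
  time_t slave_now=  1000000007;   // slave's time() at the same moment
  long clock_diff_with_master= (long) (slave_now - master_now);  // +7s of skew

  time_t last_master_timestamp= 1000000100;  // timestamp of last applied event
  time_t now= 1000000130;                    // slave clock while applying it
  long seconds_behind= (long) (now - last_master_timestamp)
                       - clock_diff_with_master;
  std::printf("clock skew: %lds, estimated lag: %lds\n",
              clock_diff_with_master, seconds_behind);  // 7s, 23s
  return 0;
}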
+ */ + MYSQL_RES *master_res= 0; + MYSQL_ROW master_row; + + if (!mysql_real_query(mysql, "SELECT UNIX_TIMESTAMP()", 23) && + (master_res= mysql_store_result(mysql)) && + (master_row= mysql_fetch_row(master_res))) + { + mi->clock_diff_with_master= + (long) (time((time_t*) 0) - strtoul(master_row[0], 0, 10)); + } + else + { + mi->clock_diff_with_master= 0; /* The "most sensible" value */ + sql_print_warning("\"SELECT UNIX_TIMESTAMP()\" failed on master, \ +do not trust column Seconds_Behind_Master of SHOW SLAVE STATUS"); + } + if (master_res) + mysql_free_result(master_res); + + /* + Check that the master's server id and ours are different. Because if they + are equal (which can result from a simple copy of master's datadir to slave, + thus copying some my.cnf), replication will work but all events will be + skipped. + Do not die if SHOW VARIABLES LIKE 'SERVER_ID' fails on master (very old + master?). + Note: we could have put a @@SERVER_ID in the previous SELECT + UNIX_TIMESTAMP() instead, but this would not have worked on 3.23 masters. + */ + if (!mysql_real_query(mysql, "SHOW VARIABLES LIKE 'SERVER_ID'", 31) && + (master_res= mysql_store_result(mysql))) + { + if ((master_row= mysql_fetch_row(master_res)) && + (::server_id == strtoul(master_row[1], 0, 10)) && + !replicate_same_server_id) + errmsg= "The slave I/O thread stops because master and slave have equal \ +MySQL server ids; these ids must be different for replication to work (or \ +the --replicate-same-server-id option must be used on slave but this does \ +not always make sense; please check the manual before using it)."; + mysql_free_result(master_res); + } + + /* + Check that the master's global character_set_server and ours are the same. + Not fatal if query fails (old master?). + Note that we don't check for equality of global character_set_client and + collation_connection (neither do we prevent their setting in + set_var.cc). That's because from what I (Guilhem) have tested, the global + values of these 2 are never used (new connections don't use them). + We don't test equality of global collation_database either as it's is + going to be deprecated (made read-only) in 4.1 very soon. + We don't do it for <3.23.57 because masters <3.23.50 hang on + SELECT @@unknown_var (BUG#7965 - see changelog of 3.23.50). + */ + if (mi->old_format == BINLOG_FORMAT_323_LESS_57) + goto err; + if (!mysql_real_query(mysql, "SELECT @@GLOBAL.COLLATION_SERVER", 32) && + (master_res= mysql_store_result(mysql))) + { + if ((master_row= mysql_fetch_row(master_res)) && + strcmp(master_row[0], global_system_variables.collation_server->name)) + errmsg= "The slave I/O thread stops because master and slave have \ +different values for the COLLATION_SERVER global variable. The values must \ +be equal for replication to work"; + mysql_free_result(master_res); + } + + /* + Perform analogous check for time zone. Theoretically we also should + perform check here to verify that SYSTEM time zones are the same on + slave and master, but we can't rely on value of @@system_time_zone + variable (it is time zone abbreviation) since it determined at start + time and so could differ for slave and master even if they are really + in the same system time zone. So we are omiting this check and just + relying on documentation. Also according to Monty there are many users + who are using replication between servers in various time zones. Hence + such check will broke everything for them. 
(And now everything will + work for them because by default both their master and slave will have + 'SYSTEM' time zone). + */ + if (!mysql_real_query(mysql, "SELECT @@GLOBAL.TIME_ZONE", 25) && + (master_res= mysql_store_result(mysql))) + { + if ((master_row= mysql_fetch_row(master_res)) && + strcmp(master_row[0], + global_system_variables.time_zone->get_name()->ptr())) + errmsg= "The slave I/O thread stops because master and slave have \ +different values for the TIME_ZONE global variable. The values must \ +be equal for replication to work"; + mysql_free_result(master_res); + } + +err: if (errmsg) { sql_print_error(errmsg); return 1; } + return 0; } @@ -1131,10 +1299,10 @@ slaves can't replicate a 5.0 or newer master."; 1 error */ -static int create_table_from_dump(THD* thd, NET* net, const char* db, +static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, const char* table_name, bool overwrite) { - ulong packet_len = my_net_read(net); // read create table statement + ulong packet_len; char *query, *save_db; uint32 save_db_length; Vio* save_vio; @@ -1143,51 +1311,49 @@ static int create_table_from_dump(THD* thd, NET* net, const char* db, int error= 1; handler *file; ulong save_options; - + NET *net= &mysql->net; + DBUG_ENTER("create_table_from_dump"); + + packet_len= my_net_read(net); // read create table statement if (packet_len == packet_error) { - send_error(&thd->net, ER_MASTER_NET_READ); - return 1; + send_error(thd, ER_MASTER_NET_READ); + DBUG_RETURN(1); } if (net->read_pos[0] == 255) // error from master { - net->read_pos[packet_len] = 0; - net_printf(&thd->net, ER_MASTER, net->read_pos + 3); - return 1; + char *err_msg; + err_msg= (char*) net->read_pos + ((mysql->server_capabilities & + CLIENT_PROTOCOL_41) ? + 3+SQLSTATE_LENGTH+1 : 3); + net_printf(thd, ER_MASTER, err_msg); + DBUG_RETURN(1); } thd->command = COM_TABLE_DUMP; + thd->query_length= packet_len; /* Note that we should not set thd->query until the area is initalized */ - if (!(query = sql_alloc(packet_len + 1))) + if (!(query = thd->strmake((char*) net->read_pos, packet_len))) { sql_print_error("create_table_from_dump: out of memory"); - net_printf(&thd->net, ER_GET_ERRNO, "Out of memory"); - return 1; + net_printf(thd, ER_GET_ERRNO, "Out of memory"); + DBUG_RETURN(1); } - memcpy(query, net->read_pos, packet_len); - query[packet_len]= 0; - thd->query_length= packet_len; - /* - We make the following lock in an attempt to ensure that the compiler will - not rearrange the code so that thd->query is set too soon - */ - VOID(pthread_mutex_lock(&LOCK_thread_count)); thd->query= query; - VOID(pthread_mutex_unlock(&LOCK_thread_count)); - thd->current_tablenr = 0; thd->query_error = 0; thd->net.no_send_ok = 1; bzero((char*) &tables,sizeof(tables)); tables.db = (char*)db; tables.alias= tables.real_name= (char*)table_name; + /* Drop the table if 'overwrite' is true */ - if (overwrite && mysql_rm_table(thd,&tables,1)) /* drop if exists */ + if (overwrite && mysql_rm_table(thd,&tables,1,0)) /* drop if exists */ { - send_error(&thd->net); + send_error(thd); sql_print_error("create_table_from_dump: failed to drop the table"); goto err; } - + /* Create the table. 
We do not want to log the "create table" statement */ save_options = thd->options; thd->options &= ~(ulong) (OPTION_BIN_LOG); @@ -1210,7 +1376,7 @@ static int create_table_from_dump(THD* thd, NET* net, const char* db, tables.lock_type = TL_WRITE; if (!open_ltable(thd, &tables, TL_WRITE)) { - send_error(&thd->net,0,0); // Send error from open_ltable + send_error(thd,0,0); // Send error from open_ltable sql_print_error("create_table_from_dump: could not open created table"); goto err; } @@ -1220,7 +1386,7 @@ static int create_table_from_dump(THD* thd, NET* net, const char* db, /* Copy the data file */ if (file->net_read_dump(net)) { - net_printf(&thd->net, ER_MASTER_NET_READ); + net_printf(thd, ER_MASTER_NET_READ); sql_print_error("create_table_from_dump: failed in\ handler::net_read_dump()"); goto err; @@ -1240,14 +1406,15 @@ static int create_table_from_dump(THD* thd, NET* net, const char* db, error=file->repair(thd,&check_opt) != 0; thd->net.vio = save_vio; if (error) - net_printf(&thd->net, ER_INDEX_REBUILD,tables.table->real_name); + net_printf(thd, ER_INDEX_REBUILD,tables.table->real_name); err: close_thread_tables(thd); thd->net.no_send_ok = 0; - return error; + DBUG_RETURN(error); } + int fetch_master_table(THD *thd, const char *db_name, const char *table_name, MASTER_INFO *mi, MYSQL *mysql, bool overwrite) { @@ -1260,15 +1427,15 @@ int fetch_master_table(THD *thd, const char *db_name, const char *table_name, if (!called_connected) { - if (!(mysql = mc_mysql_init(NULL))) + if (!(mysql = mysql_init(NULL))) { - send_error(&thd->net); // EOM + send_error(thd); // EOM DBUG_RETURN(1); } if (connect_to_master(thd, mysql, mi)) { - net_printf(&thd->net, ER_CONNECT_TO_MASTER, mc_mysql_error(mysql)); - mc_mysql_close(mysql); + net_printf(thd, ER_CONNECT_TO_MASTER, mysql_error(mysql)); + mysql_close(mysql); DBUG_RETURN(1); } if (thd->killed) @@ -1281,18 +1448,17 @@ int fetch_master_table(THD *thd, const char *db_name, const char *table_name, errmsg= "Failed on table dump request"; goto err; } - - if (create_table_from_dump(thd, &mysql->net, db_name, - table_name, overwrite)) - goto err; // create_table_from_dump will have send_error already + if (create_table_from_dump(thd, mysql, db_name, + table_name, overwrite)) + goto err; // create_table_from_dump have sent the error already error = 0; err: thd->net.no_send_ok = 0; // Clear up garbage after create_table_from_dump if (!called_connected) - mc_mysql_close(mysql); - if (errmsg && thd->net.vio) - send_error(&thd->net, error, errmsg); + mysql_close(mysql); + if (errmsg && thd->vio_ok()) + send_error(thd, error, errmsg); DBUG_RETURN(test(error)); // Return 1 on error } @@ -1329,13 +1495,11 @@ int init_relay_log_info(RELAY_LOG_INFO* rli, const char* info_fname) fn_format(fname, info_fname, mysql_data_home, "", 4+32); pthread_mutex_lock(&rli->data_lock); info_fd = rli->info_fd; - rli->pending = 0; rli->cur_log_fd = -1; rli->slave_skip_counter=0; rli->abort_pos_wait=0; - rli->skip_log_purge=0; - rli->log_space_limit = relay_log_space_limit; - rli->log_space_total = 0; + rli->log_space_limit= relay_log_space_limit; + rli->log_space_total= 0; // TODO: make this work with multi-master if (!opt_relay_logname) @@ -1351,32 +1515,9 @@ int init_relay_log_info(RELAY_LOG_INFO* rli, const char* info_fname) } /* - The relay log will now be opened, as a SEQ_READ_APPEND IO_CACHE. It is - notable that the last kilobytes of it (8 kB for example) may live in - memory, not on disk (depending on what the thread using it does). 
While - this is efficient, it has a side-effect one must know: - The size of the relay log on disk (displayed by 'ls -l' on Unix) can be a - few kilobytes less than one would expect by doing SHOW SLAVE STATUS; this - happens when only the IO thread is started (not the SQL thread). The - "missing" kilobytes are in memory, are preserved during 'STOP SLAVE; START - SLAVE IO_THREAD', and are flushed to disk when the slave's mysqld stops. So - this does not cause any bug. Example of how disk size grows by leaps: - - Read_Master_Log_Pos: 7811 -rw-rw---- 1 guilhem qq 4 Jun 5 16:19 gbichot2-relay-bin.002 - ...later... - Read_Master_Log_Pos: 9744 -rw-rw---- 1 guilhem qq 8192 Jun 5 16:27 gbichot2-relay-bin.002 - - See how 4 is less than 7811 and 8192 is less than 9744. - - WARNING: this is risky because the slave can stay like this for a long - time; then if it has a power failure, master.info says the I/O thread has - read until 9744 while the relay-log contains only until 8192 (the - in-memory part from 8192 to 9744 has been lost), so the SQL slave thread - will miss some events, silently breaking replication. - Ideally we would like to flush master.info only when we know that the relay - log has no in-memory tail. - Note that the above problem may arise only when only the IO thread is - started, which is unlikely. + The relay log will now be opened, as a SEQ_READ_APPEND IO_CACHE. + Note that the I/O thread flushes it to disk after writing every event, in + flush_master_info(mi, 1). */ /* @@ -1432,8 +1573,8 @@ file '%s', errno %d)", fname, my_errno); sql_print_error("Failed to open the relay log 'FIRST' (relay_log_pos 4)"); goto err; } - rli->master_log_name[0]= 0; - rli->master_log_pos= 0; + rli->group_master_log_name[0]= 0; + rli->group_master_log_pos= 0; rli->info_fd= info_fd; } else // file exists @@ -1470,36 +1611,39 @@ Failed to open the existing relay log info file '%s' (errno %d)", rli->info_fd = info_fd; int relay_log_pos, master_log_pos; - if (init_strvar_from_file(rli->relay_log_name, - sizeof(rli->relay_log_name), &rli->info_file, - "") || + if (init_strvar_from_file(rli->group_relay_log_name, + sizeof(rli->group_relay_log_name), + &rli->info_file, "") || init_intvar_from_file(&relay_log_pos, &rli->info_file, BIN_LOG_HEADER_SIZE) || - init_strvar_from_file(rli->master_log_name, - sizeof(rli->master_log_name), &rli->info_file, - "") || + init_strvar_from_file(rli->group_master_log_name, + sizeof(rli->group_master_log_name), + &rli->info_file, "") || init_intvar_from_file(&master_log_pos, &rli->info_file, 0)) { msg="Error reading slave log configuration"; goto err; } - rli->relay_log_pos= relay_log_pos; - rli->master_log_pos= master_log_pos; + strmake(rli->event_relay_log_name,rli->group_relay_log_name, + sizeof(rli->event_relay_log_name)-1); + rli->group_relay_log_pos= rli->event_relay_log_pos= relay_log_pos; + rli->group_master_log_pos= master_log_pos; if (init_relay_log_pos(rli, - rli->relay_log_name, - rli->relay_log_pos, + rli->group_relay_log_name, + rli->group_relay_log_pos, 0 /* no data lock*/, &msg)) { char llbuf[22]; sql_print_error("Failed to open the relay log '%s' (relay_log_pos %s)", - rli->relay_log_name, llstr(rli->relay_log_pos, llbuf)); + rli->group_relay_log_name, + llstr(rli->group_relay_log_pos, llbuf)); goto err; } } - DBUG_ASSERT(rli->relay_log_pos >= BIN_LOG_HEADER_SIZE); - DBUG_ASSERT(my_b_tell(rli->cur_log) == rli->relay_log_pos); + DBUG_ASSERT(rli->event_relay_log_pos >= BIN_LOG_HEADER_SIZE); + DBUG_ASSERT(my_b_tell(rli->cur_log) == rli->event_relay_log_pos); 
/* Now change the cache from READ to WRITE - must do this before flush_relay_log_info @@ -1551,14 +1695,16 @@ static bool wait_for_relay_log_space(RELAY_LOG_INFO* rli) { bool slave_killed=0; MASTER_INFO* mi = rli->mi; + const char *save_proc_info; THD* thd = mi->io_thd; DBUG_ENTER("wait_for_relay_log_space"); + pthread_mutex_lock(&rli->log_space_lock); - const char* save_proc_info= thd->enter_cond(&rli->log_space_cond, - &rli->log_space_lock, - "Waiting for the SQL slave \ -thread to free enough relay log space"); + save_proc_info= thd->enter_cond(&rli->log_space_cond, + &rli->log_space_lock, + "\ +Waiting for the slave SQL thread to free enough relay log space"); while (rli->log_space_limit < rli->log_space_total && !(slave_killed=io_slave_killed(thd,mi)) && !rli->ignore_log_space_limit) @@ -1572,7 +1718,7 @@ static int count_relay_log_space(RELAY_LOG_INFO* rli) { LOG_INFO linfo; DBUG_ENTER("count_relay_log_space"); - rli->log_space_total = 0; + rli->log_space_total= 0; if (rli->relay_log.find_log_pos(&linfo, NullS, 1)) { sql_print_error("Could not find first log while counting relay log space"); @@ -1592,6 +1738,56 @@ static int count_relay_log_space(RELAY_LOG_INFO* rli) DBUG_RETURN(0); } + +/* + Builds a Rotate from the ignored events' info and writes it to relay log. + + SYNOPSIS + write_ignored_events_info_to_relay_log() + thd pointer to I/O thread's thd + mi + + DESCRIPTION + Slave I/O thread, going to die, must leave a durable trace of the + ignored events' end position for the use of the slave SQL thread, by + calling this function. Only that thread can call it (see assertion). + */ +static void write_ignored_events_info_to_relay_log(THD *thd, MASTER_INFO *mi) +{ + RELAY_LOG_INFO *rli= &mi->rli; + pthread_mutex_t *log_lock= rli->relay_log.get_log_lock(); + DBUG_ASSERT(thd == mi->io_thd); + pthread_mutex_lock(log_lock); + if (rli->ign_master_log_name_end[0]) + { + DBUG_PRINT("info",("writing a Rotate event to track down ignored events")); + Rotate_log_event *ev= new Rotate_log_event(thd, rli->ign_master_log_name_end, + 0, rli->ign_master_log_pos_end, + Rotate_log_event::DUP_NAME); + rli->ign_master_log_name_end[0]= 0; + /* can unlock before writing as slave SQL thd will soon see our Rotate */ + pthread_mutex_unlock(log_lock); + if (likely((bool)ev)) + { + ev->server_id= 0; // don't be ignored by slave SQL thread + if (unlikely(rli->relay_log.append(ev))) + sql_print_error("Slave I/O thread failed to write a Rotate event" + " to the relay log, " + "SHOW SLAVE STATUS may be inaccurate"); + rli->relay_log.harvest_bytes_written(&rli->log_space_total); + flush_master_info(mi, 1); + delete ev; + } + else + sql_print_error("Slave I/O thread failed to create a Rotate event" + " (out of memory?), " + "SHOW SLAVE STATUS may be inaccurate"); + } + else + pthread_mutex_unlock(log_lock); +} + + void init_master_info_with_options(MASTER_INFO* mi) { mi->master_log_name[0] = 0; @@ -1602,18 +1798,47 @@ void init_master_info_with_options(MASTER_INFO* mi) if (master_user) strmake(mi->user, master_user, sizeof(mi->user) - 1); if (master_password) - strmake(mi->password, master_password, HASH_PASSWORD_LENGTH); + strmake(mi->password, master_password, MAX_PASSWORD_LENGTH); mi->port = master_port; mi->connect_retry = master_connect_retry; + + mi->ssl= master_ssl; + if (master_ssl_ca) + strmake(mi->ssl_ca, master_ssl_ca, sizeof(mi->ssl_ca)-1); + if (master_ssl_capath) + strmake(mi->ssl_capath, master_ssl_capath, sizeof(mi->ssl_capath)-1); + if (master_ssl_cert) + strmake(mi->ssl_cert, master_ssl_cert, 
sizeof(mi->ssl_cert)-1); + if (master_ssl_cipher) + strmake(mi->ssl_cipher, master_ssl_cipher, sizeof(mi->ssl_cipher)-1); + if (master_ssl_key) + strmake(mi->ssl_key, master_ssl_key, sizeof(mi->ssl_key)-1); +} + +void clear_slave_error(RELAY_LOG_INFO* rli) +{ + /* Clear the errors displayed by SHOW SLAVE STATUS */ + rli->last_slave_error[0]= 0; + rli->last_slave_errno= 0; } -void clear_last_slave_error(RELAY_LOG_INFO* rli) +/* + Reset UNTIL condition for RELAY_LOG_INFO + SYNOPSYS + clear_until_condition() + rli - RELAY_LOG_INFO structure where UNTIL condition should be reset + */ +void clear_until_condition(RELAY_LOG_INFO* rli) { - //Clear the errors displayed by SHOW SLAVE STATUS - rli->last_slave_error[0]=0; - rli->last_slave_errno=0; + rli->until_condition= RELAY_LOG_INFO::UNTIL_NONE; + rli->until_log_name[0]= 0; + rli->until_log_pos= 0; } + +#define LINES_IN_MASTER_INFO_WITH_SSL 14 + + int init_master_info(MASTER_INFO* mi, const char* master_info_fname, const char* slave_info_fname, bool abort_if_no_master_info_file, @@ -1646,7 +1871,6 @@ int init_master_info(MASTER_INFO* mi, const char* master_info_fname, mi->mysql=0; mi->file_id=1; - mi->ignore_stop_event=0; fn_format(fname, master_info_fname, mysql_data_home, "", 4+32); /* @@ -1712,25 +1936,87 @@ file '%s')", fname); } mi->fd = fd; - int port, connect_retry, master_log_pos; - + int port, connect_retry, master_log_pos, ssl= 0, lines; + char *first_non_digit; + + /* + Starting from 4.1.x master.info has new format. Now its + first line contains number of lines in file. By reading this + number we will be always distinguish to which version our + master.info corresponds to. We can't simply count lines in + file since versions before 4.1.x could generate files with more + lines than needed. + If first line doesn't contain a number or contain number less than + 14 then such file is treated like file from pre 4.1.1 version. + There is no ambiguity when reading an old master.info, as before + 4.1.1, the first line contained the binlog's name, which is either + empty or has an extension (contains a '.'), so can't be confused + with an integer. + + So we're just reading first line and trying to figure which version + is this. + */ + + /* + The first row is temporarily stored in mi->master_log_name, + if it is line count and not binlog name (new format) it will be + overwritten by the second row later. 
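The heuristic can be restated as a small standalone function (master_info_line_count is a hypothetical helper; the server performs the same strtoul()/first_non_digit test inline, with LINES_IN_MASTER_INFO_WITH_SSL equal to 14):

#include <cstdio>
#include <cstdlib>

// First-line test: a pure integer >= 14 means the new (4.1.x) layout where
// line 1 is the line count; anything else is treated as the old 7-line
// layout whose first line is the master binlog name (empty or containing '.').
static int master_info_line_count(const char *first_line)
{
  char *first_non_digit;
  unsigned long lines= strtoul(first_line, &first_non_digit, 10);
  if (first_line[0] != '\0' && *first_non_digit == '\0' && lines >= 14)
    return (int) lines;               // new format
  return 7;                           // old, pre-4.1.1 format
}

int main()
{
  std::printf("%d %d %d\n",
              master_info_line_count("14"),              // 14: new format
              master_info_line_count("master-bin.001"),  // 7: old format
              master_info_line_count(""));               // 7: old format
  return 0;
}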
+ */ if (init_strvar_from_file(mi->master_log_name, sizeof(mi->master_log_name), &mi->file, - "") || - init_intvar_from_file(&master_log_pos, &mi->file, 4) || + "")) + goto errwithmsg; + + lines= strtoul(mi->master_log_name, &first_non_digit, 10); + + if (mi->master_log_name[0]!='\0' && + *first_non_digit=='\0' && lines >= LINES_IN_MASTER_INFO_WITH_SSL) + { // Seems to be new format + if (init_strvar_from_file(mi->master_log_name, + sizeof(mi->master_log_name), &mi->file, "")) + goto errwithmsg; + } + else + lines= 7; + + if (init_intvar_from_file(&master_log_pos, &mi->file, 4) || init_strvar_from_file(mi->host, sizeof(mi->host), &mi->file, master_host) || init_strvar_from_file(mi->user, sizeof(mi->user), &mi->file, master_user) || - init_strvar_from_file(mi->password, HASH_PASSWORD_LENGTH+1, &mi->file, - master_password) || + init_strvar_from_file(mi->password, SCRAMBLED_PASSWORD_CHAR_LENGTH+1, + &mi->file, master_password) || init_intvar_from_file(&port, &mi->file, master_port) || init_intvar_from_file(&connect_retry, &mi->file, master_connect_retry)) - { - sql_print_error("Error reading master configuration"); - goto err; - } + goto errwithmsg; + + /* + If file has ssl part use it even if we have server without + SSL support. But these option will be ignored later when + slave will try connect to master, so in this case warning + is printed. + */ + if (lines >= LINES_IN_MASTER_INFO_WITH_SSL && + (init_intvar_from_file(&ssl, &mi->file, master_ssl) || + init_strvar_from_file(mi->ssl_ca, sizeof(mi->ssl_ca), + &mi->file, master_ssl_ca) || + init_strvar_from_file(mi->ssl_capath, sizeof(mi->ssl_capath), + &mi->file, master_ssl_capath) || + init_strvar_from_file(mi->ssl_cert, sizeof(mi->ssl_cert), + &mi->file, master_ssl_cert) || + init_strvar_from_file(mi->ssl_cipher, sizeof(mi->ssl_cipher), + &mi->file, master_ssl_cipher) || + init_strvar_from_file(mi->ssl_key, sizeof(mi->ssl_key), + &mi->file, master_ssl_key))) + goto errwithmsg; +#ifndef HAVE_OPENSSL + if (ssl) + sql_print_error("SSL information in the master info file " + "('%s') are ignored because this MySQL slave was compiled " + "without SSL support.", fname); +#endif /* HAVE_OPENSSL */ + /* This has to be handled here as init_intvar_from_file can't handle my_off_t types @@ -1738,6 +2024,7 @@ file '%s')", fname); mi->master_log_pos= (my_off_t) master_log_pos; mi->port= (uint) port; mi->connect_retry= (uint) connect_retry; + mi->ssl= (my_bool) ssl; } DBUG_PRINT("master_info",("log_file_name: %s position: %ld", mi->master_log_name, @@ -1750,11 +2037,14 @@ file '%s')", fname); mi->inited = 1; // now change cache READ -> WRITE - must do this before flush_master_info reinit_io_cache(&mi->file, WRITE_CACHE,0L,0,1); - if ((error= test(flush_master_info(mi, 1)))) + if ((error=test(flush_master_info(mi, 1)))) sql_print_error("Failed to flush master info file"); pthread_mutex_unlock(&mi->data_lock); DBUG_RETURN(error); - + +errwithmsg: + sql_print_error("Error reading master configuration"); + err: if (fd >= 0) { @@ -1769,110 +2059,264 @@ err: int register_slave_on_master(MYSQL* mysql) { - String packet; - char buf[4]; + char buf[1024], *pos= buf; + uint report_host_len, report_user_len=0, report_password_len=0; if (!report_host) return 0; - - int4store(buf, server_id); - packet.append(buf, 4); - - net_store_data(&packet, report_host); + report_host_len= strlen(report_host); if (report_user) - net_store_data(&packet, report_user); - else - packet.append((char)0); - + report_user_len= strlen(report_user); if (report_password) - 
net_store_data(&packet, report_password); - else - packet.append((char)0); - - int2store(buf, (uint16)report_port); - packet.append(buf, 2); - int4store(buf, rpl_recovery_rank); - packet.append(buf, 4); - int4store(buf, 0); /* tell the master will fill in master_id */ - packet.append(buf, 4); - - if (mc_simple_command(mysql, COM_REGISTER_SLAVE, (char*)packet.ptr(), - packet.length(), 0)) + report_password_len= strlen(report_password); + /* 30 is a good safety margin */ + if (report_host_len + report_user_len + report_password_len + 30 > + sizeof(buf)) + return 0; // safety + + int4store(pos, server_id); pos+= 4; + pos= net_store_data(pos, report_host, report_host_len); + pos= net_store_data(pos, report_user, report_user_len); + pos= net_store_data(pos, report_password, report_password_len); + int2store(pos, (uint16) report_port); pos+= 2; + int4store(pos, rpl_recovery_rank); pos+= 4; + /* The master will fill in master_id */ + int4store(pos, 0); pos+= 4; + + if (simple_command(mysql, COM_REGISTER_SLAVE, (char*) buf, + (uint) (pos- buf), 0)) { sql_print_error("Error on COM_REGISTER_SLAVE: %d '%s'", - mc_mysql_errno(mysql), - mc_mysql_error(mysql)); + mysql_errno(mysql), + mysql_error(mysql)); return 1; } - return 0; } + +/* + Builds a String from a HASH of TABLE_RULE_ENT. Cannot be used for any other + hash, as it assumes that the hash entries are TABLE_RULE_ENT. + + SYNOPSIS + table_rule_ent_hash_to_str() + s pointer to the String to fill + h pointer to the HASH to read + + RETURN VALUES + none +*/ + +void table_rule_ent_hash_to_str(String* s, HASH* h) +{ + s->length(0); + for (uint i=0 ; i < h->records ; i++) + { + TABLE_RULE_ENT* e= (TABLE_RULE_ENT*) hash_element(h, i); + if (s->length()) + s->append(','); + s->append(e->db,e->key_len); + } +} + +/* + Mostly the same thing as above +*/ + +void table_rule_ent_dynamic_array_to_str(String* s, DYNAMIC_ARRAY* a) +{ + s->length(0); + for (uint i=0 ; i < a->elements ; i++) + { + TABLE_RULE_ENT* e; + get_dynamic(a, (gptr)&e, i); + if (s->length()) + s->append(','); + s->append(e->db,e->key_len); + } +} + int show_master_info(THD* thd, MASTER_INFO* mi) { // TODO: fix this for multi-master - DBUG_ENTER("show_master_info"); List<Item> field_list; + Protocol *protocol= thd->protocol; + DBUG_ENTER("show_master_info"); + + field_list.push_back(new Item_empty_string("Slave_IO_State", + 14)); field_list.push_back(new Item_empty_string("Master_Host", sizeof(mi->host))); field_list.push_back(new Item_empty_string("Master_User", sizeof(mi->user))); - field_list.push_back(new Item_empty_string("Master_Port", 6)); - field_list.push_back(new Item_empty_string("Connect_retry", 6)); + field_list.push_back(new Item_return_int("Master_Port", 7, + MYSQL_TYPE_LONG)); + field_list.push_back(new Item_return_int("Connect_Retry", 10, + MYSQL_TYPE_LONG)); field_list.push_back(new Item_empty_string("Master_Log_File", - FN_REFLEN)); - field_list.push_back(new Item_empty_string("Read_Master_Log_Pos", 12)); + FN_REFLEN)); + field_list.push_back(new Item_return_int("Read_Master_Log_Pos", 10, + MYSQL_TYPE_LONGLONG)); field_list.push_back(new Item_empty_string("Relay_Log_File", - FN_REFLEN)); - field_list.push_back(new Item_empty_string("Relay_Log_Pos", 12)); + FN_REFLEN)); + field_list.push_back(new Item_return_int("Relay_Log_Pos", 10, + MYSQL_TYPE_LONGLONG)); field_list.push_back(new Item_empty_string("Relay_Master_Log_File", - FN_REFLEN)); + FN_REFLEN)); field_list.push_back(new Item_empty_string("Slave_IO_Running", 3)); field_list.push_back(new 
Item_empty_string("Slave_SQL_Running", 3)); - field_list.push_back(new Item_empty_string("Replicate_do_db", 20)); - field_list.push_back(new Item_empty_string("Replicate_ignore_db", 20)); - field_list.push_back(new Item_empty_string("Last_errno", 4)); - field_list.push_back(new Item_empty_string("Last_error", 20)); - field_list.push_back(new Item_empty_string("Skip_counter", 12)); - field_list.push_back(new Item_empty_string("Exec_master_log_pos", 12)); - field_list.push_back(new Item_empty_string("Relay_log_space", 12)); - if (send_fields(thd, field_list, 1)) + field_list.push_back(new Item_empty_string("Replicate_Do_DB", 20)); + field_list.push_back(new Item_empty_string("Replicate_Ignore_DB", 20)); + field_list.push_back(new Item_empty_string("Replicate_Do_Table", 20)); + field_list.push_back(new Item_empty_string("Replicate_Ignore_Table", 23)); + field_list.push_back(new Item_empty_string("Replicate_Wild_Do_Table", 24)); + field_list.push_back(new Item_empty_string("Replicate_Wild_Ignore_Table", + 28)); + field_list.push_back(new Item_return_int("Last_Errno", 4, MYSQL_TYPE_LONG)); + field_list.push_back(new Item_empty_string("Last_Error", 20)); + field_list.push_back(new Item_return_int("Skip_Counter", 10, + MYSQL_TYPE_LONG)); + field_list.push_back(new Item_return_int("Exec_Master_Log_Pos", 10, + MYSQL_TYPE_LONGLONG)); + field_list.push_back(new Item_return_int("Relay_Log_Space", 10, + MYSQL_TYPE_LONGLONG)); + field_list.push_back(new Item_empty_string("Until_Condition", 6)); + field_list.push_back(new Item_empty_string("Until_Log_File", FN_REFLEN)); + field_list.push_back(new Item_return_int("Until_Log_Pos", 10, + MYSQL_TYPE_LONGLONG)); + field_list.push_back(new Item_empty_string("Master_SSL_Allowed", 7)); + field_list.push_back(new Item_empty_string("Master_SSL_CA_File", + sizeof(mi->ssl_ca))); + field_list.push_back(new Item_empty_string("Master_SSL_CA_Path", + sizeof(mi->ssl_capath))); + field_list.push_back(new Item_empty_string("Master_SSL_Cert", + sizeof(mi->ssl_cert))); + field_list.push_back(new Item_empty_string("Master_SSL_Cipher", + sizeof(mi->ssl_cipher))); + field_list.push_back(new Item_empty_string("Master_SSL_Key", + sizeof(mi->ssl_key))); + field_list.push_back(new Item_return_int("Seconds_Behind_Master", 10, + MYSQL_TYPE_LONGLONG)); + + if (protocol->send_fields(&field_list, 1)) DBUG_RETURN(-1); if (mi->host[0]) { DBUG_PRINT("info",("host is set: '%s'", mi->host)); String *packet= &thd->packet; - packet->length(0); + protocol->prepare_for_resend(); + /* + TODO: we read slave_running without run_lock, whereas these variables + are updated under run_lock and not data_lock. In 5.0 we should lock + run_lock on top of data_lock (with good order). + */ pthread_mutex_lock(&mi->data_lock); pthread_mutex_lock(&mi->rli.data_lock); - net_store_data(packet, mi->host); - net_store_data(packet, mi->user); - net_store_data(packet, (uint32) mi->port); - net_store_data(packet, (uint32) mi->connect_retry); - net_store_data(packet, mi->master_log_name); - net_store_data(packet, (longlong) mi->master_log_pos); - net_store_data(packet, mi->rli.relay_log_name + - dirname_length(mi->rli.relay_log_name)); - net_store_data(packet, (longlong) mi->rli.relay_log_pos); - net_store_data(packet, mi->rli.master_log_name); - net_store_data(packet, mi->slave_running ? "Yes":"No"); - net_store_data(packet, mi->rli.slave_running ? 
"Yes":"No"); - net_store_data(packet, &replicate_do_db); - net_store_data(packet, &replicate_ignore_db); - net_store_data(packet, (uint32)mi->rli.last_slave_errno); - net_store_data(packet, mi->rli.last_slave_error); - net_store_data(packet, mi->rli.slave_skip_counter); - net_store_data(packet, (longlong) mi->rli.master_log_pos); - net_store_data(packet, (longlong) mi->rli.log_space_total); + + protocol->store(mi->io_thd ? mi->io_thd->proc_info : "", &my_charset_bin); + protocol->store(mi->host, &my_charset_bin); + protocol->store(mi->user, &my_charset_bin); + protocol->store((uint32) mi->port); + protocol->store((uint32) mi->connect_retry); + protocol->store(mi->master_log_name, &my_charset_bin); + protocol->store((ulonglong) mi->master_log_pos); + protocol->store(mi->rli.group_relay_log_name + + dirname_length(mi->rli.group_relay_log_name), + &my_charset_bin); + protocol->store((ulonglong) mi->rli.group_relay_log_pos); + protocol->store(mi->rli.group_master_log_name, &my_charset_bin); + protocol->store(mi->slave_running == MYSQL_SLAVE_RUN_CONNECT ? + "Yes" : "No", &my_charset_bin); + protocol->store(mi->rli.slave_running ? "Yes":"No", &my_charset_bin); + protocol->store(&replicate_do_db); + protocol->store(&replicate_ignore_db); + /* + We can't directly use some protocol->store for + replicate_*_table, + as Protocol doesn't know the TABLE_RULE_ENT struct. + We first build Strings and then pass them to protocol->store. + */ + char buf[256]; + String tmp(buf, sizeof(buf), &my_charset_bin); + table_rule_ent_hash_to_str(&tmp, &replicate_do_table); + protocol->store(&tmp); + table_rule_ent_hash_to_str(&tmp, &replicate_ignore_table); + protocol->store(&tmp); + table_rule_ent_dynamic_array_to_str(&tmp, &replicate_wild_do_table); + protocol->store(&tmp); + table_rule_ent_dynamic_array_to_str(&tmp, &replicate_wild_ignore_table); + protocol->store(&tmp); + + protocol->store((uint32) mi->rli.last_slave_errno); + protocol->store(mi->rli.last_slave_error, &my_charset_bin); + protocol->store((uint32) mi->rli.slave_skip_counter); + protocol->store((ulonglong) mi->rli.group_master_log_pos); + protocol->store((ulonglong) mi->rli.log_space_total); + + protocol->store( + mi->rli.until_condition==RELAY_LOG_INFO::UNTIL_NONE ? "None": + ( mi->rli.until_condition==RELAY_LOG_INFO::UNTIL_MASTER_POS? "Master": + "Relay"), &my_charset_bin); + protocol->store(mi->rli.until_log_name, &my_charset_bin); + protocol->store((ulonglong) mi->rli.until_log_pos); + +#ifdef HAVE_OPENSSL + protocol->store(mi->ssl? "Yes":"No", &my_charset_bin); +#else + protocol->store(mi->ssl? "Ignored":"No", &my_charset_bin); +#endif + protocol->store(mi->ssl_ca, &my_charset_bin); + protocol->store(mi->ssl_capath, &my_charset_bin); + protocol->store(mi->ssl_cert, &my_charset_bin); + protocol->store(mi->ssl_cipher, &my_charset_bin); + protocol->store(mi->ssl_key, &my_charset_bin); + + /* + Seconds_Behind_Master: if SQL thread is running and I/O thread is + connected, we can compute it otherwise show NULL (i.e. unknown). + */ + if ((mi->slave_running == MYSQL_SLAVE_RUN_CONNECT) && + mi->rli.slave_running) + { + long tmp= (long)((time_t)time((time_t*) 0) + - mi->rli.last_master_timestamp) + - mi->clock_diff_with_master; + /* + Apparently on some systems tmp can be <0. Here are possible reasons + related to MySQL: + - the master is itself a slave of another master whose time is ahead. + - somebody used an explicit SET TIMESTAMP on the master. 
+ A possible reason related to the second-level granularity of time functions + (nothing to do with MySQL), which can explain a value of -1: + assume the master's and slave's time are perfectly synchronized, and + that at the slave's connection time, when the master's timestamp is read, + it is at the very end of second 1, and (a very short time later) when + the slave's timestamp is read it is at the very beginning of second + 2. Then the recorded value for master is 1 and the recorded value for + slave is 2. At SHOW SLAVE STATUS time, assume that the difference + between the slave's timestamp and rli->last_master_timestamp is 0 + (i.e. they are in the same second), then we get 0-(2-1)=-1 as a result. + This confuses users, so we don't go below 0: hence the max(). + + last_master_timestamp == 0 (an "impossible" timestamp 1970) is a + special marker to say "consider we have caught up". + */ + protocol->store((longlong)(mi->rli.last_master_timestamp ? max(0, tmp) + : 0)); + } + else + protocol->store_null(); + pthread_mutex_unlock(&mi->rli.data_lock); pthread_mutex_unlock(&mi->data_lock); if (my_net_write(&thd->net, (char*)thd->packet.ptr(), packet->length())) DBUG_RETURN(-1); } - send_eof(&thd->net); + send_eof(thd); DBUG_RETURN(0); } @@ -1884,33 +2328,65 @@ bool flush_master_info(MASTER_INFO* mi, bool flush_relay_log_cache) DBUG_ENTER("flush_master_info"); DBUG_PRINT("enter",("master_pos: %ld", (long) mi->master_log_pos)); - if (flush_relay_log_cache) /* Comments for this are in MySQL 4.1 */ + /* + Flush the relay log to disk. If we don't do it, then the relay log will + have some part (its last kilobytes) in memory only, so if the slave server + dies now, with, say, master positions 100 to 150 in memory only (not + on disk), and with position 150 in master.info, then when the slave + restarts, the I/O thread will fetch binlogs from 150, so in the relay log + we will have "[0, 100] U [150, infinity[" and nobody will notice it, so the + SQL thread will jump from 100 to 150, and replication will silently break. + + When we come to this place in the code, the relay log may or may not be initialized; + the caller is responsible for setting 'flush_relay_log_cache' accordingly. + */ + if (flush_relay_log_cache) flush_io_cache(mi->rli.relay_log.get_log_file()); + + /* + We flushed the relay log BEFORE the master.info file, because if we crash + now, we will get a duplicate event in the relay log at restart. If we + flushed in the other order, we would get a hole in the relay log. + And a duplicate is better than a hole (with a duplicate, in later versions we + can add detection and scrap one event; with a hole there's nothing we can + do). + */ + + /* + In certain cases this code may create master.info files that seem + corrupted, because of extra lines filled with garbage at the end of the + file (this happens if the new contents take less space than the previous + contents of the file). But because the number of lines is stored in the first line + of the file, we don't care about this garbage.
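To make the layout concrete, here is a minimal standalone sketch of the 14-line master.info format that the my_b_printf() call just below produces, using plain stdio instead of the server's IO_CACHE; all field values are made up:

#include <stdio.h>

#define LINES_IN_MASTER_INFO_WITH_SSL 14

/* Same field order as flush_master_info(): line count, log name, position,
   host, user, password, port, connect retry, then the five SSL fields. */
static void write_master_info_sketch(FILE *f)
{
  fprintf(f, "%u\n%s\n%s\n%s\n%s\n%s\n%d\n%d\n%d\n%s\n%s\n%s\n%s\n%s\n",
          (unsigned) LINES_IN_MASTER_INFO_WITH_SSL,
          "master-bin.000042",          /* master_log_name */
          "98765",                      /* master_log_pos  */
          "master.example.com",         /* host            */
          "repl",                       /* user            */
          "secret",                     /* password        */
          3306,                         /* port            */
          60,                           /* connect_retry   */
          0,                            /* ssl             */
          "", "", "", "", "");          /* ssl_ca, ssl_capath, ssl_cert,
                                           ssl_cipher, ssl_key */
}

int main(void)
{
  write_master_info_sketch(stdout);
  return 0;
}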
+ */ + my_b_seek(file, 0L); - my_b_printf(file, "%s\n%s\n%s\n%s\n%s\n%d\n%d\n", - mi->master_log_name, llstr(mi->master_log_pos, lbuf), + my_b_printf(file, "%u\n%s\n%s\n%s\n%s\n%s\n%d\n%d\n%d\n%s\n%s\n%s\n%s\n%s\n", + LINES_IN_MASTER_INFO_WITH_SSL, + mi->master_log_name, llstr(mi->master_log_pos, lbuf), mi->host, mi->user, - mi->password, mi->port, mi->connect_retry - ); + mi->password, mi->port, mi->connect_retry, + (int)(mi->ssl), mi->ssl_ca, mi->ssl_capath, mi->ssl_cert, + mi->ssl_cipher, mi->ssl_key); flush_io_cache(file); DBUG_RETURN(0); } st_relay_log_info::st_relay_log_info() - :info_fd(-1), cur_log_fd(-1), master_log_pos(0), save_temporary_tables(0), - cur_log_old_open_count(0), log_space_total(0), ignore_log_space_limit(0), - slave_skip_counter(0), abort_pos_wait(0), slave_run_id(0), - sql_thd(0), last_slave_errno(0), inited(0), abort_slave(0), - slave_running(0), skip_log_purge(0), - inside_transaction(0) /* the default is autocommit=1 */ -{ - relay_log_name[0] = master_log_name[0] = 0; - last_slave_error[0]=0; - + :info_fd(-1), cur_log_fd(-1), save_temporary_tables(0), + cur_log_old_open_count(0), group_master_log_pos(0), log_space_total(0), + ignore_log_space_limit(0), last_master_timestamp(0), slave_skip_counter(0), + abort_pos_wait(0), slave_run_id(0), sql_thd(0), last_slave_errno(0), + inited(0), abort_slave(0), slave_running(0), until_condition(UNTIL_NONE), + until_log_pos(0), retried_trans(0) +{ + group_relay_log_name[0]= event_relay_log_name[0]= + group_master_log_name[0]= 0; + last_slave_error[0]= until_log_name[0]= ign_master_log_name_end[0]= 0; - bzero((char *)&info_file,sizeof(info_file)); - bzero((char *)&cache_buf, sizeof(cache_buf)); + bzero((char*) &info_file, sizeof(info_file)); + bzero((char*) &cache_buf, sizeof(cache_buf)); pthread_mutex_init(&run_lock, MY_MUTEX_INIT_FAST); pthread_mutex_init(&data_lock, MY_MUTEX_INIT_FAST); pthread_mutex_init(&log_space_lock, MY_MUTEX_INIT_FAST); @@ -1971,13 +2447,13 @@ int st_relay_log_info::wait_for_pos(THD* thd, String* log_name, set_timespec(abstime,timeout); DBUG_ENTER("wait_for_pos"); - DBUG_PRINT("enter",("master_log_name: '%s' pos: %lu timeout: %ld", - master_log_name, (ulong) master_log_pos, + DBUG_PRINT("enter",("group_master_log_name: '%s' pos: %lu timeout: %ld", + group_master_log_name, (ulong) group_master_log_pos, (long) timeout)); pthread_mutex_lock(&data_lock); const char *msg= thd->enter_cond(&data_cond, &data_lock, - "Waiting for the SQL slave thread to " + "Waiting for the slave SQL thread to " "advance position"); /* This function will abort when it notices that some CHANGE MASTER or @@ -1995,18 +2471,19 @@ int st_relay_log_info::wait_for_pos(THD* thd, String* log_name, init_abort_pos_wait= abort_pos_wait; /* - We'll need to + We'll need to handle all possible log names comparisons (e.g. 999 vs 1000). 
- We use ulong for string->number conversion ; this is no + We use ulong for string->number conversion ; this is no stronger limitation than in find_uniq_filename in sql/log.cc */ ulong log_name_extension; char log_name_tmp[FN_REFLEN]; //make a char[] from String - char *end= strmake(log_name_tmp, log_name->ptr(), min(log_name->length(), - FN_REFLEN-1)); + + strmake(log_name_tmp, log_name->ptr(), min(log_name->length(), FN_REFLEN-1)); + char *p= fn_ext(log_name_tmp); char *p_end; - if (!*p || log_pos<0) + if (!*p || log_pos<0) { error= -2; //means improper arguments goto err; @@ -2033,21 +2510,24 @@ int st_relay_log_info::wait_for_pos(THD* thd, String* log_name, { bool pos_reached; int cmp_result= 0; + /* - master_log_name can be "", if we are just after a fresh replication start - or after a CHANGE MASTER TO MASTER_HOST/PORT (before we have executed one - Rotate event from the master) or (rare) if the user is doing a weird - slave setup (see next paragraph). - If master_log_name is "", we assume we don't have enough info to do the - comparison yet, so we just wait until more data. In this case - master_log_pos is always 0 except if somebody (wrongly) sets this slave - to be a slave of itself without using --replicate-same-server-id (an - unsupported configuration which does nothing), then master_log_pos will - grow and master_log_name will stay "". + group_master_log_name can be "", if we are just after a fresh + replication start or after a CHANGE MASTER TO MASTER_HOST/PORT + (before we have executed one Rotate event from the master) or + (rare) if the user is doing a weird slave setup (see next + paragraph). If group_master_log_name is "", we assume we don't + have enough info to do the comparison yet, so we just wait until + more data. In this case master_log_pos is always 0 except if + somebody (wrongly) sets this slave to be a slave of itself + without using --replicate-same-server-id (an unsupported + configuration which does nothing), then group_master_log_pos + will grow and group_master_log_name will stay "". */ - if (*master_log_name) + if (*group_master_log_name) { - char *basename= master_log_name + dirname_length(master_log_name); + char *basename= (group_master_log_name + + dirname_length(group_master_log_name)); /* First compare the parts before the extension. Find the dot in the master's log basename, @@ -2062,13 +2542,13 @@ int st_relay_log_info::wait_for_pos(THD* thd, String* log_name, } // Now compare extensions. char *q_end; - ulong master_log_name_extension= strtoul(q, &q_end, 10); - if (master_log_name_extension < log_name_extension) + ulong group_master_log_name_extension= strtoul(q, &q_end, 10); + if (group_master_log_name_extension < log_name_extension) cmp_result= -1 ; else - cmp_result= (master_log_name_extension > log_name_extension) ? 1 : 0 ; + cmp_result= (group_master_log_name_extension > log_name_extension) ? 1 : 0 ; - pos_reached= ((!cmp_result && master_log_pos >= (ulonglong)log_pos) || + pos_reached= ((!cmp_result && group_master_log_pos >= (ulonglong)log_pos) || cmp_result > 0); if (pos_reached || thd->killed) break; @@ -2127,21 +2607,23 @@ improper_arguments: %d timed_out: %d", } +/* + init_slave_thread() +*/ + static int init_slave_thread(THD* thd, SLAVE_THD_TYPE thd_type) { DBUG_ENTER("init_slave_thread"); thd->system_thread = (thd_type == SLAVE_THD_SQL) ? 
SYSTEM_THREAD_SLAVE_SQL : SYSTEM_THREAD_SLAVE_IO; - thd->bootstrap= 1; thd->host_or_ip= ""; - thd->client_capabilities = 0; my_net_init(&thd->net, 0); thd->net.read_timeout = slave_net_timeout; - thd->master_access= ~0; + thd->master_access= ~(ulong)0; thd->priv_user = 0; thd->slave_thread = 1; /* - It's nonsense to constraint the slave threads with max_join_size; if a + It's nonsense to constrain the slave threads with max_join_size; if a query succeeded on master, we HAVE to execute it. So set OPTION_BIG_SELECTS. Setting max_join_size to HA_POS_ERROR is not enough (and it's not needed if we have OPTION_BIG_SELECTS) because an INSERT @@ -2219,23 +2701,23 @@ static int request_dump(MYSQL* mysql, MASTER_INFO* mi, DBUG_ENTER("request_dump"); // TODO if big log files: Change next to int8store() - int4store(buf, (longlong) mi->master_log_pos); + int4store(buf, (ulong) mi->master_log_pos); int2store(buf + 4, binlog_flags); int4store(buf + 6, server_id); len = (uint) strlen(logname); memcpy(buf + 10, logname,len); - if (mc_simple_command(mysql, COM_BINLOG_DUMP, buf, len + 10, 1)) + if (simple_command(mysql, COM_BINLOG_DUMP, buf, len + 10, 1)) { /* Something went wrong, so we will just reconnect and retry later in the future, we should do a better error analysis, but for now we just fill up the error log :-) */ - if (mc_mysql_errno(mysql) == ER_NET_READ_INTERRUPTED) + if (mysql_errno(mysql) == ER_NET_READ_INTERRUPTED) *suppress_warnings= 1; // Suppress reconnect warning else sql_print_error("Error on COM_BINLOG_DUMP: %d %s, will retry in %d secs", - mc_mysql_errno(mysql), mc_mysql_error(mysql), + mysql_errno(mysql), mysql_error(mysql), master_connect_retry); DBUG_RETURN(1); } @@ -2262,7 +2744,7 @@ static int request_table_dump(MYSQL* mysql, const char* db, const char* table) *p++ = table_len; memcpy(p, table, table_len); - if (mc_simple_command(mysql, COM_TABLE_DUMP, buf, p - buf + table_len, 1)) + if (simple_command(mysql, COM_TABLE_DUMP, buf, p - buf + table_len, 1)) { sql_print_error("request_table_dump: Error sending the table dump \ command"); @@ -2274,7 +2756,7 @@ command"); /* - read one event from the master + Read one event from the master SYNOPSIS read_event() @@ -2288,7 +2770,6 @@ command"); RETURN VALUES 'packet_error' Error number Length of packet - */ static ulong read_event(MYSQL* mysql, MASTER_INFO *mi, bool* suppress_warnings) @@ -2307,10 +2788,10 @@ static ulong read_event(MYSQL* mysql, MASTER_INFO *mi, bool* suppress_warnings) return packet_error; #endif - len = mc_net_safe_read(mysql); + len = net_safe_read(mysql); if (len == packet_error || (long) len < 1) { - if (mc_mysql_errno(mysql) == ER_NET_READ_INTERRUPTED) + if (mysql_errno(mysql) == ER_NET_READ_INTERRUPTED) { /* We are trying a normal reconnect after a read timeout; @@ -2322,15 +2803,16 @@ static ulong read_event(MYSQL* mysql, MASTER_INFO *mi, bool* suppress_warnings) else sql_print_error("Error reading packet from server: %s (\ server_errno=%d)", - mc_mysql_error(mysql), mc_mysql_errno(mysql)); + mysql_error(mysql), mysql_errno(mysql)); return packet_error; } - if (len == 1) + /* Check if eof packet */ + if (len < 8 && mysql->net.read_pos[0] == 254) { - sql_print_error("Slave: received 0 length packet from server, apparent\ + sql_print_error("Slave: received end packet from server, apparent\ master shutdown: %s", - mc_mysql_error(mysql)); + mysql_error(mysql)); return packet_error; } @@ -2353,14 +2835,135 @@ int check_expected_error(THD* thd, RELAY_LOG_INFO* rli, int expected_error) } } +/* + Check if condition 
stated in UNTIL clause of START SLAVE is reached. + SYNOPSIS + st_relay_log_info::is_until_satisfied() + DESCRIPTION + Checks if the UNTIL condition is reached. Caches the result of the last + comparison of the current log file name and the target log file name. So the cached + value should be invalidated if the current log file name changes + (see st_relay_log_info::notify_... functions). + + This caching is needed to avoid expensive string comparisons and + strtol() conversions needed for log name comparison. We don't need to + compare them each time this function is called, we only need to do this + when the current log name changes. If we have an UNTIL_MASTER_POS condition we + need to do this only after Rotate_log_event::exec_event() (which is + rare, so caching gives a real benefit), and if we have an UNTIL_RELAY_POS + condition then we should invalidate the cached comparison value after + inc_group_relay_log_pos(), which is called for each group of events (so we + have some benefit if we have something like queries that use + autoincrement or if we have transactions). + + Should be called ONLY if until_condition != UNTIL_NONE ! + RETURN VALUE + true - condition met or error happened (condition seems to have a + bad log file name) + false - condition not met +*/ + +bool st_relay_log_info::is_until_satisfied() +{ + const char *log_name; + ulonglong log_pos; + + DBUG_ASSERT(until_condition != UNTIL_NONE); + + if (until_condition == UNTIL_MASTER_POS) + { + log_name= group_master_log_name; + log_pos= group_master_log_pos; + } + else + { /* until_condition == UNTIL_RELAY_POS */ + log_name= group_relay_log_name; + log_pos= group_relay_log_pos; + } + + if (until_log_names_cmp_result == UNTIL_LOG_NAMES_CMP_UNKNOWN) + { + /* + We have no cached comparison result so we should compare log names + and cache the result + */ + + DBUG_ASSERT(*log_name || log_pos == 0); + + if (*log_name) + { + const char *basename= log_name + dirname_length(log_name); + + const char *q= (const char*)(fn_ext(basename)+1); + if (strncmp(basename, until_log_name, (int)(q-basename)) == 0) + { + /* Now compare extensions. */ + char *q_end; + ulong log_name_extension= strtoul(q, &q_end, 10); + if (log_name_extension < until_log_name_extension) + until_log_names_cmp_result= UNTIL_LOG_NAMES_CMP_LESS; + else + until_log_names_cmp_result= + (log_name_extension > until_log_name_extension) ? + UNTIL_LOG_NAMES_CMP_GREATER : UNTIL_LOG_NAMES_CMP_EQUAL ; + } + else + { + /* Probably an error, so we abort */ + sql_print_error("Slave SQL thread is stopped because UNTIL " + "condition is bad."); + return TRUE; + } + } + else + return until_log_pos == 0; + } + + return ((until_log_names_cmp_result == UNTIL_LOG_NAMES_CMP_EQUAL && + log_pos >= until_log_pos) || + until_log_names_cmp_result == UNTIL_LOG_NAMES_CMP_GREATER); +} + static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) { - DBUG_ASSERT(rli->sql_thd==thd); + /* + We acquire this mutex since we need it for all operations except + event execution. But we will release it in places where we will + wait for something, for example inside next_event(). + */ + pthread_mutex_lock(&rli->data_lock); + + /* + This tests if the position of the end of the last previously executed event + hits the UNTIL barrier. + We would prefer to test if the position of the start (or possibly end) of + the to-be-read event hits the UNTIL barrier; this is different if there + was an event ignored by the I/O thread just before (BUG#13861 to be + fixed).
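As a minimal illustration of the basename-plus-numeric-extension comparison used above (and in wait_for_pos()), here is a standalone sketch that substitutes strrchr() for the server's fn_ext()/dirname_length() helpers:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Returns <0, 0 or >0, comparing "base.NNNNNN" log names numerically on the
   extension, so that "mysql-bin.000999" sorts before "mysql-bin.001000". */
static int cmp_log_names(const char *a, const char *b)
{
  const char *ea= strrchr(a, '.'), *eb= strrchr(b, '.');
  size_t base_a= ea ? (size_t)(ea - a) : strlen(a);
  size_t base_b= eb ? (size_t)(eb - b) : strlen(b);
  if (base_a != base_b || strncmp(a, b, base_a) != 0)
    return strcmp(a, b);                        /* different base names */
  unsigned long na= ea ? strtoul(ea + 1, NULL, 10) : 0;
  unsigned long nb= eb ? strtoul(eb + 1, NULL, 10) : 0;
  return (na < nb) ? -1 : (na > nb) ? 1 : 0;
}

int main(void)
{
  printf("%d\n", cmp_log_names("mysql-bin.000999", "mysql-bin.001000")); /* -1 */
  return 0;
}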
+ */ + if (rli->until_condition!=RELAY_LOG_INFO::UNTIL_NONE && + rli->is_until_satisfied()) + { + char buf[22]; + sql_print_error("Slave SQL thread stopped because it reached its" + " UNTIL position %s", llstr(rli->until_pos(), buf)); + /* + Setting abort_slave flag because we do not want additional message about + error in query execution to be printed. + */ + rli->abort_slave= 1; + pthread_mutex_unlock(&rli->data_lock); + return 1; + } + Log_event * ev = next_event(rli); + DBUG_ASSERT(rli->sql_thd==thd); + if (sql_slave_killed(thd,rli)) { + pthread_mutex_unlock(&rli->data_lock); delete ev; return 1; } @@ -2368,7 +2971,6 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) { int type_code = ev->get_type_code(); int exec_res; - pthread_mutex_lock(&rli->data_lock); /* Skip queries originating from this server or number of @@ -2377,16 +2979,10 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) log files themselves. */ - /* - In 4.1, we updated queue_event() to add a similar test for - replicate_same_server_id, because in 4.1 the I/O thread is also filtering - events based on the server id. - */ if ((ev->server_id == (uint32) ::server_id && !replicate_same_server_id) || (rli->slave_skip_counter && type_code != ROTATE_EVENT)) { - /* TODO: I/O thread should not even log events with the same server id */ - rli->inc_pos(ev->get_event_len(), + rli->inc_group_relay_log_pos(ev->get_event_len(), type_code != STOP_EVENT ? ev->log_pos : LL(0), 1/* skip lock*/); flush_relay_log_info(rli); @@ -2402,21 +2998,77 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) pthread_mutex_unlock(&rli->data_lock); delete ev; return 0; // avoid infinite update loops - } + } pthread_mutex_unlock(&rli->data_lock); thd->server_id = ev->server_id; // use the original server id for logging thd->set_time(); // time the query + thd->lex->current_select= 0; if (!ev->when) ev->when = time(NULL); ev->thd = thd; exec_res = ev->exec_event(rli); DBUG_ASSERT(rli->sql_thd==thd); delete ev; + if (slave_trans_retries) + { + if (exec_res && + (thd->net.last_errno == ER_LOCK_DEADLOCK || + thd->net.last_errno == ER_LOCK_WAIT_TIMEOUT) && + !thd->is_fatal_error) + { + const char *errmsg; + /* + We were in a transaction which has been rolled back because of a + deadlock (currently, InnoDB deadlock detected by InnoDB) or lock + wait timeout (innodb_lock_wait_timeout exceeded); let's seek back to + BEGIN log event and retry it all again. + We have to not only seek but also + a) init_master_info(), to seek back to hot relay log's start for later + (for when we will come back to this hot log after re-processing the + possibly existing old logs where BEGIN is: check_binlog_magic() will + then need the cache to be at position 0 (see comments at beginning of + init_master_info()). + b) init_relay_log_pos(), because the BEGIN may be an older relay log. 
+ */ + if (rli->trans_retries < slave_trans_retries) + { + if (init_master_info(rli->mi, 0, 0, 0, SLAVE_SQL)) + sql_print_error("Failed to initialize the master info structure"); + else if (init_relay_log_pos(rli, + rli->group_relay_log_name, + rli->group_relay_log_pos, + 1, &errmsg)) + sql_print_error("Error initializing relay log position: %s", + errmsg); + else + { + exec_res= 0; + /* chance for concurrent connection to get more locks */ + safe_sleep(thd, min(rli->trans_retries, MAX_SLAVE_RETRY_PAUSE), + (CHECK_KILLED_FUNC)sql_slave_killed, (void*)rli); + pthread_mutex_lock(&rli->data_lock); // because of SHOW STATUS + rli->trans_retries++; + rli->retried_trans++; + pthread_mutex_unlock(&rli->data_lock); + DBUG_PRINT("info", ("Slave retries transaction " + "rli->trans_retries: %lu", rli->trans_retries)); + } + } + else + sql_print_error("Slave SQL thread retried transaction %lu time(s) " + "in vain, giving up. Consider raising the value of " + "the slave_transaction_retries variable.", + slave_trans_retries); + } + if (!((thd->options & OPTION_BEGIN) && opt_using_transactions)) + rli->trans_retries= 0; // restart from fresh + } return exec_res; } else { + pthread_mutex_unlock(&rli->data_lock); slave_print_error(rli, 0, "\ Could not parse relay log event entry. The possible reasons are: the master's \ binary log is corrupted (you can check this by running 'mysqlbinlog' on the \ @@ -2431,12 +3083,14 @@ on this slave.\ } -/* slave I/O thread */ +/* Slave I/O Thread entry point */ + extern "C" pthread_handler_decl(handle_slave_io,arg) { THD *thd; // needs to be first for thread_stack MYSQL *mysql; - MASTER_INFO *mi = (MASTER_INFO*)arg; + MASTER_INFO *mi = (MASTER_INFO*)arg; + RELAY_LOG_INFO *rli= &mi->rli; char llbuff[22]; uint retry_count; @@ -2445,7 +3099,7 @@ extern "C" pthread_handler_decl(handle_slave_io,arg) DBUG_ENTER("handle_slave_io"); #ifndef DBUG_OFF -slave_begin: +slave_begin: #endif DBUG_ASSERT(mi->inited); mysql= NULL ; @@ -2484,9 +3138,9 @@ slave_begin: mi->master_log_name, llstr(mi->master_log_pos,llbuff))); - if (!(mi->mysql = mysql = mc_mysql_init(NULL))) + if (!(mi->mysql = mysql = mysql_init(NULL))) { - sql_print_error("Slave I/O thread: error in mc_mysql_init()"); + sql_print_error("Slave I/O thread: error in mysql_init()"); goto err; } @@ -2494,7 +3148,7 @@ slave_begin: thd->proc_info = "Connecting to master"; // we can get killed during safe_connect if (!safe_connect(thd, mysql, mi)) - sql_print_error("Slave I/O thread: connected to master '%s@%s:%d',\ + sql_print_information("Slave I/O thread: connected to master '%s@%s:%d',\ replication started in log '%s' at position %s", mi->user, mi->host, mi->port, IO_RPL_LOG_NAME, @@ -2507,9 +3161,11 @@ slave_begin: connected: + // TODO: the assignment below should be under mutex (5.0) + mi->slave_running= MYSQL_SLAVE_RUN_CONNECT; thd->slave_net = &mysql->net; thd->proc_info = "Checking master version"; - if (check_master_version(mysql, mi)) + if (get_master_version_and_clock(mysql, mi)) goto err; if (!mi->old_format) { @@ -2538,8 +3194,12 @@ dump"); goto err; } + mi->slave_running= MYSQL_SLAVE_RUN_NOT_CONNECT; thd->proc_info= "Waiting to reconnect after a failed binlog dump request"; - mc_end_server(mysql); +#ifdef SIGNAL_WITH_VIO_CLOSE + thd->clear_active_vio(); +#endif + end_server(mysql); /* First time retry immediately, assuming that we can recover right away - if first time fails, sleep between re-tries @@ -2595,7 +3255,7 @@ after reconnect"); if (event_len == packet_error) { - uint mysql_error_number= 
mc_mysql_errno(mysql); + uint mysql_error_number= mysql_errno(mysql); if (mysql_error_number == ER_NET_PACKET_TOO_LARGE) { sql_print_error("\ @@ -2608,12 +3268,15 @@ max_allowed_packet", if (mysql_error_number == ER_MASTER_FATAL_ERROR_READING_BINLOG) { sql_print_error(ER(mysql_error_number), mysql_error_number, - mc_mysql_error(mysql)); + mysql_error(mysql)); goto err; } - thd->proc_info = "Waiting to reconnect after a failed master event \ -read"; - mc_end_server(mysql); + mi->slave_running= MYSQL_SLAVE_RUN_NOT_CONNECT; + thd->proc_info = "Waiting to reconnect after a failed master event read"; +#ifdef SIGNAL_WITH_VIO_CLOSE + thd->clear_active_vio(); +#endif + end_server(mysql); if (retry_count++) { if (retry_count > master_retry_count) @@ -2652,7 +3315,7 @@ reconnect done to recover from failed read"); sql_print_error("Slave I/O thread could not queue event from master"); goto err; } - flush_master_info(mi, 1); + flush_master_info(mi, 1); /* sure that we can flush the relay log */ /* See if the relay logs take too much space. We don't lock mi->rli.log_space_lock here; this dirty read saves time @@ -2670,16 +3333,16 @@ reconnect done to recover from failed read"); char llbuf1[22], llbuf2[22]; DBUG_PRINT("info", ("log_space_limit=%s log_space_total=%s \ ignore_log_space_limit=%d", - llstr(mi->rli.log_space_limit,llbuf1), - llstr(mi->rli.log_space_total,llbuf2), - (int) mi->rli.ignore_log_space_limit)); + llstr(rli->log_space_limit,llbuf1), + llstr(rli->log_space_total,llbuf2), + (int) rli->ignore_log_space_limit)); } #endif - if (mi->rli.log_space_limit && mi->rli.log_space_limit < - mi->rli.log_space_total && - !mi->rli.ignore_log_space_limit) - if (wait_for_relay_log_space(&mi->rli)) + if (rli->log_space_limit && rli->log_space_limit < + rli->log_space_total && + !rli->ignore_log_space_limit) + if (wait_for_relay_log_space(rli)) { sql_print_error("Slave I/O thread aborted while waiting for relay \ log space"); @@ -2707,9 +3370,10 @@ err: VOID(pthread_mutex_unlock(&LOCK_thread_count)); if (mysql) { - mc_mysql_close(mysql); + mysql_close(mysql); mi->mysql=0; } + write_ignored_events_info_to_relay_log(thd, mi); thd->proc_info = "Waiting for slave mutex on exit"; pthread_mutex_lock(&mi->run_lock); mi->slave_running = 0; @@ -2736,7 +3400,7 @@ err: } -/* slave SQL logic thread */ +/* Slave SQL Thread entry point */ extern "C" pthread_handler_decl(handle_slave_sql,arg) { @@ -2762,7 +3426,8 @@ slave_begin: #endif thd = new THD; // note that contructor of THD uses DBUG_ ! - THD_CHECK_SENTRY(thd); + thd->thread_stack = (char*)&thd; // remember where our stack is + /* Inform waiting threads that slave has started */ rli->slave_run_id++; @@ -2778,33 +3443,46 @@ slave_begin: sql_print_error("Failed during slave thread initialization"); goto err; } + thd->init_for_queries(); rli->sql_thd= thd; thd->temporary_tables = rli->save_temporary_tables; // restore temp tables - thd->thread_stack = (char*)&thd; // remember where our stack is pthread_mutex_lock(&LOCK_thread_count); threads.append(thd); pthread_mutex_unlock(&LOCK_thread_count); + /* + We are going to set slave_running to 1. Assuming slave I/O thread is + alive and connected, this is going to make Seconds_Behind_Master be 0 + i.e. "caught up". Even if we're just at start of thread. Well it's ok, at + the moment we start we can think we are caught up, and the next second we + start receiving data so we realize we are not caught up and + Seconds_Behind_Master grows. No big deal. 
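For reference, the Seconds_Behind_Master value produced by show_master_info() earlier boils down to the computation sketched below (a standalone sketch with plain C types; returning -1 here stands for the NULL that SHOW SLAVE STATUS prints when the threads are not both running):

#include <stdio.h>
#include <time.h>

/* Mirrors the SHOW SLAVE STATUS computation: the clock difference with the
   master is subtracted, the result is clamped at 0, and 0 is also reported
   when last_master_timestamp is the special "caught up" marker 0. */
static long seconds_behind_master(int io_running, int sql_running,
                                  time_t last_master_timestamp,
                                  long clock_diff_with_master)
{
  if (!io_running || !sql_running)
    return -1;                               /* shown as NULL */
  if (last_master_timestamp == 0)
    return 0;                                /* special marker: caught up */
  long tmp= (long) (time(NULL) - last_master_timestamp) - clock_diff_with_master;
  return tmp > 0 ? tmp : 0;                  /* never go below 0 */
}

int main(void)
{
  printf("%ld\n", seconds_behind_master(1, 1, time(NULL) - 42, 0)); /* ~42 */
  return 0;
}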
+ */ rli->slave_running = 1; rli->abort_slave = 0; pthread_mutex_unlock(&rli->run_lock); pthread_cond_broadcast(&rli->start_cond); - // This should always be set to 0 when the slave thread is started - rli->pending = 0; + /* Reset errors for a clean start (otherwise, if the master is idle, the SQL thread may execute no Query_log_event, so the error will remain even - though there's no problem anymore). + though there's no problem anymore). Do not reset the master timestamp + (imagine the slave has caught everything, the STOP SLAVE and START SLAVE: + as we are not sure that we are going to receive a query, we want to + remember the last master timestamp (to say how many seconds behind we are + now. + But the master timestamp is reset by RESET SLAVE & CHANGE MASTER. */ - clear_last_slave_error(rli); + clear_slave_error(rli); //tell the I/O thread to take relay_log_space_limit into account from now on pthread_mutex_lock(&rli->log_space_lock); rli->ignore_log_space_limit= 0; pthread_mutex_unlock(&rli->log_space_lock); + rli->trans_retries= 0; // start from "no error" if (init_relay_log_pos(rli, - rli->relay_log_name, - rli->relay_log_pos, + rli->group_relay_log_name, + rli->group_relay_log_pos, 1 /*need data lock*/, &errmsg)) { sql_print_error("Error initializing relay log position: %s", @@ -2812,24 +3490,36 @@ slave_begin: goto err; } THD_CHECK_SENTRY(thd); - DBUG_ASSERT(rli->relay_log_pos >= BIN_LOG_HEADER_SIZE); - DBUG_ASSERT(my_b_tell(rli->cur_log) == rli->relay_log_pos); + DBUG_ASSERT(rli->event_relay_log_pos >= BIN_LOG_HEADER_SIZE); + DBUG_ASSERT(my_b_tell(rli->cur_log) == rli->event_relay_log_pos); DBUG_ASSERT(rli->sql_thd == thd); DBUG_PRINT("master_info",("log_file_name: %s position: %s", - rli->master_log_name, - llstr(rli->master_log_pos,llbuff))); + rli->group_master_log_name, + llstr(rli->group_master_log_pos,llbuff))); if (global_system_variables.log_warnings) - sql_print_error("Slave SQL thread initialized, starting replication in \ + sql_print_information("Slave SQL thread initialized, starting replication in \ log '%s' at position %s, relay log '%s' position: %s", RPL_LOG_NAME, - llstr(rli->master_log_pos,llbuff),rli->relay_log_name, - llstr(rli->relay_log_pos,llbuff1)); + llstr(rli->group_master_log_pos,llbuff),rli->group_relay_log_name, + llstr(rli->group_relay_log_pos,llbuff1)); + + /* execute init_slave variable */ + if (sys_init_slave.value_length) + { + execute_init_command(thd, &sys_init_slave, &LOCK_sys_init_slave); + if (thd->query_error) + { + sql_print_error("\ +Slave SQL thread aborted. Can't execute init_slave query"); + goto err; + } + } /* Read queries from the IO/THREAD until this thread is killed */ while (!sql_slave_killed(thd,rli)) { - thd->proc_info = "Reading event from the relay log"; + thd->proc_info = "Reading event from the relay log"; DBUG_ASSERT(rli->sql_thd == thd); THD_CHECK_SENTRY(thd); if (exec_relay_log_event(thd,rli)) @@ -2839,16 +3529,14 @@ log '%s' at position %s, relay log '%s' position: %s", RPL_LOG_NAME, sql_print_error("\ Error running query, slave SQL thread aborted. Fix the problem, and restart \ the slave SQL thread with \"SLAVE START\". We stopped at log \ -'%s' position %s", - RPL_LOG_NAME, llstr(rli->master_log_pos, llbuff)); +'%s' position %s", RPL_LOG_NAME, llstr(rli->group_master_log_pos, llbuff)); goto err; } } /* Thread stopped. 
Print the current replication position to the log */ - sql_print_error("Slave SQL thread exiting, replication stopped in log \ - '%s' at position %s", - RPL_LOG_NAME, llstr(rli->master_log_pos,llbuff)); + sql_print_information("Slave SQL thread exiting, replication stopped in log \ + '%s' at position %s", RPL_LOG_NAME, llstr(rli->group_master_log_pos,llbuff)); err: VOID(pthread_mutex_lock(&LOCK_thread_count)); @@ -2867,7 +3555,6 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \ restarts replication from a non-transactional statement (with CHANGE MASTER). */ - rli->inside_transaction= 0; /* Wake up master_pos_wait() */ pthread_mutex_unlock(&rli->data_lock); DBUG_PRINT("info",("Signaling possibly waiting master_pos_wait() functions")); @@ -2896,19 +3583,23 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \ if (abort_slave_event_count && !rli->events_till_abort) goto slave_begin; #endif - my_thread_end(); // clean-up before broadcasting termination + my_thread_end(); pthread_exit(0); DBUG_RETURN(0); // Can't return anything here } +/* + process_io_create_file() +*/ + static int process_io_create_file(MASTER_INFO* mi, Create_file_log_event* cev) { int error = 1; ulong num_bytes; bool cev_not_written; - THD* thd; - NET* net = &mi->mysql->net; + THD *thd = mi->io_thd; + NET *net = &mi->mysql->net; DBUG_ENTER("process_io_create_file"); if (unlikely(!cev->is_valid())) @@ -2922,7 +3613,6 @@ static int process_io_create_file(MASTER_INFO* mi, Create_file_log_event* cev) DBUG_RETURN(0); } DBUG_ASSERT(cev->inited_from_old); - thd = mi->io_thd; thd->file_id = cev->file_id = mi->file_id++; thd->server_id = cev->server_id; cev_not_written = 1; @@ -2930,7 +3620,7 @@ static int process_io_create_file(MASTER_INFO* mi, Create_file_log_event* cev) if (unlikely(net_request_file(net,cev->fname))) { sql_print_error("Slave I/O: failed requesting download of '%s'", - cev->fname); + cev->fname); goto err; } @@ -2946,58 +3636,58 @@ static int process_io_create_file(MASTER_INFO* mi, Create_file_log_event* cev) { if (unlikely((num_bytes=my_net_read(net)) == packet_error)) { - sql_print_error("Network read error downloading '%s' from master", - cev->fname); - goto err; + sql_print_error("Network read error downloading '%s' from master", + cev->fname); + goto err; } if (unlikely(!num_bytes)) /* eof */ { - send_ok(net); /* 3.23 master wants it */ - /* - If we wrote Create_file_log_event, then we need to write - Execute_load_log_event. If we did not write Create_file_log_event, - then this is an empty file and we can just do as if the LOAD DATA - INFILE had not existed, i.e. write nothing. - */ - if (unlikely(cev_not_written)) - break; - Execute_load_log_event xev(thd,0,0); - xev.log_pos = mi->master_log_pos; - if (unlikely(mi->rli.relay_log.append(&xev))) - { - sql_print_error("Slave I/O: error writing Exec_load event to \ + net_write_command(net, 0, "", 0, "", 0);/* 3.23 master wants it */ + /* + If we wrote Create_file_log_event, then we need to write + Execute_load_log_event. If we did not write Create_file_log_event, + then this is an empty file and we can just do as if the LOAD DATA + INFILE had not existed, i.e. write nothing. 
+ */ + if (unlikely(cev_not_written)) + break; + Execute_load_log_event xev(thd,0,0); + xev.log_pos = mi->master_log_pos; + if (unlikely(mi->rli.relay_log.append(&xev))) + { + sql_print_error("Slave I/O: error writing Exec_load event to \ relay log"); - goto err; - } - mi->rli.relay_log.harvest_bytes_written(&mi->rli.log_space_total); - break; + goto err; + } + mi->rli.relay_log.harvest_bytes_written(&mi->rli.log_space_total); + break; } if (unlikely(cev_not_written)) { - cev->block = (char*)net->read_pos; - cev->block_len = num_bytes; - cev->log_pos = mi->master_log_pos; - if (unlikely(mi->rli.relay_log.append(cev))) - { - sql_print_error("Slave I/O: error writing Create_file event to \ + cev->block = (char*)net->read_pos; + cev->block_len = num_bytes; + cev->log_pos = mi->master_log_pos; + if (unlikely(mi->rli.relay_log.append(cev))) + { + sql_print_error("Slave I/O: error writing Create_file event to \ relay log"); - goto err; - } - cev_not_written=0; - mi->rli.relay_log.harvest_bytes_written(&mi->rli.log_space_total); + goto err; + } + cev_not_written=0; + mi->rli.relay_log.harvest_bytes_written(&mi->rli.log_space_total); } else { - aev.block = (char*)net->read_pos; - aev.block_len = num_bytes; - aev.log_pos = mi->master_log_pos; - if (unlikely(mi->rli.relay_log.append(&aev))) - { - sql_print_error("Slave I/O: error writing Append_block event to \ + aev.block = (char*)net->read_pos; + aev.block_len = num_bytes; + aev.log_pos = mi->master_log_pos; + if (unlikely(mi->rli.relay_log.append(&aev))) + { + sql_print_error("Slave I/O: error writing Append_block event to \ relay log"); - goto err; - } - mi->rli.relay_log.harvest_bytes_written(&mi->rli.log_space_total) ; + goto err; + } + mi->rli.relay_log.harvest_bytes_written(&mi->rli.log_space_total) ; } } } @@ -3006,6 +3696,7 @@ err: DBUG_RETURN(error); } + /* Start using a new binary log on the master @@ -3015,7 +3706,7 @@ err: rev The rotate log event read from the binary log DESCRIPTION - Updates the master info and relay data with the place in the next binary + Updates the master info with the place in the next binary log where we should start reading. NOTES @@ -3024,17 +3715,18 @@ err: RETURN VALUES 0 ok 1 Log event is illegal + */ static int process_io_rotate(MASTER_INFO *mi, Rotate_log_event *rev) { - int return_val= 1; DBUG_ENTER("process_io_rotate"); safe_mutex_assert_owner(&mi->data_lock); if (unlikely(!rev->is_valid())) DBUG_RETURN(1); + /* Safe copy as 'rev' has been "sanitized" in Rotate_log_event's ctor */ memcpy(mi->master_log_name, rev->new_log_ident, rev->ident_len+1); mi->master_log_pos= rev->pos; DBUG_PRINT("info", ("master_log_pos: '%s' %d", @@ -3050,7 +3742,12 @@ static int process_io_rotate(MASTER_INFO *mi, Rotate_log_event *rev) DBUG_RETURN(0); } + /* + queue_old_event() + + Writes a 3.23 event to the relay log. 
+ TODO: Test this code before release - it has to be tested on a separate setup with 3.23 master @@ -3109,8 +3806,7 @@ static int queue_old_event(MASTER_INFO *mi, const char *buf, ev->log_pos = mi->master_log_pos; switch (ev->get_type_code()) { case STOP_EVENT: - ignore_event= mi->ignore_stop_event; - mi->ignore_stop_event=0; + ignore_event= 1; inc_pos= event_len; break; case ROTATE_EVENT: @@ -3120,7 +3816,6 @@ static int queue_old_event(MASTER_INFO *mi, const char *buf, pthread_mutex_unlock(&mi->data_lock); DBUG_RETURN(1); } - mi->ignore_stop_event=1; inc_pos= 0; break; case CREATE_FILE_EVENT: @@ -3146,7 +3841,6 @@ static int queue_old_event(MASTER_INFO *mi, const char *buf, DBUG_RETURN(error); } default: - mi->ignore_stop_event=0; inc_pos= event_len; break; } @@ -3167,17 +3861,18 @@ static int queue_old_event(MASTER_INFO *mi, const char *buf, DBUG_RETURN(0); } + /* - TODO: verify the issue with stop events, see if we need them at all - in the relay log + queue_event() + */ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) { int error= 0; ulong inc_pos; - bool ignore_event= 0; RELAY_LOG_INFO *rli= &mi->rli; + pthread_mutex_t *log_lock= rli->relay_log.get_log_lock(); DBUG_ENTER("queue_event"); if (mi->old_format) @@ -3185,41 +3880,91 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) pthread_mutex_lock(&mi->data_lock); - /* - TODO: figure out if other events in addition to Rotate - require special processing - */ switch (buf[EVENT_TYPE_OFFSET]) { case STOP_EVENT: - ignore_event= mi->ignore_stop_event; - mi->ignore_stop_event= 0; - inc_pos= event_len; - break; + /* + We needn't write this event to the relay log. Indeed, it just indicates a + master server shutdown. The only thing this does is cleaning. But + cleaning is already done on a per-master-thread basis (as the master + server is shutting down cleanly, it has written all DROP TEMPORARY TABLE + and DO RELEASE_LOCK; prepared statements' deletion are TODO). + + We don't even increment mi->master_log_pos, because we may be just after + a Rotate event. Btw, in a few milliseconds we are going to have a Start + event from the next binlog (unless the master is presently running + without --log-bin). + */ + goto err; case ROTATE_EVENT: { Rotate_log_event rev(buf,event_len,0); if (unlikely(process_io_rotate(mi,&rev))) { - pthread_mutex_unlock(&mi->data_lock); - DBUG_RETURN(1); + error= 1; + goto err; } - mi->ignore_stop_event= 1; + /* + Now the I/O thread has just changed its mi->master_log_name, so + incrementing mi->master_log_pos is nonsense. + */ inc_pos= 0; break; } default: - mi->ignore_stop_event= 0; inc_pos= event_len; break; } - - if (likely(!ignore_event && - !(error= rli->relay_log.appendv(buf,event_len,0)))) + + /* + If this event is originating from this server, don't queue it. + We don't check this for 3.23 events because it's simpler like this; 3.23 + will be filtered anyway by the SQL slave thread which also tests the + server id (we must also keep this test in the SQL thread, in case somebody + upgrades a 4.0 slave which has a not-filtered relay log). + + ANY event coming from ourselves can be ignored: it is obvious for queries; + for STOP_EVENT/ROTATE_EVENT/START_EVENT: these cannot come from ourselves + (--log-slave-updates would not log that) unless this slave is also its + direct master (an unsupported, useless setup!). 
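A condensed, standalone sketch of the filtering decision implemented just below; write_to_relay_log() and remember_ignored_position() are made-up stand-ins for the relay log append and the ign_master_log_name_end/ign_master_log_pos_end bookkeeping:

#include <stdio.h>

/* Hypothetical stand-ins for the relay log append and the bookkeeping
   queue_event() performs for ignored events. */
static int write_to_relay_log(const char *buf, unsigned long len)
{
  (void) buf; (void) len;
  return 0;                                  /* pretend the append succeeded */
}
static void remember_ignored_position(unsigned long long master_log_pos)
{
  (void) master_log_pos;
}

/* Condensed decision: events originating from this very server are not
   written to the relay log, but the master position still advances so they
   are not fetched again after a reconnect. */
static int queue_event_sketch(unsigned long event_server_id,
                              unsigned long own_server_id,
                              int replicate_same_server_id,
                              const char *buf, unsigned long event_len,
                              unsigned long long *master_log_pos)
{
  if (event_server_id == own_server_id && !replicate_same_server_id)
  {
    *master_log_pos+= event_len;             /* skip it, but don't re-read it */
    remember_ignored_position(*master_log_pos);
    return 0;
  }
  if (write_to_relay_log(buf, event_len))
    return 1;                                /* relay log write failed */
  *master_log_pos+= event_len;
  return 0;
}

int main(void)
{
  unsigned long long pos= 4;
  queue_event_sketch(1, 1, 0, "ev", 100, &pos);  /* ignored, pos advances */
  queue_event_sketch(2, 1, 0, "ev", 100, &pos);  /* queued, pos advances  */
  printf("%llu\n", pos);                         /* prints 204 */
  return 0;
}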
+ */ + + pthread_mutex_lock(log_lock); + + if ((uint4korr(buf + SERVER_ID_OFFSET) == ::server_id) && + !replicate_same_server_id) { + /* + Do not write it to the relay log. + a) We still want to increment mi->master_log_pos, so that we won't + re-read this event from the master if the slave IO thread is now + stopped/restarted (more efficient if the events we are ignoring are big + LOAD DATA INFILE). + b) We want to record that we are skipping events, for the information of + the slave SQL thread, otherwise that thread may let + rli->group_relay_log_pos stay too small if the last binlog's event is + ignored. + */ mi->master_log_pos+= inc_pos; - DBUG_PRINT("info", ("master_log_pos: %d", (ulong) mi->master_log_pos)); - rli->relay_log.harvest_bytes_written(&rli->log_space_total); + memcpy(rli->ign_master_log_name_end, mi->master_log_name, FN_REFLEN); + DBUG_ASSERT(rli->ign_master_log_name_end[0]); + rli->ign_master_log_pos_end= mi->master_log_pos; + rli->relay_log.signal_update(); // the slave SQL thread needs to re-check + DBUG_PRINT("info", ("master_log_pos: %d, event originating from the same server, ignored", (ulong) mi->master_log_pos)); + } + else + { + /* write the event to the relay log */ + if (likely(!(error= rli->relay_log.appendv(buf,event_len,0)))) + { + mi->master_log_pos+= inc_pos; + DBUG_PRINT("info", ("master_log_pos: %d", (ulong) mi->master_log_pos)); + rli->relay_log.harvest_bytes_written(&rli->log_space_total); + } + rli->ign_master_log_name_end[0]= 0; // last event is not ignored } + pthread_mutex_unlock(log_lock); + +err: pthread_mutex_unlock(&mi->data_lock); DBUG_RETURN(error); } @@ -3254,7 +3999,20 @@ void end_relay_log_info(RELAY_LOG_INFO* rli) DBUG_VOID_RETURN; } -/* try to connect until successful or slave killed */ +/* + Try to connect until successful or slave killed + + SYNOPSIS + safe_connect() + thd Thread handler for slave + mysql MySQL connection handle + mi Replication handle + + RETURN + 0 ok + # Error +*/ + static int safe_connect(THD* thd, MYSQL* mysql, MASTER_INFO* mi) { return connect_to_master(thd, mysql, mi, 0, 0); @@ -3262,8 +4020,12 @@ static int safe_connect(THD* thd, MYSQL* mysql, MASTER_INFO* mi) /* - Try to connect until successful or slave killed or we have retried - master_retry_count times + SYNOPSIS + connect_to_master() + + IMPLEMENTATION + Try to connect until successful or slave killed or we have retried + master_retry_count times */ static int connect_to_master(THD* thd, MYSQL* mysql, MASTER_INFO* mi, @@ -3278,27 +4040,43 @@ static int connect_to_master(THD* thd, MYSQL* mysql, MASTER_INFO* mi, #ifndef DBUG_OFF events_till_disconnect = disconnect_slave_event_count; #endif - uint client_flag=0; + ulong client_flag= CLIENT_REMEMBER_OPTIONS; if (opt_slave_compressed_protocol) client_flag=CLIENT_COMPRESS; /* We will use compression */ + mysql_options(mysql, MYSQL_OPT_CONNECT_TIMEOUT, (char *) &slave_net_timeout); + mysql_options(mysql, MYSQL_OPT_READ_TIMEOUT, (char *) &slave_net_timeout); + +#ifdef HAVE_OPENSSL + if (mi->ssl) + mysql_ssl_set(mysql, + mi->ssl_key[0]?mi->ssl_key:0, + mi->ssl_cert[0]?mi->ssl_cert:0, + mi->ssl_ca[0]?mi->ssl_ca:0, + mi->ssl_capath[0]?mi->ssl_capath:0, + mi->ssl_cipher[0]?mi->ssl_cipher:0); +#endif + + mysql_options(mysql, MYSQL_SET_CHARSET_NAME, default_charset_info->csname); + /* This one is not strictly needed but we have it here for completeness */ + mysql_options(mysql, MYSQL_SET_CHARSET_DIR, (char *) charsets_dir); + while (!(slave_was_killed = io_slave_killed(thd,mi)) && - (reconnect ? 
mc_mysql_reconnect(mysql) != 0: - !mc_mysql_connect(mysql, mi->host, mi->user, mi->password, 0, - mi->port, 0, client_flag, - thd->variables.net_read_timeout))) + (reconnect ? mysql_reconnect(mysql) != 0 : + mysql_real_connect(mysql, mi->host, mi->user, mi->password, 0, + mi->port, 0, client_flag) == 0)) { /* Don't repeat last error */ - if (mc_mysql_errno(mysql) != last_errno) + if ((int)mysql_errno(mysql) != last_errno) { - last_errno=mc_mysql_errno(mysql); + last_errno=mysql_errno(mysql); suppress_warnings= 0; sql_print_error("Slave I/O thread: error %s to master \ '%s@%s:%d': \ Error: '%s' errno: %d retry-time: %d retries: %d", (reconnect ? "reconnecting" : "connecting"), mi->user,mi->host,mi->port, - mc_mysql_error(mysql), last_errno, + mysql_error(mysql), last_errno, mi->connect_retry, master_retry_count); } @@ -3324,7 +4102,7 @@ Error: '%s' errno: %d retry-time: %d retries: %d", if (reconnect) { if (!suppress_warnings && global_system_variables.log_warnings) - sql_print_error("Slave: connected to master '%s@%s:%d',\ + sql_print_information("Slave: connected to master '%s@%s:%d',\ replication resumed in log '%s' at position %s", mi->user, mi->host, mi->port, IO_RPL_LOG_NAME, @@ -3346,14 +4124,18 @@ replication resumed in log '%s' at position %s", mi->user, /* - Try to connect until successful or slave killed or we have retried - master_retry_count times + safe_reconnect() + + IMPLEMENTATION + Try to connect until successful or slave killed or we have retried + master_retry_count times */ static int safe_reconnect(THD* thd, MYSQL* mysql, MASTER_INFO* mi, bool suppress_warnings) { - return connect_to_master(thd, mysql, mi, 1, suppress_warnings); + DBUG_ENTER("safe_reconnect"); + DBUG_RETURN(connect_to_master(thd, mysql, mi, 1, suppress_warnings)); } @@ -3392,18 +4174,14 @@ bool flush_relay_log_info(RELAY_LOG_INFO* rli) IO_CACHE *file = &rli->info_file; char buff[FN_REFLEN*2+22*2+4], *pos; - /* sql_thd is not set when calling from init_slave() */ - if ((rli->sql_thd && rli->sql_thd->options & OPTION_BEGIN)) - return 0; // Wait for COMMIT - my_b_seek(file, 0L); - pos=strmov(buff, rli->relay_log_name); + pos=strmov(buff, rli->group_relay_log_name); *pos++='\n'; - pos=longlong2str(rli->relay_log_pos, pos, 10); + pos=longlong2str(rli->group_relay_log_pos, pos, 10); *pos++='\n'; - pos=strmov(pos, rli->master_log_name); + pos=strmov(pos, rli->group_master_log_name); *pos++='\n'; - pos=longlong2str(rli->master_log_pos, pos, 10); + pos=longlong2str(rli->group_master_log_pos, pos, 10); *pos='\n'; if (my_b_write(file, (byte*) buff, (ulong) (pos-buff)+1)) error=1; @@ -3415,8 +4193,7 @@ bool flush_relay_log_info(RELAY_LOG_INFO* rli) /* - This function is called when we notice that the current "hot" log - got rotated under our feet. + Called when we notice that the current "hot" log got rotated under our feet. 
*/ static IO_CACHE *reopen_relay_log(RELAY_LOG_INFO *rli, const char **errmsg) @@ -3426,7 +4203,7 @@ static IO_CACHE *reopen_relay_log(RELAY_LOG_INFO *rli, const char **errmsg) DBUG_ENTER("reopen_relay_log"); IO_CACHE *cur_log = rli->cur_log=&rli->cache_buf; - if ((rli->cur_log_fd=open_binlog(cur_log,rli->relay_log_name, + if ((rli->cur_log_fd=open_binlog(cur_log,rli->event_relay_log_name, errmsg)) <0) DBUG_RETURN(0); /* @@ -3434,7 +4211,7 @@ static IO_CACHE *reopen_relay_log(RELAY_LOG_INFO *rli, const char **errmsg) relay_log_pos Current log pos pending Number of bytes already processed from the event */ - my_b_seek(cur_log,rli->relay_log_pos + rli->pending); + my_b_seek(cur_log,rli->event_relay_log_pos); DBUG_RETURN(cur_log); } @@ -3443,20 +4220,21 @@ Log_event* next_event(RELAY_LOG_INFO* rli) { Log_event* ev; IO_CACHE* cur_log = rli->cur_log; - pthread_mutex_t *log_lock = rli->relay_log.get_log_lock(); + pthread_mutex_t *log_lock = rli->relay_log.get_log_lock(); const char* errmsg=0; THD* thd = rli->sql_thd; + DBUG_ENTER("next_event"); DBUG_ASSERT(thd != 0); /* For most operations we need to protect rli members with data_lock, - so we will hold it for the most of the loop below - However, we will release it whenever it is worth the hassle, - and in the cases when we go into a pthread_cond_wait() with the - non-data_lock mutex + so we assume calling function acquired this mutex for us and we will + hold it for the most of the loop below However, we will release it + whenever it is worth the hassle, and in the cases when we go into a + pthread_cond_wait() with the non-data_lock mutex */ - pthread_mutex_lock(&rli->data_lock); + safe_mutex_assert_owner(&rli->data_lock); while (!sql_slave_killed(thd,rli)) { @@ -3493,23 +4271,19 @@ Log_event* next_event(RELAY_LOG_INFO* rli) } #ifndef DBUG_OFF { - DBUG_ASSERT(my_b_tell(cur_log) >= BIN_LOG_HEADER_SIZE); - /* The next assertion sometimes (very rarely) fails, let's try to track it */ char llbuf1[22], llbuf2[22]; - /* Merging man, please be careful with this; in 4.1, the assertion below is - replaced by - DBUG_ASSERT(my_b_tell(cur_log) == rli->event_relay_log_pos); - so you should not merge blindly (fortunately it won't build then), and - instead modify the merged code. Thanks. */ - DBUG_PRINT("info", ("Before assert, my_b_tell(cur_log)=%s \ -rli->relay_log_pos=%s rli->pending=%lu", + DBUG_ASSERT(my_b_tell(cur_log) >= BIN_LOG_HEADER_SIZE); + /* + The next assertion sometimes (very rarely) fails, let's try to track + it + */ + DBUG_PRINT("info", ("\ +Before assert, my_b_tell(cur_log)=%s rli->event_relay_log_pos=%s", llstr(my_b_tell(cur_log),llbuf1), - llstr(rli->relay_log_pos,llbuf2), - rli->pending)); - DBUG_ASSERT(my_b_tell(cur_log) == rli->relay_log_pos + rli->pending); + llstr(rli->group_relay_log_pos,llbuf2))); + DBUG_ASSERT(my_b_tell(cur_log) == rli->event_relay_log_pos); } #endif - /* Relay log is always in new format - if the master is 3.23, the I/O thread will convert the format for us @@ -3519,7 +4293,6 @@ rli->relay_log_pos=%s rli->pending=%lu", DBUG_ASSERT(thd==rli->sql_thd); if (hot_log) pthread_mutex_unlock(log_lock); - pthread_mutex_unlock(&rli->data_lock); DBUG_RETURN(ev); } DBUG_ASSERT(thd==rli->sql_thd); @@ -3541,7 +4314,44 @@ rli->relay_log_pos=%s rli->pending=%lu", */ if (hot_log) { - DBUG_ASSERT(rli->relay_log.get_open_count() == rli->cur_log_old_open_count); + /* + We say in Seconds_Behind_Master that we have "caught up". 
Note that + for example if network link is broken but I/O slave thread hasn't + noticed it (slave_net_timeout not elapsed), then we'll say "caught + up" whereas we're not really caught up. Fixing that would require + internally cutting timeout in smaller pieces in network read, no + thanks. Another example: SQL has caught up on I/O, now I/O has read + a new event and is queuing it; the false "0" will exist until SQL + finishes executing the new event; it will be look abnormal only if + the events have old timestamps (then you get "many", 0, "many"). + Transient phases like this can't really be fixed. + */ + time_t save_timestamp= rli->last_master_timestamp; + rli->last_master_timestamp= 0; + + DBUG_ASSERT(rli->relay_log.get_open_count() == + rli->cur_log_old_open_count); + + if (rli->ign_master_log_name_end[0]) + { + /* We generate and return a Rotate, to make our positions advance */ + DBUG_PRINT("info",("seeing an ignored end segment")); + ev= new Rotate_log_event(thd, rli->ign_master_log_name_end, + 0, rli->ign_master_log_pos_end, + Rotate_log_event::DUP_NAME | + Rotate_log_event::ZERO_LEN); + rli->ign_master_log_name_end[0]= 0; + pthread_mutex_unlock(log_lock); + if (unlikely(!ev)) + { + errmsg= "Slave SQL thread failed to create a Rotate event " + "(out of memory?), SHOW SLAVE STATUS may be inaccurate"; + goto err; + } + ev->server_id= 0; // don't be ignored by slave SQL thread + DBUG_RETURN(ev); + } + /* We can, and should release data_lock while we are waiting for update. If we do not, show slave status will block @@ -3587,6 +4397,7 @@ rli->relay_log_pos=%s rli->pending=%lu", rli->relay_log.wait_for_update(rli->sql_thd, 1); // re-acquire data lock since we released it earlier pthread_mutex_lock(&rli->data_lock); + rli->last_master_timestamp= save_timestamp; continue; } /* @@ -3599,16 +4410,25 @@ rli->relay_log_pos=%s rli->pending=%lu", my_close(rli->cur_log_fd, MYF(MY_WME)); rli->cur_log_fd = -1; - /* - TODO: make skip_log_purge a start-up option. At this point this - is not critical priority - */ - if (!rli->skip_log_purge) + if (relay_log_purge) { - // purge_first_log will properly set up relay log coordinates in rli - if (rli->relay_log.purge_first_log(rli)) + /* + purge_first_log will properly set up relay log coordinates in rli. + If the group's coordinates are equal to the event's coordinates + (i.e. the relay log was not rotated in the middle of a group), + we can purge this relay log too. 
+ We do ulonglong and string comparisons, this may be slow but + - purging the last relay log is nice (it can save 1GB of disk), so we + like to detect the case where we can do it, and given this, + - I see no better detection method + - purge_first_log is not called that often + */ + if (rli->relay_log.purge_first_log + (rli, + rli->group_relay_log_pos == rli->event_relay_log_pos + && !strcmp(rli->group_relay_log_name,rli->event_relay_log_name))) { - errmsg = "Error purging processed log"; + errmsg = "Error purging processed logs"; goto err; } } @@ -3626,10 +4446,9 @@ rli->relay_log_pos=%s rli->pending=%lu", errmsg = "error switching to the next log"; goto err; } - rli->relay_log_pos = BIN_LOG_HEADER_SIZE; - rli->pending=0; - strmake(rli->relay_log_name,rli->linfo.log_file_name, - sizeof(rli->relay_log_name)-1); + rli->event_relay_log_pos = BIN_LOG_HEADER_SIZE; + strmake(rli->event_relay_log_name,rli->linfo.log_file_name, + sizeof(rli->event_relay_log_name)-1); flush_relay_log_info(rli); } @@ -3649,8 +4468,9 @@ rli->relay_log_pos=%s rli->pending=%lu", if (rli->relay_log.is_active(rli->linfo.log_file_name)) { #ifdef EXTRA_DEBUG - sql_print_error("next log '%s' is currently active", - rli->linfo.log_file_name); + if (global_system_variables.log_warnings) + sql_print_error("next log '%s' is currently active", + rli->linfo.log_file_name); #endif rli->cur_log= cur_log= rli->relay_log.get_log_file(); rli->cur_log_old_open_count= rli->relay_log.get_open_count(); @@ -3678,8 +4498,9 @@ rli->relay_log_pos=%s rli->pending=%lu", from hot to cold, but not from cold to hot). No need for LOCK_log. */ #ifdef EXTRA_DEBUG - sql_print_error("next log '%s' is not active", - rli->linfo.log_file_name); + if (global_system_variables.log_warnings) + sql_print_error("next log '%s' is not active", + rli->linfo.log_file_name); #endif // open_binlog() will check the magic header if ((rli->cur_log_fd=open_binlog(cur_log,rli->linfo.log_file_name, @@ -3698,7 +4519,7 @@ rli->relay_log_pos=%s rli->pending=%lu", event(errno: %d cur_log->error: %d)", my_errno,cur_log->error); // set read position to the beginning of the event - my_b_seek(cur_log,rli->relay_log_pos+rli->pending); + my_b_seek(cur_log,rli->event_relay_log_pos); /* otherwise, we have had a partial read */ errmsg = "Aborting slave SQL thread because of partial event read"; break; // To end of function @@ -3708,7 +4529,6 @@ event(errno: %d cur_log->error: %d)", errmsg = "slave SQL thread was killed"; err: - pthread_mutex_unlock(&rli->data_lock); if (errmsg) sql_print_error("Error reading relay log event: %s", errmsg); DBUG_RETURN(0); @@ -3727,6 +4547,7 @@ void rotate_relay_log(MASTER_INFO* mi) RELAY_LOG_INFO* rli= &mi->rli; lock_slave_threads(mi); + pthread_mutex_lock(&mi->data_lock); pthread_mutex_lock(&rli->data_lock); /* We need to test inited because otherwise, new_file() will attempt to lock @@ -3757,6 +4578,7 @@ void rotate_relay_log(MASTER_INFO* mi) rli->relay_log.harvest_bytes_written(&rli->log_space_total); end: pthread_mutex_unlock(&rli->data_lock); + pthread_mutex_unlock(&mi->data_lock); unlock_slave_threads(mi); DBUG_VOID_RETURN; } @@ -3766,3 +4588,6 @@ end: template class I_List_iterator<i_string>; template class I_List_iterator<i_string_pair>; #endif + + +#endif /* HAVE_REPLICATION */ diff --git a/sql/slave.h b/sql/slave.h index a01ff93b4af..f780b7c8473 100644 --- a/sql/slave.h +++ b/sql/slave.h @@ -14,6 +14,8 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ 
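A few hunks above, next_event() saves last_master_timestamp and forces it to 0 while the SQL thread waits on an exhausted hot log, which is what makes the slave report itself as "caught up". The sketch below is a hypothetical helper, not code from this patch; it only illustrates how such a lag value behaves, using the timestamp of the last event executed by the SQL thread and the master/slave clock offset (clock_diff_with_master, described later in this patch), with negative transients clamped to zero.

#include <ctime>

/*
  Sketch only: seconds_behind_sketch() is an illustrative helper, not the
  server's function. last_master_timestamp and clock_diff_with_master stand
  for the fields of the same names in RELAY_LOG_INFO / MASTER_INFO.
*/
static long seconds_behind_sketch(time_t last_master_timestamp,
                                  long clock_diff_with_master)
{
  if (last_master_timestamp == 0)       /* hot log exhausted: "caught up" */
    return 0;
  long lag= (long) (time((time_t*) 0) - last_master_timestamp) -
            clock_diff_with_master;
  return lag > 0 ? lag : 0;             /* clamp transient negative values */
}
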
+#ifdef HAVE_REPLICATION + #ifndef SLAVE_H #define SLAVE_H @@ -23,22 +25,28 @@ #define MAX_SLAVE_ERRMSG 1024 #define MAX_SLAVE_ERROR 2000 -/* - The replication is accomplished by starting two threads - I/O - thread, and SQL thread. I/O thread is associated with its - MASTER_INFO struct, so MASTER_INFO can be viewed as I/O thread - descriptor. SQL thread is associated with RELAY_LOG_INFO struct. - - I/O thread reads maintains a connection to the master, and reads log - events from the master as they arrive, queueing them by writing them - out into the temporary slave binary log (relay log). The SQL thread, - in turn, reads the slave binary log executing each event. - - Relay log is needed to be able to handle situations when there is a large - backlog of unprocessed events from the master (eg. one particular update - takes a day to finish), and to be able to restart the slave server without - having to re-read the master updates. - */ +/***************************************************************************** + + MySQL Replication + + Replication is implemented via two types of threads: + + I/O Thread - One of these threads is started for each master server. + They maintain a connection to their master server, read log + events from the master as they arrive, and queues them into + a single, shared relay log file. A MASTER_INFO struct + represents each of these threads. + + SQL Thread - One of these threads is started and reads from the relay log + file, executing each event. A RELAY_LOG_INFO struct + represents this thread. + + Buffering in the relay log file makes it unnecessary to reread events from + a master server across a slave restart. It also decouples the slave from + the master where long-running updates and event logging are concerned--ie + it can continue to log new events while a slow query executes on the slave. + +*****************************************************************************/ /* MUTEXES in replication: @@ -74,7 +82,7 @@ (so that you have to update the .index file). */ -extern ulong slave_net_timeout, master_retry_count; +extern ulong master_retry_count; extern MY_BITMAP slave_error_mask; extern bool use_slave_mask; extern char* slave_load_tmpdir; @@ -91,10 +99,30 @@ enum enum_binlog_formats { BINLOG_FORMAT_323_GEQ_57 }; /* - st_relay_log_info contains information on the current relay log and - relay log offset, and master log name and log sequence corresponding to the - last update. Additionally, misc information specific to the SQL thread is - included. + 3 possible values for MASTER_INFO::slave_running and + RELAY_LOG_INFO::slave_running. + The values 0,1,2 are very important: to keep the diff small, I didn't + substitute places where we use 0/1 with the newly defined symbols. So don't change + these values. + The same way, code is assuming that in RELAY_LOG_INFO we use only values + 0/1. + I started with using an enum, but + enum_variable=1; is not legal so would have required many line changes. +*/ +#define MYSQL_SLAVE_NOT_RUN 0 +#define MYSQL_SLAVE_RUN_NOT_CONNECT 1 +#define MYSQL_SLAVE_RUN_CONNECT 2 + +/**************************************************************************** + + Replication SQL Thread + + st_relay_log_info contains: + - the current relay log + - the current relay log offset + - master log name + - master log sequence corresponding to the last update + - misc information specific to the SQL thread st_relay_log_info is initialized from the slave.info file if such exists. Otherwise, data members are intialized with defaults. 
The initialization is @@ -108,7 +136,8 @@ enum enum_binlog_formats { master_log_pos To clean up, call end_relay_log_info() - */ + +*****************************************************************************/ typedef struct st_relay_log_info { @@ -120,12 +149,6 @@ typedef struct st_relay_log_info cur_log_fd - file descriptor of the current read relay log */ File info_fd,cur_log_fd; - /* name of current read relay log */ - char relay_log_name[FN_REFLEN]; - /* master log name corresponding to current read position */ - char master_log_name[FN_REFLEN]; - /* original log position of last processed event */ - volatile my_off_t master_log_pos; /* Protected with internal locks. @@ -170,15 +193,36 @@ typedef struct st_relay_log_info uint32 cur_log_old_open_count; /* - Current offset in the relay log. - pending - in some cases we do not increment offset immediately after - processing an event, because the following event needs to be processed - atomically together with this one ( so far, there is only one type of - such event - Intvar_event that sets auto_increment value). However, once - both events have been processed, we need to increment by the cumulative - offset. pending stored the extra offset to be added to the position. + Let's call a group (of events): + - a transaction + or + - an autocommitting query + its associated events (INSERT_ID, + TIMESTAMP...) + We need these rli coordinates: + - relay log name and position of the beginning of the group we are currently + executing. Needed to know where we have to restart when replication has + stopped in the middle of a group (which has been rolled back by the slave). + - relay log name and position just after the event we have just + executed. This event is part of the current group. + Formerly we only had the immediately above coordinates, plus a 'pending' + variable, but this dealt incorrectly with the case of a transaction starting in one + relay log and finishing (committing) in another relay log. This can + happen when, for example, the relay log gets rotated because of + max_binlog_size. */ - ulonglong relay_log_pos, pending; + char group_relay_log_name[FN_REFLEN]; + ulonglong group_relay_log_pos; + char event_relay_log_name[FN_REFLEN]; + ulonglong event_relay_log_pos; + /* + Original log name and position of the group we're currently executing + (whose coordinates are group_relay_log_name/pos in the relay log) + in the master's binlog. These concern the *group*, because in the master's + binlog the log_pos that comes with each event is the position of the + beginning of the group. + */ + char group_master_log_name[FN_REFLEN]; + volatile my_off_t group_master_log_pos; /* Handling of the relay_log_space_limit optional constraint. @@ -201,6 +245,8 @@ typedef struct st_relay_log_info ulonglong future_group_master_log_pos; #endif + time_t last_master_timestamp; + /* Needed for problems when slave stops and we want to restart it skipping one or more events in the master log that have caused @@ -220,27 +266,109 @@ typedef struct st_relay_log_info /* if not set, the value of other members of the structure are undefined */ bool inited; - volatile bool abort_slave, slave_running; - bool skip_log_purge; - bool inside_transaction; + volatile bool abort_slave; + volatile uint slave_running; + + /* + Condition and its parameters from START SLAVE UNTIL clause. + + UNTIL condition is tested with the is_until_satisfied() method, which is + called by exec_relay_log_event().
is_until_satisfied() caches the result + of the comparison of log names because log names don't change very often; + this cache is invalidated by parts of code which change log names with + notify_*_log_name_updated() methods. (They need to be called only if SQL + thread is running). + */ + + enum {UNTIL_NONE= 0, UNTIL_MASTER_POS, UNTIL_RELAY_POS} until_condition; + char until_log_name[FN_REFLEN]; + ulonglong until_log_pos; + /* extension extracted from log_name and converted to int */ + ulong until_log_name_extension; + /* + Cached result of comparison of until_log_name and current log name + -2 means uninitialized, -1,0,1 are comparison results + */ + enum + { + UNTIL_LOG_NAMES_CMP_UNKNOWN= -2, UNTIL_LOG_NAMES_CMP_LESS= -1, + UNTIL_LOG_NAMES_CMP_EQUAL= 0, UNTIL_LOG_NAMES_CMP_GREATER= 1 + } until_log_names_cmp_result; + + /* + trans_retries varies between 0 and slave_transaction_retries and counts how + many times the slave has retried the present transaction; gets reset to 0 + when the transaction finally succeeds. retried_trans is a cumulative + counter: how many times the slave has retried a transaction (any) since + slave started. + */ + ulong trans_retries, retried_trans; + + /* + If the end of the hot relay log is made of master's events ignored by the + slave I/O thread, these two keep track of the coords (in the master's + binlog) of the last of these events seen by the slave I/O thread. If not, + ign_master_log_name_end[0] == 0. + As they are like a Rotate event read/written from/to the relay log, they + are both protected by rli->relay_log.LOCK_log. + */ + char ign_master_log_name_end[FN_REFLEN]; + ulonglong ign_master_log_pos_end; st_relay_log_info(); ~st_relay_log_info(); - void inc_pending(ulonglong val); - void inc_pos(ulonglong val, ulonglong log_pos, bool skip_lock=0); - void read_pos(ulonglong& var); + + /* + Invalidate cached until_log_name and group_relay_log_name comparison + result. Should be called after any update of group_relay_log_name if + there is a chance that sql_thread is running. + */ + inline void notify_group_relay_log_name_update() + { + if (until_condition==UNTIL_RELAY_POS) + until_log_names_cmp_result= UNTIL_LOG_NAMES_CMP_UNKNOWN; + } + + /* + The same as previous but for group_master_log_name. + */ + inline void notify_group_master_log_name_update() + { + if (until_condition==UNTIL_MASTER_POS) + until_log_names_cmp_result= UNTIL_LOG_NAMES_CMP_UNKNOWN; + } + + inline void inc_event_relay_log_pos(ulonglong val) + { + event_relay_log_pos+= val; + } + + void inc_group_relay_log_pos(ulonglong val, ulonglong log_pos, bool skip_lock=0); int wait_for_pos(THD* thd, String* log_name, longlong log_pos, longlong timeout); void close_temporary_tables(); + + /* Check if UNTIL condition is satisfied. See slave.cc for more. */ + bool is_until_satisfied(); + inline ulonglong until_pos() + { + return ((until_condition == UNTIL_MASTER_POS) ? group_master_log_pos : + group_relay_log_pos); + } } RELAY_LOG_INFO; Log_event* next_event(RELAY_LOG_INFO* rli); -/* - st_master_info contains information about how to connect to a master, - current master log name, and current log offset, as well as misc - control variables +/***************************************************************************** + + Replication IO Thread + + st_master_info contains: + - information about how to connect to a master + - current master log name + - current master log offset + - misc control variables st_master_info is initialized once from the master.info file if such exists.
Otherwise, data members corresponding to master.info fields @@ -262,21 +390,24 @@ Log_event* next_event(RELAY_LOG_INFO* rli); flush_master_info() is required. To clean up, call end_master_info() -*/ - +*****************************************************************************/ + typedef struct st_master_info { + /* the variables below are needed because we can change masters on the fly */ char master_log_name[FN_REFLEN]; char host[HOSTNAME_LENGTH+1]; char user[USERNAME_LENGTH+1]; char password[MAX_PASSWORD_LENGTH+1]; + my_bool ssl; // enables use of SSL connection if true + char ssl_ca[FN_REFLEN], ssl_capath[FN_REFLEN], ssl_cert[FN_REFLEN]; + char ssl_cipher[FN_REFLEN], ssl_key[FN_REFLEN]; my_off_t master_log_pos; File fd; // we keep the file open, so we need to remember the file pointer IO_CACHE file; - /* the variables below are needed because we can change masters on the fly */ pthread_mutex_t data_lock,run_lock; pthread_cond_t data_cond,start_cond,stop_cond; THD *io_thd; @@ -289,17 +420,30 @@ typedef struct st_master_info int events_till_abort; #endif bool inited; - enum enum_binlog_formats old_format; /* binlog is in 3.23 format */ - volatile bool abort_slave, slave_running; + enum enum_binlog_formats old_format; + volatile bool abort_slave; + volatile uint slave_running; volatile ulong slave_run_id; - bool ignore_stop_event; + /* + The difference in seconds between the clock of the master and the clock of + the slave (second - first). It must be signed as it may be <0 or >0. + clock_diff_with_master is computed when the I/O thread starts; for this the + I/O thread does a SELECT UNIX_TIMESTAMP() on the master. + "how late the slave is compared to the master" is computed like this: + clock_of_slave - last_timestamp_executed_by_SQL_thread - clock_diff_with_master + + */ + long clock_diff_with_master; st_master_info() - :fd(-1), io_thd(0), inited(0), old_format(BINLOG_FORMAT_CURRENT), + :ssl(0), fd(-1), io_thd(0), inited(0), old_format(BINLOG_FORMAT_CURRENT), abort_slave(0),slave_running(0), slave_run_id(0) { host[0] = 0; user[0] = 0; password[0] = 0; - bzero((char *)&file, sizeof(file)); + ssl_ca[0]= 0; ssl_capath[0]= 0; ssl_cert[0]= 0; + ssl_cipher[0]= 0; ssl_key[0]= 0; + + bzero((char*) &file, sizeof(file)); pthread_mutex_init(&run_lock, MY_MUTEX_INIT_FAST); pthread_mutex_init(&data_lock, MY_MUTEX_INIT_FAST); pthread_cond_init(&data_cond, NULL); @@ -332,7 +476,7 @@ typedef struct st_table_rule_ent #define TABLE_RULE_ARR_SIZE 16 #define MAX_SLAVE_ERRMSG 1024 -#define RPL_LOG_NAME (rli->master_log_name[0] ? rli->master_log_name :\ +#define RPL_LOG_NAME (rli->group_master_log_name[0] ? rli->group_master_log_name :\ "FIRST") #define IO_RPL_LOG_NAME (mi->master_log_name[0] ? 
mi->master_log_name :\ "FIRST") @@ -357,7 +501,7 @@ int terminate_slave_threads(MASTER_INFO* mi, int thread_mask, int terminate_slave_thread(THD* thd, pthread_mutex_t* term_mutex, pthread_mutex_t* cond_lock, pthread_cond_t* term_cond, - volatile bool* slave_running); + volatile uint* slave_running); int start_slave_threads(bool need_slave_mutex, bool wait_for_start, MASTER_INFO* mi, const char* master_info_fname, const char* slave_info_fname, int thread_mask); @@ -370,9 +514,10 @@ int start_slave_threads(bool need_slave_mutex, bool wait_for_start, int start_slave_thread(pthread_handler h_func, pthread_mutex_t* start_lock, pthread_mutex_t *cond_lock, pthread_cond_t* start_cond, - volatile bool *slave_running, + volatile uint *slave_running, volatile ulong *slave_run_id, - MASTER_INFO* mi); + MASTER_INFO* mi, + bool high_priority); /* If fd is -1, dump to NET */ int mysql_table_dump(THD* thd, const char* db, @@ -382,11 +527,13 @@ int mysql_table_dump(THD* thd, const char* db, int fetch_master_table(THD* thd, const char* db_name, const char* table_name, MASTER_INFO* mi, MYSQL* mysql, bool overwrite); +void table_rule_ent_hash_to_str(String* s, HASH* h); +void table_rule_ent_dynamic_array_to_str(String* s, DYNAMIC_ARRAY* a); int show_master_info(THD* thd, MASTER_INFO* mi); int show_binlog_info(THD* thd); /* See if the query uses any tables that should not be replicated */ -int tables_ok(THD* thd, TABLE_LIST* tables); +bool tables_ok(THD* thd, TABLE_LIST* tables); /* Check to see if the database is ok to operate on with respect to the @@ -400,15 +547,16 @@ int add_table_rule(HASH* h, const char* table_spec); int add_wild_table_rule(DYNAMIC_ARRAY* a, const char* table_spec); void init_table_rule_hash(HASH* h, bool* h_inited); void init_table_rule_array(DYNAMIC_ARRAY* a, bool* a_inited); -const char *rewrite_db(const char* db); -const char *print_slave_db_safe(const char* db); +const char *rewrite_db(const char* db, uint32 *new_db_len); +const char *print_slave_db_safe(const char *db); int check_expected_error(THD* thd, RELAY_LOG_INFO* rli, int error_code); void skip_load_data_infile(NET* net); void slave_print_error(RELAY_LOG_INFO* rli, int err_code, const char* msg, ...); void end_slave(); /* clean up */ void init_master_info_with_options(MASTER_INFO* mi); -void clear_last_slave_error(RELAY_LOG_INFO* rli); +void clear_until_condition(RELAY_LOG_INFO* rli); +void clear_slave_error(RELAY_LOG_INFO* rli); int init_master_info(MASTER_INFO* mi, const char* master_info_fname, const char* slave_info_fname, bool abort_if_no_master_info_file, @@ -445,8 +593,16 @@ extern my_string master_user, master_password, master_host, master_info_file, relay_log_info_file, report_user, report_host, report_password; +extern my_bool master_ssl; +extern my_string master_ssl_ca, master_ssl_capath, master_ssl_cert, + master_ssl_cipher, master_ssl_key; + extern I_List<i_string> replicate_do_db, replicate_ignore_db; extern I_List<i_string_pair> replicate_rewrite_db; extern I_List<THD> threads; #endif +#else +#define SLAVE_IO 1 +#define SLAVE_SQL 2 +#endif /* HAVE_REPLICATION */ diff --git a/sql/spatial.cc b/sql/spatial.cc new file mode 100644 index 00000000000..684f7e9ecf3 --- /dev/null +++ b/sql/spatial.cc @@ -0,0 +1,1938 @@ +/* Copyright (C) 2004 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include "mysql_priv.h" + +#ifdef HAVE_SPATIAL + +#define MAX_DIGITS_IN_DOUBLE 16 + +/***************************** Gis_class_info *******************************/ + +Geometry::Class_info *Geometry::ci_collection[Geometry::wkb_end+1]= +{ + NULL, NULL, NULL, NULL, NULL, NULL, NULL +}; + +static Geometry::Class_info **ci_collection_end= + Geometry::ci_collection+Geometry::wkb_end + 1; + +Geometry::Class_info::Class_info(const char *name, int type_id, + void(*create_func)(void *)): + m_name(name, strlen(name)), m_type_id(type_id), m_create_func(create_func) +{ + ci_collection[type_id]= this; +} + +static void create_point(void *buffer) +{ + new(buffer) Gis_point; +} + +static void create_linestring(void *buffer) +{ + new(buffer) Gis_line_string; +} + +static void create_polygon(void *buffer) +{ + new(buffer) Gis_polygon; +} + +static void create_multipoint(void *buffer) +{ + new(buffer) Gis_multi_point; +} + +static void create_multipolygon(void *buffer) +{ + new(buffer) Gis_multi_polygon; +} + +static void create_multilinestring(void *buffer) +{ + new(buffer) Gis_multi_line_string; +} + +static void create_geometrycollection(void *buffer) +{ + new(buffer) Gis_geometry_collection; +} + + + +static Geometry::Class_info point_class("POINT", + Geometry::wkb_point, create_point); + +static Geometry::Class_info linestring_class("LINESTRING", + Geometry::wkb_linestring, + create_linestring); +static Geometry::Class_info polygon_class("POLYGON", + Geometry::wkb_polygon, + create_polygon); +static Geometry::Class_info multipoint_class("MULTIPOINT", + Geometry::wkb_multipoint, + create_multipoint); +static Geometry::Class_info +multilinestring_class("MULTILINESTRING", + Geometry::wkb_multilinestring, create_multilinestring); +static Geometry::Class_info multipolygon_class("MULTIPOLYGON", + Geometry::wkb_multipolygon, + create_multipolygon); +static Geometry::Class_info +geometrycollection_class("GEOMETRYCOLLECTION",Geometry::wkb_geometrycollection, + create_geometrycollection); + +static void get_point(double *x, double *y, const char *data) +{ + float8get(*x, data); + float8get(*y, data + SIZEOF_STORED_DOUBLE); +} + +/***************************** Geometry *******************************/ + +Geometry::Class_info *Geometry::find_class(const char *name, uint32 len) +{ + for (Class_info **cur_rt= ci_collection; + cur_rt < ci_collection_end; cur_rt++) + { + if (*cur_rt && + ((*cur_rt)->m_name.length == len) && + (my_strnncoll(&my_charset_latin1, + (const uchar*) (*cur_rt)->m_name.str, len, + (const uchar*) name, len) == 0)) + return *cur_rt; + } + return 0; +} + + +Geometry *Geometry::construct(Geometry_buffer *buffer, + const char *data, uint32 data_len) +{ + uint32 geom_type; + Geometry *result; + char byte_order; + + if (data_len < SRID_SIZE + WKB_HEADER_SIZE) // < 4 + (1 + 4) + return NULL; + byte_order= data[SRID_SIZE]; + geom_type= uint4korr(data + SRID_SIZE + 1); + if (!(result= create_by_typeid(buffer, (int) geom_type))) + return NULL; + result->m_data= data+ SRID_SIZE + WKB_HEADER_SIZE; + result->m_data_end= data + data_len; + return result; +} + + 
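Geometry::construct() above assumes a fixed layout for a stored geometry value: a 4-byte SRID, then the WKB header (one byte-order byte plus a 4-byte type code), then the type-specific payload, which is why it rejects anything shorter than 4 + (1 + 4) bytes. The standalone reader below is a sketch of that same split, assuming little-endian storage (the wkb_ndr case that uint4korr reads in the server); the struct and function names are illustrative only, not part of this patch.

#include <cstdint>
#include <cstring>

/* Hypothetical reader for the value layout checked by Geometry::construct(). */
struct geom_header
{
  uint32_t srid;
  unsigned char byte_order;       /* wkb_ndr or wkb_xdr */
  uint32_t wkb_type;              /* wkb_point, wkb_polygon, ... */
  const char *payload;            /* first byte after the WKB header */
};

static bool parse_geom_value(const char *data, uint32_t len, geom_header *out)
{
  if (len < 4 + 1 + 4)            /* SRID_SIZE + WKB_HEADER_SIZE, as above */
    return true;                  /* too short; construct() returns NULL here */
  std::memcpy(&out->srid, data, 4);          /* little-endian host assumed */
  out->byte_order= (unsigned char) data[4];
  std::memcpy(&out->wkb_type, data + 5, 4);  /* same assumption as srid */
  out->payload= data + 9;
  return false;
}
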
+Geometry *Geometry::create_from_wkt(Geometry_buffer *buffer, + Gis_read_stream *trs, String *wkt, + bool init_stream) +{ + LEX_STRING name; + Class_info *ci; + + if (trs->get_next_word(&name)) + { + trs->set_error_msg("Geometry name expected"); + return NULL; + } + if (!(ci= find_class(name.str, name.length)) || + wkt->reserve(1 + 4, 512)) + return NULL; + (*ci->m_create_func)((void *)buffer); + Geometry *result= (Geometry *)buffer; + + wkt->q_append((char) wkb_ndr); + wkt->q_append((uint32) result->get_class_info()->m_type_id); + if (trs->check_next_symbol('(') || + result->init_from_wkt(trs, wkt) || + trs->check_next_symbol(')')) + return NULL; + if (init_stream) + { + result->set_data_ptr(wkt->ptr(), wkt->length()); + result->shift_wkb_header(); + } + return result; +} + + +static double wkb_get_double(const char *ptr, Geometry::wkbByteOrder bo) +{ + double res; + if (bo != Geometry::wkb_xdr) + float8get(res, ptr); + else + { + char inv_array[8]; + inv_array[0]= ptr[7]; + inv_array[1]= ptr[6]; + inv_array[2]= ptr[5]; + inv_array[3]= ptr[4]; + inv_array[4]= ptr[3]; + inv_array[5]= ptr[2]; + inv_array[6]= ptr[1]; + inv_array[7]= ptr[0]; + float8get(res, inv_array); + } + return res; +} + + +static uint32 wkb_get_uint(const char *ptr, Geometry::wkbByteOrder bo) +{ + if (bo != Geometry::wkb_xdr) + return uint4korr(ptr); + /* else */ + { + char inv_array[4]; + inv_array[0]= ptr[3]; + inv_array[1]= ptr[2]; + inv_array[2]= ptr[1]; + inv_array[3]= ptr[0]; + return uint4korr(inv_array); + } +} + + +int Geometry::create_from_wkb(Geometry_buffer *buffer, + const char *wkb, uint32 len, String *res) +{ + uint32 geom_type; + Geometry *geom; + + if (len < WKB_HEADER_SIZE) + return 1; + geom_type= wkb_get_uint(wkb+1, (wkbByteOrder)wkb[0]); + if (!(geom= create_by_typeid(buffer, (int) geom_type)) || + res->reserve(WKB_HEADER_SIZE, 512)) + return 1; + + res->q_append((char) wkb_ndr); + res->q_append(geom_type); + return geom->init_from_wkb(wkb+WKB_HEADER_SIZE, len - WKB_HEADER_SIZE, + (wkbByteOrder) wkb[0], res); +} + + +bool Geometry::envelope(String *result) const +{ + MBR mbr; + const char *end; + + if (get_mbr(&mbr, &end) || result->reserve(1+4*3+SIZEOF_STORED_DOUBLE*10)) + return 1; + + result->q_append((char) wkb_ndr); + result->q_append((uint32) wkb_polygon); + result->q_append((uint32) 1); + result->q_append((uint32) 5); + result->q_append(mbr.xmin); + result->q_append(mbr.ymin); + result->q_append(mbr.xmax); + result->q_append(mbr.ymin); + result->q_append(mbr.xmax); + result->q_append(mbr.ymax); + result->q_append(mbr.xmin); + result->q_append(mbr.ymax); + result->q_append(mbr.xmin); + result->q_append(mbr.ymin); + + return 0; +} + + +/* + Create a point from data. + + SYNPOSIS + create_point() + result Put result here + data Data for point is here. + + RETURN + 0 ok + 1 Can't reallocate 'result' +*/ + +bool Geometry::create_point(String *result, const char *data) const +{ + if (no_data(data, SIZEOF_STORED_DOUBLE * 2) || + result->reserve(1 + 4 + SIZEOF_STORED_DOUBLE * 2)) + return 1; + result->q_append((char) wkb_ndr); + result->q_append((uint32) wkb_point); + /* Copy two double in same format */ + result->q_append(data, SIZEOF_STORED_DOUBLE*2); + return 0; +} + +/* + Create a point from coordinates. 
+ + SYNPOSIS + create_point() + result Put result here + x x coordinate for point + y y coordinate for point + + RETURN + 0 ok + 1 Can't reallocate 'result' +*/ + +bool Geometry::create_point(String *result, double x, double y) const +{ + if (result->reserve(1 + 4 + SIZEOF_STORED_DOUBLE * 2)) + return 1; + + result->q_append((char) wkb_ndr); + result->q_append((uint32) wkb_point); + result->q_append(x); + result->q_append(y); + return 0; +} + +/* + Append N points from packed format to text + + SYNOPSIS + append_points() + txt Append points here + n_points Number of points + data Packed data + offset Offset between points + + RETURN + # end of data +*/ + +const char *Geometry::append_points(String *txt, uint32 n_points, + const char *data, uint32 offset) const +{ + while (n_points--) + { + double x,y; + data+= offset; + get_point(&x, &y, data); + data+= SIZEOF_STORED_DOUBLE * 2; + txt->qs_append(x); + txt->qs_append(' '); + txt->qs_append(y); + txt->qs_append(','); + } + return data; +} + + +/* + Get most bounding rectangle (mbr) for X points + + SYNOPSIS + get_mbr_for_points() + mbr MBR (store rectangle here) + points Number of points + data Packed data + offset Offset between points + + RETURN + 0 Wrong data + # end of data +*/ + +const char *Geometry::get_mbr_for_points(MBR *mbr, const char *data, + uint offset) const +{ + uint32 points; + /* read number of points */ + if (no_data(data, 4)) + return 0; + points= uint4korr(data); + data+= 4; + + if (no_data(data, (SIZEOF_STORED_DOUBLE * 2 + offset) * points)) + return 0; + + /* Calculate MBR for points */ + while (points--) + { + data+= offset; + mbr->add_xy(data, data + SIZEOF_STORED_DOUBLE); + data+= SIZEOF_STORED_DOUBLE * 2; + } + return data; +} + + +/***************************** Point *******************************/ + +uint32 Gis_point::get_data_size() const +{ + return POINT_DATA_SIZE; +} + + +bool Gis_point::init_from_wkt(Gis_read_stream *trs, String *wkb) +{ + double x, y; + if (trs->get_next_number(&x) || trs->get_next_number(&y) || + wkb->reserve(SIZEOF_STORED_DOUBLE * 2)) + return 1; + wkb->q_append(x); + wkb->q_append(y); + return 0; +} + + +uint Gis_point::init_from_wkb(const char *wkb, uint len, + wkbByteOrder bo, String *res) +{ + double x, y; + if (len < POINT_DATA_SIZE || res->reserve(POINT_DATA_SIZE)) + return 0; + x= wkb_get_double(wkb, bo); + y= wkb_get_double(wkb + SIZEOF_STORED_DOUBLE, bo); + res->q_append(x); + res->q_append(y); + return POINT_DATA_SIZE; +} + + +bool Gis_point::get_data_as_wkt(String *txt, const char **end) const +{ + double x, y; + if (get_xy(&x, &y)) + return 1; + if (txt->reserve(MAX_DIGITS_IN_DOUBLE * 2 + 1)) + return 1; + txt->qs_append(x); + txt->qs_append(' '); + txt->qs_append(y); + *end= m_data+ POINT_DATA_SIZE; + return 0; +} + + +bool Gis_point::get_mbr(MBR *mbr, const char **end) const +{ + double x, y; + if (get_xy(&x, &y)) + return 1; + mbr->add_xy(x, y); + *end= m_data+ POINT_DATA_SIZE; + return 0; +} + +const Geometry::Class_info *Gis_point::get_class_info() const +{ + return &point_class; +} + + +/***************************** LineString *******************************/ + +uint32 Gis_line_string::get_data_size() const +{ + if (no_data(m_data, 4)) + return GET_SIZE_ERROR; + return 4 + uint4korr(m_data) * POINT_DATA_SIZE; +} + + +bool Gis_line_string::init_from_wkt(Gis_read_stream *trs, String *wkb) +{ + uint32 n_points= 0; + uint32 np_pos= wkb->length(); + Gis_point p; + + if (wkb->reserve(4, 512)) + return 1; + wkb->length(wkb->length()+4); // Reserve space for points + + for 
(;;) + { + if (p.init_from_wkt(trs, wkb)) + return 1; + n_points++; + if (trs->skip_char(',')) // Didn't find ',' + break; + } + if (n_points < 1) + { + trs->set_error_msg("Too few points in LINESTRING"); + return 1; + } + wkb->write_at_position(np_pos, n_points); + return 0; +} + + +uint Gis_line_string::init_from_wkb(const char *wkb, uint len, + wkbByteOrder bo, String *res) +{ + uint32 n_points, proper_length; + const char *wkb_end; + Gis_point p; + + if (len < 4) + return 0; + n_points= wkb_get_uint(wkb, bo); + proper_length= 4 + n_points * POINT_DATA_SIZE; + + if (len < proper_length || res->reserve(proper_length)) + return 0; + + res->q_append(n_points); + wkb_end= wkb + proper_length; + for (wkb+= 4; wkb<wkb_end; wkb+= POINT_DATA_SIZE) + { + if (!p.init_from_wkb(wkb, POINT_DATA_SIZE, bo, res)) + return 0; + } + + return proper_length; +} + + +bool Gis_line_string::get_data_as_wkt(String *txt, const char **end) const +{ + uint32 n_points; + const char *data= m_data; + + if (no_data(data, 4)) + return 1; + n_points= uint4korr(data); + data += 4; + + if (n_points < 1 || + no_data(data, SIZEOF_STORED_DOUBLE * 2 * n_points) || + txt->reserve(((MAX_DIGITS_IN_DOUBLE + 1)*2 + 1) * n_points)) + return 1; + + while (n_points--) + { + double x, y; + get_point(&x, &y, data); + data+= SIZEOF_STORED_DOUBLE * 2; + txt->qs_append(x); + txt->qs_append(' '); + txt->qs_append(y); + txt->qs_append(','); + } + txt->length(txt->length() - 1); // Remove end ',' + *end= data; + return 0; +} + + +bool Gis_line_string::get_mbr(MBR *mbr, const char **end) const +{ + return (*end=get_mbr_for_points(mbr, m_data, 0)) == 0; +} + + +int Gis_line_string::length(double *len) const +{ + uint32 n_points; + double prev_x, prev_y; + const char *data= m_data; + + *len= 0; // In case of errors + if (no_data(data, 4)) + return 1; + n_points= uint4korr(data); + data+= 4; + if (n_points < 1 || no_data(data, SIZEOF_STORED_DOUBLE * 2 * n_points)) + return 1; + + get_point(&prev_x, &prev_y, data); + data+= SIZEOF_STORED_DOUBLE*2; + + while (--n_points) + { + double x, y; + get_point(&x, &y, data); + data+= SIZEOF_STORED_DOUBLE * 2; + *len+= sqrt(pow(prev_x-x,2)+pow(prev_y-y,2)); + prev_x= x; + prev_y= y; + } + return 0; +} + + +int Gis_line_string::is_closed(int *closed) const +{ + uint32 n_points; + double x1, y1, x2, y2; + const char *data= m_data; + + if (no_data(data, 4)) + return 1; + n_points= uint4korr(data); + if (n_points == 1) + { + *closed=1; + return 0; + } + data+= 4; + if (no_data(data, SIZEOF_STORED_DOUBLE * 2 * n_points)) + return 1; + + /* Get first point */ + get_point(&x1, &y1, data); + + /* get last point */ + data+= SIZEOF_STORED_DOUBLE*2 + (n_points-2)*POINT_DATA_SIZE; + get_point(&x2, &y2, data); + + *closed= (x1==x2) && (y1==y2); + return 0; +} + + +int Gis_line_string::num_points(uint32 *n_points) const +{ + *n_points= uint4korr(m_data); + return 0; +} + + +int Gis_line_string::start_point(String *result) const +{ + /* +4 is for skipping over number of points */ + return create_point(result, m_data + 4); +} + + +int Gis_line_string::end_point(String *result) const +{ + uint32 n_points; + if (no_data(m_data, 4)) + return 1; + n_points= uint4korr(m_data); + return create_point(result, m_data + 4 + (n_points - 1) * POINT_DATA_SIZE); +} + + +int Gis_line_string::point_n(uint32 num, String *result) const +{ + uint32 n_points; + if (no_data(m_data, 4)) + return 1; + n_points= uint4korr(m_data); + if ((uint32) (num - 1) >= n_points) // means (num > n_points || num < 1) + return 1; + + return 
create_point(result, m_data + 4 + (num - 1) * POINT_DATA_SIZE); +} + +const Geometry::Class_info *Gis_line_string::get_class_info() const +{ + return &linestring_class; +} + + +/***************************** Polygon *******************************/ + +uint32 Gis_polygon::get_data_size() const +{ + uint32 n_linear_rings; + const char *data= m_data; + + if (no_data(data, 4)) + return GET_SIZE_ERROR; + n_linear_rings= uint4korr(data); + data+= 4; + + while (n_linear_rings--) + { + if (no_data(data, 4)) + return GET_SIZE_ERROR; + data+= 4 + uint4korr(data)*POINT_DATA_SIZE; + } + return (uint32) (data - m_data); +} + + +bool Gis_polygon::init_from_wkt(Gis_read_stream *trs, String *wkb) +{ + uint32 n_linear_rings= 0; + uint32 lr_pos= wkb->length(); + int closed; + + if (wkb->reserve(4, 512)) + return 1; + wkb->length(wkb->length()+4); // Reserve space for points + for (;;) + { + Gis_line_string ls; + uint32 ls_pos=wkb->length(); + if (trs->check_next_symbol('(') || + ls.init_from_wkt(trs, wkb) || + trs->check_next_symbol(')')) + return 1; + + ls.set_data_ptr(wkb->ptr() + ls_pos, wkb->length() - ls_pos); + if (ls.is_closed(&closed) || !closed) + { + trs->set_error_msg("POLYGON's linear ring isn't closed"); + return 1; + } + n_linear_rings++; + if (trs->skip_char(',')) // Didn't find ',' + break; + } + wkb->write_at_position(lr_pos, n_linear_rings); + return 0; +} + + +uint Gis_polygon::init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, + String *res) +{ + uint32 n_linear_rings; + const char *wkb_orig= wkb; + + if (len < 4) + return 0; + + n_linear_rings= wkb_get_uint(wkb, bo); + if (res->reserve(4, 512)) + return 0; + wkb+= 4; + len-= 4; + res->q_append(n_linear_rings); + + while (n_linear_rings--) + { + Gis_line_string ls; + uint32 ls_pos= res->length(); + int ls_len; + int closed; + + if (!(ls_len= ls.init_from_wkb(wkb, len, bo, res))) + return 0; + + ls.set_data_ptr(res->ptr() + ls_pos, res->length() - ls_pos); + + if (ls.is_closed(&closed) || !closed) + return 0; + wkb+= ls_len; + } + + return (uint) (wkb - wkb_orig); +} + + +bool Gis_polygon::get_data_as_wkt(String *txt, const char **end) const +{ + uint32 n_linear_rings; + const char *data= m_data; + + if (no_data(data, 4)) + return 1; + + n_linear_rings= uint4korr(data); + data+= 4; + + while (n_linear_rings--) + { + uint32 n_points; + if (no_data(data, 4)) + return 1; + n_points= uint4korr(data); + data+= 4; + if (no_data(data, (SIZEOF_STORED_DOUBLE*2) * n_points) || + txt->reserve(2 + ((MAX_DIGITS_IN_DOUBLE + 1) * 2 + 1) * n_points)) + return 1; + txt->qs_append('('); + data= append_points(txt, n_points, data, 0); + (*txt) [txt->length() - 1]= ')'; // Replace end ',' + txt->qs_append(','); + } + txt->length(txt->length() - 1); // Remove end ',' + *end= data; + return 0; +} + + +bool Gis_polygon::get_mbr(MBR *mbr, const char **end) const +{ + uint32 n_linear_rings; + const char *data= m_data; + + if (no_data(data, 4)) + return 1; + n_linear_rings= uint4korr(data); + data+= 4; + + while (n_linear_rings--) + { + if (!(data= get_mbr_for_points(mbr, data, 0))) + return 1; + } + *end= data; + return 0; +} + + +int Gis_polygon::area(double *ar, const char **end_of_data) const +{ + uint32 n_linear_rings; + double result= -1.0; + const char *data= m_data; + + if (no_data(data, 4)) + return 1; + n_linear_rings= uint4korr(data); + data+= 4; + + while (n_linear_rings--) + { + double prev_x, prev_y; + double lr_area= 0; + uint32 n_points; + + if (no_data(data, 4)) + return 1; + n_points= uint4korr(data); + if (no_data(data, 
(SIZEOF_STORED_DOUBLE*2) * n_points)) + return 1; + get_point(&prev_x, &prev_y, data+4); + data+= (4+SIZEOF_STORED_DOUBLE*2); + + while (--n_points) // One point is already read + { + double x, y; + get_point(&x, &y, data); + data+= (SIZEOF_STORED_DOUBLE*2); + /* QQ: Is the following prev_x+x right ? */ + lr_area+= (prev_x + x)* (prev_y - y); + prev_x= x; + prev_y= y; + } + lr_area= fabs(lr_area)/2; + if (result == -1.0) + result= lr_area; + else + result-= lr_area; + } + *ar= fabs(result); + *end_of_data= data; + return 0; +} + + +int Gis_polygon::exterior_ring(String *result) const +{ + uint32 n_points, length; + const char *data= m_data + 4; // skip n_linerings + + if (no_data(data, 4)) + return 1; + n_points= uint4korr(data); + data+= 4; + length= n_points * POINT_DATA_SIZE; + if (no_data(data, length) || result->reserve(1+4+4+ length)) + return 1; + + result->q_append((char) wkb_ndr); + result->q_append((uint32) wkb_linestring); + result->q_append(n_points); + result->q_append(data, n_points * POINT_DATA_SIZE); + return 0; +} + + +int Gis_polygon::num_interior_ring(uint32 *n_int_rings) const +{ + if (no_data(m_data, 4)) + return 1; + *n_int_rings= uint4korr(m_data)-1; + return 0; +} + + +int Gis_polygon::interior_ring_n(uint32 num, String *result) const +{ + const char *data= m_data; + uint32 n_linear_rings; + uint32 n_points; + uint32 points_size; + + if (no_data(data, 4)) + return 1; + n_linear_rings= uint4korr(data); + data+= 4; + + if (num >= n_linear_rings || num < 1) + return 1; + + while (num--) + { + if (no_data(data, 4)) + return 1; + data+= 4 + uint4korr(data) * POINT_DATA_SIZE; + } + if (no_data(data, 4)) + return 1; + n_points= uint4korr(data); + points_size= n_points * POINT_DATA_SIZE; + data+= 4; + if (no_data(data, points_size) || result->reserve(1+4+4+ points_size)) + return 1; + + result->q_append((char) wkb_ndr); + result->q_append((uint32) wkb_linestring); + result->q_append(n_points); + result->q_append(data, points_size); + + return 0; +} + + +int Gis_polygon::centroid_xy(double *x, double *y) const +{ + uint32 n_linear_rings; + double res_area; + double res_cx, res_cy; + const char *data= m_data; + bool first_loop= 1; + LINT_INIT(res_area); + LINT_INIT(res_cx); + LINT_INIT(res_cy); + + if (no_data(data, 4)) + return 1; + n_linear_rings= uint4korr(data); + data+= 4; + + while (n_linear_rings--) + { + uint32 n_points, org_n_points; + double prev_x, prev_y; + double cur_area= 0; + double cur_cx= 0; + double cur_cy= 0; + + if (no_data(data, 4)) + return 1; + org_n_points= n_points= uint4korr(data); + data+= 4; + if (no_data(data, (SIZEOF_STORED_DOUBLE*2) * n_points)) + return 1; + get_point(&prev_x, &prev_y, data); + data+= (SIZEOF_STORED_DOUBLE*2); + + while (--n_points) // One point is already read + { + double x, y; + get_point(&x, &y, data); + data+= (SIZEOF_STORED_DOUBLE*2); + /* QQ: Is the following prev_x+x right ? 
*/ + cur_area+= (prev_x + x) * (prev_y - y); + cur_cx+= x; + cur_cy+= y; + prev_x= x; + prev_y= y; + } + cur_area= fabs(cur_area) / 2; + cur_cx= cur_cx / (org_n_points - 1); + cur_cy= cur_cy / (org_n_points - 1); + + if (!first_loop) + { + double d_area= fabs(res_area - cur_area); + res_cx= (res_area * res_cx - cur_area * cur_cx) / d_area; + res_cy= (res_area * res_cy - cur_area * cur_cy) / d_area; + } + else + { + first_loop= 0; + res_area= cur_area; + res_cx= cur_cx; + res_cy= cur_cy; + } + } + + *x= res_cx; + *y= res_cy; + return 0; +} + + +int Gis_polygon::centroid(String *result) const +{ + double x, y; + if (centroid_xy(&x, &y)) + return 1; + return create_point(result, x, y); +} + +const Geometry::Class_info *Gis_polygon::get_class_info() const +{ + return &polygon_class; +} + + +/***************************** MultiPoint *******************************/ + +uint32 Gis_multi_point::get_data_size() const +{ + if (no_data(m_data, 4)) + return GET_SIZE_ERROR; + return 4 + uint4korr(m_data)*(POINT_DATA_SIZE + WKB_HEADER_SIZE); +} + + +bool Gis_multi_point::init_from_wkt(Gis_read_stream *trs, String *wkb) +{ + uint32 n_points= 0; + uint32 np_pos= wkb->length(); + Gis_point p; + + if (wkb->reserve(4, 512)) + return 1; + wkb->length(wkb->length()+4); // Reserve space for points + + for (;;) + { + if (wkb->reserve(1+4, 512)) + return 1; + wkb->q_append((char) wkb_ndr); + wkb->q_append((uint32) wkb_point); + if (p.init_from_wkt(trs, wkb)) + return 1; + n_points++; + if (trs->skip_char(',')) // Didn't find ',' + break; + } + wkb->write_at_position(np_pos, n_points); // Store number of found points + return 0; +} + + +uint Gis_multi_point::init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, + String *res) +{ + uint32 n_points; + uint proper_size; + Gis_point p; + const char *wkb_end; + + if (len < 4) + return 0; + n_points= wkb_get_uint(wkb, bo); + proper_size= 4 + n_points * (WKB_HEADER_SIZE + POINT_DATA_SIZE); + + if (len < proper_size || res->reserve(proper_size)) + return 0; + + res->q_append(n_points); + wkb_end= wkb + proper_size; + for (wkb+=4; wkb < wkb_end; wkb+= (WKB_HEADER_SIZE + POINT_DATA_SIZE)) + { + res->q_append((char)wkb_ndr); + res->q_append((uint32)wkb_point); + if (!p.init_from_wkb(wkb + WKB_HEADER_SIZE, + POINT_DATA_SIZE, (wkbByteOrder) wkb[0], res)) + return 0; + } + return proper_size; +} + + +bool Gis_multi_point::get_data_as_wkt(String *txt, const char **end) const +{ + uint32 n_points; + if (no_data(m_data, 4)) + return 1; + + n_points= uint4korr(m_data); + if (no_data(m_data+4, + n_points * (SIZEOF_STORED_DOUBLE * 2 + WKB_HEADER_SIZE)) || + txt->reserve(((MAX_DIGITS_IN_DOUBLE + 1) * 2 + 1) * n_points)) + return 1; + *end= append_points(txt, n_points, m_data+4, WKB_HEADER_SIZE); + txt->length(txt->length()-1); // Remove end ',' + return 0; +} + + +bool Gis_multi_point::get_mbr(MBR *mbr, const char **end) const +{ + return (*end= get_mbr_for_points(mbr, m_data, WKB_HEADER_SIZE)) == 0; +} + + +int Gis_multi_point::num_geometries(uint32 *num) const +{ + *num= uint4korr(m_data); + return 0; +} + + +int Gis_multi_point::geometry_n(uint32 num, String *result) const +{ + const char *data= m_data; + uint32 n_points; + + if (no_data(data, 4)) + return 1; + n_points= uint4korr(data); + data+= 4+ (num - 1) * (WKB_HEADER_SIZE + POINT_DATA_SIZE); + + if (num > n_points || num < 1 || + no_data(data, WKB_HEADER_SIZE + POINT_DATA_SIZE) || + result->reserve(WKB_HEADER_SIZE + POINT_DATA_SIZE)) + return 1; + + result->q_append(data, WKB_HEADER_SIZE + POINT_DATA_SIZE); + return 0; +} 
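The ring loops in Gis_polygon::area() and centroid_xy() above accumulate (prev_x + x) * (prev_y - y) for each edge; that is the trapezoid form of the shoelace formula, so halving the absolute value of the sum does give the ring area and answers the "QQ" question left in the source. A self-contained sketch over plain coordinate pairs (the packed SIZEOF_STORED_DOUBLE layout is left out for clarity):

#include <cmath>
#include <cstdio>

/* Same accumulation as the ring loop in Gis_polygon::area(), on plain doubles. */
static double ring_area(const double (*pts)[2], unsigned n_points)
{
  double area= 0.0;
  double prev_x= pts[0][0], prev_y= pts[0][1];
  for (unsigned i= 1; i < n_points; i++)   /* first point already consumed */
  {
    area+= (prev_x + pts[i][0]) * (prev_y - pts[i][1]);
    prev_x= pts[i][0];
    prev_y= pts[i][1];
  }
  return std::fabs(area) / 2.0;            /* shoelace / trapezoid formula */
}

int main()
{
  /* Closed unit square: the last point repeats the first, as in a linear ring. */
  double square[5][2]= { {0,0}, {1,0}, {1,1}, {0,1}, {0,0} };
  std::printf("%f\n", ring_area(square, 5));   /* prints 1.000000 */
  return 0;
}

Subtracting the areas of the interior rings from that of the exterior ring, as the surrounding code then does, yields the polygon's area.
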
+ +const Geometry::Class_info *Gis_multi_point::get_class_info() const +{ + return &multipoint_class; +} + + +/***************************** MultiLineString *******************************/ + +uint32 Gis_multi_line_string::get_data_size() const +{ + uint32 n_line_strings; + const char *data= m_data; + + if (no_data(data, 4)) + return GET_SIZE_ERROR; + n_line_strings= uint4korr(data); + data+= 4; + + while (n_line_strings--) + { + if (no_data(data, WKB_HEADER_SIZE + 4)) + return GET_SIZE_ERROR; + data+= (WKB_HEADER_SIZE + 4 + uint4korr(data + WKB_HEADER_SIZE) * + POINT_DATA_SIZE); + } + return (uint32) (data - m_data); +} + + +bool Gis_multi_line_string::init_from_wkt(Gis_read_stream *trs, String *wkb) +{ + uint32 n_line_strings= 0; + uint32 ls_pos= wkb->length(); + + if (wkb->reserve(4, 512)) + return 1; + wkb->length(wkb->length()+4); // Reserve space for points + + for (;;) + { + Gis_line_string ls; + + if (wkb->reserve(1+4, 512)) + return 1; + wkb->q_append((char) wkb_ndr); + wkb->q_append((uint32) wkb_linestring); + + if (trs->check_next_symbol('(') || + ls.init_from_wkt(trs, wkb) || + trs->check_next_symbol(')')) + return 1; + n_line_strings++; + if (trs->skip_char(',')) // Didn't find ',' + break; + } + wkb->write_at_position(ls_pos, n_line_strings); + return 0; +} + + +uint Gis_multi_line_string::init_from_wkb(const char *wkb, uint len, + wkbByteOrder bo, String *res) +{ + uint32 n_line_strings; + const char *wkb_orig= wkb; + + if (len < 4) + return 0; + n_line_strings= wkb_get_uint(wkb, bo); + + if (res->reserve(4, 512)) + return 0; + res->q_append(n_line_strings); + + wkb+= 4; + while (n_line_strings--) + { + Gis_line_string ls; + int ls_len; + + if ((len < WKB_HEADER_SIZE) || + res->reserve(WKB_HEADER_SIZE, 512)) + return 0; + + res->q_append((char) wkb_ndr); + res->q_append((uint32) wkb_linestring); + + if (!(ls_len= ls.init_from_wkb(wkb + WKB_HEADER_SIZE, len, + (wkbByteOrder) wkb[0], res))) + return 0; + ls_len+= WKB_HEADER_SIZE;; + wkb+= ls_len; + len-= ls_len; + } + return (uint) (wkb - wkb_orig); +} + + +bool Gis_multi_line_string::get_data_as_wkt(String *txt, + const char **end) const +{ + uint32 n_line_strings; + const char *data= m_data; + + if (no_data(data, 4)) + return 1; + n_line_strings= uint4korr(data); + data+= 4; + + while (n_line_strings--) + { + uint32 n_points; + if (no_data(data, (WKB_HEADER_SIZE + 4))) + return 1; + n_points= uint4korr(data + WKB_HEADER_SIZE); + data+= WKB_HEADER_SIZE + 4; + if (no_data(data, n_points * (SIZEOF_STORED_DOUBLE*2)) || + txt->reserve(2 + ((MAX_DIGITS_IN_DOUBLE + 1) * 2 + 1) * n_points)) + return 1; + txt->qs_append('('); + data= append_points(txt, n_points, data, 0); + (*txt) [txt->length() - 1]= ')'; + txt->qs_append(','); + } + txt->length(txt->length() - 1); + *end= data; + return 0; +} + + +bool Gis_multi_line_string::get_mbr(MBR *mbr, const char **end) const +{ + uint32 n_line_strings; + const char *data= m_data; + + if (no_data(data, 4)) + return 1; + n_line_strings= uint4korr(data); + data+= 4; + + while (n_line_strings--) + { + data+= WKB_HEADER_SIZE; + if (!(data= get_mbr_for_points(mbr, data, 0))) + return 1; + } + *end= data; + return 0; +} + + +int Gis_multi_line_string::num_geometries(uint32 *num) const +{ + *num= uint4korr(m_data); + return 0; +} + + +int Gis_multi_line_string::geometry_n(uint32 num, String *result) const +{ + uint32 n_line_strings, n_points, length; + const char *data= m_data; + + if (no_data(data, 4)) + return 1; + n_line_strings= uint4korr(data); + data+= 4; + + if ((num > n_line_strings) || 
(num < 1)) + return 1; + + for (;;) + { + if (no_data(data, WKB_HEADER_SIZE + 4)) + return 1; + n_points= uint4korr(data + WKB_HEADER_SIZE); + length= WKB_HEADER_SIZE + 4+ POINT_DATA_SIZE * n_points; + if (no_data(data, length)) + return 1; + if (!--num) + break; + data+= length; + } + return result->append(data, length, (uint32) 0); +} + + +int Gis_multi_line_string::length(double *len) const +{ + uint32 n_line_strings; + const char *data= m_data; + + if (no_data(data, 4)) + return 1; + n_line_strings= uint4korr(data); + data+= 4; + + *len=0; + while (n_line_strings--) + { + double ls_len; + Gis_line_string ls; + data+= WKB_HEADER_SIZE; + ls.set_data_ptr(data, (uint32) (m_data_end - data)); + if (ls.length(&ls_len)) + return 1; + *len+= ls_len; + /* + We know here that ls was ok, so we can call the trivial function + Gis_line_string::get_data_size without error checking + */ + data+= ls.get_data_size(); + } + return 0; +} + + +int Gis_multi_line_string::is_closed(int *closed) const +{ + uint32 n_line_strings; + const char *data= m_data; + + if (no_data(data, 4 + WKB_HEADER_SIZE)) + return 1; + n_line_strings= uint4korr(data); + data+= 4 + WKB_HEADER_SIZE; + + while (n_line_strings--) + { + Gis_line_string ls; + if (no_data(data, 0)) + return 1; + ls.set_data_ptr(data, (uint32) (m_data_end - data)); + if (ls.is_closed(closed)) + return 1; + if (!*closed) + return 0; + /* + We know here that ls was ok, so we can call the trivial function + Gis_line_string::get_data_size without error checking + */ + data+= ls.get_data_size() + WKB_HEADER_SIZE; + } + return 0; +} + +const Geometry::Class_info *Gis_multi_line_string::get_class_info() const +{ + return &multilinestring_class; +} + + +/***************************** MultiPolygon *******************************/ + +uint32 Gis_multi_polygon::get_data_size() const +{ + uint32 n_polygons; + const char *data= m_data; + + if (no_data(data, 4)) + return GET_SIZE_ERROR; + n_polygons= uint4korr(data); + data+= 4; + + while (n_polygons--) + { + uint32 n_linear_rings; + if (no_data(data, 4 + WKB_HEADER_SIZE)) + return GET_SIZE_ERROR; + + n_linear_rings= uint4korr(data + WKB_HEADER_SIZE); + data+= 4 + WKB_HEADER_SIZE; + + while (n_linear_rings--) + { + if (no_data(data, 4)) + return GET_SIZE_ERROR; + data+= 4 + uint4korr(data) * POINT_DATA_SIZE; + } + } + return (uint32) (data - m_data); +} + + +bool Gis_multi_polygon::init_from_wkt(Gis_read_stream *trs, String *wkb) +{ + uint32 n_polygons= 0; + int np_pos= wkb->length(); + Gis_polygon p; + + if (wkb->reserve(4, 512)) + return 1; + wkb->length(wkb->length()+4); // Reserve space for points + + for (;;) + { + if (wkb->reserve(1+4, 512)) + return 1; + wkb->q_append((char) wkb_ndr); + wkb->q_append((uint32) wkb_polygon); + + if (trs->check_next_symbol('(') || + p.init_from_wkt(trs, wkb) || + trs->check_next_symbol(')')) + return 1; + n_polygons++; + if (trs->skip_char(',')) // Didn't find ',' + break; + } + wkb->write_at_position(np_pos, n_polygons); + return 0; +} + + +uint Gis_multi_polygon::init_from_wkb(const char *wkb, uint len, + wkbByteOrder bo, String *res) +{ + uint32 n_poly; + const char *wkb_orig= wkb; + + if (len < 4) + return 0; + n_poly= wkb_get_uint(wkb, bo); + + if (res->reserve(4, 512)) + return 0; + res->q_append(n_poly); + + wkb+=4; + while (n_poly--) + { + Gis_polygon p; + int p_len; + + if (len < WKB_HEADER_SIZE || + res->reserve(WKB_HEADER_SIZE, 512)) + return 0; + res->q_append((char) wkb_ndr); + res->q_append((uint32) wkb_polygon); + + if (!(p_len= p.init_from_wkb(wkb + WKB_HEADER_SIZE, 
len, + (wkbByteOrder) wkb[0], res))) + return 0; + p_len+= WKB_HEADER_SIZE; + wkb+= p_len; + len-= p_len; + } + return (uint) (wkb - wkb_orig); +} + + +bool Gis_multi_polygon::get_data_as_wkt(String *txt, const char **end) const +{ + uint32 n_polygons; + const char *data= m_data; + + if (no_data(data, 4)) + return 1; + n_polygons= uint4korr(data); + data+= 4; + + while (n_polygons--) + { + uint32 n_linear_rings; + if (no_data(data, 4 + WKB_HEADER_SIZE) || + txt->reserve(1, 512)) + return 1; + n_linear_rings= uint4korr(data+WKB_HEADER_SIZE); + data+= 4 + WKB_HEADER_SIZE; + txt->q_append('('); + + while (n_linear_rings--) + { + if (no_data(data, 4)) + return 1; + uint32 n_points= uint4korr(data); + data+= 4; + if (no_data(data, (SIZEOF_STORED_DOUBLE * 2) * n_points) || + txt->reserve(2 + ((MAX_DIGITS_IN_DOUBLE + 1) * 2 + 1) * n_points, + 512)) + return 1; + txt->qs_append('('); + data= append_points(txt, n_points, data, 0); + (*txt) [txt->length() - 1]= ')'; + txt->qs_append(','); + } + (*txt) [txt->length() - 1]= ')'; + txt->qs_append(','); + } + txt->length(txt->length() - 1); + *end= data; + return 0; +} + + +bool Gis_multi_polygon::get_mbr(MBR *mbr, const char **end) const +{ + uint32 n_polygons; + const char *data= m_data; + + if (no_data(data, 4)) + return 1; + n_polygons= uint4korr(data); + data+= 4; + + while (n_polygons--) + { + uint32 n_linear_rings; + if (no_data(data, 4+WKB_HEADER_SIZE)) + return 1; + n_linear_rings= uint4korr(data + WKB_HEADER_SIZE); + data+= WKB_HEADER_SIZE + 4; + + while (n_linear_rings--) + { + if (!(data= get_mbr_for_points(mbr, data, 0))) + return 1; + } + } + *end= data; + return 0; +} + + +int Gis_multi_polygon::num_geometries(uint32 *num) const +{ + *num= uint4korr(m_data); + return 0; +} + + +int Gis_multi_polygon::geometry_n(uint32 num, String *result) const +{ + uint32 n_polygons; + const char *data= m_data, *start_of_polygon; + + if (no_data(data, 4)) + return 1; + n_polygons= uint4korr(data); + data+= 4; + + if (num > n_polygons || num < 1) + return -1; + + do + { + uint32 n_linear_rings; + start_of_polygon= data; + + if (no_data(data, WKB_HEADER_SIZE + 4)) + return 1; + n_linear_rings= uint4korr(data + WKB_HEADER_SIZE); + data+= WKB_HEADER_SIZE + 4; + + while (n_linear_rings--) + { + uint32 n_points; + if (no_data(data, 4)) + return 1; + n_points= uint4korr(data); + data+= 4 + POINT_DATA_SIZE * n_points; + } + } while (--num); + if (no_data(data, 0)) // We must check last segment + return 1; + return result->append(start_of_polygon, (uint32) (data - start_of_polygon), + (uint32) 0); +} + + +int Gis_multi_polygon::area(double *ar, const char **end_of_data) const +{ + uint32 n_polygons; + const char *data= m_data; + double result= 0; + + if (no_data(data, 4)) + return 1; + n_polygons= uint4korr(data); + data+= 4; + + while (n_polygons--) + { + double p_area; + Gis_polygon p; + + data+= WKB_HEADER_SIZE; + p.set_data_ptr(data, (uint32) (m_data_end - data)); + if (p.area(&p_area, &data)) + return 1; + result+= p_area; + } + *ar= result; + *end_of_data= data; + return 0; +} + + +int Gis_multi_polygon::centroid(String *result) const +{ + uint32 n_polygons; + bool first_loop= 1; + Gis_polygon p; + double res_area, res_cx, res_cy; + double cur_area, cur_cx, cur_cy; + const char *data= m_data; + + LINT_INIT(res_area); + LINT_INIT(res_cx); + LINT_INIT(res_cy); + + if (no_data(data, 4)) + return 1; + n_polygons= uint4korr(data); + data+= 4; + + while (n_polygons--) + { + data+= WKB_HEADER_SIZE; + p.set_data_ptr(data, (uint32) (m_data_end - data)); + if 
(p.area(&cur_area, &data) || + p.centroid_xy(&cur_cx, &cur_cy)) + return 1; + + if (!first_loop) + { + double sum_area= res_area + cur_area; + res_cx= (res_area * res_cx + cur_area * cur_cx) / sum_area; + res_cy= (res_area * res_cy + cur_area * cur_cy) / sum_area; + } + else + { + first_loop= 0; + res_area= cur_area; + res_cx= cur_cx; + res_cy= cur_cy; + } + } + + return create_point(result, res_cx, res_cy); +} + +const Geometry::Class_info *Gis_multi_polygon::get_class_info() const +{ + return &multipolygon_class; +} + + +/************************* GeometryCollection ****************************/ + +uint32 Gis_geometry_collection::get_data_size() const +{ + uint32 n_objects; + const char *data= m_data; + Geometry_buffer buffer; + Geometry *geom; + + if (no_data(data, 4)) + return GET_SIZE_ERROR; + n_objects= uint4korr(data); + data+= 4; + + while (n_objects--) + { + uint32 wkb_type,object_size; + + if (no_data(data, WKB_HEADER_SIZE)) + return GET_SIZE_ERROR; + wkb_type= uint4korr(data + 1); + data+= WKB_HEADER_SIZE; + + if (!(geom= create_by_typeid(&buffer, wkb_type))) + return GET_SIZE_ERROR; + geom->set_data_ptr(data, (uint) (m_data_end - data)); + if ((object_size= geom->get_data_size()) == GET_SIZE_ERROR) + return GET_SIZE_ERROR; + data+= object_size; + } + return (uint32) (data - m_data); +} + + +bool Gis_geometry_collection::init_from_wkt(Gis_read_stream *trs, String *wkb) +{ + uint32 n_objects= 0; + uint32 no_pos= wkb->length(); + Geometry_buffer buffer; + Geometry *g; + + if (wkb->reserve(4, 512)) + return 1; + wkb->length(wkb->length()+4); // Reserve space for points + + for (;;) + { + if (!(g= create_from_wkt(&buffer, trs, wkb))) + return 1; + + if (g->get_class_info()->m_type_id == wkb_geometrycollection) + { + trs->set_error_msg("Unexpected GEOMETRYCOLLECTION"); + return 1; + } + n_objects++; + if (trs->skip_char(',')) // Didn't find ',' + break; + } + + wkb->write_at_position(no_pos, n_objects); + return 0; +} + + +uint Gis_geometry_collection::init_from_wkb(const char *wkb, uint len, + wkbByteOrder bo, String *res) +{ + uint32 n_geom; + const char *wkb_orig= wkb; + + if (len < 4) + return 0; + n_geom= wkb_get_uint(wkb, bo); + + if (res->reserve(4, 512)) + return 0; + res->q_append(n_geom); + + wkb+= 4; + while (n_geom--) + { + Geometry_buffer buffer; + Geometry *geom; + int g_len; + uint32 wkb_type; + + if (len < WKB_HEADER_SIZE || + res->reserve(WKB_HEADER_SIZE, 512)) + return 0; + + res->q_append((char) wkb_ndr); + wkb_type= wkb_get_uint(wkb+1, (wkbByteOrder) wkb[0]); + res->q_append(wkb_type); + + if (!(geom= create_by_typeid(&buffer, wkb_type)) || + !(g_len= geom->init_from_wkb(wkb + WKB_HEADER_SIZE, len, + (wkbByteOrder) wkb[0], res))) + return 0; + g_len+= WKB_HEADER_SIZE; + wkb+= g_len; + len-= g_len; + } + return (uint) (wkb - wkb_orig); +} + + +bool Gis_geometry_collection::get_data_as_wkt(String *txt, + const char **end) const +{ + uint32 n_objects; + Geometry_buffer buffer; + Geometry *geom; + const char *data= m_data; + + if (no_data(data, 4)) + return 1; + n_objects= uint4korr(data); + data+= 4; + + while (n_objects--) + { + uint32 wkb_type; + + if (no_data(data, WKB_HEADER_SIZE)) + return 1; + wkb_type= uint4korr(data + 1); + data+= WKB_HEADER_SIZE; + + if (!(geom= create_by_typeid(&buffer, wkb_type))) + return 1; + geom->set_data_ptr(data, (uint) (m_data_end - data)); + if (geom->as_wkt(txt, &data)) + return 1; + if (txt->append(",", 1, 512)) + return 1; + } + txt->length(txt->length() - 1); + *end= data; + return 0; +} + + +bool 
Gis_geometry_collection::get_mbr(MBR *mbr, const char **end) const +{ + uint32 n_objects; + const char *data= m_data; + Geometry_buffer buffer; + Geometry *geom; + + if (no_data(data, 4)) + return 1; + n_objects= uint4korr(data); + data+= 4; + + while (n_objects--) + { + uint32 wkb_type; + + if (no_data(data, WKB_HEADER_SIZE)) + return 1; + wkb_type= uint4korr(data + 1); + data+= WKB_HEADER_SIZE; + + if (!(geom= create_by_typeid(&buffer, wkb_type))) + return 1; + geom->set_data_ptr(data, (uint32) (m_data_end - data)); + if (geom->get_mbr(mbr, &data)) + return 1; + } + *end= data; + return 0; +} + + +int Gis_geometry_collection::num_geometries(uint32 *num) const +{ + if (no_data(m_data, 4)) + return 1; + *num= uint4korr(m_data); + return 0; +} + + +int Gis_geometry_collection::geometry_n(uint32 num, String *result) const +{ + uint32 n_objects, wkb_type, length; + const char *data= m_data; + Geometry_buffer buffer; + Geometry *geom; + + if (no_data(data, 4)) + return 1; + n_objects= uint4korr(data); + data+= 4; + if (num > n_objects || num < 1) + return 1; + + do + { + if (no_data(data, WKB_HEADER_SIZE)) + return 1; + wkb_type= uint4korr(data + 1); + data+= WKB_HEADER_SIZE; + + if (!(geom= create_by_typeid(&buffer, wkb_type))) + return 1; + geom->set_data_ptr(data, (uint) (m_data_end - data)); + if ((length= geom->get_data_size()) == GET_SIZE_ERROR) + return 1; + data+= length; + } while (--num); + + /* Copy found object to result */ + if (result->reserve(1+4+length)) + return 1; + result->q_append((char) wkb_ndr); + result->q_append((uint32) wkb_type); + result->q_append(data-length, length); // data-length = start_of_data + return 0; +} + + +/* + Return dimension for object + + SYNOPSIS + dimension() + res_dim Result dimension + end End of object will be stored here. May be 0 for + simple objects! + RETURN + 0 ok + 1 error +*/ + +bool Gis_geometry_collection::dimension(uint32 *res_dim, const char **end) const +{ + uint32 n_objects; + const char *data= m_data; + Geometry_buffer buffer; + Geometry *geom; + + if (no_data(data, 4)) + return 1; + n_objects= uint4korr(data); + data+= 4; + + *res_dim= 0; + while (n_objects--) + { + uint32 wkb_type, length, dim; + const char *end_data; + + if (no_data(data, WKB_HEADER_SIZE)) + return 1; + wkb_type= uint4korr(data + 1); + data+= WKB_HEADER_SIZE; + if (!(geom= create_by_typeid(&buffer, wkb_type))) + return 1; + geom->set_data_ptr(data, (uint32) (m_data_end - data)); + if (geom->dimension(&dim, &end_data)) + return 1; + set_if_bigger(*res_dim, dim); + if (end_data) // Complex object + data= end_data; + else if ((length= geom->get_data_size()) == GET_SIZE_ERROR) + return 1; + else + data+= length; + } + *end= data; + return 0; +} + +const Geometry::Class_info *Gis_geometry_collection::get_class_info() const +{ + return &geometrycollection_class; +} + +#endif /*HAVE_SPATIAL*/ diff --git a/sql/spatial.h b/sql/spatial.h new file mode 100644 index 00000000000..206958b3eaf --- /dev/null +++ b/sql/spatial.h @@ -0,0 +1,487 @@ +/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef _spatial_h +#define _spatial_h + +#ifdef HAVE_SPATIAL + +const uint SRID_SIZE= 4; +const uint SIZEOF_STORED_DOUBLE= 8; +const uint POINT_DATA_SIZE= SIZEOF_STORED_DOUBLE*2; +const uint WKB_HEADER_SIZE= 1+4; +const uint32 GET_SIZE_ERROR= ((uint32) -1); + +struct st_point_2d +{ + double x; + double y; +}; + +struct st_linear_ring +{ + uint32 n_points; + st_point_2d points; +}; + +/***************************** MBR *******************************/ + + +/* + It's ok that a lot of the functions are inline as these are only used once + in MySQL +*/ + +struct MBR +{ + double xmin, ymin, xmax, ymax; + + MBR() + { + xmin= ymin= DBL_MAX; + xmax= ymax= -DBL_MAX; + } + + MBR(const double xmin_arg, const double ymin_arg, + const double xmax_arg, const double ymax_arg) + :xmin(xmin_arg), ymin(ymin_arg), xmax(xmax_arg), ymax(ymax_arg) + {} + + MBR(const st_point_2d &min, const st_point_2d &max) + :xmin(min.x), ymin(min.y), xmax(max.x), ymax(max.y) + {} + + inline void add_xy(double x, double y) + { + /* Not using "else" for proper one point MBR calculation */ + if (x < xmin) + xmin= x; + if (x > xmax) + xmax= x; + if (y < ymin) + ymin= y; + if (y > ymax) + ymax= y; + } + void add_xy(const char *px, const char *py) + { + double x, y; + float8get(x, px); + float8get(y, py); + add_xy(x,y); + } + void add_mbr(const MBR *mbr) + { + if (mbr->xmin < xmin) + xmin= mbr->xmin; + if (mbr->xmax > xmax) + xmax= mbr->xmax; + if (mbr->ymin < ymin) + ymin= mbr->ymin; + if (mbr->ymax > ymax) + ymax= mbr->ymax; + } + + int equals(const MBR *mbr) + { + /* The following should be safe, even if we compare doubles */ + return ((mbr->xmin == xmin) && (mbr->ymin == ymin) && + (mbr->xmax == xmax) && (mbr->ymax == ymax)); + } + + int disjoint(const MBR *mbr) + { + /* The following should be safe, even if we compare doubles */ + return ((mbr->xmin > xmax) || (mbr->ymin > ymax) || + (mbr->xmax < xmin) || (mbr->ymax < ymin)); + } + + int intersects(const MBR *mbr) + { + return !disjoint(mbr); + } + + int touches(const MBR *mbr) + { + /* The following should be safe, even if we compare doubles */ + return ((((mbr->xmin == xmax) || (mbr->xmax == xmin)) && + ((mbr->ymin >= ymin) && (mbr->ymin <= ymax) || + (mbr->ymax >= ymin) && (mbr->ymax <= ymax))) || + (((mbr->ymin == ymax) || (mbr->ymax == ymin)) && + ((mbr->xmin >= xmin) && (mbr->xmin <= xmax) || + (mbr->xmax >= xmin) && (mbr->xmax <= xmax)))); + } + + int within(const MBR *mbr) + { + /* The following should be safe, even if we compare doubles */ + return ((mbr->xmin <= xmin) && (mbr->ymin <= ymin) && + (mbr->xmax >= xmax) && (mbr->ymax >= ymax)); + } + + int contains(const MBR *mbr) + { + /* The following should be safe, even if we compare doubles */ + return ((mbr->xmin >= xmin) && (mbr->ymin >= ymin) && + (mbr->xmax <= xmax) && (mbr->ymax <= ymax)); + } + + bool inner_point(double x, double y) const + { + /* The following should be safe, even if we compare doubles */ + return (xmin<x) && (xmax>x) && (ymin<y) && (ymax>y); + } + + int overlaps(const MBR *mbr) + { + int lb= mbr->inner_point(xmin, ymin); + int rb= mbr->inner_point(xmax, ymin); + int rt= mbr->inner_point(xmax, ymax); + int lt= mbr->inner_point(xmin, ymax); + + int a = lb+rb+rt+lt; + return (a>0) && (a<4) && (!within(mbr)); + } +}; + + 
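Editor's note (not part of the patch): a minimal standalone C++ sketch of the bounding-box predicate logic that the MBR struct above defines. The Box type, the main() driver and all names in it are illustrative stand-ins for the real MBR struct, shown only to make the within/contains/intersects relations concrete; only the comparison logic follows the code above.

#include <cstdio>

struct Box
{
  double xmin, ymin, xmax, ymax;

  // Two boxes are disjoint when one lies entirely outside the other.
  bool disjoint(const Box &b) const
  {
    return b.xmin > xmax || b.ymin > ymax || b.xmax < xmin || b.ymax < ymin;
  }
  bool intersects(const Box &b) const { return !disjoint(b); }

  // "a.within(b)" means b encloses a; "contains" is the mirror relation.
  bool within(const Box &b) const
  {
    return b.xmin <= xmin && b.ymin <= ymin && b.xmax >= xmax && b.ymax >= ymax;
  }
  bool contains(const Box &b) const { return b.within(*this); }
};

int main()
{
  Box a= { 0, 0, 4, 4 };
  Box b= { 1, 1, 3, 3 };
  Box c= { 5, 5, 6, 6 };

  printf("a intersects b: %d\n", a.intersects(b)); // 1: b overlaps a
  printf("a contains b:   %d\n", a.contains(b));   // 1: b lies inside a
  printf("a intersects c: %d\n", a.intersects(c)); // 0: c is outside a
  return 0;
}

As in the patch, all predicates reduce to plain double comparisons on the four corner coordinates, which is why exact equality comparisons are considered safe here.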
+/***************************** Geometry *******************************/ + +struct Geometry_buffer; + +class Geometry +{ +public: + static void *operator new(size_t size, void *buffer) + { + return buffer; + } + + static void operator delete(void *ptr, void *buffer) + {} + + enum wkbType + { + wkb_point= 1, + wkb_linestring= 2, + wkb_polygon= 3, + wkb_multipoint= 4, + wkb_multilinestring= 5, + wkb_multipolygon= 6, + wkb_geometrycollection= 7, + wkb_end=7 + }; + enum wkbByteOrder + { + wkb_xdr= 0, /* Big Endian */ + wkb_ndr= 1 /* Little Endian */ + }; + + class Class_info + { + public: + LEX_STRING_WITH_INIT m_name; + int m_type_id; + void (*m_create_func)(void *); + Class_info(const char *name, int type_id, void(*create_func)(void *)); + }; + + virtual const Class_info *get_class_info() const=0; + virtual uint32 get_data_size() const=0; + virtual bool init_from_wkt(Gis_read_stream *trs, String *wkb)=0; + + /* returns the length of the wkb that was read */ + virtual uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, + String *res)=0; + virtual bool get_data_as_wkt(String *txt, const char **end) const=0; + virtual bool get_mbr(MBR *mbr, const char **end) const=0; + virtual bool dimension(uint32 *dim, const char **end) const=0; + virtual int get_x(double *x) const { return -1; } + virtual int get_y(double *y) const { return -1; } + virtual int length(double *len) const { return -1; } + virtual int area(double *ar, const char **end) const { return -1;} + virtual int is_closed(int *closed) const { return -1; } + virtual int num_interior_ring(uint32 *n_int_rings) const { return -1; } + virtual int num_points(uint32 *n_points) const { return -1; } + virtual int num_geometries(uint32 *num) const { return -1; } + virtual int start_point(String *point) const { return -1; } + virtual int end_point(String *point) const { return -1; } + virtual int exterior_ring(String *ring) const { return -1; } + virtual int centroid(String *point) const { return -1; } + virtual int point_n(uint32 num, String *result) const { return -1; } + virtual int interior_ring_n(uint32 num, String *result) const { return -1; } + virtual int geometry_n(uint32 num, String *result) const { return -1; } + +public: + static Geometry *create_by_typeid(Geometry_buffer *buffer, int type_id) + { + Class_info *ci; + if (!(ci= find_class((int) type_id))) + return NULL; + (*ci->m_create_func)((void *)buffer); + return my_reinterpret_cast(Geometry *)(buffer); + } + + static Geometry *construct(Geometry_buffer *buffer, + const char *data, uint32 data_len); + static Geometry *create_from_wkt(Geometry_buffer *buffer, + Gis_read_stream *trs, String *wkt, + bool init_stream=1); + static int create_from_wkb(Geometry_buffer *buffer, + const char *wkb, uint32 len, String *res); + int as_wkt(String *wkt, const char **end) + { + uint32 len= get_class_info()->m_name.length; + if (wkt->reserve(len + 2, 512)) + return 1; + wkt->qs_append(get_class_info()->m_name.str, len); + wkt->qs_append('('); + if (get_data_as_wkt(wkt, end)) + return 1; + wkt->qs_append(')'); + return 0; + } + + inline void set_data_ptr(const char *data, uint32 data_len) + { + m_data= data; + m_data_end= data + data_len; + } + + inline void shift_wkb_header() + { + m_data+= WKB_HEADER_SIZE; + } + + bool envelope(String *result) const; + static Class_info *ci_collection[wkb_end+1]; + +protected: + static Class_info *find_class(int type_id) + { + return ((type_id < wkb_point) || (type_id > wkb_end)) ? 
+ NULL : ci_collection[type_id]; + } + static Class_info *find_class(const char *name, uint32 len); + const char *append_points(String *txt, uint32 n_points, + const char *data, uint32 offset) const; + bool create_point(String *result, const char *data) const; + bool create_point(String *result, double x, double y) const; + const char *get_mbr_for_points(MBR *mbr, const char *data, uint offset) + const; + + inline bool no_data(const char *cur_data, uint32 data_amount) const + { + return (cur_data + data_amount > m_data_end); + } + const char *m_data; + const char *m_data_end; +}; + + +/***************************** Point *******************************/ + +class Gis_point: public Geometry +{ +public: + uint32 get_data_size() const; + bool init_from_wkt(Gis_read_stream *trs, String *wkb); + uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res); + bool get_data_as_wkt(String *txt, const char **end) const; + bool get_mbr(MBR *mbr, const char **end) const; + + int get_xy(double *x, double *y) const + { + const char *data= m_data; + if (no_data(data, SIZEOF_STORED_DOUBLE * 2)) + return 1; + float8get(*x, data); + float8get(*y, data + SIZEOF_STORED_DOUBLE); + return 0; + } + + int get_x(double *x) const + { + if (no_data(m_data, SIZEOF_STORED_DOUBLE)) + return 1; + float8get(*x, m_data); + return 0; + } + + int get_y(double *y) const + { + const char *data= m_data; + if (no_data(data, SIZEOF_STORED_DOUBLE * 2)) return 1; + float8get(*y, data + SIZEOF_STORED_DOUBLE); + return 0; + } + + bool dimension(uint32 *dim, const char **end) const + { + *dim= 0; + *end= 0; /* No default end */ + return 0; + } + const Class_info *get_class_info() const; +}; + + +/***************************** LineString *******************************/ + +class Gis_line_string: public Geometry +{ +public: + uint32 get_data_size() const; + bool init_from_wkt(Gis_read_stream *trs, String *wkb); + uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res); + bool get_data_as_wkt(String *txt, const char **end) const; + bool get_mbr(MBR *mbr, const char **end) const; + int length(double *len) const; + int is_closed(int *closed) const; + int num_points(uint32 *n_points) const; + int start_point(String *point) const; + int end_point(String *point) const; + int point_n(uint32 n, String *result) const; + bool dimension(uint32 *dim, const char **end) const + { + *dim= 1; + *end= 0; /* No default end */ + return 0; + } + const Class_info *get_class_info() const; +}; + + +/***************************** Polygon *******************************/ + +class Gis_polygon: public Geometry +{ +public: + uint32 get_data_size() const; + bool init_from_wkt(Gis_read_stream *trs, String *wkb); + uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res); + bool get_data_as_wkt(String *txt, const char **end) const; + bool get_mbr(MBR *mbr, const char **end) const; + int area(double *ar, const char **end) const; + int exterior_ring(String *result) const; + int num_interior_ring(uint32 *n_int_rings) const; + int interior_ring_n(uint32 num, String *result) const; + int centroid_xy(double *x, double *y) const; + int centroid(String *result) const; + bool dimension(uint32 *dim, const char **end) const + { + *dim= 2; + *end= 0; /* No default end */ + return 0; + } + const Class_info *get_class_info() const; +}; + + +/***************************** MultiPoint *******************************/ + +class Gis_multi_point: public Geometry +{ +public: + uint32 get_data_size() const; + bool 
init_from_wkt(Gis_read_stream *trs, String *wkb); + uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res); + bool get_data_as_wkt(String *txt, const char **end) const; + bool get_mbr(MBR *mbr, const char **end) const; + int num_geometries(uint32 *num) const; + int geometry_n(uint32 num, String *result) const; + bool dimension(uint32 *dim, const char **end) const + { + *dim= 0; + *end= 0; /* No default end */ + return 0; + } + const Class_info *get_class_info() const; +}; + + +/***************************** MultiLineString *******************************/ + +class Gis_multi_line_string: public Geometry +{ +public: + uint32 get_data_size() const; + bool init_from_wkt(Gis_read_stream *trs, String *wkb); + uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res); + bool get_data_as_wkt(String *txt, const char **end) const; + bool get_mbr(MBR *mbr, const char **end) const; + int num_geometries(uint32 *num) const; + int geometry_n(uint32 num, String *result) const; + int length(double *len) const; + int is_closed(int *closed) const; + bool dimension(uint32 *dim, const char **end) const + { + *dim= 1; + *end= 0; /* No default end */ + return 0; + } + const Class_info *get_class_info() const; +}; + + +/***************************** MultiPolygon *******************************/ + +class Gis_multi_polygon: public Geometry +{ +public: + uint32 get_data_size() const; + bool init_from_wkt(Gis_read_stream *trs, String *wkb); + uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res); + bool get_data_as_wkt(String *txt, const char **end) const; + bool get_mbr(MBR *mbr, const char **end) const; + int num_geometries(uint32 *num) const; + int geometry_n(uint32 num, String *result) const; + int area(double *ar, const char **end) const; + int centroid(String *result) const; + bool dimension(uint32 *dim, const char **end) const + { + *dim= 2; + *end= 0; /* No default end */ + return 0; + } + const Class_info *get_class_info() const; +}; + + +/*********************** GeometryCollection *******************************/ + +class Gis_geometry_collection: public Geometry +{ +public: + uint32 get_data_size() const; + bool init_from_wkt(Gis_read_stream *trs, String *wkb); + uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res); + bool get_data_as_wkt(String *txt, const char **end) const; + bool get_mbr(MBR *mbr, const char **end) const; + int num_geometries(uint32 *num) const; + int geometry_n(uint32 num, String *result) const; + bool dimension(uint32 *dim, const char **end) const; + const Class_info *get_class_info() const; +}; + +const int geometry_buffer_size= sizeof(Gis_point); +struct Geometry_buffer +{ + void *arr[(geometry_buffer_size - 1)/sizeof(void *) + 1]; +}; + +#endif /*HAVE_SPATIAL*/ +#endif diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 46b29f252a6..4626e5892a4 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -26,59 +26,14 @@ */ #include "mysql_priv.h" -#include "sql_acl.h" #include "hash_filo.h" #ifdef HAVE_REPLICATION #include "sql_repl.h" //for tables_ok() #endif #include <m_ctype.h> -#include <assert.h> #include <stdarg.h> -struct acl_host_and_ip -{ - char *hostname; - long ip,ip_mask; // Used with masked ip:s -}; - - -class ACL_ACCESS { -public: - ulong sort; - ulong access; -}; - - -/* ACL_HOST is used if no host is specified */ - -class ACL_HOST :public ACL_ACCESS -{ -public: - acl_host_and_ip host; - char *db; -}; - - -class ACL_USER :public ACL_ACCESS -{ -public: - acl_host_and_ip host; - uint
hostname_length; - USER_RESOURCES user_resource; - char *user,*password; - ulong salt[2]; - enum SSL_type ssl_type; - const char *ssl_cipher, *x509_issuer, *x509_subject; -}; - - -class ACL_DB :public ACL_ACCESS -{ -public: - acl_host_and_ip host; - char *user,*db; -}; - +#ifndef NO_EMBEDDED_ACCESS_CHECKS class acl_entry :public hash_filo_element { @@ -96,57 +51,106 @@ static byte* acl_entry_get_key(acl_entry *entry,uint *length, return (byte*) entry->key; } -#define ACL_KEY_LENGTH (sizeof(long)+NAME_LEN+17) +#define IP_ADDR_STRLEN (3+1+3+1+3+1+3) +#define ACL_KEY_LENGTH (IP_ADDR_STRLEN+1+NAME_LEN+1+USERNAME_LENGTH+1) static DYNAMIC_ARRAY acl_hosts,acl_users,acl_dbs; static MEM_ROOT mem, memex; static bool initialized=0; static bool allow_all_hosts=1; -static HASH acl_check_hosts, hash_tables; +static HASH acl_check_hosts, column_priv_hash; static DYNAMIC_ARRAY acl_wild_hosts; static hash_filo *acl_cache; static uint grant_version=0; -static ulong get_access(TABLE *form, uint fieldnr, uint *next_field); +static uint priv_version=0; /* Version of priv tables. incremented by acl_load */ +static ulong get_access(TABLE *form,uint fieldnr, uint *next_field=0); static int acl_compare(ACL_ACCESS *a,ACL_ACCESS *b); static ulong get_sort(uint count,...); static void init_check_host(void); -static ACL_USER *find_acl_user(const char *host, const char *user); -static bool update_user_table(THD *thd, const char *host, const char *user, - const char *new_password); +static ACL_USER *find_acl_user(const char *host, const char *user, + my_bool exact); +static bool update_user_table(THD *thd, TABLE *table, + const char *host, const char *user, + const char *new_password, uint new_password_len); static void update_hostname(acl_host_and_ip *host, const char *hostname); static bool compare_hostname(const acl_host_and_ip *host,const char *hostname, const char *ip); +static my_bool acl_load(THD *thd, TABLE_LIST *tables); +static my_bool grant_load(TABLE_LIST *tables); + +/* + Convert scrambled password to binary form, according to scramble type, + Binary form is stored in user.salt. +*/ + +static +void +set_user_salt(ACL_USER *acl_user, const char *password, uint password_len) +{ + if (password_len == SCRAMBLED_PASSWORD_CHAR_LENGTH) + { + get_salt_from_password(acl_user->salt, password); + acl_user->salt_len= SCRAMBLE_LENGTH; + } + else if (password_len == SCRAMBLED_PASSWORD_CHAR_LENGTH_323) + { + get_salt_from_password_323((ulong *) acl_user->salt, password); + acl_user->salt_len= SCRAMBLE_LENGTH_323; + } + else + acl_user->salt_len= 0; +} + +/* + This after_update function is used when user.password is less than + SCRAMBLE_LENGTH bytes. +*/ + +static void restrict_update_of_old_passwords_var(THD *thd, + enum_var_type var_type) +{ + if (var_type == OPT_GLOBAL) + { + pthread_mutex_lock(&LOCK_global_system_variables); + global_system_variables.old_passwords= 1; + pthread_mutex_unlock(&LOCK_global_system_variables); + } + else + thd->variables.old_passwords= 1; +} + /* - Read grant privileges from the privilege tables in the 'mysql' database. + Initialize structures responsible for user/db-level privilege checking and + load privilege information for them from tables in the 'mysql' database. SYNOPSIS acl_init() - thd Thread handler - dont_read_acl_tables Set to 1 if run with --skip-grant + dont_read_acl_tables TRUE if we want to skip loading data from + privilege tables and disable privilege checking. 
+ + NOTES + This function is mostly responsible for preparatory steps, main work + on initialization and grants loading is done in acl_reload(). RETURN VALUES 0 ok 1 Could not initialize grant's */ - -my_bool acl_init(THD *org_thd, bool dont_read_acl_tables) +my_bool acl_init(bool dont_read_acl_tables) { THD *thd; - TABLE_LIST tables[3]; - TABLE *table; - READ_RECORD read_record_info; - MYSQL_LOCK *lock; - my_bool return_val=1; + my_bool return_val; DBUG_ENTER("acl_init"); - if (!acl_cache) - acl_cache=new hash_filo(ACL_CACHE_SIZE,0,0, - (hash_get_key) acl_entry_get_key, - (hash_free_key) free); + acl_cache= new hash_filo(ACL_CACHE_SIZE, 0, 0, + (hash_get_key) acl_entry_get_key, + (hash_free_key) free, system_charset_info); if (dont_read_acl_tables) + { DBUG_RETURN(0); /* purecov: tested */ + } /* To be able to run this from boot, we allocate a temporary THD @@ -154,46 +158,83 @@ my_bool acl_init(THD *org_thd, bool dont_read_acl_tables) if (!(thd=new THD)) DBUG_RETURN(1); /* purecov: inspected */ thd->store_globals(); + /* + It is safe to call acl_reload() since acl_* arrays and hashes which + will be freed there are global static objects and thus are initialized + by zeros at startup. + */ + return_val= acl_reload(thd); + delete thd; + /* Remember that we don't have a THD */ + my_pthread_setspecific_ptr(THR_THD, 0); + DBUG_RETURN(return_val); +} + + +/* + Initialize structures responsible for user/db-level privilege checking + and load information about grants from open privilege tables. + + SYNOPSIS + acl_load() + thd Current thread + tables List containing open "mysql.host", "mysql.user" and + "mysql.db" tables. + + RETURN VALUES + FALSE Success + TRUE Error +*/ + +static my_bool acl_load(THD *thd, TABLE_LIST *tables) +{ + TABLE *table; + READ_RECORD read_record_info; + my_bool return_val= 1; + bool check_no_resolve= specialflag & SPECIAL_NO_RESOLVE; + char tmp_name[NAME_LEN+1]; + int password_length; + DBUG_ENTER("acl_load"); + + priv_version++; /* Privileges updated */ acl_cache->clear(1); // Clear locked hostname cache - thd->db= my_strdup("mysql",MYF(0)); - thd->db_length=5; // Safety - bzero((char*) &tables,sizeof(tables)); - tables[0].alias=tables[0].real_name=(char*) "host"; - tables[1].alias=tables[1].real_name=(char*) "user"; - tables[2].alias=tables[2].real_name=(char*) "db"; - tables[0].next=tables+1; - tables[1].next=tables+2; - tables[0].lock_type=tables[1].lock_type=tables[2].lock_type=TL_READ; - tables[0].db=tables[1].db=tables[2].db=thd->db; - if (open_tables(thd,tables)) - { - sql_print_error("Fatal error: Can't open privilege tables: %s", - thd->net.last_error); - goto end; - } - TABLE *ptr[3]; // Lock tables for quick update - ptr[0]= tables[0].table; - ptr[1]= tables[1].table; - ptr[2]= tables[2].table; - if (! 
(lock= mysql_lock_tables(thd, ptr, 3, 0))) - { - sql_print_error("Fatal error: Can't lock privilege tables: %s", - thd->net.last_error); - goto end; - } init_sql_alloc(&mem, ACL_ALLOC_BLOCK_SIZE, 0); init_read_record(&read_record_info,thd,table= tables[0].table,NULL,1,0); VOID(my_init_dynamic_array(&acl_hosts,sizeof(ACL_HOST),20,50)); while (!(read_record_info.read_record(&read_record_info))) { ACL_HOST host; - update_hostname(&host.host,get_field(&mem, table,0)); - host.db= get_field(&mem, table,1); - host.access= get_access(table,2,0); + update_hostname(&host.host,get_field(&mem, table->field[0])); + host.db= get_field(&mem, table->field[1]); + if (lower_case_table_names && host.db) + { + /* + We make a temporary copy of the database, force it to lower case, + and then check it against the original name. + */ + (void)strnmov(tmp_name, host.db, sizeof(tmp_name)); + my_casedn_str(files_charset_info, host.db); + if (strcmp(host.db, tmp_name) != 0) + { + sql_print_warning("'host' entry '%s|%s' had database in mixed " + "case that has been forced to lowercase because " + "lower_case_table_names is set. It will not be " + "possible to remove this privilege using REVOKE.", + host.host.hostname, host.db); + } + } + host.access= get_access(table,2); host.access= fix_rights_for_db(host.access); host.sort= get_sort(2,host.host.hostname,host.db); + if (check_no_resolve && hostname_requires_resolving(host.host.hostname)) + { + sql_print_warning("'host' entry '%s|%s' " + "ignored in --skip-name-resolve mode.", + host.host.hostname, host.db?host.db:""); + continue; + } #ifndef TO_BE_REMOVED if (table->fields == 8) { // Without grant @@ -210,91 +251,137 @@ my_bool acl_init(THD *org_thd, bool dont_read_acl_tables) init_read_record(&read_record_info,thd,table=tables[1].table,NULL,1,0); VOID(my_init_dynamic_array(&acl_users,sizeof(ACL_USER),50,100)); - if (table->field[2]->field_length == 8 && - protocol_version == PROTOCOL_VERSION) + password_length= table->field[2]->field_length / + table->field[2]->charset()->mbmaxlen; + if (password_length < SCRAMBLED_PASSWORD_CHAR_LENGTH_323) + { + sql_print_error("Fatal error: mysql.user table is damaged or in " + "unsupported 3.20 format."); + goto end; + } + + DBUG_PRINT("info",("user table fields: %d, password length: %d", + table->fields, password_length)); + + pthread_mutex_lock(&LOCK_global_system_variables); + if (password_length < SCRAMBLED_PASSWORD_CHAR_LENGTH) + { + if (opt_secure_auth) + { + pthread_mutex_unlock(&LOCK_global_system_variables); + sql_print_error("Fatal error: mysql.user table is in old format, " + "but server started with --secure-auth option."); + goto end; + } + sys_old_passwords.after_update= restrict_update_of_old_passwords_var; + if (global_system_variables.old_passwords) + pthread_mutex_unlock(&LOCK_global_system_variables); + else + { + global_system_variables.old_passwords= 1; + pthread_mutex_unlock(&LOCK_global_system_variables); + sql_print_warning("mysql.user table is not updated to new password format; " + "Disabling new password usage until " + "mysql_fix_privilege_tables is run"); + } + thd->variables.old_passwords= 1; + } + else { - sql_print_error("Old 'user' table. (Check README or the Reference manual). 
Continuing --old-protocol"); /* purecov: tested */ - protocol_version=9; /* purecov: tested */ + sys_old_passwords.after_update= 0; + pthread_mutex_unlock(&LOCK_global_system_variables); } - DBUG_PRINT("info",("user table fields: %d",table->fields)); allow_all_hosts=0; while (!(read_record_info.read_record(&read_record_info))) { ACL_USER user; - uint length=0; - update_hostname(&user.host,get_field(&mem, table,0)); - user.user=get_field(&mem, table,1); - user.password=get_field(&mem, table,2); - if (user.password && (length=(uint) strlen(user.password)) == 8 && - protocol_version == PROTOCOL_VERSION) - { - sql_print_error( - "Found old style password for user '%s'. Ignoring user. (You may want to restart mysqld using --old-protocol)", - user.user ? user.user : ""); /* purecov: tested */ - } - else if (length % 8 || length > 16) - { - sql_print_error( - "Found invalid password for user: '%s'@'%s'; Ignoring user", - user.user ? user.user : "", - user.host.hostname ? user.host.hostname : ""); /* purecov: tested */ - continue; /* purecov: tested */ - } - uint next_field; - get_salt_from_password(user.salt,user.password); - user.access=get_access(table,3,&next_field) & GLOBAL_ACLS; - user.sort=get_sort(2,user.host.hostname,user.user); - user.hostname_length= (user.host.hostname ? - (uint) strlen(user.host.hostname) : 0); - if (table->fields >= 31) /* Starting from 4.0.2 we have more fields */ - { - char *ssl_type=get_field(&mem, table, next_field++); - if (!ssl_type) - user.ssl_type=SSL_TYPE_NONE; - else if (!strcmp(ssl_type, "ANY")) - user.ssl_type=SSL_TYPE_ANY; - else if (!strcmp(ssl_type, "X509")) - user.ssl_type=SSL_TYPE_X509; - else /* !strcmp(ssl_type, "SPECIFIED") */ - user.ssl_type=SSL_TYPE_SPECIFIED; - - user.ssl_cipher= get_field(&mem, table, next_field++); - user.x509_issuer= get_field(&mem, table, next_field++); - user.x509_subject= get_field(&mem, table, next_field++); - - char *ptr = get_field(&mem, table, next_field++); - user.user_resource.questions= ptr ? atoi(ptr) : 0; - ptr = get_field(&mem, table, next_field++); - user.user_resource.updates= ptr ? atoi(ptr): 0; - ptr = get_field(&mem, table, next_field++); - user.user_resource.connections=ptr ? atoi(ptr) : 0; - if (user.user_resource.questions || user.user_resource.updates || - user.user_resource.connections) - mqh_used=1; + update_hostname(&user.host, get_field(&mem, table->field[0])); + user.user= get_field(&mem, table->field[1]); + if (check_no_resolve && hostname_requires_resolving(user.host.hostname)) + { + sql_print_warning("'user' entry '%s@%s' " + "ignored in --skip-name-resolve mode.", + user.user, user.host.hostname); + continue; } - else + + const char *password= get_field(&mem, table->field[2]); + uint password_len= password ? strlen(password) : 0; + set_user_salt(&user, password, password_len); + if (user.salt_len == 0 && password_len != 0) { - user.ssl_type=SSL_TYPE_NONE; - bzero((char *)&(user.user_resource),sizeof(user.user_resource)); -#ifndef TO_BE_REMOVED - if (table->fields <= 13) - { // Without grant - if (user.access & CREATE_ACL) - user.access|=REFERENCES_ACL | INDEX_ACL | ALTER_ACL; + switch (password_len) { + case 45: /* 4.1: to be removed */ + sql_print_warning("Found 4.1 style password for user '%s@%s'. " + "Ignoring user. " + "You should change password for this user.", + user.user ? user.user : "", + user.host.hostname ? user.host.hostname : ""); + break; + default: + sql_print_warning("Found invalid password for user: '%s@%s'; " + "Ignoring user", user.user ? 
user.user : "", + user.host.hostname ? user.host.hostname : ""); + break; } - /* Convert old privileges */ - user.access|= LOCK_TABLES_ACL | CREATE_TMP_ACL | SHOW_DB_ACL; - if (user.access & FILE_ACL) - user.access|= REPL_CLIENT_ACL | REPL_SLAVE_ACL; - if (user.access & PROCESS_ACL) - user.access|= SUPER_ACL | EXECUTE_ACL; + } + else // password is correct + { + uint next_field; + user.access= get_access(table,3,&next_field) & GLOBAL_ACLS; + user.sort= get_sort(2,user.host.hostname,user.user); + user.hostname_length= (user.host.hostname ? + (uint) strlen(user.host.hostname) : 0); + if (table->fields >= 31) /* Starting from 4.0.2 we have more fields */ + { + char *ssl_type=get_field(&mem, table->field[next_field++]); + if (!ssl_type) + user.ssl_type=SSL_TYPE_NONE; + else if (!strcmp(ssl_type, "ANY")) + user.ssl_type=SSL_TYPE_ANY; + else if (!strcmp(ssl_type, "X509")) + user.ssl_type=SSL_TYPE_X509; + else /* !strcmp(ssl_type, "SPECIFIED") */ + user.ssl_type=SSL_TYPE_SPECIFIED; + + user.ssl_cipher= get_field(&mem, table->field[next_field++]); + user.x509_issuer= get_field(&mem, table->field[next_field++]); + user.x509_subject= get_field(&mem, table->field[next_field++]); + + char *ptr = get_field(&mem, table->field[next_field++]); + user.user_resource.questions=ptr ? atoi(ptr) : 0; + ptr = get_field(&mem, table->field[next_field++]); + user.user_resource.updates=ptr ? atoi(ptr) : 0; + ptr = get_field(&mem, table->field[next_field++]); + user.user_resource.connections=ptr ? atoi(ptr) : 0; + if (user.user_resource.questions || user.user_resource.updates || + user.user_resource.connections) + mqh_used=1; + } + else + { + user.ssl_type=SSL_TYPE_NONE; + bzero((char *)&(user.user_resource),sizeof(user.user_resource)); +#ifndef TO_BE_REMOVED + if (table->fields <= 13) + { // Without grant + if (user.access & CREATE_ACL) + user.access|=REFERENCES_ACL | INDEX_ACL | ALTER_ACL; + } + /* Convert old privileges */ + user.access|= LOCK_TABLES_ACL | CREATE_TMP_ACL | SHOW_DB_ACL; + if (user.access & FILE_ACL) + user.access|= REPL_CLIENT_ACL | REPL_SLAVE_ACL; + if (user.access & PROCESS_ACL) + user.access|= SUPER_ACL | EXECUTE_ACL; #endif + } + VOID(push_dynamic(&acl_users,(gptr) &user)); + if (!user.host.hostname || user.host.hostname[0] == wild_many && + !user.host.hostname[1]) + allow_all_hosts=1; // Anyone can connect } - VOID(push_dynamic(&acl_users,(gptr) &user)); - if (!user.host.hostname || user.host.hostname[0] == wild_many && - !user.host.hostname[1]) - allow_all_hosts=1; // Anyone can connect } qsort((gptr) dynamic_element(&acl_users,0,ACL_USER*),acl_users.elements, sizeof(ACL_USER),(qsort_cmp) acl_compare); @@ -306,16 +393,40 @@ my_bool acl_init(THD *org_thd, bool dont_read_acl_tables) while (!(read_record_info.read_record(&read_record_info))) { ACL_DB db; - update_hostname(&db.host,get_field(&mem, table,0)); - db.db=get_field(&mem, table,1); + update_hostname(&db.host,get_field(&mem, table->field[0])); + db.db=get_field(&mem, table->field[1]); if (!db.db) { - sql_print_error("Found an entry in the 'db' table with empty database name; Skipped"); + sql_print_warning("Found an entry in the 'db' table with empty database name; Skipped"); + continue; + } + db.user=get_field(&mem, table->field[2]); + if (check_no_resolve && hostname_requires_resolving(db.host.hostname)) + { + sql_print_warning("'db' entry '%s %s@%s' " + "ignored in --skip-name-resolve mode.", + db.db, db.user, db.host.hostname); continue; } - db.user=get_field(&mem, table,2); - db.access=get_access(table,3,0); + 
db.access=get_access(table,3); db.access=fix_rights_for_db(db.access); + if (lower_case_table_names) + { + /* + We make a temporary copy of the database, force it to lower case, + and then check it against the original name. + */ + (void)strnmov(tmp_name, db.db, sizeof(tmp_name)); + my_casedn_str(files_charset_info, db.db); + if (strcmp(db.db, tmp_name) != 0) + { + sql_print_warning("'db' entry '%s %s@%s' had database in mixed " + "case that has been forced to lowercase because " + "lower_case_table_names is set. It will not be " + "possible to remove this privilege using REVOKE.", + db.db, db.user, db.host.hostname, db.host.hostname); + } + } db.sort=get_sort(3,db.host.hostname,db.db,db.user); #ifndef TO_BE_REMOVED if (table->fields <= 9) @@ -332,21 +443,10 @@ my_bool acl_init(THD *org_thd, bool dont_read_acl_tables) freeze_size(&acl_dbs); init_check_host(); - mysql_unlock_tables(thd, lock); initialized=1; - thd->version--; // Force close to free memory return_val=0; end: - close_thread_tables(thd); - delete thd; - if (org_thd) - org_thd->store_globals(); /* purecov: inspected */ - else - { - /* Remember that we don't have a THD */ - my_pthread_setspecific_ptr(THR_THD, 0); - } DBUG_RETURN(return_val); } @@ -370,26 +470,60 @@ void acl_free(bool end) /* - Forget current privileges and read new privileges from the privilege tables + Forget current user/db-level privileges and read new privileges + from the privilege tables. SYNOPSIS acl_reload() - thd Thread handle + thd Current thread + + NOTE + All tables of calling thread which were open and locked by LOCK TABLES + statement will be unlocked and closed. + This function is also used for initialization of structures responsible + for user/db-level privilege checking. + + RETURN VALUE + FALSE Success + TRUE Failure */ -void acl_reload(THD *thd) +my_bool acl_reload(THD *thd) { + TABLE_LIST tables[3]; DYNAMIC_ARRAY old_acl_hosts,old_acl_users,old_acl_dbs; MEM_ROOT old_mem; bool old_initialized; + my_bool return_val= 1; DBUG_ENTER("acl_reload"); - if (thd && thd->locked_tables) + if (thd->locked_tables) { // Can't have locked tables here thd->lock=thd->locked_tables; thd->locked_tables=0; close_thread_tables(thd); } + + /* + To avoid deadlocks we should obtain table locks before + obtaining acl_cache->lock mutex. + */ + bzero((char*) tables, sizeof(tables)); + tables[0].alias=tables[0].real_name=(char*) "host"; + tables[1].alias=tables[1].real_name=(char*) "user"; + tables[2].alias=tables[2].real_name=(char*) "db"; + tables[0].db=tables[1].db=tables[2].db= (char*) "mysql"; + tables[0].next= tables+1; + tables[1].next= tables+2; + tables[0].lock_type=tables[1].lock_type=tables[2].lock_type=TL_READ; + + if (simple_open_n_lock_tables(thd, tables)) + { + sql_print_error("Fatal error: Can't open and lock privilege tables: %s", + thd->net.last_error); + goto end; + } + if ((old_initialized=initialized)) VOID(pthread_mutex_lock(&acl_cache->lock)); @@ -400,8 +534,9 @@ void acl_reload(THD *thd) delete_dynamic(&acl_wild_hosts); hash_free(&acl_check_hosts); - if (acl_init(thd, 0)) + if ((return_val= acl_load(thd, tables))) { // Error. 
Revert to old list + DBUG_PRINT("error",("Reverting to old privileges")); acl_free(); /* purecov: inspected */ acl_hosts=old_acl_hosts; acl_users=old_acl_users; @@ -418,7 +553,9 @@ } if (old_initialized) VOID(pthread_mutex_unlock(&acl_cache->lock)); - DBUG_VOID_RETURN; +end: + close_thread_tables(thd); + DBUG_RETURN(return_val); } @@ -445,7 +582,7 @@ static ulong get_access(TABLE *form, uint fieldnr, uint *next_field) { ulong access_bits=0,bit; char buff[2]; - String res(buff,sizeof(buff)); + String res(buff,sizeof(buff),&my_charset_latin1); Field **pos; for (pos=form->field+fieldnr, bit=1; @@ -453,8 +590,8 @@ static ulong get_access(TABLE *form, uint fieldnr, uint *next_field) ((Field_enum*) (*pos))->typelib->count == 2 ; pos++, fieldnr++, bit<<=1) { - (*pos)->val_str(&res,&res); - if (toupper(res[0]) == 'Y') + (*pos)->val_str(&res); + if (my_toupper(&my_charset_latin1, res[0]) == 'Y') access_bits|= bit; } if (next_field) @@ -491,15 +628,13 @@ static ulong get_sort(uint count,...) { if (*str == wild_many || *str == wild_one || *str == wild_prefix) { - wild_pos= str - start + 1; + wild_pos= (uint) (str - start) + 1; break; } - else - chars++; + chars= 128; // Marker that chars existed } } - sort= (sort << 8) + (wild_pos ? (wild_pos > 127 ? 127 : wild_pos) : - (chars ? 128 : 0)); + sort= (sort << 8) + (wild_pos ? min(wild_pos, 127) : chars); } va_end(args); return sort; @@ -517,181 +652,231 @@ static int acl_compare(ACL_ACCESS *a,ACL_ACCESS *b) { /* - Get master privilges for user (priviliges for all tables). - Required before connecting to MySQL + Seek ACL entry for a user, check password, SSL cipher, and if + everything is OK, update THD user data and USER_RESOURCES struct. + + IMPLEMENTATION + This function does not check if the user has any sensible privileges: + only user's existence and validity is checked. + Note, that entire operation is protected by acl_cache_lock. + + SYNOPSIS + acl_getroot() + thd thread handle. If all checks are OK, + thd->priv_user, thd->master_access are updated. + thd->host, thd->ip, thd->user are used for checks. + mqh user resources; on success mqh is reset, else + unchanged + passwd scrambled & crypted password, received from client + (to check): thd->scramble or thd->scramble_323 is + used to decrypt passwd, so they must contain + original random string, + passwd_len length of passwd, must be one of 0, 8, + SCRAMBLE_LENGTH_323, SCRAMBLE_LENGTH + 'thd' and 'mqh' are updated on success; other params are IN. + + RETURN VALUE + 0 success: thd->priv_user, thd->priv_host, thd->master_access, mqh are + updated + 1 user not found or authentication failure + 2 user found, has long (4.1.1) salt, but passwd is in old (3.23) format. + -1 user found, has short (3.23) salt, but passwd is in new (4.1.1) format. */ -ulong acl_getroot(THD *thd, const char *host, const char *ip, const char *user, - const char *password,const char *message, - char **priv_user, char *priv_host, - bool old_ver, USER_RESOURCES *mqh) +int acl_getroot(THD *thd, USER_RESOURCES *mqh, + const char *passwd, uint passwd_len) { - ulong user_access=NO_ACCESS; - *priv_user=(char*) user; + ulong user_access= NO_ACCESS; + int res= 1; + ACL_USER *acl_user= 0; DBUG_ENTER("acl_getroot"); - bzero((char *)mqh,sizeof(USER_RESOURCES)); if (!initialized) { - // If no data allow anything - DBUG_RETURN((ulong) ~NO_ACCESS); /* purecov: tested */ + /* + here if mysqld's been started with --skip-grant-tables option.
+ */ + thd->priv_user= (char *) ""; // privileges for + *thd->priv_host= '\0'; // the user are unknown + thd->master_access= ~NO_ACCESS; // everything is allowed + bzero((char*) mqh, sizeof(*mqh)); + DBUG_RETURN(0); } + VOID(pthread_mutex_lock(&acl_cache->lock)); /* - Get possible access from user_list. This is or'ed to others not - fully specified + Find acl entry in user database. Note, that find_acl_user is not the same, + because it doesn't take into account the case when user is not empty, + but acl_user->user is empty */ + for (uint i=0 ; i < acl_users.elements ; i++) { - ACL_USER *acl_user=dynamic_element(&acl_users,i,ACL_USER*); - if (!acl_user->user || !strcmp(user,acl_user->user)) + ACL_USER *acl_user_tmp= dynamic_element(&acl_users,i,ACL_USER*); + if (!acl_user_tmp->user || !strcmp(thd->user, acl_user_tmp->user)) { - if (compare_hostname(&acl_user->host,host,ip)) + if (compare_hostname(&acl_user_tmp->host, thd->host, thd->ip)) { - if (!acl_user->password && !*password || - (acl_user->password && *password && - !check_scramble(password,message,acl_user->salt, - (my_bool) old_ver))) - { - Vio *vio=thd->net.vio; + /* check password: it should be empty or valid */ + if (passwd_len == acl_user_tmp->salt_len) + { + if (acl_user_tmp->salt_len == 0 || + (acl_user_tmp->salt_len == SCRAMBLE_LENGTH ? + check_scramble(passwd, thd->scramble, acl_user_tmp->salt) : + check_scramble_323(passwd, thd->scramble, + (ulong *) acl_user_tmp->salt)) == 0) + { + acl_user= acl_user_tmp; + res= 0; + } + } + else if (passwd_len == SCRAMBLE_LENGTH && + acl_user_tmp->salt_len == SCRAMBLE_LENGTH_323) + res= -1; + else if (passwd_len == SCRAMBLE_LENGTH_323 && + acl_user_tmp->salt_len == SCRAMBLE_LENGTH) + res= 2; + /* linear search complete: */ + break; + } + } + } + /* + This was moved to separate tree because of heavy HAVE_OPENSSL case. + If acl_user is not null, res is 0. + */ + + if (acl_user) + { + /* OK. User found and password checked continue validation */ #ifdef HAVE_OPENSSL - SSL *ssl= (SSL*) vio->ssl_arg; + Vio *vio=thd->net.vio; + SSL *ssl= (SSL*) vio->ssl_arg; #endif - /* - In this point we know that user is allowed to connect - from given host by given username/password pair. Now - we check if SSL is required, if user is using SSL and - if X509 certificate attributes are OK - */ - switch (acl_user->ssl_type) { - case SSL_TYPE_NOT_SPECIFIED: // Impossible - case SSL_TYPE_NONE: /* SSL is not required to connect */ - user_access=acl_user->access; - break; + + /* + At this point we know that user is allowed to connect + from given host by given username/password pair. Now + we check if SSL is required, if user is using SSL and + if X509 certificate attributes are OK + */ + switch (acl_user->ssl_type) { + case SSL_TYPE_NOT_SPECIFIED: // Impossible + case SSL_TYPE_NONE: // SSL is not required + user_access= acl_user->access; + break; #ifdef HAVE_OPENSSL - case SSL_TYPE_ANY: /* Any kind of SSL is good enough */ - if (vio_type(vio) == VIO_TYPE_SSL) - user_access=acl_user->access; - break; - case SSL_TYPE_X509: /* Client should have any valid certificate. */ - /* - We need to check for absence of SSL because without SSL - we should reject connection. - */ - if (vio_type(vio) == VIO_TYPE_SSL && - SSL_get_verify_result(ssl) == X509_V_OK && - SSL_get_peer_certificate(ssl)) - user_access=acl_user->access; - break; - case SSL_TYPE_SPECIFIED: /* Client should have specified attrib */ - /* - We need to check for absence of SSL because without SSL - we should reject connection. 
- */ - if (vio_type(vio) == VIO_TYPE_SSL && - SSL_get_verify_result(ssl) == X509_V_OK) - { - if (acl_user->ssl_cipher) - { - DBUG_PRINT("info",("comparing ciphers: '%s' and '%s'", - acl_user->ssl_cipher, - SSL_get_cipher(ssl))); - if (!strcmp(acl_user->ssl_cipher,SSL_get_cipher(ssl))) - user_access=acl_user->access; - else - { - if (global_system_variables.log_warnings) - sql_print_error("X509 ciphers mismatch: should be '%s' but is '%s'", - acl_user->ssl_cipher, - SSL_get_cipher(ssl)); - user_access=NO_ACCESS; - break; - } - } - /* Prepare certificate (if exists) */ - DBUG_PRINT("info",("checkpoint 1")); - X509* cert=SSL_get_peer_certificate(ssl); - if (!cert) - { - user_access=NO_ACCESS; - break; - } - DBUG_PRINT("info",("checkpoint 2")); - /* If X509 issuer is speified, we check it... */ - if (acl_user->x509_issuer) - { - DBUG_PRINT("info",("checkpoint 3")); - char *ptr = X509_NAME_oneline(X509_get_issuer_name(cert), 0, 0); - DBUG_PRINT("info",("comparing issuers: '%s' and '%s'", - acl_user->x509_issuer, ptr)); - if (strcmp(acl_user->x509_issuer, ptr)) - { - if (global_system_variables.log_warnings) - sql_print_error("X509 issuer mismatch: should be '%s' but is '%s'", - acl_user->x509_issuer, ptr); - user_access=NO_ACCESS; - free(ptr); - break; - } - user_access=acl_user->access; - free(ptr); - } - DBUG_PRINT("info",("checkpoint 4")); - /* X509 subject is specified, we check it .. */ - if (acl_user->x509_subject) - { - char *ptr= X509_NAME_oneline(X509_get_subject_name(cert), 0, 0); - DBUG_PRINT("info",("comparing subjects: '%s' and '%s'", - acl_user->x509_subject, ptr)); - if (strcmp(acl_user->x509_subject,ptr)) - { - if (global_system_variables.log_warnings) - sql_print_error("X509 subject mismatch: '%s' vs '%s'", - acl_user->x509_subject, ptr); - user_access=NO_ACCESS; - } - else - user_access=acl_user->access; - free(ptr); - } - break; - } -#else /* HAVE_OPENSSL */ - default: - /* - If we don't have SSL but SSL is required for this user the - authentication should fail. - */ - break; -#endif /* HAVE_OPENSSL */ - } - - *mqh=acl_user->user_resource; - if (!acl_user->user) - *priv_user=(char*) ""; // Change to anonymous user /* purecov: inspected */ - if (acl_user->host.hostname) - strmake(priv_host, acl_user->host.hostname, MAX_HOSTNAME); - else - *priv_host= 0; + case SSL_TYPE_ANY: // Any kind of SSL is ok + if (vio_type(vio) == VIO_TYPE_SSL) + user_access= acl_user->access; + break; + case SSL_TYPE_X509: /* Client should have any valid certificate. */ + /* + Connections with non-valid certificates are dropped already + in sslaccept() anyway, so we do not check validity here. + + We need to check for absence of SSL because without SSL + we should reject connection. + */ + if (vio_type(vio) == VIO_TYPE_SSL && + SSL_get_verify_result(ssl) == X509_V_OK && + SSL_get_peer_certificate(ssl)) + user_access= acl_user->access; + break; + case SSL_TYPE_SPECIFIED: /* Client should have specified attrib */ + /* + We do not check for absence of SSL because without SSL it does + not pass all checks here anyway. + If cipher name is specified, we compare it to actual cipher in + use. 
+ */ + X509 *cert; + if (vio_type(vio) != VIO_TYPE_SSL || + SSL_get_verify_result(ssl) != X509_V_OK) + break; + if (acl_user->ssl_cipher) + { + DBUG_PRINT("info",("comparing ciphers: '%s' and '%s'", + acl_user->ssl_cipher,SSL_get_cipher(ssl))); + if (!strcmp(acl_user->ssl_cipher,SSL_get_cipher(ssl))) + user_access= acl_user->access; + else + { + if (global_system_variables.log_warnings) + sql_print_information("X509 ciphers mismatch: should be '%s' but is '%s'", + acl_user->ssl_cipher, + SSL_get_cipher(ssl)); break; } -#ifndef ALLOW_DOWNGRADE_OF_USERS - break; // Wrong password breaks loop /* purecov: inspected */ -#endif } + /* Prepare certificate (if exists) */ + DBUG_PRINT("info",("checkpoint 1")); + if (!(cert= SSL_get_peer_certificate(ssl))) + { + user_access=NO_ACCESS; + break; + } + DBUG_PRINT("info",("checkpoint 2")); + /* If X509 issuer is specified, we check it... */ + if (acl_user->x509_issuer) + { + DBUG_PRINT("info",("checkpoint 3")); + char *ptr = X509_NAME_oneline(X509_get_issuer_name(cert), 0, 0); + DBUG_PRINT("info",("comparing issuers: '%s' and '%s'", + acl_user->x509_issuer, ptr)); + if (strcmp(acl_user->x509_issuer, ptr)) + { + if (global_system_variables.log_warnings) + sql_print_information("X509 issuer mismatch: should be '%s' " + "but is '%s'", acl_user->x509_issuer, ptr); + free(ptr); + break; + } + user_access= acl_user->access; + free(ptr); + } + DBUG_PRINT("info",("checkpoint 4")); + /* X509 subject is specified, we check it .. */ + if (acl_user->x509_subject) + { + char *ptr= X509_NAME_oneline(X509_get_subject_name(cert), 0, 0); + DBUG_PRINT("info",("comparing subjects: '%s' and '%s'", + acl_user->x509_subject, ptr)); + if (strcmp(acl_user->x509_subject,ptr)) + { + if (global_system_variables.log_warnings) + sql_print_information("X509 subject mismatch: '%s' vs '%s'", + acl_user->x509_subject, ptr); + } + else + user_access= acl_user->access; + free(ptr); + } + break; +#else /* HAVE_OPENSSL */ + default: + /* + If we don't have SSL but SSL is required for this user the + authentication should fail. + */ + break; +#endif /* HAVE_OPENSSL */ } + thd->master_access= user_access; + thd->priv_user= acl_user->user ?
thd->user : (char *) ""; + *mqh= acl_user->user_resource; + + if (acl_user->host.hostname) + strmake(thd->priv_host, acl_user->host.hostname, MAX_HOSTNAME); + else + *thd->priv_host= 0; } VOID(pthread_mutex_unlock(&acl_cache->lock)); - DBUG_RETURN(user_access); + DBUG_RETURN(res); } -/* -** Functions to add and change user and database privileges when one -** changes things with GRANT -*/ - static byte* check_get_key(ACL_USER *buff,uint *length, my_bool not_used __attribute__((unused))) { @@ -699,8 +884,9 @@ static byte* check_get_key(ACL_USER *buff,uint *length, return (byte*) buff->host.hostname; } + static void acl_update_user(const char *user, const char *host, - const char *password, + const char *password, uint password_len, enum SSL_type ssl_type, const char *ssl_cipher, const char *x509_issuer, @@ -717,7 +903,7 @@ static void acl_update_user(const char *user, const char *host, { if (!acl_user->host.hostname && !host[0] || acl_user->host.hostname && - !my_strcasecmp(host,acl_user->host.hostname)) + !my_strcasecmp(system_charset_info, host, acl_user->host.hostname)) { acl_user->access=privileges; if (mqh->bits & 1) @@ -737,15 +923,8 @@ static void acl_update_user(const char *user, const char *host, strdup_root(&mem,x509_subject) : 0); } if (password) - { - if (!password[0]) - acl_user->password=0; - else - { - acl_user->password=(char*) ""; // Just point at something - get_salt_from_password(acl_user->salt,password); - } - } + set_user_salt(acl_user, password, password_len); + /* search complete: */ break; } } @@ -754,7 +933,7 @@ static void acl_update_user(const char *user, const char *host, static void acl_insert_user(const char *user, const char *host, - const char *password, + const char *password, uint password_len, enum SSL_type ssl_type, const char *ssl_cipher, const char *x509_issuer, @@ -764,8 +943,7 @@ static void acl_insert_user(const char *user, const char *host, { ACL_USER acl_user; acl_user.user=*user ? strdup_root(&mem,user) : 0; - update_hostname(&acl_user.host, *host ? strdup_root(&mem,host): 0); - acl_user.password=0; + update_hostname(&acl_user.host, *host ? strdup_root(&mem, host): 0); acl_user.access=privileges; acl_user.user_resource = *mqh; acl_user.sort=get_sort(2,acl_user.host.hostname,acl_user.user); @@ -775,16 +953,13 @@ static void acl_insert_user(const char *user, const char *host, acl_user.ssl_cipher= ssl_cipher ? strdup_root(&mem,ssl_cipher) : 0; acl_user.x509_issuer= x509_issuer ? strdup_root(&mem,x509_issuer) : 0; acl_user.x509_subject=x509_subject ? 
strdup_root(&mem,x509_subject) : 0; - if (password) - { - acl_user.password=(char*) ""; // Just point at something - get_salt_from_password(acl_user.salt,password); - } + + set_user_salt(&acl_user, password, password_len); VOID(push_dynamic(&acl_users,(gptr) &acl_user)); if (!acl_user.host.hostname || acl_user.host.hostname[0] == wild_many && !acl_user.host.hostname[1]) - allow_all_hosts=1; // Anyone can connect /* purecov: tested */ + allow_all_hosts=1; // Anyone can connect /* purecov: tested */ qsort((gptr) dynamic_element(&acl_users,0,ACL_USER*),acl_users.elements, sizeof(ACL_USER),(qsort_cmp) acl_compare); @@ -806,7 +981,8 @@ static void acl_update_db(const char *user, const char *host, const char *db, !strcmp(user,acl_db->user)) { if (!acl_db->host.hostname && !host[0] || - acl_db->host.hostname && !my_strcasecmp(host,acl_db->host.hostname)) + acl_db->host.hostname && + !my_strcasecmp(system_charset_info, host, acl_db->host.hostname)) { if (!acl_db->db && !db[0] || acl_db->db && !strcmp(db,acl_db->db)) @@ -860,21 +1036,20 @@ static void acl_insert_db(const char *user, const char *host, const char *db, acl_cache is not used if db_is_pattern is set. */ -ulong acl_get(const char *host, const char *ip, const char *bin_ip, - const char *user, const char *db, my_bool db_is_pattern) +ulong acl_get(const char *host, const char *ip, + const char *user, const char *db, my_bool db_is_pattern) { - ulong host_access,db_access; + ulong host_access= ~(ulong)0,db_access= 0; uint i,key_length; - db_access=0; host_access= ~0; char key[ACL_KEY_LENGTH],*tmp_db,*end; acl_entry *entry; + DBUG_ENTER("acl_get"); VOID(pthread_mutex_lock(&acl_cache->lock)); - memcpy_fixed(&key,bin_ip,sizeof(struct in_addr)); - end=strmov((tmp_db=strmov(key+sizeof(struct in_addr),user)+1),db); + end=strmov((tmp_db=strmov(strmov(key, ip ? ip : "")+1,user)+1),db); if (lower_case_table_names) { - casedn_str(tmp_db); + my_casedn_str(files_charset_info, tmp_db); db=tmp_db; } key_length=(uint) (end-key); @@ -882,7 +1057,8 @@ ulong acl_get(const char *host, const char *ip, const char *bin_ip, { db_access=entry->access; VOID(pthread_mutex_unlock(&acl_cache->lock)); - return db_access; + DBUG_PRINT("exit", ("access: 0x%lx", db_access)); + DBUG_RETURN(db_access); } /* @@ -917,7 +1093,7 @@ ulong acl_get(const char *host, const char *ip, const char *bin_ip, ACL_HOST *acl_host=dynamic_element(&acl_hosts,i,ACL_HOST*); if (compare_hostname(&acl_host->host,host,ip)) { - if (!acl_host->db || !wild_compare(db,acl_host->db,0)) + if (!acl_host->db || !wild_compare(db,acl_host->db,db_is_pattern)) { host_access=acl_host->access; // Fully specified. Take it break; @@ -935,53 +1111,10 @@ exit: acl_cache->add(entry); } VOID(pthread_mutex_unlock(&acl_cache->lock)); - return (db_access & host_access); -} - - -int wild_case_compare(const char *str,const char *wildstr) -{ - reg3 int flag; - DBUG_ENTER("wild_case_compare"); - DBUG_PRINT("enter",("str: '%s' wildstr: '%s'",str,wildstr)); - while (*wildstr) - { - while (*wildstr && *wildstr != wild_many && *wildstr != wild_one) - { - if (*wildstr == wild_prefix && wildstr[1]) - wildstr++; - if (toupper(*wildstr++) != toupper(*str++)) DBUG_RETURN(1); - } - if (! *wildstr ) DBUG_RETURN (*str != 0); - if (*wildstr++ == wild_one) - { - if (! 
*str++) DBUG_RETURN (1); /* One char; skip */ - } - else - { /* Found '*' */ - if (!*wildstr) DBUG_RETURN(0); /* '*' as last char: OK */ - flag=(*wildstr != wild_many && *wildstr != wild_one); - do - { - if (flag) - { - char cmp; - if ((cmp= *wildstr) == wild_prefix && wildstr[1]) - cmp=wildstr[1]; - cmp=toupper(cmp); - while (*str && toupper(*str) != cmp) - str++; - if (!*str) DBUG_RETURN (1); - } - if (wild_case_compare(str,wildstr) == 0) DBUG_RETURN (0); - } while (*str++); - DBUG_RETURN(1); - } - } - DBUG_RETURN (*str != '\0'); + DBUG_PRINT("exit", ("access: 0x%lx", db_access & host_access)); + DBUG_RETURN(db_access & host_access); } - /* Check if there are any possible matching entries for this host @@ -995,8 +1128,8 @@ static void init_check_host(void) DBUG_ENTER("init_check_host"); VOID(my_init_dynamic_array(&acl_wild_hosts,sizeof(struct acl_host_and_ip), acl_users.elements,1)); - VOID(hash_init(&acl_check_hosts,acl_users.elements,0,0, - (hash_get_key) check_get_key,0,HASH_CASE_INSENSITIVE)); + VOID(hash_init(&acl_check_hosts,system_charset_info,acl_users.elements,0,0, + (hash_get_key) check_get_key,0,0)); if (!allow_all_hosts) { for (uint i=0 ; i < acl_users.elements ; i++) @@ -1011,16 +1144,17 @@ static void init_check_host(void) { // Check if host already exists acl_host_and_ip *acl=dynamic_element(&acl_wild_hosts,j, acl_host_and_ip *); - if (!my_strcasecmp(acl_user->host.hostname,acl->hostname)) + if (!my_strcasecmp(system_charset_info, + acl_user->host.hostname, acl->hostname)) break; // already stored } if (j == acl_wild_hosts.elements) // If new (void) push_dynamic(&acl_wild_hosts,(char*) &acl_user->host); } - else if (!hash_search(&acl_check_hosts,(byte*) &acl_user->host, + else if (!hash_search(&acl_check_hosts,(byte*) acl_user->host.hostname, (uint) strlen(acl_user->host.hostname))) { - if (hash_insert(&acl_check_hosts,(byte*) acl_user)) + if (my_hash_insert(&acl_check_hosts,(byte*) acl_user)) { // End of memory allow_all_hosts=1; // Should never happen DBUG_VOID_RETURN; @@ -1070,31 +1204,46 @@ bool acl_check_host(const char *host, const char *ip) thd THD host hostname for the user user user name + new_password new password + + NOTE: + new_password cannot be NULL RETURN VALUE 0 OK 1 ERROR ; In this case the error is sent to the client. 
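The check_change_password() body that follows accepts a new password hash only when it is empty or has one of the two lengths the server recognises, SCRAMBLED_PASSWORD_CHAR_LENGTH for the 4.1 scramble and SCRAMBLED_PASSWORD_CHAR_LENGTH_323 for the old format. A minimal standalone sketch of that acceptance test; the literal lengths 41 and 16 are assumptions used for illustration, not values quoted from the patch:

#include <cstring>
#include <cstddef>

// Stand-ins for the server's hash-length constants (assumed values):
// 41 = '*' followed by 40 hex digits, 16 = the pre-4.1 hash format.
static const std::size_t HASH_LEN_NEW= 41;
static const std::size_t HASH_LEN_OLD= 16;

// True when the supplied hash may be stored: an empty password, or a string
// of one of the two recognised lengths; anything else is rejected with
// "Password hash should be a %d-digit hexadecimal number".
static bool acceptable_password_hash(const char *new_password)
{
  std::size_t len= std::strlen(new_password);  // caller guarantees non-NULL (see NOTE above)
  return len == 0 || len == HASH_LEN_NEW || len == HASH_LEN_OLD;
}

int main()
{
  return acceptable_password_hash("") ? 0 : 1;   // empty password is always allowed
}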
*/ -bool check_change_password(THD *thd, const char *host, const char *user) +bool check_change_password(THD *thd, const char *host, const char *user, + char *new_password, uint new_password_len) { if (!initialized) { - send_error(&thd->net, ER_PASSWORD_NOT_ALLOWED); /* purecov: inspected */ - return(1); /* purecov: inspected */ + net_printf(thd,ER_OPTION_PREVENTS_STATEMENT, + "--skip-grant-tables"); + return(1); } if (!thd->slave_thread && (strcmp(thd->user,user) || - my_strcasecmp(host,thd->host_or_ip))) + my_strcasecmp(system_charset_info, host, thd->priv_host))) { - if (check_access(thd, UPDATE_ACL, "mysql",0,1)) + if (check_access(thd, UPDATE_ACL, "mysql",0,1,0)) return(1); } if (!thd->slave_thread && !thd->user[0]) { - send_error(&thd->net, ER_PASSWORD_ANONYMOUS_USER); + send_error(thd, ER_PASSWORD_ANONYMOUS_USER); return(1); } + uint len=strlen(new_password); + if (len && len != SCRAMBLED_PASSWORD_CHAR_LENGTH && + len != SCRAMBLED_PASSWORD_CHAR_LENGTH_323) + { + net_printf(thd, 0, + "Password hash should be a %d-digit hexadecimal number", + SCRAMBLED_PASSWORD_CHAR_LENGTH); + return -1; + } return(0); } @@ -1117,52 +1266,86 @@ bool check_change_password(THD *thd, const char *host, const char *user) bool change_password(THD *thd, const char *host, const char *user, char *new_password) { - uint length=0; + TABLE_LIST tables; + TABLE *table; + /* Buffer should be extended when password length is extended. */ + char buff[512]; + ulong query_length; + uint new_password_len= strlen(new_password); + bool result= 1; DBUG_ENTER("change_password"); DBUG_PRINT("enter",("host: '%s' user: '%s' new_password: '%s'", host,user,new_password)); DBUG_ASSERT(host != 0); // Ensured by parent - length=(uint) strlen(new_password); - new_password[length & 16]=0; + if (check_change_password(thd, host, user, new_password, new_password_len)) + DBUG_RETURN(1); + + bzero((char*) &tables, sizeof(tables)); + tables.alias=tables.real_name= (char*) "user"; + tables.db= (char*) "mysql"; + +#ifdef HAVE_REPLICATION + /* + GRANT and REVOKE are applied the slave in/exclusion rules as they are + some kind of updates to the mysql.% tables. + */ + if (thd->slave_thread && table_rules_on) + { + /* + The tables must be marked "updating" so that tables_ok() takes them into + account in tests. It's ok to leave 'updating' set after tables_ok. + */ + tables.updating= 1; + /* Thanks to bzero, tables.next==0 */ + if (!tables_ok(0, &tables)) + DBUG_RETURN(0); + } +#endif + + if (!(table= open_ltable(thd, &tables, TL_WRITE))) + DBUG_RETURN(1); VOID(pthread_mutex_lock(&acl_cache->lock)); ACL_USER *acl_user; - if (!(acl_user= find_acl_user(host,user))) + if (!(acl_user= find_acl_user(host, user, TRUE))) { - send_error(&thd->net, ER_PASSWORD_NO_MATCH); VOID(pthread_mutex_unlock(&acl_cache->lock)); - DBUG_RETURN(1); + send_error(thd, ER_PASSWORD_NO_MATCH); + goto end; } - if (update_user_table(thd, + /* update loaded acl entry: */ + set_user_salt(acl_user, new_password, new_password_len); + + if (update_user_table(thd, table, acl_user->host.hostname ? acl_user->host.hostname : "", acl_user->user ? 
acl_user->user : "", - new_password)) + new_password, new_password_len)) { VOID(pthread_mutex_unlock(&acl_cache->lock)); /* purecov: deadcode */ - send_error(&thd->net,0); /* purecov: deadcode */ - DBUG_RETURN(1); /* purecov: deadcode */ + send_error(thd,0); /* purecov: deadcode */ + goto end; } - get_salt_from_password(acl_user->salt,new_password); - if (!new_password[0]) - acl_user->password=0; - else - acl_user->password=(char*) ""; // Point at something + acl_cache->clear(1); // Clear locked hostname cache VOID(pthread_mutex_unlock(&acl_cache->lock)); - - char buff[460]; - ulong query_length= + result= 0; + query_length= my_sprintf(buff, (buff,"SET PASSWORD FOR \"%-.120s\"@\"%-.120s\"=\"%-.120s\"", acl_user->user ? acl_user->user : "", acl_user->host.hostname ? acl_user->host.hostname : "", new_password)); - thd->clear_error(); mysql_update_log.write(thd, buff, query_length); - Query_log_event qinfo(thd, buff, query_length, 0); - mysql_bin_log.write(&qinfo); - DBUG_RETURN(0); + if (mysql_bin_log.is_open()) + { + thd->clear_error(); + Query_log_event qinfo(thd, buff, query_length, 0, FALSE); + mysql_bin_log.write(&qinfo); + } +end: + close_thread_tables(thd); + DBUG_RETURN(result); } @@ -1171,7 +1354,7 @@ bool change_password(THD *thd, const char *host, const char *user, */ static ACL_USER * -find_acl_user(const char *host, const char *user) +find_acl_user(const char *host, const char *user, my_bool exact) { DBUG_ENTER("find_acl_user"); DBUG_PRINT("enter",("host: '%s' user: '%s'",host,user)); @@ -1187,7 +1370,10 @@ find_acl_user(const char *host, const char *user) if (!acl_user->user && !user[0] || acl_user->user && !strcmp(user,acl_user->user)) { - if (compare_hostname(&(acl_user->host),host,host)) + if (exact ? !my_strcasecmp(&my_charset_latin1, host, + acl_user->host.hostname ? + acl_user->host.hostname : "") : + compare_hostname(&acl_user->host,host,host)) { DBUG_RETURN(acl_user); } @@ -1249,69 +1435,73 @@ static bool compare_hostname(const acl_host_and_ip *host, const char *hostname, return (tmp & host->ip_mask) == host->ip; } return (!host->hostname || - (hostname && !wild_case_compare(hostname,host->hostname)) || + (hostname && !wild_case_compare(system_charset_info, + hostname,host->hostname)) || (ip && !wild_compare(ip,host->hostname,0))); } +bool hostname_requires_resolving(const char *hostname) +{ + char cur; + if (!hostname) + return FALSE; + int namelen= strlen(hostname); + int lhlen= strlen(my_localhost); + if ((namelen == lhlen) && + !my_strnncoll(system_charset_info, (const uchar *)hostname, namelen, + (const uchar *)my_localhost, strlen(my_localhost))) + return FALSE; + for (; (cur=*hostname); hostname++) + { + if ((cur != '%') && (cur != '_') && (cur != '.') && (cur != '/') && + ((cur < '0') || (cur > '9'))) + return TRUE; + } + return FALSE; +} + /* - Update grants in the user and database privilege tables + Update record for user in mysql.user privilege table with new password. 
+ + SYNOPSIS + update_user_table() + thd Thread handle + table Pointer to TABLE object for open mysql.user table + host/user Hostname/username pair identifying user for which + new password should be set + new_password New password + new_password_len Length of new password */ -static bool update_user_table(THD *thd, const char *host, const char *user, - const char *new_password) +static bool update_user_table(THD *thd, TABLE *table, + const char *host, const char *user, + const char *new_password, uint new_password_len) { - TABLE_LIST tables; - TABLE *table; - bool error=1; + int error; DBUG_ENTER("update_user_table"); DBUG_PRINT("enter",("user: %s host: %s",user,host)); - bzero((char*) &tables,sizeof(tables)); - tables.alias=tables.real_name=(char*) "user"; - tables.db=(char*) "mysql"; -#ifdef HAVE_REPLICATION - /* - GRANT and REVOKE are applied the slave in/exclusion rules as they are - some kind of updates to the mysql.% tables. - */ - if (thd->slave_thread && table_rules_on) - { - /* - The tables must be marked "updating" so that tables_ok() takes them into - account in tests. It's ok to leave 'updating' set after tables_ok. - */ - tables.updating= 1; - /* Thanks to bzero, tables.next==0 */ - if (!tables_ok(0, &tables)) - DBUG_RETURN(0); - } -#endif - - if (!(table=open_ltable(thd,&tables,TL_WRITE))) - DBUG_RETURN(1); /* purecov: deadcode */ - table->field[0]->store(host,(uint) strlen(host)); - table->field[1]->store(user,(uint) strlen(user)); + table->field[0]->store(host,(uint) strlen(host), system_charset_info); + table->field[1]->store(user,(uint) strlen(user), system_charset_info); + table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); if (table->file->index_read_idx(table->record[0],0, - (byte*) table->field[0]->ptr,0, + (byte*) table->field[0]->ptr, + table->key_info[0].key_length, HA_READ_KEY_EXACT)) { my_error(ER_PASSWORD_NO_MATCH,MYF(0)); /* purecov: deadcode */ DBUG_RETURN(1); /* purecov: deadcode */ } - store_record(table,1); - table->field[2]->store(new_password,(uint) strlen(new_password)); + store_record(table,record[1]); + table->field[2]->store(new_password, new_password_len, system_charset_info); if ((error=table->file->update_row(table->record[1],table->record[0]))) { table->file->print_error(error,MYF(0)); /* purecov: deadcode */ - goto end; /* purecov: deadcode */ + DBUG_RETURN(1); } - error=0; // Record updated - -end: - close_thread_tables(thd); - DBUG_RETURN(error); + DBUG_RETURN(0); } @@ -1327,11 +1517,12 @@ static bool test_if_create_new_users(THD *thd) bzero((char*) &tl,sizeof(tl)); tl.db= (char*) "mysql"; tl.real_name= (char*) "user"; - db_access=acl_get(thd->host, thd->ip, (char*) &thd->remote.sin_addr, + + db_access=acl_get(thd->host, thd->ip, thd->priv_user, tl.db, 0); if (!(db_access & INSERT_ACL)) { - if (check_grant(thd,INSERT_ACL,&tl,0,1)) + if (check_grant(thd, INSERT_ACL, &tl, 0, UINT_MAX, 1)) create_new_users=0; } } @@ -1349,58 +1540,63 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo, { int error = -1; bool old_row_exists=0; - char *password,empty_string[1]; + const char *password= ""; + uint password_len= 0; char what= (revoke_grant) ? 
'N' : 'Y'; DBUG_ENTER("replace_user_table"); safe_mutex_assert_owner(&acl_cache->lock); - password=empty_string; - empty_string[0]=0; - if (combo.password.str && combo.password.str[0]) { - if (combo.password.length != HASH_PASSWORD_LENGTH) + if (combo.password.length != SCRAMBLED_PASSWORD_CHAR_LENGTH && + combo.password.length != SCRAMBLED_PASSWORD_CHAR_LENGTH_323) { - my_printf_error(ER_PASSWORD_NO_MATCH, - "Password hash should be a %d-digit hexadecimal number", - MYF(0),HASH_PASSWORD_LENGTH); + my_printf_error(ER_UNKNOWN_ERROR, + "Password hash should be a %d-digit hexadecimal number", + MYF(0), SCRAMBLED_PASSWORD_CHAR_LENGTH); DBUG_RETURN(-1); } + password_len= combo.password.length; password=combo.password.str; } - table->field[0]->store(combo.host.str,combo.host.length); - table->field[1]->store(combo.user.str,combo.user.length); - table->file->index_init(0); - if (table->file->index_read(table->record[0], - (byte*) table->field[0]->ptr,0, - HA_READ_KEY_EXACT)) + table->field[0]->store(combo.host.str,combo.host.length, system_charset_info); + table->field[1]->store(combo.user.str,combo.user.length, system_charset_info); + table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); + if (table->file->index_read_idx(table->record[0], 0, + (byte*) table->field[0]->ptr, + table->key_info[0].key_length, + HA_READ_KEY_EXACT)) { if (!create_user) { if (what == 'N') - my_printf_error(ER_NONEXISTING_GRANT,ER(ER_NONEXISTING_GRANT), - MYF(0),combo.user.str,combo.host.str); + my_error(ER_NONEXISTING_GRANT, MYF(0), combo.user.str, combo.host.str); else - my_printf_error(ER_NO_PERMISSION_TO_CREATE_USER, - ER(ER_NO_PERMISSION_TO_CREATE_USER), - MYF(0),thd->user, - thd->host_or_ip); - error= -1; + my_error(ER_NO_PERMISSION_TO_CREATE_USER, MYF(0), + thd->user, thd->host_or_ip); goto end; } old_row_exists = 0; - restore_record(table,2); // cp empty row from record[2] - table->field[0]->store(combo.host.str,combo.host.length); - table->field[1]->store(combo.user.str,combo.user.length); - table->field[2]->store(password,(uint) strlen(password)); + restore_record(table,default_values); // cp empty row from default_values + table->field[0]->store(combo.host.str,combo.host.length, + system_charset_info); + table->field[1]->store(combo.user.str,combo.user.length, + system_charset_info); + table->field[2]->store(password, password_len, + system_charset_info); } else { old_row_exists = 1; - store_record(table,1); // Save copy for update + store_record(table,record[1]); // Save copy for update if (combo.password.str) // If password given - table->field[2]->store(password,(uint) strlen(password)); + table->field[2]->store(password, password_len, system_charset_info); + else if (!rights && !revoke_grant && thd->lex->ssl_type == SSL_TYPE_NOT_SPECIFIED && + !thd->lex->mqh.bits) + { + DBUG_RETURN(0); + } } /* Update table columns with new privileges */ @@ -1414,55 +1610,58 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo, tmp_field++, priv <<= 1) { if (priv & rights) // set requested privileges - (*tmp_field)->store(&what,1); + (*tmp_field)->store(&what, 1, &my_charset_latin1); } rights= get_access(table, 3, &next_field); DBUG_PRINT("info",("table->fields: %d",table->fields)); if (table->fields >= 31) /* From 4.0.0 we have more fields */ { /* We write down SSL related ACL stuff */ - switch (thd->lex.ssl_type) { + switch (thd->lex->ssl_type) { case SSL_TYPE_ANY: - table->field[next_field]->store("ANY", 3); - table->field[next_field+1]->store("", 0); - table->field[next_field+2]->store("", 0); - 
table->field[next_field+3]->store("", 0); + table->field[next_field]->store("ANY", 3, &my_charset_latin1); + table->field[next_field+1]->store("", 0, &my_charset_latin1); + table->field[next_field+2]->store("", 0, &my_charset_latin1); + table->field[next_field+3]->store("", 0, &my_charset_latin1); break; case SSL_TYPE_X509: - table->field[next_field]->store("X509", 4); - table->field[next_field+1]->store("", 0); - table->field[next_field+2]->store("", 0); - table->field[next_field+3]->store("", 0); + table->field[next_field]->store("X509", 4, &my_charset_latin1); + table->field[next_field+1]->store("", 0, &my_charset_latin1); + table->field[next_field+2]->store("", 0, &my_charset_latin1); + table->field[next_field+3]->store("", 0, &my_charset_latin1); break; case SSL_TYPE_SPECIFIED: - table->field[next_field]->store("SPECIFIED", 9); - table->field[next_field+1]->store("", 0); - table->field[next_field+2]->store("", 0); - table->field[next_field+3]->store("", 0); - if (thd->lex.ssl_cipher) - table->field[next_field+1]->store(thd->lex.ssl_cipher, - strlen(thd->lex.ssl_cipher)); - if (thd->lex.x509_issuer) - table->field[next_field+2]->store(thd->lex.x509_issuer, - strlen(thd->lex.x509_issuer)); - if (thd->lex.x509_subject) - table->field[next_field+3]->store(thd->lex.x509_subject, - strlen(thd->lex.x509_subject)); + table->field[next_field]->store("SPECIFIED", 9, &my_charset_latin1); + table->field[next_field+1]->store("", 0, &my_charset_latin1); + table->field[next_field+2]->store("", 0, &my_charset_latin1); + table->field[next_field+3]->store("", 0, &my_charset_latin1); + if (thd->lex->ssl_cipher) + table->field[next_field+1]->store(thd->lex->ssl_cipher, + strlen(thd->lex->ssl_cipher), + system_charset_info); + if (thd->lex->x509_issuer) + table->field[next_field+2]->store(thd->lex->x509_issuer, + strlen(thd->lex->x509_issuer), + system_charset_info); + if (thd->lex->x509_subject) + table->field[next_field+3]->store(thd->lex->x509_subject, + strlen(thd->lex->x509_subject), + system_charset_info); break; case SSL_TYPE_NOT_SPECIFIED: break; case SSL_TYPE_NONE: - table->field[next_field]->store("", 0); - table->field[next_field+1]->store("", 0); - table->field[next_field+2]->store("", 0); - table->field[next_field+3]->store("", 0); + table->field[next_field]->store("", 0, &my_charset_latin1); + table->field[next_field+1]->store("", 0, &my_charset_latin1); + table->field[next_field+2]->store("", 0, &my_charset_latin1); + table->field[next_field+3]->store("", 0, &my_charset_latin1); break; } /* Skip over SSL related fields to first user limits related field */ next_field+= 4; - USER_RESOURCES mqh = thd->lex.mqh; + USER_RESOURCES mqh= thd->lex->mqh; if (mqh.bits & 1) table->field[next_field]->store((longlong) mqh.questions); if (mqh.bits & 2) @@ -1477,7 +1676,8 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo, We should NEVER delete from the user table, as a uses can still use mysqld even if he doesn't have any privileges in the user table! 
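In the user-limits block above, each hourly limit is copied into the user table only when its flag is present in mqh.bits (bit 0 is shown guarding the questions column; the remaining bits guard the other limits in the same way). A small self-contained sketch of that selective update, with hypothetical struct and field names standing in for USER_RESOURCES:

#include <cstdio>

// Hypothetical mirror of the patch's USER_RESOURCES: three hourly limits
// plus a bitmap recording which of them were actually given in the GRANT.
struct user_limits
{
  unsigned long questions, updates, connections;
  unsigned int  bits;   // bit 0: questions; bits 1 and 2: updates/connections (assumed order)
};

// Copy only the flagged limits and leave unspecified ones untouched,
// which is what the column stores guarded by mqh.bits achieve.
static void apply_limits(const user_limits &given, user_limits *current)
{
  if (given.bits & 1) current->questions=   given.questions;
  if (given.bits & 2) current->updates=     given.updates;
  if (given.bits & 4) current->connections= given.connections;
}

int main()
{
  user_limits current= {0, 0, 0, 0};
  user_limits given=   {100, 0, 0, 1};   // only MAX_QUERIES_PER_HOUR specified
  apply_limits(given, &current);
  std::printf("questions=%lu updates=%lu connections=%lu\n",
              current.questions, current.updates, current.connections);
  return 0;
}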
*/ - if (cmp_record(table,1) && + table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); + if (cmp_record(table,record[1]) && (error=table->file->update_row(table->record[1],table->record[0]))) { // This should never happen table->file->print_error(error,MYF(0)); /* purecov: deadcode */ @@ -1501,26 +1701,24 @@ end: if (!error) { acl_cache->clear(1); // Clear privilege cache - if (!combo.password.str) - password=0; // No password given on command if (old_row_exists) - acl_update_user(combo.user.str,combo.host.str,password, - thd->lex.ssl_type, - thd->lex.ssl_cipher, - thd->lex.x509_issuer, - thd->lex.x509_subject, - &thd->lex.mqh, + acl_update_user(combo.user.str, combo.host.str, + combo.password.str, password_len, + thd->lex->ssl_type, + thd->lex->ssl_cipher, + thd->lex->x509_issuer, + thd->lex->x509_subject, + &thd->lex->mqh, rights); else - acl_insert_user(combo.user.str,combo.host.str,password, - thd->lex.ssl_type, - thd->lex.ssl_cipher, - thd->lex.x509_issuer, - thd->lex.x509_subject, - &thd->lex.mqh, + acl_insert_user(combo.user.str, combo.host.str, password, password_len, + thd->lex->ssl_type, + thd->lex->ssl_cipher, + thd->lex->x509_issuer, + thd->lex->x509_subject, + &thd->lex->mqh, rights); } - table->file->index_end(); DBUG_RETURN(error); } @@ -1540,45 +1738,52 @@ static int replace_db_table(TABLE *table, const char *db, char what= (revoke_grant) ? 'N' : 'Y'; DBUG_ENTER("replace_db_table"); + if (!initialized) + { + my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--skip-grant-tables"); + DBUG_RETURN(-1); + } + /* Check if there is such a user in user table in memory? */ - if (!initialized || !find_acl_user(combo.host.str,combo.user.str)) + if (!find_acl_user(combo.host.str,combo.user.str, FALSE)) { my_error(ER_PASSWORD_NO_MATCH,MYF(0)); DBUG_RETURN(-1); } - table->field[0]->store(combo.host.str,combo.host.length); - table->field[1]->store(db,(uint) strlen(db)); - table->field[2]->store(combo.user.str,combo.user.length); - table->file->index_init(0); - if (table->file->index_read(table->record[0],(byte*) table->field[0]->ptr,0, - HA_READ_KEY_EXACT)) + table->field[0]->store(combo.host.str,combo.host.length, system_charset_info); + table->field[1]->store(db,(uint) strlen(db), system_charset_info); + table->field[2]->store(combo.user.str,combo.user.length, system_charset_info); + table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); + if (table->file->index_read_idx(table->record[0],0, + (byte*) table->field[0]->ptr, + table->key_info[0].key_length, + HA_READ_KEY_EXACT)) { if (what == 'N') { // no row, no revoke - my_printf_error(ER_NONEXISTING_GRANT,ER(ER_NONEXISTING_GRANT),MYF(0), - combo.user.str,combo.host.str); + my_error(ER_NONEXISTING_GRANT, MYF(0), combo.user.str, combo.host.str); goto abort; } old_row_exists = 0; - restore_record(table,2); // cp empty row from record[2] - table->field[0]->store(combo.host.str,combo.host.length); - table->field[1]->store(db,(uint) strlen(db)); - table->field[2]->store(combo.user.str,combo.user.length); + restore_record(table,default_values); // cp empty row from default_values + table->field[0]->store(combo.host.str,combo.host.length, system_charset_info); + table->field[1]->store(db,(uint) strlen(db), system_charset_info); + table->field[2]->store(combo.user.str,combo.user.length, system_charset_info); } else { old_row_exists = 1; - store_record(table,1); + store_record(table,record[1]); } store_rights=get_rights_for_db(rights); for (i= 3, priv= 1; i < table->fields; i++, priv <<= 1) { if (priv & store_rights) // do it if priv is chosen - table->field 
[i]->store(&what,1); // set requested privileges + table->field [i]->store(&what,1, &my_charset_latin1);// set requested privileges } - rights=get_access(table,3,0); + rights=get_access(table,3); rights=fix_rights_for_db(rights); if (old_row_exists) @@ -1586,6 +1791,7 @@ static int replace_db_table(TABLE *table, const char *db, /* update old existing row */ if (rights) { + table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); if ((error=table->file->update_row(table->record[1],table->record[0]))) goto table_error; /* purecov: deadcode */ } @@ -1607,13 +1813,11 @@ static int replace_db_table(TABLE *table, const char *db, else if (rights) acl_insert_db(combo.user.str,combo.host.str,db,rights); - table->file->index_end(); DBUG_RETURN(0); /* This could only happen if the grant tables got corrupted */ table_error: table->file->print_error(error,MYF(0)); /* purecov: deadcode */ - table->file->index_end(); abort: DBUG_RETURN(-1); @@ -1644,40 +1848,41 @@ static byte* get_key_column(GRANT_COLUMN *buff,uint *length, class GRANT_TABLE :public Sql_alloc { public: - char *host,*db,*user,*tname, *hash_key, *orig_host; + acl_host_and_ip host; + char *db, *user, *tname, *hash_key; ulong privs, cols; ulong sort; uint key_length; HASH hash_columns; - GRANT_TABLE(const char *h, const char *d,const char *u, const char *t, - ulong p, ulong c); - GRANT_TABLE(TABLE *form, TABLE *col_privs); + + GRANT_TABLE(const char *h, const char *d,const char *u, + const char *t, ulong p, ulong c); + GRANT_TABLE (TABLE *form, TABLE *col_privs); bool ok() { return privs != 0 || cols != 0; } }; + GRANT_TABLE::GRANT_TABLE(const char *h, const char *d,const char *u, const char *t, ulong p, ulong c) :privs(p), cols(c) { /* Host given by user */ - orig_host= strdup_root(&memex,h); - /* Convert empty hostname to '%' for easy comparision */ - host= orig_host[0] ? 
orig_host : (char*) "%"; + update_hostname(&host, strdup_root(&memex, h)); db = strdup_root(&memex,d); user = strdup_root(&memex,u); - sort= get_sort(3,host,db,user); + sort= get_sort(3,host.hostname,db,user); tname= strdup_root(&memex,t); if (lower_case_table_names) { - casedn_str(db); - casedn_str(tname); + my_casedn_str(files_charset_info, db); + my_casedn_str(files_charset_info, tname); } key_length =(uint) strlen(d)+(uint) strlen(u)+(uint) strlen(t)+3; hash_key = (char*) alloc_root(&memex,key_length); strmov(strmov(strmov(hash_key,user)+1,db)+1,tname); - (void) hash_init(&hash_columns,0,0,0, (hash_get_key) get_key_column,0, - HASH_CASE_INSENSITIVE); + (void) hash_init(&hash_columns,system_charset_info, + 0,0,0, (hash_get_key) get_key_column,0,0); } @@ -1685,18 +1890,13 @@ GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs) { byte key[MAX_KEY_LENGTH]; - orig_host= host= get_field(&memex,form,0); - db = get_field(&memex,form,1); - user = get_field(&memex,form,2); + update_hostname(&host, get_field(&memex, form->field[0])); + db= get_field(&memex,form->field[1]); + user= get_field(&memex,form->field[2]); if (!user) user= (char*) ""; - if (!orig_host) - { - orig_host= (char*) ""; - host= (char*) "%"; - } - sort= get_sort(3,orig_host,db,user); - tname = get_field(&memex,form,3); + sort= get_sort(3, host.hostname, db, user); + tname= get_field(&memex,form->field[3]); if (!db || !tname) { /* Wrong table row; Ignore it */ @@ -1705,8 +1905,8 @@ GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs) } if (lower_case_table_names) { - casedn_str(db); - casedn_str(tname); + my_casedn_str(files_charset_info, db); + my_casedn_str(files_charset_info, tname); } key_length = ((uint) strlen(db) + (uint) strlen(user) + (uint) strlen(tname) + 3); @@ -1717,27 +1917,30 @@ GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs) privs = fix_rights_for_table(privs); cols = fix_rights_for_column(cols); - (void) hash_init(&hash_columns,0,0,0, (hash_get_key) get_key_column,0, - HASH_CASE_INSENSITIVE); + (void) hash_init(&hash_columns,system_charset_info, + 0,0,0, (hash_get_key) get_key_column,0,0); if (cols) { int key_len; - col_privs->field[0]->store(orig_host,(uint) strlen(orig_host)); - col_privs->field[1]->store(db,(uint) strlen(db)); - col_privs->field[2]->store(user,(uint) strlen(user)); - col_privs->field[3]->store(tname,(uint) strlen(tname)); + col_privs->field[0]->store(host.hostname, + host.hostname ? 
(uint) strlen(host.hostname) : 0, + system_charset_info); + col_privs->field[1]->store(db,(uint) strlen(db), system_charset_info); + col_privs->field[2]->store(user,(uint) strlen(user), system_charset_info); + col_privs->field[3]->store(tname,(uint) strlen(tname), system_charset_info); key_len=(col_privs->field[0]->pack_length()+ col_privs->field[1]->pack_length()+ col_privs->field[2]->pack_length()+ col_privs->field[3]->pack_length()); key_copy(key,col_privs,0,key_len); - col_privs->field[4]->store("",0); - col_privs->file->index_init(0); + col_privs->field[4]->store("",0, &my_charset_latin1); + col_privs->file->ha_index_init(0); if (col_privs->file->index_read(col_privs->record[0], (byte*) col_privs->field[0]->ptr, key_len, HA_READ_KEY_EXACT)) { cols = 0; /* purecov: deadcode */ + col_privs->file->ha_index_end(); return; } do @@ -1745,7 +1948,7 @@ GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs) String *res,column_name; GRANT_COLUMN *mem_check; /* As column name is a string, we don't have to supply a buffer */ - res=col_privs->field[4]->val_str(&column_name,&column_name); + res=col_privs->field[4]->val_str(&column_name); ulong priv= (ulong) col_privs->field[6]->val_int(); if (!(mem_check = new GRANT_COLUMN(*res, fix_rights_for_column(priv)))) @@ -1754,9 +1957,10 @@ GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs) privs = cols = 0; /* purecov: deadcode */ return; /* purecov: deadcode */ } - hash_insert(&hash_columns, (byte *) mem_check); + my_hash_insert(&hash_columns, (byte *) mem_check); } while (!col_privs->file->index_next(col_privs->record[0]) && - !key_cmp(col_privs,key,0,key_len)); + !key_cmp_if_same(col_privs,key,0,key_len)); + col_privs->file->ha_index_end(); } } @@ -1785,26 +1989,29 @@ static GRANT_TABLE *table_hash_search(const char *host,const char* ip, char helping [NAME_LEN*2+USERNAME_LENGTH+3]; uint len; GRANT_TABLE *grant_table,*found=0; - safe_mutex_assert_owner(&LOCK_grant); + HASH_SEARCH_STATE state; len = (uint) (strmov(strmov(strmov(helping,user)+1,db)+1,tname)-helping)+ 1; - for (grant_table=(GRANT_TABLE*) hash_search(&hash_tables,(byte*) helping, - len) ; + for (grant_table=(GRANT_TABLE*) hash_first(&column_priv_hash, + (byte*) helping, + len, &state) ; grant_table ; - grant_table= (GRANT_TABLE*) hash_next(&hash_tables,(byte*) helping,len)) + grant_table= (GRANT_TABLE*) hash_next(&column_priv_hash,(byte*) helping, + len, &state)) { if (exact) { - if ((host && !my_strcasecmp(host,grant_table->host)) || - (ip && !strcmp(ip,grant_table->host))) + if ((host && + !my_strcasecmp(system_charset_info, host, + grant_table->host.hostname)) || + (ip && !strcmp(ip, grant_table->host.hostname))) return grant_table; } else { - if (((host && !wild_case_compare(host,grant_table->host)) || - (ip && !wild_case_compare(ip,grant_table->host))) && + if (compare_hostname(&grant_table->host, host, ip) && (!found || found->sort < grant_table->sort)) - found=grant_table; + found=grant_table; // Host ok } } return found; @@ -1830,10 +2037,10 @@ static int replace_column_table(GRANT_TABLE *g_t, byte key[MAX_KEY_LENGTH]; DBUG_ENTER("replace_column_table"); - table->field[0]->store(combo.host.str,combo.host.length); - table->field[1]->store(db,(uint) strlen(db)); - table->field[2]->store(combo.user.str,combo.user.length); - table->field[3]->store(table_name,(uint) strlen(table_name)); + table->field[0]->store(combo.host.str,combo.host.length, system_charset_info); + table->field[1]->store(db,(uint) strlen(db), system_charset_info); + 
table->field[2]->store(combo.user.str,combo.user.length, system_charset_info); + table->field[3]->store(table_name,(uint) strlen(table_name), system_charset_info); key_length=(table->field[0]->pack_length()+ table->field[1]->pack_length()+ table->field[2]->pack_length()+ table->field[3]->pack_length()); key_copy(key,table,0,key_length); @@ -1844,29 +2051,32 @@ static int replace_column_table(GRANT_TABLE *g_t, List_iterator <LEX_COLUMN> iter(columns); class LEX_COLUMN *xx; - table->file->index_init(0); + table->file->ha_index_init(0); while ((xx=iter++)) { ulong privileges = xx->rights; bool old_row_exists=0; key_restore(table,key,0,key_length); - table->field[4]->store(xx->column.ptr(),xx->column.length()); + table->field[4]->store(xx->column.ptr(),xx->column.length(), + system_charset_info); + table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); if (table->file->index_read(table->record[0],(byte*) table->field[0]->ptr, - 0, HA_READ_KEY_EXACT)) + table->key_info[0].key_length, + HA_READ_KEY_EXACT)) { if (revoke_grant) { - my_printf_error(ER_NONEXISTING_TABLE_GRANT, - ER(ER_NONEXISTING_TABLE_GRANT),MYF(0), - combo.user.str, combo.host.str,table_name); /* purecov: inspected */ + my_error(ER_NONEXISTING_TABLE_GRANT, MYF(0), + combo.user.str, combo.host.str, table_name); /* purecov: inspected */ result= -1; /* purecov: inspected */ continue; /* purecov: inspected */ } old_row_exists = 0; - restore_record(table,2); // Get empty record + restore_record(table,default_values); // Get empty record key_restore(table,key,0,key_length); - table->field[4]->store(xx->column.ptr(),xx->column.length()); + table->field[4]->store(xx->column.ptr(),xx->column.length(), + system_charset_info); } else { @@ -1878,7 +2088,7 @@ static int replace_column_table(GRANT_TABLE *g_t, else privileges |= tmp; old_row_exists = 1; - store_record(table,1); // copy original row + store_record(table,record[1]); // copy original row } table->field[6]->store((longlong) get_rights_for_column(privileges)); @@ -1910,10 +2120,9 @@ static int replace_column_table(GRANT_TABLE *g_t, goto end; /* purecov: inspected */ } GRANT_COLUMN *grant_column = new GRANT_COLUMN(xx->column,privileges); - hash_insert(&g_t->hash_columns,(byte*) grant_column); + my_hash_insert(&g_t->hash_columns,(byte*) grant_column); } } - table->file->index_end(); /* If revoke of privileges on the table level, remove all such privileges @@ -1922,9 +2131,10 @@ static int replace_column_table(GRANT_TABLE *g_t, if (revoke_grant) { - table->file->index_init(0); + table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); if (table->file->index_read(table->record[0], (byte*) table->field[0]->ptr, - key_length, HA_READ_KEY_EXACT)) + key_length, + HA_READ_KEY_EXACT)) goto end; /* Scan through all rows with the same host,db,user and table */ @@ -1932,18 +2142,19 @@ static int replace_column_table(GRANT_TABLE *g_t, { ulong privileges = (ulong) table->field[6]->val_int(); privileges=fix_rights_for_column(privileges); - store_record(table,1); + store_record(table,record[1]); if (privileges & rights) // is in this record the priv to be revoked ?? 
{ GRANT_COLUMN *grant_column = NULL; char colum_name_buf[HOSTNAME_LENGTH+1]; - String column_name(colum_name_buf,sizeof(colum_name_buf)); + String column_name(colum_name_buf,sizeof(colum_name_buf), + system_charset_info); privileges&= ~rights; table->field[6]->store((longlong) get_rights_for_column(privileges)); - table->field[4]->val_str(&column_name,&column_name); + table->field[4]->val_str(&column_name); grant_column = column_hash_search(g_t, column_name.ptr(), column_name.length()); @@ -1974,11 +2185,11 @@ static int replace_column_table(GRANT_TABLE *g_t, } } } while (!table->file->index_next(table->record[0]) && - !key_cmp(table,key,0,key_length)); + !key_cmp_if_same(table,key,0,key_length)); } end: - table->file->index_end(); + table->file->ha_index_end(); DBUG_RETURN(result); } @@ -1994,7 +2205,6 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table, int error=0; ulong store_table_rights, store_col_rights; DBUG_ENTER("replace_table_table"); - safe_mutex_assert_owner(&LOCK_grant); strxmov(grantor, thd->user, "@", thd->host_or_ip, NullS); @@ -2002,21 +2212,22 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table, The following should always succeed as new users are created before this function is called! */ - if (!find_acl_user(combo.host.str,combo.user.str)) + if (!find_acl_user(combo.host.str,combo.user.str, FALSE)) { my_error(ER_PASSWORD_NO_MATCH,MYF(0)); /* purecov: deadcode */ DBUG_RETURN(-1); /* purecov: deadcode */ } - restore_record(table,2); // Get empty record - table->field[0]->store(combo.host.str,combo.host.length); - table->field[1]->store(db,(uint) strlen(db)); - table->field[2]->store(combo.user.str,combo.user.length); - table->field[3]->store(table_name,(uint) strlen(table_name)); - store_record(table,1); // store at pos 1 - + restore_record(table,default_values); // Get empty record + table->field[0]->store(combo.host.str,combo.host.length, system_charset_info); + table->field[1]->store(db,(uint) strlen(db), system_charset_info); + table->field[2]->store(combo.user.str,combo.user.length, system_charset_info); + table->field[3]->store(table_name,(uint) strlen(table_name), system_charset_info); + store_record(table,record[1]); // store at pos 1 + table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); if (table->file->index_read_idx(table->record[0],0, - (byte*) table->field[0]->ptr,0, + (byte*) table->field[0]->ptr, + table->key_info[0].key_length, HA_READ_KEY_EXACT)) { /* @@ -2026,14 +2237,13 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table, */ if (revoke_grant) { // no row, no revoke - my_printf_error(ER_NONEXISTING_TABLE_GRANT, - ER(ER_NONEXISTING_TABLE_GRANT),MYF(0), - combo.user.str,combo.host.str, - table_name); /* purecov: deadcode */ + my_error(ER_NONEXISTING_TABLE_GRANT, MYF(0), + combo.user.str, combo.host.str, + table_name); /* purecov: deadcode */ DBUG_RETURN(-1); /* purecov: deadcode */ } old_row_exists = 0; - restore_record(table,1); // Get saved record + restore_record(table,record[1]); // Get saved record } store_table_rights= get_rights_for_table(rights); @@ -2041,7 +2251,7 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table, if (old_row_exists) { ulong j,k; - store_record(table,1); + store_record(table,record[1]); j = (ulong) table->field[6]->val_int(); k = (ulong) table->field[7]->val_int(); @@ -2057,7 +2267,7 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table, } } - table->field[4]->store(grantor,(uint) strlen(grantor)); + table->field[4]->store(grantor,(uint) 
strlen(grantor), system_charset_info); table->field[6]->store((longlong) store_table_rights); table->field[7]->store((longlong) store_col_rights); rights=fix_rights_for_table(store_table_rights); @@ -2087,7 +2297,7 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table, } else { - hash_delete(&hash_tables,(byte*) grant_table); + hash_delete(&column_priv_hash,(byte*) grant_table); } DBUG_RETURN(0); @@ -2129,8 +2339,9 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, if (!initialized) { - send_error(&(thd->net), ER_UNKNOWN_COM_ERROR); /* purecov: inspected */ - return 1; /* purecov: inspected */ + my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), + "--skip-grant-tables"); /* purecov: inspected */ + DBUG_RETURN(-1); /* purecov: inspected */ } if (rights & ~TABLE_ACLS) { @@ -2150,12 +2361,13 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, DBUG_RETURN(-1); while ((column = column_iter++)) { + uint unused_field_idx= NO_CACHED_FIELD_INDEX; Field *f= find_field_in_table(thd,table,column->column.ptr(), - column->column.length(),1,0); + column->column.length(),1,0,&unused_field_idx); if (!f) { - my_printf_error(ER_BAD_FIELD_ERROR,ER(ER_BAD_FIELD_ERROR),MYF(0), - column->column.c_ptr(), table_list->alias); + my_error(ER_BAD_FIELD_ERROR, MYF(0), + column->column.c_ptr(), table_list->alias); DBUG_RETURN(-1); } if (f == (Field*)-1) @@ -2176,7 +2388,7 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, fn_format(buf,buf,"","",4+16+32); if (access(buf,F_OK)) { - my_error(ER_NO_SUCH_TABLE,MYF(0),table_list->db, table_list->alias); + my_error(ER_NO_SUCH_TABLE, MYF(0), table_list->db, table_list->alias); DBUG_RETURN(-1); } } @@ -2214,8 +2426,8 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, if (thd->slave_thread && table_rules_on) { /* - The tables must be marked "updating" so that tables_ok() takes them into - account in tests. + The tables must be marked "updating" so that tables_ok() takes them into + account in tests. 
*/ tables[0].updating= tables[1].updating= tables[2].updating= 1; if (!tables_ok(0, tables)) @@ -2223,7 +2435,7 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, } #endif - if (open_and_lock_tables(thd,tables)) + if (simple_open_n_lock_tables(thd,tables)) { // Should never happen close_thread_tables(thd); /* purecov: deadcode */ DBUG_RETURN(-1); /* purecov: deadcode */ @@ -2232,19 +2444,14 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, if (!revoke_grant) create_new_users= test_if_create_new_users(thd); int result=0; - pthread_mutex_lock(&LOCK_grant); - MEM_ROOT *old_root=my_pthread_getspecific_ptr(MEM_ROOT*,THR_MALLOC); - my_pthread_setspecific_ptr(THR_MALLOC,&memex); + rw_wrlock(&LOCK_grant); + MEM_ROOT *old_root= thd->mem_root; + thd->mem_root= &memex; while ((Str = str_list++)) { int error; GRANT_TABLE *grant_table; - if (!Str->host.str) - { - Str->host.str=(char*) "%"; - Str->host.length=1; - } if (Str->host.length > HOSTNAME_LENGTH || Str->user.length > USERNAME_LENGTH) { @@ -2271,9 +2478,8 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, { if (revoke_grant) { - my_printf_error(ER_NONEXISTING_TABLE_GRANT, - ER(ER_NONEXISTING_TABLE_GRANT),MYF(0), - Str->user.str, Str->host.str, table_list->real_name); + my_error(ER_NONEXISTING_TABLE_GRANT, MYF(0), + Str->user.str, Str->host.str, table_list->real_name); result= -1; continue; } @@ -2287,7 +2493,7 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, result= -1; /* purecov: deadcode */ continue; /* purecov: deadcode */ } - hash_insert(&hash_tables,(byte*) grant_table); + my_hash_insert(&column_priv_hash,(byte*) grant_table); } /* If revoke_grant, calculate the new column privilege for tables_priv */ @@ -2328,7 +2534,8 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, table_list->db, table_list->real_name, rights, column_priv, revoke_grant)) - { // Crashend table ?? 
+ { + /* Should only happen if table is crashed */ result= -1; /* purecov: deadcode */ } else if (tables[2].table) @@ -2344,17 +2551,17 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, } } grant_option=TRUE; - my_pthread_setspecific_ptr(THR_MALLOC,old_root); - pthread_mutex_unlock(&LOCK_grant); + thd->mem_root= old_root; + rw_unlock(&LOCK_grant); if (!result) - send_ok(&thd->net); + send_ok(thd); /* Tables are automatically closed */ DBUG_RETURN(result); } int mysql_grant(THD *thd, const char *db, List <LEX_USER> &list, - ulong rights, bool revoke_grant) + ulong rights, bool revoke_grant) { List_iterator <LEX_USER> str_list (list); LEX_USER *Str; @@ -2362,22 +2569,22 @@ int mysql_grant(THD *thd, const char *db, List <LEX_USER> &list, bool create_new_users=0; TABLE_LIST tables[2]; DBUG_ENTER("mysql_grant"); - if (!initialized) { - send_error(&(thd->net), ER_UNKNOWN_COM_ERROR); /* purecov: tested */ - DBUG_RETURN(1); /* purecov: tested */ + my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), + "--skip-grant-tables"); /* purecov: tested */ + DBUG_RETURN(-1); /* purecov: tested */ } if (lower_case_table_names && db) { strmov(tmp_db,db); - casedn_str(tmp_db); + my_casedn_str(files_charset_info, tmp_db); db=tmp_db; } /* open the mysql.user and mysql.db tables */ - + bzero((char*) &tables,sizeof(tables)); tables[0].alias=tables[0].real_name=(char*) "user"; tables[1].alias=tables[1].real_name=(char*) "db"; tables[0].next=tables+1; @@ -2393,9 +2600,9 @@ int mysql_grant(THD *thd, const char *db, List <LEX_USER> &list, */ if (thd->slave_thread && table_rules_on) { - /* - The tables must be marked "updating" so that tables_ok() takes them into - account in tests. + /* + The tables must be marked "updating" so that tables_ok() takes them into + account in tests. */ tables[0].updating= tables[1].updating= 1; if (!tables_ok(0, tables)) @@ -2403,7 +2610,7 @@ int mysql_grant(THD *thd, const char *db, List <LEX_USER> &list, } #endif - if (open_and_lock_tables(thd,tables)) + if (simple_open_n_lock_tables(thd,tables)) { // This should never happen close_thread_tables(thd); /* purecov: deadcode */ DBUG_RETURN(-1); /* purecov: deadcode */ @@ -2413,18 +2620,13 @@ int mysql_grant(THD *thd, const char *db, List <LEX_USER> &list, create_new_users= test_if_create_new_users(thd); /* go through users in user_list */ - pthread_mutex_lock(&LOCK_grant); + rw_wrlock(&LOCK_grant); VOID(pthread_mutex_lock(&acl_cache->lock)); grant_version++; int result=0; while ((Str = str_list++)) { - if (!Str->host.str) - { - Str->host.str=(char*) "%"; - Str->host.length=1; - } if (Str->host.length > HOSTNAME_LENGTH || Str->user.length > USERNAME_LENGTH) { @@ -2449,17 +2651,17 @@ int mysql_grant(THD *thd, const char *db, List <LEX_USER> &list, } else { - net_printf(&thd->net,ER_WRONG_USAGE,"DB GRANT","GLOBAL PRIVILEGES"); - result= 1; + my_error(ER_WRONG_USAGE, MYF(0), "DB GRANT", "GLOBAL PRIVILEGES"); + result= -1; } } } VOID(pthread_mutex_unlock(&acl_cache->lock)); - pthread_mutex_unlock(&LOCK_grant); + rw_unlock(&LOCK_grant); close_thread_tables(thd); if (!result) - send_ok(&thd->net); + send_ok(thd); DBUG_RETURN(result); } @@ -2470,137 +2672,190 @@ void grant_free(void) { DBUG_ENTER("grant_free"); grant_option = FALSE; - hash_free(&hash_tables); + hash_free(&column_priv_hash); free_root(&memex,MYF(0)); DBUG_VOID_RETURN; } -/* Init grant array if possible */ +/* + Initialize structures responsible for table/column-level privilege checking + and load information for them from tables in the 'mysql' database. 
-my_bool grant_init(THD *org_thd) + SYNOPSIS + grant_init() + + RETURN VALUES + 0 ok + 1 Could not initialize grant's +*/ + +my_bool grant_init() { THD *thd; - TABLE_LIST tables[2]; - MYSQL_LOCK *lock; - my_bool return_val= 1; - TABLE *t_table, *c_table; + my_bool return_val; DBUG_ENTER("grant_init"); - grant_option = FALSE; - (void) hash_init(&hash_tables,0,0,0, (hash_get_key) get_grant_table, - (hash_free_key) free_grant_table,0); - init_sql_alloc(&memex, ACL_ALLOC_BLOCK_SIZE, 0); - - /* Don't do anything if running with --skip-grant */ - if (!initialized) - DBUG_RETURN(0); /* purecov: tested */ - - if (!(thd=new THD)) + if (!(thd= new THD)) DBUG_RETURN(1); /* purecov: deadcode */ thd->store_globals(); - thd->db= my_strdup("mysql",MYF(0)); - thd->db_length=5; // Safety - bzero((char*) &tables,sizeof(tables)); - tables[0].alias=tables[0].real_name= (char*) "tables_priv"; - tables[1].alias=tables[1].real_name= (char*) "columns_priv"; - tables[0].next=tables+1; - tables[0].lock_type=tables[1].lock_type=TL_READ; - tables[0].db=tables[1].db=thd->db; + return_val= grant_reload(thd); + delete thd; + /* Remember that we don't have a THD */ + my_pthread_setspecific_ptr(THR_THD, 0); + DBUG_RETURN(return_val); +} - if (open_tables(thd,tables)) - goto end; - TABLE *ptr[2]; // Lock tables for quick update - ptr[0]= tables[0].table; - ptr[1]= tables[1].table; - if (! (lock= mysql_lock_tables(thd, ptr, 2, 0))) - goto end; +/* + Initialize structures responsible for table/column-level privilege + checking and load information about grants from open privilege tables. + + SYNOPSIS + grant_load() + thd Current thread + tables List containing open "mysql.tables_priv" and + "mysql.columns_priv" tables. + + RETURN VALUES + FALSE - success + TRUE - error +*/ + +static my_bool grant_load(TABLE_LIST *tables) +{ + MEM_ROOT *memex_ptr; + my_bool return_val= 1; + TABLE *t_table, *c_table; + bool check_no_resolve= specialflag & SPECIAL_NO_RESOLVE; + MEM_ROOT **save_mem_root_ptr= my_pthread_getspecific_ptr(MEM_ROOT**, + THR_MALLOC); + DBUG_ENTER("grant_load"); + + grant_option = FALSE; + (void) hash_init(&column_priv_hash,system_charset_info, + 0,0,0, (hash_get_key) get_grant_table, + (hash_free_key) free_grant_table,0); + init_sql_alloc(&memex, ACL_ALLOC_BLOCK_SIZE, 0); t_table = tables[0].table; c_table = tables[1].table; - t_table->file->index_init(0); + t_table->file->ha_index_init(0); if (t_table->file->index_first(t_table->record[0])) { - t_table->file->index_end(); return_val= 0; goto end_unlock; } grant_option= TRUE; - t_table->file->index_end(); - /* Will be restored by org_thd->store_globals() */ - my_pthread_setspecific_ptr(THR_MALLOC,&memex); + memex_ptr= &memex; + my_pthread_setspecific_ptr(THR_MALLOC, &memex_ptr); do { GRANT_TABLE *mem_check; - if (!(mem_check=new GRANT_TABLE(t_table,c_table)) || - mem_check->ok() && hash_insert(&hash_tables,(byte*) mem_check)) + if (!(mem_check=new GRANT_TABLE(t_table,c_table))) { /* This could only happen if we are out memory */ grant_option= FALSE; /* purecov: deadcode */ goto end_unlock; } + + if (check_no_resolve) + { + if (hostname_requires_resolving(mem_check->host.hostname)) + { + sql_print_warning("'tables_priv' entry '%s %s@%s' " + "ignored in --skip-name-resolve mode.", + mem_check->tname, mem_check->user, + mem_check->host); + continue; + } + } + + if (mem_check->ok() && my_hash_insert(&column_priv_hash,(byte*) mem_check)) + { + grant_option= FALSE; + goto end_unlock; + } } while (!t_table->file->index_next(t_table->record[0])); return_val=0; // Return ok 
end_unlock: - mysql_unlock_tables(thd, lock); - thd->version--; // Force close to free memory - -end: - close_thread_tables(thd); - delete thd; - if (org_thd) - org_thd->store_globals(); - else - { - /* Remember that we don't have a THD */ - my_pthread_setspecific_ptr(THR_THD, 0); - } + t_table->file->ha_index_end(); + my_pthread_setspecific_ptr(THR_MALLOC, save_mem_root_ptr); DBUG_RETURN(return_val); } /* - Reload grant array if possible + Reload information about table and column level privileges if possible. SYNOPSIS grant_reload() - thd Thread handler + thd Current thread NOTES - Locked tables are checked by acl_init and doesn't have to be checked here + Locked tables are checked by acl_reload() and doesn't have to be checked + in this call. + This function is also used for initialization of structures responsible + for table/column-level privilege checking. + + RETURN VALUE + FALSE Success + TRUE Error */ -void grant_reload(THD *thd) +my_bool grant_reload(THD *thd) { - HASH old_hash_tables; + TABLE_LIST tables[2]; + HASH old_column_priv_hash; bool old_grant_option; MEM_ROOT old_mem; + my_bool return_val= 1; DBUG_ENTER("grant_reload"); - pthread_mutex_lock(&LOCK_grant); + /* Don't do anything if running with --skip-grant-tables */ + if (!initialized) + DBUG_RETURN(0); + + bzero((char*) tables, sizeof(tables)); + tables[0].alias=tables[0].real_name= (char*) "tables_priv"; + tables[1].alias=tables[1].real_name= (char*) "columns_priv"; + tables[0].db=tables[1].db= (char *) "mysql"; + tables[0].next=tables+1; + tables[0].lock_type=tables[1].lock_type=TL_READ; + + /* + To avoid deadlocks we should obtain table locks before + obtaining LOCK_grant rwlock. + */ + if (simple_open_n_lock_tables(thd, tables)) + goto end; + + rw_wrlock(&LOCK_grant); grant_version++; - old_hash_tables=hash_tables; + old_column_priv_hash= column_priv_hash; old_grant_option= grant_option; - old_mem = memex; + old_mem= memex; - if (grant_init(thd)) + if ((return_val= grant_load(tables))) { // Error. 
Revert to old hash + DBUG_PRINT("error",("Reverting to old privileges")); grant_free(); /* purecov: deadcode */ - hash_tables=old_hash_tables; /* purecov: deadcode */ + column_priv_hash= old_column_priv_hash; /* purecov: deadcode */ grant_option= old_grant_option; /* purecov: deadcode */ - memex = old_mem; /* purecov: deadcode */ + memex= old_mem; /* purecov: deadcode */ } else { - hash_free(&old_hash_tables); + hash_free(&old_column_priv_hash); free_root(&old_mem,MYF(0)); } - pthread_mutex_unlock(&LOCK_grant); - DBUG_VOID_RETURN; + rw_unlock(&LOCK_grant); +end: + close_thread_tables(thd); + DBUG_RETURN(return_val); } @@ -2610,19 +2865,20 @@ void grant_reload(THD *thd) ****************************************************************************/ bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables, - uint show_table, bool no_errors) + uint show_table, uint number, bool no_errors) { TABLE_LIST *table; char *user = thd->priv_user; + DBUG_ENTER("check_grant"); want_access &= ~thd->master_access; if (!want_access) - return 0; // ok + DBUG_RETURN(0); // ok - pthread_mutex_lock(&LOCK_grant); - for (table=tables; table ;table=table->next) + rw_rdlock(&LOCK_grant); + for (table= tables; table && number--; table= table->next) { - if (!(~table->grant.privilege & want_access)) + if (!(~table->grant.privilege & want_access) || table->derived) { table->grant.want_privilege=0; continue; // Already checked @@ -2653,22 +2909,22 @@ bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables, goto err; // impossible } } - pthread_mutex_unlock(&LOCK_grant); - return 0; + rw_unlock(&LOCK_grant); + DBUG_RETURN(0); err: - pthread_mutex_unlock(&LOCK_grant); + rw_unlock(&LOCK_grant); if (!no_errors) // Not a silent skip of table { char command[128]; get_privilege_desc(command, sizeof(command), want_access); - net_printf(&thd->net,ER_TABLEACCESS_DENIED_ERROR, + net_printf(thd,ER_TABLEACCESS_DENIED_ERROR, command, thd->priv_user, thd->host_or_ip, table ? table->real_name : "unknown"); } - return 1; + DBUG_RETURN(1); } @@ -2681,10 +2937,8 @@ bool check_grant_column(THD *thd,TABLE *table, const char *name, ulong want_access=table->grant.want_privilege; if (!want_access) return 0; // Already checked - if (!grant_option) - goto err2; - pthread_mutex_lock(&LOCK_grant); + rw_rdlock(&LOCK_grant); /* reload table if someone has modified any grants */ @@ -2702,21 +2956,20 @@ bool check_grant_column(THD *thd,TABLE *table, const char *name, grant_column=column_hash_search(grant_table, name, length); if (grant_column && !(~grant_column->rights & want_access)) { - pthread_mutex_unlock(&LOCK_grant); + rw_unlock(&LOCK_grant); return 0; } #ifdef NOT_USED if (show_tables && (grant_column || table->grant.privilege & COL_ACLS)) { - pthread_mutex_unlock(&LOCK_grant); /* purecov: deadcode */ + rw_unlock(&LOCK_grant); /* purecov: deadcode */ return 0; /* purecov: deadcode */ } #endif /* We must use my_printf_error() here! 
*/ err: - pthread_mutex_unlock(&LOCK_grant); -err2: + rw_unlock(&LOCK_grant); if (!show_tables) { char command[128]; @@ -2742,9 +2995,14 @@ bool check_grant_all_columns(THD *thd, ulong want_access, TABLE *table) want_access &= ~table->grant.privilege; if (!want_access) - return 0; // Already checked + return 0; // Already checked + if (!grant_option) + { + field= table->field[0]; // To give a meaningful error message + goto err2; + } - pthread_mutex_lock(&LOCK_grant); + rw_rdlock(&LOCK_grant); /* reload table if someone has modified any grants */ @@ -2767,13 +3025,13 @@ bool check_grant_all_columns(THD *thd, ulong want_access, TABLE *table) if (!grant_column || (~grant_column->rights & want_access)) goto err; } - pthread_mutex_unlock(&LOCK_grant); + rw_unlock(&LOCK_grant); return 0; /* We must use my_printf_error() here! */ err: - pthread_mutex_unlock(&LOCK_grant); - + rw_unlock(&LOCK_grant); +err2: char command[128]; get_privilege_desc(command, sizeof(command), want_access); my_printf_error(ER_COLUMNACCESS_DENIED_ERROR, @@ -2801,21 +3059,21 @@ bool check_grant_db(THD *thd,const char *db) bool error=1; len = (uint) (strmov(strmov(helping,thd->priv_user)+1,db)-helping)+ 1; - pthread_mutex_lock(&LOCK_grant); + rw_rdlock(&LOCK_grant); - for (uint idx=0 ; idx < hash_tables.records ; idx++) + for (uint idx=0 ; idx < column_priv_hash.records ; idx++) { - GRANT_TABLE *grant_table = (GRANT_TABLE*) hash_element(&hash_tables,idx); + GRANT_TABLE *grant_table= (GRANT_TABLE*) hash_element(&column_priv_hash, + idx); if (len < grant_table->key_length && !memcmp(grant_table->hash_key,helping,len) && - (thd->host && !wild_case_compare(thd->host,grant_table->host) || - (thd->ip && !wild_case_compare(thd->ip,grant_table->host)))) + compare_hostname(&grant_table->host, thd->host, thd->ip)) { error=0; // Found match break; } } - pthread_mutex_unlock(&LOCK_grant); + rw_unlock(&LOCK_grant); return error; } @@ -2825,20 +3083,24 @@ bool check_grant_db(THD *thd,const char *db) ulong get_table_grant(THD *thd, TABLE_LIST *table) { - uint privilege; + ulong privilege; char *user = thd->priv_user; const char *db = table->db ? 
table->db : thd->db; GRANT_TABLE *grant_table; - pthread_mutex_lock(&LOCK_grant); - grant_table = table_hash_search(thd->host,thd->ip,db,user, - table->real_name, 0); + rw_rdlock(&LOCK_grant); +#ifdef EMBEDDED_LIBRARY + grant_table= NULL; +#else + grant_table= table_hash_search(thd->host, thd->ip, db, user, + table->real_name, 0); +#endif table->grant.grant_table=grant_table; // Remember for column test table->grant.version=grant_version; if (grant_table) table->grant.privilege|= grant_table->privs; privilege= table->grant.privilege; - pthread_mutex_unlock(&LOCK_grant); + rw_unlock(&LOCK_grant); return privilege; } @@ -2849,7 +3111,7 @@ ulong get_column_grant(THD *thd, TABLE_LIST *table, Field *field) GRANT_COLUMN *grant_column; ulong priv; - pthread_mutex_lock(&LOCK_grant); + rw_rdlock(&LOCK_grant); /* reload table if someone has modified any grants */ if (table->grant.version != grant_version) { @@ -2871,7 +3133,7 @@ ulong get_column_grant(THD *thd, TABLE_LIST *table, Field *field) else priv=table->grant.privilege | grant_column->rights; } - pthread_mutex_unlock(&LOCK_grant); + rw_unlock(&LOCK_grant); return priv; } @@ -2916,19 +3178,22 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) ulong want_access; uint counter,index; int error = 0; - ACL_USER *acl_user; ACL_DB *acl_db; + ACL_USER *acl_user; + ACL_DB *acl_db; char buff[1024]; + Protocol *protocol= thd->protocol; DBUG_ENTER("mysql_show_grants"); LINT_INIT(acl_user); if (!initialized) { - send_error(&(thd->net), ER_UNKNOWN_COM_ERROR); + my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--skip-grant-tables"); DBUG_RETURN(-1); } + if (!lex_user->host.str) { - lex_user->host.str=(char*) "%"; + lex_user->host.str= (char*) "%"; lex_user->host.length=1; } if (lex_user->host.length > HOSTNAME_LENGTH || @@ -2943,40 +3208,40 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) const char *user,*host; acl_user=dynamic_element(&acl_users,counter,ACL_USER*); if (!(user=acl_user->user)) - user=""; + user= ""; if (!(host=acl_user->host.hostname)) - host=""; + host= ""; if (!strcmp(lex_user->user.str,user) && - !my_strcasecmp(lex_user->host.str,host)) + !my_strcasecmp(system_charset_info, lex_user->host.str, host)) break; } if (counter == acl_users.elements) { - my_printf_error(ER_NONEXISTING_GRANT,ER(ER_NONEXISTING_GRANT), - MYF(0),lex_user->user.str,lex_user->host.str); + my_error(ER_NONEXISTING_GRANT, MYF(0), + lex_user->user.str, lex_user->host.str); DBUG_RETURN(-1); } - Item_string *field=new Item_string("",0); + Item_string *field=new Item_string("",0,&my_charset_latin1); List<Item> field_list; field->name=buff; field->max_length=1024; strxmov(buff,"Grants for ",lex_user->user.str,"@", lex_user->host.str,NullS); field_list.push_back(field); - if (send_fields(thd,field_list,1)) + if (protocol->send_fields(&field_list,1)) DBUG_RETURN(-1); - pthread_mutex_lock(&LOCK_grant); + rw_wrlock(&LOCK_grant); VOID(pthread_mutex_lock(&acl_cache->lock)); /* Add first global access grants */ { - want_access=acl_user->access; - String global(buff,sizeof(buff)); + String global(buff,sizeof(buff),system_charset_info); global.length(0); global.append("GRANT ",6); + want_access= acl_user->access; if (test_all_bits(want_access, (GLOBAL_ACLS & ~ GRANT_ACL))) global.append("ALL PRIVILEGES",14); else if (!(want_access & ~GRANT_ACL)) @@ -2997,16 +3262,20 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) } } global.append (" ON *.* TO '",12); - global.append(lex_user->user.str,lex_user->user.length); + global.append(lex_user->user.str, lex_user->user.length, + 
system_charset_info); global.append ("'@'",3); global.append(lex_user->host.str,lex_user->host.length); global.append ('\''); - if (acl_user->password) + if (acl_user->salt_len) { - char passd_buff[HASH_PASSWORD_LENGTH+1]; - make_password_from_salt(passd_buff,acl_user->salt); + char passwd_buff[SCRAMBLED_PASSWORD_CHAR_LENGTH+1]; + if (acl_user->salt_len == SCRAMBLE_LENGTH) + make_password_from_salt(passwd_buff, acl_user->salt); + else + make_password_from_salt_323(passwd_buff, (ulong *) acl_user->salt); global.append(" IDENTIFIED BY PASSWORD '",25); - global.append(passd_buff); + global.append(passwd_buff); global.append('\''); } /* "show grants" SSL related stuff */ @@ -3056,12 +3325,12 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) add_user_option(&global, acl_user->user_resource.connections, "MAX_CONNECTIONS_PER_HOUR"); } - thd->packet.length(0); - net_store_data(&thd->packet,global.ptr(),global.length()); - if (my_net_write(&thd->net,(char*) thd->packet.ptr(), - thd->packet.length())) + protocol->prepare_for_resend(); + protocol->store(global.ptr(),global.length(),global.charset()); + if (protocol->write()) { - error=-1; goto end; + error= -1; + goto end; } } @@ -3072,17 +3341,17 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) acl_db=dynamic_element(&acl_dbs,counter,ACL_DB*); if (!(user=acl_db->user)) - user=""; + user= ""; if (!(host=acl_db->host.hostname)) - host=""; + host= ""; if (!strcmp(lex_user->user.str,user) && - !my_strcasecmp(lex_user->host.str,host)) + !my_strcasecmp(system_charset_info, lex_user->host.str, host)) { want_access=acl_db->access; if (want_access) { - String db(buff,sizeof(buff)); + String db(buff,sizeof(buff),system_charset_info); db.length(0); db.append("GRANT ",6); @@ -3105,21 +3374,21 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) } } } - db.append(" ON `",5); - db.append(acl_db->db); - db.append("`.* TO '",8); - db.append(lex_user->user.str,lex_user->user.length); - db.append("'@'",3); + db.append (" ON ",4); + append_identifier(thd, &db, acl_db->db, strlen(acl_db->db)); + db.append (".* TO '",7); + db.append(lex_user->user.str, lex_user->user.length, + system_charset_info); + db.append ("'@'",3); db.append(lex_user->host.str, lex_user->host.length); - db.append('\''); + db.append ('\''); if (want_access & GRANT_ACL) db.append(" WITH GRANT OPTION",18); - thd->packet.length(0); - net_store_data(&thd->packet,db.ptr(),db.length()); - if (my_net_write(&thd->net,(char*) thd->packet.ptr(), - thd->packet.length())) + protocol->prepare_for_resend(); + protocol->store(db.ptr(),db.length(),db.charset()); + if (protocol->write()) { - error=-1; + error= -1; goto end; } } @@ -3127,21 +3396,23 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) } /* Add table & column access */ - for (index=0 ; index < hash_tables.records ; index++) + for (index=0 ; index < column_priv_hash.records ; index++) { const char *user; - GRANT_TABLE *grant_table= (GRANT_TABLE*) hash_element(&hash_tables,index); + GRANT_TABLE *grant_table= (GRANT_TABLE*) hash_element(&column_priv_hash, + index); if (!(user=grant_table->user)) user= ""; if (!strcmp(lex_user->user.str,user) && - !my_strcasecmp(lex_user->host.str, grant_table->orig_host)) + !my_strcasecmp(system_charset_info, lex_user->host.str, + grant_table->host.hostname)) { ulong table_access= grant_table->privs; if ((table_access | grant_table->cols) != 0) { - String global(buff,sizeof(buff)); + String global(buff, sizeof(buff), system_charset_info); ulong test_access= (table_access | grant_table->cols) & ~GRANT_ACL; 
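Both the global and the table/column sections of mysql_show_grants() above follow the same pattern when rebuilding the privilege clause: if every bit of the relevant ACL mask is present, emit "ALL PRIVILEGES"; if nothing but GRANT_ACL is set, emit "USAGE"; otherwise list the individual privilege names, and append WITH GRANT OPTION at the end when GRANT_ACL is set. A minimal standalone sketch of that decision follows; the bit values and names are simplified stand-ins for the real *_ACL constants and the server's command table, and std::string replaces the String class.

#include <cstdio>
#include <string>

typedef unsigned long ulong;

// Stand-in bit values; the real SELECT_ACL/INSERT_ACL/... live in sql_acl.h.
static const ulong SELECT_BIT = 1UL << 0;
static const ulong INSERT_BIT = 1UL << 1;
static const ulong UPDATE_BIT = 1UL << 2;
static const ulong GRANT_BIT  = 1UL << 10;
static const ulong ALL_BITS   = SELECT_BIT | INSERT_BIT | UPDATE_BIT; // like "TABLE_ACLS & ~GRANT_ACL"

static std::string priv_clause(ulong access)
{
  if ((access & ALL_BITS) == ALL_BITS)        // what test_all_bits() checks
    return "ALL PRIVILEGES";
  if (!(access & ~GRANT_BIT))                 // nothing but (possibly) GRANT OPTION
    return "USAGE";
  std::string out;
  struct { ulong bit; const char *name; } names[]=
    { {SELECT_BIT, "SELECT"}, {INSERT_BIT, "INSERT"}, {UPDATE_BIT, "UPDATE"} };
  for (const auto &n : names)
    if (access & n.bit)
      out += (out.empty() ? "" : ", ") + std::string(n.name);
  return out;
}

int main()
{
  ulong access= SELECT_BIT | INSERT_BIT;
  std::string stmt= "GRANT " + priv_clause(access) + " ON *.* TO 'u'@'%'";
  if (access & GRANT_BIT)
    stmt += " WITH GRANT OPTION";
  printf("%s\n", stmt.c_str());   // GRANT SELECT, INSERT ON *.* TO 'u'@'%'
  return 0;
}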
global.length(0); @@ -3149,7 +3420,7 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) if (test_all_bits(table_access, (TABLE_ACLS & ~GRANT_ACL))) global.append("ALL PRIVILEGES",14); - else if (!test_access) + else if (!test_access) global.append("USAGE",5); else { @@ -3195,7 +3466,8 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) else global.append(", ",2); global.append(grant_column->column, - grant_column->key_length); + grant_column->key_length, + system_charset_info); } } if (found_col) @@ -3204,34 +3476,35 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) } } } - global.append(" ON `",5); - global.append(grant_table->db); - global.append("`.`",3); - global.append(grant_table->tname); - global.append("` TO '",6); - global.append(lex_user->user.str,lex_user->user.length); + global.append(" ON ",4); + append_identifier(thd, &global, grant_table->db, + strlen(grant_table->db)); + global.append('.'); + append_identifier(thd, &global, grant_table->tname, + strlen(grant_table->tname)); + global.append(" TO '",5); + global.append(lex_user->user.str, lex_user->user.length, + system_charset_info); global.append("'@'",3); global.append(lex_user->host.str,lex_user->host.length); global.append('\''); if (table_access & GRANT_ACL) global.append(" WITH GRANT OPTION",18); - thd->packet.length(0); - net_store_data(&thd->packet,global.ptr(),global.length()); - if (my_net_write(&thd->net,(char*) thd->packet.ptr(), - thd->packet.length())) + protocol->prepare_for_resend(); + protocol->store(global.ptr(),global.length(),global.charset()); + if (protocol->write()) { - error=-1; + error= -1; break; } } } } - end: VOID(pthread_mutex_unlock(&acl_cache->lock)); - pthread_mutex_unlock(&LOCK_grant); + rw_unlock(&LOCK_grant); - send_eof(&thd->net); + send_eof(thd); DBUG_RETURN(error); } @@ -3267,12 +3540,339 @@ void get_privilege_desc(char *to, uint max_length, ulong access) void get_mqh(const char *user, const char *host, USER_CONN *uc) { ACL_USER *acl_user; - if (initialized && (acl_user= find_acl_user(host,user))) + if (initialized && (acl_user= find_acl_user(host,user, FALSE))) uc->user_resources= acl_user->user_resource; else bzero((char*) &uc->user_resources, sizeof(uc->user_resources)); } +int open_grant_tables(THD *thd, TABLE_LIST *tables) +{ + DBUG_ENTER("open_grant_tables"); + + if (!initialized) + { + net_printf(thd,ER_OPTION_PREVENTS_STATEMENT, "--skip-grant-tables"); + DBUG_RETURN(-1); + } + + bzero((char*) tables, 4*sizeof(*tables)); + tables->alias= tables->real_name= (char*) "user"; + (tables+1)->alias= (tables+1)->real_name= (char*) "db"; + (tables+2)->alias= (tables+2)->real_name= (char*) "tables_priv"; + (tables+3)->alias= (tables+3)->real_name= (char*) "columns_priv"; + tables->next= tables+1; + (tables+1)->next= tables+2; + (tables+2)->next= tables+3; + (tables+3)->next= 0; + tables->lock_type= (tables+1)->lock_type= + (tables+2)->lock_type= (tables+3)->lock_type= TL_WRITE; + tables->db= (tables+1)->db= (tables+2)->db= (tables+3)->db=(char*) "mysql"; + +#ifdef HAVE_REPLICATION + /* + GRANT and REVOKE are applied the slave in/exclusion rules as they are + some kind of updates to the mysql.% tables. + */ + if (thd->slave_thread && table_rules_on) + { + /* + The tables must be marked "updating" so that tables_ok() takes them into + account in tests. 
+ */ + tables[0].updating=tables[1].updating=tables[2].updating=tables[3].updating=1; + if (!tables_ok(0, tables)) + DBUG_RETURN(1); + tables[0].updating=tables[1].updating=tables[2].updating=tables[3].updating=0; + } +#endif + + if (simple_open_n_lock_tables(thd, tables)) + { // This should never happen + close_thread_tables(thd); + DBUG_RETURN(-1); + } + + DBUG_RETURN(0); +} + +ACL_USER *check_acl_user(LEX_USER *user_name, + uint *acl_acl_userdx) +{ + ACL_USER *acl_user= 0; + uint counter; + + for (counter= 0 ; counter < acl_users.elements ; counter++) + { + const char *user,*host; + acl_user= dynamic_element(&acl_users, counter, ACL_USER*); + if (!(user=acl_user->user)) + user= ""; + if (!(host=acl_user->host.hostname)) + host= ""; + if (!strcmp(user_name->user.str,user) && + !my_strcasecmp(system_charset_info, user_name->host.str, host)) + break; + } + if (counter == acl_users.elements) + return 0; + + *acl_acl_userdx= counter; + return acl_user; +} + + +int mysql_drop_user(THD *thd, List <LEX_USER> &list) +{ + uint counter, acl_userd; + int result; + ACL_USER *acl_user; + ACL_DB *acl_db; + TABLE_LIST tables[4]; + + DBUG_ENTER("mysql_drop_user"); + + if ((result= open_grant_tables(thd, tables))) + DBUG_RETURN(result == 1 ? 0 : 1); + + rw_wrlock(&LOCK_grant); + VOID(pthread_mutex_lock(&acl_cache->lock)); + + LEX_USER *user_name; + List_iterator <LEX_USER> user_list(list); + while ((user_name=user_list++)) + { + if (!(acl_user= check_acl_user(user_name, &counter))) + { + sql_print_error("DROP USER: Can't drop user: '%s'@'%s'; No such user", + user_name->user.str, + user_name->host.str); + result= -1; + continue; + } + if ((acl_user->access & ~0)) + { + sql_print_error("DROP USER: Can't drop user: '%s'@'%s'; Global privileges exists", + user_name->user.str, + user_name->host.str); + result= -1; + continue; + } + acl_userd= counter; + + for (counter= 0 ; counter < acl_dbs.elements ; counter++) + { + const char *user,*host; + acl_db=dynamic_element(&acl_dbs,counter,ACL_DB*); + if (!(user= acl_db->user)) + user= ""; + if (!(host= acl_db->host.hostname)) + host= ""; + + if (!strcmp(user_name->user.str,user) && + !my_strcasecmp(system_charset_info, user_name->host.str, host)) + break; + } + if (counter != acl_dbs.elements) + { + sql_print_error("DROP USER: Can't drop user: '%s'@'%s'; Database privileges exists", + user_name->user.str, + user_name->host.str); + result= -1; + continue; + } + + for (counter= 0 ; counter < column_priv_hash.records ; counter++) + { + const char *user,*host; + GRANT_TABLE *grant_table= (GRANT_TABLE*) hash_element(&column_priv_hash, + counter); + if (!(user=grant_table->user)) + user= ""; + if (!(host=grant_table->host.hostname)) + host= ""; + + if (!strcmp(user_name->user.str,user) && + !my_strcasecmp(system_charset_info, user_name->host.str, host)) + break; + } + if (counter != column_priv_hash.records) + { + sql_print_error("DROP USER: Can't drop user: '%s'@'%s'; Table privileges exists", + user_name->user.str, + user_name->host.str); + result= -1; + continue; + } + + tables[0].table->field[0]->store(user_name->host.str,(uint) + user_name->host.length, + system_charset_info); + tables[0].table->field[1]->store(user_name->user.str,(uint) + user_name->user.length, + system_charset_info); + tables[0].table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); + if (!tables[0].table->file->index_read_idx(tables[0].table->record[0],0, + (byte*) tables[0].table-> + field[0]->ptr, + tables[0].table-> + key_info[0].key_length, + HA_READ_KEY_EXACT)) + { + int error; + if ((error = 
tables[0].table->file->delete_row(tables[0].table-> + record[0]))) + { + tables[0].table->file->print_error(error, MYF(0)); + result= -1; + goto end; + } + delete_dynamic_element(&acl_users, acl_userd); + } + } + + if (result) + my_error(ER_DROP_USER, MYF(0)); + +end: + /* Reload acl_check_hosts as its memory is mapped to acl_user */ + delete_dynamic(&acl_wild_hosts); + hash_free(&acl_check_hosts); + init_check_host(); + + VOID(pthread_mutex_unlock(&acl_cache->lock)); + rw_unlock(&LOCK_grant); + close_thread_tables(thd); + DBUG_RETURN(result); +} + +int mysql_revoke_all(THD *thd, List <LEX_USER> &list) +{ + uint counter, revoked; + int result; + ACL_DB *acl_db; + TABLE_LIST tables[4]; + DBUG_ENTER("mysql_revoke_all"); + + if ((result= open_grant_tables(thd, tables))) + DBUG_RETURN(result == 1 ? 0 : 1); + + rw_wrlock(&LOCK_grant); + VOID(pthread_mutex_lock(&acl_cache->lock)); + + LEX_USER *lex_user; + List_iterator <LEX_USER> user_list(list); + while ((lex_user=user_list++)) + { + if (!check_acl_user(lex_user, &counter)) + { + sql_print_error("REVOKE ALL PRIVILEGES, GRANT: User '%s'@'%s' not exists", + lex_user->user.str, + lex_user->host.str); + result= -1; + continue; + } + + if (replace_user_table(thd, tables[0].table, + *lex_user, ~(ulong)0, 1, 0)) + { + result= -1; + continue; + } + + /* Remove db access privileges */ + /* + Because acl_dbs and column_priv_hash shrink and may re-order + as privileges are removed, removal occurs in a repeated loop + until no more privileges are revoked. + */ + do + { + for (counter= 0, revoked= 0 ; counter < acl_dbs.elements ; ) + { + const char *user,*host; + + acl_db=dynamic_element(&acl_dbs,counter,ACL_DB*); + if (!(user=acl_db->user)) + user= ""; + if (!(host=acl_db->host.hostname)) + host= ""; + + if (!strcmp(lex_user->user.str,user) && + !my_strcasecmp(system_charset_info, lex_user->host.str, host)) + { + if (!replace_db_table(tables[1].table, acl_db->db, *lex_user, ~(ulong)0, 1)) + { + /* + Don't increment counter as replace_db_table deleted the + current element in acl_dbs. 
+ */ + revoked= 1; + continue; + } + result= -1; // Something went wrong + } + counter++; + } + } while (revoked); + + /* Remove column access */ + do + { + for (counter= 0, revoked= 0 ; counter < column_priv_hash.records ; ) + { + const char *user,*host; + GRANT_TABLE *grant_table= (GRANT_TABLE*)hash_element(&column_priv_hash, + counter); + if (!(user=grant_table->user)) + user= ""; + if (!(host=grant_table->host.hostname)) + host= ""; + + if (!strcmp(lex_user->user.str,user) && + !my_strcasecmp(system_charset_info, lex_user->host.str, host)) + { + if (replace_table_table(thd,grant_table,tables[2].table,*lex_user, + grant_table->db, + grant_table->tname, + ~(ulong)0, 0, 1)) + { + result= -1; + } + else + { + if (!grant_table->cols) + { + revoked= 1; + continue; + } + List<LEX_COLUMN> columns; + if (!replace_column_table(grant_table,tables[3].table, *lex_user, + columns, + grant_table->db, + grant_table->tname, + ~(ulong)0, 1)) + { + revoked= 1; + continue; + } + result= -1; + } + } + counter++; + } + } while (revoked); + } + + VOID(pthread_mutex_unlock(&acl_cache->lock)); + rw_unlock(&LOCK_grant); + close_thread_tables(thd); + + if (result) + my_error(ER_REVOKE_GRANTS, MYF(0)); + + DBUG_RETURN(result); +} /***************************************************************************** @@ -3285,3 +3885,50 @@ template class List_iterator<LEX_USER>; template class List<LEX_COLUMN>; template class List<LEX_USER>; #endif + +#endif /*NO_EMBEDDED_ACCESS_CHECKS */ + + +int wild_case_compare(CHARSET_INFO *cs, const char *str,const char *wildstr) +{ + reg3 int flag; + DBUG_ENTER("wild_case_compare"); + DBUG_PRINT("enter",("str: '%s' wildstr: '%s'",str,wildstr)); + while (*wildstr) + { + while (*wildstr && *wildstr != wild_many && *wildstr != wild_one) + { + if (*wildstr == wild_prefix && wildstr[1]) + wildstr++; + if (my_toupper(cs, *wildstr++) != + my_toupper(cs, *str++)) DBUG_RETURN(1); + } + if (! *wildstr ) DBUG_RETURN (*str != 0); + if (*wildstr++ == wild_one) + { + if (! *str++) DBUG_RETURN (1); /* One char; skip */ + } + else + { /* Found '*' */ + if (!*wildstr) DBUG_RETURN(0); /* '*' as last char: OK */ + flag=(*wildstr != wild_many && *wildstr != wild_one); + do + { + if (flag) + { + char cmp; + if ((cmp= *wildstr) == wild_prefix && wildstr[1]) + cmp=wildstr[1]; + cmp=my_toupper(cs, cmp); + while (*str && my_toupper(cs, *str) != cmp) + str++; + if (!*str) DBUG_RETURN (1); + } + if (wild_case_compare(cs, str,wildstr) == 0) DBUG_RETURN (0); + } while (*str++); + DBUG_RETURN(1); + } + } + DBUG_RETURN (*str != '\0'); +} + diff --git a/sql/sql_acl.h b/sql/sql_acl.h index 7d8dcfd2079..256101ec7d8 100644 --- a/sql/sql_acl.h +++ b/sql/sql_acl.h @@ -14,7 +14,6 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - #define SELECT_ACL (1L << 0) #define INSERT_ACL (1L << 1) #define UPDATE_ACL (1L << 2) @@ -37,6 +36,12 @@ #define REPL_SLAVE_ACL (1L << 19) #define REPL_CLIENT_ACL (1L << 20) +/* + don't forget to update + static struct show_privileges_st sys_privileges[] + in sql_show.cc when adding new privileges! 
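wild_case_compare() above implements case-folded SQL pattern matching: wild_many ('%') matches any run of characters, wild_one ('_') matches exactly one, wild_prefix ('\') escapes the next character, and the function returns 0 on a match. A rough standalone model of the same recursion, using tolower() in place of the charset-aware my_toupper() and returning true on a match:

#include <cctype>
#include <cstdio>

static bool wild_match(const char *str, const char *pat)
{
  while (*pat)
  {
    if (*pat == '%')                              // wild_many
    {
      while (*pat == '%') pat++;
      if (!*pat) return true;                     // trailing '%' matches the rest
      for (; *str; str++)
        if (wild_match(str, pat)) return true;    // try every remaining suffix
      return false;
    }
    if (*pat == '_')                              // wild_one: exactly one character
    {
      if (!*str) return false;
      pat++; str++;
      continue;
    }
    if (*pat == '\\' && pat[1]) pat++;            // wild_prefix: escaped literal
    if (tolower((unsigned char) *pat) != tolower((unsigned char) *str))
      return false;
    pat++; str++;
  }
  return *str == '\0';
}

int main()
{
  printf("%d %d %d\n",
         wild_match("host1.example.com", "%.example.com"),   // 1
         wild_match("localhost", "local_ost"),               // 1
         wild_match("localhost", "%.example.com"));          // 0
  return 0;
}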
+*/ + #define DB_ACLS \ (UPDATE_ACL | SELECT_ACL | INSERT_ACL | DELETE_ACL | CREATE_ACL | DROP_ACL | \ @@ -79,19 +84,66 @@ #define fix_rights_for_column(A) (((A) & 7) | (((A) & ~7) << 8)) #define get_rights_for_column(A) (((A) & 7) | ((A) >> 8)) +/* Classes */ + +struct acl_host_and_ip +{ + char *hostname; + long ip,ip_mask; // Used with masked ip:s +}; + + +class ACL_ACCESS { +public: + ulong sort; + ulong access; +}; + + +/* ACL_HOST is used if no host is specified */ + +class ACL_HOST :public ACL_ACCESS +{ +public: + acl_host_and_ip host; + char *db; +}; + + +class ACL_USER :public ACL_ACCESS +{ +public: + acl_host_and_ip host; + uint hostname_length; + USER_RESOURCES user_resource; + char *user; + uint8 salt[SCRAMBLE_LENGTH+1]; // scrambled password in binary form + uint8 salt_len; // 0 - no password, 4 - 3.20, 8 - 3.23, 20 - 4.1.1 + enum SSL_type ssl_type; + const char *ssl_cipher, *x509_issuer, *x509_subject; +}; + + +class ACL_DB :public ACL_ACCESS +{ +public: + acl_host_and_ip host; + char *user,*db; +}; + /* prototypes */ -my_bool acl_init(THD *thd, bool dont_read_acl_tables); -void acl_reload(THD *thd); +bool hostname_requires_resolving(const char *hostname); +my_bool acl_init(bool dont_read_acl_tables); +my_bool acl_reload(THD *thd); void acl_free(bool end=0); -ulong acl_get(const char *host, const char *ip, const char *bin_ip, +ulong acl_get(const char *host, const char *ip, const char *user, const char *db, my_bool db_is_pattern); -ulong acl_getroot(THD *thd, const char *host, const char *ip, const char *user, - const char *password,const char *scramble, - char **priv_user, char *priv_host, - bool old_ver, USER_RESOURCES *max); +int acl_getroot(THD *thd, USER_RESOURCES *mqh, const char *passwd, + uint passwd_len); bool acl_check_host(const char *host, const char *ip); -bool check_change_password(THD *thd, const char *host, const char *user); +bool check_change_password(THD *thd, const char *host, const char *user, + char *password, uint password_len); bool change_password(THD *thd, const char *host, const char *user, char *password); int mysql_grant(THD *thd, const char *db, List <LEX_USER> &user_list, @@ -99,11 +151,11 @@ int mysql_grant(THD *thd, const char *db, List <LEX_USER> &user_list, int mysql_table_grant(THD *thd, TABLE_LIST *table, List <LEX_USER> &user_list, List <LEX_COLUMN> &column_list, ulong rights, bool revoke); -my_bool grant_init(THD *thd); +my_bool grant_init(); void grant_free(void); -void grant_reload(THD *thd); +my_bool grant_reload(THD *thd); bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables, - uint show_command=0, bool dont_print_error=0); + uint show_command, uint number, bool dont_print_error); bool check_grant_column (THD *thd,TABLE *table, const char *name, uint length, uint show_command=0); bool check_grant_all_columns(THD *thd, ulong want_access, TABLE *table); @@ -113,3 +165,10 @@ ulong get_column_grant(THD *thd, TABLE_LIST *table, Field *field); int mysql_show_grants(THD *thd, LEX_USER *user); void get_privilege_desc(char *to, uint max_length, ulong access); void get_mqh(const char *user, const char *host, USER_CONN *uc); +int mysql_drop_user(THD *thd, List <LEX_USER> &list); +int mysql_revoke_all(THD *thd, List <LEX_USER> &list); + +#ifdef NO_EMBEDDED_ACCESS_CHECKS +#define check_grant(A,B,C,D,E,F) 0 +#define check_grant_db(A,B) 0 +#endif diff --git a/sql/sql_analyse.cc b/sql/sql_analyse.cc index 0723c274a17..d2237c24139 100644 --- a/sql/sql_analyse.cc +++ b/sql/sql_analyse.cc @@ -23,7 +23,7 @@ ** - type set is out of 
optimization yet */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_IMPLEMENTATION #pragma implementation // gcc: Class implementation #endif @@ -38,13 +38,7 @@ int sortcmp2(void* cmp_arg __attribute__((unused)), const String *a,const String *b) { - return sortcmp(a,b); -} - -int stringcmp2(void* cmp_arg __attribute__((unused)), - const String *a,const String *b) -{ - return stringcmp(a,b); + return sortcmp(a,b,a->charset()); } int compare_double2(void* cmp_arg __attribute__((unused)), @@ -65,6 +59,7 @@ int compare_ulonglong2(void* cmp_arg __attribute__((unused)), return compare_ulonglong(s,t); } +static bool append_escaped(String *to_str, String *from_str); Procedure * proc_analyse_init(THD *thd, ORDER *param, select_result *result, @@ -75,6 +70,9 @@ proc_analyse_init(THD *thd, ORDER *param, select_result *result, field_info **f_info; DBUG_ENTER("proc_analyse_init"); + if (!pc) + DBUG_RETURN(0); + if (!(param = param->next)) { pc->max_tree_elements = MAX_TREE_ELEMENTS; @@ -86,34 +84,30 @@ proc_analyse_init(THD *thd, ORDER *param, select_result *result, if ((*param->item)->type() != Item::INT_ITEM || (*param->item)->val() < 0) { - delete pc; - net_printf(&thd->net, ER_WRONG_PARAMETERS_TO_PROCEDURE, proc_name); - DBUG_RETURN(0); + my_error(ER_WRONG_PARAMETERS_TO_PROCEDURE, MYF(0), proc_name); + goto err; } pc->max_tree_elements = (uint) (*param->item)->val_int(); param = param->next; if (param->next) // no third parameter possible { - delete pc; - net_printf(&thd->net, ER_WRONG_PARAMCOUNT_TO_PROCEDURE, proc_name); - DBUG_RETURN(0); + my_error(ER_WRONG_PARAMCOUNT_TO_PROCEDURE, MYF(0), proc_name); + goto err; } // second parameter if ((*param->item)->type() != Item::INT_ITEM || (*param->item)->val() < 0) { - delete pc; - net_printf(&thd->net, ER_WRONG_PARAMETERS_TO_PROCEDURE, proc_name); - DBUG_RETURN(0); + my_error(ER_WRONG_PARAMETERS_TO_PROCEDURE, MYF(0), proc_name); + goto err; } pc->max_treemem = (uint) (*param->item)->val_int(); } else if ((*param->item)->type() != Item::INT_ITEM || (*param->item)->val() < 0) { - delete pc; - net_printf(&thd->net, ER_WRONG_PARAMETERS_TO_PROCEDURE, proc_name); - DBUG_RETURN(0); + my_error(ER_WRONG_PARAMETERS_TO_PROCEDURE, MYF(0), proc_name); + goto err; } // if only one parameter was given, it will be the value of max_tree_elements else @@ -122,34 +116,39 @@ proc_analyse_init(THD *thd, ORDER *param, select_result *result, pc->max_treemem = MAX_TREEMEM; } - if (!pc || !(pc->f_info = (field_info**) - sql_alloc(sizeof(field_info*)*field_list.elements))) - DBUG_RETURN(0); + if (!(pc->f_info= + (field_info**)sql_alloc(sizeof(field_info*)*field_list.elements))) + goto err; pc->f_end = pc->f_info + field_list.elements; pc->fields = field_list; - List_iterator_fast<Item> it(pc->fields); - f_info = pc->f_info; - - Item *item; - while ((item = it++)) { - if (item->result_type() == INT_RESULT) + List_iterator_fast<Item> it(pc->fields); + f_info = pc->f_info; + + Item *item; + while ((item = it++)) { - // Check if fieldtype is ulonglong - if (item->type() == Item::FIELD_ITEM && - ((Item_field*) item)->field->type() == FIELD_TYPE_LONGLONG && - ((Field_longlong*) ((Item_field*) item)->field)->unsigned_flag) - *f_info++ = new field_ulonglong(item, pc); - else - *f_info++ = new field_longlong(item, pc); + if (item->result_type() == INT_RESULT) + { + // Check if fieldtype is ulonglong + if (item->type() == Item::FIELD_ITEM && + ((Item_field*) item)->field->type() == FIELD_TYPE_LONGLONG && + ((Field_longlong*) ((Item_field*) item)->field)->unsigned_flag) + *f_info++ = new 
field_ulonglong(item, pc); + else + *f_info++ = new field_longlong(item, pc); + } + if (item->result_type() == REAL_RESULT) + *f_info++ = new field_real(item, pc); + if (item->result_type() == STRING_RESULT) + *f_info++ = new field_str(item, pc); } - if (item->result_type() == REAL_RESULT) - *f_info++ = new field_real(item, pc); - if (item->result_type() == STRING_RESULT) - *f_info++ = new field_str(item, pc); } DBUG_RETURN(pc); +err: + delete pc; + DBUG_RETURN(0); } @@ -169,7 +168,7 @@ bool test_if_number(NUM_INFO *info, const char *str, uint str_len) MySQL removes any endspaces of a string, so we must take care only of spaces in front of a string */ - for (; str != end && isspace(*str); str++) ; + for (; str != end && my_isspace(system_charset_info, *str); str++) ; if (str == end) return 0; @@ -182,16 +181,18 @@ bool test_if_number(NUM_INFO *info, const char *str, uint str_len) else info->negative = 0; begin = str; - for (; str != end && isdigit(*str); str++) + for (; str != end && my_isdigit(system_charset_info,*str); str++) { if (!info->integers && *str == '0' && (str + 1) != end && - isdigit(*(str + 1))) + my_isdigit(system_charset_info,*(str + 1))) info->zerofill = 1; // could be a postnumber for example info->integers++; } if (str == end && info->integers) { - info->ullval = (ulonglong) strtoull(begin ,NULL, 10); + char *endpos= (char*) end; + int error; + info->ullval= (ulonglong) my_strtoll10(begin, &endpos, &error); if (info->integers == 1) return 0; // a single number can't be zerofill info->maybe_zerofill = 1; @@ -203,7 +204,9 @@ bool test_if_number(NUM_INFO *info, const char *str, uint str_len) return 0; if ((str + 1) == end) // number was something like '123[.eE]' { - info->ullval = (ulonglong) strtoull(begin, NULL, 10); + char *endpos= (char*) str; + int error; + info->ullval= (ulonglong) my_strtoll10(begin, &endpos, &error); return 1; } if (*str == 'e' || *str == 'E') // number may be something like '1e+50' @@ -211,7 +214,7 @@ bool test_if_number(NUM_INFO *info, const char *str, uint str_len) str++; if (*str != '-' && *str != '+') return 0; - for (str++; str != end && isdigit(*str); str++) ; + for (str++; str != end && my_isdigit(system_charset_info,*str); str++) ; if (str == end) { info->is_float = 1; // we can't use variable decimals here @@ -222,14 +225,16 @@ bool test_if_number(NUM_INFO *info, const char *str, uint str_len) for (str++; *(end - 1) == '0'; end--); // jump over zeros at the end if (str == end) // number was something like '123.000' { - info->ullval = (ulonglong) strtoull(begin, NULL, 10); + char *endpos= (char*) str; + int error; + info->ullval= (ulonglong) my_strtoll10(begin, &endpos, &error); return 1; } - for (; str != end && isdigit(*str); str++) + for (; str != end && my_isdigit(system_charset_info,*str); str++) info->decimals++; if (str == end) { - info->dval = atod(begin); + info->dval = my_atof(begin); return 1; } } @@ -275,7 +280,7 @@ void free_string(String *s) void field_str::add() { char buff[MAX_FIELD_WIDTH], *ptr; - String s(buff, sizeof(buff)), *res; + String s(buff, sizeof(buff),&my_charset_bin), *res; ulong length; if (!(res = item->val_str(&s))) @@ -310,6 +315,7 @@ void field_str::add() was_maybe_zerofill = num_info.maybe_zerofill; } + /* Update min and max arguments */ if (!found) { found = 1; @@ -325,30 +331,20 @@ void field_str::add() if (length > max_length) max_length = length; - if (item->binary) - { - if (stringcmp(res, &min_arg) < 0) - min_arg.copy(*res); - if (stringcmp(res, &max_arg) > 0) - max_arg.copy(*res); - } - else - { - 
if (sortcmp(res, &min_arg) < 0) - min_arg.copy(*res); - if (sortcmp(res, &max_arg) > 0) - max_arg.copy(*res); - } + if (sortcmp(res, &min_arg,item->collation.collation) < 0) + min_arg.copy(*res); + if (sortcmp(res, &max_arg,item->collation.collation) > 0) + max_arg.copy(*res); } if (room_in_tree) { if (res != &s) s.copy(*res); - if (!tree_search(&tree, (void*) &s)) // If not in tree + if (!tree_search(&tree, (void*) &s, tree.custom_arg)) // If not in tree { s.copy(); // slow, when SAFE_MALLOC is in use - if (!tree_insert(&tree, (void*) &s, 0)) + if (!tree_insert(&tree, (void*) &s, 0, tree.custom_arg)) { room_in_tree = 0; // Remove tree, out of RAM ? delete_tree(&tree); @@ -388,8 +384,7 @@ void field_real::add() if ((decs = decimals()) == NOT_FIXED_DEC) { - sprintf(buff, "%g", num); - length = (uint) strlen(buff); + length= my_sprintf(buff, (buff, "%g", num)); if (rint(num) != num) max_notzero_dec_len = 1; } @@ -398,12 +393,11 @@ void field_real::add() #ifdef HAVE_SNPRINTF buff[sizeof(buff)-1]=0; // Safety snprintf(buff, sizeof(buff)-1, "%-.*f", (int) decs, num); + length = (uint) strlen(buff); #else - sprintf(buff, "%-.*f", (int) decs, num); + length= my_sprintf(buff, (buff, "%-.*f", (int) decs, num)); #endif - length = (uint) strlen(buff); - // We never need to check further than this end = buff + length - 1 - decs + max_notzero_dec_len; @@ -417,7 +411,7 @@ void field_real::add() if (room_in_tree) { - if (!(element = tree_insert(&tree, (void*) &num, 0))) + if (!(element = tree_insert(&tree, (void*) &num, 0, tree.custom_arg))) { room_in_tree = 0; // Remove tree, out of RAM ? delete_tree(&tree); @@ -473,7 +467,7 @@ void field_longlong::add() if (room_in_tree) { - if (!(element = tree_insert(&tree, (void*) &num, 0))) + if (!(element = tree_insert(&tree, (void*) &num, 0, tree.custom_arg))) { room_in_tree = 0; // Remove tree, out of RAM ? delete_tree(&tree); @@ -529,7 +523,7 @@ void field_ulonglong::add() if (room_in_tree) { - if (!(element = tree_insert(&tree, (void*) &num, 0))) + if (!(element = tree_insert(&tree, (void*) &num, 0, tree.custom_arg))) { room_in_tree = 0; // Remove tree, out of RAM ? delete_tree(&tree); @@ -586,8 +580,9 @@ bool analyse::end_of_records() { field_info **f = f_info; char buff[MAX_FIELD_WIDTH]; - String *res, s_min(buff, sizeof(buff)), s_max(buff, sizeof(buff)), - ans(buff, sizeof(buff)); + String *res, s_min(buff, sizeof(buff),&my_charset_bin), + s_max(buff, sizeof(buff),&my_charset_bin), + ans(buff, sizeof(buff),&my_charset_bin); for (; f != f_end; f++) { @@ -601,23 +596,23 @@ bool analyse::end_of_records() { func_items[1]->null_value = 0; res = (*f)->get_min_arg(&s_min); - func_items[1]->set(res->ptr(), res->length()); + func_items[1]->set(res->ptr(), res->length(), res->charset()); func_items[2]->null_value = 0; res = (*f)->get_max_arg(&s_max); - func_items[2]->set(res->ptr(), res->length()); + func_items[2]->set(res->ptr(), res->length(), res->charset()); } func_items[3]->set((longlong) (*f)->min_length); func_items[4]->set((longlong) (*f)->max_length); func_items[5]->set((longlong) (*f)->empty); func_items[6]->set((longlong) (*f)->nulls); res = (*f)->avg(&s_max, rows); - func_items[7]->set(res->ptr(), res->length()); + func_items[7]->set(res->ptr(), res->length(), res->charset()); func_items[8]->null_value = 0; res = (*f)->std(&s_max, rows); if (!res) func_items[8]->null_value = 1; else - func_items[8]->set(res->ptr(), res->length()); + func_items[8]->set(res->ptr(), res->length(), res->charset()); /* count the dots, quotas, etc. 
in (ENUM("a","b","c"...)) If tree has been removed, don't suggest ENUM. @@ -635,14 +630,14 @@ bool analyse::end_of_records() ((*f)->tree.elements_in_tree * 3 - 1 + 6)))) { char tmp[331]; //331, because one double prec. num. can be this long - String tmp_str(tmp, sizeof(tmp)); + String tmp_str(tmp, sizeof(tmp),&my_charset_bin); TREE_INFO tree_info; tree_info.str = &tmp_str; tree_info.found = 0; tree_info.item = (*f)->item; - tmp_str.set("ENUM(", 5); + tmp_str.set("ENUM(", 5,&my_charset_bin); tree_walk(&(*f)->tree, (*f)->collect_enum(), (char*) &tree_info, left_root_right); tmp_str.append(')'); @@ -650,7 +645,7 @@ bool analyse::end_of_records() if (!(*f)->nulls) tmp_str.append(" NOT NULL"); output_str_length = tmp_str.length(); - func_items[9]->set(tmp_str.ptr(), tmp_str.length()); + func_items[9]->set(tmp_str.ptr(), tmp_str.length(), tmp_str.charset()); if (result->send_data(result_fields)) return -1; continue; @@ -695,7 +690,7 @@ bool analyse::end_of_records() } if (!(*f)->nulls) ans.append(" NOT NULL"); - func_items[9]->set(ans.ptr(), ans.length()); + func_items[9]->set(ans.ptr(), ans.length(), ans.charset()); if (result->send_data(result_fields)) return -1; } @@ -746,7 +741,7 @@ void field_str::get_opt_type(String *answer, ha_rows total_rows) { if (must_be_blob) { - if (item->binary) + if (item->collation.collation == &my_charset_bin) answer->append("TINYBLOB", 8); else answer->append("TINYTEXT", 8); @@ -764,21 +759,21 @@ void field_str::get_opt_type(String *answer, ha_rows total_rows) } else if (max_length < (1L << 16)) { - if (item->binary) + if (item->collation.collation == &my_charset_bin) answer->append("BLOB", 4); else answer->append("TEXT", 4); } else if (max_length < (1L << 24)) { - if (item->binary) + if (item->collation.collation == &my_charset_bin) answer->append("MEDIUMBLOB", 10); else answer->append("MEDIUMTEXT", 10); } else { - if (item->binary) + if (item->collation.collation == &my_charset_bin) answer->append("LONGBLOB", 8); else answer->append("LONGTEXT", 8); @@ -793,19 +788,22 @@ void field_real::get_opt_type(String *answer, if (!max_notzero_dec_len) { + int len= (int) max_length - ((item->decimals == NOT_FIXED_DEC) ? + 0 : (item->decimals + 1)); + if (min_arg >= -128 && max_arg <= (min_arg >= 0 ? 255 : 127)) - sprintf(buff, "TINYINT(%d)", (int) max_length - (item->decimals + 1)); + sprintf(buff, "TINYINT(%d)", len); else if (min_arg >= INT_MIN16 && max_arg <= (min_arg >= 0 ? UINT_MAX16 : INT_MAX16)) - sprintf(buff, "SMALLINT(%d)", (int) max_length - (item->decimals + 1)); + sprintf(buff, "SMALLINT(%d)", len); else if (min_arg >= INT_MIN24 && max_arg <= (min_arg >= 0 ? UINT_MAX24 : INT_MAX24)) - sprintf(buff, "MEDIUMINT(%d)", (int) max_length - (item->decimals + 1)); + sprintf(buff, "MEDIUMINT(%d)", len); else if (min_arg >= INT_MIN32 && max_arg <= (min_arg >= 0 ? 
UINT_MAX32 : INT_MAX32)) - sprintf(buff, "INT(%d)", (int) max_length - (item->decimals + 1)); + sprintf(buff, "INT(%d)", len); else - sprintf(buff, "BIGINT(%d)", (int) max_length - (item->decimals + 1)); + sprintf(buff, "BIGINT(%d)", len); answer->append(buff, (uint) strlen(buff)); if (min_arg >= 0) answer->append(" UNSIGNED"); @@ -901,7 +899,8 @@ int collect_string(String *element, else info->found = 1; info->str->append('\''); - info->str->append(*element); + if (append_escaped(info->str, element)) + return 1; info->str->append('\''); return 0; } // collect_string @@ -911,14 +910,14 @@ int collect_real(double *element, element_count count __attribute__((unused)), TREE_INFO *info) { char buff[MAX_FIELD_WIDTH]; - String s(buff, sizeof(buff)); + String s(buff, sizeof(buff),current_thd->charset()); if (info->found) info->str->append(','); else info->found = 1; info->str->append('\''); - s.set(*element, info->item->decimals); + s.set(*element, info->item->decimals, current_thd->charset()); info->str->append(s); info->str->append('\''); return 0; @@ -930,14 +929,14 @@ int collect_longlong(longlong *element, TREE_INFO *info) { char buff[MAX_FIELD_WIDTH]; - String s(buff, sizeof(buff)); + String s(buff, sizeof(buff),&my_charset_bin); if (info->found) info->str->append(','); else info->found = 1; info->str->append('\''); - s.set(*element); + s.set(*element, current_thd->charset()); info->str->append(s); info->str->append('\''); return 0; @@ -949,14 +948,14 @@ int collect_ulonglong(ulonglong *element, TREE_INFO *info) { char buff[MAX_FIELD_WIDTH]; - String s(buff, sizeof(buff)); + String s(buff, sizeof(buff),&my_charset_bin); if (info->found) info->str->append(','); else info->found = 1; info->str->append('\''); - s.set(*element); + s.set(*element, current_thd->charset()); info->str->append(s); info->str->append('\''); return 0; @@ -1036,3 +1035,57 @@ uint check_ulonglong(const char *str, uint length) while (*cmp && *cmp++ == *str++) ; return ((uchar) str[-1] <= (uchar) cmp[-1]) ? smaller : bigger; } /* check_ulonlong */ + + +/* + Quote special characters in a string. + + SYNOPSIS + append_escaped(to_str, from_str) + to_str (in) A pointer to a String. + from_str (to) A pointer to an allocated string + + DESCRIPTION + append_escaped() takes a String type variable, where it appends + escaped the second argument. Only characters that require escaping + will be escaped. 
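field_real::get_opt_type() above walks the TINYINT..BIGINT ranges to suggest the narrowest integer column that can hold the observed minimum and maximum. The same ladder as a self-contained sketch, with the limits the diff writes as INT_MIN16/UINT_MAX24/etc. spelled out numerically; display widths and the DECIMAL branch are left out:

#include <cstdio>

// Suggest the narrowest MySQL integer type for values in [min_arg, max_arg].
// When min_arg >= 0 the column can be UNSIGNED, so the larger upper bounds apply.
static const char *suggest_int_type(long long min_arg, long long max_arg)
{
  bool uns= (min_arg >= 0);
  if (min_arg >= -128          && max_arg <= (uns ?        255LL :        127LL)) return "TINYINT";
  if (min_arg >= -32768        && max_arg <= (uns ?      65535LL :      32767LL)) return "SMALLINT";
  if (min_arg >= -8388608      && max_arg <= (uns ?   16777215LL :    8388607LL)) return "MEDIUMINT";
  if (min_arg >= -2147483648LL && max_arg <= (uns ? 4294967295LL : 2147483647LL)) return "INT";
  return "BIGINT";
}

int main()
{
  printf("%s %s %s\n",
         suggest_int_type(0, 200),       // TINYINT (unsigned range applies)
         suggest_int_type(-5, 200),      // SMALLINT (200 > 127 once signed)
         suggest_int_type(0, 100000));   // MEDIUMINT
  return 0;
}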
+ + RETURN VALUES + 0 Success + 1 Out of memory +*/ + +static bool append_escaped(String *to_str, String *from_str) +{ + char *from, *end, c; + + if (to_str->realloc(to_str->length() + from_str->length())) + return 1; + + from= (char*) from_str->ptr(); + end= from + from_str->length(); + for (; from < end; from++) + { + c= *from; + switch (c) { + case '\0': + c= '0'; + break; + case '\032': + c= 'Z'; + break; + case '\\': + case '\'': + break; + default: + goto normal_character; + } + if (to_str->append('\\')) + return 1; + + normal_character: + if (to_str->append(c)) + return 1; + } + return 0; +} diff --git a/sql/sql_analyse.h b/sql/sql_analyse.h index aa6d0dbb2d1..8523b05a1de 100644 --- a/sql/sql_analyse.h +++ b/sql/sql_analyse.h @@ -17,10 +17,12 @@ /* Analyse database */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_INTERFACE #pragma interface /* gcc class implementation */ #endif +#define my_thd_charset default_charset_info + #define DEC_IN_AVG 4 typedef struct st_number_info @@ -97,8 +99,6 @@ int collect_string(String *element, element_count count, int sortcmp2(void* cmp_arg __attribute__((unused)), const String *a,const String *b); -int stringcmp2(void* cmp_arg __attribute__((unused)), - const String *a,const String *b); class field_str :public field_info { @@ -110,12 +110,12 @@ class field_str :public field_info EV_NUM_INFO ev_num_info; public: - field_str(Item* a, analyse* b) :field_info(a,b), min_arg(""), - max_arg(""), sum(0), + field_str(Item* a, analyse* b) :field_info(a,b), + min_arg("",default_charset_info), + max_arg("",default_charset_info), sum(0), must_be_blob(0), was_zero_fill(0), was_maybe_zerofill(0), can_be_still_num(1) - { init_tree(&tree, 0, 0, sizeof(String), a->binary ? - (qsort_cmp2) stringcmp2 : (qsort_cmp2) sortcmp2, + { init_tree(&tree, 0, 0, sizeof(String), (qsort_cmp2) sortcmp2, 0, (tree_element_free) free_string, NULL); }; void add(); @@ -127,10 +127,10 @@ public: String *avg(String *s, ha_rows rows) { if (!(rows - nulls)) - s->set((double) 0.0, 1); + s->set((double) 0.0, 1,my_thd_charset); else s->set((ulonglong2double(sum) / ulonglong2double(rows - nulls)), - DEC_IN_AVG); + DEC_IN_AVG,my_thd_charset); return s; } friend int collect_string(String *element, element_count count, @@ -159,26 +159,34 @@ public: void add(); void get_opt_type(String*, ha_rows); - String *get_min_arg(String *s) { s->set(min_arg, item->decimals); return s; } - String *get_max_arg(String *s) { s->set(max_arg, item->decimals); return s; } + String *get_min_arg(String *s) + { + s->set(min_arg, item->decimals,my_thd_charset); + return s; + } + String *get_max_arg(String *s) + { + s->set(max_arg, item->decimals,my_thd_charset); + return s; + } String *avg(String *s, ha_rows rows) { if (!(rows - nulls)) - s->set((double) 0.0, 1); + s->set((double) 0.0, 1,my_thd_charset); else - s->set(((double)sum / (double) (rows - nulls)), item->decimals); + s->set(((double)sum / (double) (rows - nulls)), item->decimals,my_thd_charset); return s; } String *std(String *s, ha_rows rows) { double tmp = ulonglong2double(rows); if (!(tmp - nulls)) - s->set((double) 0.0, 1); + s->set((double) 0.0, 1,my_thd_charset); else { double tmp2 = ((sum_sqr - sum * sum / (tmp - nulls)) / (tmp - nulls)); - s->set(((double) tmp2 <= 0.0 ? 0.0 : sqrt(tmp2)), item->decimals); + s->set(((double) tmp2 <= 0.0 ? 
0.0 : sqrt(tmp2)), item->decimals,my_thd_charset); } return s; } @@ -205,26 +213,26 @@ public: void add(); void get_opt_type(String*, ha_rows); - String *get_min_arg(String *s) { s->set(min_arg); return s; } - String *get_max_arg(String *s) { s->set(max_arg); return s; } + String *get_min_arg(String *s) { s->set(min_arg,my_thd_charset); return s; } + String *get_max_arg(String *s) { s->set(max_arg,my_thd_charset); return s; } String *avg(String *s, ha_rows rows) { if (!(rows - nulls)) - s->set((double) 0.0, 1); + s->set((double) 0.0, 1,my_thd_charset); else - s->set(((double) sum / (double) (rows - nulls)), DEC_IN_AVG); + s->set(((double) sum / (double) (rows - nulls)), DEC_IN_AVG,my_thd_charset); return s; } String *std(String *s, ha_rows rows) { double tmp = ulonglong2double(rows); if (!(tmp - nulls)) - s->set((double) 0.0, 1); + s->set((double) 0.0, 1,my_thd_charset); else { double tmp2 = ((sum_sqr - sum * sum / (tmp - nulls)) / (tmp - nulls)); - s->set(((double) tmp2 <= 0.0 ? 0.0 : sqrt(tmp2)), DEC_IN_AVG); + s->set(((double) tmp2 <= 0.0 ? 0.0 : sqrt(tmp2)), DEC_IN_AVG,my_thd_charset); } return s; } @@ -249,28 +257,28 @@ public: (qsort_cmp2) compare_ulonglong2, 0, NULL, NULL); } void add(); void get_opt_type(String*, ha_rows); - String *get_min_arg(String *s) { s->set(min_arg); return s; } - String *get_max_arg(String *s) { s->set(max_arg); return s; } + String *get_min_arg(String *s) { s->set(min_arg,my_thd_charset); return s; } + String *get_max_arg(String *s) { s->set(max_arg,my_thd_charset); return s; } String *avg(String *s, ha_rows rows) { if (!(rows - nulls)) - s->set((double) 0.0, 1); + s->set((double) 0.0, 1,my_thd_charset); else s->set((ulonglong2double(sum) / ulonglong2double(rows - nulls)), - DEC_IN_AVG); + DEC_IN_AVG,my_thd_charset); return s; } String *std(String *s, ha_rows rows) { double tmp = ulonglong2double(rows); if (!(tmp - nulls)) - s->set((double) 0.0, 1); + s->set((double) 0.0, 1,my_thd_charset); else { double tmp2 = ((ulonglong2double(sum_sqr) - ulonglong2double(sum * sum) / (tmp - nulls)) / (tmp - nulls)); - s->set(((double) tmp2 <= 0.0 ? 0.0 : sqrt(tmp2)), DEC_IN_AVG); + s->set(((double) tmp2 <= 0.0 ? 
0.0 : sqrt(tmp2)), DEC_IN_AVG,my_thd_charset); } return s; } diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 42a2e692d21..60e91aff3f9 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -18,25 +18,23 @@ /* Basic functions needed by many modules */ #include "mysql_priv.h" -#include "sql_acl.h" +#include "sql_select.h" #include <m_ctype.h> #include <my_dir.h> #include <hash.h> #include <nisam.h> -#include <assert.h> #ifdef __WIN__ #include <io.h> #endif TABLE *unused_tables; /* Used by mysql_test */ HASH open_cache; /* Used by mysql_test */ +HASH assign_cache; static int open_unireg_entry(THD *thd,TABLE *entry,const char *db, const char *name, const char *alias); static void free_cache_entry(TABLE *entry); static void mysql_rm_tmp_tables(void); -static key_map get_key_map_from_key_list(TABLE *table, - List<String> *index_list); extern "C" byte *table_cache_key(const byte *record,uint *length, @@ -50,11 +48,11 @@ extern "C" byte *table_cache_key(const byte *record,uint *length, bool table_cache_init(void) { mysql_rm_tmp_tables(); - return hash_init(&open_cache,table_cache_size+16,0,0,table_cache_key, - (hash_free_key) free_cache_entry,0) != 0; + return hash_init(&open_cache, &my_charset_bin, table_cache_size+16, + 0, 0,table_cache_key, + (hash_free_key) free_cache_entry, 0) != 0; } - void table_cache_free(void) { DBUG_ENTER("table_cache_free"); @@ -64,7 +62,6 @@ void table_cache_free(void) DBUG_VOID_RETURN; } - uint cached_tables(void) { return open_cache.records; @@ -144,7 +141,8 @@ OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *wild) OPEN_TABLE_LIST *table; TABLE *entry=(TABLE*) hash_element(&open_cache,idx); - if ((!entry->real_name)) + DBUG_ASSERT(entry->real_name); + if ((!entry->real_name)) // To be removed continue; // Shouldn't happen if (wild) { @@ -157,9 +155,9 @@ OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *wild) table_list.db= (char*) entry->table_cache_key; table_list.real_name= entry->real_name; table_list.grant.privilege=0; + if (check_table_access(thd,SELECT_ACL | EXTRA_ACL,&table_list,1)) continue; - /* need to check if we haven't already listed it */ for (table= open_list ; table ; table=table->next) { @@ -194,92 +192,6 @@ OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *wild) DBUG_RETURN(open_list); } - -/****************************************************************************** -** Send name and type of result to client. -** Sum fields has table name empty and field_name. -** flag is a bit mask with the following functions: -** 1 send number of rows -** 2 send default values -** 4 Don't convert field names -******************************************************************************/ - -bool -send_fields(THD *thd,List<Item> &list,uint flag) -{ - List_iterator_fast<Item> it(list); - Item *item; - char buff[80]; - CONVERT *convert= (flag & 4) ? 
(CONVERT*) 0 : thd->variables.convert_set; - DBUG_ENTER("send_fields"); - - String tmp((char*) buff,sizeof(buff)),*res,*packet= &thd->packet; - - if (thd->fatal_error) // We have got an error - goto err; - - if (flag & 1) - { // Packet with number of elements - char *pos=net_store_length(buff,(uint) list.elements); - (void) my_net_write(&thd->net, buff,(uint) (pos-buff)); - } - while ((item=it++)) - { - char *pos; - Send_field field; - item->make_field(&field); - packet->length(0); - - if (convert) - { - if (convert->store(packet,field.table_name, - (uint) strlen(field.table_name)) || - convert->store(packet,field.col_name, - (uint) strlen(field.col_name)) || - packet->realloc(packet->length()+10)) - goto err; - } - else if (net_store_data(packet,field.table_name) || - net_store_data(packet,field.col_name) || - packet->realloc(packet->length()+10)) - goto err; /* purecov: inspected */ - pos= (char*) packet->ptr()+packet->length(); - - if (!(thd->client_capabilities & CLIENT_LONG_FLAG)) - { - packet->length(packet->length()+9); - pos[0]=3; int3store(pos+1,field.length); - pos[4]=1; pos[5]=field.type; - pos[6]=2; pos[7]=(char) field.flags; pos[8]= (char) field.decimals; - } - else - { - packet->length(packet->length()+10); - pos[0]=3; int3store(pos+1,field.length); - pos[4]=1; pos[5]=field.type; - pos[6]=3; int2store(pos+7,field.flags); pos[9]= (char) field.decimals; - } - if (flag & 2) - { // Send default value - if (!(res=item->val_str(&tmp))) - { - if (net_store_null(packet)) - goto err; - } - else if (net_store_data(packet,res->ptr(),res->length())) - goto err; - } - if (my_net_write(&thd->net, (char*) packet->ptr(),packet->length())) - break; /* purecov: inspected */ - } - send_eof(&thd->net,1); - DBUG_RETURN(0); - err: - send_error(&thd->net,ER_OUT_OF_RESOURCES); /* purecov: inspected */ - DBUG_RETURN(1); /* purecov: inspected */ -} - - /***************************************************************************** * Functions to free open table cache ****************************************************************************/ @@ -325,31 +237,33 @@ static void free_cache_entry(TABLE *table) DBUG_VOID_RETURN; } +/* Free resources allocated by filesort() and read_record() */ void free_io_cache(TABLE *table) { DBUG_ENTER("free_io_cache"); - if (table->io_cache) - { - close_cached_file(table->io_cache); - my_free((gptr) table->io_cache,MYF(0)); - table->io_cache=0; - } - if (table->record_pointers) + if (table->sort.io_cache) { - my_free((gptr) table->record_pointers,MYF(0)); - table->record_pointers=0; + close_cached_file(table->sort.io_cache); + my_free((gptr) table->sort.io_cache,MYF(0)); + table->sort.io_cache=0; } DBUG_VOID_RETURN; } - /* Close all tables which aren't in use by any thread */ +/* + Close all tables which aren't in use by any thread + + THD can be NULL, but then if_wait_for_refresh must be FALSE + and tables must be NULL. 
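The removed send_fields() above packed its metadata with net_store_length()/net_store_data(), i.e. as length-prefixed byte strings. The marker-byte layout sketched below is stated from general knowledge of the client/server protocol rather than from this diff, and the function names are local stand-ins, not the server's own:

#include <cstdio>
#include <cstring>

typedef unsigned char uchar;
typedef unsigned long long ulonglong;

// Length-encoded integer: small values are one byte, larger ones get a marker
// byte (0xfc/0xfd/0xfe) followed by 2, 3 or 8 little-endian bytes.
static uchar *store_length(uchar *to, ulonglong n)
{
  if (n < 251)
  {
    *to++= (uchar) n;
    return to;
  }
  if (n < 65536)
  {
    *to++= 0xfc;
    to[0]= (uchar) n; to[1]= (uchar) (n >> 8);
    return to + 2;
  }
  if (n < 16777216)
  {
    *to++= 0xfd;
    to[0]= (uchar) n; to[1]= (uchar) (n >> 8); to[2]= (uchar) (n >> 16);
    return to + 3;
  }
  *to++= 0xfe;
  for (int i= 0; i < 8; i++)
    to[i]= (uchar) (n >> (8 * i));
  return to + 8;
}

// Store one value as its length followed by the raw bytes.
static uchar *store_data(uchar *to, const char *s)
{
  size_t len= strlen(s);
  to= store_length(to, len);
  memcpy(to, s, len);
  return to + len;
}

int main()
{
  uchar packet[64], *end= packet;
  end= store_data(end, "user");          // e.g. a table name
  end= store_data(end, "Host");          // e.g. a column name
  printf("packed %d bytes\n", (int) (end - packet));
  return 0;
}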
+*/ bool close_cached_tables(THD *thd, bool if_wait_for_refresh, TABLE_LIST *tables) { bool result=0; DBUG_ENTER("close_cached_tables"); + DBUG_ASSERT(thd || (!if_wait_for_refresh && !tables)); VOID(pthread_mutex_lock(&LOCK_open)); if (!tables) @@ -377,8 +291,10 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh, if (!found) if_wait_for_refresh=0; // Nothing to wait for } +#ifndef EMBEDDED_LIBRARY if (!tables) kill_delayed_threads(); +#endif if (if_wait_for_refresh) { /* @@ -394,7 +310,8 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh, TRUE); bool found=1; /* Wait until all threads has closed all the tables we had locked */ - DBUG_PRINT("info", ("Waiting for others threads to close their open tables")); + DBUG_PRINT("info", + ("Waiting for others threads to close their open tables")); while (found && ! thd->killed) { found=0; @@ -424,7 +341,6 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh, VOID(pthread_mutex_unlock(&LOCK_open)); if (if_wait_for_refresh) { - THD *thd=current_thd; pthread_mutex_lock(&thd->mysys_var->mutex); thd->mysys_var->current_mutex= 0; thd->mysys_var->current_cond= 0; @@ -435,31 +351,60 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh, } -/* Put all tables used by thread in free list */ +/* + Close all tables used by thread -void close_thread_tables(THD *thd, bool locked) + SYNOPSIS + close_thread_tables() + thd Thread handler + lock_in_use Set to 1 (0 = default) if caller has a lock on + LOCK_open + skip_derived Set to 1 (0 = default) if we should not free derived + tables. + + IMPLEMENTATION + Unlocks tables and frees derived tables. + Put all normal tables used by thread in free list. +*/ + +void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived) { + bool found_old_table; DBUG_ENTER("close_thread_tables"); + if (thd->derived_tables && !skip_derived) + { + TABLE *table, *next; + /* + Close all derived tables generated from questions like + SELECT * from (select * from t1)) + */ + for (table= thd->derived_tables ; table ; table= next) + { + next= table->next; + free_tmp_table(thd, table); + } + thd->derived_tables= 0; + } if (thd->locked_tables) { ha_commit_stmt(thd); // If select statement DBUG_VOID_RETURN; // LOCK TABLES in use } - bool found_old_table=0; - if (thd->lock) { - mysql_unlock_tables(thd, thd->lock); thd->lock=0; + mysql_unlock_tables(thd, thd->lock); + thd->lock=0; } /* VOID(pthread_sigmask(SIG_SETMASK,&thd->block_signals,NULL)); */ - if (!locked) + if (!lock_in_use) VOID(pthread_mutex_lock(&LOCK_open)); safe_mutex_assert_owner(&LOCK_open); DBUG_PRINT("info", ("thd->open_tables=%p", thd->open_tables)); + found_old_table= 0; while (thd->open_tables) found_old_table|=close_thread_table(thd, &thd->open_tables); thd->some_tables_deleted=0; @@ -473,7 +418,7 @@ void close_thread_tables(THD *thd, bool locked) /* Tell threads waiting for refresh that something has happened */ VOID(pthread_cond_broadcast(&COND_refresh)); } - if (!locked) + if (!lock_in_use) VOID(pthread_mutex_unlock(&LOCK_open)); /* VOID(pthread_sigmask(SIG_SETMASK,&thd->signals,NULL)); */ DBUG_VOID_RETURN; @@ -506,7 +451,7 @@ bool close_thread_table(THD *thd, TABLE **table_ptr) else { // Free memory and reset for next loop - table->file->extra(HA_EXTRA_RESET); + table->file->reset(); } table->in_use=0; if (unused_tables) @@ -538,13 +483,22 @@ void close_temporary(TABLE *table,bool delete_table) DBUG_VOID_RETURN; } +/* close_temporary_tables' internal, 4 is due to uint4korr definition */ +static inline uint tmpkeyval(THD 
*thd, TABLE *table) +{ + return uint4korr(table->table_cache_key + table->key_length - 4); +} + +/* Creates one DROP TEMPORARY TABLE binlog event for each pseudo-thread */ void close_temporary_tables(THD *thd) { - TABLE *table,*next; - char *query, *end; - uint query_buf_size; - bool found_user_tables = 0; + TABLE *next, + *prev_table /* prev link is not maintained in TABLE's double-linked list */, + *table; + char *query= (gptr) 0, *end; + uint query_buf_size, max_names_len; + bool found_user_tables; if (!thd->temporary_tables) return; @@ -552,55 +506,175 @@ void close_temporary_tables(THD *thd) LINT_INIT(end); query_buf_size= 50; // Enough for DROP ... TABLE IF EXISTS - for (table=thd->temporary_tables ; table ; table=table->next) + /* + insertion sort of temp tables by pseudo_thread_id to build ordered list + of sublists of equal pseudo_thread_id + */ + for (prev_table= thd->temporary_tables, + table= prev_table->next, + found_user_tables= (prev_table->real_name[0] != '#'); + table; + prev_table= table, table= table->next) + { + TABLE *prev_sorted /* same as for prev_table */, + *sorted; /* - We are going to add 4 ` around the db/table names, so 1 does not look - enough; indeed it is enough, because table->key_length is greater (by 8, - because of server_id and thread_id) than db||table. + table not created directly by the user is moved to the tail. + Fixme/todo: nothing (I checked the manual) prevents user to create temp + with `#' */ - query_buf_size+= table->key_length+1; - - if ((query = alloc_root(&thd->mem_root, query_buf_size))) + if (table->real_name[0] == '#') + continue; + else + { + found_user_tables = 1; + } + for (prev_sorted= NULL, sorted= thd->temporary_tables; sorted != table; + prev_sorted= sorted, sorted= sorted->next) + { + if (sorted->real_name[0] == '#' || tmpkeyval(thd, sorted) > tmpkeyval(thd, table)) + { + /* move into the sorted part of the list from the unsorted */ + prev_table->next= table->next; + table->next= sorted; + if (prev_sorted) + { + prev_sorted->next= table; + } + else + { + thd->temporary_tables= table; + } + table= prev_table; + break; + } + } + } + /* + calc query_buf_size as max per sublists, one sublist per pseudo thread id. + Also stop at first occurence of `#'-named table that starts + all implicitly created temp tables + */ + for (max_names_len= 0, table=thd->temporary_tables; + table && table->real_name[0] != '#'; + table=table->next) + { + uint tmp_names_len; + for (tmp_names_len= table->key_length + 1; + table->next && table->real_name[0] != '#' && + tmpkeyval(thd, table) == tmpkeyval(thd, table->next); + table=table->next) + { + /* + We are going to add 4 ` around the db/table names, so 1 might not look + enough; indeed it is enough, because table->key_length is greater (by 8, + because of server_id and thread_id) than db||table. 
+ */ + tmp_names_len += table->next->key_length + 1; + } + if (tmp_names_len > max_names_len) max_names_len= tmp_names_len; + } + + /* allocate */ + if (found_user_tables && mysql_bin_log.is_open() && + (query = alloc_root(thd->mem_root, query_buf_size+= max_names_len))) // Better add "if exists", in case a RESET MASTER has been done - end=strmov(query, "DROP /*!40005 TEMPORARY */ TABLE IF EXISTS "); + end= strmov(query, "DROP /*!40005 TEMPORARY */ TABLE IF EXISTS "); - for (table=thd->temporary_tables ; table ; table=next) + /* scan sorted tmps to generate sequence of DROP */ + for (table=thd->temporary_tables; table; table= next) { - if (query) // we might be out of memory, but this is not fatal + if (query // we might be out of memory, but this is not fatal + && table->real_name[0] != '#') { - // skip temporary tables not created directly by the user - if (table->real_name[0] != '#') - found_user_tables = 1; + char *end_cur; + /* Set pseudo_thread_id to be that of the processed table */ + thd->variables.pseudo_thread_id= tmpkeyval(thd, table); + /* Loop forward through all tables within the sublist of + common pseudo_thread_id to create single DROP query */ + for (end_cur= end; + table && table->real_name[0] != '#' && + tmpkeyval(thd, table) == thd->variables.pseudo_thread_id; + table= next) + { + end_cur= strxmov(end_cur, "`", table->table_cache_key, "`.`", + table->real_name, "`,", NullS); + next= table->next; + close_temporary(table, 1); + } + thd->clear_error(); + /* The -1 is to remove last ',' */ + Query_log_event qinfo(thd, query, (ulong)(end_cur - query) - 1, 0, FALSE); /* - Here we assume table_cache_key always starts - with \0 terminated db name + Imagine the thread had created a temp table, then was doing a SELECT, and + the SELECT was killed. Then it's not clever to mark the statement above as + "killed", because it's not really a statement updating data, and there + are 99.99% chances it will succeed on slave. + If a real update (one updating a persistent table) was killed on the + master, then this real update will be logged with error_code=killed, + rightfully causing the slave to stop. */ - end = strxmov(end,"`",table->table_cache_key,"`.`", - table->real_name,"`,", NullS); + qinfo.error_code= 0; + mysql_bin_log.write(&qinfo); + } + else + { + next= table->next; + close_temporary(table, 1); } - next=table->next; - close_temporary(table); - } - if (query && found_user_tables && mysql_bin_log.is_open()) - { - /* The -1 is to remove last ',' */ - thd->clear_error(); - Query_log_event qinfo(thd, query, (ulong)(end-query)-1, 0); - /* - Imagine the thread had created a temp table, then was doing a SELECT, and - the SELECT was killed. Then it's not clever to mark the statement above as - "killed", because it's not really a statement updating data, and there - are 99.99% chances it will succeed on slave. - If a real update (one updating a persistent table) was killed on the - master, then this real update will be logged with error_code=killed, - rightfully causing the slave to stop. - */ - qinfo.error_code= 0; - mysql_bin_log.write(&qinfo); } thd->temporary_tables=0; } +/* + Find first suitable table by alias in given list. + + SYNOPSIS + find_table_in_list() + table - pointer to table list + db_name - data base name or 0 for any + table_name - table name or 0 for any + + RETURN VALUES + NULL Table not found + # Pointer to found table. 
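close_temporary_tables() above sorts the session's temporary tables by the pseudo_thread_id kept in the last four bytes of the cache key (tmpkeyval()), so that it can log one DROP TEMPORARY TABLE statement per pseudo-thread. A compact model of that grouping, with a std::map standing in for the in-place insertion sort on the linked list and made-up table names:

#include <cstdio>
#include <map>
#include <string>
#include <vector>

typedef unsigned int uint32;

struct TmpTable { std::string db, name; uint32 pseudo_thread_id; };

int main()
{
  std::vector<TmpTable> tmp_tables= {
    {"test", "t1", 7}, {"test", "t2", 9}, {"test", "t3", 7}
  };

  // Group by pseudo_thread_id (the sorted sublists in close_temporary_tables()).
  std::map<uint32, std::vector<const TmpTable*> > groups;
  for (const TmpTable &t : tmp_tables)
    groups[t.pseudo_thread_id].push_back(&t);

  // One DROP statement (hence one binlog event) per pseudo-thread id.
  for (const auto &g : groups)
  {
    std::string query= "DROP /*!40005 TEMPORARY */ TABLE IF EXISTS ";
    for (size_t i= 0; i < g.second.size(); i++)
    {
      if (i) query += ",";
      query += "`" + g.second[i]->db + "`.`" + g.second[i]->name + "`";
    }
    printf("pseudo_thread_id=%u: %s\n", g.first, query.c_str());
  }
  return 0;
}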
+*/ + +TABLE_LIST * find_table_in_list(TABLE_LIST *table, + const char *db_name, const char *table_name) +{ + for (; table; table= table->next) + if ((!db_name || !strcmp(table->db, db_name)) && + (!table_name || !my_strcasecmp(table_alias_charset, + table->alias, table_name))) + break; + return table; +} + +/* + Find real table in given list. + + SYNOPSIS + find_real_table_in_list() + table - pointer to table list + db_name - data base name + table_name - table name + + RETURN VALUES + NULL Table not found + # Pointer to found table. +*/ + +TABLE_LIST * find_real_table_in_list(TABLE_LIST *table, + const char *db_name, + const char *table_name) +{ + for (; table; table= table->next) + if (!strcmp(table->db, db_name) && + !strcmp(table->real_name, table_name)) + break; + return table; +} TABLE **find_temporary_table(THD *thd, const char *db, const char *table_name) { @@ -610,7 +684,7 @@ TABLE **find_temporary_table(THD *thd, const char *db, const char *table_name) int4store(key+key_length,thd->server_id); key_length += 4; - int4store(key+key_length,thd->slave_proxy_id); + int4store(key+key_length,thd->variables.pseudo_thread_id); key_length += 4; prev= &thd->temporary_tables; @@ -659,7 +733,7 @@ bool rename_temporary_table(THD* thd, TABLE *table, const char *db, table_name) - table->table_cache_key)+1; int4store(key+table->key_length,thd->server_id); table->key_length += 4; - int4store(key+table->key_length,thd->slave_proxy_id); + int4store(key+table->key_length,thd->variables.pseudo_thread_id); table->key_length += 4; return 0; } @@ -805,6 +879,7 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, reg1 TABLE *table; char key[MAX_DBKEY_LENGTH]; uint key_length; + HASH_SEARCH_STATE state; DBUG_ENTER("open_table"); /* find a unused table in the open table cache */ @@ -814,7 +889,7 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, DBUG_RETURN(0); key_length= (uint) (strmov(strmov(key,db)+1,table_name)-key)+1; int4store(key + key_length, thd->server_id); - int4store(key + key_length + 4, thd->slave_proxy_id); + int4store(key + key_length + 4, thd->variables.pseudo_thread_id); for (table=thd->temporary_tables; table ; table=table->next) { @@ -829,6 +904,8 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, DBUG_RETURN(0); } table->query_id=thd->query_id; + table->clear_query_id=1; + thd->tmp_table_used= 1; DBUG_PRINT("info",("Using temporary table")); goto reset; } @@ -840,7 +917,7 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, { if (table->key_length == key_length && !memcmp(table->table_cache_key,key,key_length) && - !my_strcasecmp(table->table_name,alias) && + !my_strcasecmp(system_charset_info, table->table_name, alias) && table->query_id != thd->query_id) { table->query_id=thd->query_id; @@ -851,6 +928,7 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, my_printf_error(ER_TABLE_NOT_LOCKED,ER(ER_TABLE_NOT_LOCKED),MYF(0),alias); DBUG_RETURN(0); } + VOID(pthread_mutex_lock(&LOCK_open)); if (!thd->open_tables) @@ -866,9 +944,11 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, /* close handler tables which are marked for flush */ mysql_ha_flush(thd, (TABLE_LIST*) NULL, MYSQL_HA_REOPEN_ON_USAGE, TRUE); - for (table=(TABLE*) hash_search(&open_cache,(byte*) key,key_length) ; + for (table= (TABLE*) hash_first(&open_cache, (byte*) key, key_length, + &state); table && table->in_use ; - table = (TABLE*) hash_next(&open_cache,(byte*) key,key_length)) + table= (TABLE*) hash_next(&open_cache, 
(byte*) key, key_length, + &state)) { if (table->version != refresh_version) { @@ -903,6 +983,7 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, } table->prev->next=table->next; /* Remove from unused list */ table->next->prev=table->prev; + } else { @@ -929,11 +1010,12 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, table->version=refresh_version; table->flush_version=flush_version; DBUG_PRINT("info", ("inserting table %p into the cache", table)); - VOID(hash_insert(&open_cache,(byte*) table)); + VOID(my_hash_insert(&open_cache,(byte*) table)); } table->in_use=thd; - check_unused(); + check_unused(); // Debugging call + VOID(pthread_mutex_unlock(&LOCK_open)); if (refresh) { @@ -986,7 +1068,12 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, table->status=STATUS_NO_RECORD; table->keys_in_use_for_query= table->keys_in_use; table->used_keys= table->keys_for_keyread; + table->file->ft_handler=0; + table->fulltext_searched=0; + if (table->timestamp_field) + table->timestamp_field_type= table->timestamp_field->get_auto_set_type(); DBUG_ASSERT(table->key_read == 0); + DBUG_ASSERT(table->insert_values == 0); DBUG_RETURN(table); } @@ -1007,10 +1094,20 @@ TABLE *find_locked_table(THD *thd, const char *db,const char *table_name) /**************************************************************************** -** Reopen an table because the definition has changed. The date file for the -** table is already closed. -** Returns 0 if ok. -** If table can't be reopened, the entry is unchanged. + Reopen an table because the definition has changed. The date file for the + table is already closed. + + SYNOPSIS + reopen_table() + table Table to be opened + locked 1 if we have already a lock on LOCK_open + + NOTES + table->query_id will be 0 if table was reopened + + RETURN + 0 ok + 1 error ('table' is unchanged if table couldn't be reopened) ****************************************************************************/ bool reopen_table(TABLE *table,bool locked) @@ -1064,8 +1161,8 @@ bool reopen_table(TABLE *table,bool locked) tmp.grant= table->grant; /* Replace table in open list */ - tmp.next=table->next; - tmp.prev=table->prev; + tmp.next= table->next; + tmp.prev= table->prev; if (table->file) VOID(closefrm(table)); // close file, free everything @@ -1073,14 +1170,17 @@ bool reopen_table(TABLE *table,bool locked) *table=tmp; table->file->change_table_ptr(table); + DBUG_ASSERT(table->table_name); for (field=table->field ; *field ; field++) { - (*field)->table=table; + (*field)->table= (*field)->orig_table= table; (*field)->table_name=table->table_name; } for (key=0 ; key < table->keys ; key++) + { for (part=0 ; part < table->key_info[key].usable_key_parts ; part++) - table->key_info[key].key_part[part].field->table=table; + table->key_info[key].key_part[part].field->table= table; + } VOID(pthread_cond_broadcast(&COND_refresh)); error=0; @@ -1233,12 +1333,14 @@ bool table_is_used(TABLE *table, bool wait_for_name_lock) { do { + HASH_SEARCH_STATE state; char *key= table->table_cache_key; uint key_length=table->key_length; - for (TABLE *search=(TABLE*) hash_search(&open_cache, - (byte*) key,key_length) ; + for (TABLE *search= (TABLE*) hash_first(&open_cache, (byte*) key, + key_length, &state); search ; - search = (TABLE*) hash_next(&open_cache,(byte*) key,key_length)) + search= (TABLE*) hash_next(&open_cache, (byte*) key, + key_length, &state)) { if (search->locked_by_flush || search->locked_by_name && wait_for_name_lock || @@ -1363,22 +1465,49 @@ static 
int open_unireg_entry(THD *thd, TABLE *entry, const char *db, { char path[FN_REFLEN]; int error; + uint discover_retry_count= 0; DBUG_ENTER("open_unireg_entry"); strxmov(path, mysql_data_home, "/", db, "/", name, NullS); - if (openfrm(path,alias, + while (openfrm(path,alias, (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE | HA_GET_INDEX | HA_TRY_READ_ONLY), READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD, thd->open_options, entry)) { if (!entry->crashed) - goto err; // Can't repair the table + { + /* + Frm file could not be found on disk + Since it does not exist, no one can be using it + LOCK_open has been locked to protect from someone else + trying to discover the table at the same time. + */ + if (discover_retry_count++ != 0) + goto err; + if (ha_create_table_from_engine(thd, db, name) > 0) + { + /* Give right error message */ + thd->clear_error(); + DBUG_PRINT("error", ("Dicovery of %s/%s failed", db, name)); + my_printf_error(ER_UNKNOWN_ERROR, + "Failed to open '%-.64s', error while " + "unpacking from engine", + MYF(0), name); + + goto err; + } + + thd->clear_error(); // Clear error message + continue; + } + // Code below is for repairing a crashed file TABLE_LIST table_list; + bzero((char*) &table_list, sizeof(table_list)); // just for safe table_list.db=(char*) db; table_list.real_name=(char*) name; - table_list.next=0; + safe_mutex_assert_owner(&LOCK_open); if ((error=lock_table_name(thd,&table_list))) @@ -1394,8 +1523,8 @@ static int open_unireg_entry(THD *thd, TABLE *entry, const char *db, } } pthread_mutex_unlock(&LOCK_open); - thd->clear_error(); - error=0; + thd->clear_error(); // Clear error message + error= 0; if (openfrm(path,alias, (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE | HA_GET_INDEX | HA_TRY_READ_ONLY), @@ -1407,18 +1536,19 @@ static int open_unireg_entry(THD *thd, TABLE *entry, const char *db, /* Give right error message */ thd->clear_error(); my_error(ER_NOT_KEYFILE, MYF(0), name, my_errno); - sql_print_error("Error: Couldn't repair table: %s.%s",db,name); + sql_print_error("Couldn't repair table: %s.%s",db,name); if (entry->file) closefrm(entry); error=1; } else - thd->clear_error(); + thd->clear_error(); // Clear error message pthread_mutex_lock(&LOCK_open); unlock_table_name(thd,&table_list); if (error) goto err; + break; } /* If we are here, there was no fatal error (but error may be still @@ -1435,7 +1565,7 @@ static int open_unireg_entry(THD *thd, TABLE *entry, const char *db, { end = strxmov(strmov(query, "DELETE FROM `"), db,"`.`",name,"`", NullS); - Query_log_event qinfo(thd, query, (ulong)(end-query), 0); + Query_log_event qinfo(thd, query, (ulong)(end-query), 0, FALSE); mysql_bin_log.write(&qinfo); my_free(query, MYF(0)); } @@ -1446,7 +1576,7 @@ static int open_unireg_entry(THD *thd, TABLE *entry, const char *db, DBA on top of warning the client (which will automatically be done because of MYF(MY_WME) in my_malloc() above). 
*/ - sql_print_error("Error: when opening HEAP table, could not allocate \ + sql_print_error("When opening HEAP table, could not allocate \ memory to write 'DELETE FROM `%s`.`%s`' to the binary log",db,name); if (entry->file) closefrm(entry); @@ -1459,26 +1589,45 @@ err: DBUG_RETURN(1); } -/***************************************************************************** -** open all tables in list -*****************************************************************************/ +/* + Open all tables in list + + SYNOPSIS + open_tables() + thd - thread handler + start - list of tables + counter - number of opened tables will be return using this parameter + + RETURN + 0 - OK + -1 - error +*/ -int open_tables(THD *thd,TABLE_LIST *start) +int open_tables(THD *thd, TABLE_LIST *start, uint *counter) { TABLE_LIST *tables; bool refresh; int result=0; DBUG_ENTER("open_tables"); + thd->current_tablenr= 0; restart: + *counter= 0; thd->proc_info="Opening tables"; for (tables=start ; tables ; tables=tables->next) { + /* + Ignore placeholders for derived tables. After derived tables + processing, link to created temporary table will be put here. + */ + if (tables->derived) + continue; + (*counter)++; if (!tables->table && - !(tables->table=open_table(thd, - tables->db, - tables->real_name, - tables->alias, &refresh))) + !(tables->table= open_table(thd, + tables->db, + tables->real_name, + tables->alias, &refresh))) { if (refresh) // Refresh in progress { @@ -1588,9 +1737,11 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type lock_type) DBUG_ENTER("open_ltable"); thd->proc_info="Opening table"; + thd->current_tablenr= 0; while (!(table=open_table(thd,table_list->db, table_list->real_name,table_list->alias, &refresh)) && refresh) ; + if (table) { #if defined( __WIN__) || defined(OS2) @@ -1610,6 +1761,7 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type lock_type) } else { + DBUG_ASSERT(thd->lock == 0); // You must lock everything at once if ((table->reginfo.lock_type= lock_type) != TL_UNLOCK) if (! (thd->lock= mysql_lock_tables(thd, &table_list->table, 1, 0))) table= 0; @@ -1621,15 +1773,107 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type lock_type) /* - Open all tables in list and locks them for read. - The lock will automaticly be freed by close_thread_tables() + Open all tables in list and locks them for read without derived + tables processing. + + SYNOPSIS + simple_open_n_lock_tables() + thd - thread handler + tables - list of tables for open&locking + + RETURN + 0 - ok + -1 - error + + NOTE + The lock will automaticly be freed by close_thread_tables() */ -int open_and_lock_tables(THD *thd,TABLE_LIST *tables) +int simple_open_n_lock_tables(THD *thd, TABLE_LIST *tables) { - if (open_tables(thd,tables) || lock_tables(thd,tables)) - return -1; /* purecov: inspected */ - return 0; + DBUG_ENTER("simple_open_n_lock_tables"); + uint counter; + if (open_tables(thd, tables, &counter) || lock_tables(thd, tables, counter)) + DBUG_RETURN(-1); /* purecov: inspected */ + DBUG_RETURN(0); +} + + +/* + Open all tables in list, locks them and process derived tables + tables processing. 
+ + SYNOPSIS + open_and_lock_tables() + thd - thread handler + tables - list of tables for open&locking + + RETURN + 0 - ok + -1 - error + + NOTE + The lock will automaticly be freed by close_thread_tables() +*/ + +int open_and_lock_tables(THD *thd, TABLE_LIST *tables) +{ + DBUG_ENTER("open_and_lock_tables"); + uint counter; + if (open_tables(thd, tables, &counter) || lock_tables(thd, tables, counter)) + DBUG_RETURN(-1); /* purecov: inspected */ + relink_tables_for_derived(thd); + DBUG_RETURN(mysql_handle_derived(thd->lex)); +} + + +/* + Open all tables in list and process derived tables + + SYNOPSIS + open_normal_and_derived_tables + thd - thread handler + tables - list of tables for open + + RETURN + FALSE - ok + TRUE - error + + NOTE + This is to be used on prepare stage when you don't read any + data from the tables. +*/ + +int open_normal_and_derived_tables(THD *thd, TABLE_LIST *tables) +{ + uint counter; + DBUG_ENTER("open_normal_and_derived_tables"); + if (open_tables(thd, tables, &counter)) + DBUG_RETURN(-1); /* purecov: inspected */ + relink_tables_for_derived(thd); + DBUG_RETURN(mysql_handle_derived(thd->lex)); +} + + +/* + Let us propagate pointers to open tables from global table list + to table lists in particular selects if needed. +*/ + +void relink_tables_for_derived(THD *thd) +{ + if (thd->lex->all_selects_list->next_select_in_list() || + thd->lex->time_zone_tables_used) + { + for (SELECT_LEX *sl= thd->lex->all_selects_list; + sl; + sl= sl->next_select_in_list()) + for (TABLE_LIST *cursor= (TABLE_LIST *) sl->table_list.first; + cursor; + cursor=cursor->next) + if (cursor->table_list) + cursor->table= cursor->table_list->table; + } } @@ -1640,13 +1884,19 @@ int open_and_lock_tables(THD *thd,TABLE_LIST *tables) lock_tables() thd Thread handler tables Tables to lock + count umber of opened tables + + NOTES + You can't call lock_tables twice, as this would break the dead-lock-free + handling thr_lock gives us. You most always get all needed locks at + once. RETURN VALUES 0 ok -1 Error */ -int lock_tables(THD *thd,TABLE_LIST *tables) +int lock_tables(THD *thd, TABLE_LIST *tables, uint count) { TABLE_LIST *table; if (!tables) @@ -1654,22 +1904,24 @@ int lock_tables(THD *thd,TABLE_LIST *tables) if (!thd->locked_tables) { - uint count=0; - for (table = tables ; table ; table=table->next) - count++; + DBUG_ASSERT(thd->lock == 0); // You must lock everything at once TABLE **start,**ptr; if (!(ptr=start=(TABLE**) sql_alloc(sizeof(TABLE*)*count))) return -1; for (table = tables ; table ; table=table->next) - *(ptr++)= table->table; - if (! (thd->lock= mysql_lock_tables(thd, start, count, 0))) + { + if (!table->derived) + *(ptr++)= table->table; + } + if (! (thd->lock= mysql_lock_tables(thd, start, (uint) (ptr - start), 0))) return -1; /* purecov: inspected */ } else { for (table = tables ; table ; table=table->next) { - if (check_lock_and_start_stmt(thd, table->table, table->lock_type)) + if (!table->derived && + check_lock_and_start_stmt(thd, table->table, table->lock_type)) { ha_rollback_stmt(thd); return -1; @@ -1715,6 +1967,7 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db, } tmp_table->reginfo.lock_type=TL_WRITE; // Simulate locked + tmp_table->in_use= thd; tmp_table->tmp_table = (tmp_table->file->has_transactions() ? 
TRANSACTIONAL_TMP_TABLE : TMP_TABLE); tmp_table->table_cache_key=(char*) (tmp_table+1); @@ -1726,7 +1979,7 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db, thd->server_id); tmp_table->key_length += 4; int4store(tmp_table->table_cache_key + tmp_table->key_length, - thd->slave_proxy_id); + thd->variables.pseudo_thread_id); tmp_table->key_length += 4; if (link_in_list) @@ -1743,6 +1996,8 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db, bool rm_temporary_table(enum db_type base, char *path) { bool error=0; + DBUG_ENTER("rm_temporary_table"); + fn_format(path, path,"",reg_ext,4); unpack_filename(path,path); if (my_delete(path,MYF(0))) @@ -1752,11 +2007,11 @@ bool rm_temporary_table(enum db_type base, char *path) if (file && file->delete_table(path)) { error=1; - sql_print_error("Warning: Could not remove tmp table: '%s', error: %d", - path, my_errno); + sql_print_warning("Could not remove tmp table: '%s', error: %d", + path, my_errno); } delete file; - return error; + DBUG_RETURN(error); } @@ -1768,49 +2023,88 @@ bool rm_temporary_table(enum db_type base, char *path) #define WRONG_GRANT (Field*) -1 Field *find_field_in_table(THD *thd,TABLE *table,const char *name,uint length, - bool check_grants, bool allow_rowid) + bool check_grants, bool allow_rowid, + uint *cached_field_index_ptr) { - Field *field; - if (table->name_hash.records) + Field **field_ptr, *field; + uint cached_field_index= *cached_field_index_ptr; + + /* We assume here that table->field < NO_CACHED_FIELD_INDEX = UINT_MAX */ + if (cached_field_index < table->fields && + !my_strcasecmp(system_charset_info, + table->field[cached_field_index]->field_name, name)) + field_ptr= table->field + cached_field_index; + else if (table->name_hash.records) + field_ptr= (Field**)hash_search(&table->name_hash,(byte*) name, + length); + else + { + if (!(field_ptr= table->field)) + return (Field *)0; + for (; *field_ptr; ++field_ptr) + if (!my_strcasecmp(system_charset_info, (*field_ptr)->field_name, name)) + break; + } + + if (field_ptr && *field_ptr) { - if ((field=(Field*) hash_search(&table->name_hash,(byte*) name, - length))) - goto found; + *cached_field_index_ptr= field_ptr - table->field; + field= *field_ptr; } else { - Field **ptr=table->field; - while ((field = *ptr++)) - { - if (!my_strcasecmp(field->field_name, name)) - goto found; - } + if (!allow_rowid || + my_strcasecmp(system_charset_info, name, "_rowid") || + !(field=table->rowid_field)) + return (Field*) 0; } - if (allow_rowid && !my_strcasecmp(name,"_rowid") && - (field=table->rowid_field)) - goto found; - return (Field*) 0; - found: if (thd->set_query_id) { if (field->query_id != thd->query_id) { field->query_id=thd->query_id; table->used_fields++; - table->used_keys&= field->part_of_key; + table->used_keys.intersect(field->part_of_key); } else thd->dupp_field=field; } +#ifndef NO_EMBEDDED_ACCESS_CHECKS if (check_grants && check_grant_column(thd,table,name,length)) return WRONG_GRANT; +#endif return field; } +/* + Find field in table list. 
+ + SYNOPSIS + find_field_in_tables() + thd Pointer to current thread structure + item Field item that should be found + tables Tables for scanning + where Table where field found will be returned via + this parameter + report_error If FALSE then do not report error if item not found + and return not_found_field + + RETURN VALUES + 0 Field is not found or field is not unique- error + message is reported + not_found_field Function was called with report_error == FALSE and + field was not found. no error message reported. + found field +*/ + +// Special Field pointer for find_field_in_tables returning +const Field *not_found_field= (Field*) 0x1; + Field * -find_field_in_tables(THD *thd,Item_field *item,TABLE_LIST *tables) +find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, + TABLE_LIST **where, bool report_error) { Field *found=0; const char *db=item->db_name; @@ -1818,6 +2112,32 @@ find_field_in_tables(THD *thd,Item_field *item,TABLE_LIST *tables) const char *name=item->field_name; uint length=(uint) strlen(name); char name_buff[NAME_LEN+1]; + bool allow_rowid; + + if (item->cached_table) + { + /* + This shortcut is used by prepared statements. We assuming that + TABLE_LIST *tables is not changed during query execution (which + is true for all queries except RENAME but luckily RENAME doesn't + use fields...) so we can rely on reusing pointer to its member. + With this optimisation we also miss case when addition of one more + field makes some prepared query ambiguous and so erronous, but we + accept this trade off. + */ + found= find_field_in_table(thd, item->cached_table->table, name, length, + test(item->cached_table-> + table->grant.want_privilege), + 1, &(item->cached_field_index)); + + if (found) + { + (*where)= tables; + if (found == WRONG_GRANT) + return (Field*) 0; + return found; + } + } if (db && lower_case_table_names) { @@ -1827,25 +2147,28 @@ find_field_in_tables(THD *thd,Item_field *item,TABLE_LIST *tables) 'name' of the item which may be used in the select list */ strmake(name_buff, db, sizeof(name_buff)-1); - casedn_str(name_buff); + my_casedn_str(files_charset_info, name_buff); db= name_buff; } - if (table_name) + if (table_name && table_name[0]) { /* Qualified field */ bool found_table=0; for (; tables ; tables=tables->next) { - if (!strcmp(tables->alias,table_name) && - (!db || !strcmp(db,tables->db))) + if (!my_strcasecmp(table_alias_charset, tables->alias, table_name) && + (!db || !tables->db || !tables->db[0] || !strcmp(db,tables->db))) { found_table=1; Field *find=find_field_in_table(thd,tables->table,name,length, test(tables->table->grant. 
want_privilege), - 1); + 1, &(item->cached_field_index)); if (find) { + (*where)= item->cached_table= tables; + if (!tables->cacheable_table) + item->cached_table= 0; if (find == WRONG_GRANT) return (Field*) 0; if (db || !thd->where) @@ -1862,32 +2185,46 @@ find_field_in_tables(THD *thd,Item_field *item,TABLE_LIST *tables) } if (found) return found; - if (!found_table) + if (!found_table && report_error) { char buff[NAME_LEN*2+1]; - if (db) + if (db && db[0]) { strxnmov(buff,sizeof(buff)-1,db,".",table_name,NullS); table_name=buff; } - my_printf_error(ER_UNKNOWN_TABLE,ER(ER_UNKNOWN_TABLE),MYF(0),table_name, - thd->where); + my_printf_error(ER_UNKNOWN_TABLE, ER(ER_UNKNOWN_TABLE), MYF(0), + table_name, thd->where); } else - my_printf_error(ER_BAD_FIELD_ERROR,ER(ER_BAD_FIELD_ERROR),MYF(0), - item->full_name(),thd->where); + if (report_error) + my_printf_error(ER_BAD_FIELD_ERROR,ER(ER_BAD_FIELD_ERROR),MYF(0), + item->full_name(),thd->where); + else + return (Field*) not_found_field; return (Field*) 0; } - bool allow_rowid= tables && !tables->next; // Only one table + allow_rowid= tables && !tables->next; // Only one table for (; tables ; tables=tables->next) { + if (!tables->table) + { + if (report_error) + my_printf_error(ER_BAD_FIELD_ERROR,ER(ER_BAD_FIELD_ERROR),MYF(0), + item->full_name(),thd->where); + return (Field*) not_found_field; + } + Field *field=find_field_in_table(thd,tables->table,name,length, test(tables->table->grant.want_privilege), - allow_rowid); + allow_rowid, &(item->cached_field_index)); if (field) { if (field == WRONG_GRANT) return (Field*) 0; + (*where)= item->cached_table= tables; + if (!tables->cacheable_table) + item->cached_table= 0; if (found) { if (!thd->where) // Returns first found @@ -1896,100 +2233,266 @@ find_field_in_tables(THD *thd,Item_field *item,TABLE_LIST *tables) name,thd->where); return (Field*) 0; } - found=field; + found= field; } } if (found) return found; - my_printf_error(ER_BAD_FIELD_ERROR,ER(ER_BAD_FIELD_ERROR), - MYF(0),item->full_name(),thd->where); + if (report_error) + my_printf_error(ER_BAD_FIELD_ERROR, ER(ER_BAD_FIELD_ERROR), + MYF(0), item->full_name(), thd->where); + else + return (Field*) not_found_field; return (Field*) 0; } + +/* + Find Item in list of items (find_field_in_tables analog) + + TODO + is it better return only counter? + + SYNOPSIS + find_item_in_list() + find Item to find + items List of items + counter To return number of found item + report_error + REPORT_ALL_ERRORS report errors, return 0 if error + REPORT_EXCEPT_NOT_FOUND Do not report 'not found' error and + return not_found_item, report other errors, + return 0 + IGNORE_ERRORS Do not report errors, return 0 if error + unaliased Set to true if item is field which was found + by original field name and not by its alias + in item list. Set to false otherwise. + + RETURN VALUES + 0 Item is not found or item is not unique, + error message is reported + not_found_item Function was called with + report_error == REPORT_EXCEPT_NOT_FOUND and + item was not found. 
No error message was reported + found field +*/ + +// Special Item pointer for find_item_in_list returning +const Item **not_found_item= (const Item**) 0x1; + + Item ** -find_item_in_list(Item *find,List<Item> &items) +find_item_in_list(Item *find, List<Item> &items, uint *counter, + find_item_error_report_type report_error, bool *unaliased) { List_iterator<Item> li(items); - Item **found=0,*item; + Item **found=0, **found_unaliased= 0, *item; + const char *db_name=0; const char *field_name=0; const char *table_name=0; + bool found_unaliased_non_uniq= 0; + uint unaliased_counter; + + LINT_INIT(unaliased_counter); + *unaliased= FALSE; + if (find->type() == Item::FIELD_ITEM || find->type() == Item::REF_ITEM) { field_name= ((Item_ident*) find)->field_name; table_name= ((Item_ident*) find)->table_name; + db_name= ((Item_ident*) find)->db_name; } - while ((item=li++)) + for (uint i= 0; (item=li++); i++) { if (field_name && item->type() == Item::FIELD_ITEM) { - if (!my_strcasecmp(((Item_field*) item)->name,field_name)) + Item_field *item_field= (Item_field*) item; + + /* + In case of group_concat() with ORDER BY condition in the QUERY + item_field can be field of temporary table without item name + (if this field created from expression argument of group_concat()), + => we have to check presence of name before compare + */ + if (!item_field->name) + continue; + + if (table_name) { - if (!table_name) - { - if (found) - { - if ((*found)->eq(item,0)) - continue; // Same field twice (Access?) - if (current_thd->where) - my_printf_error(ER_NON_UNIQ_ERROR,ER(ER_NON_UNIQ_ERROR),MYF(0), - find->full_name(), current_thd->where); - return (Item**) 0; - } - found=li.ref(); - } - else if (!strcmp(((Item_field*) item)->table_name,table_name)) - { - found=li.ref(); - break; - } + /* + If table name is specified we should find field 'field_name' in + table 'table_name'. According to SQL-standard we should ignore + aliases in this case. + + Since we should NOT prefer fields from the select list over + other fields from the tables participating in this select in + case of ambiguity we have to do extra check outside this function. + + We use strcmp for table names and database names as these may be + case sensitive. In cases where they are not case sensitive, they + are always in lower case. + + item_field->field_name and item_field->table_name can be 0x0 if + item is not fix_field()'ed yet. + */ + if (item_field->field_name && item_field->table_name && + !my_strcasecmp(system_charset_info, item_field->field_name, + field_name) && + !strcmp(item_field->table_name, table_name) && + (!db_name || (item_field->db_name && + !strcmp(item_field->db_name, db_name)))) + { + if (found_unaliased) + { + if ((*found_unaliased)->eq(item, 0)) + continue; + /* + Two matching fields in select list. + We already can bail out because we are searching through + unaliased names only and will have duplicate error anyway. + */ + if (report_error != IGNORE_ERRORS) + my_printf_error(ER_NON_UNIQ_ERROR, ER(ER_NON_UNIQ_ERROR), + MYF(0), find->full_name(), current_thd->where); + return (Item**) 0; + } + found_unaliased= li.ref(); + unaliased_counter= i; + if (db_name) + break; // Perfect match + } + } + else if (!my_strcasecmp(system_charset_info, item_field->name, + field_name)) + { + /* + If table name was not given we should scan through aliases + (or non-aliased fields) first. 
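
The comments above describe a two-tier search: the name is matched against select-list aliases first, and only when no alias matches is a match on the underlying field name used; duplicates among unaliased matches are remembered but only become an error if the alias tier found nothing. The following is a simplified standalone analogue of that ordering (SelectItem, find_in_select_list and FindResult are invented for illustration, and the table/db qualified branch handled above is ignored):

    #include <strings.h>     /* strcasecmp (POSIX) */
    #include <cstddef>
    #include <vector>

    struct SelectItem { const char *alias; const char *field_name; };
    enum FindResult { FOUND, NOT_FOUND, AMBIGUOUS };

    static FindResult find_in_select_list(const std::vector<SelectItem> &items,
                                          const char *name, size_t *pos)
    {
      long aliased= -1, unaliased= -1;
      bool unaliased_dup= false;
      for (size_t i= 0; i < items.size(); i++)
      {
        if (items[i].alias && !strcasecmp(items[i].alias, name))
        {
          if (aliased >= 0)
            return AMBIGUOUS;              /* two matching aliases: always an error */
          aliased= (long) i;
        }
        else if (items[i].field_name && !strcasecmp(items[i].field_name, name))
        {
          if (unaliased >= 0)
            unaliased_dup= true;           /* fatal only if no alias matches */
          else
            unaliased= (long) i;
        }
      }
      if (aliased >= 0)   { *pos= (size_t) aliased;   return FOUND; }
      if (unaliased_dup)  return AMBIGUOUS;
      if (unaliased >= 0) { *pos= (size_t) unaliased; return FOUND; }
      return NOT_FOUND;
    }

Here pos plays the role of the counter argument in the function above, and telling an aliased hit from a fallback hit is what the unaliased out-parameter is for.
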
We are also checking unaliased + name of the field in then next else-if, to be able to find + instantly field (hidden by alias) if no suitable alias (or + non-aliased field) was found. + */ + if (found) + { + if ((*found)->eq(item, 0)) + continue; // Same field twice + if (report_error != IGNORE_ERRORS) + my_printf_error(ER_NON_UNIQ_ERROR, ER(ER_NON_UNIQ_ERROR), + MYF(0), find->full_name(), current_thd->where); + return (Item**) 0; + } + found= li.ref(); + *counter= i; + } + else if (!my_strcasecmp(system_charset_info, item_field->field_name, + field_name)) + { + /* + We will use un-aliased field or react on such ambiguities only if + we won't be able to find aliased field. + Again if we have ambiguity with field outside of select list + we should prefer fields from select list. + */ + if (found_unaliased) + { + if ((*found_unaliased)->eq(item, 0)) + continue; // Same field twice + found_unaliased_non_uniq= 1; + } + else + { + found_unaliased= li.ref(); + unaliased_counter= i; + } } } else if (!table_name && (item->eq(find,0) || find->name && item->name && - !my_strcasecmp(item->name,find->name))) + !my_strcasecmp(system_charset_info, + item->name,find->name))) { - found=li.ref(); + found= li.ref(); + *counter= i; break; } } - if (!found && current_thd->where) - my_printf_error(ER_BAD_FIELD_ERROR,ER(ER_BAD_FIELD_ERROR),MYF(0), - find->full_name(),current_thd->where); - return found; + if (!found) + { + if (found_unaliased_non_uniq) + { + if (report_error != IGNORE_ERRORS) + my_printf_error(ER_NON_UNIQ_ERROR, ER(ER_NON_UNIQ_ERROR), MYF(0), + find->full_name(), current_thd->where); + return (Item **) 0; + } + if (found_unaliased) + { + found= found_unaliased; + *counter= unaliased_counter; + *unaliased= TRUE; + } + } + if (found) + return found; + if (report_error != REPORT_EXCEPT_NOT_FOUND) + { + if (report_error == REPORT_ALL_ERRORS) + my_printf_error(ER_BAD_FIELD_ERROR, ER(ER_BAD_FIELD_ERROR), MYF(0), + find->full_name(), current_thd->where); + return (Item **) 0; + } + else + return (Item **) not_found_item; } /**************************************************************************** -** Check that all given fields exists and fill struct with current data +** Expand all '*' in given fields ****************************************************************************/ -int setup_fields(THD *thd, TABLE_LIST *tables, List<Item> &fields, - bool set_query_id, List<Item> *sum_func_list, - bool allow_sum_func) +int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields, + List<Item> *sum_func_list, + uint wild_num) { + DBUG_ENTER("setup_wild"); + if (!wild_num) + DBUG_RETURN(0); + reg2 Item *item; List_iterator<Item> it(fields); - DBUG_ENTER("setup_fields"); - - thd->set_query_id=set_query_id; - thd->allow_sum_func= allow_sum_func; - thd->where="field list"; + Item_arena *arena, backup; + /* + If we are in preparing prepared statement phase then we have change + temporary mem_root to statement mem root to save changes of SELECT list + */ + arena= thd->change_arena_if_needed(&backup); - while ((item=it++)) - { - /* - Expand * to all fields if this is not the temporary table for an - a UNION result - */ + while (wild_num && (item= it++)) + { if (item->type() == Item::FIELD_ITEM && + ((Item_field*) item)->field_name && ((Item_field*) item)->field_name[0] == '*' && - ((Item_field*) item)->field_name[1] == 0 && !((Item_field*) item)->field) { - uint elem=fields.elements; - if (insert_fields(thd,tables,((Item_field*) item)->db_name, - ((Item_field*) item)->table_name,&it)) - DBUG_RETURN(-1); 
/* purecov: inspected */ + uint elem= fields.elements; + Item_subselect *subsel= thd->lex->current_select->master_unit()->item; + if (subsel && + subsel->substype() == Item_subselect::EXISTS_SUBS) + { + /* + It is EXISTS(SELECT * ...) and we can replace * by any constant. + + Item_int do not need fix_fields() because it is basic constant. + */ + it.replace(new Item_int("Not_used", (longlong) 1, 21)); + } + else if (insert_fields(thd,tables,((Item_field*) item)->db_name, + ((Item_field*) item)->table_name, &it)) + { + if (arena) + thd->restore_backup_item_arena(arena, &backup); + DBUG_RETURN(-1); + } if (sum_func_list) { /* @@ -1999,25 +2502,80 @@ int setup_fields(THD *thd, TABLE_LIST *tables, List<Item> &fields, */ sum_func_list->elements+= fields.elements - elem; } - } - else - { - if (item->fix_fields(thd,tables)) - DBUG_RETURN(-1); /* purecov: inspected */ - if (item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM && - sum_func_list) - item->split_sum_func(*sum_func_list); - thd->used_tables|=item->used_tables(); + wild_num--; } } - DBUG_RETURN(test(thd->fatal_error)); + if (arena) + thd->restore_backup_item_arena(arena, &backup); + DBUG_RETURN(0); +} + +/**************************************************************************** +** Check that all given fields exists and fill struct with current data +****************************************************************************/ + +int setup_fields(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, + List<Item> &fields, bool set_query_id, + List<Item> *sum_func_list, bool allow_sum_func) +{ + reg2 Item *item; + List_iterator<Item> it(fields); + DBUG_ENTER("setup_fields"); + + thd->set_query_id=set_query_id; + thd->allow_sum_func= allow_sum_func; + thd->where="field list"; + + /* + To prevent fail on forward lookup we fill it with zerows, + then if we got pointer on zero after find_item_in_list we will know + that it is forward lookup. + + There is other way to solve problem: fill array with pointers to list, + but it will be slower. + + TODO: remove it when (if) we made one list for allfields and + ref_pointer_array + */ + if (ref_pointer_array) + bzero(ref_pointer_array, sizeof(Item *) * fields.elements); + + Item **ref= ref_pointer_array; + while ((item= it++)) + { + if (!item->fixed && item->fix_fields(thd, tables, it.ref()) || + (item= *(it.ref()))->check_cols(1)) + DBUG_RETURN(-1); /* purecov: inspected */ + if (ref) + *(ref++)= item; + if (item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM && + sum_func_list) + item->split_sum_func(thd, ref_pointer_array, *sum_func_list); + thd->used_tables|=item->used_tables(); + } + DBUG_RETURN(test(thd->net.report_error)); } /* - Remap table numbers if INSERT ... SELECT - Check also that the 'used keys' and 'ignored keys' exists and set up the - table structure accordingly + prepare tables + + SYNOPSIS + setup_tables() + tables table list + + + NOTE + Remap table numbers if INSERT ... SELECT + Check also that the 'used keys' and 'ignored keys' exists and set up the + table structure accordingly + + This has to be called for all tables that are used by items, as otherwise + table->map is not set and all Item_field will be regarded as const items. 
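
The NOTE above is the point of setup_tables(): each table gets a single bit in a table_map (the removed inline code further down shows the formula, (table_map) 1 << tablenr, now done inside setup_table_map()), and an item reports via used_tables() the OR of the maps of the tables it touches, so "constant with respect to these tables" is a plain mask test. A tiny self-contained illustration of that arithmetic; the typedef and names here are local stand-ins, not the server's:

    #include <cstdint>
    #include <cassert>

    typedef uint64_t table_map;                 /* one bit per open table */

    int main()
    {
      table_map t1= (table_map) 1 << 0;         /* tablenr 0 */
      table_map t2= (table_map) 1 << 1;         /* tablenr 1 */

      table_map expr_used= t2;                  /* e.g. an expression over a column of t2 */
      table_map available= t1;                  /* tables already read in the join prefix */

      /* Not constant yet: it still depends on a table outside 'available'. */
      assert((expr_used & ~available) != 0);

      available|= t2;                           /* t2 joined in */
      assert((expr_used & ~available) == 0);    /* now it can be evaluated */
      return 0;
    }
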
+ + RETURN + 0 ok; In this case *map will includes the choosed index + 1 error */ bool setup_tables(TABLE_LIST *tables) @@ -2027,41 +2585,26 @@ bool setup_tables(TABLE_LIST *tables) for (TABLE_LIST *table_list=tables ; table_list ; table_list=table_list->next,tablenr++) { - TABLE *table=table_list->table; - - table->used_fields=0; - table->const_table=0; - table->null_row=0; - table->status=STATUS_NO_RECORD; - table->keys_in_use_for_query= table->keys_in_use; + TABLE *table= table_list->table; + setup_table_map(table, table_list, tablenr); table->used_keys= table->keys_for_keyread; - table->maybe_null=test(table->outer_join= table_list->outer_join); - table->tablenr=tablenr; - table->map= (table_map) 1 << tablenr; - table->force_index= table_list->force_index; if (table_list->use_index) { - key_map map= get_key_map_from_key_list(table, - table_list->use_index); - if (map == ~(key_map) 0) + key_map map; + get_key_map_from_key_list(&map, table, table_list->use_index); + if (map.is_set_all()) DBUG_RETURN(1); table->keys_in_use_for_query=map; } if (table_list->ignore_index) { - key_map map= get_key_map_from_key_list(table, - table_list->ignore_index); - if (map == ~(key_map) 0) + key_map map; + get_key_map_from_key_list(&map, table, table_list->ignore_index); + if (map.is_set_all()) DBUG_RETURN(1); - table->keys_in_use_for_query &= ~map; - } - table->used_keys &= table->keys_in_use_for_query; - if (table_list->shared) - { - /* Clear query_id that may have been set by previous select */ - for (Field **ptr=table->field ; *ptr ; ptr++) - (*ptr)->query_id=0; + table->keys_in_use_for_query.subtract(map); } + table->used_keys.intersect(table->keys_in_use_for_query); } if (tablenr > MAX_TABLES) { @@ -2072,26 +2615,44 @@ bool setup_tables(TABLE_LIST *tables) } -static key_map get_key_map_from_key_list(TABLE *table, - List<String> *index_list) +/* + Create a key_map from a list of index names + + SYNOPSIS + get_key_map_from_key_list() + map key_map to fill in + table Table + index_list List of index names + + RETURN + 0 ok; In this case *map will includes the choosed index + 1 error +*/ + +bool get_key_map_from_key_list(key_map *map, TABLE *table, + List<String> *index_list) { - key_map map=0; List_iterator_fast<String> it(*index_list); String *name; uint pos; + + map->clear_all(); while ((name=it++)) { - if ((pos=find_type(name->c_ptr(), &table->keynames, 1+2)) <= 0) + if ((pos= find_type(&table->keynames, name->ptr(), name->length(), 1)) <= + 0) { my_error(ER_KEY_COLUMN_DOES_NOT_EXITS, MYF(0), name->c_ptr(), table->real_name); - return (~ (key_map) 0); + map->set_all(); + return 1; } - map|= ((key_map) 1) << (pos-1); + map->set_bit(pos-1); } - return map; + return 0; } + /**************************************************************************** This just drops in all fields instead of current '*' field Returns pointer to last inserted field if ok @@ -2113,7 +2674,7 @@ insert_fields(THD *thd,TABLE_LIST *tables, const char *db_name, 'name' of the item which may be used in the select list */ strmake(name_buff, db_name, sizeof(name_buff)-1); - casedn_str(name_buff); + my_casedn_str(files_charset_info, name_buff); db_name= name_buff; } @@ -2122,23 +2683,40 @@ insert_fields(THD *thd,TABLE_LIST *tables, const char *db_name, for (; tables ; tables=tables->next) { TABLE *table=tables->table; - if (!table_name || (!strcmp(table_name,tables->alias) && + if (!table_name || (!my_strcasecmp(table_alias_charset, table_name, + tables->alias) && (!db_name || !strcmp(tables->db,db_name)))) { +#ifndef 
NO_EMBEDDED_ACCESS_CHECKS /* Ensure that we have access right to all columns */ if (!(table->grant.privilege & SELECT_ACL) && check_grant_all_columns(thd,SELECT_ACL,table)) DBUG_RETURN(-1); - +#endif Field **ptr=table->field,*field; + TABLE *natural_join_table= 0; + thd->used_tables|=table->map; + if (!table->outer_join && + tables->natural_join && + !tables->natural_join->table->outer_join) + natural_join_table= tables->natural_join->table; + while ((field = *ptr++)) { - Item_field *item= new Item_field(field); - if (!found++) - (void) it->replace(item); // Replace '*' - else - it->after(item); + uint not_used_field_index= NO_CACHED_FIELD_INDEX; + /* Skip duplicate field names if NATURAL JOIN is used */ + if (!natural_join_table || + !find_field_in_table(thd, natural_join_table, field->field_name, + strlen(field->field_name), 0, 0, + ¬_used_field_index)) + { + Item_field *item= new Item_field(thd, field); + if (!found++) + (void) it->replace(item); // Replace '*' + else + it->after(item); + } /* Mark if field used before in this select. Used by 'insert' to verify if a field name is used twice @@ -2146,7 +2724,7 @@ insert_fields(THD *thd,TABLE_LIST *tables, const char *db_name, if (field->query_id == thd->query_id) thd->dupp_field=field; field->query_id=thd->query_id; - table->used_keys&= field->part_of_key; + table->used_keys.intersect(field->part_of_key); } /* All fields are used */ table->used_fields=table->fields; @@ -2170,18 +2748,21 @@ insert_fields(THD *thd,TABLE_LIST *tables, const char *db_name, int setup_conds(THD *thd,TABLE_LIST *tables,COND **conds) { table_map not_null_tables= 0; + Item_arena *arena= 0, backup; DBUG_ENTER("setup_conds"); + thd->set_query_id=1; - thd->cond_count=0; - thd->allow_sum_func=0; + thd->lex->current_select->cond_count= 0; if (*conds) { thd->where="where clause"; - if ((*conds)->fix_fields(thd,tables)) + if (!(*conds)->fixed && (*conds)->fix_fields(thd, tables, conds) || + (*conds)->check_cols(1)) DBUG_RETURN(1); not_null_tables= (*conds)->not_null_tables(); } + /* Check if we are using outer joins */ for (TABLE_LIST *table=tables ; table ; table=table->next) { @@ -2189,9 +2770,12 @@ int setup_conds(THD *thd,TABLE_LIST *tables,COND **conds) { /* Make a join an a expression */ thd->where="on clause"; - if (table->on_expr->fix_fields(thd,tables)) + + if (!table->on_expr->fixed && + table->on_expr->fix_fields(thd, tables, &table->on_expr) || + table->on_expr->check_cols(1)) DBUG_RETURN(1); - thd->cond_count++; + thd->lex->current_select->cond_count++; /* If it's a normal join or a LEFT JOIN which can be optimized away @@ -2202,57 +2786,109 @@ int setup_conds(THD *thd,TABLE_LIST *tables,COND **conds) !(specialflag & SPECIAL_NO_NEW_FUNC))) { table->outer_join= 0; - if (!(*conds=and_conds(*conds, table->on_expr))) - DBUG_RETURN(1); + arena= thd->change_arena_if_needed(&backup); + *conds= and_conds(*conds, table->on_expr); table->on_expr=0; + if (arena) + { + thd->restore_backup_item_arena(arena, &backup); + arena= 0; // Safety if goto err + } + if ((*conds) && !(*conds)->fixed && + (*conds)->fix_fields(thd, tables, conds)) + DBUG_RETURN(1); } } if (table->natural_join) { + arena= thd->change_arena_if_needed(&backup); /* Make a join of all fields with have the same name */ - TABLE *t1=table->table; - TABLE *t2=table->natural_join->table; - Item_cond_and *cond_and=new Item_cond_and(); + TABLE *t1= table->table; + TABLE *t2= table->natural_join->table; + Item_cond_and *cond_and= new Item_cond_and(); if (!cond_and) // If not out of memory - DBUG_RETURN(1); + 
goto err; cond_and->top_level_item(); - uint i,j; - for (i=0 ; i < t1->fields ; i++) + Field **t1_field, *t2_field; + for (t1_field= t1->field; (*t1_field); t1_field++) { - // TODO: This could be optimized to use hashed names if t2 had a hash - for (j=0 ; j < t2->fields ; j++) - { - if (!my_strcasecmp(t1->field[i]->field_name, - t2->field[j]->field_name)) - { - Item_func_eq *tmp=new Item_func_eq(new Item_field(t1->field[i]), - new Item_field(t2->field[j])); - if (!tmp) - DBUG_RETURN(1); - tmp->fix_length_and_dec(); // Update cmp_type - tmp->const_item_cache=0; - /* Mark field used for table cache */ - t1->field[i]->query_id=t2->field[j]->query_id=thd->query_id; - cond_and->list.push_back(tmp); - t1->used_keys&= t1->field[i]->part_of_key; - t2->used_keys&= t2->field[j]->part_of_key; - break; - } - } + const char *t1_field_name= (*t1_field)->field_name; + uint not_used_field_index= NO_CACHED_FIELD_INDEX; + + if ((t2_field= find_field_in_table(thd, t2, t1_field_name, + strlen(t1_field_name), 0, 0, + ¬_used_field_index))) + { + Item_func_eq *tmp=new Item_func_eq(new Item_field(thd, *t1_field), + new Item_field(thd, t2_field)); + if (!tmp) + goto err; + /* Mark field used for table cache */ + (*t1_field)->query_id= t2_field->query_id= thd->query_id; + cond_and->list.push_back(tmp); + t1->used_keys.intersect((*t1_field)->part_of_key); + t2->used_keys.intersect(t2_field->part_of_key); + } } - cond_and->used_tables_cache= t1->map | t2->map; - thd->cond_count+=cond_and->list.elements; - if (!table->outer_join) // Not left join + thd->lex->current_select->cond_count+= cond_and->list.elements; + + // to prevent natural join processing during PS re-execution + table->natural_join= 0; + + if (cond_and->list.elements) { - if (!(*conds=and_conds(*conds, cond_and))) - DBUG_RETURN(1); + if (!table->outer_join) // Not left join + { + *conds= and_conds(*conds, cond_and); + // fix_fields() should be made with temporary memory pool + if (arena) + thd->restore_backup_item_arena(arena, &backup); + if (*conds && !(*conds)->fixed) + { + if (!(*conds)->fixed && + (*conds)->fix_fields(thd, tables, conds)) + DBUG_RETURN(1); + } + } + else + { + table->on_expr= and_conds(table->on_expr, cond_and); + // fix_fields() should be made with temporary memory pool + if (arena) + thd->restore_backup_item_arena(arena, &backup); + if (table->on_expr && !table->on_expr->fixed) + { + if (!table->on_expr->fixed && + table->on_expr->fix_fields(thd, tables, &table->on_expr)) + DBUG_RETURN(1); + } + } + } + else if (arena) + { + thd->restore_backup_item_arena(arena, &backup); + arena= 0; // Safety if goto err } - else - table->on_expr=and_conds(table->on_expr,cond_and); } } - DBUG_RETURN(test(thd->fatal_error)); + + if (thd->current_arena->is_stmt_prepare()) + { + /* + We are in prepared statement preparation code => we should store + WHERE clause changing for next executions. + + We do this ON -> WHERE transformation only once per PS statement. 
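
For a plain join (or a LEFT JOIN the server may simplify away) the branch above folds the ON condition into WHERE, *conds= and_conds(*conds, table->on_expr) followed by table->on_expr= 0, and then runs fix_fields() on the merged condition; the comment just above notes that the rewritten WHERE is stored back into the select only once per prepared statement. A standalone sketch of that folding step; Cond-as-string and and_conds_demo() are invented stand-ins, not the server's Item tree or its and_conds() helper:

    #include <string>

    /* Invented stand-in: a condition is just its text, "" means no condition. */
    static std::string and_conds_demo(const std::string &a, const std::string &b)
    {
      if (a.empty()) return b;
      if (b.empty()) return a;
      return "(" + a + ") AND (" + b + ")";
    }

    int main()
    {
      std::string where=   "w";
      std::string on_expr= "t1.a = t2.a";

      /* Fold the join condition into WHERE and clear it on the join. */
      where= and_conds_demo(where, on_expr);
      on_expr.clear();

      return where != "(w) AND (t1.a = t2.a)";   /* 0 when folded as expected */
    }
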
+ */ + thd->lex->current_select->where= *conds; + } + DBUG_RETURN(test(thd->net.report_error)); + +err: + if (arena) + thd->restore_backup_item_arena(arena, &backup); + DBUG_RETURN(1); } @@ -2272,7 +2908,11 @@ fill_record(List<Item> &fields,List<Item> &values, bool ignore_errors) while ((field=(Item_field*) f++)) { value=v++; - if (value->save_in_field(field->field, 0) && !ignore_errors) + Field *rfield= field->field; + TABLE *table= rfield->table; + if (rfield == table->next_number_field) + table->auto_increment_field_not_null= TRUE; + if ((value->save_in_field(rfield, 0) < 0) && !ignore_errors) DBUG_RETURN(1); } DBUG_RETURN(0); @@ -2290,7 +2930,10 @@ fill_record(Field **ptr,List<Item> &values, bool ignore_errors) while ((field = *ptr++)) { value=v++; - if (value->save_in_field(field, 0) && !ignore_errors) + TABLE *table= field->table; + if (field == table->next_number_field) + table->auto_increment_field_not_null= TRUE; + if ((value->save_in_field(field, 0) < 0) && !ignore_errors) DBUG_RETURN(1); } DBUG_RETURN(0); @@ -2299,19 +2942,20 @@ fill_record(Field **ptr,List<Item> &values, bool ignore_errors) static void mysql_rm_tmp_tables(void) { - uint idx; - char filePath[FN_REFLEN]; + uint i, idx; + char filePath[FN_REFLEN], *tmpdir; MY_DIR *dirp; FILEINFO *file; DBUG_ENTER("mysql_rm_tmp_tables"); + for (i=0; i<=mysql_tmpdir_list.max; i++) + { + tmpdir=mysql_tmpdir_list.list[i]; /* See if the directory exists */ - if (!(dirp = my_dir(mysql_tmpdir,MYF(MY_WME | MY_DONT_SORT)))) - DBUG_VOID_RETURN; /* purecov: inspected */ + if (!(dirp = my_dir(tmpdir,MYF(MY_WME | MY_DONT_SORT)))) + continue; - /* - ** Remove all SQLxxx tables from directory - */ + /* Remove all SQLxxx tables from directory */ for (idx=0 ; idx < (uint) dirp->number_off_files ; idx++) { @@ -2324,52 +2968,16 @@ static void mysql_rm_tmp_tables(void) if (!bcmp(file->name,tmp_file_prefix,tmp_file_prefix_length)) { - sprintf(filePath,"%s%s",mysql_tmpdir,file->name); /* purecov: inspected */ - VOID(my_delete(filePath,MYF(MY_WME))); /* purecov: inspected */ + sprintf(filePath,"%s%s",tmpdir,file->name); + VOID(my_delete(filePath,MYF(MY_WME))); } } my_dirend(dirp); + } DBUG_VOID_RETURN; } -/* -** CREATE INDEX and DROP INDEX are implemented by calling ALTER TABLE with -** the proper arguments. This isn't very fast but it should work for most -** cases. -** One should normally create all indexes with CREATE TABLE or ALTER TABLE. 
-*/ - -int mysql_create_index(THD *thd, TABLE_LIST *table_list, List<Key> &keys) -{ - List<create_field> fields; - List<Alter_drop> drop; - List<Alter_column> alter; - HA_CREATE_INFO create_info; - DBUG_ENTER("mysql_create_index"); - bzero((char*) &create_info,sizeof(create_info)); - create_info.db_type=DB_TYPE_DEFAULT; - DBUG_RETURN(mysql_alter_table(thd,table_list->db,table_list->real_name, - &create_info, table_list, - fields, keys, drop, alter, (ORDER*)0, FALSE, - DUP_ERROR)); -} - - -int mysql_drop_index(THD *thd, TABLE_LIST *table_list, List<Alter_drop> &drop) -{ - List<create_field> fields; - List<Key> keys; - List<Alter_column> alter; - HA_CREATE_INFO create_info; - DBUG_ENTER("mysql_drop_index"); - bzero((char*) &create_info,sizeof(create_info)); - create_info.db_type=DB_TYPE_DEFAULT; - DBUG_RETURN(mysql_alter_table(thd,table_list->db,table_list->real_name, - &create_info, table_list, - fields, keys, drop, alter, (ORDER*)0, FALSE, - DUP_ERROR)); -} /***************************************************************************** unireg support functions @@ -2381,8 +2989,18 @@ int mysql_drop_index(THD *thd, TABLE_LIST *table_list, List<Alter_drop> &drop) ** and afterwards delete those marked unused. */ -void remove_db_from_cache(const my_string db) +void remove_db_from_cache(const char *db) { + char name_buff[NAME_LEN+1]; + if (db && lower_case_table_names) + { + /* + convert database to lower case for comparision. + */ + strmake(name_buff, db, sizeof(name_buff)-1); + my_casedn_str(files_charset_info, name_buff); + db= name_buff; + } for (uint idx=0 ; idx < open_cache.records ; idx++) { TABLE *table=(TABLE*) hash_element(&open_cache,idx); @@ -2414,12 +3032,16 @@ void flush_tables() /* Mark all entries with the table as deleted to force an reopen of the table + The table will be closed (not stored in cache) by the current thread when + close_thread_tables() is called. + PREREQUISITES Lock on LOCK_open() RETURN - 0 If the table is NOT in use by another thread - 1 If the table is NOT in use by another thread + 0 This thread now have exclusive access to this table and no other thread + can access the table until close_thread_tables() is called. 
+ 1 Table is in use by another thread */ bool remove_table_from_cache(THD *thd, const char *db, const char *table_name, @@ -2435,11 +3057,14 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table_name, key_length=(uint) (strmov(strmov(key,db)+1,table_name)-key)+1; for (;;) { + HASH_SEARCH_STATE state; result= signalled= 0; - for (table=(TABLE*) hash_search(&open_cache,(byte*) key,key_length) ; + for (table= (TABLE*) hash_first(&open_cache, (byte*) key, key_length, + &state); table; - table = (TABLE*) hash_next(&open_cache,(byte*) key,key_length)) + table= (TABLE*) hash_next(&open_cache, (byte*) key, key_length, + &state)) { THD *in_use; table->version=0L; /* Free when thread is ready */ @@ -2517,10 +3142,10 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table_name, DBUG_RETURN(result); } -int setup_ftfuncs(THD *thd) +int setup_ftfuncs(SELECT_LEX *select_lex) { - List_iterator<Item_func_match> li(thd->lex.select->ftfunc_list), - lj(thd->lex.select->ftfunc_list); + List_iterator<Item_func_match> li(*(select_lex->ftfunc_list)), + lj(*(select_lex->ftfunc_list)); Item_func_match *ftf, *ftf2; while ((ftf=li++)) @@ -2539,11 +3164,11 @@ int setup_ftfuncs(THD *thd) } -int init_ftfuncs(THD *thd, bool no_order) +int init_ftfuncs(THD *thd, SELECT_LEX *select_lex, bool no_order) { - if (thd->lex.select->ftfunc_list.elements) + if (select_lex->ftfunc_list->elements) { - List_iterator<Item_func_match> li(thd->lex.select->ftfunc_list); + List_iterator<Item_func_match> li(*(select_lex->ftfunc_list)); Item_func_match *ifm; DBUG_PRINT("info",("Performing FULLTEXT search")); thd->proc_info="FULLTEXT initialization"; diff --git a/sql/sql_bitmap.h b/sql/sql_bitmap.h new file mode 100644 index 00000000000..2fd603d9381 --- /dev/null +++ b/sql/sql_bitmap.h @@ -0,0 +1,131 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* + Implementation of a bitmap type. 
+ The idea with this is to be able to handle any constant number of bits but + also be able to use 32 or 64 bits bitmaps very efficiently +*/ + +#include <my_bitmap.h> + +template <uint default_width> class Bitmap +{ + MY_BITMAP map; + uchar buffer[(default_width+7)/8]; +public: + Bitmap() { init(); } + Bitmap(Bitmap& from) { *this=from; } + explicit Bitmap(uint prefix_to_set) { init(prefix_to_set); } + void init() { bitmap_init(&map, buffer, default_width, 0); } + void init(uint prefix_to_set) { init(); set_prefix(prefix_to_set); } + uint length() const { return default_width; } + Bitmap& operator=(const Bitmap& map2) + { + init(); + memcpy(buffer, map2.buffer, sizeof(buffer)); + return *this; + } + void set_bit(uint n) { bitmap_set_bit(&map, n); } + void clear_bit(uint n) { bitmap_clear_bit(&map, n); } + void set_prefix(uint n) { bitmap_set_prefix(&map, n); } + void set_all() { bitmap_set_all(&map); } + void clear_all() { bitmap_clear_all(&map); } + void intersect(Bitmap& map2) { bitmap_intersect(&map, &map2.map); } + void intersect(ulonglong map2buff) + { + MY_BITMAP map2; + bitmap_init(&map2, (uchar *)&map2buff, sizeof(ulonglong)*8, 0); + bitmap_intersect(&map, &map2); + } + void subtract(Bitmap& map2) { bitmap_subtract(&map, &map2.map); } + void merge(Bitmap& map2) { bitmap_union(&map, &map2.map); } + my_bool is_set(uint n) const { return bitmap_is_set(&map, n); } + my_bool is_prefix(uint n) const { return bitmap_is_prefix(&map, n); } + my_bool is_clear_all() const { return bitmap_is_clear_all(&map); } + my_bool is_set_all() const { return bitmap_is_set_all(&map); } + my_bool is_subset(const Bitmap& map2) const { return bitmap_is_subset(&map, &map2.map); } + my_bool operator==(const Bitmap& map2) const { return bitmap_cmp(&map, &map2.map); } + char *print(char *buf) const + { + char *s=buf; int i; + for (i=sizeof(buffer)-1; i>=0 ; i--) + { + if ((*s=_dig_vec_upper[buffer[i] >> 4]) != '0') + break; + if ((*s=_dig_vec_upper[buffer[i] & 15]) != '0') + break; + } + for (s++, i-- ; i>=0 ; i--) + { + *s++=_dig_vec_upper[buffer[i] >> 4]; + *s++=_dig_vec_upper[buffer[i] & 15]; + } + *s=0; + return buf; + } + ulonglong to_ulonglong() const + { + if (sizeof(buffer) >= 8) + return uint8korr(buffer); + DBUG_ASSERT(sizeof(buffer) >= 4); + uint4korr(buffer); + } +}; + +template <> class Bitmap<64> +{ + ulonglong map; +public: + Bitmap<64>() { } +#if defined(__NETWARE__) || defined(__MWERKS__) + /* + Metwork compiler gives error on Bitmap<64> + Changed to Bitmap, since in this case also it will proper construct + this class + */ + explicit Bitmap(uint prefix_to_set) { set_prefix(prefix_to_set); } +#else + explicit Bitmap<64>(uint prefix_to_set) { set_prefix(prefix_to_set); } +#endif + void init() { } + void init(uint prefix_to_set) { set_prefix(prefix_to_set); } + uint length() const { return 64; } + void set_bit(uint n) { map|= ((ulonglong)1) << n; } + void clear_bit(uint n) { map&= ~(((ulonglong)1) << n); } + void set_prefix(uint n) + { + if (n >= length()) + set_all(); + else + map= (((ulonglong)1) << n)-1; + } + void set_all() { map=~(ulonglong)0; } + void clear_all() { map=(ulonglong)0; } + void intersect(Bitmap<64>& map2) { map&= map2.map; } + void intersect(ulonglong map2) { map&= map2; } + void subtract(Bitmap<64>& map2) { map&= ~map2.map; } + void merge(Bitmap<64>& map2) { map|= map2.map; } + my_bool is_set(uint n) const { return test(map & (((ulonglong)1) << n)); } + my_bool is_prefix(uint n) const { return map == (((ulonglong)1) << n)-1; } + my_bool is_clear_all() const { return map == 
(ulonglong)0; } + my_bool is_set_all() const { return map == ~(ulonglong)0; } + my_bool is_subset(const Bitmap<64>& map2) const { return !(map & ~map2.map); } + my_bool operator==(const Bitmap<64>& map2) const { return map == map2.map; } + char *print(char *buf) const { longlong2str(map,buf,16); return buf; } + ulonglong to_ulonglong() const { return map; } +}; + diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index 071dac0d3c5..457478e90db 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -278,6 +278,21 @@ TODO list: - Move MRG_MYISAM table type processing to handlers, something like: tables_used->table->file->register_used_filenames(callback, first_argument); + - Make derived tables cachable. + - QC improvement suggested by Monty: + - Add a counter in open_table() for how many MERGE (ISAM or MyISAM) + tables are cached in the table cache. + (This will be trivial when we have the new table cache in place I + have been working on) + - After this we can add the following test around the for loop in + is_cacheable:: + + if (thd->temp_tables || global_merge_table_count) + + - Another option would be to set thd->lex->safe_to_cache_query to 0 + in 'get_lock_data' if any of the tables was a tmp table or a + MRG_ISAM table. + (This could be done with almost no speed penalty) */ #include "mysql_priv.h" @@ -285,14 +300,16 @@ TODO list: #include <m_ctype.h> #include <my_dir.h> #include <hash.h> -#include "sql_acl.h" #include "ha_myisammrg.h" #ifndef MASTER #include "../srclib/myisammrg/myrg_def.h" #else #include "../myisammrg/myrg_def.h" #endif -#include <assert.h> + +#ifdef EMBEDDED_LIBRARY +#include "emb_qcache.h" +#endif #if defined(EXTRA_DEBUG) && !defined(DBUG_OFF) #define MUTEX_LOCK(M) { DBUG_PRINT("lock", ("mutex lock 0x%lx", (ulong)(M))); \ @@ -342,16 +359,10 @@ TODO list: #define DUMP(C) #endif -#ifdef FN_NO_CASE_SENCE -#define DB_NAME_PREPROCESS(C) tolower(C) -#else -#define DB_NAME_PREPROCESS(C) (C) -#endif - const char *query_cache_type_names[]= { "OFF", "ON", "DEMAND",NullS }; TYPELIB query_cache_type_typelib= { - array_elements(query_cache_type_names)-1,"", query_cache_type_names + array_elements(query_cache_type_names)-1,"", query_cache_type_names, NULL }; /***************************************************************************** @@ -363,7 +374,7 @@ inline Query_cache_block * Query_cache_block_table::block() return (Query_cache_block *)(((byte*)this) - ALIGN_SIZE(sizeof(Query_cache_block_table)*n) - ALIGN_SIZE(sizeof(Query_cache_block))); -}; +} /***************************************************************************** Query_cache_block method(s) @@ -648,12 +659,16 @@ void query_cache_abort(NET *net) } -void query_cache_end_of_result(NET *net) +void query_cache_end_of_result(THD *thd) { DBUG_ENTER("query_cache_end_of_result"); - if (net->query_cache_query != 0) // Quick check on unlocked structure + if (thd->net.query_cache_query != 0) // Quick check on unlocked structure { +#ifdef EMBEDDED_LIBRARY + query_cache_insert(&thd->net, (char*)thd, + emb_count_querycache_size(thd)); +#endif STRUCT_LOCK(&query_cache.structure_guard_mutex); /* It is very unlikely that following condition is TRUE (it is possible @@ -667,7 +682,7 @@ void query_cache_end_of_result(NET *net) } Query_cache_block *query_block = ((Query_cache_block*) - net->query_cache_query); + thd->net.query_cache_query); if (query_block) { DUMP(&query_cache); @@ -699,7 +714,7 @@ void query_cache_end_of_result(NET *net) // Cache was flushed or resized and query was deleted => do nothing 
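
The Bitmap template introduced in sql_bitmap.h above provides exactly the operations used on key_map earlier in this diff (set_all(), set_bit(), subtract(), intersect(), is_set_all()). A short usage sketch against the Bitmap<64> specialization as declared there, assuming the normal server include environment since the header itself relies on my_bitmap.h and the mysys helpers; it follows the USE INDEX / IGNORE INDEX combination done in setup_tables():

    #include "sql_bitmap.h"   /* the header added above; needs the server's other headers */

    void key_map_usage_sketch()
    {
      Bitmap<64> keys_in_use, ignored, used_keys;

      keys_in_use.clear_all();
      keys_in_use.set_bit(0);              /* PRIMARY */
      keys_in_use.set_bit(1);              /* idx_a   */
      keys_in_use.set_bit(2);              /* idx_b   */

      ignored.clear_all();
      ignored.set_bit(1);                  /* IGNORE INDEX (idx_a) */
      keys_in_use.subtract(ignored);       /* like keys_in_use_for_query above */

      used_keys.clear_all();
      used_keys.set_bit(0);
      used_keys.set_bit(1);
      used_keys.intersect(keys_in_use);    /* like table->used_keys.intersect(...) above */

      /* used_keys.is_set(0) is now true, used_keys.is_set(1) is false. */
    }

The generic Bitmap<default_width> offers the same interface for widths that do not fit in a single ulonglong.
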
STRUCT_UNLOCK(&query_cache.structure_guard_mutex); } - net->query_cache_query=0; + thd->net.query_cache_query=0; DBUG_EXECUTE("check_querycache",query_cache.check_integrity(0);); } DBUG_VOID_RETURN; @@ -756,6 +771,14 @@ ulong Query_cache::resize(ulong query_cache_size_arg) } +ulong Query_cache::set_min_res_unit(ulong size) +{ + if (size < min_allocation_unit) + size= min_allocation_unit; + return (min_result_data_size= ALIGN_SIZE(size)); +} + + void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used) { TABLE_COUNTER_TYPE local_tables; @@ -763,12 +786,33 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used) DBUG_ENTER("Query_cache::store_query"); if (query_cache_size == 0 || thd->locked_tables) DBUG_VOID_RETURN; + uint8 tables_type= 0; if ((local_tables= is_cacheable(thd, thd->query_length, - thd->query, &thd->lex, tables_used))) + thd->query, thd->lex, tables_used, + &tables_type))) { NET *net= &thd->net; - byte flags= (thd->client_capabilities & CLIENT_LONG_FLAG ? 0x80 : 0); + Query_cache_query_flags flags; + // fill all gaps between fields with 0 to get repeatable key + bzero(&flags, QUERY_CACHE_FLAGS_SIZE); + flags.client_long_flag= (thd->client_capabilities & CLIENT_LONG_FLAG ? + 1 : 0); + flags.client_protocol_41= (thd->client_capabilities & CLIENT_PROTOCOL_41 ? + 1 : 0); + flags.character_set_client_num= + thd->variables.character_set_client->number; + flags.character_set_results_num= + (thd->variables.character_set_results ? + thd->variables.character_set_results->number : + UINT_MAX); + flags.collation_connection_num= + thd->variables.collation_connection->number; + flags.limit= thd->variables.select_limit; + flags.time_zone= thd->variables.time_zone; + flags.sql_mode= thd->variables.sql_mode; + flags.max_sort_length= thd->variables.max_sort_length; + flags.group_concat_max_len= thd->variables.group_concat_max_len; STRUCT_LOCK(&structure_guard_mutex); if (query_cache_size == 0) @@ -778,6 +822,13 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used) } DUMP(this); + if (ask_handler_allowance(thd, tables_used)) + { + refused++; + STRUCT_UNLOCK(&structure_guard_mutex); + DBUG_VOID_RETURN; + } + /* Key is query + database + flag */ if (thd->db_length) { @@ -789,26 +840,19 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used) { DBUG_PRINT("qcache", ("No active database")); } + tot_length= thd->query_length + thd->db_length + 1 + + QUERY_CACHE_FLAGS_SIZE; /* - Prepare flags: - most significant bit - CLIENT_LONG_FLAG, - other - charset number (0 no charset convertion) + We should only copy structure (don't use it location directly) + because of alignment issue */ - if (thd->variables.convert_set != 0) - { - flags|= (byte) thd->variables.convert_set->number(); - DBUG_ASSERT(thd->variables.convert_set->number() < 128); - } - tot_length= thd->query_length+thd->db_length+2+sizeof(ha_rows); - thd->query[tot_length-1]= (char) flags; - memcpy((void *)(thd->query + (tot_length-sizeof(ha_rows)-1)), - (const void *)&thd->variables.select_limit, sizeof(ha_rows)); + memcpy((void *)(thd->query + (tot_length - QUERY_CACHE_FLAGS_SIZE)), + &flags, QUERY_CACHE_FLAGS_SIZE); /* Check if another thread is processing the same query? 
*/ Query_cache_block *competitor = (Query_cache_block *) hash_search(&queries, (byte*) thd->query, tot_length); - DBUG_PRINT("qcache", ("competitor 0x%lx, flags %x", (ulong) competitor, - flags)); + DBUG_PRINT("qcache", ("competitor 0x%lx", (ulong) competitor)); if (competitor == 0) { /* Query is not in cache and no one is working with it; Store it */ @@ -823,7 +867,7 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used) Query_cache_query *header = query_block->query(); header->init_n_lock(); - if (hash_insert(&queries, (byte*) query_block)) + if (my_hash_insert(&queries, (byte*) query_block)) { refused++; DBUG_PRINT("qcache", ("insertion in query hash")); @@ -849,6 +893,7 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used) net->query_cache_query= (gptr) query_block; header->writer(net); + header->tables_type(tables_type); // init_n_lock make query block locked BLOCK_UNLOCK_WR(query_block); } @@ -868,14 +913,14 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used) DBUG_PRINT("qcache", ("Another thread process same query")); } } - else - if (thd->lex.sql_command == SQLCOM_SELECT) - statistic_increment(refused, &structure_guard_mutex); + else if (thd->lex->sql_command == SQLCOM_SELECT) + statistic_increment(refused, &structure_guard_mutex); end: DBUG_VOID_RETURN; } + /* Check if the query is in the cache. If it was cached, send it to the user. @@ -886,8 +931,6 @@ end: -1 The query was cached but we didn't have rights to use it. No error is sent to the client yet. */ - - int Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) @@ -896,37 +939,43 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) Query_cache_block *first_result_block, *result_block; Query_cache_block_table *block_table, *block_table_end; ulong tot_length; - byte flags; + Query_cache_query_flags flags; + bool check_tables; DBUG_ENTER("Query_cache::send_result_to_client"); if (query_cache_size == 0 || thd->locked_tables || - /* - it is not possible to check has_transactions() function of handler - because tables not opened yet - */ - (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) || thd->variables.query_cache_type == 0) - goto err; /* Check that we haven't forgot to reset the query cache variables */ DBUG_ASSERT(thd->net.query_cache_query == 0); - if (!thd->safe_to_cache_query) + if (!thd->lex->safe_to_cache_query) { DBUG_PRINT("qcache", ("SELECT is non-cacheable")); goto err; } - /* - Test if the query is a SELECT - (pre-space is removed in dispatch_command) - */ - if (toupper(sql[0]) != 'S' || toupper(sql[1]) != 'E' || - toupper(sql[2]) !='L') { - DBUG_PRINT("qcache", ("The statement is not a SELECT; Not cached")); - goto err; + uint i= 0; + /* + Skip '(' characters in queries like following: + (select a from t1) union (select a from t1); + */ + while (sql[i]=='(') + i++; + + /* + Test if the query is a SELECT + (pre-space is removed in dispatch_command) + */ + if (my_toupper(system_charset_info, sql[i]) != 'S' || + my_toupper(system_charset_info, sql[i + 1]) != 'E' || + my_toupper(system_charset_info, sql[i + 2]) != 'L') + { + DBUG_PRINT("qcache", ("The statement is not a SELECT; Not cached")); + goto err; + } } STRUCT_LOCK(&structure_guard_mutex); @@ -937,7 +986,7 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) } Query_cache_block *query_block; - tot_length= query_length+thd->db_length+2+sizeof(ha_rows); + tot_length= query_length + thd->db_length + 1 + QUERY_CACHE_FLAGS_SIZE; if 
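The lookup path above first skips any number of leading '(' characters, so parenthesised UNION branches such as (select a from t1) union (select a from t1) are still recognised, and then does a case-insensitive test for the letters S, E, L. A minimal sketch of that pre-check, using the C library toupper() in place of my_toupper(system_charset_info, ...):

#include <cctype>
#include <cassert>

static bool looks_like_select(const char *sql)
{
  unsigned int i= 0;
  while (sql[i] == '(')              // skip '(' as in "(select ...) union ..."
    i++;
  return toupper((unsigned char) sql[i])     == 'S' &&
         toupper((unsigned char) sql[i + 1]) == 'E' &&
         toupper((unsigned char) sql[i + 2]) == 'L';
}

int main()
{
  assert(looks_like_select("(select a from t1) union (select a from t1)"));
  assert(!looks_like_select("update t1 set a=1"));
  return 0;
}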
(thd->db_length) { memcpy(sql+query_length+1, thd->db, thd->db_length); @@ -948,23 +997,28 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) { DBUG_PRINT("qcache", ("No active database")); } - /* - prepare flags: - Most significant bit - CLIENT_LONG_FLAG, - Other - charset number (0 no charset convertion) - */ - flags= (thd->client_capabilities & CLIENT_LONG_FLAG ? 0x80 : 0); - if (thd->variables.convert_set != 0) - { - flags|= (byte) thd->variables.convert_set->number(); - DBUG_ASSERT(thd->variables.convert_set->number() < 128); - } - sql[tot_length-1]= (char) flags; - memcpy((void *)(sql + (tot_length-sizeof(ha_rows)-1)), - (const void *)&thd->variables.select_limit, sizeof(ha_rows)); - query_block= (Query_cache_block *) hash_search(&queries, (byte*) sql, - tot_length); + // fill all gaps between fields with 0 to get repeatable key + bzero(&flags, QUERY_CACHE_FLAGS_SIZE); + flags.client_long_flag= (thd->client_capabilities & CLIENT_LONG_FLAG ? + 1 : 0); + flags.client_protocol_41= (thd->client_capabilities & CLIENT_PROTOCOL_41 ? + 1 : 0); + flags.character_set_client_num= thd->variables.character_set_client->number; + flags.character_set_results_num= + (thd->variables.character_set_results ? + thd->variables.character_set_results->number : + UINT_MAX); + flags.collation_connection_num= thd->variables.collation_connection->number; + flags.limit= thd->variables.select_limit; + flags.time_zone= thd->variables.time_zone; + flags.sql_mode= thd->variables.sql_mode; + flags.max_sort_length= thd->variables.max_sort_length; + flags.group_concat_max_len= thd->variables.group_concat_max_len; + memcpy((void *)(sql + (tot_length - QUERY_CACHE_FLAGS_SIZE)), + &flags, QUERY_CACHE_FLAGS_SIZE); + query_block = (Query_cache_block *) hash_search(&queries, (byte*) sql, + tot_length); /* Quick abort on unlocked data */ if (query_block == 0 || query_block->query()->result() == 0 || @@ -990,6 +1044,16 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) } DBUG_PRINT("qcache", ("Query have result 0x%lx", (ulong) query)); + if ((thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) && + (query->tables_type() & HA_CACHE_TBL_TRANSACT)) + { + DBUG_PRINT("qcache", + ("we are in transaction and have transaction tables in query")); + BLOCK_UNLOCK_RD(query_block); + goto err_unlock; + } + + check_tables= query->tables_type() & HA_CACHE_TBL_ASKTRANSACT; // Check access; block_table= query_block->table(0); block_table_end= block_table+query_block->n_tables; @@ -1008,9 +1072,9 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) */ for (tmptable= thd->temporary_tables; tmptable ; tmptable= tmptable->next) { - if (tmptable->key_length - TMP_TABLE_KEY_EXTRA == table->key_len() && + if (tmptable->key_length - TMP_TABLE_KEY_EXTRA == table->key_length() && !memcmp(tmptable->table_cache_key, table->data(), - table->key_len())) + table->key_length())) { DBUG_PRINT("qcache", ("Temporary table detected: '%s.%s'", @@ -1021,7 +1085,7 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) temporary tables => assign following variable to make check faster. 
*/ - thd->safe_to_cache_query=0; + thd->lex->safe_to_cache_query=0; BLOCK_UNLOCK_RD(query_block); DBUG_RETURN(-1); } @@ -1030,13 +1094,14 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) bzero((char*) &table_list,sizeof(table_list)); table_list.db = table->db(); table_list.alias= table_list.real_name= table->table(); +#ifndef NO_EMBEDDED_ACCESS_CHECKS if (check_table_access(thd,SELECT_ACL,&table_list,1)) { DBUG_PRINT("qcache", ("probably no SELECT access to %s.%s => return to normal processing", table_list.db, table_list.alias)); STRUCT_UNLOCK(&structure_guard_mutex); - thd->safe_to_cache_query=0; // Don't try to cache this + thd->lex->safe_to_cache_query=0; // Don't try to cache this BLOCK_UNLOCK_RD(query_block); DBUG_RETURN(-1); // Privilege error } @@ -1045,9 +1110,23 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) DBUG_PRINT("qcache", ("Need to check column privileges for %s.%s", table_list.db, table_list.alias)); BLOCK_UNLOCK_RD(query_block); - thd->safe_to_cache_query=0; // Don't try to cache this + thd->lex->safe_to_cache_query= 0; // Don't try to cache this + goto err_unlock; // Parse query + } +#endif /*!NO_EMBEDDED_ACCESS_CHECKS*/ + if (check_tables && !ha_caching_allowed(thd, table->db(), + table->key_length(), + table->type())) + { + DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s", + table_list.db, table_list.alias)); + BLOCK_UNLOCK_RD(query_block); + thd->lex->safe_to_cache_query= 0; // Don't try to cache this goto err_unlock; // Parse query } + else + DBUG_PRINT("qcache", ("handler allow caching (%d) %s,%s", + check_tables, table_list.db, table_list.alias)); } move_to_query_list_end(query_block); hits++; @@ -1056,13 +1135,14 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) /* Send cached result to client */ +#ifndef EMBEDDED_LIBRARY do { DBUG_PRINT("qcache", ("Results (len %lu, used %lu, headers %lu)", - result_block->length, result_block->used, - result_block->headers_len()+ - ALIGN_SIZE(sizeof(Query_cache_result)))); - + result_block->length, result_block->used, + result_block->headers_len()+ + ALIGN_SIZE(sizeof(Query_cache_result)))); + Query_cache_result *result = result_block->result(); if (net_real_write(&thd->net, result->data(), result_block->used - @@ -1071,6 +1151,13 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) break; // Client aborted result_block = result_block->next; } while (result_block != first_result_block); +#else + { + Querycache_stream qs(result_block, result_block->headers_len() + + ALIGN_SIZE(sizeof(Query_cache_result))); + emb_load_querycache_result(thd, &qs); + } +#endif /*!EMBEDDED_LIBRARY*/ thd->limit_found_rows = query->found_rows(); @@ -1104,11 +1191,14 @@ void Query_cache::invalidate(THD *thd, TABLE_LIST *tables_used, for (; tables_used; tables_used=tables_used->next) { DBUG_ASSERT(!using_transactions || tables_used->table!=0); - if (using_transactions && - tables_used->table->file->has_transactions()) - /* + if (tables_used->derived) + continue; + if (using_transactions && + (tables_used->table->file->table_cache_type() == + HA_CACHE_TBL_TRANSACT)) + /* Tables_used->table can't be 0 in transaction. - Only 'drop' invalidate not opened table, but 'drop' + Only 'drop' invalidate not opened table, but 'drop' force transaction finish. 
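In the non-embedded path above, a cached result is streamed by walking the circular list of result blocks: start at the first block, write everything past the block headers, follow next until the first block comes around again. A toy version of that traversal (demo_block and send_cached_result are illustrative stand-ins; fwrite() plays the role of net_real_write()):

#include <cstdio>

struct demo_block              // stripped-down Query_cache_block
{
  demo_block *next;            // circular: the last block points at the first
  const char *payload;         // what follows the headers in the real block
  unsigned long used;
};

static void send_cached_result(demo_block *first)
{
  demo_block *block= first;
  do
  {
    fwrite(block->payload, 1, block->used, stdout);
    block= block->next;
  } while (block != first);    // stop once the ring is complete
}

int main()
{
  demo_block b2= { 0, " world\n", 7 };
  demo_block b1= { &b2, "hello", 5 };
  b2.next= &b1;                // close the ring
  send_cached_result(&b1);
  return 0;
}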
*/ thd->add_changed_table(tables_used->table); @@ -1156,7 +1246,7 @@ void Query_cache::invalidate(CHANGED_TABLE_LIST *tables_used) */ void Query_cache::invalidate_locked_for_write(TABLE_LIST *tables_used) { - DBUG_ENTER("Query_cache::invalidate (changed table list)"); + DBUG_ENTER("Query_cache::invalidate_locked_for_write"); if (query_cache_size > 0 && tables_used) { STRUCT_LOCK(&structure_guard_mutex); @@ -1190,7 +1280,8 @@ void Query_cache::invalidate(THD *thd, TABLE *table, { using_transactions = using_transactions && (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)); - if (using_transactions && table->file->has_transactions()) + if (using_transactions && + (table->file->table_cache_type() == HA_CACHE_TBL_TRANSACT)) thd->add_changed_table(table); else invalidate_table(table); @@ -1483,16 +1574,32 @@ ulong Query_cache::init_cache() DUMP(this); - VOID(hash_init(&queries,def_query_hash_size, 0, 0, + VOID(hash_init(&queries, &my_charset_bin, def_query_hash_size, 0, 0, query_cache_query_get_key, 0, 0)); #ifndef FN_NO_CASE_SENCE - VOID(hash_init(&tables,def_table_hash_size, 0, 0, + /* + If lower_case_table_names!=0 then db and table names are already + converted to lower case and we can use binary collation for their + comparison (no matter if file system case sensitive or not). + If we have case-sensitive file system (like on most Unixes) and + lower_case_table_names == 0 then we should distinguish my_table + and MY_TABLE cases and so again can use binary collation. + */ + VOID(hash_init(&tables, &my_charset_bin, def_table_hash_size, 0, 0, query_cache_table_get_key, 0, 0)); #else - // windows, OS/2 or other case insensitive file names work around - VOID(hash_init(&tables,def_table_hash_size, 0, 0, - query_cache_table_get_key, 0, - (lower_case_table_names?0:HASH_CASE_INSENSITIVE))); + /* + On windows, OS/2, MacOS X with HFS+ or any other case insensitive + file system if lower_case_table_names!=0 we have same situation as + in previous case, but if lower_case_table_names==0 then we should + not distinguish cases (to be compatible in behavior with underlying + file system) and so should use case insensitive collation for + comparison. + */ + VOID(hash_init(&tables, + lower_case_table_names ? &my_charset_bin : + files_charset_info, + def_table_hash_size, 0, 0,query_cache_table_get_key, 0, 0)); #endif queries_in_cache = 0; @@ -1828,22 +1935,32 @@ my_bool Query_cache::write_result_data(Query_cache_block **result_block, { // It is success (nobody can prevent us write data) STRUCT_UNLOCK(&structure_guard_mutex); - byte *rest = (byte*) data; - Query_cache_block *block = *result_block; uint headers_len = (ALIGN_SIZE(sizeof(Query_cache_block)) + ALIGN_SIZE(sizeof(Query_cache_result))); +#ifndef EMBEDDED_LIBRARY + Query_cache_block *block= *result_block; + byte *rest= (byte*) data; // Now fill list of blocks that created by allocate_data_chain do { block->type = type; ulong length = block->used - headers_len; DBUG_PRINT("qcache", ("write %lu byte in block 0x%lx",length, - (ulong)block)); + (ulong)block)); memcpy((void*)(((byte*) block)+headers_len), (void*) rest, length); rest += length; block = block->next; type = Query_cache_block::RES_CONT; } while (block != *result_block); +#else + /* + Set type of first block, emb_store_querycache_result() will handle + the others. 
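The comments added to init_cache() above spell out when the table-name hash may use binary comparison: whenever names are already lowercased (lower_case_table_names != 0) or the file system is case sensitive; only a case-insensitive file system with lower_case_table_names == 0 needs a case-folding collation. A compact sketch of just that decision, with strcmp()/strcasecmp() standing in for &my_charset_bin and files_charset_info (strcasecmp() is POSIX):

#include <cstring>
#include <strings.h>
#include <cassert>

typedef int (*name_cmp)(const char *, const char *);

static name_cmp table_name_comparator(bool lower_case_table_names,
                                      bool case_sensitive_fs)
{
  if (lower_case_table_names || case_sensitive_fs)
    return strcmp;       // binary comparison is safe
  return strcasecmp;     // match the file system's case folding
}

int main()
{
  name_cmp cmp= table_name_comparator(false, false);
  assert(cmp("my_table", "MY_TABLE") == 0);   // same table on HFS+/NTFS
  cmp= table_name_comparator(false, true);
  assert(cmp("my_table", "MY_TABLE") != 0);   // distinct tables on Unix
  return 0;
}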
+ */ + (*result_block)->type= type; + Querycache_stream qs(*result_block, headers_len); + emb_store_querycache_result(&qs, (THD*)data); +#endif /*!EMBEDDED_LIBRARY*/ } else { @@ -2017,6 +2134,13 @@ my_bool Query_cache::register_all_tables(Query_cache_block *block, for (n=0; tables_used; tables_used=tables_used->next, n++, block_table++) { + if (tables_used->derived) + { + DBUG_PRINT("qcache", ("derived table skipped")); + n--; + block_table--; + continue; + } DBUG_PRINT("qcache", ("table %s, db %s, openinfo at 0x%lx, keylen %u, key at 0x%lx", tables_used->real_name, tables_used->db, @@ -2026,7 +2150,8 @@ my_bool Query_cache::register_all_tables(Query_cache_block *block, block_table->n=n; if (!insert_table(tables_used->table->key_length, tables_used->table->table_cache_key, block_table, - tables_used->db_length)) + tables_used->db_length, + tables_used->table->file->table_cache_type())) break; if (tables_used->table->db_type == DB_TYPE_MRG_MYISAM) @@ -2039,11 +2164,12 @@ my_bool Query_cache::register_all_tables(Query_cache_block *block, { char key[MAX_DBKEY_LENGTH]; uint32 db_length; - uint key_length =filename_2_table_key(key, table->table->filename, + uint key_length= filename_2_table_key(key, table->table->filename, &db_length); (++block_table)->n= ++n; if (!insert_table(key_length, key, block_table, - db_length)) + db_length, + tables_used->table->file->table_cache_type())) goto err; } } @@ -2070,7 +2196,7 @@ err: my_bool Query_cache::insert_table(uint key_len, char *key, Query_cache_block_table *node, - uint32 db_length) + uint32 db_length, uint8 cache_type) { DBUG_ENTER("Query_cache::insert_table"); DBUG_PRINT("qcache", ("insert table node 0x%lx, len %d", @@ -2099,7 +2225,7 @@ Query_cache::insert_table(uint key_len, char *key, Query_cache_block_table *list_root = table_block->table(0); list_root->n = 0; list_root->next = list_root->prev = list_root; - if (hash_insert(&tables, (const byte *) table_block)) + if (my_hash_insert(&tables, (const byte *) table_block)) { DBUG_PRINT("qcache", ("Can't insert table to hash")); // write_block_data return locked block @@ -2108,7 +2234,8 @@ Query_cache::insert_table(uint key_len, char *key, } char *db = header->db(); header->table(db + db_length + 1); - header->key_len(key_len); + header->key_length(key_len); + header->type(cache_type); } Query_cache_block_table *list_root = table_block->table(0); @@ -2553,7 +2680,9 @@ void Query_cache::double_linked_list_join(Query_cache_block *head_tail, TABLE_COUNTER_TYPE Query_cache::is_cacheable(THD *thd, uint32 query_len, char *query, - LEX *lex, TABLE_LIST *tables_used) + LEX *lex, + TABLE_LIST *tables_used, + uint8 *tables_type) { TABLE_COUNTER_TYPE table_count = 0; DBUG_ENTER("Query_cache::is_cacheable"); @@ -2562,12 +2691,11 @@ TABLE_COUNTER_TYPE Query_cache::is_cacheable(THD *thd, uint32 query_len, (thd->variables.query_cache_type == 1 || (thd->variables.query_cache_type == 2 && (lex->select_lex.options & OPTION_TO_QUERY_CACHE))) && - thd->safe_to_cache_query) + lex->safe_to_cache_query) { - my_bool has_transactions = 0; DBUG_PRINT("qcache", ("options %lx %lx, type %u", OPTION_TO_QUERY_CACHE, - lex->select->options, + lex->select_lex.options, (int) thd->variables.query_cache_type)); for (; tables_used; tables_used= tables_used->next) @@ -2576,23 +2704,30 @@ TABLE_COUNTER_TYPE Query_cache::is_cacheable(THD *thd, uint32 query_len, DBUG_PRINT("qcache", ("table %s, db %s, type %u", tables_used->real_name, tables_used->db, tables_used->table->db_type)); - has_transactions = (has_transactions || - 
tables_used->table->file->has_transactions()); - - if (tables_used->table->db_type == DB_TYPE_MRG_ISAM || - tables_used->table->tmp_table != NO_TMP_TABLE || - (tables_used->db_length == 5 && - DB_NAME_PREPROCESS(tables_used->db[0])=='m' && - DB_NAME_PREPROCESS(tables_used->db[1])=='y' && - DB_NAME_PREPROCESS(tables_used->db[2])=='s' && - DB_NAME_PREPROCESS(tables_used->db[3])=='q' && - DB_NAME_PREPROCESS(tables_used->db[4])=='l')) + *tables_type|= tables_used->table->file->table_cache_type(); + + /* + table_alias_charset used here because it depends of + lower_case_table_names variable + */ + if ((tables_used->table->tmp_table != NO_TMP_TABLE && + !tables_used->derived) || + (*tables_type & HA_CACHE_TBL_NOCACHE) || + (tables_used->db_length == 5 && + my_strnncoll(table_alias_charset, (uchar*)tables_used->db, 6, + (uchar*)"mysql",6) == 0)) { DBUG_PRINT("qcache", - ("select not cacheable: used MRG_ISAM, temporary or system table(s)")); + ("select not cacheable: temporary, system or \ +other non-cacheable table(s)")); DBUG_RETURN(0); } - if (tables_used->table->db_type == DB_TYPE_MRG_MYISAM) + if (tables_used->derived) + { + table_count--; + DBUG_PRINT("qcache", ("derived table skipped")); + } + else if (tables_used->table->db_type == DB_TYPE_MRG_MYISAM) { ha_myisammrg *handler = (ha_myisammrg *)tables_used->table->file; MYRG_INFO *file = handler->myrg_info(); @@ -2601,7 +2736,7 @@ TABLE_COUNTER_TYPE Query_cache::is_cacheable(THD *thd, uint32 query_len, } if ((thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) && - has_transactions) + ((*tables_type)&HA_CACHE_TBL_TRANSACT)) { DBUG_PRINT("qcache", ("not in autocommin mode")); DBUG_RETURN(0); @@ -2614,11 +2749,44 @@ TABLE_COUNTER_TYPE Query_cache::is_cacheable(THD *thd, uint32 query_len, ("not interesting query: %d or not cacheable, options %lx %lx, type %u", (int) lex->sql_command, OPTION_TO_QUERY_CACHE, - lex->select->options, + lex->select_lex.options, (int) thd->variables.query_cache_type)); DBUG_RETURN(0); } +/* + Check handler allowance to cache query with these tables + + SYNOPSYS + Query_cache::ask_handler_allowance() + thd - thread handlers + tables_used - tables list used in query + + RETURN + 0 - caching allowed + 1 - caching disallowed +*/ +my_bool Query_cache::ask_handler_allowance(THD *thd, + TABLE_LIST *tables_used) +{ + DBUG_ENTER("Query_cache::ask_handler_allowance"); + + for (; tables_used; tables_used= tables_used->next) + { + TABLE *table= tables_used->table; + if (!ha_caching_allowed(thd, table->table_cache_key, + table->key_length, + table->file->table_cache_type())) + { + DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s", + tables_used->db, tables_used->alias)); + thd->lex->safe_to_cache_query= 0; // Don't try to cache this + DBUG_RETURN(1); + } + } + DBUG_RETURN(0); +} + /***************************************************************************** Packing @@ -2705,6 +2873,7 @@ my_bool Query_cache::move_by_type(byte **border, } case Query_cache_block::TABLE: { + HASH_SEARCH_STATE record_idx; DBUG_PRINT("qcache", ("block 0x%lx TABLE", (ulong) block)); if (*border == 0) break; @@ -2722,7 +2891,7 @@ my_bool Query_cache::move_by_type(byte **border, byte *key; uint key_length; key=query_cache_table_get_key((byte*) block, &key_length, 0); - hash_search(&tables, (byte*) key, key_length); + hash_first(&tables, (byte*) key, key_length, &record_idx); block->destroy(); new_block->init(len); @@ -2756,7 +2925,7 @@ my_bool Query_cache::move_by_type(byte **border, /* Fix pointer to table name */ 
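is_cacheable() above now ORs every table's table_cache_type() into *tables_type and refuses to cache when any table is marked non-cacheable, or when the connection has a transaction open and the mask contains a transactional table. A rough model of that accumulation (the enum values below are invented placeholders, not the server's HA_CACHE_TBL_* constants):

#include <cassert>

enum { TBL_NONTRANSACT= 1, TBL_ASKTRANSACT= 2, TBL_TRANSACT= 4, TBL_NOCACHE= 8 };

static bool cacheable(const unsigned char *table_flags, unsigned int n_tables,
                      bool in_transaction)
{
  unsigned char tables_type= 0;
  for (unsigned int i= 0; i < n_tables; i++)
    tables_type|= table_flags[i];            // the loop over tables_used
  if (tables_type & TBL_NOCACHE)
    return false;                            // handler forbids caching outright
  return !(in_transaction && (tables_type & TBL_TRANSACT));
}

int main()
{
  const unsigned char tables[]= { TBL_NONTRANSACT, TBL_TRANSACT };
  assert(cacheable(tables, 2, false));       // autocommit: result may be cached
  assert(!cacheable(tables, 2, true));       // open transaction: refuse
  return 0;
}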
new_block->table()->table(new_block->table()->db() + tablename_offset); /* Fix hash to point at moved block */ - hash_replace(&tables, tables.current_record, (byte*) new_block); + hash_replace(&tables, &record_idx, (byte*) new_block); DBUG_PRINT("qcache", ("moved %lu bytes to 0x%lx, new gap at 0x%lx", len, (ulong) new_block, (ulong) *border)); @@ -2764,6 +2933,7 @@ my_bool Query_cache::move_by_type(byte **border, } case Query_cache_block::QUERY: { + HASH_SEARCH_STATE record_idx; DBUG_PRINT("qcache", ("block 0x%lx QUERY", (ulong) block)); if (*border == 0) break; @@ -2781,7 +2951,7 @@ my_bool Query_cache::move_by_type(byte **border, byte *key; uint key_length; key=query_cache_query_get_key((byte*) block, &key_length, 0); - hash_search(&queries, (byte*) key, key_length); + hash_first(&queries, (byte*) key, key_length, &record_idx); // Move table of used tables memmove((char*) new_block->table(0), (char*) block->table(0), ALIGN_SIZE(n_tables*sizeof(Query_cache_block_table))); @@ -2849,7 +3019,7 @@ my_bool Query_cache::move_by_type(byte **border, net->query_cache_query= (gptr) new_block; } /* Fix hash to point at moved block */ - hash_replace(&queries, queries.current_record, (byte*) new_block); + hash_replace(&queries, &record_idx, (byte*) new_block); DBUG_PRINT("qcache", ("moved %lu bytes to 0x%lx, new gap at 0x%lx", len, (ulong) new_block, (ulong) *border)); break; @@ -3076,7 +3246,7 @@ void Query_cache::bins_dump() { uint i; - if (!initialized) + if (!initialized || query_cache_size == 0) { DBUG_PRINT("qcache", ("Query Cache not initialized")); return; @@ -3117,7 +3287,7 @@ void Query_cache::bins_dump() void Query_cache::cache_dump() { - if (!initialized) + if (!initialized || query_cache_size == 0) { DBUG_PRINT("qcache", ("Query Cache not initialized")); return; @@ -3160,20 +3330,22 @@ void Query_cache::queries_dump() { uint len; char *str = (char*) query_cache_query_get_key((byte*) block, &len, 0); - len--; // Point at flags - uint flags = (uint) (uchar) str[len]; - str[len]=0; - DBUG_PRINT("qcache", ("%u (%u,%u) '%s' '%s'", - ((flags & QUERY_CACHE_CLIENT_LONG_FLAG_MASK)? 
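During packing, move_by_type() now keeps the HASH_SEARCH_STATE returned by hash_first() and hands it to hash_replace(), so the hash entry that pointed at the old block address is repointed at the relocated copy. The same idea expressed with a standard container (std::unordered_map and the names demo_block/relocate stand in for the mysys HASH machinery):

#include <unordered_map>
#include <string>
#include <cassert>

struct demo_block { int payload; };

static void relocate(std::unordered_map<std::string, demo_block*> &index,
                     const std::string &key, demo_block *new_block)
{
  std::unordered_map<std::string, demo_block*>::iterator it= index.find(key);
  if (it != index.end())     // hash_first() located the entry ...
    it->second= new_block;   // ... hash_replace() points it at the new copy
}

int main()
{
  demo_block old_block= { 42 };
  demo_block moved_copy= old_block;
  std::unordered_map<std::string, demo_block*> index;
  index["test.t1"]= &old_block;
  relocate(index, "test.t1", &moved_copy);
  assert(index["test.t1"] == &moved_copy);
  return 0;
}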
1:0), - (flags & QUERY_CACHE_CHARSET_CONVERT_MASK), len, - str,strend(str)+1)); + len-= QUERY_CACHE_FLAGS_SIZE; // Point at flags + Query_cache_query_flags flags; + memcpy(&flags, str+len, QUERY_CACHE_FLAGS_SIZE); + str[len]= 0; // make zero ending DB name + DBUG_PRINT("qcache", ("F:%u C:%u L:%lu T:'%s' (%u) '%s' '%s'", + flags.client_long_flag, + flags.character_set_client_num, + (ulong)flags.limit, flags.time_zone->get_name(), + len, str, strend(str)+1)); DBUG_PRINT("qcache", ("-b- 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx", (ulong) block, (ulong) block->next, (ulong) block->prev, (ulong)block->pnext, (ulong)block->pprev)); - str[len]=(char) flags; - for (TABLE_COUNTER_TYPE t = 0; t < block->n_tables; t++) + memcpy(str + len, &flags, QUERY_CACHE_FLAGS_SIZE); // restore flags + for (TABLE_COUNTER_TYPE t= 0; t < block->n_tables; t++) { - Query_cache_table *table = block->table(t)->parent; + Query_cache_table *table= block->table(t)->parent; DBUG_PRINT("qcache", ("-t- '%s' '%s'", table->db(), table->table())); } Query_cache_query *header = block->query(); @@ -3206,7 +3378,7 @@ void Query_cache::queries_dump() void Query_cache::tables_dump() { - if (!initialized) + if (!initialized || query_cache_size == 0) { DBUG_PRINT("qcache", ("Query Cache not initialized")); return; diff --git a/sql/sql_cache.h b/sql/sql_cache.h index 854937d0158..b0a045a8aad 100644 --- a/sql/sql_cache.h +++ b/sql/sql_cache.h @@ -53,10 +53,6 @@ of list of free blocks */ #define QUERY_CACHE_MEM_BIN_TRY 5 -/* query flags masks */ -#define QUERY_CACHE_CLIENT_LONG_FLAG_MASK 0x80 -#define QUERY_CACHE_CHARSET_CONVERT_MASK 0x7F - /* packing parameters */ #define QUERY_CACHE_PACK_ITERATION 2 #define QUERY_CACHE_PACK_LIMIT (512*1024L) @@ -115,18 +111,21 @@ struct Query_cache_query Query_cache_block *res; NET *wri; ulong len; + uint8 tbls_type; inline void init_n_lock(); void unlock_n_destroy(); inline ulonglong found_rows() { return limit_found_rows; } - inline void found_rows(ulonglong rows) { limit_found_rows = rows; } + inline void found_rows(ulonglong rows) { limit_found_rows= rows; } inline Query_cache_block *result() { return res; } - inline void result(Query_cache_block *p) { res=p; } + inline void result(Query_cache_block *p) { res= p; } inline NET *writer() { return wri; } - inline void writer(NET *p) { wri=p; } + inline void writer(NET *p) { wri= p; } + inline uint8 tables_type() { return tbls_type; } + inline void tables_type(uint8 type) { tbls_type= type; } inline ulong length() { return len; } - inline ulong add(ulong packet_len) { return(len += packet_len); } - inline void length(ulong length) { len = length; } + inline ulong add(ulong packet_len) { return(len+= packet_len); } + inline void length(ulong length) { len= length; } inline gptr query() { return (gptr)(((byte*)this)+ @@ -144,13 +143,16 @@ struct Query_cache_query struct Query_cache_table { char *tbl; - uint32 key_length; + uint32 key_len; + uint8 table_type; inline char *db() { return (char *) data(); } inline char *table() { return tbl; } - inline void table(char *table) { tbl = table; } - inline uint32 key_len() { return key_length; } - inline void key_len(uint32 len) { key_length= len; } + inline void table(char *table) { tbl= table; } + inline uint32 key_length() { return key_len; } + inline void key_length(uint32 len) { key_len= len; } + inline uint8 type() { return table_type; } + inline void type(uint8 t) { table_type= t; } inline gptr data() { return (gptr)(((byte*)this)+ @@ -279,7 +281,7 @@ protected: TABLE_COUNTER_TYPE tables); my_bool insert_table(uint 
key_len, char *key, Query_cache_block_table *node, - uint32 db_length); + uint32 db_length, uint8 cache_type); void unlink_table(Query_cache_block_table *node); Query_cache_block *get_free_block (ulong len, my_bool not_less, ulong min); @@ -336,7 +338,10 @@ protected: (query without tables not cached) */ TABLE_COUNTER_TYPE is_cacheable(THD *thd, uint32 query_len, char *query, - LEX *lex, TABLE_LIST *tables_used); + LEX *lex, TABLE_LIST *tables_used, + uint8 *tables_type); + + static my_bool ask_handler_allowance(THD *thd, TABLE_LIST *tables_used); public: Query_cache(ulong query_cache_limit = ULONG_MAX, @@ -349,7 +354,10 @@ protected: void init(); /* resize query cache (return real query size, 0 if disabled) */ ulong resize(ulong query_cache_size); + /* set limit on result size */ inline void result_size_limit(ulong limit){query_cache_limit=limit;} + /* set minimal result data allocation unit size */ + ulong set_min_res_unit(ulong size); /* register query in cache */ void store_query(THD *thd, TABLE_LIST *used_tables); @@ -382,7 +390,7 @@ protected: void destroy(); friend void query_cache_insert(NET *net, const char *packet, ulong length); - friend void query_cache_end_of_result(NET *net); + friend void query_cache_end_of_result(THD *thd); friend void query_cache_abort(NET *net); /* @@ -406,7 +414,7 @@ protected: extern Query_cache query_cache; extern TYPELIB query_cache_type_typelib; -void query_cache_end_of_result(NET *net); +void query_cache_end_of_result(THD *thd); void query_cache_abort(NET *net); #endif diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 66d23ada163..d278ebe8dfa 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -22,12 +22,11 @@ ** *****************************************************************************/ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_IMPLEMENTATION #pragma implementation // gcc: Class implementation #endif #include "mysql_priv.h" -#include "sql_acl.h" #include <m_ctype.h> #include <sys/stat.h> #include <thr_alarm.h> @@ -35,7 +34,12 @@ #include <io.h> #endif #include <mysys_err.h> -#include <assert.h> + +/* + The following is used to initialise Table_ident with a internal + table name +*/ +char internal_table_name[2]= "*"; /***************************************************************************** @@ -73,42 +77,127 @@ extern "C" void free_user_var(user_var_entry *entry) my_free((char*) entry,MYF(0)); } +bool key_part_spec::operator==(const key_part_spec& other) const +{ + return length == other.length && !strcmp(field_name, other.field_name); +} + + +/* + Test if a foreign key (= generated key) is a prefix of the given key + (ignoring key name, key type and order of columns) + + NOTES: + This is only used to test if an index for a FOREIGN KEY exists + + IMPLEMENTATION + We only compare field names + + RETURN + 0 Generated key is a prefix of other key + 1 Not equal +*/ + +bool foreign_key_prefix(Key *a, Key *b) +{ + /* Ensure that 'a' is the generated key */ + if (a->generated) + { + if (b->generated && a->columns.elements > b->columns.elements) + swap_variables(Key*, a, b); // Put shorter key in 'a' + } + else + { + if (!b->generated) + return TRUE; // No foreign key + swap_variables(Key*, a, b); // Put generated key in 'a' + } + + /* Test if 'a' is a prefix of 'b' */ + if (a->columns.elements > b->columns.elements) + return TRUE; // Can't be prefix + + List_iterator<key_part_spec> col_it1(a->columns); + List_iterator<key_part_spec> col_it2(b->columns); + const key_part_spec *col1, *col2; + +#ifdef 
ENABLE_WHEN_INNODB_CAN_HANDLE_SWAPED_FOREIGN_KEY_COLUMNS + while ((col1= col_it1++)) + { + bool found= 0; + col_it2.rewind(); + while ((col2= col_it2++)) + { + if (*col1 == *col2) + { + found= TRUE; + break; + } + } + if (!found) + return TRUE; // Error + } + return FALSE; // Is prefix +#else + while ((col1= col_it1++)) + { + col2= col_it2++; + if (!(*col1 == *col2)) + return TRUE; + } + return FALSE; // Is prefix +#endif +} + /**************************************************************************** ** Thread specific functions ****************************************************************************/ -THD::THD():user_time(0),global_read_lock(0),fatal_error(0), - last_insert_id_used(0),insert_id_used(0),rand_used(0), - in_lock_tables(0),bootstrap(0) +THD::THD() + :user_time(0), global_read_lock(0), is_fatal_error(0), + last_insert_id_used(0), + insert_id_used(0), rand_used(0), time_zone_used(0), + in_lock_tables(0), bootstrap(0) { - host=user=priv_user=db=query=ip=0; + current_arena= this; + host= user= priv_user= db= ip=0; host_or_ip= "connecting host"; - locked=killed=count_cuted_fields=some_tables_deleted=no_errors=password= - query_start_used=safe_to_cache_query=0; - db_length=query_length=col_access=0; - query_error=0; + locked=some_tables_deleted=no_errors=password= 0; + killed=0; + query_start_used= 0; + count_cuted_fields= CHECK_FIELD_IGNORE; + db_length= col_access= 0; + query_error= tmp_table_used= 0; next_insert_id=last_insert_id=0; - open_tables=temporary_tables=handler_tables=0; + open_tables= temporary_tables= handler_tables= derived_tables= 0; hash_clear(&handler_tables_hash); - current_tablenr=0; - handler_items=0; tmp_table=0; lock=locked_tables=0; used_tables=0; - cuted_fields=sent_row_count=0L; + cuted_fields= sent_row_count= 0L; + limit_found_rows= 0; + statement_id_counter= 0UL; + // Must be reset to handle error with THD's created for init of mysqld + lex->current_select= 0; start_time=(time_t) 0; current_linfo = 0; slave_thread = 0; - slave_proxy_id = 0; + variables.pseudo_thread_id= 0; + one_shot_set= 0; file_id = 0; - cond_count=0; + query_id= 0; + warn_id= 0; + db_charset= global_system_variables.collation_database; mysys_var=0; #ifndef DBUG_OFF dbug_sentry=THD_SENTRY_MAGIC; -#endif +#endif +#ifndef EMBEDDED_LIBRARY net.vio=0; +#endif net.last_error[0]=0; // If error on boot + client_capabilities= 0; // minimalistic client ull=0; system_thread=cleanup_done=0; peer_port= 0; // For SHOW PROCESSLIST @@ -127,21 +216,42 @@ THD::THD():user_time(0),global_read_lock(0),fatal_error(0), server_id = ::server_id; slave_net = 0; command=COM_CONNECT; - set_query_id=1; +#ifndef NO_EMBEDDED_ACCESS_CHECKS db_access=NO_ACCESS; +#endif version=refresh_version; // For boot + *scramble= '\0'; init(); /* Initialize sub structures */ - bzero((char*) &mem_root,sizeof(mem_root)); - bzero((char*) &transaction.mem_root,sizeof(transaction.mem_root)); + init_sql_alloc(&warn_root, WARN_ALLOC_BLOCK_SIZE, WARN_ALLOC_PREALLOC_SIZE); user_connect=(USER_CONN *)0; - hash_init(&user_vars, USER_VARS_HASH_SIZE, 0, 0, + hash_init(&user_vars, &my_charset_bin, USER_VARS_HASH_SIZE, 0, 0, (hash_get_key) get_var_key, (hash_free_key) free_user_var,0); + + /* For user vars replication*/ + if (opt_bin_log) + my_init_dynamic_array(&user_var_events, + sizeof(BINLOG_USER_VAR_EVENT *), + 16, + 16); + else + bzero((char*) &user_var_events, sizeof(user_var_events)); + + /* Protocol */ + protocol= &protocol_simple; // Default protocol + protocol_simple.init(this); + protocol_prep.init(this); + + 
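foreign_key_prefix() above decides whether a generated foreign-key index is covered by an existing key by checking that its column names form a leading prefix of that key (only names are compared, and in the default branch column order matters). A simplified standalone version that returns true for a prefix rather than the function's 0/1 convention (is_key_prefix is an invented name):

#include <vector>
#include <string>
#include <cstddef>
#include <cassert>

static bool is_key_prefix(const std::vector<std::string> &generated,
                          const std::vector<std::string> &existing)
{
  if (generated.size() > existing.size())
    return false;                            // longer key cannot be a prefix
  for (size_t i= 0; i < generated.size(); i++)
    if (generated[i] != existing[i])         // name comparison only
      return false;
  return true;
}

int main()
{
  std::vector<std::string> fk, idx;
  fk.push_back("a"); fk.push_back("b");
  idx.push_back("a"); idx.push_back("b"); idx.push_back("c");
  assert(is_key_prefix(fk, idx));            // existing index covers the FK
  idx[1]= "x";
  assert(!is_key_prefix(fk, idx));
  return 0;
}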
tablespace_op=FALSE; #ifdef USING_TRANSACTIONS bzero((char*) &transaction,sizeof(transaction)); - if (opt_using_transactions) + /* + Binlog is always open (if needed) before a THD is created (including + bootstrap). + */ + if (opt_using_transactions && mysql_bin_log.is_open()) { if (open_cached_file(&transaction.trans_log, mysql_tmpdir, LOG_PREFIX, binlog_cache_size, @@ -150,15 +260,9 @@ THD::THD():user_time(0),global_read_lock(0),fatal_error(0), transaction.trans_log.end_of_file= max_binlog_cache_size; } #endif - - /* - We need good random number initialization for new thread - Just coping global one will not work - */ + init_sql_alloc(&transaction.mem_root, ALLOC_ROOT_MIN_BLOCK_SIZE, 0); { - pthread_mutex_lock(&LOCK_thread_count); - ulong tmp=(ulong) (my_rnd(&sql_rand) * 0xffffffff); /* make all bits random */ - pthread_mutex_unlock(&LOCK_thread_count); + ulong tmp=sql_rnd_with_mutex(); randominit(&rand, tmp + (ulong) &rand, tmp + (ulong) ::query_id); } } @@ -172,17 +276,48 @@ void THD::init(void) { pthread_mutex_lock(&LOCK_global_system_variables); variables= global_system_variables; + variables.time_format= date_time_format_copy((THD*) 0, + variables.time_format); + variables.date_format= date_time_format_copy((THD*) 0, + variables.date_format); + variables.datetime_format= date_time_format_copy((THD*) 0, + variables.datetime_format); +#ifdef HAVE_NDBCLUSTER_DB + variables.ndb_use_transactions= 1; +#endif pthread_mutex_unlock(&LOCK_global_system_variables); server_status= SERVER_STATUS_AUTOCOMMIT; options= thd_startup_options; - sql_mode=(uint) opt_sql_mode; open_options=ha_open_options; update_lock_default= (variables.low_priority_updates ? TL_WRITE_LOW_PRIORITY : TL_WRITE); session_tx_isolation= (enum_tx_isolation) variables.tx_isolation; + warn_list.empty(); + bzero((char*) warn_count, sizeof(warn_count)); + total_warn_count= 0; + update_charset(); } + +/* + Init THD for query processing. + This has to be called once before we call mysql_parse. + See also comments in sql_class.h. 
+*/ + +void THD::init_for_queries() +{ + ha_enable_transaction(this,TRUE); + + reset_root_defaults(mem_root, variables.query_alloc_block_size, + variables.query_prealloc_size); + reset_root_defaults(&transaction.mem_root, + variables.trans_alloc_block_size, + variables.trans_prealloc_size); +} + + /* Do what's needed when one invokes change user @@ -197,11 +332,12 @@ void THD::init(void) void THD::change_user(void) { cleanup(); - cleanup_done=0; + cleanup_done= 0; init(); - hash_init(&user_vars, USER_VARS_HASH_SIZE, 0, 0, + stmt_map.reset(); + hash_init(&user_vars, &my_charset_bin, USER_VARS_HASH_SIZE, 0, 0, (hash_get_key) get_var_key, - (hash_free_key) free_user_var,0); + (hash_free_key) free_user_var, 0); } @@ -219,8 +355,12 @@ void THD::cleanup(void) mysql_ha_flush(this, (TABLE_LIST*) 0, MYSQL_HA_CLOSE_FINAL | MYSQL_HA_FLUSH_ALL, FALSE); hash_free(&handler_tables_hash); - close_temporary_tables(this); + delete_dynamic(&user_var_events); hash_free(&user_vars); + close_temporary_tables(this); + my_free((char*) variables.time_format, MYF(MY_ALLOW_ZERO_PTR)); + my_free((char*) variables.date_format, MYF(MY_ALLOW_ZERO_PTR)); + my_free((char*) variables.datetime_format, MYF(MY_ALLOW_ZERO_PTR)); if (global_read_lock) unlock_global_read_lock(this); if (ull) @@ -244,11 +384,13 @@ THD::~THD() pthread_mutex_unlock(&LOCK_delete); /* Close connection */ +#ifndef EMBEDDED_LIBRARY if (net.vio) { vio_delete(net.vio); net_end(&net); } +#endif if (!cleanup_done) cleanup(); #ifdef USING_TRANSACTIONS @@ -260,19 +402,21 @@ THD::~THD() #endif DBUG_PRINT("info", ("freeing host")); - if (host != localhost) // If not pointer to constant + if (host != my_localhost) // If not pointer to constant safeFree(host); if (user != delayed_user) safeFree(user); - safeFree(db); safeFree(ip); - free_root(&mem_root,MYF(0)); + safeFree(db); + free_root(&warn_root,MYF(0)); free_root(&transaction.mem_root,MYF(0)); mysys_var=0; // Safety (shouldn't be needed) pthread_mutex_destroy(&LOCK_delete); #ifndef DBUG_OFF dbug_sentry = THD_SENTRY_GONE; #endif + /* Reset stmt_backup.mem_root to not double-free memory from thd.mem_root */ + clear_alloc_root(&stmt_backup.main_mem_root); DBUG_VOID_RETURN; } @@ -303,10 +447,13 @@ void THD::awake(bool prepare_to_die) it is the true value but maybe current_mutex is not yet non-zero (we're in the middle of enter_cond() and there is a "memory order inversion"). So we test the mutex too to not lock 0. + Note that there is a small chance we fail to kill. If victim has locked - current_mutex, and hasn't entered enter_cond(), then we don't know it's - going to wait on cond. Then victim goes into its cond "forever" (until - we issue a second KILL). True we have set its thd->killed but it may not + current_mutex, but hasn't yet entered enter_cond() (which means that + current_cond and current_mutex are 0), then the victim will not get + a signal and it may wait "forever" on the cond (until + we issue a second KILL or the status it's waiting for happens). + It's true that we have set its thd->killed but it may not see it immediately and so may have time to reach the cond_wait(). 
*/ if (mysys_var->current_cond && mysys_var->current_mutex) @@ -327,8 +474,7 @@ void THD::awake(bool prepare_to_die) bool THD::store_globals() { if (my_pthread_setspecific_ptr(THR_THD, this) || - my_pthread_setspecific_ptr(THR_MALLOC, &mem_root) || - my_pthread_setspecific_ptr(THR_NET, &net)) + my_pthread_setspecific_ptr(THR_MALLOC, &mem_root)) return 1; mysys_var=my_thread_var; dbug_thread_id=my_thread_id(); @@ -336,11 +482,97 @@ bool THD::store_globals() By default 'slave_proxy_id' is 'thread_id'. They may later become different if this is the slave SQL thread. */ - slave_proxy_id= thread_id; + variables.pseudo_thread_id= thread_id; return 0; } +/* + Convert a string to another character set + + SYNOPSIS + convert_string() + to Store new allocated string here + to_cs New character set for allocated string + from String to convert + from_length Length of string to convert + from_cs Original character set + + NOTES + to will be 0-terminated to make it easy to pass to system funcs + + RETURN + 0 ok + 1 End of memory. + In this case to->str will point to 0 and to->length will be 0. +*/ + +bool THD::convert_string(LEX_STRING *to, CHARSET_INFO *to_cs, + const char *from, uint from_length, + CHARSET_INFO *from_cs) +{ + DBUG_ENTER("convert_string"); + size_s new_length= to_cs->mbmaxlen * from_length; + uint dummy_errors; + if (!(to->str= alloc(new_length+1))) + { + to->length= 0; // Safety fix + DBUG_RETURN(1); // EOM + } + to->length= copy_and_convert((char*) to->str, new_length, to_cs, + from, from_length, from_cs, &dummy_errors); + to->str[to->length]=0; // Safety + DBUG_RETURN(0); +} + + +/* + Convert string from source character set to target character set inplace. + + SYNOPSIS + THD::convert_string + + DESCRIPTION + Convert string using convert_buffer - buffer for character set + conversion shared between all protocols. 
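THD::convert_string() above sizes its target buffer as to_cs->mbmaxlen bytes per source character plus one, so any conversion result fits and can always be zero terminated for system functions. A sketch of just that sizing and termination rule (convert_string_demo is an invented name, and a plain memcpy() replaces copy_and_convert(), which is where the real character set conversion happens):

#include <cstdlib>
#include <cstring>

static char *convert_string_demo(const char *from, size_t from_length,
                                 size_t to_mbmaxlen, size_t *to_length)
{
  size_t new_length= to_mbmaxlen * from_length;   // worst-case output size
  char *to= (char*) malloc(new_length + 1);
  if (to == 0)
  {
    *to_length= 0;                                // the "safety fix" on OOM
    return 0;
  }
  memcpy(to, from, from_length);                  // stand-in for the conversion
  *to_length= from_length;
  to[*to_length]= '\0';                           // always 0-terminated
  return to;
}

int main()
{
  size_t len;
  char *s= convert_string_demo("abc", 3, 3 /* e.g. utf8 mbmaxlen */, &len);
  free(s);
  return 0;
}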
+ + RETURN + 0 ok + !0 out of memory +*/ + +bool THD::convert_string(String *s, CHARSET_INFO *from_cs, CHARSET_INFO *to_cs) +{ + uint dummy_errors; + if (convert_buffer.copy(s->ptr(), s->length(), from_cs, to_cs, &dummy_errors)) + return TRUE; + /* If convert_buffer >> s copying is more efficient long term */ + if (convert_buffer.alloced_length() >= convert_buffer.length() * 2 || + !s->is_alloced()) + { + return s->copy(convert_buffer); + } + s->swap(convert_buffer); + return FALSE; +} + + +/* + Update some cache variables when character set changes +*/ + +void THD::update_charset() +{ + uint32 not_used; + charset_is_system_charset= !String::needs_conversion(0,charset(), + system_charset_info, + ¬_used); + charset_is_collation_connection= + !String::needs_conversion(0,charset(),variables.collation_connection, + ¬_used); +} + + /* routings to adding tables to list of changed in transaction tables */ inline static void list_include(CHANGED_TABLE_LIST** prev, @@ -430,20 +662,102 @@ CHANGED_TABLE_LIST* THD::changed_table_dup(const char *key, long key_length) } +int THD::send_explain_fields(select_result *result) +{ + List<Item> field_list; + Item *item; + CHARSET_INFO *cs= system_charset_info; + field_list.push_back(new Item_return_int("id",3, MYSQL_TYPE_LONGLONG)); + field_list.push_back(new Item_empty_string("select_type", 19, cs)); + field_list.push_back(item= new Item_empty_string("table", NAME_LEN, cs)); + item->maybe_null= 1; + field_list.push_back(item= new Item_empty_string("type", 10, cs)); + item->maybe_null= 1; + field_list.push_back(item=new Item_empty_string("possible_keys", + NAME_LEN*MAX_KEY, cs)); + item->maybe_null=1; + field_list.push_back(item=new Item_empty_string("key", NAME_LEN, cs)); + item->maybe_null=1; + field_list.push_back(item=new Item_return_int("key_len",3, + MYSQL_TYPE_LONGLONG)); + item->maybe_null=1; + field_list.push_back(item=new Item_empty_string("ref", + NAME_LEN*MAX_REF_PARTS, cs)); + item->maybe_null=1; + field_list.push_back(item= new Item_return_int("rows", 10, + MYSQL_TYPE_LONGLONG)); + item->maybe_null= 1; + field_list.push_back(new Item_empty_string("Extra", 255, cs)); + return (result->send_fields(field_list,1)); +} + #ifdef SIGNAL_WITH_VIO_CLOSE void THD::close_active_vio() { DBUG_ENTER("close_active_vio"); safe_mutex_assert_owner(&LOCK_delete); +#ifndef EMBEDDED_LIBRARY if (active_vio) { vio_close(active_vio); active_vio = 0; } +#endif DBUG_VOID_RETURN; } #endif + +struct Item_change_record: public ilink +{ + Item **place; + Item *old_value; + /* Placement new was hidden by `new' in ilink (TODO: check): */ + static void *operator new(size_t size, void *mem) { return mem; } + static void operator delete(void *ptr, size_t size) {} + static void operator delete(void *ptr, void *mem) { /* never called */ } +}; + + +/* + Register an item tree tree transformation, performed by the query + optimizer. We need a pointer to runtime_memroot because it may be != + thd->mem_root (due to possible set_n_backup_item_arena called for thd). +*/ + +void THD::nocheck_register_item_tree_change(Item **place, Item *old_value, + MEM_ROOT *runtime_memroot) +{ + Item_change_record *change; + /* + Now we use one node per change, which adds some memory overhead, + but still is rather fast as we use alloc_root for allocations. + A list of item tree changes of an average query should be short. 
+ */ + void *change_mem= alloc_root(runtime_memroot, sizeof(*change)); + if (change_mem == 0) + { + fatal_error(); + return; + } + change= new (change_mem) Item_change_record; + change->place= place; + change->old_value= old_value; + change_list.append(change); +} + + +void THD::rollback_item_tree_changes() +{ + I_List_iterator<Item_change_record> it(change_list); + Item_change_record *change; + while ((change= it++)) + *change->place= change->old_value; + /* We can forget about changes memory: it's allocated in runtime memroot */ + change_list.empty(); +} + + /***************************************************************************** ** Functions to provide a interface to select results *****************************************************************************/ @@ -453,61 +767,78 @@ select_result::select_result() thd=current_thd; } -static String default_line_term("\n"),default_escaped("\\"), - default_field_term("\t"); +void select_result::send_error(uint errcode,const char *err) +{ + ::send_error(thd, errcode, err); +} + + +void select_result::cleanup() +{ + /* do nothing */ +} + +static String default_line_term("\n",default_charset_info); +static String default_escaped("\\",default_charset_info); +static String default_field_term("\t",default_charset_info); sql_exchange::sql_exchange(char *name,bool flag) :file_name(name), opt_enclosed(0), dumpfile(flag), skip_lines(0) { field_term= &default_field_term; - enclosed= line_start= &empty_string; + enclosed= line_start= &my_empty_string; line_term= &default_line_term; escaped= &default_escaped; } bool select_send::send_fields(List<Item> &list,uint flag) { - return ::send_fields(thd,list,flag); + return thd->protocol->send_fields(&list,flag); } - /* Send data to client. Returns 0 if ok */ bool select_send::send_data(List<Item> &items) { - List_iterator_fast<Item> li(items); - String *packet= &thd->packet; - DBUG_ENTER("send_data"); + if (unit->offset_limit_cnt) + { // using limit offset,count + unit->offset_limit_cnt--; + return 0; + } #ifdef HAVE_INNOBASE_DB - /* We may be passing the control from mysqld to the client: release the - InnoDB adaptive hash S-latch to avoid thread deadlocks if it was reserved - by thd */ + /* + We may be passing the control from mysqld to the client: release the + InnoDB adaptive hash S-latch to avoid thread deadlocks if it was reserved + by thd + */ if (thd->transaction.all.innobase_tid) ha_release_temporary_latches(thd); #endif - if (thd->offset_limit) - { // using limit offset,count - thd->offset_limit--; - DBUG_RETURN(0); - } - packet->length(0); // Reset packet + List_iterator_fast<Item> li(items); + Protocol *protocol= thd->protocol; + char buff[MAX_FIELD_WIDTH]; + String buffer(buff, sizeof(buff), &my_charset_bin); + DBUG_ENTER("send_data"); + + protocol->prepare_for_resend(); Item *item; while ((item=li++)) { - if (item->send(thd, packet)) + if (item->send(protocol, &buffer)) { - packet->free(); // Free used - my_error(ER_OUT_OF_RESOURCES,MYF(0)); - DBUG_RETURN(1); + protocol->free(); // Free used buffer + my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); + break; } } thd->sent_row_count++; - if (!thd->net.vio) + if (!thd->vio_ok()) DBUG_RETURN(0); - bool error=my_net_write(&thd->net,(char*) packet->ptr(),packet->length()); - DBUG_RETURN(error); + if (!thd->net.report_error) + DBUG_RETURN(protocol->write()); + DBUG_RETURN(1); } bool select_send::send_eof() @@ -523,19 +854,63 @@ bool select_send::send_eof() /* Unlock tables before sending packet to gain some speed */ if (thd->lock) 
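The change list above gives the optimizer an undo log: every rewrite of an Item pointer records where it wrote and the value it replaced, and rollback_item_tree_changes() walks the list restoring those values so a prepared statement starts each execution from the original tree. A toy version of the same pattern using int* slots instead of Item** (change_record, register_change and rollback_changes are invented names):

#include <vector>
#include <cstddef>
#include <cassert>

struct change_record
{
  int **place;        // the pointer slot that was rewritten
  int *old_value;     // what it held before the rewrite
};

static void register_change(std::vector<change_record> &log,
                            int **place, int *old_value)
{
  change_record rec= { place, old_value };
  log.push_back(rec);
}

static void rollback_changes(std::vector<change_record> &log)
{
  for (size_t i= 0; i < log.size(); i++)
    *log[i].place= log[i].old_value;   // restore the original pointer
  log.clear();                         // like change_list.empty()
}

int main()
{
  int original= 1, replacement= 2;
  int *slot= &original;                // plays the role of an Item* position
  std::vector<change_record> log;
  register_change(log, &slot, slot);   // remember the old value first
  slot= &replacement;                  // the optimizer's transformation
  rollback_changes(log);
  assert(slot == &original);           // tree is back to its original shape
  return 0;
}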
{ - mysql_unlock_tables(thd, thd->lock); thd->lock=0; + mysql_unlock_tables(thd, thd->lock); + thd->lock=0; } - ::send_eof(&thd->net); - return 0; + if (!thd->net.report_error) + { + ::send_eof(thd); + return 0; + } + else + return 1; } -/*************************************************************************** -** Export of select to textfile -***************************************************************************/ +/************************************************************************ + Handling writing to file +************************************************************************/ + +void select_to_file::send_error(uint errcode,const char *err) +{ + ::send_error(thd,errcode,err); + if (file > 0) + { + (void) end_io_cache(&cache); + (void) my_close(file,MYF(0)); + (void) my_delete(path,MYF(0)); // Delete file on error + file= -1; + } +} -select_export::~select_export() +bool select_to_file::send_eof() +{ + int error= test(end_io_cache(&cache)); + if (my_close(file,MYF(MY_WME))) + error= 1; + if (!error) + ::send_ok(thd,row_count); + file= -1; + return error; +} + + +void select_to_file::cleanup() +{ + /* In case of error send_eof() may be not called: close the file here. */ + if (file >= 0) + { + (void) end_io_cache(&cache); + (void) my_close(file,MYF(0)); + file= -1; + } + path[0]= '\0'; + row_count= 0; +} + + +select_to_file::~select_to_file() { if (file >= 0) { // This only happens in case of error @@ -543,40 +918,85 @@ select_export::~select_export() (void) my_close(file,MYF(0)); file= -1; } +} + +/*************************************************************************** +** Export of select to textfile +***************************************************************************/ + +select_export::~select_export() +{ thd->sent_row_count=row_count; } -int -select_export::prepare(List<Item> &list) + +/* + Create file with IO cache + + SYNOPSIS + create_file() + thd Thread handle + path File name + exchange Excange class + cache IO cache + + RETURN + >= 0 File handle + -1 Error +*/ + + +static File create_file(THD *thd, char *path, sql_exchange *exchange, + IO_CACHE *cache) { - uint option=4; - bool blob_flag=0; + File file; + uint option= MY_UNPACK_FILENAME; + #ifdef DONT_ALLOW_FULL_LOAD_DATA_PATHS - option|=1; // Force use of db directory + option|= MY_REPLACE_DIR; // Force use of db directory #endif - if ((uint) strlen(exchange->file_name) + NAME_LEN >= FN_REFLEN) - strmake(path,exchange->file_name,FN_REFLEN-1); - (void) fn_format(path,exchange->file_name, thd->db ? thd->db : "", "", - option); - if (!access(path,F_OK)) + + if (!dirname_length(exchange->file_name)) { - my_error(ER_FILE_EXISTS_ERROR,MYF(0),exchange->file_name); - return 1; + strxnmov(path, FN_REFLEN, mysql_real_data_home, thd->db ? 
thd->db : "", NullS); + (void) fn_format(path, exchange->file_name, path, "", option); + } + else + (void) fn_format(path, exchange->file_name, mysql_real_data_home, "", option); + + if (!access(path, F_OK)) + { + my_error(ER_FILE_EXISTS_ERROR, MYF(0), exchange->file_name); + return -1; } /* Create the file world readable */ - if ((file=my_create(path, 0666, O_WRONLY|O_EXCL, MYF(MY_WME))) < 0) - return 1; + if ((file= my_create(path, 0666, O_WRONLY|O_EXCL, MYF(MY_WME))) < 0) + return file; #ifdef HAVE_FCHMOD - (void) fchmod(file,0666); // Because of umask() + (void) fchmod(file, 0666); // Because of umask() #else - (void) chmod(path,0666); + (void) chmod(path, 0666); #endif - if (init_io_cache(&cache,file,0L,WRITE_CACHE,0L,1,MYF(MY_WME))) + if (init_io_cache(cache, file, 0L, WRITE_CACHE, 0L, 1, MYF(MY_WME))) { - my_close(file,MYF(0)); - file= -1; - return 1; + my_close(file, MYF(0)); + my_delete(path, MYF(0)); // Delete file on error, it was just created + return -1; } + return file; +} + + +int +select_export::prepare(List<Item> &list, SELECT_LEX_UNIT *u) +{ + bool blob_flag=0; + unit= u; + if ((uint) strlen(exchange->file_name) + NAME_LEN >= FN_REFLEN) + strmake(path,exchange->file_name,FN_REFLEN-1); + + if ((file= create_file(thd, path, exchange, &cache)) < 0) + return 1; /* Check if there is any blobs in data */ { List_iterator_fast<Item> li(list); @@ -614,12 +1034,12 @@ bool select_export::send_data(List<Item> &items) DBUG_ENTER("send_data"); char buff[MAX_FIELD_WIDTH],null_buff[2],space[MAX_FIELD_WIDTH]; bool space_inited=0; - String tmp(buff,sizeof(buff)),*res; + String tmp(buff,sizeof(buff),&my_charset_bin),*res; tmp.length(0); - if (thd->offset_limit) + if (unit->offset_limit_cnt) { // using limit offset,count - thd->offset_limit--; + unit->offset_limit_cnt--; DBUG_RETURN(0); } row_count++; @@ -675,10 +1095,11 @@ bool select_export::send_data(List<Item> &items) pos++) { #ifdef USE_MB - if (use_mb(default_charset_info)) + CHARSET_INFO *res_charset=res->charset(); + if (use_mb(res_charset)) { int l; - if ((l=my_ismbchar(default_charset_info, pos, end))) + if ((l=my_ismbchar(res_charset, pos, end))) { pos += l-1; continue; @@ -746,78 +1167,17 @@ err: } -void select_export::send_error(uint errcode,const char *err) -{ - ::send_error(&thd->net,errcode,err); - if (file > 0) - { - (void) end_io_cache(&cache); - (void) my_close(file,MYF(0)); - (void) my_delete(path,MYF(0)); // Delete file on error - file= -1; - } -} - - -bool select_export::send_eof() -{ - int error=test(end_io_cache(&cache)); - if (my_close(file,MYF(MY_WME))) - error=1; - if (error) - ::send_error(&thd->net); - else - ::send_ok(&thd->net,row_count); - file= -1; - return error; -} - - /*************************************************************************** ** Dump of select to a binary file ***************************************************************************/ -select_dump::~select_dump() -{ - if (file >= 0) - { // This only happens in case of error - (void) end_io_cache(&cache); - (void) my_close(file,MYF(0)); - file= -1; - } -} - int -select_dump::prepare(List<Item> &list __attribute__((unused))) +select_dump::prepare(List<Item> &list __attribute__((unused)), + SELECT_LEX_UNIT *u) { - uint option=4; -#ifdef DONT_ALLOW_FULL_LOAD_DATA_PATHS - option|=1; // Force use of db directory -#endif - (void) fn_format(path,exchange->file_name, thd->db ? 
thd->db : "", "", - option); - if (!access(path,F_OK)) - { - my_error(ER_FILE_EXISTS_ERROR,MYF(0),exchange->file_name); - return 1; - } - /* Create the file world readable */ - if ((file=my_create(path, 0666, O_WRONLY|O_EXCL, MYF(MY_WME))) < 0) - return 1; -#ifdef HAVE_FCHMOD - (void) fchmod(file,0666); // Because of umask() -#else - (void) chmod(path,0666); -#endif - if (init_io_cache(&cache,file,0L,WRITE_CACHE,0L,1,MYF(MY_WME))) - { - my_close(file,MYF(0)); - my_delete(path,MYF(0)); - file= -1; - return 1; - } - return 0; + unit= u; + return (int) ((file= create_file(thd, path, exchange, &cache)) < 0); } @@ -825,19 +1185,19 @@ bool select_dump::send_data(List<Item> &items) { List_iterator_fast<Item> li(items); char buff[MAX_FIELD_WIDTH]; - String tmp(buff,sizeof(buff)),*res; + String tmp(buff,sizeof(buff),&my_charset_bin),*res; tmp.length(0); Item *item; DBUG_ENTER("send_data"); - if (thd->offset_limit) + if (unit->offset_limit_cnt) { // using limit offset,count - thd->offset_limit--; + unit->offset_limit_cnt--; DBUG_RETURN(0); } if (row_count++ > 1) { - my_error(ER_TOO_MANY_ROWS,MYF(0)); + my_error(ER_TOO_MANY_ROWS, MYF(0)); goto err; } while ((item=li++)) @@ -860,28 +1220,572 @@ err: } -void select_dump::send_error(uint errcode,const char *err) +select_subselect::select_subselect(Item_subselect *item_arg) { - ::send_error(&thd->net,errcode,err); - if (file > 0) + item= item_arg; +} + + +bool select_singlerow_subselect::send_data(List<Item> &items) +{ + DBUG_ENTER("select_singlerow_subselect::send_data"); + Item_singlerow_subselect *it= (Item_singlerow_subselect *)item; + if (it->assigned()) { - (void) end_io_cache(&cache); - (void) my_close(file,MYF(0)); - (void) my_delete(path,MYF(0)); // Delete file on error - file= -1; + my_message(ER_SUBQUERY_NO_1_ROW, ER(ER_SUBQUERY_NO_1_ROW), MYF(0)); + DBUG_RETURN(1); + } + if (unit->offset_limit_cnt) + { // Using limit offset,count + unit->offset_limit_cnt--; + DBUG_RETURN(0); } + List_iterator_fast<Item> li(items); + Item *val_item; + for (uint i= 0; (val_item= li++); i++) + it->store(i, val_item); + it->assigned(1); + DBUG_RETURN(0); } -bool select_dump::send_eof() +void select_max_min_finder_subselect::cleanup() { - int error=test(end_io_cache(&cache)); - if (my_close(file,MYF(MY_WME))) - error=1; - if (error) - ::send_error(&thd->net); + DBUG_ENTER("select_max_min_finder_subselect::cleanup"); + cache= 0; + DBUG_VOID_RETURN; +} + + +bool select_max_min_finder_subselect::send_data(List<Item> &items) +{ + DBUG_ENTER("select_max_min_finder_subselect::send_data"); + Item_maxmin_subselect *it= (Item_maxmin_subselect *)item; + List_iterator_fast<Item> li(items); + Item *val_item= li++; + it->register_value(); + if (it->assigned()) + { + cache->store(val_item); + if ((this->*op)()) + it->store(0, cache); + } else - ::send_ok(&thd->net,row_count); - file= -1; - return error; + { + if (!cache) + { + cache= Item_cache::get_cache(val_item->result_type()); + switch (val_item->result_type()) + { + case REAL_RESULT: + op= &select_max_min_finder_subselect::cmp_real; + break; + case INT_RESULT: + op= &select_max_min_finder_subselect::cmp_int; + break; + case STRING_RESULT: + op= &select_max_min_finder_subselect::cmp_str; + break; + case ROW_RESULT: + // This case should never be choosen + DBUG_ASSERT(0); + op= 0; + } + } + cache->store(val_item); + it->store(0, cache); + } + it->assigned(1); + DBUG_RETURN(0); +} + +bool select_max_min_finder_subselect::cmp_real() +{ + Item *maxmin= ((Item_singlerow_subselect *)item)->el(0); + double val1= cache->val(), 
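The export path in select_export::send_data() above walks each value byte by byte, but when the result's character set is multi-byte it asks my_ismbchar() for the character length and copies the whole character untouched, so continuation bytes are never mistaken for characters that need escaping. A toy escaper showing the shape of that loop (mb_char_length() and escape_tabs() are invented; the toy only knows a two-byte sequence introduced by 0xC3 and only escapes tabs):

#include <string>
#include <cstddef>
#include <cassert>

static int mb_char_length(const std::string &s, size_t pos)
{
  return ((unsigned char) s[pos] == 0xC3 && pos + 1 < s.size()) ? 2 : 0;
}

static std::string escape_tabs(const std::string &in)
{
  std::string out;
  for (size_t pos= 0; pos < in.size(); pos++)
  {
    int l= mb_char_length(in, pos);
    if (l)
    {
      out.append(in, pos, l);          // copy the multi-byte character whole
      pos+= l - 1;
      continue;
    }
    if (in[pos] == '\t')
      out+= '\\';                      // field terminator must be escaped
    out+= in[pos];
  }
  return out;
}

int main()
{
  assert(escape_tabs("a\tb") == "a\\\tb");        // 'a', '\\', '\t', 'b'
  assert(escape_tabs("\xC3\xA9") == "\xC3\xA9");  // two-byte char untouched
  return 0;
}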
val2= maxmin->val(); + if (fmax) + return (cache->null_value && !maxmin->null_value) || + (!cache->null_value && !maxmin->null_value && + val1 > val2); + else + return (maxmin->null_value && !cache->null_value) || + (!cache->null_value && !maxmin->null_value && + val1 < val2); +} + +bool select_max_min_finder_subselect::cmp_int() +{ + Item *maxmin= ((Item_singlerow_subselect *)item)->el(0); + longlong val1= cache->val_int(), val2= maxmin->val_int(); + if (fmax) + return (cache->null_value && !maxmin->null_value) || + (!cache->null_value && !maxmin->null_value && + val1 > val2); + else + return (maxmin->null_value && !cache->null_value) || + (!cache->null_value && !maxmin->null_value && + val1 < val2); +} + +bool select_max_min_finder_subselect::cmp_str() +{ + String *val1, *val2, buf1, buf2; + Item *maxmin= ((Item_singlerow_subselect *)item)->el(0); + /* + as far as both operand is Item_cache buf1 & buf2 will not be used, + but added for safety + */ + val1= cache->val_str(&buf1); + val2= maxmin->val_str(&buf1); + if (fmax) + return (cache->null_value && !maxmin->null_value) || + (!cache->null_value && !maxmin->null_value && + sortcmp(val1, val2, cache->collation.collation) > 0) ; + else + return (maxmin->null_value && !cache->null_value) || + (!cache->null_value && !maxmin->null_value && + sortcmp(val1, val2, cache->collation.collation) < 0); +} + +bool select_exists_subselect::send_data(List<Item> &items) +{ + DBUG_ENTER("select_exists_subselect::send_data"); + Item_exists_subselect *it= (Item_exists_subselect *)item; + if (unit->offset_limit_cnt) + { // Using limit offset,count + unit->offset_limit_cnt--; + DBUG_RETURN(0); + } + it->value= 1; + it->assigned(1); + DBUG_RETURN(0); +} + + +/*************************************************************************** + Dump of select to variables +***************************************************************************/ + +int select_dumpvar::prepare(List<Item> &list, SELECT_LEX_UNIT *u) +{ + List_iterator_fast<Item> li(list); + List_iterator_fast<LEX_STRING> gl(var_list); + Item *item; + LEX_STRING *ls; + if (var_list.elements != list.elements) + { + my_error(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT, MYF(0)); + return 1; + } + unit=u; + while ((item=li++)) + { + ls= gl++; + Item_func_set_user_var *xx = new Item_func_set_user_var(*ls,item); + /* + Item_func_set_user_var can't substitute something else on its place => + 0 can be passed as last argument (reference on item) + */ + xx->fix_fields(thd,(TABLE_LIST*) thd->lex->select_lex.table_list.first, + 0); + xx->fix_length_and_dec(); + vars.push_back(xx); + } + return 0; +} + + +void select_dumpvar::cleanup() +{ + vars.empty(); + row_count=0; +} + + +/* + Create arena for already constructed THD. + + SYNOPSYS + Item_arena() + thd - thread for which arena is created + + DESCRIPTION + Create arena for already existing THD using its variables as parameters + for memory root initialization. +*/ +Item_arena::Item_arena(THD* thd) + :free_list(0), mem_root(&main_mem_root), + state(INITIALIZED) +{ + init_sql_alloc(&main_mem_root, + thd->variables.query_alloc_block_size, + thd->variables.query_prealloc_size); +} + + +/* + Create arena and optionally initialize memory root. + + SYNOPSYS + Item_arena() + init_mem_root - whenever we need to initialize memory root + + DESCRIPTION + Create arena and optionally initialize memory root with minimal + possible parameters. 
+ + NOTE + We use this constructor when arena is part of THD, but reinitialize + its memory root in THD::init_for_queries() before execution of real + statements. +*/ +Item_arena::Item_arena(bool init_mem_root) + :free_list(0), mem_root(&main_mem_root), + state(CONVENTIONAL_EXECUTION) +{ + if (init_mem_root) + init_sql_alloc(&main_mem_root, ALLOC_ROOT_MIN_BLOCK_SIZE, 0); +} + + +Item_arena::Type Item_arena::type() const +{ + DBUG_ASSERT("Item_arena::type()" == "abstract"); + return STATEMENT; +} + + +/* + Statement functions +*/ + +Statement::Statement(THD *thd) + :Item_arena(thd), + id(++thd->statement_id_counter), + set_query_id(1), + allow_sum_func(0), + lex(&main_lex), + query(0), + query_length(0) +{ + name.str= NULL; +} + +/* + This constructor is called when statement is a subobject of THD: + Some variables are initialized in THD::init due to locking problems + This statement object will be used to +*/ + +Statement::Statement() + :Item_arena((bool)TRUE), + id(0), + set_query_id(1), + allow_sum_func(0), /* initialized later */ + lex(&main_lex), + query(0), /* these two are set */ + query_length(0) /* in alloc_query() */ +{ +} + + +Item_arena::Type Statement::type() const +{ + return STATEMENT; +} + + +void Statement::set_statement(Statement *stmt) +{ + id= stmt->id; + set_query_id= stmt->set_query_id; + allow_sum_func= stmt->allow_sum_func; + lex= stmt->lex; + query= stmt->query; + query_length= stmt->query_length; +} + + +void +Statement::set_n_backup_statement(Statement *stmt, Statement *backup) +{ + backup->set_statement(this); + set_statement(stmt); +} + + +void Statement::restore_backup_statement(Statement *stmt, Statement *backup) +{ + stmt->set_statement(this); + set_statement(backup); +} + + +void THD::end_statement() +{ + /* Cleanup SQL processing state to resuse this statement in next query. */ + lex_end(lex); + delete lex->result; + lex->result= 0; + free_items(free_list); + free_list= 0; + /* + Don't free mem_root, as mem_root is freed in the end of dispatch_command + (once for any command). + */ +} + + +void Item_arena::set_n_backup_item_arena(Item_arena *set, Item_arena *backup) +{ + DBUG_ENTER("Item_arena::set_n_backup_item_arena"); + backup->set_item_arena(this); + set_item_arena(set); + DBUG_VOID_RETURN; +} + + +void Item_arena::restore_backup_item_arena(Item_arena *set, Item_arena *backup) +{ + DBUG_ENTER("Item_arena::restore_backup_item_arena"); + set->set_item_arena(this); + set_item_arena(backup); +#ifdef NOT_NEEDED_NOW + /* + Reset backup mem_root to avoid its freeing. + Since Item_arena's mem_root is freed only when it is part of Statement + we need this only if we use some Statement's arena as backup storage. + But we do this only with THD::stmt_backup and this Statement is specially + handled in this respect. So this code is not really needed now. 
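    (A minimal usage sketch, not part of this change, of the backup/restore
    pair implemented above; the function and variable names here are
    invented purely for illustration.)

      void do_work_in_stmt_arena(THD *thd, Item_arena *stmt_arena)
      {
        Item_arena backup;                                   // bare shell, valid only as backup storage
        thd->set_n_backup_item_arena(stmt_arena, &backup);   // thd now allocates from stmt_arena
        // ... Items created here go to stmt_arena's mem_root/free_list ...
        thd->restore_backup_item_arena(stmt_arena, &backup); // write state back, restore thd
      }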
+ */ + clear_alloc_root(&backup->mem_root); +#endif + DBUG_VOID_RETURN; +} + +void Item_arena::set_item_arena(Item_arena *set) +{ + mem_root= set->mem_root; + free_list= set->free_list; + state= set->state; +} + +Statement::~Statement() +{ + free_root(&main_mem_root, MYF(0)); +} + +C_MODE_START + +static byte * +get_statement_id_as_hash_key(const byte *record, uint *key_length, + my_bool not_used __attribute__((unused))) +{ + const Statement *statement= (const Statement *) record; + *key_length= sizeof(statement->id); + return (byte *) &((const Statement *) statement)->id; +} + +static void delete_statement_as_hash_key(void *key) +{ + delete (Statement *) key; +} + +static byte *get_stmt_name_hash_key(Statement *entry, uint *length, + my_bool not_used __attribute__((unused))) +{ + *length=(uint) entry->name.length; + return (byte*) entry->name.str; +} + +C_MODE_END + +Statement_map::Statement_map() : + last_found_statement(0) +{ + enum + { + START_STMT_HASH_SIZE = 16, + START_NAME_HASH_SIZE = 16 + }; + hash_init(&st_hash, &my_charset_bin, START_STMT_HASH_SIZE, 0, 0, + get_statement_id_as_hash_key, + delete_statement_as_hash_key, MYF(0)); + hash_init(&names_hash, system_charset_info, START_NAME_HASH_SIZE, 0, 0, + (hash_get_key) get_stmt_name_hash_key, + NULL,MYF(0)); +} + +/* + Insert a new statement to the thread-local statement map. + + DESCRIPTION + If there was an old statement with the same name, replace it with the + new one. Otherwise, check if max_prepared_stmt_count is not reached yet, + increase prepared_stmt_count, and insert the new statement. It's okay + to delete an old statement and fail to insert the new one. + + POSTCONDITIONS + All named prepared statements are also present in names_hash. + Statement names in names_hash are unique. + The statement is added only if prepared_stmt_count < max_prepard_stmt_count + last_found_statement always points to a valid statement or is 0 + + RETURN VALUE + 0 success + 1 error: out of resources or max_prepared_stmt_count limit has been + reached. An error is sent to the client, the statement is deleted. +*/ + +int Statement_map::insert(THD *thd, Statement *statement) +{ + if (my_hash_insert(&st_hash, (byte*) statement)) + { + /* + Delete is needed only in case of an insert failure. In all other + cases hash_delete will also delete the statement. + */ + delete statement; + my_error(ER_OUT_OF_RESOURCES, MYF(0)); + goto err_st_hash; + } + if (statement->name.str) + { + /* + If there is a statement with the same name, remove it. It is ok to + remove old and fail to insert new one at the same time. + */ + Statement *old_stmt; + if ((old_stmt= find_by_name(&statement->name))) + erase(old_stmt); + if (my_hash_insert(&names_hash, (byte*) statement)) + { + my_error(ER_OUT_OF_RESOURCES, MYF(0)); + goto err_names_hash; + } + } + pthread_mutex_lock(&LOCK_prepared_stmt_count); + /* + We don't check that prepared_stmt_count is <= max_prepared_stmt_count + because we would like to allow to lower the total limit + of prepared statements below the current count. In that case + no new statements can be added until prepared_stmt_count drops below + the limit. 
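    For example (numbers invented, assuming max_prepared_stmt_count is the
    run-time settable global that this check reads):

      prepared_stmt_count == 5
      SET GLOBAL max_prepared_stmt_count = 2;    -- allowed
      PREPARE s6 FROM 'SELECT 1';                -- rejected by the check below
      DEALLOCATE PREPARE s1; ...; DEALLOCATE PREPARE s4;
      PREPARE s6 FROM 'SELECT 1';                -- succeeds, count becomes 2

    Existing statements are never deleted behind the user's back when the
    limit is lowered.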
+ */ + if (prepared_stmt_count >= max_prepared_stmt_count) + { + pthread_mutex_unlock(&LOCK_prepared_stmt_count); + my_error(ER_UNKNOWN_ERROR, MYF(0)); + goto err_max; + } + prepared_stmt_count++; + pthread_mutex_unlock(&LOCK_prepared_stmt_count); + + last_found_statement= statement; + return 0; + +err_max: + if (statement->name.str) + hash_delete(&names_hash, (byte*) statement); +err_names_hash: + hash_delete(&st_hash, (byte*) statement); +err_st_hash: + send_error(thd); + return 1; +} + + +void Statement_map::erase(Statement *statement) +{ + if (statement == last_found_statement) + last_found_statement= 0; + if (statement->name.str) + { + hash_delete(&names_hash, (byte *) statement); + } + hash_delete(&st_hash, (byte *) statement); + pthread_mutex_lock(&LOCK_prepared_stmt_count); + DBUG_ASSERT(prepared_stmt_count > 0); + prepared_stmt_count--; + pthread_mutex_unlock(&LOCK_prepared_stmt_count); +} + + +void Statement_map::reset() +{ + /* Must be first, hash_free will reset st_hash.records */ + pthread_mutex_lock(&LOCK_prepared_stmt_count); + DBUG_ASSERT(prepared_stmt_count >= st_hash.records); + prepared_stmt_count-= st_hash.records; + pthread_mutex_unlock(&LOCK_prepared_stmt_count); + + my_hash_reset(&names_hash); + my_hash_reset(&st_hash); + last_found_statement= 0; +} + + +Statement_map::~Statement_map() +{ + /* Must go first, hash_free will reset st_hash.records */ + pthread_mutex_lock(&LOCK_prepared_stmt_count); + DBUG_ASSERT(prepared_stmt_count >= st_hash.records); + prepared_stmt_count-= st_hash.records; + pthread_mutex_unlock(&LOCK_prepared_stmt_count); + + hash_free(&names_hash); + hash_free(&st_hash); + +} + +bool select_dumpvar::send_data(List<Item> &items) +{ + List_iterator_fast<Item_func_set_user_var> li(vars); + Item_func_set_user_var *xx; + DBUG_ENTER("send_data"); + + if (unit->offset_limit_cnt) + { // Using limit offset,count + unit->offset_limit_cnt--; + DBUG_RETURN(0); + } + if (row_count++) + { + my_error(ER_TOO_MANY_ROWS, MYF(0)); + DBUG_RETURN(1); + } + while ((xx=li++)) + { + xx->check(); + xx->update(); + } + DBUG_RETURN(0); +} + +bool select_dumpvar::send_eof() +{ + if (row_count) + { + ::send_ok(thd,row_count); + return 0; + } + else + { + my_error(ER_EMPTY_QUERY,MYF(0)); + return 1; + } +} + +/**************************************************************************** + TMP_TABLE_PARAM +****************************************************************************/ + +void TMP_TABLE_PARAM::init() +{ + DBUG_ENTER("TMP_TABLE_PARAM::init"); + DBUG_PRINT("enter", ("this: 0x%lx", (ulong)this)); + field_count= sum_func_count= func_count= hidden_field_count= 0; + group_parts= group_length= group_null_parts= 0; + quick_group= 1; + DBUG_VOID_RETURN; } diff --git a/sql/sql_class.h b/sql/sql_class.h index 17d371d3dc0..d482a524934 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -17,7 +17,7 @@ /* Classes in mysql */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_INTERFACE #pragma interface /* gcc class implementation */ #endif @@ -29,11 +29,16 @@ class Slave_log_event; enum enum_enable_or_disable { LEAVE_AS_IS, ENABLE, DISABLE }; enum enum_ha_read_modes { RFIRST, RNEXT, RPREV, RLAST, RKEY, RNEXT_SAME }; -enum enum_duplicates { DUP_ERROR, DUP_REPLACE, DUP_IGNORE }; +enum enum_duplicates { DUP_ERROR, DUP_REPLACE, DUP_UPDATE }; enum enum_log_type { LOG_CLOSED, LOG_TO_BE_OPENED, LOG_NORMAL, LOG_NEW, LOG_BIN}; enum enum_delay_key_write { DELAY_KEY_WRITE_NONE, DELAY_KEY_WRITE_ON, DELAY_KEY_WRITE_ALL }; +enum enum_check_fields { CHECK_FIELD_IGNORE, CHECK_FIELD_WARN, + 
CHECK_FIELD_ERROR_FOR_NULL }; + +extern char internal_table_name[2]; + /* log info errors */ #define LOG_INFO_EOF -1 #define LOG_INFO_IO -2 @@ -61,6 +66,15 @@ typedef struct st_log_info ~st_log_info() { pthread_mutex_destroy(&lock);} } LOG_INFO; +typedef struct st_user_var_events +{ + user_var_entry *user_var_event; + char *value; + ulong length; + Item_result type; + uint charset_number; +} BINLOG_USER_VAR_EVENT; + class Log_event; class MYSQL_LOG @@ -79,16 +93,11 @@ class MYSQL_LOG // current file sequence number for load data infile binary logging uint file_id; uint open_count; // For replication - /* - For binlog - if log name can never change we should not try to rotate it - or write any rotation events. The user should use FLUSH MASTER instead - of FLUSH LOGS for purging. - */ volatile enum_log_type log_type; enum cache_type io_cache_type; bool write_error, inited; bool need_start_event; - bool no_auto_events; // for relay binlog + bool no_auto_events; // For relay binlog /* The max size before rotation (usable only if log_type == LOG_BIN: binary logs and relay logs). @@ -122,7 +131,7 @@ public: DBUG_VOID_RETURN; } void set_max_size(ulong max_size_arg); - void signal_update() { pthread_cond_broadcast(&update_cond);} + void signal_update(); void wait_for_update(THD* thd, bool master_or_slave); void set_need_start_event() { need_start_event = 1; } void init(enum_log_type log_type_arg, @@ -152,10 +161,16 @@ public: int generate_new_name(char *new_name,const char *old_name); void make_log_name(char* buf, const char* log_ident); bool is_active(const char* log_file_name); - int purge_logs(THD* thd, const char* to_log); - int purge_first_log(struct st_relay_log_info* rli); + int update_log_index(LOG_INFO* linfo, bool need_update_threads); + int purge_logs(const char *to_log, bool included, + bool need_mutex, bool need_update_threads, + ulonglong *decrease_log_space); + int purge_logs_before_date(time_t purge_time); + int purge_first_log(struct st_relay_log_info* rli, bool included); bool reset_logs(THD* thd); void close(uint exiting); + bool cut_spurious_tail(); + void report_pos_in_innodb(); // iterating through the log index file int find_log_pos(LOG_INFO* linfo, const char* log_name, @@ -177,34 +192,19 @@ public: /* character conversion tables */ -class CONVERT; -CONVERT *get_convert_set(const char *name_ptr); - -class CONVERT -{ - const uchar *from_map,*to_map; - void convert_array(const uchar *mapping,uchar *buff,uint length); -public: - const char *name; - uint numb; - CONVERT(const char *name_par,uchar *from_par,uchar *to_par, uint number) - :from_map(from_par),to_map(to_par),name(name_par),numb(number) {} - friend CONVERT *get_convert_set(const char *name_ptr); - inline void convert(char *a,uint length) - { - convert_array(from_map, (uchar*) a,length); - } - bool store(String *, const char *,uint); - inline uint number() { return numb; } -}; typedef struct st_copy_info { ha_rows records; ha_rows deleted; + ha_rows updated; ha_rows copied; ha_rows error_count; enum enum_duplicates handle_duplicates; int escape_char, last_errno; + bool ignore; + /* for INSERT ... 
UPDATE */ + List<Item> *update_fields; + List<Item> *update_values; } COPY_INFO; @@ -213,6 +213,7 @@ public: const char *field_name; uint length; key_part_spec(const char *name,uint len=0) :field_name(name), length(len) {} + bool operator==(const key_part_spec& other) const; }; @@ -237,19 +238,44 @@ public: class Key :public Sql_alloc { public: - enum Keytype { PRIMARY, UNIQUE, MULTIPLE, FULLTEXT }; + enum Keytype { PRIMARY, UNIQUE, MULTIPLE, FULLTEXT, SPATIAL, FOREIGN_KEY}; enum Keytype type; enum ha_key_alg algorithm; List<key_part_spec> columns; - const char *Name; + const char *name; + bool generated; - Key(enum Keytype type_par,const char *name_arg,List<key_part_spec> &cols) - :type(type_par), algorithm(HA_KEY_ALG_UNDEF), columns(cols), Name(name_arg) + Key(enum Keytype type_par, const char *name_arg, enum ha_key_alg alg_par, + bool generated_arg, List<key_part_spec> &cols) + :type(type_par), algorithm(alg_par), columns(cols), name(name_arg), + generated(generated_arg) {} ~Key() {} - const char *name() { return Name; } + /* Equality comparison of keys (ignoring name) */ + friend bool foreign_key_prefix(Key *a, Key *b); }; +class Table_ident; + +class foreign_key: public Key { +public: + enum fk_match_opt { FK_MATCH_UNDEF, FK_MATCH_FULL, + FK_MATCH_PARTIAL, FK_MATCH_SIMPLE}; + enum fk_option { FK_OPTION_UNDEF, FK_OPTION_RESTRICT, FK_OPTION_CASCADE, + FK_OPTION_SET_NULL, FK_OPTION_NO_ACTION, FK_OPTION_DEFAULT}; + + Table_ident *ref_table; + List<key_part_spec> ref_columns; + uint delete_opt, update_opt, match_opt; + foreign_key(const char *name_arg, List<key_part_spec> &cols, + Table_ident *table, List<key_part_spec> &ref_cols, + uint delete_opt_arg, uint update_opt_arg, uint match_opt_arg) + :Key(FOREIGN_KEY, name_arg, HA_KEY_ALG_UNDEF, 0, cols), + ref_table(table), ref_columns(cols), + delete_opt(delete_opt_arg), update_opt(update_opt_arg), + match_opt(match_opt_arg) + {} +}; typedef struct st_mysql_lock { @@ -269,8 +295,8 @@ public: #include "sql_lex.h" /* Must be here */ -// needed to be able to have an I_List of char* strings.in mysqld.cc where we cannot use String -// because it is Sql_alloc'ed +/* Needed to be able to have an I_List of char* strings in mysqld.cc. 
*/ + class i_string: public ilink { public: @@ -279,7 +305,7 @@ public: i_string(char* s) : ptr(s) {} }; -//needed for linked list of two strings for replicate-rewrite-db +/* needed for linked list of two strings for replicate-rewrite-db */ class i_string_pair: public ilink { public: @@ -290,7 +316,29 @@ public: }; +class MYSQL_ERROR: public Sql_alloc +{ +public: + enum enum_warning_level + { WARN_LEVEL_NOTE, WARN_LEVEL_WARN, WARN_LEVEL_ERROR, WARN_LEVEL_END}; + + uint code; + enum_warning_level level; + char *msg; + + MYSQL_ERROR(THD *thd, uint code_arg, enum_warning_level level_arg, + const char *msg_arg) + :code(code_arg), level(level_arg) + { + if (msg_arg) + set_msg(thd, msg_arg); + } + void set_msg(THD *thd, const char *msg_arg); +}; + + class delayed_insert; +class select_result; #define THD_SENTRY_MAGIC 0xfeedd1ff #define THD_SENTRY_GONE 0xdeadbeef @@ -307,25 +355,31 @@ struct system_variables ulong join_buff_size; ulong long_query_time; ulong max_allowed_packet; + ulong max_error_count; ulong max_heap_table_size; + ulong max_length_for_sort_data; ulong max_sort_length; ulong max_tmp_tables; ulong max_insert_delayed_threads; ulong myisam_repair_threads; ulong myisam_sort_buff_size; + ulong myisam_stats_method; ulong net_buffer_length; ulong net_interactive_timeout; ulong net_read_timeout; + ulong net_retry_count; ulong net_wait_timeout; ulong net_write_timeout; - ulong net_retry_count; + ulong preload_buff_size; ulong query_cache_type; ulong read_buff_size; ulong read_rnd_buff_size; ulong sortbuff_size; + ulong table_type; ulong tmp_table_size; ulong tx_isolation; - ulong table_type; + /* Determines which non-standard SQL behaviour should be enabled */ + ulong sql_mode; ulong default_week_format; ulong max_seeks_for_key; ulong range_alloc_block_size; @@ -334,36 +388,182 @@ struct system_variables ulong trans_alloc_block_size; ulong trans_prealloc_size; ulong log_warnings; + ulong group_concat_max_len; + /* + In slave thread we need to know in behalf of which + thread the query is being run to replicate temp tables properly + */ + ulong pseudo_thread_id; my_bool low_priority_updates; my_bool new_mode; my_bool query_cache_wlock_invalidate; +#ifdef HAVE_REPLICATION + ulong sync_replication; + ulong sync_replication_slave_id; + ulong sync_replication_timeout; +#endif /* HAVE_REPLICATION */ #ifdef HAVE_INNOBASE_DB my_bool innodb_table_locks; #endif /* HAVE_INNOBASE_DB */ +#ifdef HAVE_NDBCLUSTER_DB + ulong ndb_autoincrement_prefetch_sz; + my_bool ndb_force_send; + my_bool ndb_use_exact_count; + my_bool ndb_use_transactions; +#endif /* HAVE_NDBCLUSTER_DB */ + my_bool old_passwords; + + /* Only charset part of these variables is sensible */ + CHARSET_INFO *character_set_client; + CHARSET_INFO *character_set_results; + + /* Both charset and collation parts of these variables are important */ + CHARSET_INFO *collation_server; + CHARSET_INFO *collation_database; + CHARSET_INFO *collation_connection; - CONVERT *convert_set; + Time_zone *time_zone; + + /* DATE, DATETIME and TIME formats */ + DATE_TIME_FORMAT *date_format; + DATE_TIME_FORMAT *datetime_format; + DATE_TIME_FORMAT *time_format; +}; + +void free_tmp_table(THD *thd, TABLE *entry); + + +class Item_arena +{ +public: + /* + List of items created in the parser for this query. 
Every item puts + itself to the list on creation (see Item::Item() for details)) + */ + Item *free_list; + MEM_ROOT main_mem_root; + MEM_ROOT *mem_root; // Pointer to current memroot + enum enum_state + { + INITIALIZED= 0, PREPARED= 1, EXECUTED= 3, CONVENTIONAL_EXECUTION= 2, + ERROR= -1 + }; + + enum_state state; + + /* We build without RTTI, so dynamic_cast can't be used. */ + enum Type + { + STATEMENT, PREPARED_STATEMENT, STORED_PROCEDURE + }; + + /* + This constructor is used only when Item_arena is created as + backup storage for another instance of Item_arena. + */ + Item_arena() {}; + /* + Create arena for already constructed THD using its variables as + parameters for memory root initialization. + */ + Item_arena(THD *thd); + /* + Create arena and optionally init memory root with minimal values. + Particularly used if Item_arena is part of Statement. + */ + Item_arena(bool init_mem_root); + virtual Type type() const; + virtual ~Item_arena() {}; + + inline bool is_stmt_prepare() const { return (int)state < (int)PREPARED; } + inline bool is_first_stmt_execute() const { return state == PREPARED; } + inline bool is_stmt_execute() const + { return state == PREPARED || state == EXECUTED; } + inline bool is_conventional_execution() const + { return state == CONVENTIONAL_EXECUTION; } + inline gptr alloc(unsigned int size) { return alloc_root(mem_root,size); } + inline gptr calloc(unsigned int size) + { + gptr ptr; + if ((ptr=alloc_root(mem_root,size))) + bzero((char*) ptr,size); + return ptr; + } + inline char *strdup(const char *str) + { return strdup_root(mem_root,str); } + inline char *strmake(const char *str, uint size) + { return strmake_root(mem_root,str,size); } + inline char *memdup(const char *str, uint size) + { return memdup_root(mem_root,str,size); } + inline char *memdup_w_gap(const char *str, uint size, uint gap) + { + gptr ptr; + if ((ptr=alloc_root(mem_root,size+gap))) + memcpy(ptr,str,size); + return ptr; + } + + void set_n_backup_item_arena(Item_arena *set, Item_arena *backup); + void restore_backup_item_arena(Item_arena *set, Item_arena *backup); + void set_item_arena(Item_arena *set); }; /* - For each client connection we create a separate thread with THD serving as - a thread/connection descriptor + State of a single command executed against this connection. + One connection can contain a lot of simultaneously running statements, + some of which could be: + - prepared, that is, contain placeholders, + - opened as cursors. We maintain 1 to 1 relationship between + statement and cursor - if user wants to create another cursor for his + query, we create another statement for it. + To perform some action with statement we reset THD part to the state of + that statement, do the action, and then save back modified state from THD + to the statement. It will be changed in near future, and Statement will + be used explicitly. 
*/ -class THD :public ilink +class Statement: public Item_arena { + Statement(const Statement &rhs); /* not implemented: */ + Statement &operator=(const Statement &rhs); /* non-copyable */ public: - NET net; // client connection descriptor - LEX lex; // parse tree descriptor - MEM_ROOT mem_root; // 1 command-life memory pool - HASH user_vars; // hash for user variables - String packet; // dynamic buffer for network I/O - struct sockaddr_in remote; // client socket address - struct rand_struct rand; // used for authentication - struct system_variables variables; // Changeable local variables - pthread_mutex_t LOCK_delete; // Locked before thd is deleted - /* + /* FIXME: must be private */ + LEX main_lex; + + /* + Uniquely identifies each statement object in thread scope; change during + statement lifetime. FIXME: must be const + */ + ulong id; + + /* + - if set_query_id=1, we set field->query_id for all fields. In that case + field list can not contain duplicates. + */ + bool set_query_id; + /* + This variable is used in post-parse stage to declare that sum-functions, + or functions which have sense only if GROUP BY is present, are allowed. + For example in queries + SELECT MIN(i) FROM foo + SELECT GROUP_CONCAT(a, b, MIN(i)) FROM ... GROUP BY ... + MIN(i) have no sense. + Though it's grammar-related issue, it's hard to catch it out during the + parse stage because GROUP BY clause goes in the end of query. This + variable is mainly used in setup_fields/fix_fields. + See item_sum.cc for details. + */ + bool allow_sum_func; + + LEX_STRING name; /* name for named prepared statements */ + LEX *lex; // parse tree descriptor + /* + Points to the query associated with this statement. It's const, but + we need to declare it char * because all table handlers are written + in C and need to point to it. + Note that (A) if we set query = NULL, we must at the same time set query_length = 0, and protect the whole operation with the LOCK_thread_count mutex. And (B) we are ONLY allowed to set query to a @@ -381,7 +581,127 @@ public: This printing is needed at least in SHOW PROCESSLIST and SHOW INNODB STATUS. */ - char *query; // Points to the current query, + char *query; + uint32 query_length; // current query length + +public: + + /* + This constructor is called when statement is a subobject of THD: + some variables are initialized in THD::init due to locking problems + */ + Statement(); + + Statement(THD *thd); + virtual ~Statement(); + + /* Assign execution context (note: not all members) of given stmt to self */ + void set_statement(Statement *stmt); + void set_n_backup_statement(Statement *stmt, Statement *backup); + void restore_backup_statement(Statement *stmt, Statement *backup); + /* return class type */ + virtual Type type() const; +}; + + +/* + Container for all statements created/used in a connection. + Statements in Statement_map have unique Statement::id (guaranteed by id + assignment in Statement::Statement) + Non-empty statement names are unique too: attempt to insert a new statement + with duplicate name causes older statement to be deleted + + Statements are auto-deleted when they are removed from the map and when the + map is deleted. 
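  A short sketch of how the container declared below is expected to be
  driven (error handling trimmed; stmt_lifecycle_sketch is an invented name,
  and thd->stmt_map refers to the THD member declared further down):

    void stmt_lifecycle_sketch(THD *thd, Statement *stmt)
    {
      if (thd->stmt_map.insert(thd, stmt))
        return;                              // error already sent, stmt already deleted
      // find() looks up by id; named statements go through find_by_name()
      if (Statement *found= thd->stmt_map.find(stmt->id))
      {
        // ... swap `found` into the THD and execute it ...
      }
      thd->stmt_map.erase(stmt);             // removing it also destroys the Statement
    }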
+*/ + +class Statement_map +{ +public: + Statement_map(); + + int insert(THD *thd, Statement *statement); + + Statement *find_by_name(LEX_STRING *name) + { + Statement *stmt; + stmt= (Statement*)hash_search(&names_hash, (byte*)name->str, + name->length); + return stmt; + } + + Statement *find(ulong id) + { + if (last_found_statement == 0 || id != last_found_statement->id) + { + Statement *stmt; + stmt= (Statement *) hash_search(&st_hash, (byte *) &id, sizeof(id)); + if (stmt && stmt->name.str) + return NULL; + last_found_statement= stmt; + } + return last_found_statement; + } + void erase(Statement *statement); + /* Erase all statements (calls Statement destructor) */ + void reset(); + ~Statement_map(); +private: + HASH st_hash; + HASH names_hash; + Statement *last_found_statement; +}; + + +/* + A registry for item tree transformations performed during + query optimization. We register only those changes which require + a rollback to re-execute a prepared statement or stored procedure + yet another time. +*/ + +struct Item_change_record; +typedef I_List<Item_change_record> Item_change_list; + + +/* + For each client connection we create a separate thread with THD serving as + a thread/connection descriptor +*/ + +class THD :public ilink, + public Statement +{ +public: +#ifdef EMBEDDED_LIBRARY + struct st_mysql *mysql; + struct st_mysql_data *data; + unsigned long client_stmt_id; + unsigned long client_param_count; + struct st_mysql_bind *client_params; + char *extra_data; + ulong extra_length; + String query_rest; +#endif + NET net; // client connection descriptor + MEM_ROOT warn_root; // For warnings and errors + Protocol *protocol; // Current protocol + Protocol_simple protocol_simple; // Normal protocol + Protocol_prep protocol_prep; // Binary protocol + HASH user_vars; // hash for user variables + String packet; // dynamic buffer for network I/O + String convert_buffer; // buffer for charset conversions + struct sockaddr_in remote; // client socket address + struct rand_struct rand; // used for authentication + struct system_variables variables; // Changeable local variables + pthread_mutex_t LOCK_delete; // Locked before thd is deleted + /* all prepared statements and cursors of this connection */ + Statement_map stmt_map; + /* + keeps THD state while it is used for active statement + Note: we perform special cleanup for it in THD destructor. + */ + Statement stmt_backup; /* A pointer to the stack frame of handle_one_connection(), which is called first in the thread for handling a client @@ -392,8 +712,7 @@ public: host - host of the client user - user of the client, set to NULL until the user has been read from the connection - priv_user - not sure why we have it, but it is set to "boot" when we run - with --bootstrap + priv_user - The user privilege we are using. May be '' for anonymous user. 
db - currently selected database ip - client IP */ @@ -410,9 +729,7 @@ public: /* points to host if host is available, otherwise points to ip */ const char *host_or_ip; - uint client_capabilities; /* What the client supports */ - /* Determines if which non-standard SQL behaviour should be enabled */ - uint sql_mode; + ulong client_capabilities; /* What the client supports */ ulong max_client_packet_length; ulong master_access; /* Global privileges from mysql.user */ ulong db_access; /* Privileges for current db */ @@ -423,15 +740,37 @@ public: handler_tables - list of tables that were opened with HANDLER OPEN and are still in use by this thread */ - TABLE *open_tables,*temporary_tables, *handler_tables; - HASH handler_tables_hash; - // TODO: document the variables below - MYSQL_LOCK *lock,*locked_tables; - ULL *ull; + TABLE *open_tables,*temporary_tables, *handler_tables, *derived_tables; + /* + During a MySQL session, one can lock tables in two modes: automatic + or manual. In automatic mode all necessary tables are locked just before + statement execution, and all acquired locks are stored in 'lock' + member. Unlocking takes place automatically as well, when the + statement ends. + Manual mode comes into play when a user issues a 'LOCK TABLES' + statement. In this mode the user can only use the locked tables. + Trying to use any other tables will give an error. The locked tables are + stored in 'locked_tables' member. Manual locking is described in + the 'LOCK_TABLES' chapter of the MySQL manual. + See also lock_tables() for details. + */ + MYSQL_LOCK *lock; /* Current locks */ + MYSQL_LOCK *locked_tables; /* Tables locked with LOCK */ + HASH handler_tables_hash; + /* + One thread can hold up to one named user-level lock. This variable + points to a lock object if the lock is present. See item_func.cc and + chapter 'Miscellaneous functions', for functions GET_LOCK, RELEASE_LOCK. + */ + User_level_lock *ull; #ifndef DBUG_OFF uint dbug_sentry; // watch out for memory corruption -#endif +#endif struct st_my_thread_var *mysys_var; + /* + Type of current query: COM_PREPARE, COM_QUERY, etc. Set from + first byte of the packet in do_command() + */ enum enum_server_command command; uint32 server_id; uint32 file_id; // for LOAD DATA INFILE @@ -445,15 +784,19 @@ public: time_t connect_time,thr_create_time; // track down slow pthread_create thr_lock_type update_lock_default; delayed_insert *di; + my_bool tablespace_op; /* This is TRUE in DISCARD/IMPORT TABLESPACE */ struct st_transactions { - IO_CACHE trans_log; + IO_CACHE trans_log; // Inited ONLY if binlog is open ! THD_TRANS all; // Trans since BEGIN WORK THD_TRANS stmt; // Trans for current statement uint bdb_lock_count; - - /* +#ifdef HAVE_NDBCLUSTER_DB + void* thd_ndb; +#endif + bool on; + /* Tables changed in transaction (that must be invalidated in query cache). - List contain only transactional tables, that not invalidated in query + List contain only transactional tables, that not invalidated in query cache (instead of full list of changed in transaction tables). 
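    For example (table names and engine choice invented for illustration):

      BEGIN;
      UPDATE myisam_t SET a=1;   -- non-transactional: query cache entries
                                 --  that use myisam_t are invalidated at once
      UPDATE innodb_t SET a=1;   -- transactional: innodb_t is added to this
                                 --  list instead
      COMMIT;                    -- entries that use innodb_t are invalidated now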
*/ CHANGED_TABLE_LIST* changed_tables; @@ -464,62 +807,129 @@ public: free_root(&mem_root,MYF(MY_KEEP_PREALLOC)); } } transaction; - Item *free_list, *handler_items; Field *dupp_field; #ifndef __WIN__ sigset_t signals,block_signals; #endif #ifdef SIGNAL_WITH_VIO_CLOSE Vio* active_vio; -#endif - ulonglong next_insert_id,last_insert_id,current_insert_id, - limit_found_rows; - ha_rows select_limit, offset_limit, cuted_fields, +#endif + /* + This is to track items changed during execution of a prepared + statement/stored procedure. It's created by + register_item_tree_change() in memory root of THD, and freed in + rollback_item_tree_changes(). For conventional execution it's always 0. + */ + Item_change_list change_list; + + /* + Current prepared Item_arena if there one, or 0 + */ + Item_arena *current_arena; + /* + next_insert_id is set on SET INSERT_ID= #. This is used as the next + generated auto_increment value in handler.cc + */ + ulonglong next_insert_id; + /* + The insert_id used for the last statement or set by SET LAST_INSERT_ID=# + or SELECT LAST_INSERT_ID(#). Used for binary log and returned by + LAST_INSERT_ID() + */ + ulonglong last_insert_id; + /* + Set to the first value that LAST_INSERT_ID() returned for the last + statement. When this is set, last_insert_id_used is set to true. + */ + ulonglong current_insert_id; + ulonglong limit_found_rows; + ha_rows cuted_fields, sent_row_count, examined_row_count; table_map used_tables; USER_CONN *user_connect; - ulong query_id,version, options,thread_id, col_access; + CHARSET_INFO *db_charset; + List<TABLE> temporary_tables_should_be_free; // list of temporary tables + /* + FIXME: this, and some other variables like 'count_cuted_fields' + maybe should be statement/cursor local, that is, moved to Statement + class. With current implementation warnings produced in each prepared + statement/cursor settle here. + */ + List <MYSQL_ERROR> warn_list; + uint warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_END]; + uint total_warn_count; + /* + Id of current query. Statement can be reused to execute several queries + query_id is global in context of the whole MySQL server. + ID is automatically generated from mutex-protected counter. + It's used in handler code for various purposes: to check which columns + from table are necessary for this select, to check if it's necessary to + update auto-updatable fields (like auto_increment and timestamp). + */ + ulong query_id; + ulong warn_id, version, options, thread_id, col_access; + + /* Statement id is thread-wide. 
This counter is used to generate ids */ + ulong statement_id_counter; ulong rand_saved_seed1, rand_saved_seed2; + ulong row_count; // Row counter, mainly for errors and warnings long dbug_thread_id; pthread_t real_id; - uint current_tablenr,tmp_table,cond_count,global_read_lock; + uint current_tablenr,tmp_table,global_read_lock; uint server_status,open_options,system_thread; - uint32 query_length; uint32 db_length; + uint select_number; //number of select (used for EXPLAIN) /* variables.transaction_isolation is reset to this after each commit */ enum_tx_isolation session_tx_isolation; - char scramble[9]; - bool slave_thread; - bool set_query_id,locked,count_cuted_fields,some_tables_deleted; - bool no_errors, allow_sum_func, password, fatal_error; + enum_check_fields count_cuted_fields; + /* for user variables replication*/ + DYNAMIC_ARRAY user_var_events; + + /* scramble - random string sent to client on handshake */ + char scramble[SCRAMBLE_LENGTH+1]; + + bool slave_thread, one_shot_set; + bool locked, some_tables_deleted; + bool last_cuted_field; + bool no_errors, password, is_fatal_error; bool query_start_used,last_insert_id_used,insert_id_used,rand_used; + bool time_zone_used; bool in_lock_tables; bool query_error, bootstrap, cleanup_done; - bool safe_to_cache_query; - bool volatile killed; + bool tmp_table_used; + bool charset_is_system_charset, charset_is_collation_connection; + bool enable_slow_log; /* enable slow log for current statement */ + my_bool volatile killed; + /* If we do a purge of binary logs, log index info of the threads that are currently reading it needs to be adjusted. To do that each thread that is using LOG_INFO needs to adjust the pointer to it */ LOG_INFO* current_linfo; - /* - In slave thread we need to know in behalf of which - thread the query is being run to replicate temp tables properly - */ - ulong slave_proxy_id; NET* slave_net; // network connection from slave -> m. - /* Used by the sys_var class to store temporary values */ union { my_bool my_bool_value; long long_value; + ulong ulong_value; } sys_var_tmp; THD(); ~THD(); + void init(void); + /* + Initialize memory roots necessary for query processing and (!) + pre-allocate memory for it. We can't do that in THD constructor because + there are use cases (acl_init, delayed inserts, watcher threads, + killing mysqld) where it's vital to not allocate excessive and not used + memory. Note, that we still don't return error from init_for_queries(): + if preallocation fails, we should notice that at the first call to + alloc_root. 
+ */ + void init_for_queries(); void change_user(void); void cleanup(void); bool store_globals(); @@ -574,8 +984,11 @@ public: inline void end_time() { time(&start_time); } inline void set_time(time_t t) { time_after_lock=start_time=user_time=t; } inline void lock_time() { time(&time_after_lock); } - inline void insert_id(ulonglong id) - { last_insert_id=id; insert_id_used=1; } + inline void insert_id(ulonglong id_arg) + { + last_insert_id= id_arg; + insert_id_used=1; + } inline ulonglong insert_id(void) { if (!last_insert_id_used) @@ -593,58 +1006,86 @@ public: { #ifdef USING_TRANSACTIONS return (transaction.all.bdb_tid != 0 || - transaction.all.innodb_active_trans != 0); + transaction.all.innodb_active_trans != 0 || + transaction.all.ndb_tid != 0); #else return 0; #endif } - inline gptr alloc(unsigned int size) { return alloc_root(&mem_root,size); } - inline gptr calloc(unsigned int size) - { - gptr ptr; - if ((ptr=alloc_root(&mem_root,size))) - bzero((char*) ptr,size); - return ptr; - } - inline char *strdup(const char *str) - { return strdup_root(&mem_root,str); } - inline char *strmake(const char *str, uint size) - { return strmake_root(&mem_root,str,size); } - inline char *memdup(const char *str, uint size) - { return memdup_root(&mem_root,str,size); } - inline char *memdup_w_gap(const char *str, uint size, uint gap) - { - gptr ptr; - if ((ptr=alloc_root(&mem_root,size+gap))) - memcpy(ptr,str,size); - return ptr; - } inline gptr trans_alloc(unsigned int size) { return alloc_root(&transaction.mem_root,size); } + + bool convert_string(LEX_STRING *to, CHARSET_INFO *to_cs, + const char *from, uint from_length, + CHARSET_INFO *from_cs); + + bool convert_string(String *s, CHARSET_INFO *from_cs, CHARSET_INFO *to_cs); + void add_changed_table(TABLE *table); void add_changed_table(const char *key, long key_length); CHANGED_TABLE_LIST * changed_table_dup(const char *key, long key_length); + int send_explain_fields(select_result *result); #ifndef EMBEDDED_LIBRARY inline void clear_error() { net.last_error[0]= 0; net.last_errno= 0; + net.report_error= 0; } + inline bool vio_ok() const { return net.vio != 0; } #else void clear_error(); + inline bool vio_ok() const { return true; } #endif + inline void fatal_error() + { + is_fatal_error= 1; + net.report_error= 1; + DBUG_PRINT("error",("Fatal error set")); + } + inline CHARSET_INFO *charset() { return variables.character_set_client; } + void update_charset(); + + inline Item_arena *change_arena_if_needed(Item_arena *backup) + { + /* + use new arena if we are in a prepared statements and we have not + already changed to use this arena. + */ + if (current_arena->is_stmt_prepare() && + mem_root != ¤t_arena->main_mem_root) + { + set_n_backup_item_arena(current_arena, backup); + return current_arena; + } + return 0; + } + + void change_item_tree(Item **place, Item *new_value) + { + /* TODO: check for OOM condition here */ + if (!current_arena->is_conventional_execution()) + nocheck_register_item_tree_change(place, *place, mem_root); + *place= new_value; + } + void nocheck_register_item_tree_change(Item **place, Item *old_value, + MEM_ROOT *runtime_memroot); + void rollback_item_tree_changes(); + + /* + Cleanup statement parse state (parse tree, lex) and execution + state after execution of a non-prepared SQL statement. 
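    (A sketch of the intended use of change_arena_if_needed() and
    change_item_tree() declared just above; replace_item_sketch and its
    arguments are invented for illustration.)

      void replace_item_sketch(THD *thd, Item **ref, Item *new_item)
      {
        Item_arena backup;
        Item_arena *arena= thd->change_arena_if_needed(&backup);
        // while `arena` is non-zero we are preparing a PS and allocate on
        // the statement's own mem_root
        thd->change_item_tree(ref, new_item);   // recorded for later rollback
        if (arena)
          thd->restore_backup_item_arena(arena, &backup);
      }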
+ */ + void end_statement(); }; -# define tmp_disable_binlog(A) \ - ulong save_options= (A)->options, save_master_access= (A)->master_access; \ - (A)->options&= ~OPTION_BIN_LOG; \ - (A)->master_access|= SUPER_ACL; /* unneeded in 4.1 */ +#define tmp_disable_binlog(A) \ + ulong save_options= (A)->options; \ + (A)->options&= ~OPTION_BIN_LOG; -#define reenable_binlog(A) \ - (A)->options= save_options; \ - (A)->master_access= save_master_access; +#define reenable_binlog(A) (A)->options= save_options; /* Flags for the THD::system_thread (bitmap) variable */ #define SYSTEM_THREAD_DELAYED_INSERT 1 @@ -654,7 +1095,9 @@ public: /* Used to hold information about file and file structure in exchainge via non-DB file (...INTO OUTFILE..., ...LOAD DATA...) + XXX: We never call destructor for objects of this class. */ + class sql_exchange :public Sql_alloc { public: @@ -664,35 +1107,62 @@ public: bool dumpfile; ulong skip_lines; sql_exchange(char *name,bool dumpfile_flag); - ~sql_exchange() {} }; #include "log_event.h" /* -** This is used to get result from a select + This is used to get result from a select */ class JOIN; -void send_error(NET *net,uint sql_errno=0, const char *err=0); +void send_error(THD *thd, uint sql_errno=0, const char *err=0); class select_result :public Sql_alloc { protected: THD *thd; + SELECT_LEX_UNIT *unit; public: select_result(); virtual ~select_result() {}; - virtual int prepare(List<Item> &list) { return 0; } + virtual int prepare(List<Item> &list, SELECT_LEX_UNIT *u) + { + unit= u; + return 0; + } + /* + Because of peculiarities of prepared statements protocol + we need to know number of columns in the result set (if + there is a result set) apart from sending columns metadata. + */ + virtual uint field_count(List<Item> &fields) const + { return fields.elements; } virtual bool send_fields(List<Item> &list,uint flag)=0; virtual bool send_data(List<Item> &items)=0; virtual bool initialize_tables (JOIN *join=0) { return 0; } - virtual void send_error(uint errcode,const char *err) - { - ::send_error(&thd->net,errcode,err); - } + virtual void send_error(uint errcode,const char *err); virtual bool send_eof()=0; virtual void abort() {} + /* + Cleanup instance of this class for next execution of a prepared + statement/stored procedure. + */ + virtual void cleanup(); +}; + + +/* + Base class for select_result descendands which intercept and + transform result set rows. As the rows are not sent to the client, + sending of result set metadata should be suppressed as well. 
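  As an illustration of this contract, a minimal hypothetical interceptor
  (not part of this change) that merely counts the rows it receives:

    class select_row_counter_sketch :public select_result_interceptor
    {
      ha_rows counted;
    public:
      select_row_counter_sketch() :counted(0) {}
      bool send_data(List<Item> &items) { counted++; return 0; }
      bool send_eof() { ::send_ok(thd, counted); return 0; }
    };

  No send_fields() override is needed: the interceptor base suppresses
  result set metadata for all of its descendants.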
+*/ + +class select_result_interceptor: public select_result +{ +public: + uint field_count(List<Item> &fields) const { return 0; } + bool send_fields(List<Item> &fields, uint flag) { return FALSE; } }; @@ -705,67 +1175,86 @@ public: }; -class select_export :public select_result { +class select_to_file :public select_result_interceptor { +protected: sql_exchange *exchange; File file; IO_CACHE cache; ha_rows row_count; char path[FN_REFLEN]; + +public: + select_to_file(sql_exchange *ex) :exchange(ex), file(-1),row_count(0L) + { path[0]=0; } + ~select_to_file(); + void send_error(uint errcode,const char *err); + bool send_eof(); + void cleanup(); +}; + + +class select_export :public select_to_file { uint field_term_length; int field_sep_char,escape_char,line_sep_char; bool fixed_row_size; public: - select_export(sql_exchange *ex) :exchange(ex),file(-1),row_count(0L) - { path[0]=0; } + select_export(sql_exchange *ex) :select_to_file(ex) {} ~select_export(); - int prepare(List<Item> &list); - bool send_fields(List<Item> &list, - uint flag) { return 0; } + int prepare(List<Item> &list, SELECT_LEX_UNIT *u); bool send_data(List<Item> &items); - void send_error(uint errcode,const char *err); - bool send_eof(); }; -class select_dump :public select_result { - sql_exchange *exchange; - File file; - IO_CACHE cache; - ha_rows row_count; - char path[FN_REFLEN]; +class select_dump :public select_to_file { public: - select_dump(sql_exchange *ex) :exchange(ex),file(-1),row_count(0L) - { path[0]=0; } - ~select_dump(); - int prepare(List<Item> &list); - bool send_fields(List<Item> &list, - uint flag) { return 0; } + select_dump(sql_exchange *ex) :select_to_file(ex) {} + int prepare(List<Item> &list, SELECT_LEX_UNIT *u); bool send_data(List<Item> &items); - void send_error(uint errcode,const char *err); - bool send_eof(); }; -class select_insert :public select_result { +class select_insert :public select_result_interceptor { public: TABLE *table; List<Item> *fields; ulonglong last_insert_id; COPY_INFO info; + TABLE_LIST *insert_table_list; + TABLE_LIST *dup_table_list; - select_insert(TABLE *table_par,List<Item> *fields_par,enum_duplicates duplic) - :table(table_par),fields(fields_par), last_insert_id(0) + select_insert(TABLE *table_par, List<Item> *fields_par, + enum_duplicates duplic, bool ignore) + :table(table_par), fields(fields_par), last_insert_id(0), + insert_table_list(0), dup_table_list(0) { bzero((char*) &info,sizeof(info)); + info.ignore= ignore; info.handle_duplicates=duplic; } + select_insert(TABLE *table_par, + TABLE_LIST *insert_table_list_par, + TABLE_LIST *dup_table_list_par, + List<Item> *fields_par, + List<Item> *update_fields, List<Item> *update_values, + enum_duplicates duplic, bool ignore) + :table(table_par), fields(fields_par), last_insert_id(0), + insert_table_list(insert_table_list_par), + dup_table_list(dup_table_list_par) + { + bzero((char*) &info,sizeof(info)); + info.ignore= ignore; + info.handle_duplicates= duplic; + info.update_fields= update_fields; + info.update_values= update_values; + } ~select_insert(); - int prepare(List<Item> &list); - bool send_fields(List<Item> &list, uint flag) - { return 0; } + int prepare(List<Item> &list, SELECT_LEX_UNIT *u); bool send_data(List<Item> &items); + virtual void store_values(List<Item> &values); void send_error(uint errcode,const char *err); bool send_eof(); + /* not implemented: select_insert is never re-used in prepared statements */ + void cleanup(); }; @@ -783,34 +1272,128 @@ public: HA_CREATE_INFO *create_info_par, 
List<create_field> &fields_par, List<Key> &keys_par, - List<Item> &select_fields,enum_duplicates duplic) - :select_insert (NULL, &select_fields, duplic), db(db_name), + List<Item> &select_fields,enum_duplicates duplic, bool ignore) + :select_insert (NULL, &select_fields, duplic, ignore), db(db_name), name(table_name), extra_fields(&fields_par),keys(&keys_par), create_info(create_info_par), lock(0) {} - int prepare(List<Item> &list); - bool send_data(List<Item> &values); + int prepare(List<Item> &list, SELECT_LEX_UNIT *u); + void store_values(List<Item> &values); void send_error(uint errcode,const char *err); bool send_eof(); void abort(); }; +#include <myisam.h> + +/* + Param to create temporary tables when doing SELECT:s + NOTE + This structure is copied using memcpy as a part of JOIN. +*/ + +class TMP_TABLE_PARAM :public Sql_alloc +{ +private: + /* Prevent use of these (not safe because of lists and copy_field) */ + TMP_TABLE_PARAM(const TMP_TABLE_PARAM &); + void operator=(TMP_TABLE_PARAM &); + +public: + List<Item> copy_funcs; + List<Item> save_copy_funcs; + Copy_field *copy_field, *copy_field_end; + Copy_field *save_copy_field, *save_copy_field_end; + byte *group_buff; + Item **items_to_copy; /* Fields in tmp table */ + MI_COLUMNDEF *recinfo,*start_recinfo; + KEY *keyinfo; + ha_rows end_write_records; + uint field_count,sum_func_count,func_count; + uint hidden_field_count; + uint group_parts,group_length,group_null_parts; + uint quick_group; + bool using_indirect_summary_function; + /* If >0 convert all blob fields to varchar(convert_blob_length) */ + uint convert_blob_length; + bool force_copy_fields; + TMP_TABLE_PARAM() + :copy_field(0), group_parts(0), + group_length(0), group_null_parts(0), convert_blob_length(0), + force_copy_fields(0) + {} + ~TMP_TABLE_PARAM() + { + cleanup(); + } + void init(void); + inline void cleanup(void) + { + if (copy_field) /* Fix for Intel compiler */ + { + delete [] copy_field; + save_copy_field= copy_field= 0; + } + } +}; -class select_union :public select_result { +class select_union :public select_result_interceptor { public: TABLE *table; COPY_INFO info; - TMP_TABLE_PARAM *tmp_table_param; - bool not_describe; + TMP_TABLE_PARAM tmp_table_param; select_union(TABLE *table_par); ~select_union(); - int prepare(List<Item> &list); - bool send_fields(List<Item> &list, uint flag) - { return 0; } + int prepare(List<Item> &list, SELECT_LEX_UNIT *u); bool send_data(List<Item> &items); bool send_eof(); bool flush(); + void set_table(TABLE *tbl) { table= tbl; } +}; + +/* Base subselect interface class */ +class select_subselect :public select_result_interceptor +{ +protected: + Item_subselect *item; +public: + select_subselect(Item_subselect *item); + bool send_data(List<Item> &items)=0; + bool send_eof() { return 0; }; +}; + +/* Single value subselect interface class */ +class select_singlerow_subselect :public select_subselect +{ +public: + select_singlerow_subselect(Item_subselect *item):select_subselect(item){} + bool send_data(List<Item> &items); +}; + +/* used in independent ALL/ANY optimisation */ +class select_max_min_finder_subselect :public select_subselect +{ + Item_cache *cache; + bool (select_max_min_finder_subselect::*op)(); + bool fmax; +public: + select_max_min_finder_subselect(Item_subselect *item, bool mx) + :select_subselect(item), cache(0), fmax(mx) + {} + void cleanup(); + bool send_data(List<Item> &items); + bool cmp_real(); + bool cmp_int(); + bool cmp_str(); +}; + +/* EXISTS subselect interface class */ +class select_exists_subselect 
:public select_subselect +{ +public: + select_exists_subselect(Item_subselect *item):select_subselect(item){} + bool send_data(List<Item> &items); }; /* Structs used when sorting */ @@ -819,8 +1402,9 @@ typedef struct st_sort_field { Field *field; /* Field to sort */ Item *item; /* Item if not sorting fields */ uint length; /* Length of sort field */ - my_bool reverse; /* if descending sort */ Item_result result_type; /* Type of item */ + bool reverse; /* if descending sort */ + bool need_strxnfrm; /* If we have to use strxnfrm() */ } SORT_FIELD; @@ -834,21 +1418,35 @@ typedef struct st_sort_buffer { /* Structure for db & table in sql_yacc */ -class Table_ident :public Sql_alloc { +class Table_ident :public Sql_alloc +{ public: LEX_STRING db; LEX_STRING table; - inline Table_ident(LEX_STRING db_arg,LEX_STRING table_arg,bool force) - :table(table_arg) + SELECT_LEX_UNIT *sel; + inline Table_ident(THD *thd, LEX_STRING db_arg, LEX_STRING table_arg, + bool force) + :table(table_arg), sel((SELECT_LEX_UNIT *)0) { - if (!force && (current_thd->client_capabilities & CLIENT_NO_SCHEMA)) + if (!force && (thd->client_capabilities & CLIENT_NO_SCHEMA)) db.str=0; else db= db_arg; } - inline Table_ident(LEX_STRING table_arg) :table(table_arg) {db.str=0;} + inline Table_ident(LEX_STRING table_arg) + :table(table_arg), sel((SELECT_LEX_UNIT *)0) + { + db.str=0; + } + inline Table_ident(SELECT_LEX_UNIT *s) : sel(s) + { + /* We must have a table name here as this is used with add_table_to_list */ + db.str=0; table.str= internal_table_name; table.length=1; + } inline void change_db(char *db_name) - { db.str= db_name; db.length=(uint) strlen(db_name); } + { + db.str= db_name; db.length= (uint) strlen(db_name); + } }; // this is needed for user_vars hash @@ -857,12 +1455,13 @@ class user_var_entry public: LEX_STRING name; char *value; - ulong length, update_query_id; + ulong length, update_query_id, used_query_id; Item_result type; double val(my_bool *null_value); longlong val_int(my_bool *null_value); String *val_str(my_bool *null_value, String *str, uint decimals); + DTCollation collation; }; @@ -887,7 +1486,7 @@ public: { if (tree.elements_in_tree > max_elements && flush()) return 1; - return !tree_insert(&tree,ptr,0); + return !tree_insert(&tree, ptr, 0, tree.custom_arg); } bool get(TABLE *table); @@ -896,22 +1495,20 @@ public: friend int unique_write_to_ptrs(gptr key, element_count count, Unique *unique); }; -class multi_delete : public select_result + +class multi_delete :public select_result_interceptor { TABLE_LIST *delete_tables, *table_being_deleted; - Unique **tempfiles; + Unique **tempfiles; THD *thd; ha_rows deleted, found; uint num_of_tables; int error; bool do_delete, transactional_tables, log_delayed, normal_tables; - public: multi_delete(THD *thd, TABLE_LIST *dt, uint num_of_tables); ~multi_delete(); - int prepare(List<Item> &list); - bool send_fields(List<Item> &list, - uint flag) { return 0; } + int prepare(List<Item> &list, SELECT_LEX_UNIT *u); bool send_data(List<Item> &items); bool initialize_tables (JOIN *join); void send_error(uint errcode,const char *err); @@ -919,7 +1516,8 @@ public: bool send_eof(); }; -class multi_update : public select_result + +class multi_update :public select_result_interceptor { TABLE_LIST *all_tables, *update_tables, *table_being_updated; THD *thd; @@ -931,14 +1529,13 @@ class multi_update : public select_result uint table_count; Copy_field *copy_field; enum enum_duplicates handle_duplicates; - bool do_update, trans_safe, transactional_tables, log_delayed; + bool 
do_update, trans_safe, transactional_tables, log_delayed, ignore; public: multi_update(THD *thd_arg, TABLE_LIST *ut, List<Item> *fields, - List<Item> *values, enum_duplicates handle_duplicates); + List<Item> *values, enum_duplicates handle_duplicates, bool ignore); ~multi_update(); - int prepare(List<Item> &list); - bool send_fields(List<Item> &list, uint flag) { return 0; } + int prepare(List<Item> &list, SELECT_LEX_UNIT *u); bool send_data(List<Item> &items); bool initialize_tables (JOIN *join); void send_error(uint errcode,const char *err); @@ -946,3 +1543,16 @@ public: bool send_eof(); }; + +class select_dumpvar :public select_result_interceptor { + ha_rows row_count; +public: + List<LEX_STRING> var_list; + List<Item_func_set_user_var> vars; + select_dumpvar(void) { var_list.empty(); vars.empty(); row_count=0;} + ~select_dumpvar() {} + int prepare(List<Item> &list, SELECT_LEX_UNIT *u); + bool send_data(List<Item> &items); + bool send_eof(); + void cleanup(); +}; diff --git a/sql/opt_ft.cc b/sql/sql_client.cc index 74349819937..49d0d3087ad 100644 --- a/sql/opt_ft.cc +++ b/sql/sql_client.cc @@ -14,23 +14,26 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#ifdef __GNUC__ -#pragma implementation // gcc: Class implementation -#endif +/* + This files defines some MySQL C API functions that are server specific +*/ #include "mysql_priv.h" -#include "sql_select.h" -#include "opt_ft.h" -/**************************************************************************** -** Create a FT or QUICK RANGE based on a key -****************************************************************************/ +/* + Function called by my_net_init() to set some check variables +*/ -QUICK_SELECT *get_ft_or_quick_select_for_ref(THD *thd, TABLE *table, - JOIN_TAB *tab) +extern "C" { +void my_net_local_init(NET *net) { - if (tab->type == JT_FT) - return new FT_SELECT(thd, table, &tab->ref); - return get_quick_select_for_ref(thd, table, &tab->ref); +#ifndef EMBEDDED_LIBRARY + net->max_packet= (uint) global_system_variables.net_buffer_length; + net->read_timeout= (uint) global_system_variables.net_read_timeout; + net->write_timeout=(uint) global_system_variables.net_write_timeout; + net->retry_count= (uint) global_system_variables.net_retry_count; + net->max_packet_size= max(global_system_variables.net_buffer_length, + global_system_variables.max_allowed_packet); +#endif +} } - diff --git a/sql/sql_crypt.cc b/sql/sql_crypt.cc index 930ecfffef7..f21a109e95d 100644 --- a/sql/sql_crypt.cc +++ b/sql/sql_crypt.cc @@ -23,7 +23,7 @@ needs something like 'ssh'. 
*/ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_IMPLEMENTATION #pragma implementation // gcc: Class implementation #endif @@ -32,7 +32,7 @@ SQL_CRYPT::SQL_CRYPT(const char *password) { ulong rand_nr[2]; - hash_password(rand_nr,password); + hash_password(rand_nr,password, strlen(password)); crypt_init(rand_nr); } diff --git a/sql/sql_crypt.h b/sql/sql_crypt.h index 1b27f0a4d27..25bc2d29e1d 100644 --- a/sql/sql_crypt.h +++ b/sql/sql_crypt.h @@ -15,7 +15,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_INTERFACE #pragma interface /* gcc class implementation */ #endif diff --git a/sql/sql_db.cc b/sql/sql_db.cc index c8874701aa1..035a0b22a6b 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -1,4 +1,4 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2003 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -18,30 +18,402 @@ /* create and drop of databases */ #include "mysql_priv.h" -#include "sql_acl.h" +#include <mysys_err.h> #include <my_dir.h> #include <m_ctype.h> #ifdef __WIN__ #include <direct.h> #endif +#define MAX_DROP_TABLE_Q_LEN 1024 + +const char *del_exts[]= {".frm", ".BAK", ".TMD",".opt", NullS}; +static TYPELIB deletable_extentions= +{array_elements(del_exts)-1,"del_exts", del_exts, NULL}; + static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, - const char *db, const char *path, - uint level); + const char *db, const char *path, uint level, + TABLE_LIST **dropped_tables); + +/* Database options hash */ +static HASH dboptions; +static my_bool dboptions_init= 0; +static rw_lock_t LOCK_dboptions; -/* db-name is already validated when we come here */ +/* Structure for database options */ +typedef struct my_dbopt_st +{ + char *name; /* Database name */ + uint name_length; /* Database length name */ + CHARSET_INFO *charset; /* Database default character set */ +} my_dbopt_t; + + +/* + Function we use in the creation of our hash to get key. +*/ + +static byte* dboptions_get_key(my_dbopt_t *opt, uint *length, + my_bool not_used __attribute__((unused))) +{ + *length= opt->name_length; + return (byte*) opt->name; +} + + +/* + Helper function to write a query to binlog used by mysql_rm_db() +*/ + +static inline void write_to_binlog(THD *thd, char *query, uint q_len, + char *db, uint db_len) +{ + Query_log_event qinfo(thd, query, q_len, 0, 0); + qinfo.error_code= 0; + qinfo.db= db; + qinfo.db_len= db_len; + mysql_bin_log.write(&qinfo); +} + + +/* + Function to free dboptions hash element +*/ + +static void free_dbopt(void *dbopt) +{ + my_free((gptr) dbopt, MYF(0)); +} + + +/* + Initialize database option hash + + SYNOPSIS + my_dbopt_init() + + NOTES + Must be called before any other database function is called. + + RETURN + 0 ok + 1 Fatal error +*/ + +bool my_dbopt_init(void) +{ + bool error= 0; + (void) my_rwlock_init(&LOCK_dboptions, NULL); + if (!dboptions_init) + { + dboptions_init= 1; + error= hash_init(&dboptions, lower_case_table_names ? + &my_charset_bin : system_charset_info, + 32, 0, 0, (hash_get_key) dboptions_get_key, + free_dbopt,0); + } + return error; +} + + +/* + Free database option hash. 
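  (For orientation: the hash above caches what write_db_opt() and
  load_db_opt() further down in this file store in a database's db.opt
  file.  The file holds exactly two lines, for example

      default-character-set=latin1
      default-collation=latin1_swedish_ci

  and a caller typically refreshes HA_CREATE_INFO::default_table_charset
  with something like

      HA_CREATE_INFO create;
      load_db_opt(thd, "/path/to/datadir/mydb/db.opt", &create);

  where the path shown is only an example.)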
+*/ + +void my_dbopt_free(void) +{ + if (dboptions_init) + { + dboptions_init= 0; + hash_free(&dboptions); + (void) rwlock_destroy(&LOCK_dboptions); + } +} + + +void my_dbopt_cleanup(void) +{ + rw_wrlock(&LOCK_dboptions); + hash_free(&dboptions); + hash_init(&dboptions, lower_case_table_names ? + &my_charset_bin : system_charset_info, + 32, 0, 0, (hash_get_key) dboptions_get_key, + free_dbopt,0); + rw_unlock(&LOCK_dboptions); +} + + +/* + Find database options in the hash. + + DESCRIPTION + Search a database options in the hash, usings its path. + Fills "create" on success. + + RETURN VALUES + 0 on success. + 1 on error. +*/ + +static my_bool get_dbopt(const char *dbname, HA_CREATE_INFO *create) +{ + my_dbopt_t *opt; + uint length; + my_bool error= 1; + + length= (uint) strlen(dbname); + + rw_rdlock(&LOCK_dboptions); + if ((opt= (my_dbopt_t*) hash_search(&dboptions, (byte*) dbname, length))) + { + create->default_table_charset= opt->charset; + error= 0; + } + rw_unlock(&LOCK_dboptions); + return error; +} + + +/* + Writes database options into the hash. + + DESCRIPTION + Inserts database options into the hash, or updates + options if they are already in the hash. + + RETURN VALUES + 0 on success. + 1 on error. +*/ + +static my_bool put_dbopt(const char *dbname, HA_CREATE_INFO *create) +{ + my_dbopt_t *opt; + uint length; + my_bool error= 0; + DBUG_ENTER("put_dbopt"); + + length= (uint) strlen(dbname); + + rw_wrlock(&LOCK_dboptions); + if (!(opt= (my_dbopt_t*) hash_search(&dboptions, (byte*) dbname, length))) + { + /* Options are not in the hash, insert them */ + char *tmp_name; + if (!my_multi_malloc(MYF(MY_WME | MY_ZEROFILL), + &opt, (uint) sizeof(*opt), &tmp_name, length+1, + NullS)) + { + error= 1; + goto end; + } + + opt->name= tmp_name; + strmov(opt->name, dbname); + opt->name_length= length; + + if ((error= my_hash_insert(&dboptions, (byte*) opt))) + { + my_free((gptr) opt, MYF(0)); + goto end; + } + } + + /* Update / write options in hash */ + opt->charset= create->default_table_charset; + +end: + rw_unlock(&LOCK_dboptions); + DBUG_RETURN(error); +} + + +/* + Deletes database options from the hash. +*/ + +void del_dbopt(const char *path) +{ + my_dbopt_t *opt; + rw_wrlock(&LOCK_dboptions); + if ((opt= (my_dbopt_t *)hash_search(&dboptions, (const byte*) path, + strlen(path)))) + hash_delete(&dboptions, (byte*) opt); + rw_unlock(&LOCK_dboptions); +} + + +/* + Create database options file: + + DESCRIPTION + Currently database default charset is only stored there. + + RETURN VALUES + 0 ok + 1 Could not create file or write to it. 
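/*
  For illustration only: get_dbopt()/put_dbopt()/del_dbopt() above follow the
  usual cache discipline -- shared lock for lookups, exclusive lock for insert,
  update and delete -- and put_dbopt() tolerates the entry having been added by
  another thread in the meantime.  The sketch below shows that pattern with
  std::shared_mutex and std::unordered_map only; DbOptCache is an invented
  name, not a server type.
*/
#include <mutex>
#include <shared_mutex>
#include <string>
#include <unordered_map>

class DbOptCache
{
  mutable std::shared_mutex lock_;
  std::unordered_map<std::string, std::string> charset_by_db_;

public:
  /* Lookup under a shared lock; false when the entry is missing. */
  bool get(const std::string &db, std::string *charset) const
  {
    std::shared_lock<std::shared_mutex> guard(lock_);
    auto it= charset_by_db_.find(db);
    if (it == charset_by_db_.end())
      return false;
    *charset= it->second;
    return true;
  }

  /* Insert or overwrite under an exclusive lock (as put_dbopt() does). */
  void put(const std::string &db, const std::string &charset)
  {
    std::unique_lock<std::shared_mutex> guard(lock_);
    charset_by_db_[db]= charset;
  }

  /* Remove the entry if present (as del_dbopt() does). */
  void del(const std::string &db)
  {
    std::unique_lock<std::shared_mutex> guard(lock_);
    charset_by_db_.erase(db);
  }
};

int main()
{
  DbOptCache cache;
  cache.put("test", "utf8");
  std::string cs;
  return (cache.get("test", &cs) && cs == "utf8") ? 0 : 1;
}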
Error sent through my_error() +*/ + +static bool write_db_opt(THD *thd, const char *path, HA_CREATE_INFO *create) +{ + register File file; + char buf[256]; // Should be enough for one option + bool error=1; + + if (!create->default_table_charset) + create->default_table_charset= thd->variables.collation_server; + + if (put_dbopt(path, create)) + return 1; + + if ((file=my_create(path, CREATE_MODE,O_RDWR | O_TRUNC,MYF(MY_WME))) >= 0) + { + ulong length; + length= (ulong) (strxnmov(buf, sizeof(buf), "default-character-set=", + create->default_table_charset->csname, + "\ndefault-collation=", + create->default_table_charset->name, + "\n", NullS) - buf); + + /* Error is written by my_write */ + if (!my_write(file,(byte*) buf, length, MYF(MY_NABP+MY_WME))) + error=0; + my_close(file,MYF(0)); + } + return error; +} + + +/* + Load database options file + + load_db_opt() + path Path for option file + create Where to store the read options + + DESCRIPTION + For now, only default-character-set is read. + + RETURN VALUES + 0 File found + 1 No database file or could not open it + +*/ + +bool load_db_opt(THD *thd, const char *path, HA_CREATE_INFO *create) +{ + File file; + char buf[256]; + DBUG_ENTER("load_db_opt"); + bool error=1; + uint nbytes; + + bzero((char*) create,sizeof(*create)); + create->default_table_charset= thd->variables.collation_server; + + /* Check if options for this database are already in the hash */ + if (!get_dbopt(path, create)) + DBUG_RETURN(0); + + /* Otherwise, load options from the .opt file */ + if ((file=my_open(path, O_RDONLY | O_SHARE, MYF(0))) >= 0) + { + IO_CACHE cache; + init_io_cache(&cache, file, IO_SIZE, READ_CACHE, 0, 0, MYF(0)); + + while ((int) (nbytes= my_b_gets(&cache, (char*) buf, sizeof(buf))) > 0) + { + char *pos= buf+nbytes-1; + /* Remove end space and control characters */ + while (pos > buf && !my_isgraph(&my_charset_latin1, pos[-1])) + pos--; + *pos=0; + if ((pos= strchr(buf, '='))) + { + if (!strncmp(buf,"default-character-set", (pos-buf))) + { + /* + Try character set name, and if it fails + try collation name, probably it's an old + 4.1.0 db.opt file, which didn't have + separate default-character-set and + default-collation commands. + */ + if (!(create->default_table_charset= + get_charset_by_csname(pos+1, MY_CS_PRIMARY, MYF(0))) && + !(create->default_table_charset= + get_charset_by_name(pos+1, MYF(0)))) + { + sql_print_error("Error while loading database options: '%s':",path); + sql_print_error(ER(ER_UNKNOWN_CHARACTER_SET),pos+1); + create->default_table_charset= default_charset_info; + } + } + else if (!strncmp(buf,"default-collation", (pos-buf))) + { + if (!(create->default_table_charset= get_charset_by_name(pos+1, + MYF(0)))) + { + sql_print_error("Error while loading database options: '%s':",path); + sql_print_error(ER(ER_UNKNOWN_COLLATION),pos+1); + create->default_table_charset= default_charset_info; + } + } + } + } + end_io_cache(&cache); + my_close(file,MYF(0)); + /* + Put the loaded value into the hash. + Note that another thread could've added the same + entry to the hash after we called get_dbopt(), + but it's not an error, as put_dbopt() takes this + possibility into account. + */ + error= put_dbopt(path, create); + } + DBUG_RETURN(error); +} + + +/* + Create a database -int mysql_create_db(THD *thd, char *db, uint create_options, bool silent) + SYNOPSIS + mysql_create_db() + thd Thread handler + db Name of database to create + Function assumes that this is already validated. 
+ create_info Database create options (like character set) + silent Used by replication when internally creating a database. + In this case the entry should not be logged. + + SIDE-EFFECTS + 1. Report back to client that command succeeded (send_ok) + 2. Report errors to client + 3. Log event to binary log + (The 'silent' flags turns off 1 and 3.) + + RETURN VALUES + 0 ok + -1 Error + +*/ + +int mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info, + bool silent) { char path[FN_REFLEN+16]; - MY_DIR *dirp; - long result=1; - int error = 0; + long result= 1; + int error= 0; + MY_STAT stat_info; + uint create_options= create_info ? create_info->options : 0; + uint path_len; DBUG_ENTER("mysql_create_db"); - + VOID(pthread_mutex_lock(&LOCK_mysql_create_db)); - // do not create database if another thread is holding read lock + /* do not create database if another thread is holding read lock */ if (wait_if_global_read_lock(thd, 0, 1)) { error= -1; @@ -49,56 +421,106 @@ int mysql_create_db(THD *thd, char *db, uint create_options, bool silent) } /* Check directory */ - (void)sprintf(path,"%s/%s", mysql_data_home, db); - unpack_dirname(path,path); // Convert if not unix - if ((dirp = my_dir(path,MYF(MY_DONT_SORT)))) + strxmov(path, mysql_data_home, "/", db, NullS); + path_len= unpack_dirname(path,path); // Convert if not unix + path[path_len-1]= 0; // Remove last '/' from path + + if (my_stat(path,&stat_info,MYF(0))) { - my_dirend(dirp); if (!(create_options & HA_LEX_CREATE_IF_NOT_EXISTS)) { my_error(ER_DB_CREATE_EXISTS,MYF(0),db); - error = -1; + error= -1; goto exit; } - result = 0; + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_DB_CREATE_EXISTS, ER(ER_DB_CREATE_EXISTS), db); + if (!silent) + send_ok(thd); + error= 0; + goto exit; } else { - strend(path)[-1]=0; // Remove last '/' from path + if (my_errno != ENOENT) + { + my_error(EE_STAT, MYF(0),path,my_errno); + goto exit; + } if (my_mkdir(path,0777,MYF(0)) < 0) { my_error(ER_CANT_CREATE_DB,MYF(0),db,my_errno); - error = -1; + error= -1; goto exit; } } + path[path_len-1]= FN_LIBCHAR; + strmake(path+path_len, MY_DB_OPT_FILE, sizeof(path)-path_len-1); + if (write_db_opt(thd, path, create_info)) + { + /* + Could not create options file. + Restore things to beginning. + */ + path[path_len]= 0; + if (rmdir(path) >= 0) + { + error= -1; + goto exit; + } + /* + We come here when we managed to create the database, but not the option + file. In this case it's best to just continue as if nothing has + happened. 
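/*
  Illustrative aside on the db.opt format handled by write_db_opt() and
  load_db_opt() above: one small per-database file holding
  "default-character-set=" and "default-collation=" lines, read back with
  trailing space and control characters stripped.  The sketch below writes and
  re-reads that key=value format with the standard library only; it does not
  use the server's IO_CACHE or charset lookup, and the file name "demo_db.opt"
  is just for the example.
*/
#include <fstream>
#include <iostream>
#include <map>
#include <string>

static void write_db_opt_file(const std::string &path,
                              const std::string &charset,
                              const std::string &collation)
{
  std::ofstream out(path, std::ios::trunc);
  out << "default-character-set=" << charset << "\n"
      << "default-collation="     << collation << "\n";
}

static std::map<std::string, std::string> load_db_opt_file(const std::string &path)
{
  std::map<std::string, std::string> opts;
  std::ifstream in(path);
  std::string line;
  while (std::getline(in, line))
  {
    /* Strip trailing space and control characters, as the server does. */
    while (!line.empty() && (unsigned char) line.back() <= ' ')
      line.pop_back();
    std::string::size_type eq= line.find('=');
    if (eq != std::string::npos)
      opts[line.substr(0, eq)]= line.substr(eq + 1);
  }
  return opts;
}

int main()
{
  write_db_opt_file("demo_db.opt", "latin1", "latin1_swedish_ci");
  for (const auto &kv : load_db_opt_file("demo_db.opt"))
    std::cout << kv.first << " -> " << kv.second << "\n";
  return 0;
}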
(This is a very unlikely senario) + */ + } + if (!silent) { - if (!thd->query) + char *query; + uint query_length; + + if (!thd->query) // Only in replication { - /* The client used the old obsolete mysql_create_db() call */ - thd->query_length= (uint) (strxmov(path,"create database `", db, "`", - NullS) - path); - thd->query= path; + query= path; + query_length= (uint) (strxmov(path,"create database `", db, "`", NullS) - + path); } + else { - mysql_update_log.write(thd,thd->query, thd->query_length); - if (mysql_bin_log.is_open()) - { - thd->clear_error(); - Query_log_event qinfo(thd, thd->query, thd->query_length, 0); - mysql_bin_log.write(&qinfo); - } + query= thd->query; + query_length= thd->query_length; } - if (thd->query == path) + mysql_update_log.write(thd, query, query_length); + if (mysql_bin_log.is_open()) { - VOID(pthread_mutex_lock(&LOCK_thread_count)); - thd->query= 0; - thd->query_length= 0; - VOID(pthread_mutex_unlock(&LOCK_thread_count)); + Query_log_event qinfo(thd, query, query_length, 0, + /* suppress_use */ TRUE); + + /* + Write should use the database being created as the "current + database" and not the threads current database, which is the + default. If we do not change the "current database" to the + database being created, the CREATE statement will not be + replicated when using --binlog-do-db to select databases to be + replicated. + + An example (--binlog-do-db=sisyfos): + + CREATE DATABASE bob; # Not replicated + USE bob; # 'bob' is the current database + CREATE DATABASE sisyfos; # Not replicated since 'bob' is + # current database. + USE sisyfos; # Will give error on slave since + # database does not exist. + */ + qinfo.db = db; + qinfo.db_len = strlen(db); + + mysql_bin_log.write(&qinfo); } - send_ok(&thd->net, result); + send_ok(thd, result); } exit: @@ -108,14 +530,65 @@ exit2: DBUG_RETURN(error); } -const char *del_exts[]= {".frm", ".BAK", ".TMD", NullS}; -static TYPELIB deletable_extentions= -{array_elements(del_exts)-1,"del_exts", del_exts}; -const char *known_exts[]= -{".ISM",".ISD",".ISM",".MRG",".MYI",".MYD",".db",NullS}; -static TYPELIB known_extentions= -{array_elements(known_exts)-1,"known_exts", known_exts}; +/* db-name is already validated when we come here */ + +int mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create_info) +{ + char path[FN_REFLEN+16]; + long result=1; + int error= 0; + DBUG_ENTER("mysql_alter_db"); + + VOID(pthread_mutex_lock(&LOCK_mysql_create_db)); + + /* do not alter database if another thread is holding read lock */ + if ((error=wait_if_global_read_lock(thd,0,1))) + goto exit2; + + /* Check directory */ + strxmov(path, mysql_data_home, "/", db, "/", MY_DB_OPT_FILE, NullS); + fn_format(path, path, "", "", MYF(MY_UNPACK_FILENAME)); + if ((error=write_db_opt(thd, path, create_info))) + goto exit; + + /* + Change options if current database is being altered + TODO: Delete this code + */ + if (thd->db && !strcmp(thd->db,db)) + { + thd->db_charset= create_info->default_table_charset ? + create_info->default_table_charset : + thd->variables.collation_server; + thd->variables.collation_database= thd->db_charset; + } + + mysql_update_log.write(thd,thd->query, thd->query_length); + if (mysql_bin_log.is_open()) + { + Query_log_event qinfo(thd, thd->query, thd->query_length, 0, + /* suppress_use */ TRUE); + + /* + Write should use the database being created as the "current + database" and not the threads current database, which is the + default. 
+ */ + qinfo.db = db; + qinfo.db_len = strlen(db); + + thd->clear_error(); + mysql_bin_log.write(&qinfo); + } + send_ok(thd, result); + +exit: + start_waiting_global_read_lock(thd); +exit2: + VOID(pthread_mutex_unlock(&LOCK_mysql_create_db)); + DBUG_RETURN(error ? -1 : 0); /* -1 to delegate send_error() */ +} /* @@ -134,18 +607,19 @@ static TYPELIB known_extentions= -1 Error generated */ - int mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) { long deleted=0; - int error = 0; + int error= 0; char path[FN_REFLEN+16], tmp_db[NAME_LEN+1]; MY_DIR *dirp; + uint length; + TABLE_LIST* dropped_tables= 0; DBUG_ENTER("mysql_rm_db"); VOID(pthread_mutex_lock(&LOCK_mysql_create_db)); - // do not drop database if another thread is holding read lock + /* do not drop database if another thread is holding read lock */ if (wait_if_global_read_lock(thd, 0, 1)) { error= -1; @@ -153,66 +627,149 @@ int mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) } (void) sprintf(path,"%s/%s",mysql_data_home,db); - unpack_dirname(path,path); // Convert if not unix + length= unpack_dirname(path,path); // Convert if not unix + strmov(path+length, MY_DB_OPT_FILE); // Append db option file name + del_dbopt(path); // Remove dboption hash entry + path[length]= '\0'; // Remove file name + /* See if the directory exists */ - if (!(dirp = my_dir(path,MYF(MY_DONT_SORT)))) + if (!(dirp= my_dir(path,MYF(MY_DONT_SORT)))) { if (!if_exists) { error= -1; my_error(ER_DB_DROP_EXISTS,MYF(0),db); + goto exit; + } + else + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_DB_DROP_EXISTS, ER(ER_DB_DROP_EXISTS), db); + } + else + { + pthread_mutex_lock(&LOCK_open); + remove_db_from_cache(db); + pthread_mutex_unlock(&LOCK_open); + + + error= -1; + if ((deleted= mysql_rm_known_files(thd, dirp, db, path, 0, + &dropped_tables)) >= 0) + { + ha_drop_database(path); + query_cache_invalidate1(db); + error = 0; } - else if (!silent) - send_ok(&thd->net,0); - goto exit; } if (lower_case_table_names) { /* Convert database to lower case */ strmov(tmp_db, db); - casedn_str(tmp_db); + my_casedn_str(files_charset_info, tmp_db); db= tmp_db; } + if (!silent && deleted>=0) + { + const char *query; + ulong query_length; + if (!thd->query) + { + /* The client used the old obsolete mysql_drop_db() call */ + query= path; + query_length= (uint) (strxmov(path, "drop database `", db, "`", + NullS) - path); + } + else + { + query =thd->query; + query_length= thd->query_length; + } + mysql_update_log.write(thd, query, query_length); + if (mysql_bin_log.is_open()) + { + Query_log_event qinfo(thd, query, query_length, 0, + /* suppress_use */ TRUE); + /* + Write should use the database being created as the "current + database" and not the threads current database, which is the + default. 
+ */ + qinfo.db = db; + qinfo.db_len = strlen(db); - pthread_mutex_lock(&LOCK_open); - remove_db_from_cache(db); - pthread_mutex_unlock(&LOCK_open); - - error = -1; - if ((deleted=mysql_rm_known_files(thd, dirp, db, path,0)) >= 0 && thd) + thd->clear_error(); + mysql_bin_log.write(&qinfo); + } + thd->server_status|= SERVER_STATUS_DB_DROPPED; + send_ok(thd, (ulong) deleted); + thd->server_status&= ~SERVER_STATUS_DB_DROPPED; + } + else if (mysql_bin_log.is_open()) { - ha_drop_database(path); - query_cache_invalidate1(db); - if (!silent) + char *query, *query_pos, *query_end, *query_data_start; + TABLE_LIST *tbl; + uint db_len; + + if (!(query= thd->alloc(MAX_DROP_TABLE_Q_LEN))) + goto exit; /* not much else we can do */ + query_pos= query_data_start= strmov(query,"drop table "); + query_end= query + MAX_DROP_TABLE_Q_LEN; + db_len= strlen(db); + + for (tbl= dropped_tables; tbl; tbl= tbl->next) { - if (!thd->query) - { - thd->query_length= (uint) (strxmov(path,"drop database `", db, "`", - NullS)- - path); - thd->query= path; - } - mysql_update_log.write(thd, thd->query, thd->query_length); - if (mysql_bin_log.is_open()) - { - thd->clear_error(); - Query_log_event qinfo(thd, thd->query, thd->query_length, 0); - mysql_bin_log.write(&qinfo); - } - if (thd->query == path) + uint tbl_name_len; + + /* 3 for the quotes and the comma*/ + tbl_name_len= strlen(tbl->real_name) + 3; + if (query_pos + tbl_name_len + 1 >= query_end) { - VOID(pthread_mutex_lock(&LOCK_thread_count)); - thd->query= 0; - thd->query_length= 0; - VOID(pthread_mutex_unlock(&LOCK_thread_count)); + write_to_binlog(thd, query, query_pos -1 - query, db, db_len); + query_pos= query_data_start; } - send_ok(&thd->net,(ulong) deleted); + + *query_pos++ = '`'; + query_pos= strmov(query_pos,tbl->real_name); + *query_pos++ = '`'; + *query_pos++ = ','; + } + + if (query_pos != query_data_start) + { + write_to_binlog(thd, query, query_pos -1 - query, db, db_len); } - error = 0; } exit: start_waiting_global_read_lock(thd); + /* + If this database was the client's selected database, we silently change the + client's selected database to nothing (to have an empty SELECT DATABASE() + in the future). For this we free() thd->db and set it to 0. But we don't do + free() for the slave thread. Indeed, doing a x_free() on it leads to nasty + problems (i.e. long painful debugging) because in this thread, thd->db is + the same as data_buf and db of the Query_log_event which is dropping the + database. So if you free() thd->db, you're freeing data_buf. You set + thd->db to 0 but not data_buf (thd->db and data_buf are two distinct + pointers which point to the same place). Then in ~Query_log_event(), we + have 'if (data_buf) free(data_buf)' data_buf is !=0 so this makes a + DOUBLE free(). + Side effects of this double free() are, randomly (depends on the machine), + when the slave is replicating a DROP DATABASE: + - garbage characters in the error message: + "Error 'Can't drop database 'test2'; database doesn't exist' on query + 'h4zI©'" + - segfault + - hang in "free(vio)" (yes!) in the I/O or SQL slave threads (so slave + server hangs at shutdown etc). 
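/*
  Illustrative aside on the binlog batching above: in the branch where the
  DROP DATABASE statement itself is not written, the individually dropped
  tables are logged as a series of "drop table `a`,`b`,..." statements, flushed
  whenever the next name would overflow MAX_DROP_TABLE_Q_LEN.  The sketch below
  reproduces only that batching over a plain string; flush_statement() is a
  stand-in for write_to_binlog() and the small length limit is just for the
  demo.
*/
#include <iostream>
#include <string>
#include <vector>

static const std::size_t kMaxStatementLen= 40;

static void flush_statement(std::string stmt)
{
  stmt.pop_back();               /* drop the trailing ',' */
  std::cout << stmt << "\n";     /* stand-in for the binlog write */
}

static void log_dropped_tables(const std::vector<std::string> &tables)
{
  const std::string prefix= "drop table ";
  std::string stmt= prefix;
  for (const std::string &name : tables)
  {
    const std::string quoted= "`" + name + "`,";
    if (stmt.size() + quoted.size() > kMaxStatementLen && stmt.size() > prefix.size())
    {
      flush_statement(stmt);     /* statement full: emit and start a new one */
      stmt= prefix;
    }
    stmt+= quoted;
  }
  if (stmt.size() > prefix.size())
    flush_statement(stmt);
}

int main()
{
  log_dropped_tables({"t1", "t2", "a_rather_long_table_name", "t3", "t4"});
  return 0;
}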
+ */ + if (thd->db && !strcmp(thd->db, db)) + { + if (!(thd->slave_thread)) /* a slave thread will free it itself */ + x_free(thd->db); + thd->db= 0; + thd->db_length= 0; + } exit2: VOID(pthread_mutex_unlock(&LOCK_mysql_create_db)); @@ -226,14 +783,14 @@ exit2: */ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db, - const char *org_path, uint level) + const char *org_path, uint level, + TABLE_LIST **dropped_tables) { long deleted=0; ulong found_other_files=0; char filePath[FN_REFLEN]; TABLE_LIST *tot_list=0, **tot_list_next; List<String> raid_dirs; - DBUG_ENTER("mysql_rm_known_files"); DBUG_PRINT("enter",("path: %s", org_path)); @@ -253,9 +810,9 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db, continue; /* Check if file is a raid directory */ - if ((isdigit(file->name[0]) || + if ((my_isdigit(&my_charset_latin1, file->name[0]) || (file->name[0] >= 'a' && file->name[0] <= 'f')) && - (isdigit(file->name[1]) || + (my_isdigit(&my_charset_latin1, file->name[1]) || (file->name[1] >= 'a' && file->name[1] <= 'f')) && !file->name[2] && !level) { @@ -269,10 +826,11 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db, if ((new_dirp = my_dir(newpath,MYF(MY_DONT_SORT)))) { DBUG_PRINT("my",("New subdir found: %s", newpath)); - if ((mysql_rm_known_files(thd, new_dirp, NullS, newpath,1)) < 0) + if ((mysql_rm_known_files(thd, new_dirp, NullS, newpath,1,0)) < 0) goto err; if (!(copy_of_path= thd->memdup(newpath, length+1)) || - !(dir= new String(copy_of_path, length)) || + !(dir= new (thd->mem_root) String(copy_of_path, length, + &my_charset_bin)) || raid_dirs.push_back(dir)) goto err; continue; @@ -283,11 +841,13 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db, extension= fn_ext(file->name); if (find_type(extension, &deletable_extentions,1+2) <= 0) { - if (find_type(extension, &known_extentions,1+2) <= 0) + if (find_type(extension, ha_known_exts(),1+2) <= 0) found_other_files++; continue; } - if (db && !my_strcasecmp(extension, reg_ext)) + /* just for safety we use files_charset_info */ + if (db && !my_strcasecmp(files_charset_info, + extension, reg_ext)) { /* Drop the table nicely */ *extension= 0; // Remove extension @@ -301,6 +861,7 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db, /* Link into list */ (*tot_list_next)= table_list; tot_list_next= &table_list->next; + deleted++; } else { @@ -309,14 +870,11 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db, { goto err; } - deleted++; } } if (thd->killed || - (tot_list && mysql_rm_table_part2_with_lock(thd, tot_list, 1, 1))) - { + (tot_list && mysql_rm_table_part2_with_lock(thd, tot_list, 1, 0, 1))) goto err; - } /* Remove RAID directories */ { @@ -328,6 +886,9 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db, } my_dirend(dirp); + if (dropped_tables) + *dropped_tables= tot_list; + /* If the directory is a symbolic link, remove the link first, then remove the directory the symbolic link pointed at @@ -386,44 +947,61 @@ err: /* - Changes the current database. + Change default database. + + SYNOPSIS + mysql_change_db() + thd Thread handler + name Databasename + + DESCRIPTION + Becasue the database name may have been given directly from the + communication packet (in case of 'connect' or 'COM_INIT_DB') + we have to do end space removal in this function. 
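/*
  Illustrative aside on the order of checks in mysql_change_db() below: reject
  an empty or invalid name, check access, verify that the database directory
  exists, and only then replace the session's current database and reload the
  db.opt defaults.  The sketch models that sequence for a hypothetical Session
  type; access_allowed() and the hard-coded charset are assumptions of the
  example, not server behaviour.
*/
#include <filesystem>
#include <iostream>
#include <string>

struct Session
{
  std::string current_db;
  std::string db_charset;
};

/* Placeholder for the ACL check; always grants access in this sketch. */
static bool access_allowed(const Session &, const std::string &) { return true; }

static bool change_db(Session *s, const std::string &name, const std::string &datadir)
{
  if (name.empty())
    return false;                                        /* ER_NO_DB_ERROR     */
  if (!access_allowed(*s, name))
    return false;                                        /* ER_DBACCESS_DENIED */
  if (!std::filesystem::is_directory(datadir + "/" + name))
    return false;                                        /* ER_BAD_DB_ERROR    */
  s->current_db= name;
  s->db_charset= "latin1";     /* stand-in for reading db.opt defaults */
  return true;
}

int main()
{
  Session s;
  std::cout << (change_db(&s, "test", "/tmp") ? "ok" : "failed") << "\n";
  return 0;
}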
NOTES Do as little as possible in this function, as it is not called for the replication slave SQL thread (for that thread, setting of thd->db is done in ::exec_event() methods of log_event.cc). + + RETURN VALUES + 0 ok + 1 error */ -bool mysql_change_db(THD *thd,const char *name) +bool mysql_change_db(THD *thd, const char *name) { int length, db_length; char *dbname=my_strdup((char*) name,MYF(MY_WME)); char path[FN_REFLEN]; + HA_CREATE_INFO create; +#ifndef NO_EMBEDDED_ACCESS_CHECKS ulong db_access; +#endif DBUG_ENTER("mysql_change_db"); if (!dbname || !(db_length= strlen(dbname))) { x_free(dbname); /* purecov: inspected */ - send_error(&thd->net,ER_NO_DB_ERROR); /* purecov: inspected */ + send_error(thd,ER_NO_DB_ERROR); /* purecov: inspected */ DBUG_RETURN(1); /* purecov: inspected */ } if (check_db_name(dbname)) { - net_printf(&thd->net,ER_WRONG_DB_NAME, dbname); + net_printf(thd, ER_WRONG_DB_NAME, dbname); x_free(dbname); DBUG_RETURN(1); } DBUG_PRINT("info",("Use database: %s", dbname)); +#ifndef NO_EMBEDDED_ACCESS_CHECKS if (test_all_bits(thd->master_access,DB_ACLS)) db_access=DB_ACLS; else - db_access= (acl_get(thd->host,thd->ip,(char*) &thd->remote.sin_addr, - thd->priv_user,dbname,0) | + db_access= (acl_get(thd->host,thd->ip, thd->priv_user,dbname,0) | thd->master_access); if (!(db_access & DB_ACLS) && (!grant_option || check_grant_db(thd,dbname))) { - net_printf(&thd->net,ER_DBACCESS_DENIED_ERROR, + net_printf(thd,ER_DBACCESS_DENIED_ERROR, thd->priv_user, thd->priv_host, dbname); @@ -434,21 +1012,29 @@ bool mysql_change_db(THD *thd,const char *name) my_free(dbname,MYF(0)); DBUG_RETURN(1); } - +#endif (void) sprintf(path,"%s/%s",mysql_data_home,dbname); length=unpack_dirname(path,path); // Convert if not unix if (length && path[length-1] == FN_LIBCHAR) path[length-1]=0; // remove ending '\' - if (access(path,F_OK)) + if (my_access(path,F_OK)) { - net_printf(&thd->net,ER_BAD_DB_ERROR,dbname); + net_printf(thd,ER_BAD_DB_ERROR,dbname); my_free(dbname,MYF(0)); DBUG_RETURN(1); } - send_ok(&thd->net); + send_ok(thd); x_free(thd->db); - thd->db=dbname; + thd->db=dbname; // THD::~THD will free this thd->db_length=db_length; +#ifndef NO_EMBEDDED_ACCESS_CHECKS thd->db_access=db_access; +#endif + strmov(path+unpack_dirname(path,path), MY_DB_OPT_FILE); + load_db_opt(thd, path, &create); + thd->db_charset= create.default_table_charset ? 
+ create.default_table_charset : + thd->variables.collation_server; + thd->variables.collation_database= thd->db_charset; DBUG_RETURN(0); } diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index 166a0e130e3..b085d37be78 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -27,35 +27,42 @@ #include "ha_innodb.h" #include "sql_select.h" -int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, ORDER *order, - ha_rows limit, ulong options) +int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, + SQL_LIST *order, ha_rows limit, ulong options) { int error; TABLE *table; SQL_SELECT *select=0; READ_RECORD info; bool using_limit=limit != HA_POS_ERROR; - bool transactional_table, log_delayed, safe_update; + bool transactional_table, log_delayed, safe_update, const_cond; ha_rows deleted; + uint usable_index= MAX_KEY; DBUG_ENTER("mysql_delete"); - if (((safe_update=thd->options & OPTION_SAFE_UPDATES)) && !conds) - { - send_error(&thd->net,ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE); - DBUG_RETURN(1); - } - - if (!(table = open_ltable(thd, table_list, table_list->lock_type))) + if ((open_and_lock_tables(thd, table_list))) DBUG_RETURN(-1); + table= table_list->table; table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK); thd->proc_info="init"; table->map=1; - if (setup_conds(thd,table_list,&conds) || setup_ftfuncs(thd)) - DBUG_RETURN(-1); + + if ((error= mysql_prepare_delete(thd, table_list, &conds))) + DBUG_RETURN(error); + + const_cond= (!conds || conds->const_item()); + safe_update=test(thd->options & OPTION_SAFE_UPDATES); + if (safe_update && const_cond) + { + send_error(thd,ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE); + DBUG_RETURN(1); + } + + thd->lex->select_lex.no_error= thd->lex->ignore; /* Test if the user wants to delete all rows */ - if (!using_limit && (!conds || (conds->const_item() && conds->val_int())) && - !(specialflag & (SPECIAL_NO_NEW_FUNC | SPECIAL_SAFE_MODE)) && !safe_update) + if (!using_limit && const_cond && (!conds || conds->val_int()) && + !(specialflag & (SPECIAL_NO_NEW_FUNC | SPECIAL_SAFE_MODE))) { deleted= table->file->records; if (!(error=table->file->delete_all_rows())) @@ -72,35 +79,35 @@ int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, ORDER *order, /* Handler didn't support fast delete; Delete rows one by one */ } - table->used_keys=table->quick_keys=0; // Can't use 'only index' + table->used_keys.clear_all(); + table->quick_keys.clear_all(); // Can't use 'only index' select=make_select(table,0,0,conds,&error); if (error) DBUG_RETURN(-1); - if ((select && select->check_quick(thd, - test(thd->options & OPTION_SAFE_UPDATES), - limit)) || - !limit) + if ((select && select->check_quick(thd, safe_update, limit)) || !limit) { delete select; - send_ok(&thd->net,0L); + free_underlaid_joins(thd, &thd->lex->select_lex); + send_ok(thd,0L); DBUG_RETURN(0); // Nothing to delete } /* If running in safe sql mode, don't allow updates without keys */ - if (!table->quick_keys) + if (table->quick_keys.is_clear_all()) { - thd->lex.select_lex.options|=QUERY_NO_INDEX_USED; + thd->server_status|=SERVER_QUERY_NO_INDEX_USED; if (safe_update && !using_limit) { delete select; - send_error(&thd->net,ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE); + free_underlaid_joins(thd, &thd->lex->select_lex); + send_error(thd,ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE); DBUG_RETURN(1); } } if (options & OPTION_QUICK) (void) table->file->extra(HA_EXTRA_QUICK); - if (order) + if (order && order->elements) { uint length; SORT_FIELD *sortorder; @@ -113,33 +120,55 @@ int mysql_delete(THD *thd, TABLE_LIST 
*table_list, COND *conds, ORDER *order, tables.table = table; tables.alias = table_list->alias; - table->io_cache = (IO_CACHE *) my_malloc(sizeof(IO_CACHE), - MYF(MY_FAE | MY_ZEROFILL)); - if (setup_order(thd, &tables, fields, all_fields, order) || - !(sortorder=make_unireg_sortorder(order, &length)) || - (table->found_records = filesort(table, sortorder, length, - select, 0L, HA_POS_ERROR, - &examined_rows)) - == HA_POS_ERROR) + if (thd->lex->select_lex.setup_ref_array(thd, order->elements) || + setup_order(thd, thd->lex->select_lex.ref_pointer_array, &tables, + fields, all_fields, (ORDER*) order->first)) { delete select; + free_underlaid_joins(thd, &thd->lex->select_lex); DBUG_RETURN(-1); // This will force out message } - /* - Filesort has already found and selected the rows we want to delete, - so we don't need the where clause - */ - delete select; - select= 0; + + if (!select && limit != HA_POS_ERROR) + usable_index= get_index_for_order(table, (ORDER*)(order->first), limit); + + if (usable_index == MAX_KEY) + { + table->sort.io_cache= (IO_CACHE *) my_malloc(sizeof(IO_CACHE), + MYF(MY_FAE | MY_ZEROFILL)); + + if ( !(sortorder=make_unireg_sortorder((ORDER*) order->first, &length)) || + (table->sort.found_records = filesort(thd, table, sortorder, length, + select, HA_POS_ERROR, + &examined_rows)) + == HA_POS_ERROR) + { + delete select; + free_underlaid_joins(thd, &thd->lex->select_lex); + DBUG_RETURN(-1); // This will force out message + } + /* + Filesort has already found and selected the rows we want to delete, + so we don't need the where clause + */ + delete select; + select= 0; + } } - init_read_record(&info,thd,table,select,1,1); + if (usable_index==MAX_KEY) + init_read_record(&info,thd,table,select,1,1); + else + init_read_record_idx(&info, thd, table, 1, usable_index); + deleted=0L; - init_ftfuncs(thd,1); + init_ftfuncs(thd, &thd->lex->select_lex, 1); thd->proc_info="updating"; - while (!(error=info.read_record(&info)) && !thd->killed) + while (!(error=info.read_record(&info)) && !thd->killed && + !thd->net.report_error) { - if (!(select && select->skipp_record())) + // thd->net.report_error is tested to disallow delete row on error + if (!(select && select->skip_record())&& !thd->net.report_error ) { if (!(error=table->file->delete_row(table->record[0]))) { @@ -177,50 +206,94 @@ int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, ORDER *order, (void) table->file->extra(HA_EXTRA_NORMAL); cleanup: + /* + Invalidate the table in the query cache if something changed. This must + be before binlog writing and ha_autocommit_... + */ + if (deleted) + { + query_cache_invalidate3(thd, table_list, 1); + } + + delete select; transactional_table= table->file->has_transactions(); log_delayed= (transactional_table || table->tmp_table); - if (deleted && (error <= 0 || !transactional_table)) + /* + We write to the binary log even if we deleted no row, because maybe the + user is using this command to ensure that a table is clean on master *and + on slave*. Think of the case of a user having played separately with the + master's table and slave's table and wanting to take a fresh identical + start now. + error < 0 means "really no error". error <= 0 means "maybe some error". 
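/*
  Illustrative aside: the condition that follows this comment is easy to
  misread, so the tiny sketch below just evaluates it for a few cases.
  error < 0 means "really no error", error <= 0 means "maybe some error", and a
  non-transactional table forces logging even on error because its changes
  cannot be rolled back.  This is an illustration, not server code.
*/
#include <cstdio>

static bool should_write_binlog(long deleted, int error, bool transactional_table)
{
  return (deleted || error < 0) && (error <= 0 || !transactional_table);
}

int main()
{
  std::printf("%d\n", (int) should_write_binlog(0, -1, true));   /* 1: clean run, zero rows      */
  std::printf("%d\n", (int) should_write_binlog(5, 0, true));    /* 1: rows deleted, maybe-error */
  std::printf("%d\n", (int) should_write_binlog(5, 1, true));    /* 0: error on transactional    */
  std::printf("%d\n", (int) should_write_binlog(5, 1, false));   /* 1: error, non-transactional  */
  return 0;
}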
+ */ + if ((deleted || (error < 0)) && (error <= 0 || !transactional_table)) { mysql_update_log.write(thd,thd->query, thd->query_length); if (mysql_bin_log.is_open()) { if (error <= 0) thd->clear_error(); - Query_log_event qinfo(thd, thd->query, thd->query_length, - log_delayed); + Query_log_event qinfo(thd, thd->query, thd->query_length, + log_delayed, FALSE); if (mysql_bin_log.write(&qinfo) && transactional_table) error=1; } if (!log_delayed) thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; } + free_underlaid_joins(thd, &thd->lex->select_lex); if (transactional_table) { if (ha_autocommit_or_rollback(thd,error >= 0)) error=1; } - /* - Store table for future invalidation or invalidate it in - the query cache if something changed - */ - if (deleted) - { - query_cache_invalidate3(thd, table_list, 1); - } if (thd->lock) { mysql_unlock_tables(thd, thd->lock); thd->lock=0; } - delete select; - if (error >= 0) // Fatal error - send_error(&thd->net,thd->killed ? ER_SERVER_SHUTDOWN: 0); + if (error >= 0 || thd->net.report_error) + send_error(thd,thd->killed ? ER_SERVER_SHUTDOWN: 0); else { - send_ok(&thd->net,deleted); + send_ok(thd,deleted); DBUG_PRINT("info",("%d records deleted",deleted)); } + DBUG_RETURN(0); +} + + +/* + Prepare items in DELETE statement + + SYNOPSIS + mysql_prepare_delete() + thd - thread handler + table_list - global table list + conds - conditions + + RETURN VALUE + 0 - OK + 1 - error (message is sent to user) + -1 - error (message is not sent to user) +*/ +int mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds) +{ + TABLE_LIST *delete_table_list= ((TABLE_LIST*) thd->lex-> + select_lex.table_list.first); + DBUG_ENTER("mysql_prepare_delete"); + + thd->allow_sum_func= 0; + if (setup_conds(thd, delete_table_list, conds) || + setup_ftfuncs(&thd->lex->select_lex)) + DBUG_RETURN(-1); + if (find_real_table_in_list(table_list->next, + table_list->db, table_list->real_name)) + { + my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->real_name); + DBUG_RETURN(-1); + } DBUG_RETURN(0); } @@ -248,9 +321,10 @@ multi_delete::multi_delete(THD *thd_arg, TABLE_LIST *dt, int -multi_delete::prepare(List<Item> &values) +multi_delete::prepare(List<Item> &values, SELECT_LEX_UNIT *u) { DBUG_ENTER("multi_delete::prepare"); + unit= u; do_delete= 1; thd->proc_info="deleting from main table"; DBUG_RETURN(0); @@ -285,7 +359,7 @@ multi_delete::initialize_tables(JOIN *join) tbl->no_keyread=1; /* Don't use record cache */ tbl->no_cache= 1; - tbl->used_keys= 0; + tbl->used_keys.clear_all(); if (tbl->file->has_transactions()) log_delayed= transactional_tables= 1; else if (tbl->tmp_table != NO_TMP_TABLE) @@ -304,8 +378,8 @@ multi_delete::initialize_tables(JOIN *join) table->file->ref_length, MEM_STRIP_BUF_SIZE); } - init_ftfuncs(thd,1); - DBUG_RETURN(thd->fatal_error != 0); + init_ftfuncs(thd, thd->lex->current_select, 1); + DBUG_RETURN(thd->is_fatal_error != 0); } @@ -377,7 +451,7 @@ void multi_delete::send_error(uint errcode,const char *err) DBUG_ENTER("multi_delete::send_error"); /* First send error what ever it is ... */ - ::send_error(&thd->net,errcode,err); + ::send_error(thd,errcode,err); /* If nothing deleted return */ if (!deleted) @@ -488,10 +562,19 @@ bool multi_delete::send_eof() thd->proc_info="end"; /* + We must invalidate the query cache before binlog writing and + ha_autocommit_... 
+ */ + if (deleted) + query_cache_invalidate3(thd, delete_tables, 1); + + /* Write the SQL statement to the binlog if we deleted rows and we succeeded, or also in an error case when there was a non-transaction-safe table involved, since modifications in it cannot be rolled back. + Note that if we deleted nothing we don't write to the binlog (TODO: + fix this). */ if (deleted && (error <= 0 || normal_tables)) { @@ -501,7 +584,7 @@ bool multi_delete::send_eof() if (error <= 0) thd->clear_error(); Query_log_event qinfo(thd, thd->query, thd->query_length, - log_delayed); + log_delayed, FALSE); if (mysql_bin_log.write(&qinfo) && !normal_tables) local_error=1; // Log write failed: roll back the SQL statement } @@ -512,14 +595,11 @@ bool multi_delete::send_eof() if (transactional_tables) if (ha_autocommit_or_rollback(thd,local_error > 0)) local_error=1; - - if (deleted) - query_cache_invalidate3(thd, delete_tables, 1); if (local_error) - ::send_error(&thd->net); + ::send_error(thd); else - ::send_ok(&thd->net,deleted); + ::send_ok(thd, deleted); return 0; } @@ -556,10 +636,13 @@ int mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok) TABLE *table= *table_ptr; table->file->info(HA_STATUS_AUTO | HA_STATUS_NO_LOCK); db_type table_type=table->db_type; - + if (!ha_supports_generate(table_type)) + goto trunc_by_del; strmov(path,table->path); *table_ptr= table->next; // Unlink table from list close_temporary(table,0); + if (thd->slave_thread) + --slave_open_temp_tables; *fn_ext(path)=0; // Remove the .frm extension ha_create_table(path, &create_info,1); // We don't need to call invalidate() because this table is not in cache @@ -567,15 +650,15 @@ int mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok) table_list->real_name, 1)))) (void) rm_temporary_table(table_type, path); /* - Sasha: if we return here we will not have binloged the truncation and - we will not send_ok() to the client. + If we return here we will not have logged the truncation to the bin log + and we will not send_ok() to the client. */ goto end; } (void) sprintf(path,"%s/%s/%s%s",mysql_data_home,table_list->db, table_list->real_name,reg_ext); - fn_format(path,path,"","",4); + fn_format(path, path, "", "", MY_UNPACK_FILENAME); if (!dont_send_ok) { @@ -587,16 +670,7 @@ int mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok) DBUG_RETURN(-1); } if (!ha_supports_generate(table_type)) - { - /* Probably InnoDB table */ - table_list->lock_type= TL_WRITE; - ulong save_options= thd->options; - thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_NOT_AUTOCOMMIT); - int res= mysql_delete(thd, table_list, (COND*) 0, (ORDER*) 0, - HA_POS_ERROR, 0); - thd->options= save_options; - DBUG_RETURN(res); - } + goto trunc_by_del; if (lock_and_wait_for_table_name(thd, table_list)) DBUG_RETURN(-1); } @@ -615,10 +689,10 @@ end: { thd->clear_error(); Query_log_event qinfo(thd, thd->query, thd->query_length, - thd->tmp_table); + thd->tmp_table, FALSE); mysql_bin_log.write(&qinfo); } - send_ok(&thd->net); // This should return record count + send_ok(thd); // This should return record count } VOID(pthread_mutex_lock(&LOCK_open)); unlock_table_name(thd, table_list); @@ -631,4 +705,16 @@ end: VOID(pthread_mutex_unlock(&LOCK_open)); } DBUG_RETURN(error ? 
-1 : 0); + + trunc_by_del: + /* Probably InnoDB table */ + ulong save_options= thd->options; + table_list->lock_type= TL_WRITE; + thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_NOT_AUTOCOMMIT); + ha_enable_transaction(thd, FALSE); + error= mysql_delete(thd, table_list, (COND*) 0, (SQL_LIST*) 0, + HA_POS_ERROR, 0); + ha_enable_transaction(thd, TRUE); + thd->options= save_options; + DBUG_RETURN(error); } diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc new file mode 100644 index 00000000000..e9f9b432c21 --- /dev/null +++ b/sql/sql_derived.cc @@ -0,0 +1,241 @@ +/* Copyright (C) 2002-2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + + +/* + Derived tables + These were introduced by Sinisa <sinisa@mysql.com> +*/ + + +#include "mysql_priv.h" +#include "sql_select.h" + +static int mysql_derived(THD *thd, LEX *lex, SELECT_LEX_UNIT *s, + TABLE_LIST *t); + +/* + Resolve derived tables in all queries + + SYNOPSIS + mysql_handle_derived() + lex LEX for this thread + + RETURN + 0 ok + -1 Error + 1 Error and error message given +*/ + +int +mysql_handle_derived(LEX *lex) +{ + if (lex->derived_tables) + { + for (SELECT_LEX *sl= lex->all_selects_list; + sl; + sl= sl->next_select_in_list()) + { + for (TABLE_LIST *cursor= sl->get_table_list(); + cursor; + cursor= cursor->next) + { + int res; + if (cursor->derived && (res=mysql_derived(lex->thd, lex, + cursor->derived, + cursor))) + { + return res; + } + } + if (lex->describe) + { + /* + Force join->join_tmp creation, because we will use this JOIN + twice for EXPLAIN and we have to have unchanged join for EXPLAINing + */ + sl->uncacheable|= UNCACHEABLE_EXPLAIN; + sl->master_unit()->uncacheable|= UNCACHEABLE_EXPLAIN; + } + } + } + return 0; +} + + +/* + Resolve derived tables in all queries + + SYNOPSIS + mysql_derived(THD *thd, LEX *lex, SELECT_LEX_UNIT *unit, TABLE_LIST *t) + thd Thread handle + lex LEX for this thread + unit node that contains all SELECT's for derived tables + t TABLE_LIST for the upper SELECT + + IMPLEMENTATION + Derived table is resolved with temporary table. It is created based on the + queries defined. After temporary table is created, if this is not EXPLAIN, + then the entire unit / node is deleted. unit is deleted if UNION is used + for derived table and node is deleted is it is a simple SELECT. + + After table creation, the above TABLE_LIST is updated with a new table. + + This function is called before any command containing derived table + is executed. 
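/*
  Illustrative aside on the flow implemented by mysql_derived() below: prepare
  the underlying unit, create a temporary table shaped like its result, execute
  the unit into that table, then splice the temporary table into the outer
  query's table list.  The sketch mirrors that flow with toy types; Query,
  TempTable and TableRef are inventions of the example, not server structures.
*/
#include <memory>
#include <string>
#include <vector>

struct Query     { std::string text; };
struct TempTable { std::vector<std::vector<std::string>> rows; };

struct TableRef
{
  std::string alias;
  const Query *derived;                 /* non-null for a derived table */
  std::shared_ptr<TempTable> table;     /* filled in once materialized  */
};

/* Stand-in for executing the derived query and collecting its rows. */
static std::shared_ptr<TempTable> execute_into_temp(const Query &q)
{
  auto t= std::make_shared<TempTable>();
  t->rows.push_back({q.text});          /* placeholder "result row" */
  return t;
}

static void handle_derived(std::vector<TableRef> &tables)
{
  for (TableRef &ref : tables)
    if (ref.derived && !ref.table)
      ref.table= execute_into_temp(*ref.derived);   /* materialize once */
}

int main()
{
  Query q{"SELECT 1"};
  std::vector<TableRef> tables;
  tables.push_back(TableRef{"dt", &q, nullptr});
  handle_derived(tables);
  return tables[0].table ? 0 : 1;
}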
+ + Derived tables is stored in thd->derived_tables and freed in + close_thread_tables() + + RETURN + 0 ok + 1 Error + -1 Error and error message given +*/ + + +static int mysql_derived(THD *thd, LEX *lex, SELECT_LEX_UNIT *unit, + TABLE_LIST *org_table_list) +{ + SELECT_LEX *first_select= unit->first_select(); + TABLE *table; + int res; + select_union *derived_result; + bool is_union= first_select->next_select() && + first_select->next_select()->linkage == UNION_TYPE; + SELECT_LEX *save_current_select= lex->current_select; + DBUG_ENTER("mysql_derived"); + + if (!(derived_result= new select_union(0))) + DBUG_RETURN(1); // out of memory + + // st_select_lex_unit::prepare correctly work for single select + if ((res= unit->prepare(thd, derived_result, 0, org_table_list->alias))) + goto exit; + + + derived_result->tmp_table_param.init(); + derived_result->tmp_table_param.field_count= unit->types.elements; + /* + Temp table is created so that it hounours if UNION without ALL is to be + processed + + As 'distinct' parameter we always pass FALSE (0), because underlying + query will control distinct condition by itself. Correct test of + distinct underlying query will be is_union && + !unit->union_distinct->next_select() (i.e. it is union and last distinct + SELECT is last SELECT of UNION). + */ + if (!(table= create_tmp_table(thd, &derived_result->tmp_table_param, + unit->types, (ORDER*) 0, + FALSE, 1, + (first_select->options | thd->options | + TMP_TABLE_ALL_COLUMNS), + HA_POS_ERROR, + org_table_list->alias))) + { + res= -1; + goto exit; + } + derived_result->set_table(table); + + /* + if it is preparation PS only then we do not need real data and we + can skip execution (and parameters is not defined, too) + */ + if (! thd->current_arena->is_stmt_prepare()) + { + if (is_union) + { + // execute union without clean up + if (!(res= unit->prepare(thd, derived_result, SELECT_NO_UNLOCK, ""))) + res= unit->exec(); + } + else + { + unit->offset_limit_cnt= first_select->offset_limit; + unit->select_limit_cnt= first_select->select_limit+ + first_select->offset_limit; + if (unit->select_limit_cnt < first_select->select_limit) + unit->select_limit_cnt= HA_POS_ERROR; + if (unit->select_limit_cnt == HA_POS_ERROR) + first_select->options&= ~OPTION_FOUND_ROWS; + + lex->current_select= first_select; + res= mysql_select(thd, &first_select->ref_pointer_array, + (TABLE_LIST*) first_select->table_list.first, + first_select->with_wild, + first_select->item_list, first_select->where, + (first_select->order_list.elements+ + first_select->group_list.elements), + (ORDER *) first_select->order_list.first, + (ORDER *) first_select->group_list.first, + first_select->having, (ORDER*) NULL, + (first_select->options | thd->options | + SELECT_NO_UNLOCK), + derived_result, unit, first_select); + } + } + + if (!res) + { + /* + Here we entirely fix both TABLE_LIST and list of SELECT's as + there were no derived tables + */ + if (derived_result->flush()) + res= 1; + else + { + org_table_list->real_name= table->real_name; + org_table_list->table= table; + if (org_table_list->table_list) + { + org_table_list->table_list->real_name= table->real_name; + org_table_list->table_list->table= table; + } + table->derived_select_number= first_select->select_number; + table->tmp_table= TMP_TABLE; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + table->grant.privilege= SELECT_ACL; +#endif + org_table_list->db= (char *)""; + // Force read of table stats in the optimizer + table->file->info(HA_STATUS_VARIABLE); + } + + if (!lex->describe) + unit->cleanup(); 
+ if (res) + free_tmp_table(thd, table); + else + { + /* Add new temporary table to list of open derived tables */ + table->next= thd->derived_tables; + thd->derived_tables= table; + } + } + else + { + free_tmp_table(thd, table); + unit->cleanup(); + } + +exit: + delete derived_result; + lex->current_select= save_current_select; + DBUG_RETURN(res); +} diff --git a/sql/sql_do.cc b/sql/sql_do.cc index 70124c2d796..af72632199f 100644 --- a/sql/sql_do.cc +++ b/sql/sql_do.cc @@ -18,17 +18,18 @@ /* Execute DO statement */ #include "mysql_priv.h" -#include "sql_acl.h" int mysql_do(THD *thd, List<Item> &values) { List_iterator<Item> li(values); Item *value; DBUG_ENTER("mysql_do"); - if (setup_fields(thd,0, values, 0, 0, 0)) + if (setup_fields(thd, 0, 0, values, 0, 0, 0)) DBUG_RETURN(-1); while ((value = li++)) value->val_int(); - send_ok(&thd->net); + free_underlaid_joins(thd, &thd->lex->select_lex); + thd->clear_error(); // DO always is OK + send_ok(thd); DBUG_RETURN(0); } diff --git a/sql/sql_error.cc b/sql/sql_error.cc new file mode 100644 index 00000000000..b24d15b6e3b --- /dev/null +++ b/sql/sql_error.cc @@ -0,0 +1,220 @@ +/* Copyright (C) 1995-2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/********************************************************************** +This file contains the implementation of error and warnings related + + - Whenever an error or warning occurred, it pushes it to a warning list + that the user can retrieve with SHOW WARNINGS or SHOW ERRORS. + + - For each statement, we return the number of warnings generated from this + command. Note that this can be different from @@warning_count as + we reset the warning list only for questions that uses a table. + This is done to allow on to do: + INSERT ...; + SELECT @@warning_count; + SHOW WARNINGS; + (If we would reset after each command, we could not retrieve the number + of warnings) + + - When client requests the information using SHOW command, then + server processes from this list and returns back in the form of + resultset. + + Supported syntaxes: + + SHOW [COUNT(*)] ERRORS [LIMIT [offset,] rows] + SHOW [COUNT(*)] WARNINGS [LIMIT [offset,] rows] + SELECT @@warning_count, @@error_count; + +***********************************************************************/ + +#include "mysql_priv.h" + +/* + Store a new message in an error object + + This is used to in group_concat() to register how many warnings we actually + got after the query has been executed. +*/ + +void MYSQL_ERROR::set_msg(THD *thd, const char *msg_arg) +{ + msg= strdup_root(&thd->warn_root, msg_arg); +} + + +/* + Reset all warnings for the thread + + SYNOPSIS + mysql_reset_errors() + thd Thread handle + + IMPLEMENTATION + Don't reset warnings if this has already been called for this query. 
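/*
  Illustrative aside on the bookkeeping in mysql_reset_errors() and
  push_warning() in this file: the list is cleared once per statement (guarded
  by warn_id), new messages are stored only while the list is shorter than
  max_error_count, but the per-level and total counters are always bumped.
  The sketch keeps the same bookkeeping with standard containers; WarningArea
  and its fields are invented names, and the level set is simplified.
*/
#include <cstdio>
#include <deque>
#include <string>
#include <utility>

enum WarnLevel { WARN_NOTE= 0, WARN_WARNING= 1, WARN_ERROR= 2 };

struct WarningArea
{
  std::deque<std::pair<WarnLevel, std::string>> list;
  unsigned long counts[3]= {0, 0, 0};
  unsigned long total= 0;
  unsigned long max_error_count= 64;

  void push(WarnLevel level, const std::string &msg)
  {
    if (list.size() < max_error_count)   /* keep only the first N messages */
      list.emplace_back(level, msg);
    counts[level]++;                     /* counters are never capped      */
    total++;
  }

  void reset()                           /* new statement: start clean     */
  {
    list.clear();
    counts[0]= counts[1]= counts[2]= 0;
    total= 0;
  }
};

int main()
{
  WarningArea wa;
  wa.max_error_count= 1;
  wa.push(WARN_WARNING, "first");
  wa.push(WARN_WARNING, "second");       /* counted but not stored */
  std::printf("stored=%zu total=%lu\n", wa.list.size(), wa.total);
  return 0;
}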
+ This may happen if one gets a warning during the parsing stage, + in which case push_warnings() has already called this function. +*/ + +void mysql_reset_errors(THD *thd) +{ + DBUG_ENTER("mysql_reset_errors"); + if (thd->query_id != thd->warn_id) + { + thd->warn_id= thd->query_id; + free_root(&thd->warn_root,MYF(0)); + bzero((char*) thd->warn_count, sizeof(thd->warn_count)); + thd->warn_list.empty(); + thd->row_count= 1; // by default point to row 1 + } + DBUG_VOID_RETURN; +} + + +/* + Push the warning/error to error list if there is still room in the list + + SYNOPSIS + push_warning() + thd Thread handle + level Severity of warning (note, warning, error ...) + code Error number + msg Clear error message + + RETURN + pointer on MYSQL_ERROR object +*/ + +MYSQL_ERROR *push_warning(THD *thd, MYSQL_ERROR::enum_warning_level level, + uint code, const char *msg) +{ + MYSQL_ERROR *err= 0; + DBUG_ENTER("push_warning"); + + if (level == MYSQL_ERROR::WARN_LEVEL_NOTE && !(thd->options & OPTION_SQL_NOTES)) + return(0); + + query_cache_abort(&thd->net); + + if (thd->query_id != thd->warn_id) + mysql_reset_errors(thd); + + if (thd->warn_list.elements < thd->variables.max_error_count) + { + /* + The following code is here to change the allocation to not + use the thd->mem_root, which is freed after each query + */ + MEM_ROOT *old_root= thd->mem_root; + thd->mem_root= &thd->warn_root; + err= new MYSQL_ERROR(thd, code, level, msg); + if (err) + thd->warn_list.push_back(err); + thd->mem_root= old_root; + } + thd->warn_count[(uint) level]++; + thd->total_warn_count++; + DBUG_RETURN(err); +} + +/* + Push the warning/error to error list if there is still room in the list + + SYNOPSIS + push_warning_printf() + thd Thread handle + level Severity of warning (note, warning, error ...) + code Error number + msg Clear error message +*/ + +void push_warning_printf(THD *thd, MYSQL_ERROR::enum_warning_level level, + uint code, const char *format, ...) 
+{ + va_list args; + char warning[ERRMSGSIZE+20]; + DBUG_ENTER("push_warning_printf"); + DBUG_PRINT("enter",("warning: %u", code)); + + va_start(args,format); + my_vsnprintf(warning, sizeof(warning), format, args); + va_end(args); + push_warning(thd, level, code, warning); + DBUG_VOID_RETURN; +} + + +/* + Send all notes, errors or warnings to the client in a result set + + SYNOPSIS + mysqld_show_warnings() + thd Thread handler + levels_to_show Bitmap for which levels to show + + DESCRIPTION + Takes into account the current LIMIT + + RETURN VALUES + 0 ok + 1 Error sending data to client +*/ + +static const char *warning_level_names[]= {"Note", "Warning", "Error", "?"}; +static int warning_level_length[]= { 4, 7, 5, 1 }; + +my_bool mysqld_show_warnings(THD *thd, ulong levels_to_show) +{ + List<Item> field_list; + DBUG_ENTER("mysqld_show_warnings"); + + field_list.push_back(new Item_empty_string("Level", 7)); + field_list.push_back(new Item_return_int("Code",4, MYSQL_TYPE_LONG)); + field_list.push_back(new Item_empty_string("Message",MYSQL_ERRMSG_SIZE)); + + if (thd->protocol->send_fields(&field_list,1)) + DBUG_RETURN(1); + + MYSQL_ERROR *err; + SELECT_LEX *sel= &thd->lex->select_lex; + ha_rows offset= sel->offset_limit, limit= sel->select_limit; + Protocol *protocol=thd->protocol; + + List_iterator_fast<MYSQL_ERROR> it(thd->warn_list); + while ((err= it++)) + { + /* Skip levels that the user is not interested in */ + if (!(levels_to_show & ((ulong) 1 << err->level))) + continue; + if (offset) + { + offset--; + continue; + } + if (limit-- == 0) + break; + protocol->prepare_for_resend(); + protocol->store(warning_level_names[err->level], + warning_level_length[err->level], system_charset_info); + protocol->store((uint32) err->code); + protocol->store(err->msg, strlen(err->msg), system_charset_info); + if (protocol->write()) + DBUG_RETURN(1); + } + send_eof(thd); + DBUG_RETURN(0); +} diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc index 58b75e667b5..1c5381a9fa0 100644 --- a/sql/sql_handler.cc +++ b/sql/sql_handler.cc @@ -1,5 +1,4 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB - +/* Copyright (C) 2000-2004 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or @@ -65,7 +64,7 @@ #define HANDLER_TABLES_HASH_SIZE 120 static enum enum_ha_read_modes rkey_to_rnext[]= - { RNEXT_SAME, RNEXT, RPREV, RNEXT, RPREV, RNEXT, RPREV }; + { RNEXT_SAME, RNEXT, RPREV, RNEXT, RPREV, RNEXT, RPREV, RPREV }; #define HANDLER_TABLES_HACK(thd) { \ TABLE *tmp=thd->open_tables; \ @@ -148,12 +147,8 @@ static void mysql_ha_hash_free(TABLE_LIST *tables) int mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) { TABLE_LIST *hash_tables; - char *db; - char *name; - char *alias; - uint dblen; - uint namelen; - uint aliaslen; + char *db, *name, *alias; + uint dblen, namelen, aliaslen, counter; int err; DBUG_ENTER("mysql_ha_open"); DBUG_PRINT("enter",("'%s'.'%s' as '%s' reopen: %d", @@ -165,7 +160,8 @@ int mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) /* HASH entries are of type TABLE_LIST. 
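/*
  Illustrative aside: the per-connection hash set up just after this comment
  keeps one TABLE_LIST entry per opened HANDLER table, so that later
  HANDLER ... READ / CLOSE statements can find the table again by alias.  The
  sketch below keeps the same idea as a per-session map from alias to an
  opened-table record; OpenTable and HandlerRegistry are invented names, not
  the server's TABLE_LIST hash.
*/
#include <iostream>
#include <string>
#include <unordered_map>

struct OpenTable
{
  std::string db;
  std::string name;
};

class HandlerRegistry                       /* one instance per session */
{
  std::unordered_map<std::string, OpenTable> by_alias_;

public:
  bool open(const std::string &alias, const OpenTable &t)
  {
    return by_alias_.emplace(alias, t).second;    /* false if alias reused */
  }
  const OpenTable *find(const std::string &alias) const
  {
    auto it= by_alias_.find(alias);
    return it == by_alias_.end() ? nullptr : &it->second;
  }
  bool close(const std::string &alias)
  {
    return by_alias_.erase(alias) != 0;
  }
};

int main()
{
  HandlerRegistry reg;
  reg.open("h1", OpenTable{"test", "t1"});
  const OpenTable *t= reg.find("h1");
  std::cout << (t ? t->db + "." + t->name : std::string("not found")) << "\n";
  return reg.close("h1") ? 0 : 1;
}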
*/ - if (hash_init(&thd->handler_tables_hash, HANDLER_TABLES_HASH_SIZE, 0, 0, + if (hash_init(&thd->handler_tables_hash, &my_charset_latin1, + HANDLER_TABLES_HASH_SIZE, 0, 0, (hash_get_key) mysql_ha_hash_get_key, (hash_free_key) mysql_ha_hash_free, 0)) goto err; @@ -189,7 +185,7 @@ int mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) */ DBUG_ASSERT(! tables->table); HANDLER_TABLES_HACK(thd); - err=open_tables(thd,tables); + err=open_tables(thd, tables, &counter); HANDLER_TABLES_HACK(thd); if (err) goto err; @@ -226,7 +222,7 @@ int mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) memcpy(hash_tables->alias, tables->alias, aliaslen); /* add to hash */ - if (hash_insert(&thd->handler_tables_hash, (byte*) hash_tables)) + if (my_hash_insert(&thd->handler_tables_hash, (byte*) hash_tables)) { mysql_ha_close(thd, tables); goto err; @@ -234,7 +230,7 @@ int mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) } if (! reopen) - send_ok(&thd->net); + send_ok(thd); DBUG_PRINT("exit",("OK")); DBUG_RETURN(0); @@ -294,6 +290,7 @@ int mysql_ha_close(THD *thd, TABLE_LIST *tables) { if (*table_ptr) { + (*table_ptr)->file->ha_index_or_rnd_end(); VOID(pthread_mutex_lock(&LOCK_open)); if (close_thread_table(thd, table_ptr)) { @@ -325,7 +322,7 @@ int mysql_ha_close(THD *thd, TABLE_LIST *tables) DBUG_RETURN(-1); } - send_ok(&thd->net); + send_ok(thd); DBUG_PRINT("exit", ("OK")); DBUG_RETURN(0); } @@ -359,15 +356,22 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables, TABLE_LIST *hash_tables; TABLE **table_ptr; TABLE *table; - int err; - int keyno=-1; - uint num_rows; MYSQL_LOCK *lock; + List<Item> list; + Protocol *protocol= thd->protocol; + char buff[MAX_FIELD_WIDTH]; + String buffer(buff, sizeof(buff), system_charset_info); + int err, keyno= -1; + uint num_rows; + byte *key; + uint key_len; DBUG_ENTER("mysql_ha_read"); DBUG_PRINT("enter",("'%s'.'%s' as '%s'", tables->db, tables->real_name, tables->alias)); - List<Item> list; + LINT_INIT(key); + LINT_INIT(key_len); + list.push_front(new Item_field(NULL,NULL,"*")); List_iterator<Item> it(list); it++; @@ -433,8 +437,14 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables, if (!lock) goto err0; // mysql_lock_tables() printed error message already - if (cond && cond->fix_fields(thd,tables)) - goto err0; + if (cond) + { + if (table->query_id != thd->query_id) + cond->cleanup(); // File was reopened + if ((!cond->fixed && + cond->fix_fields(thd, tables, &cond)) || cond->check_cols(1)) + goto err0; + } if (keyname) { @@ -444,49 +454,64 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables, keyname,tables->alias); goto err0; } - table->file->index_init(keyno); } - - byte *key; - uint key_len; - LINT_INIT(key); - LINT_INIT(key_len); if (insert_fields(thd,tables,tables->db,tables->alias,&it)) goto err0; select_limit+=offset_limit; - send_fields(thd,list,1); + protocol->send_fields(&list,1); + + /* + In ::external_lock InnoDB resets the fields which tell it that + the handle is used in the HANDLER interface. Tell it again that + we are using it for HANDLER. + */ - table->file->init_table_handle_for_HANDLER(); // Only InnoDB requires it + table->file->init_table_handle_for_HANDLER(); for (num_rows=0; num_rows < select_limit; ) { - switch(mode) { + switch (mode) { + case RNEXT: + if (table->file->inited != handler::NONE) + { + err=keyname ? 
+ table->file->index_next(table->record[0]) : + table->file->rnd_next(table->record[0]); + break; + } + /* else fall through */ case RFIRST: if (keyname) + { + table->file->ha_index_or_rnd_end(); + table->file->ha_index_init(keyno); err=table->file->index_first(table->record[0]); + } else { - if (!(err=table->file->rnd_init(1))) + table->file->ha_index_or_rnd_end(); + if (!(err=table->file->ha_rnd_init(1))) err=table->file->rnd_next(table->record[0]); } mode=RNEXT; break; + case RPREV: + DBUG_ASSERT(keyname != 0); + if (table->file->inited != handler::NONE) + { + err=table->file->index_prev(table->record[0]); + break; + } + /* else fall through */ case RLAST: DBUG_ASSERT(keyname != 0); + table->file->ha_index_or_rnd_end(); + table->file->ha_index_init(keyno); err=table->file->index_last(table->record[0]); mode=RPREV; break; - case RNEXT: - err=keyname ? - table->file->index_next(table->record[0]) : - table->file->rnd_next(table->record[0]); - break; - case RPREV: - DBUG_ASSERT(keyname != 0); - err=table->file->index_prev(table->record[0]); - break; case RNEXT_SAME: /* Continue scan on "(keypart1,keypart2,...)=(c1, c2, ...) */ DBUG_ASSERT(keyname != 0); @@ -499,37 +524,42 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables, KEY_PART_INFO *key_part=keyinfo->key_part; if (key_expr->elements > keyinfo->key_parts) { - my_printf_error(ER_TOO_MANY_KEY_PARTS,ER(ER_TOO_MANY_KEY_PARTS), - MYF(0),keyinfo->key_parts); - goto err; + my_printf_error(ER_TOO_MANY_KEY_PARTS,ER(ER_TOO_MANY_KEY_PARTS), + MYF(0),keyinfo->key_parts); + goto err; } - List_iterator_fast<Item> it_ke(*key_expr); + List_iterator<Item> it_ke(*key_expr); Item *item; for (key_len=0 ; (item=it_ke++) ; key_part++) { - if (item->fix_fields(thd, tables)) - goto err; - if (item->used_tables() & ~RAND_TABLE_BIT) + // 'item' can be changed by fix_fields() call + if ((!item->fixed && + item->fix_fields(thd, tables, it_ke.ref())) || + (item= *it_ke.ref())->check_cols(1)) + goto err; + if (item->used_tables() & ~RAND_TABLE_BIT) { my_error(ER_WRONG_ARGUMENTS,MYF(0),"HANDLER ... READ"); - goto err; + goto err; } - item->save_in_field(key_part->field, 1); - key_len+=key_part->store_length; + (void) item->save_in_field(key_part->field, 1); + key_len+=key_part->store_length; } if (!(key= (byte*) thd->calloc(ALIGN_SIZE(key_len)))) { - send_error(&thd->net,ER_OUTOFMEMORY); - goto err; + send_error(thd,ER_OUTOFMEMORY); + goto err; } key_copy(key, table, keyno, key_len); + table->file->ha_index_or_rnd_end(); + table->file->ha_index_init(keyno); err=table->file->index_read(table->record[0], - key,key_len,ha_rkey_mode); + key,key_len,ha_rkey_mode); mode=rkey_to_rnext[(int)ha_rkey_mode]; break; } default: - send_error(&thd->net,ER_ILLEGAL_HA); + send_error(thd,ER_ILLEGAL_HA); goto err; } @@ -550,26 +580,25 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables, continue; if (num_rows >= offset_limit) { - String *packet = &thd->packet; Item *item; - packet->length(0); + protocol->prepare_for_resend(); it.rewind(); while ((item=it++)) { - if (item->send(thd,packet)) + if (item->send(thd->protocol, &buffer)) { - packet->free(); // Free used + protocol->free(); // Free used my_error(ER_OUT_OF_RESOURCES,MYF(0)); goto err; } } - my_net_write(&thd->net, (char*)packet->ptr(), packet->length()); + protocol->write(); } num_rows++; } ok: mysql_unlock_tables(thd,lock); - send_eof(&thd->net); + send_eof(thd); DBUG_PRINT("exit",("OK")); DBUG_RETURN(0); @@ -632,8 +661,10 @@ int mysql_ha_flush(THD *thd, TABLE_LIST *tables, uint mode_flags, while (*table_ptr) { if ((! 
*tmp_tables->db || - ! my_strcasecmp((*table_ptr)->table_cache_key, tmp_tables->db)) && - ! my_strcasecmp((*table_ptr)->real_name, tmp_tables->real_name)) + ! my_strcasecmp(&my_charset_latin1, (*table_ptr)->table_cache_key, + tmp_tables->db)) && + ! my_strcasecmp(&my_charset_latin1, (*table_ptr)->real_name, + tmp_tables->real_name)) { DBUG_PRINT("info",("*table_ptr '%s'.'%s' as '%s'", (*table_ptr)->table_cache_key, @@ -727,6 +758,7 @@ static int mysql_ha_flush_table(THD *thd, TABLE **table_ptr, uint mode_flags) } safe_mutex_assert_owner(&LOCK_open); + (*table_ptr)->file->ha_index_or_rnd_end(); if (close_thread_table(thd, table_ptr)) { /* Tell threads waiting for refresh that something has happened */ @@ -735,4 +767,3 @@ static int mysql_ha_flush_table(THD *thd, TABLE **table_ptr, uint mode_flags) DBUG_RETURN(0); } - diff --git a/sql/sql_help.cc b/sql/sql_help.cc new file mode 100644 index 00000000000..0e0d32a922d --- /dev/null +++ b/sql/sql_help.cc @@ -0,0 +1,788 @@ +/* Copyright (C) 2000 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include "mysql_priv.h" + +struct st_find_field +{ + const char *table_name, *field_name; + Field *field; +}; + +/* Used fields */ + +static struct st_find_field init_used_fields[]= +{ + { "help_topic", "help_topic_id", 0}, + { "help_topic", "name", 0}, + { "help_topic", "help_category_id", 0}, + { "help_topic", "description", 0}, + { "help_topic", "example", 0}, + + { "help_category", "help_category_id", 0}, + { "help_category", "parent_category_id", 0}, + { "help_category", "name", 0}, + + { "help_keyword", "help_keyword_id", 0}, + { "help_keyword", "name", 0}, + + { "help_relation", "help_topic_id", 0}, + { "help_relation", "help_keyword_id", 0} +}; + +enum enum_used_fields +{ + help_topic_help_topic_id= 0, + help_topic_name, + help_topic_help_category_id, + help_topic_description, + help_topic_example, + + help_category_help_category_id, + help_category_parent_category_id, + help_category_name, + + help_keyword_help_keyword_id, + help_keyword_name, + + help_relation_help_topic_id, + help_relation_help_keyword_id +}; + + +/* + Fill st_find_field structure with pointers to fields + + SYNOPSIS + init_fields() + thd Thread handler + tables list of all tables for fields + find_fields array of structures + count size of previous array + + RETURN VALUES + 0 all ok + 1 one of the fileds didn't finded +*/ + +static bool init_fields(THD *thd, TABLE_LIST *tables, + struct st_find_field *find_fields, uint count) +{ + DBUG_ENTER("init_fields"); + for (; count-- ; find_fields++) + { + TABLE_LIST *not_used; + /* We have to use 'new' here as field will be re_linked on free */ + Item_field *field= new Item_field("mysql", find_fields->table_name, + find_fields->field_name); + if (!(find_fields->field= find_field_in_tables(thd, field, tables, + ¬_used, TRUE))) + DBUG_RETURN(1); + } + DBUG_RETURN(0); +} + + +/* + Returns 
variants of found topic for help (if it is just single topic, + returns description and example, or else returns only names..) + + SYNOPSIS + memorize_variant_topic() + + thd Thread handler + topics Table of topics + count number of alredy found topics + find_fields Filled array of information for work with fields + + RETURN VALUES + names array of names of found topics (out) + + name name of found topic (out) + description description of found topic (out) + example example for found topic (out) + + NOTE + Field 'names' is set only if more than one topic is found. + Fields 'name', 'description', 'example' are set only if + found exactly one topic. +*/ + +void memorize_variant_topic(THD *thd, TABLE *topics, int count, + struct st_find_field *find_fields, + List<String> *names, + String *name, String *description, String *example) +{ + DBUG_ENTER("memorize_variant_topic"); + MEM_ROOT *mem_root= thd->mem_root; + if (count==0) + { + get_field(mem_root,find_fields[help_topic_name].field, name); + get_field(mem_root,find_fields[help_topic_description].field, description); + get_field(mem_root,find_fields[help_topic_example].field, example); + } + else + { + if (count == 1) + names->push_back(name); + String *new_name= new (thd->mem_root) String; + get_field(mem_root,find_fields[help_topic_name].field,new_name); + names->push_back(new_name); + } + DBUG_VOID_RETURN; +} + +/* + Look for topics by mask + + SYNOPSIS + search_topics() + thd Thread handler + topics Table of topics + find_fields Filled array of info for fields + select Function to test for matching help topic. + Normally 'help_topic.name like 'bit%' + + RETURN VALUES + # number of topics found + + names array of names of found topics (out) + name name of found topic (out) + description description of found topic (out) + example example for found topic (out) + + NOTE + Field 'names' is set only if more than one topic was found. + Fields 'name', 'description', 'example' are set only if + exactly one topic was found. + +*/ + +int search_topics(THD *thd, TABLE *topics, struct st_find_field *find_fields, + SQL_SELECT *select, List<String> *names, + String *name, String *description, String *example) +{ + DBUG_ENTER("search_topics"); + int count= 0; + + READ_RECORD read_record_info; + init_read_record(&read_record_info, thd, topics, select,1,0); + while (!read_record_info.read_record(&read_record_info)) + { + if (!select->cond->val_int()) // Doesn't match like + continue; + memorize_variant_topic(thd,topics,count,find_fields, + names,name,description,example); + count++; + } + end_read_record(&read_record_info); + + DBUG_RETURN(count); +} + +/* + Look for keyword by mask + + SYNOPSIS + search_keyword() + thd Thread handler + keywords Table of keywords + find_fields Filled array of info for fields + select Function to test for matching keyword. 
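For reference, the rule that memorize_variant_topic() and search_topics() implement above, keep the full description while exactly one topic matches and fall back to a plain name list as soon as a second match appears, reduces to the following standalone sketch. The Topic/Help_result types and the sample data are invented for illustration; this is not server code.

#include <iostream>
#include <string>
#include <vector>

struct Topic { std::string name, description, example; };

struct Help_result
{
  std::vector<std::string> names;          // used only when more than one hit
  std::string name, description, example;  // used only for a single hit
};

static void memorize_variant(const Topic &topic, int count, Help_result *res)
{
  if (count == 0)
  {
    res->name= topic.name;
    res->description= topic.description;
    res->example= topic.example;
  }
  else
  {
    if (count == 1)                  // second hit: demote the first to a name
      res->names.push_back(res->name);
    res->names.push_back(topic.name);
  }
}

int main()
{
  Topic topics[]= { {"BIT", "bit type", "BIT(8)"}, {"BIGINT", "8-byte integer", ""} };
  Help_result res;
  int count= 0;
  for (const Topic &t : topics)
    memorize_variant(t, count++, &res);
  std::cout << (count == 1 ? "single topic: " + res.name
                           : std::to_string(res.names.size()) + " topic names") << "\n";
  return 0;
}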
+ Normally 'help_keyword.name like 'bit%' + + key_id help_keyword_if of found topics (out) + + RETURN VALUES + 0 didn't find any topics matching the mask + 1 found exactly one topic matching the mask + 2 found more then one topic matching the mask +*/ + +int search_keyword(THD *thd, TABLE *keywords, struct st_find_field *find_fields, + SQL_SELECT *select, int *key_id) +{ + DBUG_ENTER("search_keyword"); + int count= 0; + + READ_RECORD read_record_info; + init_read_record(&read_record_info, thd, keywords, select,1,0); + while (!read_record_info.read_record(&read_record_info) && count<2) + { + if (!select->cond->val_int()) // Dosn't match like + continue; + + *key_id= (int)find_fields[help_keyword_help_keyword_id].field->val_int(); + + count++; + } + end_read_record(&read_record_info); + + DBUG_RETURN(count); +} + +/* + Look for all topics with keyword + + SYNOPSIS + get_topics_for_keyword() + thd Thread handler + topics Table of topics + relations Table of m:m relation "topic/keyword" + find_fields Filled array of info for fields + key_id Primary index to use to find for keyword + + RETURN VALUES + # number of topics found + + names array of name of found topics (out) + + name name of found topic (out) + description description of found topic (out) + example example for found topic (out) + + NOTE + Field 'names' is set only if more than one topic was found. + Fields 'name', 'description', 'example' are set only if + exactly one topic was found. +*/ + +int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations, + struct st_find_field *find_fields, int16 key_id, + List<String> *names, + String *name, String *description, String *example) +{ + char buff[8]; // Max int length + int count= 0; + int iindex_topic, iindex_relations; + Field *rtopic_id, *rkey_id; + + DBUG_ENTER("get_topics_for_keyword"); + + if ((iindex_topic= find_type((char*) primary_key_name, + &topics->keynames, 1+2)-1)<0 || + (iindex_relations= find_type((char*) primary_key_name, + &relations->keynames, 1+2)-1)<0) + { + send_error(thd,ER_CORRUPT_HELP_DB); + DBUG_RETURN(-1); + } + rtopic_id= find_fields[help_relation_help_topic_id].field; + rkey_id= find_fields[help_relation_help_keyword_id].field; + + topics->file->ha_index_init(iindex_topic); + relations->file->ha_index_init(iindex_relations); + + rkey_id->store((longlong) key_id); + rkey_id->get_key_image(buff, rkey_id->pack_length(), rkey_id->charset(), + Field::itRAW); + int key_res= relations->file->index_read(relations->record[0], + (byte *)buff, rkey_id->pack_length(), + HA_READ_KEY_EXACT); + + for ( ; + !key_res && key_id == (int16) rkey_id->val_int() ; + key_res= relations->file->index_next(relations->record[0])) + { + char topic_id_buff[8]; + longlong topic_id= rtopic_id->val_int(); + Field *field= find_fields[help_topic_help_topic_id].field; + field->store((longlong) topic_id); + field->get_key_image(topic_id_buff, field->pack_length(), field->charset(), + Field::itRAW); + + if (!topics->file->index_read(topics->record[0], (byte *)topic_id_buff, + field->pack_length(), HA_READ_KEY_EXACT)) + { + memorize_variant_topic(thd,topics,count,find_fields, + names,name,description,example); + count++; + } + } + topics->file->ha_index_end(); + relations->file->ha_index_end(); + DBUG_RETURN(count); +} + +/* + Look for categories by mask + + SYNOPSIS + search_categories() + thd THD for init_read_record + categories Table of categories + find_fields Filled array of info for fields + select Function to test for if matching help topic. 
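get_topics_for_keyword() above walks the help_relation table by keyword id and does a primary-key read into help_topic for every related row. A minimal standalone model of that many-to-many resolution, with the tables replaced by std::map/std::multimap and all ids invented:

#include <iostream>
#include <map>
#include <string>
#include <vector>

int main()
{
  // help_topic: topic_id -> name; help_relation: keyword_id -> topic_id
  std::map<int, std::string> help_topic= { {1, "BIT"}, {2, "BIGINT"} };
  std::multimap<int, int> help_relation= { {10, 1}, {10, 2}, {11, 2} };

  int key_id= 10;
  std::vector<std::string> names;
  std::pair<std::multimap<int, int>::iterator,
            std::multimap<int, int>::iterator> range=
    help_relation.equal_range(key_id);          // index_read + index_next loop
  for (std::multimap<int, int>::iterator it= range.first;
       it != range.second; ++it)
  {
    std::map<int, std::string>::iterator topic= help_topic.find(it->second);
    if (topic != help_topic.end())              // primary key read on help_topic
      names.push_back(topic->second);
  }
  for (size_t i= 0; i < names.size(); i++)
    std::cout << names[i] << "\n";
  return 0;
}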
+ Normally 'help_vategory.name like 'bit%' + names List of found categories names (out) + res_id Primary index of found category (only if + found exactly one category) + + RETURN VALUES + # Number of categories found +*/ + +int search_categories(THD *thd, TABLE *categories, + struct st_find_field *find_fields, + SQL_SELECT *select, List<String> *names, int16 *res_id) +{ + Field *pfname= find_fields[help_category_name].field; + Field *pcat_id= find_fields[help_category_help_category_id].field; + int count= 0; + READ_RECORD read_record_info; + + DBUG_ENTER("search_categories"); + + init_read_record(&read_record_info, thd, categories, select,1,0); + while (!read_record_info.read_record(&read_record_info)) + { + if (select && !select->cond->val_int()) + continue; + String *lname= new (thd->mem_root) String; + get_field(thd->mem_root,pfname,lname); + if (++count == 1 && res_id) + *res_id= (int16) pcat_id->val_int(); + names->push_back(lname); + } + end_read_record(&read_record_info); + + DBUG_RETURN(count); +} + +/* + Look for all topics or subcategories of category + + SYNOPSIS + get_all_items_for_category() + thd Thread handler + items Table of items + pfname Field "name" in items + select "where" part of query.. + res list of finded names +*/ + +void get_all_items_for_category(THD *thd, TABLE *items, Field *pfname, + SQL_SELECT *select, List<String> *res) +{ + DBUG_ENTER("get_all_items_for_category"); + + READ_RECORD read_record_info; + init_read_record(&read_record_info, thd, items, select,1,0); + while (!read_record_info.read_record(&read_record_info)) + { + if (!select->cond->val_int()) + continue; + String *name= new (thd->mem_root) String(); + get_field(thd->mem_root,pfname,name); + res->push_back(name); + } + end_read_record(&read_record_info); + + DBUG_VOID_RETURN; +} + +/* + Send to client answer for help request + + SYNOPSIS + send_answer_1() + protocol - protocol for sending + s1 - value of column "Name" + s2 - value of column "Description" + s3 - value of column "Example" + + IMPLEMENTATION + Format used: + +----------+------------+------------+ + |name |description |example | + +----------+------------+------------+ + |String(64)|String(1000)|String(1000)| + +----------+------------+------------+ + with exactly one row! 
+ + RETURN VALUES + 1 Writing of head failed + -1 Writing of row failed + 0 Successeful send +*/ + +int send_answer_1(Protocol *protocol, String *s1, String *s2, String *s3) +{ + DBUG_ENTER("send_answer_1"); + List<Item> field_list; + field_list.push_back(new Item_empty_string("name",64)); + field_list.push_back(new Item_empty_string("description",1000)); + field_list.push_back(new Item_empty_string("example",1000)); + + if (protocol->send_fields(&field_list,1)) + DBUG_RETURN(1); + + protocol->prepare_for_resend(); + protocol->store(s1); + protocol->store(s2); + protocol->store(s3); + if (protocol->write()) + DBUG_RETURN(-1); + DBUG_RETURN(0); +} + + +/* + Send to client help header + + SYNOPSIS + send_header_2() + protocol - protocol for sending + is_it_category - need column 'source_category_name' + + IMPLEMENTATION + +- -+ + |+-------------------- | +----------+--------------+ + ||source_category_name | |name |is_it_category| + |+-------------------- | +----------+--------------+ + ||String(64) | |String(64)|String(1) | + |+-------------------- | +----------+--------------+ + +- -+ + + RETURN VALUES + result of protocol->send_fields +*/ + +int send_header_2(Protocol *protocol, bool for_category) +{ + DBUG_ENTER("send_header_2"); + List<Item> field_list; + if (for_category) + field_list.push_back(new Item_empty_string("source_category_name",64)); + field_list.push_back(new Item_empty_string("name",64)); + field_list.push_back(new Item_empty_string("is_it_category",1)); + DBUG_RETURN(protocol->send_fields(&field_list,1)); +} + +/* + strcmp for using in qsort + + SYNOPSIS + strptrcmp() + ptr1 (const void*)&str1 + ptr2 (const void*)&str2 + + RETURN VALUES + same as strcmp +*/ + +extern "C" int string_ptr_cmp(const void* ptr1, const void* ptr2) +{ + String *str1= *(String**)ptr1; + String *str2= *(String**)ptr2; + return strcmp(str1->c_ptr(),str2->c_ptr()); +} + +/* + Send to client rows in format: + column1 : <name> + column2 : <is_it_category> + + SYNOPSIS + send_variant_2_list() + protocol Protocol for sending + names List of names + cat Value of the column <is_it_category> + source_name name of category for all items.. 
+ + RETURN VALUES + -1 Writing fail + 0 Data was successefully send +*/ + +int send_variant_2_list(MEM_ROOT *mem_root, Protocol *protocol, + List<String> *names, + const char *cat, String *source_name) +{ + DBUG_ENTER("send_variant_2_list"); + + String **pointers= (String**)alloc_root(mem_root, + sizeof(String*)*names->elements); + String **pos; + String **end= pointers + names->elements; + + List_iterator<String> it(*names); + for (pos= pointers; pos!=end; (*pos++= it++)); + + qsort(pointers,names->elements,sizeof(String*),string_ptr_cmp); + + for (pos= pointers; pos!=end; pos++) + { + protocol->prepare_for_resend(); + if (source_name) + protocol->store(source_name); + protocol->store(*pos); + protocol->store(cat,1,&my_charset_latin1); + if (protocol->write()) + DBUG_RETURN(-1); + } + + DBUG_RETURN(0); +} + +/* + Prepare simple SQL_SELECT table.* WHERE <Item> + + SYNOPSIS + prepare_simple_select() + thd Thread handler + cond WHERE part of select + tables list of tables, used in WHERE + table goal table + + error code of error (out) + + RETURN VALUES + # created SQL_SELECT +*/ + +SQL_SELECT *prepare_simple_select(THD *thd, Item *cond, TABLE_LIST *tables, + TABLE *table, int *error) +{ + if (!cond->fixed) + cond->fix_fields(thd, tables, &cond); // can never fail + SQL_SELECT *res= make_select(table,0,0,cond,error); + if (*error || (res && res->check_quick(thd, 0, HA_POS_ERROR))) + { + delete res; + res=0; + } + return res; +} + +/* + Prepare simple SQL_SELECT table.* WHERE table.name LIKE mask + + SYNOPSIS + prepare_select_for_name() + thd Thread handler + mask mask for compare with name + mlen length of mask + tables list of tables, used in WHERE + table goal table + pfname field "name" in table + + error code of error (out) + + RETURN VALUES + # created SQL_SELECT +*/ + +SQL_SELECT *prepare_select_for_name(THD *thd, const char *mask, uint mlen, + TABLE_LIST *tables, TABLE *table, + Field *pfname, int *error) +{ + Item *cond= new Item_func_like(new Item_field(pfname), + new Item_string(mask,mlen,pfname->charset()), + new Item_string("\\",1,&my_charset_latin1)); + if (thd->is_fatal_error) + return 0; // OOM + return prepare_simple_select(thd,cond,tables,table,error); +} + + +/* + Server-side function 'help' + + SYNOPSIS + mysqld_help() + thd Thread handler + + RETURN VALUES + 0 Success + 1 Error and send_error already commited + -1 error && send_error should be issued (normal case) +*/ + +int mysqld_help(THD *thd, const char *mask) +{ + Protocol *protocol= thd->protocol; + SQL_SELECT *select; + st_find_field used_fields[array_elements(init_used_fields)]; + DBUG_ENTER("mysqld_help"); + + TABLE_LIST tables[4]; + bzero((gptr)tables,sizeof(tables)); + tables[0].alias= tables[0].real_name= (char*) "help_topic"; + tables[0].lock_type= TL_READ; + tables[0].next= &tables[1]; + tables[1].alias= tables[1].real_name= (char*) "help_category"; + tables[1].lock_type= TL_READ; + tables[1].next= &tables[2]; + tables[2].alias= tables[2].real_name= (char*) "help_relation"; + tables[2].lock_type= TL_READ; + tables[2].next= &tables[3]; + tables[3].alias= tables[3].real_name= (char*) "help_keyword"; + tables[3].lock_type= TL_READ; + tables[3].next= 0; + tables[0].db= tables[1].db= tables[2].db= tables[3].db= (char*) "mysql"; + + List<String> topics_list, categories_list, subcategories_list; + String name, description, example; + int res, count_topics, count_categories, error; + uint mlen= strlen(mask); + MEM_ROOT *mem_root= thd->mem_root; + + if (open_and_lock_tables(thd, tables)) + { + res= -1; + goto end; + } 
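The sorting step in send_variant_2_list() above copies the list into an array of String pointers and hands it to qsort() with string_ptr_cmp(), which compares the pointed-to text rather than the pointer values. The same pattern in a self-contained form, using plain C strings instead of the server's String class:

#include <cstdio>
#include <cstdlib>
#include <cstring>

extern "C" int str_ptr_cmp(const void *ptr1, const void *ptr2)
{
  const char *s1= *(const char * const *) ptr1;
  const char *s2= *(const char * const *) ptr2;
  return strcmp(s1, s2);
}

int main()
{
  const char *names[]= { "SHOW", "CREATE TABLE", "ALTER TABLE" };
  size_t count= sizeof(names) / sizeof(names[0]);

  // Sort the pointers by the text they point at, then print in order.
  qsort(names, count, sizeof(names[0]), str_ptr_cmp);
  for (size_t i= 0; i < count; i++)
    printf("%s\n", names[i]);
  return 0;
}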
+ /* Init tables and fields to be usable from items */ + setup_tables(tables); + memcpy((char*) used_fields, (char*) init_used_fields, sizeof(used_fields)); + if (init_fields(thd, tables, used_fields, array_elements(used_fields))) + { + res= -1; + goto end; + } + size_t i; + for (i=0; i<sizeof(tables)/sizeof(TABLE_LIST); i++) + tables[i].table->file->init_table_handle_for_HANDLER(); + + if (!(select= + prepare_select_for_name(thd,mask,mlen,tables,tables[0].table, + used_fields[help_topic_name].field,&error))) + { + res= -1; + goto end; + } + + res= 1; + count_topics= search_topics(thd,tables[0].table,used_fields, + select,&topics_list, + &name, &description, &example); + delete select; + + if (count_topics == 0) + { + int key_id; + if (!(select= + prepare_select_for_name(thd,mask,mlen,tables,tables[3].table, + used_fields[help_keyword_name].field,&error))) + { + res= -1; + goto end; + } + count_topics=search_keyword(thd,tables[3].table,used_fields,select,&key_id); + delete select; + count_topics= (count_topics != 1) ? 0 : + get_topics_for_keyword(thd,tables[0].table,tables[2].table, + used_fields,key_id,&topics_list,&name, + &description,&example); + } + + if (count_topics == 0) + { + int16 category_id; + Field *cat_cat_id= used_fields[help_category_parent_category_id].field; + if (!(select= + prepare_select_for_name(thd,mask,mlen,tables,tables[1].table, + used_fields[help_category_name].field,&error))) + { + res= -1; + goto end; + } + + count_categories= search_categories(thd, tables[1].table, used_fields, + select, + &categories_list,&category_id); + delete select; + if (!count_categories) + { + if (send_header_2(protocol,FALSE)) + goto end; + } + else if (count_categories > 1) + { + if (send_header_2(protocol,FALSE) || + send_variant_2_list(mem_root,protocol,&categories_list,"Y",0)) + goto end; + } + else + { + Field *topic_cat_id= used_fields[help_topic_help_category_id].field; + Item *cond_topic_by_cat= + new Item_func_equal(new Item_field(topic_cat_id), + new Item_int((int32)category_id)); + Item *cond_cat_by_cat= + new Item_func_equal(new Item_field(cat_cat_id), + new Item_int((int32)category_id)); + if (!(select= prepare_simple_select(thd,cond_topic_by_cat, + tables,tables[0].table,&error))) + { + res= -1; + goto end; + } + get_all_items_for_category(thd,tables[0].table, + used_fields[help_topic_name].field, + select,&topics_list); + delete select; + if (!(select= prepare_simple_select(thd,cond_cat_by_cat,tables, + tables[1].table,&error))) + { + res= -1; + goto end; + } + get_all_items_for_category(thd,tables[1].table, + used_fields[help_category_name].field, + select,&subcategories_list); + delete select; + String *cat= categories_list.head(); + if (send_header_2(protocol, TRUE) || + send_variant_2_list(mem_root,protocol,&topics_list, "N",cat) || + send_variant_2_list(mem_root,protocol,&subcategories_list,"Y",cat)) + goto end; + } + } + else if (count_topics == 1) + { + if (send_answer_1(protocol,&name,&description,&example)) + goto end; + } + else + { + /* First send header and functions */ + if (send_header_2(protocol, FALSE) || + send_variant_2_list(mem_root,protocol, &topics_list, "N", 0)) + goto end; + if (!(select= + prepare_select_for_name(thd,mask,mlen,tables,tables[1].table, + used_fields[help_category_name].field,&error))) + { + res= -1; + goto end; + } + search_categories(thd, tables[1].table, used_fields, + select,&categories_list, 0); + delete select; + /* Then send categories */ + if (send_variant_2_list(mem_root,protocol, &categories_list, "Y", 0)) + goto end; + } 
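mysqld_help() above works as a cascade: match topics by mask first, then keywords (mapping them back to topics), then categories, and shape the answer depending on whether zero, one, or several hits remain. A rough standalone model of that control flow, with the keyword step omitted and all names and containers invented:

#include <iostream>
#include <string>
#include <vector>

enum help_answer { HELP_NOTHING, HELP_ONE_TOPIC, HELP_TOPIC_LIST, HELP_CATEGORIES };

static bool like_prefix(const std::string &name, const std::string &mask)
{
  return name.size() >= mask.size() &&
         name.compare(0, mask.size(), mask) == 0;   // stand-in for LIKE 'mask%'
}

static help_answer lookup(const std::string &mask,
                          const std::vector<std::string> &topics,
                          const std::vector<std::string> &categories,
                          std::vector<std::string> *out)
{
  for (size_t i= 0; i < topics.size(); i++)
    if (like_prefix(topics[i], mask))
      out->push_back(topics[i]);
  if (out->size() == 1)
    return HELP_ONE_TOPIC;                 // send name/description/example
  if (!out->empty())
    return HELP_TOPIC_LIST;                // send the list of names
  for (size_t i= 0; i < categories.size(); i++)
    if (like_prefix(categories[i], mask))
      out->push_back(categories[i]);       // last resort: category names
  return out->empty() ? HELP_NOTHING : HELP_CATEGORIES;
}

int main()
{
  std::vector<std::string> topics, categories, out;
  topics.push_back("BIT");
  topics.push_back("BIGINT");
  categories.push_back("Data Types");
  std::cout << lookup("BI", topics, categories, &out)
            << " (" << out.size() << " hits)\n";
  return 0;
}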
+ res= 0; + + send_eof(thd); + +end: + DBUG_RETURN(res); +} + diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index b61c766120e..283fe571d53 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -18,15 +18,16 @@ /* Insert of records */ #include "mysql_priv.h" -#include "sql_acl.h" static int check_null_fields(THD *thd,TABLE *entry); +#ifndef EMBEDDED_LIBRARY static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list); -static int write_delayed(THD *thd,TABLE *table, enum_duplicates dup, +static int write_delayed(THD *thd,TABLE *table, enum_duplicates dup, bool ignore, char *query, uint query_length, int log_on); static void end_delayed_insert(THD *thd); extern "C" pthread_handler_decl(handle_delayed_insert,arg); static void unlink_blobs(register TABLE *table); +#endif /* Define to force use of my_malloc() if the allocated memory block is big */ @@ -41,15 +42,29 @@ static void unlink_blobs(register TABLE *table); #define DELAYED_LOG_UPDATE 1 #define DELAYED_LOG_BIN 2 + /* - Check if insert fields are correct - Updates table->time_stamp to point to timestamp field or 0, depending on - if timestamp should be updated or not. + Check if insert fields are correct. + + SYNOPSIS + check_insert_fields() + thd The current thread. + table The table for insert. + fields The insert fields. + values The insert values. + + NOTE + Clears TIMESTAMP_AUTO_SET_ON_INSERT from table->timestamp_field_type + or leaves it as is, depending on if timestamp should be updated or + not. + + RETURN + 0 OK + -1 Error */ -static int -check_insert_fields(THD *thd,TABLE *table,List<Item> &fields, - List<Item> &values, ulong counter) +static int check_insert_fields(THD *thd, TABLE *table, List<Item> &fields, + List<Item> &values) { if (fields.elements == 0 && values.elements != 0) { @@ -57,13 +72,16 @@ check_insert_fields(THD *thd,TABLE *table,List<Item> &fields, { my_printf_error(ER_WRONG_VALUE_COUNT_ON_ROW, ER(ER_WRONG_VALUE_COUNT_ON_ROW), - MYF(0),counter); + MYF(0), 1L); return -1; } +#ifndef NO_EMBEDDED_ACCESS_CHECKS if (grant_option && check_grant_all_columns(thd,INSERT_ACL,table)) return -1; - table->time_stamp=0; // This is saved by caller +#endif + clear_timestamp_auto_bits(table->timestamp_field_type, + TIMESTAMP_AUTO_SET_ON_INSERT); } else { // Part field list @@ -71,7 +89,7 @@ check_insert_fields(THD *thd,TABLE *table,List<Item> &fields, { my_printf_error(ER_WRONG_VALUE_COUNT_ON_ROW, ER(ER_WRONG_VALUE_COUNT_ON_ROW), - MYF(0),counter); + MYF(0), 1L); return -1; } TABLE_LIST table_list; @@ -83,36 +101,100 @@ check_insert_fields(THD *thd,TABLE *table,List<Item> &fields, thd->dupp_field=0; if (setup_tables(&table_list) || - setup_fields(thd,&table_list,fields,1,0,0)) + setup_fields(thd, 0, &table_list,fields,1,0,0)) return -1; + if (thd->dupp_field) { my_error(ER_FIELD_SPECIFIED_TWICE,MYF(0), thd->dupp_field->field_name); return -1; } - table->time_stamp=0; if (table->timestamp_field && // Don't set timestamp if used - table->timestamp_field->query_id != thd->query_id) - table->time_stamp= table->timestamp_field->offset()+1; + table->timestamp_field->query_id == thd->query_id) + clear_timestamp_auto_bits(table->timestamp_field_type, + TIMESTAMP_AUTO_SET_ON_INSERT); } // For the values we need select_priv +#ifndef NO_EMBEDDED_ACCESS_CHECKS table->grant.want_privilege=(SELECT_ACL & ~table->grant.privilege); +#endif return 0; } -int mysql_insert(THD *thd,TABLE_LIST *table_list, List<Item> &fields, - List<List_item> &values_list,enum_duplicates duplic) +/* + Check update fields for the timestamp field. 
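The clear_timestamp_auto_bits() calls in check_insert_fields() above (and in check_update_fields() below) manage a small bitmask: the column is auto-set on INSERT, on UPDATE, or both, and an explicit assignment in the statement clears the corresponding bit. A compact illustration of that bookkeeping, with the enum values invented for the sketch:

#include <cstdio>

enum timestamp_auto_set_type
{
  TS_NO_AUTO_SET= 0,
  TS_AUTO_SET_ON_INSERT= 1,
  TS_AUTO_SET_ON_UPDATE= 2,
  TS_AUTO_SET_ON_BOTH= TS_AUTO_SET_ON_INSERT | TS_AUTO_SET_ON_UPDATE
};

static void clear_auto_bits(timestamp_auto_set_type *type, int bits)
{
  *type= (timestamp_auto_set_type) (*type & ~bits);
}

int main()
{
  timestamp_auto_set_type type= TS_AUTO_SET_ON_BOTH;
  // The statement assigns the timestamp column explicitly, so do not
  // auto-set it on insert; keep the auto-set-on-update behaviour.
  clear_auto_bits(&type, TS_AUTO_SET_ON_INSERT);
  printf("auto-set on insert: %d, on update: %d\n",
         (type & TS_AUTO_SET_ON_INSERT) != 0,
         (type & TS_AUTO_SET_ON_UPDATE) != 0);
  return 0;
}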
+ + SYNOPSIS + check_update_fields() + thd The current thread. + insert_table_list The insert table list. + table The table for update. + update_fields The update fields. + + NOTE + If the update fields include the timestamp field, + remove TIMESTAMP_AUTO_SET_ON_UPDATE from table->timestamp_field_type. + + RETURN + 0 OK + -1 Error +*/ + +static int check_update_fields(THD *thd, TABLE *table, + TABLE_LIST *insert_table_list, + List<Item> &update_fields) { - int error; + ulong timestamp_query_id; + LINT_INIT(timestamp_query_id); + + /* + Change the query_id for the timestamp column so that we can + check if this is modified directly. + */ + if (table->timestamp_field) + { + timestamp_query_id= table->timestamp_field->query_id; + table->timestamp_field->query_id= thd->query_id-1; + } + + /* + Check the fields we are going to modify. This will set the query_id + of all used fields to the threads query_id. + */ + if (setup_fields(thd, 0, insert_table_list, update_fields, 1, 0, 0)) + return -1; + + if (table->timestamp_field) + { + /* Don't set timestamp column if this is modified. */ + if (table->timestamp_field->query_id == thd->query_id) + clear_timestamp_auto_bits(table->timestamp_field_type, + TIMESTAMP_AUTO_SET_ON_UPDATE); + else + table->timestamp_field->query_id= timestamp_query_id; + } + + return 0; +} + + +int mysql_insert(THD *thd,TABLE_LIST *table_list, + List<Item> &fields, + List<List_item> &values_list, + List<Item> &update_fields, + List<Item> &update_values, + enum_duplicates duplic, + bool ignore) +{ + int error, res; /* log_on is about delayed inserts only. By default, both logs are enabled (this won't cause problems if the server runs without --log-update or --log-bin). */ int log_on= DELAYED_LOG_UPDATE | DELAYED_LOG_BIN ; - - bool transactional_table, log_delayed, bulk_insert; + bool transactional_table, log_delayed, joins_freed= FALSE; uint value_count; ulong counter = 1; ulonglong id; @@ -120,29 +202,37 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, List<Item> &fields, TABLE *table; List_iterator_fast<List_item> its(values_list); List_item *values; - char *query=thd->query; +#ifndef EMBEDDED_LIBRARY + char *query= thd->query; +#endif thr_lock_type lock_type = table_list->lock_type; + TABLE_LIST *insert_table_list= (TABLE_LIST*) + thd->lex->select_lex.table_list.first; DBUG_ENTER("mysql_insert"); - if (thd->master_access & SUPER_ACL) - { - if (!(thd->options & OPTION_UPDATE_LOG)) - log_on&= ~(int) DELAYED_LOG_UPDATE; - if (!(thd->options & OPTION_BIN_LOG)) - log_on&= ~(int) DELAYED_LOG_BIN; - } - + if (!(thd->options & OPTION_UPDATE_LOG)) + log_on&= ~(int) DELAYED_LOG_UPDATE; + if (!(thd->options & OPTION_BIN_LOG)) + log_on&= ~(int) DELAYED_LOG_BIN; /* in safe mode or with skip-new change delayed insert to be regular if we are told to replace duplicates, the insert cannot be concurrent delayed insert changed to regular in slave thread */ +#ifdef EMBEDDED_LIBRARY + if (lock_type == TL_WRITE_DELAYED) + lock_type=TL_WRITE; +#else if ((lock_type == TL_WRITE_DELAYED && ((specialflag & (SPECIAL_NO_NEW_FUNC | SPECIAL_SAFE_MODE)) || thd->slave_thread || !thd->variables.max_insert_delayed_threads)) || - (lock_type == TL_WRITE_CONCURRENT_INSERT && duplic == DUP_REPLACE)) + (lock_type == TL_WRITE_CONCURRENT_INSERT && duplic == DUP_REPLACE) || + (duplic == DUP_UPDATE)) lock_type=TL_WRITE; +#endif + table_list->lock_type= lock_type; +#ifndef EMBEDDED_LIBRARY if (lock_type == TL_WRITE_DELAYED) { if (thd->locked_tables) @@ -157,21 +247,38 @@ int mysql_insert(THD *thd,TABLE_LIST 
*table_list, List<Item> &fields, DBUG_RETURN(-1); } } - if (!(table = delayed_get_table(thd,table_list)) && !thd->fatal_error) - table = open_ltable(thd,table_list,lock_type=thd->update_lock_default); + if ((table= delayed_get_table(thd,table_list)) && !thd->is_fatal_error) + { + res= 0; + if (table_list->next) /* if sub select */ + res= open_and_lock_tables(thd, table_list->next); + } + else + { + /* Too many delayed insert threads; Use a normal insert */ + table_list->lock_type= lock_type= TL_WRITE; + res= open_and_lock_tables(thd, table_list); + } } else - table = open_ltable(thd,table_list,lock_type); - if (!table) +#endif /* EMBEDDED_LIBRARY */ + res= open_and_lock_tables(thd, table_list); + if (res || thd->is_fatal_error) DBUG_RETURN(-1); + + table= table_list->table; thd->proc_info="init"; thd->used_tables=0; values= its++; - if (check_insert_fields(thd,table,fields,*values,1) || - setup_tables(table_list) || setup_fields(thd,table_list,*values,0,0,0)) + + if (mysql_prepare_insert(thd, table_list, insert_table_list, + insert_table_list, table, + fields, values, update_fields, + update_values, duplic)) goto abort; + value_count= values->elements; - while ((values = its++)) + while ((values= its++)) { counter++; if (values->elements != value_count) @@ -181,7 +288,7 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, List<Item> &fields, MYF(0),counter); goto abort; } - if (setup_fields(thd,table_list,*values,0,0,0)) + if (setup_fields(thd, 0, insert_table_list, *values, 0, 0, 0)) goto abort; } its.rewind (); @@ -189,39 +296,47 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, List<Item> &fields, Fill in the given fields and dump it to the table file */ - info.records=info.deleted=info.copied=0; + info.records= info.deleted= info.copied= info.updated= 0; + info.ignore= ignore; info.handle_duplicates=duplic; - // Don't count warnings for simple inserts - if (values_list.elements > 1 || (thd->options & OPTION_WARNINGS)) - thd->count_cuted_fields = 1; + info.update_fields= &update_fields; + info.update_values= &update_values; + /* + Count warnings for all inserts. + For single line insert, generate an error if try to set a NOT NULL field + to NULL + */ + thd->count_cuted_fields= ((values_list.elements == 1) ? + CHECK_FIELD_ERROR_FOR_NULL : + CHECK_FIELD_WARN); thd->cuted_fields = 0L; table->next_number_field=table->found_next_number_field; error=0; id=0; thd->proc_info="update"; - if (duplic == DUP_IGNORE || duplic == DUP_REPLACE) + if (duplic != DUP_ERROR || ignore) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); - if ((lock_type != TL_WRITE_DELAYED && !(specialflag & SPECIAL_SAFE_MODE)) && - values_list.elements >= MIN_ROWS_TO_USE_BULK_INSERT) - { - table->file->extra_opt(HA_EXTRA_WRITE_CACHE, - min(thd->variables.read_buff_size, - table->avg_row_length*values_list.elements)); - table->file->deactivate_non_unique_index(values_list.elements); - bulk_insert=1; - } - else - bulk_insert=0; + /* + let's *try* to start bulk inserts. It won't necessary + start them as values_list.elements should be greater than + some - handler dependent - threshold. + So we call start_bulk_insert to perform nesessary checks on + values_list.elements, and - if nothing else - to initialize + the code to make the call of end_bulk_insert() below safe. 
+ */ + if (lock_type != TL_WRITE_DELAYED) + table->file->start_bulk_insert(values_list.elements); while ((values= its++)) { if (fields.elements || !value_count) { - restore_record(table,2); // Get empty record - if (fill_record(fields, *values, 0) || check_null_fields(thd,table)) + restore_record(table,default_values); // Get empty record + if (fill_record(fields, *values, 0)|| thd->net.report_error || + check_null_fields(thd,table)) { - if (values_list.elements != 1) + if (values_list.elements != 1 && !thd->net.report_error) { info.records++; continue; @@ -233,12 +348,12 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, List<Item> &fields, else { if (thd->used_tables) // Column used in values() - restore_record(table,2); // Get empty record + restore_record(table,default_values); // Get empty record else - table->record[0][0]=table->record[2][0]; // Fix delete marker - if (fill_record(table->field, *values, 0)) + table->record[0][0]=table->default_values[0]; // Fix delete marker + if (fill_record(table->field,*values, 0) || thd->net.report_error) { - if (values_list.elements != 1) + if (values_list.elements != 1 && ! thd->net.report_error) { info.records++; continue; @@ -247,12 +362,14 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, List<Item> &fields, break; } } +#ifndef EMBEDDED_LIBRARY if (lock_type == TL_WRITE_DELAYED) { - error=write_delayed(thd,table,duplic,query, thd->query_length, log_on); + error=write_delayed(thd, table, duplic, ignore, query, thd->query_length, log_on); query=0; } else +#endif error=write_record(table,&info); if (error) break; @@ -266,7 +383,17 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, List<Item> &fields, { // Get auto increment value id= thd->last_insert_id; } + thd->row_count++; } + + free_underlaid_joins(thd, &thd->lex->select_lex); + joins_freed= TRUE; + + /* + Now all rows are inserted. Time to update logs and sends response to + user + */ +#ifndef EMBEDDED_LIBRARY if (lock_type == TL_WRITE_DELAYED) { if (!error) @@ -278,34 +405,31 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, List<Item> &fields, query_cache_invalidate3(thd, table_list, 1); } else +#endif { - if (bulk_insert) + if (table->file->end_bulk_insert() && !error) { - if (table->file->extra(HA_EXTRA_NO_CACHE)) - { - if (!error) - { - table->file->print_error(my_errno,MYF(0)); - error=1; - } - } - if (table->file->activate_all_index(thd)) - { - if (!error) - { - table->file->print_error(my_errno,MYF(0)); - error=1; - } - } + table->file->print_error(my_errno,MYF(0)); + error=1; } if (id && values_list.elements != 1) thd->insert_id(id); // For update log - else if (table->next_number_field) + else if (table->next_number_field && info.copied) id=table->next_number_field->val_int(); // Return auto_increment value - + + /* + Invalidate the table in the query cache if something changed. + For the transactional algorithm to work the invalidation must be + before binlog writing and ha_autocommit_... 
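start_bulk_insert()/end_bulk_insert() above form a bracket that must always be closed, even on error paths, since end_bulk_insert() is where write caching and index rebuilds are flushed. A minimal sketch of the same bracket expressed as an RAII guard; the Handler type here is a stand-in, and the real code checks the return value of end_bulk_insert() explicitly, which a destructor cannot do:

#include <cstdio>

struct Handler
{
  void start_bulk_insert(unsigned long rows)
  { printf("bulk start (%lu rows expected)\n", rows); }
  int  end_bulk_insert() { printf("bulk end\n"); return 0; }
  int  write_row(int)    { return 0; }
};

class Bulk_insert_guard
{
  Handler *file;
public:
  Bulk_insert_guard(Handler *f, unsigned long rows) : file(f)
  { file->start_bulk_insert(rows); }
  ~Bulk_insert_guard() { file->end_bulk_insert(); }
};

int main()
{
  Handler h;
  {
    Bulk_insert_guard guard(&h, 3);
    for (int row= 0; row < 3; row++)
      if (h.write_row(row))
        return 1;                 // guard still runs end_bulk_insert()
  }
  return 0;
}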
+ */ + if (info.copied || info.deleted || info.updated) + query_cache_invalidate3(thd, table_list, 1); + transactional_table= table->file->has_transactions(); + log_delayed= (transactional_table || table->tmp_table); - if ((info.copied || info.deleted) && (error <= 0 || !transactional_table)) + if ((info.copied || info.deleted || info.updated) && + (error <= 0 || !transactional_table)) { mysql_update_log.write(thd, thd->query, thd->query_length); if (mysql_bin_log.is_open()) @@ -313,7 +437,7 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, List<Item> &fields, if (error <= 0) thd->clear_error(); Query_log_event qinfo(thd, thd->query, thd->query_length, - log_delayed); + log_delayed, FALSE); if (mysql_bin_log.write(&qinfo) && transactional_table) error=1; } @@ -323,14 +447,6 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, List<Item> &fields, if (transactional_table) error=ha_autocommit_or_rollback(thd,error); - /* - Store table for future invalidation or invalidate it in - the query cache if something changed - */ - if (info.copied || info.deleted) - { - query_cache_invalidate3(thd, table_list, 1); - } if (thd->lock) { mysql_unlock_tables(thd, thd->lock); @@ -339,38 +455,121 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, List<Item> &fields, } thd->proc_info="end"; table->next_number_field=0; - thd->count_cuted_fields=0; + thd->count_cuted_fields= CHECK_FIELD_IGNORE; thd->next_insert_id=0; // Reset this if wrongly used - if (duplic == DUP_IGNORE || duplic == DUP_REPLACE) + if (duplic != DUP_ERROR || ignore) table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); + + /* Reset value of LAST_INSERT_ID if no rows where inserted */ + if (!info.copied && thd->insert_id_used) + { + thd->insert_id(0); + id=0; + } if (error) goto abort; - if (values_list.elements == 1 && (!(thd->options & OPTION_WARNINGS) || !thd->cuted_fields)) - send_ok(&thd->net,info.copied+info.deleted,id); + send_ok(thd,info.copied+info.deleted+info.updated,id); else { char buff[160]; - if (duplic == DUP_IGNORE) - sprintf(buff,ER(ER_INSERT_INFO),info.records, - (lock_type == TL_WRITE_DELAYED) ? 0 : - info.records-info.copied, - thd->cuted_fields); + if (ignore) + sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records, + (lock_type == TL_WRITE_DELAYED) ? (ulong) 0 : + (ulong) (info.records - info.copied), (ulong) thd->cuted_fields); else - sprintf(buff,ER(ER_INSERT_INFO),info.records,info.deleted, - thd->cuted_fields); - ::send_ok(&thd->net,info.copied+info.deleted,(ulonglong)id,buff); + sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records, + (ulong) (info.deleted+info.updated), (ulong) thd->cuted_fields); + ::send_ok(thd,info.copied+info.deleted+info.updated,(ulonglong)id,buff); } + table->insert_values=0; DBUG_RETURN(0); abort: +#ifndef EMBEDDED_LIBRARY if (lock_type == TL_WRITE_DELAYED) end_delayed_insert(thd); +#endif + if (!joins_freed) + free_underlaid_joins(thd, &thd->lex->select_lex); + table->insert_values=0; DBUG_RETURN(-1); } +/* + Prepare items in INSERT statement + + SYNOPSIS + mysql_prepare_insert() + thd thread handler + table_list global table list (not including first table for + INSERT ... SELECT) + insert_table_list Table we are inserting into (for INSERT ... SELECT) + dup_table_list Tables to be used in ON DUPLICATE KEY + It's either all global tables or only the table we + insert into, depending on if we are using GROUP BY + in the SELECT clause). + values Values to insert. NULL for INSERT ... 
SELECT + + TODO (in far future) + In cases of: + INSERT INTO t1 SELECT a, sum(a) as sum1 from t2 GROUP BY a + ON DUPLICATE KEY ... + we should be able to refer to sum1 in the ON DUPLICATE KEY part + + WARNING + You MUST set table->insert_values to 0 after calling this function + before releasing the table object. + + RETURN VALUE + 0 OK + -1 error (message is not sent to user) +*/ + +int mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, + TABLE_LIST *insert_table_list, + TABLE_LIST *dup_table_list, + TABLE *table, + List<Item> &fields, List_item *values, + List<Item> &update_fields, List<Item> &update_values, + enum_duplicates duplic) +{ + DBUG_ENTER("mysql_prepare_insert"); + + if (duplic == DUP_UPDATE && !table->insert_values) + { + /* it should be allocated before Item::fix_fields() */ + table->insert_values= + (byte *)alloc_root(thd->mem_root, table->rec_buff_length); + if (!table->insert_values) + DBUG_RETURN(-1); + } + if (setup_tables(insert_table_list)) + DBUG_RETURN(-1); + if (values) + { + if (check_insert_fields(thd, table, fields, *values) || + setup_fields(thd, 0, insert_table_list, *values, 0, 0, 0) || + (duplic == DUP_UPDATE && + (check_update_fields(thd, table, insert_table_list, update_fields) || + setup_fields(thd, 0, dup_table_list, update_values, 1, 0, 0)))) + DBUG_RETURN(-1); + if (find_real_table_in_list(table_list->next, table_list->db, + table_list->real_name)) + { + my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->real_name); + DBUG_RETURN(-1); + } + } + if (duplic == DUP_UPDATE || duplic == DUP_REPLACE) + table->file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY); + + DBUG_RETURN(0); +} + + /* Check if there is more uniq keys after field */ static int last_uniq_key(TABLE *table,uint keynr) @@ -394,7 +593,8 @@ int write_record(TABLE *table,COPY_INFO *info) DBUG_ENTER("write_record"); info->records++; - if (info->handle_duplicates == DUP_REPLACE) + if (info->handle_duplicates == DUP_REPLACE || + info->handle_duplicates == DUP_UPDATE) { while ((error=table->file->write_row(table->record[0]))) { @@ -411,7 +611,9 @@ int write_record(TABLE *table,COPY_INFO *info) was used. This ensures that we don't get a problem when the whole range of the key has been used. */ - if (table->next_number_field && key_nr == table->next_number_index && + if (info->handle_duplicates == DUP_REPLACE && + table->next_number_field && + key_nr == table->next_number_index && table->file->auto_increment_column_changed) goto err; if (table->file->table_flags() & HA_DUPP_POS) @@ -444,30 +646,58 @@ int write_record(TABLE *table,COPY_INFO *info) HA_READ_KEY_EXACT)))) goto err; } - /* - The manual defines the REPLACE semantics that it is either an INSERT or - DELETE(s) + INSERT; FOREIGN KEY checks do not function in the defined - way if we allow MySQL to convert the latter operation internally to an - UPDATE. - */ - - if (last_uniq_key(table,key_nr) && - !table->file->referenced_by_foreign_key()) + if (info->handle_duplicates == DUP_UPDATE) { - if ((error=table->file->update_row(table->record[1],table->record[0]))) - goto err; - info->deleted++; - break; /* Update logfile and count */ + /* we don't check for other UNIQUE keys - the first row + that matches, is updated. 
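write_record() here (continued just below) resolves a duplicate-key conflict in one of two ways: for INSERT ... ON DUPLICATE KEY UPDATE it patches the first conflicting row in place, for REPLACE it deletes the old row and keeps the new one. A condensed standalone rendering with the table reduced to a std::map keyed by the unique column; only the enum names mirror the patch, the storage is invented:

#include <iostream>
#include <map>
#include <string>

enum enum_duplicates { DUP_ERROR, DUP_REPLACE, DUP_UPDATE };

static bool write_row(std::map<int, std::string> &table, int key,
                      const std::string &val, enum_duplicates dup)
{
  std::pair<std::map<int, std::string>::iterator, bool> res=
    table.insert(std::make_pair(key, val));
  if (res.second)
    return false;                       // no conflict
  switch (dup)
  {
  case DUP_UPDATE:
    res.first->second= val;             // update the first matching row
    return false;
  case DUP_REPLACE:
    table.erase(res.first);             // delete + insert
    table.insert(std::make_pair(key, val));
    return false;
  default:
    return true;                        // duplicate key error
  }
}

int main()
{
  std::map<int, std::string> t;
  write_row(t, 1, "a", DUP_ERROR);
  std::cout << write_row(t, 1, "b", DUP_UPDATE)  << " " << t[1] << "\n";  // 0 b
  std::cout << write_row(t, 1, "c", DUP_REPLACE) << " " << t[1] << "\n";  // 0 c
  return 0;
}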
If update causes a conflict again, + an error is returned + */ + DBUG_ASSERT(table->insert_values != NULL); + store_record(table,insert_values); + restore_record(table,record[1]); + DBUG_ASSERT(info->update_fields->elements==info->update_values->elements); + if (fill_record(*info->update_fields, *info->update_values, 0)) + goto err; + if ((error=table->file->update_row(table->record[1],table->record[0]))) + { + if ((error == HA_ERR_FOUND_DUPP_KEY) && info->ignore) + break; + goto err; + } + info->updated++; + break; + } + else /* DUP_REPLACE */ + { + /* + The manual defines the REPLACE semantics that it is either + an INSERT or DELETE(s) + INSERT; FOREIGN KEY checks in + InnoDB do not function in the defined way if we allow MySQL + to convert the latter operation internally to an UPDATE. + We also should not perform this conversion if we have + timestamp field with ON UPDATE which is different from DEFAULT. + */ + if (last_uniq_key(table,key_nr) && + !table->file->referenced_by_foreign_key() && + (table->timestamp_field_type == TIMESTAMP_NO_AUTO_SET || + table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_BOTH)) + { + if ((error=table->file->update_row(table->record[1], + table->record[0]))) + goto err; + info->deleted++; + break; /* Update logfile and count */ + } + else if ((error=table->file->delete_row(table->record[1]))) + goto err; + info->deleted++; } - else if ((error=table->file->delete_row(table->record[1]))) - goto err; - info->deleted++; } info->copied++; } else if ((error=table->file->write_row(table->record[0]))) { - if (info->handle_duplicates != DUP_IGNORE || + if (!info->ignore || (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE)) goto err; } @@ -479,7 +709,7 @@ int write_record(TABLE *table,COPY_INFO *info) err: if (key) - my_afree(key); + my_safe_afree(key,table->max_unique_length,MAX_KEY_LENGTH); info->last_errno= error; table->file->print_error(error,MYF(0)); DBUG_RETURN(1); @@ -516,19 +746,21 @@ static int check_null_fields(THD *thd __attribute__((unused)), A thread is created for each table that one uses with the DELAYED attribute. 
*****************************************************************************/ +#ifndef EMBEDDED_LIBRARY + class delayed_row :public ilink { public: char *record,*query; enum_duplicates dup; time_t start_time; - bool query_start_used,last_insert_id_used,insert_id_used; + bool query_start_used,last_insert_id_used,insert_id_used, ignore; int log_query; ulonglong last_insert_id; - ulong time_stamp; + timestamp_auto_set_type timestamp_field_type; uint query_length; - delayed_row(enum_duplicates dup_arg, int log_query_arg) - :record(0),query(0),dup(dup_arg),log_query(log_query_arg) {} + delayed_row(enum_duplicates dup_arg, bool ignore_arg, int log_query_arg) + :record(0),query(0),dup(dup_arg),ignore(ignore_arg),log_query(log_query_arg) {} ~delayed_row() { x_free(record); @@ -547,7 +779,7 @@ public: volatile bool status,dead; COPY_INFO info; I_List<delayed_row> rows; - uint group_count; + ulong group_count; TABLE_LIST table_list; // Argument delayed_insert() @@ -556,12 +788,15 @@ public: group_count(0) { thd.user=thd.priv_user=(char*) delayed_user; - thd.host=(char*) localhost; + thd.host=(char*) my_localhost; thd.current_tablenr=0; thd.version=refresh_version; thd.command=COM_DELAYED_INSERT; + thd.lex->current_select= 0; // for my_message_sql + thd.lex->sql_command= SQLCOM_INSERT; // For innodb::store_lock() - bzero((char*) &thd.net,sizeof(thd.net)); // Safety + bzero((char*) &thd.net, sizeof(thd.net)); // Safety + bzero((char*) &table_list, sizeof(table_list)); // Safety thd.system_thread= SYSTEM_THREAD_DELAYED_INSERT; thd.host_or_ip= ""; bzero((char*) &info,sizeof(info)); @@ -685,7 +920,7 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) if (!(tmp=new delayed_insert())) { my_error(ER_OUTOFMEMORY,MYF(0),sizeof(delayed_insert)); - goto err1; + goto err1; } pthread_mutex_lock(&LOCK_thread_count); thread_count++; @@ -695,7 +930,7 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) { delete tmp; my_error(ER_OUT_OF_RESOURCES,MYF(0)); - goto err1; + goto err1; } tmp->table_list= *table_list; // Needed to open table tmp->table_list.db= tmp->thd.db; @@ -711,8 +946,8 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) pthread_mutex_unlock(&tmp->mutex); tmp->unlock(); delete tmp; - net_printf(&thd->net,ER_CANT_CREATE_THREAD,error); - goto err1; + net_printf(thd,ER_CANT_CREATE_THREAD,error); + goto err1; } /* Wait until table is open */ @@ -730,10 +965,10 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) thd->proc_info="got old table"; if (tmp->thd.killed) { - if (tmp->thd.fatal_error) + if (tmp->thd.is_fatal_error) { /* Copy error message and abort */ - thd->fatal_error=1; + thd->fatal_error(); strmov(thd->net.last_error,tmp->thd.net.last_error); thd->net.last_errno=tmp->thd.net.last_errno; } @@ -750,18 +985,18 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) } pthread_mutex_lock(&tmp->mutex); - table= tmp->get_local_table(thd); + table=tmp->get_local_table(thd); pthread_mutex_unlock(&tmp->mutex); if (table) thd->di=tmp; - else if (tmp->thd.fatal_error) - thd->fatal_error=1; + else if (tmp->thd.is_fatal_error) + thd->fatal_error(); /* Unlock the delayed insert object after its last access. */ tmp->unlock(); DBUG_RETURN((table_list->table=table)); err1: - thd->fatal_error= 1; + thd->fatal_error(); /* Release the protection against the global read lock and wake everyone, who might want to set a global read lock. 
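The delayed-insert machinery above pairs a per-table queue of delayed_row objects with one background thread that drains it under a mutex/condition pair. A minimal standalone analogue using the C++ standard library in place of the pthread calls; the queue of strings stands in for packed row images:

#include <condition_variable>
#include <iostream>
#include <mutex>
#include <queue>
#include <string>
#include <thread>

struct Delayed_queue
{
  std::mutex mutex;
  std::condition_variable cond;
  std::queue<std::string> rows;
  bool done= false;
};

static void handle_delayed_insert(Delayed_queue *q)
{
  std::unique_lock<std::mutex> lock(q->mutex);
  for (;;)
  {
    q->cond.wait(lock, [q] { return q->done || !q->rows.empty(); });
    while (!q->rows.empty())
    {
      std::cout << "insert: " << q->rows.front() << "\n";  // write_record()
      q->rows.pop();
    }
    if (q->done)
      return;
  }
}

int main()
{
  Delayed_queue q;
  std::thread worker(handle_delayed_insert, &q);
  {
    std::lock_guard<std::mutex> lock(q.mutex);   // write_delayed()
    q.rows.push("row 1");
    q.rows.push("row 2");
    q.done= true;
  }
  q.cond.notify_one();
  worker.join();
  return 0;
}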
@@ -829,8 +1064,9 @@ TABLE *delayed_insert::get_local_table(THD* client_thd) found_next_number_field=table->found_next_number_field; for (org_field=table->field ; *org_field ; org_field++,field++) { - if (!(*field= (*org_field)->new_field(&client_thd->mem_root,copy))) + if (!(*field= (*org_field)->new_field(client_thd->mem_root,copy))) return 0; + (*field)->orig_table= copy; // Remove connection (*field)->move_field(adjust_ptrs); // Point at copy->record[0] if (*org_field == found_next_number_field) (*field)->table->found_next_number_field= *field; @@ -841,13 +1077,18 @@ TABLE *delayed_insert::get_local_table(THD* client_thd) if (table->timestamp_field) { /* Restore offset as this may have been reset in handle_inserts */ - copy->time_stamp=table->timestamp_field->offset()+1; copy->timestamp_field= (Field_timestamp*) copy->field[table->timestamp_field_offset]; + copy->timestamp_field->unireg_check= table->timestamp_field->unireg_check; + copy->timestamp_field_type= copy->timestamp_field->get_auto_set_type(); } /* _rowid is not used with delayed insert */ copy->rowid_field=0; + + /* Adjust in_use for pointing to client thread */ + copy->in_use= client_thd; + return copy; /* Got fatal error */ @@ -861,7 +1102,7 @@ TABLE *delayed_insert::get_local_table(THD* client_thd) /* Put a question in queue */ -static int write_delayed(THD *thd,TABLE *table,enum_duplicates duplic, +static int write_delayed(THD *thd,TABLE *table,enum_duplicates duplic, bool ignore, char *query, uint query_length, int log_on) { delayed_row *row=0; @@ -874,7 +1115,7 @@ static int write_delayed(THD *thd,TABLE *table,enum_duplicates duplic, pthread_cond_wait(&di->cond_client,&di->mutex); thd->proc_info="storing row into queue"; - if (thd->killed || !(row= new delayed_row(duplic, log_on))) + if (thd->killed || !(row= new delayed_row(duplic, ignore, log_on))) goto err; if (!query) @@ -894,7 +1135,7 @@ static int write_delayed(THD *thd,TABLE *table,enum_duplicates duplic, row->last_insert_id_used= thd->last_insert_id_used; row->insert_id_used= thd->insert_id_used; row->last_insert_id= thd->last_insert_id; - row->time_stamp= table->time_stamp; + row->timestamp_field_type= table->timestamp_field_type; di->rows.push_back(row); di->stacked_inserts++; @@ -1004,7 +1245,7 @@ extern "C" pthread_handler_decl(handle_delayed_insert,arg) DBUG_ENTER("handle_delayed_insert"); if (init_thr_lock() || thd->store_globals()) { - thd->fatal_error=1; + thd->fatal_error(); strmov(thd->net.last_error,ER(thd->net.last_errno=ER_OUT_OF_RESOURCES)); goto end; } @@ -1018,12 +1259,12 @@ extern "C" pthread_handler_decl(handle_delayed_insert,arg) if (!(di->table=open_ltable(thd,&di->table_list,TL_WRITE_DELAYED))) { - thd->fatal_error=1; // Abort waiting inserts + thd->fatal_error(); // Abort waiting inserts goto end; } - if (di->table->file->has_transactions()) + if (!(di->table->file->table_flags() & HA_CAN_INSERT_DELAYED)) { - thd->fatal_error=1; + thd->fatal_error(); my_error(ER_ILLEGAL_HA, MYF(0), di->table_list.real_name); goto end; } @@ -1119,7 +1360,8 @@ extern "C" pthread_handler_decl(handle_delayed_insert,arg) if (! 
(thd->lock= mysql_lock_tables(thd, &di->table, 1, MYSQL_LOCK_IGNORE_GLOBAL_READ_LOCK))) { - di->dead=thd->killed=1; // Fatal error + di->dead= 1; // Some fatal error + thd->killed= 1; } pthread_cond_broadcast(&di->cond_client); } @@ -1127,14 +1369,17 @@ extern "C" pthread_handler_decl(handle_delayed_insert,arg) { if (di->handle_inserts()) { - di->dead=thd->killed=1; // Some fatal error + di->dead= 1; // Some fatal error + thd->killed= 1; } } di->status=0; if (!di->stacked_inserts && !di->tables_in_use && thd->lock) { - /* No one is doing a insert delayed; - Unlock it so that other threads can use it */ + /* + No one is doing a insert delayed + Unlock table so that other threads can use it + */ MYSQL_LOCK *lock=thd->lock; thd->lock=0; pthread_mutex_unlock(&di->mutex); @@ -1154,7 +1399,8 @@ end: close_thread_tables(thd); // Free the table di->table=0; - di->dead=thd->killed=1; // If error + di->dead= 1; // If error + thd->killed= 1; pthread_cond_broadcast(&di->cond_client); // Safety pthread_mutex_unlock(&di->mutex); @@ -1201,7 +1447,7 @@ static void free_delayed_insert_blobs(register TABLE *table) bool delayed_insert::handle_inserts(void) { int error; - uint max_rows; + ulong max_rows; bool using_ignore=0, using_bin_log=mysql_bin_log.is_open(); delayed_row *row; DBUG_ENTER("handle_inserts"); @@ -1220,11 +1466,11 @@ bool delayed_insert::handle_inserts(void) } thd.proc_info="insert"; - max_rows=delayed_insert_limit; + max_rows= delayed_insert_limit; if (thd.killed || table->version != refresh_version) { thd.killed=1; - max_rows= ~0; // Do as much as possible + max_rows= ~(ulong)0; // Do as much as possible } /* @@ -1246,11 +1492,12 @@ bool delayed_insert::handle_inserts(void) thd.last_insert_id=row->last_insert_id; thd.last_insert_id_used=row->last_insert_id_used; thd.insert_id_used=row->insert_id_used; - table->time_stamp=row->time_stamp; + table->timestamp_field_type= row->timestamp_field_type; + info.ignore= row->ignore; info.handle_duplicates= row->dup; - if (info.handle_duplicates == DUP_IGNORE || - info.handle_duplicates == DUP_REPLACE) + if (info.ignore || + info.handle_duplicates != DUP_ERROR) { table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); using_ignore=1; @@ -1273,7 +1520,7 @@ bool delayed_insert::handle_inserts(void) mysql_update_log.write(&thd,row->query, row->query_length); if (row->log_query & DELAYED_LOG_BIN && using_bin_log) { - Query_log_event qinfo(&thd, row->query, row->query_length,0); + Query_log_event qinfo(&thd, row->query, row->query_length,0, FALSE); mysql_bin_log.write(&qinfo); } } @@ -1349,81 +1596,136 @@ bool delayed_insert::handle_inserts(void) pthread_mutex_lock(&mutex); DBUG_RETURN(1); } - - +#endif /* EMBEDDED_LIBRARY */ /*************************************************************************** Store records in INSERT ... SELECT * ***************************************************************************/ int -select_insert::prepare(List<Item> &values) +select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u) { + int res; + LEX *lex= thd->lex; + SELECT_LEX *lex_current_select_save= lex->current_select; + bool lex_select_no_error= lex->select_lex.no_error; DBUG_ENTER("select_insert::prepare"); - if (check_insert_fields(thd,table,*fields,values,1)) + unit= u; + /* + Since table in which we are going to insert is added to the first + select, LEX::current_select should point to the first select while + we are fixing fields from insert list. 
+ Since these checks may cause the query to fail, we don't want the + error messages to be converted into warnings, must force no_error=0 + */ + lex->current_select= &lex->select_lex; + lex->select_lex.no_error= 0; + res= + check_insert_fields(thd, table, *fields, values) || + setup_fields(thd, 0, insert_table_list, values, 0, 0, 0) || + (info.handle_duplicates == DUP_UPDATE && + (check_update_fields(thd, table, insert_table_list, *info.update_fields) || + setup_fields(thd, 0, dup_table_list, *info.update_values, 1, 0, 0))); + lex->current_select= lex_current_select_save; + lex->select_lex.no_error= lex_select_no_error; + if (res) DBUG_RETURN(1); - restore_record(table,2); // Get empty record + restore_record(table,default_values); // Get empty record table->next_number_field=table->found_next_number_field; - thd->count_cuted_fields=1; // calc cuted fields thd->cuted_fields=0; - if (info.handle_duplicates != DUP_REPLACE) - table->file->extra(HA_EXTRA_WRITE_CACHE); - if (info.handle_duplicates == DUP_IGNORE || - info.handle_duplicates == DUP_REPLACE) + if (info.ignore || + info.handle_duplicates != DUP_ERROR) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); - table->file->deactivate_non_unique_index((ha_rows) 0); + table->file->start_bulk_insert((ha_rows) 0); DBUG_RETURN(0); } + +void select_insert::cleanup() +{ + /* select_insert/select_create are never re-used in prepared statement */ + DBUG_ASSERT(0); +} + select_insert::~select_insert() { if (table) { table->next_number_field=0; - table->file->extra(HA_EXTRA_RESET); + table->file->reset(); } - thd->count_cuted_fields=0; + thd->count_cuted_fields= CHECK_FIELD_IGNORE; } bool select_insert::send_data(List<Item> &values) { DBUG_ENTER("select_insert::send_data"); - if (thd->offset_limit) + bool error=0; + if (unit->offset_limit_cnt) { // using limit offset,count - thd->offset_limit--; + unit->offset_limit_cnt--; DBUG_RETURN(0); } + thd->count_cuted_fields= CHECK_FIELD_WARN; // calc cuted fields + store_values(values); + error=thd->net.report_error || write_record(table,&info); + thd->count_cuted_fields= CHECK_FIELD_IGNORE; + + if (!error) + { + /* + Restore fields of the record since it is possible that they were + changed by ON DUPLICATE KEY UPDATE clause. + */ + if (info.handle_duplicates == DUP_UPDATE) + restore_record(table, default_values); + + if (table->next_number_field) // Clear for next record + { + table->next_number_field->reset(); + if (! last_insert_id && thd->insert_id_used) + last_insert_id=thd->insert_id(); + } + } + DBUG_RETURN(error); +} + + +void select_insert::store_values(List<Item> &values) +{ if (fields->elements) fill_record(*fields, values, 1); else fill_record(table->field, values, 1); - if (write_record(table,&info)) - DBUG_RETURN(1); - if (table->next_number_field) // Clear for next record - { - table->next_number_field->reset(); - if (! last_insert_id && thd->insert_id_used) - last_insert_id=thd->insert_id(); - } - DBUG_RETURN(0); } - void select_insert::send_error(uint errcode,const char *err) { - ::send_error(&thd->net,errcode,err); - table->file->extra(HA_EXTRA_NO_CACHE); - table->file->activate_all_index(thd); - /* - If at least one row has been inserted/modified and will stay in the table - (the table doesn't have transactions) (example: we got a duplicate key - error while inserting into a MyISAM table) we must write to the binlog (and - the error code will make the slave stop). 
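The LIMIT offset handling in select_insert::send_data() above is just a skip counter (offset_limit_cnt) that is decremented and short-circuits the row before anything is stored. A simplified standalone rendering, with an added row cap to show where the count part of LIMIT would apply; the counter name is borrowed from the patch, the loop itself is invented:

#include <iostream>
#include <vector>

int main()
{
  std::vector<int> rows;
  for (int i= 1; i <= 5; i++)
    rows.push_back(i);

  unsigned long offset_limit_cnt= 2;   // LIMIT 2,2 -> skip 2 rows, keep 2
  unsigned long select_limit= 2, stored= 0;
  for (size_t i= 0; i < rows.size() && stored < select_limit; i++)
  {
    if (offset_limit_cnt)
    {
      offset_limit_cnt--;              // still inside the OFFSET part: skip row
      continue;
    }
    std::cout << "store row " << rows[i] << "\n";
    stored++;
  }
  return 0;
}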
+ DBUG_ENTER("select_insert::send_error"); + + /* TODO error should be sent at the query processing end */ + ::send_error(thd,errcode,err); + + if (!table) + { + /* + This can only happen when using CREATE ... SELECT and the table was not + created becasue of an syntax error + */ + DBUG_VOID_RETURN; + } + table->file->end_bulk_insert(); + /* + If at least one row has been inserted/modified and will stay in the table + (the table doesn't have transactions) (example: we got a duplicate key + error while inserting into a MyISAM table) we must write to the binlog (and + the error code will make the slave stop). */ - if ((info.copied || info.deleted) && !table->file->has_transactions()) + if ((info.copied || info.deleted || info.updated) && + !table->file->has_transactions()) { if (last_insert_id) thd->insert_id(last_insert_id); // For binary log @@ -1431,27 +1733,39 @@ void select_insert::send_error(uint errcode,const char *err) if (mysql_bin_log.is_open()) { Query_log_event qinfo(thd, thd->query, thd->query_length, - table->file->has_transactions()); + table->file->has_transactions(), FALSE); mysql_bin_log.write(&qinfo); } if (!table->tmp_table) - thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; + thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; } - ha_rollback_stmt(thd); - if (info.copied || info.deleted) - { + if (info.copied || info.deleted || info.updated) query_cache_invalidate3(thd, table, 1); - } + ha_rollback_stmt(thd); + DBUG_VOID_RETURN; } bool select_insert::send_eof() { int error,error2; - if (!(error=table->file->extra(HA_EXTRA_NO_CACHE))) - error=table->file->activate_all_index(thd); + DBUG_ENTER("select_insert::send_eof"); + + error=table->file->end_bulk_insert(); table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); + /* + We must invalidate the table in the query cache before binlog writing + and ha_autocommit_... + */ + + if (info.copied || info.deleted || info.updated) + { + query_cache_invalidate3(thd, table, 1); + if (!(table->file->has_transactions() || table->tmp_table)) + thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; + } + if (last_insert_id) thd->insert_id(last_insert_id); // For binary log /* Write to binlog before commiting transaction */ @@ -1461,32 +1775,27 @@ bool select_insert::send_eof() if (!error) thd->clear_error(); Query_log_event qinfo(thd, thd->query, thd->query_length, - table->file->has_transactions()); + table->file->has_transactions(), FALSE); mysql_bin_log.write(&qinfo); } if ((error2=ha_autocommit_or_rollback(thd,error)) && ! 
error) error=error2; - if (info.copied || info.deleted) - { - query_cache_invalidate3(thd, table, 1); - if (!(table->file->has_transactions() || table->tmp_table)) - thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; - } if (error) { table->file->print_error(error,MYF(0)); - ::send_error(&thd->net); - return 1; + //TODO error should be sent at the query processing end + ::send_error(thd); + DBUG_RETURN(1); } char buff[160]; - if (info.handle_duplicates == DUP_IGNORE) - sprintf(buff,ER(ER_INSERT_INFO),info.records,info.records-info.copied, - thd->cuted_fields); + if (info.ignore) + sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records, + (ulong) (info.records - info.copied), (ulong) thd->cuted_fields); else - sprintf(buff,ER(ER_INSERT_INFO),info.records,info.deleted, - thd->cuted_fields); - ::send_ok(&thd->net,info.copied+info.deleted,last_insert_id,buff); - return 0; + sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records, + (ulong) (info.deleted+info.updated), (ulong) thd->cuted_fields); + ::send_ok(thd,info.copied+info.deleted+info.updated,last_insert_id,buff); + DBUG_RETURN(0); } @@ -1495,10 +1804,11 @@ bool select_insert::send_eof() ***************************************************************************/ int -select_create::prepare(List<Item> &values) +select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u) { DBUG_ENTER("select_create::prepare"); + unit= u; table= create_table_from_items(thd, create_info, db, name, extra_fields, keys, &values, &lock); if (!table) @@ -1515,41 +1825,24 @@ select_create::prepare(List<Item> &values) /* First field to copy */ field=table->field+table->fields - values.elements; - if (table->timestamp_field) // Don't set timestamp if used - { - table->timestamp_field->set_time(); - table->time_stamp=0; // This should be saved - } + /* Don't set timestamp if used */ + table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; + table->next_number_field=table->found_next_number_field; - restore_record(table,2); // Get empty record - thd->count_cuted_fields=1; // count warnings + restore_record(table,default_values); // Get empty record thd->cuted_fields=0; - if (info.handle_duplicates == DUP_IGNORE || - info.handle_duplicates == DUP_REPLACE) + if (info.ignore || + info.handle_duplicates != DUP_ERROR) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); - table->file->deactivate_non_unique_index((ha_rows) 0); + table->file->start_bulk_insert((ha_rows) 0); DBUG_RETURN(0); } -bool select_create::send_data(List<Item> &values) +void select_create::store_values(List<Item> &values) { - if (thd->offset_limit) - { // using limit offset,count - thd->offset_limit--; - return 0; - } fill_record(field, values, 1); - if (write_record(table,&info)) - return 1; - if (table->next_number_field) // Clear for next record - { - table->next_number_field->reset(); - if (! last_insert_id && thd->insert_id_used) - last_insert_id=thd->insert_id(); - } - return 0; } @@ -1565,9 +1858,6 @@ void select_create::send_error(uint errcode,const char *err) } -extern HASH open_cache; - - bool select_create::send_eof() { bool tmp=select_insert::send_eof(); @@ -1584,7 +1874,13 @@ bool select_create::send_eof() We should be able to just keep the table in the table cache. 
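
The reworked send_eof() above also casts every counter to ulong before formatting the ER_INSERT_INFO message. The casts matter because a varargs formatter cannot adjust argument widths itself; a standalone illustration with a made-up format string and counters:

#include <cstdio>

int main() {
  // Counters deliberately wider than the format expects, standing in for the
  // server's row counters.
  unsigned long long records = 42, copied = 40, warnings = 1;
  char buff[160];
  std::snprintf(buff, sizeof(buff),
                "Records: %lu  Duplicates: %lu  Warnings: %lu",
                (unsigned long) records,             // narrow explicitly,
                (unsigned long) (records - copied),  // matching each %lu
                (unsigned long) warnings);
  std::puts(buff);
  return 0;
}
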
*/ if (!table->tmp_table) + { + ulong version= table->version; hash_delete(&open_cache,(byte*) table); + /* Tell threads waiting for refresh that something has happened */ + if (version != refresh_version) + VOID(pthread_cond_broadcast(&COND_refresh)); + } lock=0; table=0; VOID(pthread_mutex_unlock(&LOCK_open)); @@ -1606,9 +1902,13 @@ void select_create::abort() enum db_type table_type=table->db_type; if (!table->tmp_table) { + ulong version= table->version; hash_delete(&open_cache,(byte*) table); if (!create_info->table_existed) quick_rm_table(table_type, db, name); + /* Tell threads waiting for refresh that something has happened */ + if (version != refresh_version) + VOID(pthread_cond_broadcast(&COND_refresh)); } else if (!create_info->table_existed) close_temporary_table(thd, db, name); @@ -1624,7 +1924,9 @@ void select_create::abort() #ifdef __GNUC__ template class List_iterator_fast<List_item>; +#ifndef EMBEDDED_LIBRARY template class I_List<delayed_insert>; template class I_List_iterator<delayed_insert>; template class I_List<delayed_row>; -#endif +#endif /* EMBEDDED_LIBRARY */ +#endif /* __GNUC__ */ diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 9fb35e3f914..16641ad6dd5 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -22,7 +22,15 @@ #include <m_ctype.h> #include <hash.h> -LEX_STRING tmp_table_alias= {(char*) "tmp-table",8}; + +/* + Fake table list object, pointer to which is used as special value for + st_lex::time_zone_tables_used indicating that we implicitly use time + zone tables in this statement but real table list was not yet created. + Pointer to it is also returned by my_tz_get_tables_list() as indication + of transient error; +*/ +TABLE_LIST fake_time_zone_tables_list; /* Macros to look like lex */ @@ -40,10 +48,11 @@ LEX_STRING tmp_table_alias= {(char*) "tmp-table",8}; pthread_key(LEX*,THR_LEX); +/* Longest standard keyword name */ #define TOCK_NAME_LENGTH 24 /* - The following is based on the latin1 character set, and is only + The following data is based on the latin1 character set, and is only used when comparing keywords */ @@ -66,6 +75,7 @@ uchar to_upper_lex[] = { 208,209,210,211,212,213,214,247,216,217,218,219,220,221,222,255 }; + inline int lex_casecmp(const char *s, const char *t, uint len) { while (len-- != 0 && @@ -75,8 +85,6 @@ inline int lex_casecmp(const char *s, const char *t, uint len) #include "lex_hash.h" -static uchar state_map[256]; - void lex_init(void) { @@ -89,42 +97,6 @@ void lex_init(void) VOID(pthread_key_create(&THR_LEX,NULL)); - /* Fill state_map with states to get a faster parser */ - for (i=0; i < 256 ; i++) - { - if (isalpha(i)) - state_map[i]=(uchar) STATE_IDENT; - else if (isdigit(i)) - state_map[i]=(uchar) STATE_NUMBER_IDENT; -#if defined(USE_MB) && defined(USE_MB_IDENT) - else if (use_mb(default_charset_info) && my_ismbhead(default_charset_info, i)) - state_map[i]=(uchar) STATE_IDENT; -#endif - else if (!isgraph(i)) - state_map[i]=(uchar) STATE_SKIP; - else - state_map[i]=(uchar) STATE_CHAR; - } - state_map[(uchar)'_']=state_map[(uchar)'$']=(uchar) STATE_IDENT; - state_map[(uchar)'\'']=state_map[(uchar)'"']=(uchar) STATE_STRING; - state_map[(uchar)'-']=state_map[(uchar)'+']=(uchar) STATE_SIGNED_NUMBER; - state_map[(uchar)'.']=(uchar) STATE_REAL_OR_POINT; - state_map[(uchar)'>']=state_map[(uchar)'=']=state_map[(uchar)'!']= (uchar) STATE_CMP_OP; - state_map[(uchar)'<']= (uchar) STATE_LONG_CMP_OP; - state_map[(uchar)'&']=state_map[(uchar)'|']=(uchar) STATE_BOOL; - state_map[(uchar)'#']=(uchar) STATE_COMMENT; - 
state_map[(uchar)';']=(uchar) STATE_COLON; - state_map[(uchar)':']=(uchar) STATE_SET_VAR; - state_map[0]=(uchar) STATE_EOL; - state_map[(uchar)'\\']= (uchar) STATE_ESCAPE; - state_map[(uchar)'/']= (uchar) STATE_LONG_COMMENT; - state_map[(uchar)'*']= (uchar) STATE_END_LONG_COMMENT; - state_map[(uchar)'@']= (uchar) STATE_USER_END; - state_map[(uchar) '`']= (uchar) STATE_USER_VARIABLE_DELIMITER; - if (opt_sql_mode & MODE_ANSI_QUOTES) - { - state_map[(uchar) '"'] = STATE_USER_VARIABLE_DELIMITER; - } DBUG_VOID_RETURN; } @@ -136,31 +108,66 @@ void lex_free(void) } -LEX *lex_start(THD *thd, uchar *buf,uint length) +/* + This is called before every query that is to be parsed. + Because of this, it's critical to not do too much things here. + (We already do too much here) +*/ + +void lex_start(THD *thd, uchar *buf,uint length) { - LEX *lex= &thd->lex; - lex->next_state=STATE_START; + LEX *lex= thd->lex; + lex->unit.init_query(); + lex->unit.init_select(); + lex->thd= thd; + lex->unit.thd= thd; + lex->select_lex.init_query(); + lex->value_list.empty(); + lex->update_list.empty(); + lex->param_list.empty(); + lex->unit.next= lex->unit.master= + lex->unit.link_next= lex->unit.return_to= 0; + lex->unit.prev= lex->unit.link_prev= 0; + lex->unit.slave= lex->unit.global_parameters= lex->current_select= + lex->all_selects_list= &lex->select_lex; + lex->select_lex.master= &lex->unit; + lex->select_lex.prev= &lex->unit.slave; + lex->select_lex.link_next= lex->select_lex.slave= lex->select_lex.next= 0; + lex->select_lex.link_prev= (st_select_lex_node**)&(lex->all_selects_list); + lex->select_lex.options= 0; + lex->describe= 0; + lex->subqueries= lex->derived_tables= FALSE; + lex->lock_option= TL_READ; + lex->found_colon= 0; + lex->safe_to_cache_query= 1; + lex->time_zone_tables_used= 0; + lex->select_lex.select_number= 1; + lex->next_state=MY_LEX_START; lex->end_of_query=(lex->ptr=buf)+length; lex->yylineno = 1; - lex->select->create_refs=lex->in_comment=0; + lex->in_comment=0; lex->length=0; - lex->select->in_sum_expr=0; - lex->select->expr_list.empty(); - lex->select->ftfunc_list.empty(); - lex->lock_option=TL_READ; - lex->convert_set=(lex->thd=thd)->variables.convert_set; + lex->select_lex.in_sum_expr=0; + lex->select_lex.expr_list.empty(); + lex->select_lex.ftfunc_list_alloc.empty(); + lex->select_lex.ftfunc_list= &lex->select_lex.ftfunc_list_alloc; + lex->select_lex.group_list.empty(); + lex->select_lex.order_list.empty(); + lex->current_select= &lex->select_lex; lex->yacc_yyss=lex->yacc_yyvs=0; - lex->ignore_space=test(thd->sql_mode & MODE_IGNORE_SPACE); - lex->slave_thd_opt=0; + lex->ignore_space=test(thd->variables.sql_mode & MODE_IGNORE_SPACE); lex->sql_command=SQLCOM_END; - bzero((char *)&lex->mi,sizeof(lex->mi)); - lex->select_lex.is_item_list_lookup= 0; - return lex; + lex->duplicates= DUP_ERROR; + lex->ignore= 0; + lex->proc_list.first= 0; } void lex_end(LEX *lex) { - lex->select->expr_list.delete_elements(); // If error when parsing sql-varargs + for (SELECT_LEX *sl= lex->all_selects_list; + sl; + sl= sl->next_select_in_list()) + sl->expr_list.delete_elements(); // If error when parsing sql-varargs x_free(lex->yacc_yyss); x_free(lex->yacc_yyvs); } @@ -182,7 +189,7 @@ static int find_keyword(LEX *lex, uint len, bool function) udf_func *udf; if (function && using_udf_functions && (udf=find_udf((char*) tok, len))) { - lex->thd->safe_to_cache_query=0; + lex->safe_to_cache_query=0; lex->yylval->udf=udf; switch (udf->returns) { case STRING_RESULT: @@ -191,16 +198,38 @@ static int find_keyword(LEX 
*lex, uint len, bool function) return (udf->type == UDFTYPE_FUNCTION) ? UDF_FLOAT_FUNC : UDA_FLOAT_SUM; case INT_RESULT: return (udf->type == UDFTYPE_FUNCTION) ? UDF_INT_FUNC : UDA_INT_SUM; + case ROW_RESULT: + default: + // This case should never be choosen + DBUG_ASSERT(0); + return 0; } } #endif return 0; } +/* + Check if name is a keyword + + SYNOPSIS + is_keyword() + name checked name + len length of checked name + + RETURN VALUES + 0 name is a keyword + 1 name isn't a keyword +*/ + +bool is_keyword(const char *name, uint len) +{ + return get_hash_symbol(name,len,0)!=0; +} /* make a copy of token before ptr and set yytoklen */ -LEX_STRING get_token(LEX *lex,uint length) +static LEX_STRING get_token(LEX *lex,uint length) { LEX_STRING tmp; yyUnget(); // ptr points now after last token char @@ -209,13 +238,42 @@ LEX_STRING get_token(LEX *lex,uint length) return tmp; } -/* Return an unescaped text literal without quotes */ -/* Fix sometimes to do only one scan of the string */ +/* + todo: + There are no dangerous charsets in mysql for function + get_quoted_token yet. But it should be fixed in the + future to operate multichar strings (like ucs2) +*/ + +static LEX_STRING get_quoted_token(LEX *lex,uint length, char quote) +{ + LEX_STRING tmp; + byte *from, *to, *end; + yyUnget(); // ptr points now after last token char + tmp.length=lex->yytoklen=length; + tmp.str=(char*) lex->thd->alloc(tmp.length+1); + for (from= (byte*) lex->tok_start, to= (byte*) tmp.str, end= to+length ; + to != end ; + ) + { + if ((*to++= *from++) == quote) + from++; // Skip double quotes + } + *to= 0; // End null for safety + return tmp; +} + + +/* + Return an unescaped text literal without quotes + Fix sometimes to do only one scan of the string +*/ static char *get_text(LEX *lex) { reg1 uchar c,sep; uint found_escape=0; + CHARSET_INFO *cs= lex->thd->charset(); sep= yyGetLast(); // String should end with this //lex->tok_start=lex->ptr-1; // Remember ' @@ -224,8 +282,8 @@ static char *get_text(LEX *lex) c = yyGet(); #ifdef USE_MB int l; - if (use_mb(default_charset_info) && - (l = my_ismbchar(default_charset_info, + if (use_mb(cs) && + (l = my_ismbchar(cs, (const char *)lex->ptr-1, (const char *)lex->end_of_query))) { lex->ptr += l-1; @@ -237,7 +295,18 @@ static char *get_text(LEX *lex) found_escape=1; if (lex->ptr == lex->end_of_query) return 0; - yySkip(); +#ifdef USE_MB + int l; + if (use_mb(cs) && + (l = my_ismbchar(cs, + (const char *)lex->ptr, + (const char *)lex->end_of_query))) { + lex->ptr += l; + continue; + } + else +#endif + yySkip(); } else if (c == sep) { @@ -265,12 +334,16 @@ static char *get_text(LEX *lex) else { uchar *to; + + /* Re-use found_escape for tracking state of escapes */ + found_escape= 0; + for (to=start ; str != end ; str++) { #ifdef USE_MB int l; - if (use_mb(default_charset_info) && - (l = my_ismbchar(default_charset_info, + if (use_mb(cs) && + (l = my_ismbchar(cs, (const char *)str, (const char *)end))) { while (l--) *to++ = *str++; @@ -278,7 +351,7 @@ static char *get_text(LEX *lex) continue; } #endif - if (*str == '\\' && str+1 != end) + if (!found_escape && *str == '\\' && str+1 != end) { switch(*++str) { case 'n': @@ -304,21 +377,24 @@ static char *get_text(LEX *lex) *to++= '\\'; // remember prefix for wildcard /* Fall through */ default: - *to++ = *str; + found_escape= 1; + str--; break; } } - else if (*str == sep) - *to++= *str++; // Two ' or " + else if (!found_escape && *str == sep) + { + found_escape= 1; + } else + { *to++ = *str; - + found_escape= 0; + } } *to=0; 
lex->yytoklen=(uint) (to-start); } - if (lex->convert_set) - lex->convert_set->convert((char*) start,lex->yytoklen); return (char*) start; } } @@ -344,7 +420,7 @@ static const uint signed_longlong_len=19; static const char *unsigned_longlong_str="18446744073709551615"; static const uint unsigned_longlong_len=20; -inline static uint int_token(const char *str,uint length) +static inline uint int_token(const char *str,uint length) { if (length < long_len) // quick normal case return NUM; @@ -416,85 +492,130 @@ inline static uint int_token(const char *str,uint length) return ((uchar) str[-1] <= (uchar) cmp[-1]) ? smaller : bigger; } +/* + yylex remember the following states from the following yylex() -// yylex remember the following states from the following yylex() -// STATE_EOQ ; found end of query -// STATE_OPERATOR_OR_IDENT ; last state was an ident, text or number -// (which can't be followed by a signed number) + - MY_LEX_EOQ Found end of query + - MY_LEX_OPERATOR_OR_IDENT Last state was an ident, text or number + (which can't be followed by a signed number) +*/ -int yylex(void *arg) +int yylex(void *arg, void *yythd) { reg1 uchar c; - int tokval; + int tokval, result_state; uint length; - enum lex_states state,prev_state; - LEX *lex=current_lex; + enum my_lex_states state; + LEX *lex= ((THD *)yythd)->lex; YYSTYPE *yylval=(YYSTYPE*) arg; + CHARSET_INFO *cs= ((THD *) yythd)->charset(); + uchar *state_map= cs->state_map; + uchar *ident_map= cs->ident_map; lex->yylval=yylval; // The global state lex->tok_start=lex->tok_end=lex->ptr; - prev_state=state=lex->next_state; - lex->next_state=STATE_OPERATOR_OR_IDENT; + state=lex->next_state; + lex->next_state=MY_LEX_OPERATOR_OR_IDENT; LINT_INIT(c); for (;;) { - switch(state) { - case STATE_OPERATOR_OR_IDENT: // Next is operator or keyword - case STATE_START: // Start of token + switch (state) { + case MY_LEX_OPERATOR_OR_IDENT: // Next is operator or keyword + case MY_LEX_START: // Start of token // Skip startspace - for (c=yyGet() ; (state_map[c] == STATE_SKIP) ; c= yyGet()) + for (c=yyGet() ; (state_map[c] == MY_LEX_SKIP) ; c= yyGet()) { if (c == '\n') lex->yylineno++; } lex->tok_start=lex->ptr-1; // Start of real token - state= (enum lex_states) state_map[c]; + state= (enum my_lex_states) state_map[c]; break; - case STATE_ESCAPE: + case MY_LEX_ESCAPE: if (yyGet() == 'N') { // Allow \N as shortcut for NULL yylval->lex_str.str=(char*) "\\N"; yylval->lex_str.length=2; return NULL_SYM; } - case STATE_CHAR: // Unknown or single char token - case STATE_SKIP: // This should not happen - yylval->lex_str.str=(char*) (lex->ptr=lex->tok_start);// Set to first char + case MY_LEX_CHAR: // Unknown or single char token + case MY_LEX_SKIP: // This should not happen + if (c == '-' && yyPeek() == '-' && + (my_isspace(cs,yyPeek2()) || + my_iscntrl(cs,yyPeek2()))) + { + state=MY_LEX_COMMENT; + break; + } + yylval->lex_str.str=(char*) (lex->ptr=lex->tok_start);// Set to first chr yylval->lex_str.length=1; c=yyGet(); if (c != ')') - lex->next_state= STATE_START; // Allow signed numbers + lex->next_state= MY_LEX_START; // Allow signed numbers if (c == ',') lex->tok_start=lex->ptr; // Let tok_start point at next item + /* + Check for a placeholder: it should not precede a possible identifier + because of binlogging: when a placeholder is replaced with + its value in a query for the binlog, the query must stay + grammatically correct. + */ + else if (c == '?' 
&& ((THD*) yythd)->command == COM_PREPARE && + !ident_map[yyPeek()]) + return(PARAM_MARKER); return((int) c); - case STATE_IDENT: // Incomplete keyword or ident - if ((c == 'x' || c == 'X') && yyPeek() == '\'') + case MY_LEX_IDENT_OR_NCHAR: + if (yyPeek() != '\'') { // Found x'hex-number' - state=STATE_HEX_NUMBER; + state= MY_LEX_IDENT; break; } + yyGet(); // Skip ' + while ((c = yyGet()) && (c !='\'')) ; + length=(lex->ptr - lex->tok_start); // Length of hexnum+3 + if (c != '\'') + { + return(ABORT_SYM); // Illegal hex constant + } + yyGet(); // get_token makes an unget + yylval->lex_str=get_token(lex,length); + yylval->lex_str.str+=2; // Skip x' + yylval->lex_str.length-=3; // Don't count x' and last ' + lex->yytoklen-=3; + return (NCHAR_STRING); + + case MY_LEX_IDENT_OR_HEX: + if (yyPeek() == '\'') + { // Found x'hex-number' + state= MY_LEX_HEX_NUMBER; + break; + } + /* Fall through */ + case MY_LEX_IDENT_OR_BIN: // TODO: Add binary string handling + case MY_LEX_IDENT: + uchar *start; #if defined(USE_MB) && defined(USE_MB_IDENT) - if (use_mb(default_charset_info)) + if (use_mb(cs)) { - if (my_ismbhead(default_charset_info, yyGetLast())) + result_state= IDENT_QUOTED; + if (my_mbcharlen(cs, yyGetLast()) > 1) { - int l = my_ismbchar(default_charset_info, + int l = my_ismbchar(cs, (const char *)lex->ptr-1, (const char *)lex->end_of_query); if (l == 0) { - state = STATE_CHAR; + state = MY_LEX_CHAR; continue; } lex->ptr += l - 1; } - while (state_map[c=yyGet()] == STATE_IDENT || - state_map[c] == STATE_NUMBER_IDENT) + while (ident_map[c=yyGet()]) { - if (my_ismbhead(default_charset_info, c)) + if (my_mbcharlen(cs, c) > 1) { int l; - if ((l = my_ismbchar(default_charset_info, + if ((l = my_ismbchar(cs, (const char *)lex->ptr-1, (const char *)lex->end_of_query)) == 0) break; @@ -504,54 +625,74 @@ int yylex(void *arg) } else #endif - while (state_map[c=yyGet()] == STATE_IDENT || - state_map[c] == STATE_NUMBER_IDENT) ; + { + for (result_state= c; ident_map[c= yyGet()]; result_state|= c); + /* If there were non-ASCII characters, mark that we must convert */ + result_state= result_state & 0x80 ? IDENT_QUOTED : IDENT; + } length= (uint) (lex->ptr - lex->tok_start)-1; + start= lex->ptr; if (lex->ignore_space) { - for (; state_map[c] == STATE_SKIP ; c= yyGet()); + /* + If we find a space then this can't be an identifier. We notice this + below by checking start != lex->ptr. + */ + for (; state_map[c] == MY_LEX_SKIP ; c= yyGet()); } - if (c == '.' && (state_map[yyPeek()] == STATE_IDENT || - state_map[yyPeek()] == STATE_NUMBER_IDENT)) - lex->next_state=STATE_IDENT_SEP; + if (start == lex->ptr && c == '.' && ident_map[yyPeek()]) + lex->next_state=MY_LEX_IDENT_SEP; else { // '(' must follow directly if function yyUnget(); if ((tokval = find_keyword(lex,length,c == '('))) { - lex->next_state= STATE_START; // Allow signed numbers + lex->next_state= MY_LEX_START; // Allow signed numbers return(tokval); // Was keyword } yySkip(); // next state does a unget } yylval->lex_str=get_token(lex,length); - if (lex->convert_set) - lex->convert_set->convert((char*) yylval->lex_str.str,lex->yytoklen); - return(IDENT); - case STATE_IDENT_SEP: // Found ident and now '.' - lex->next_state=STATE_IDENT_START;// Next is an ident (not a keyword) + /* + Note: "SELECT _bla AS 'alias'" + _bla should be considered as a IDENT if charset haven't been found. + So we don't use MYF(MY_WME) with get_charset_by_csname to avoid + producing an error. 
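
The identifier scanning above ORs every byte of the token into result_state and then tests bit 0x80 once to choose between IDENT and IDENT_QUOTED, i.e. "contains non-ASCII, must be converted". A self-contained illustration of the same trick; the token codes here are invented:

#include <cstring>
#include <cstdio>

enum { IDENT = 1, IDENT_QUOTED = 2 };

int classify_ident(const unsigned char *s, size_t len) {
  unsigned int acc = 0;
  for (size_t i = 0; i < len; i++)
    acc |= s[i];                               // one pass, no per-byte branch
  return (acc & 0x80) ? IDENT_QUOTED : IDENT;  // any non-ASCII byte set bit 7
}

int main() {
  const char *ascii = "customer_id";
  const char *latin = "n\xe4me";               // contains a byte >= 0x80
  std::printf("%d %d\n",
              classify_ident((const unsigned char*) ascii, std::strlen(ascii)),
              classify_ident((const unsigned char*) latin, std::strlen(latin)));
  return 0;
}
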
+ */ + + if ((yylval->lex_str.str[0]=='_') && + (lex->charset=get_charset_by_csname(yylval->lex_str.str+1, + MY_CS_PRIMARY,MYF(0)))) + return(UNDERSCORE_CHARSET); + return(result_state); // IDENT or IDENT_QUOTED + + case MY_LEX_IDENT_SEP: // Found ident and now '.' yylval->lex_str.str=(char*) lex->ptr; yylval->lex_str.length=1; c=yyGet(); // should be '.' + lex->next_state= MY_LEX_IDENT_START;// Next is an ident (not a keyword) + if (!ident_map[yyPeek()]) // Probably ` or " + lex->next_state= MY_LEX_START; return((int) c); - case STATE_NUMBER_IDENT: // number or ident which num-start - while (isdigit((c = yyGet()))) ; - if (state_map[c] != STATE_IDENT) + case MY_LEX_NUMBER_IDENT: // number or ident which num-start + while (my_isdigit(cs,(c = yyGet()))) ; + if (!ident_map[c]) { // Can't be identifier - state=STATE_INT_OR_REAL; + state=MY_LEX_INT_OR_REAL; break; } if (c == 'e' || c == 'E') { // The following test is written this way to allow numbers of type 1e1 - if (isdigit(yyPeek()) || (c=(yyGet())) == '+' || c == '-') + if (my_isdigit(cs,yyPeek()) || + (c=(yyGet())) == '+' || c == '-') { // Allow 1E+10 - if (isdigit(yyPeek())) // Number must have digit after sign + if (my_isdigit(cs,yyPeek())) // Number must have digit after sign { yySkip(); - while (isdigit(yyGet())) ; + while (my_isdigit(cs,yyGet())) ; yylval->lex_str=get_token(lex,yyLength()); return(FLOAT_NUM); } @@ -561,8 +702,8 @@ int yylex(void *arg) else if (c == 'x' && (lex->ptr - lex->tok_start) == 2 && lex->tok_start[0] == '0' ) { // Varbinary - while (isxdigit((c = yyGet()))) ; - if ((lex->ptr - lex->tok_start) >= 4 && state_map[c] != STATE_IDENT) + while (my_isxdigit(cs,(c = yyGet()))) ; + if ((lex->ptr - lex->tok_start) >= 4 && !ident_map[c]) { yylval->lex_str=get_token(lex,yyLength()); yylval->lex_str.str+=2; // Skip 0x @@ -573,29 +714,18 @@ int yylex(void *arg) yyUnget(); } // fall through - case STATE_IDENT_START: // Incomplete ident + case MY_LEX_IDENT_START: // We come here after '.' + result_state= IDENT; #if defined(USE_MB) && defined(USE_MB_IDENT) - if (use_mb(default_charset_info)) + if (use_mb(cs)) { - if (my_ismbhead(default_charset_info, yyGetLast())) + result_state= IDENT_QUOTED; + while (ident_map[c=yyGet()]) { - int l = my_ismbchar(default_charset_info, - (const char *)lex->ptr-1, - (const char *)lex->end_of_query); - if (l == 0) - { - state = STATE_CHAR; - continue; - } - lex->ptr += l - 1; - } - while (state_map[c=yyGet()] == STATE_IDENT || - state_map[c] == STATE_NUMBER_IDENT) - { - if (my_ismbhead(default_charset_info, c)) + if (my_mbcharlen(cs, c) > 1) { int l; - if ((l = my_ismbchar(default_charset_info, + if ((l = my_ismbchar(cs, (const char *)lex->ptr-1, (const char *)lex->end_of_query)) == 0) break; @@ -605,112 +735,84 @@ int yylex(void *arg) } else #endif - while (state_map[c = yyGet()] == STATE_IDENT || - state_map[c] == STATE_NUMBER_IDENT) ; - - if (c == '.' && (state_map[yyPeek()] == STATE_IDENT || - state_map[yyPeek()] == STATE_NUMBER_IDENT)) - lex->next_state=STATE_IDENT_SEP;// Next is '.' - // fall through + { + for (result_state=0; ident_map[c= yyGet()]; result_state|= c); + /* If there were non-ASCII characters, mark that we must convert */ + result_state= result_state & 0x80 ? IDENT_QUOTED : IDENT; + } + if (c == '.' && ident_map[yyPeek()]) + lex->next_state=MY_LEX_IDENT_SEP;// Next is '.' 
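
The number scanning above only accepts a sign after 'e'/'E' when a digit follows, so "1E+10" is one float while "1E+" falls back to "1E" plus '+'. A tiny standalone check of that rule; this is an illustration, not the server's scanner:

#include <cctype>
#include <cstdio>

// Returns the number of chars consumed as a numeric literal starting at s.
size_t scan_number(const char *s) {
  size_t i = 0;
  while (std::isdigit((unsigned char) s[i])) i++;
  if (s[i] == 'e' || s[i] == 'E') {
    size_t j = i + 1;
    if (s[j] == '+' || s[j] == '-') j++;
    if (std::isdigit((unsigned char) s[j])) {   // digit required after sign
      i = j;
      while (std::isdigit((unsigned char) s[i])) i++;
    }
  }
  return i;
}

int main() {
  std::printf("%zu %zu %zu\n", scan_number("1e1"),     // 3
                               scan_number("1E+10"),   // 5
                               scan_number("1E+x"));   // 1
  return 0;
}
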
- case STATE_FOUND_IDENT: // Complete ident - yylval->lex_str=get_token(lex,yyLength()); - if (lex->convert_set) - lex->convert_set->convert((char*) yylval->lex_str.str,lex->yytoklen); - return(IDENT); + yylval->lex_str= get_token(lex,yyLength()); + return(result_state); - case STATE_USER_VARIABLE_DELIMITER: + case MY_LEX_USER_VARIABLE_DELIMITER: // Found quote char + { + uint double_quotes= 0; + char quote_char= c; // Used char lex->tok_start=lex->ptr; // Skip first ` -#ifdef USE_MB - if (use_mb(default_charset_info)) + while ((c=yyGet())) { - while ((c=yyGet()) && state_map[c] != STATE_USER_VARIABLE_DELIMITER && - c != (uchar) NAMES_SEP_CHAR) + int length; + if ((length= my_mbcharlen(cs, c)) == 1) { - if (my_ismbhead(default_charset_info, c)) - { - int l; - if ((l = my_ismbchar(default_charset_info, - (const char *)lex->ptr-1, - (const char *)lex->end_of_query)) == 0) - break; - lex->ptr += l-1; - } - } - } - else + if (c == (uchar) NAMES_SEP_CHAR) + break; /* Old .frm format can't handle this char */ + if (c == quote_char) + { + if (yyPeek() != quote_char) + break; + c=yyGet(); + double_quotes++; + continue; + } + } +#ifdef USE_MB + else if (length < 1) + break; // Error + lex->ptr+= length-1; #endif - { - while ((c=yyGet()) && state_map[c] != STATE_USER_VARIABLE_DELIMITER && - c != (uchar) NAMES_SEP_CHAR) ; } - yylval->lex_str=get_token(lex,yyLength()); - if (lex->convert_set) - lex->convert_set->convert((char*) yylval->lex_str.str,lex->yytoklen); - if (state_map[c] == STATE_USER_VARIABLE_DELIMITER) + if (double_quotes) + yylval->lex_str=get_quoted_token(lex,yyLength() - double_quotes, + quote_char); + else + yylval->lex_str=get_token(lex,yyLength()); + if (c == quote_char) yySkip(); // Skip end ` - return(IDENT); - - case STATE_SIGNED_NUMBER: // Incomplete signed number - if (prev_state == STATE_OPERATOR_OR_IDENT) - { - if (c == '-' && yyPeek() == '-' && - (isspace(yyPeek2()) || iscntrl(yyPeek2()))) - state=STATE_COMMENT; - else - state= STATE_CHAR; // Must be operator - break; - } - if (!isdigit(c=yyGet()) || yyPeek() == 'x') - { - if (c != '.') - { - if (c == '-' && isspace(yyPeek())) - state=STATE_COMMENT; - else - state = STATE_CHAR; // Return sign as single char - break; - } - yyUnget(); // Fix for next loop - } - while (isdigit(c=yyGet())) ; // Incomplete real or int number - if ((c == 'e' || c == 'E') && - (yyPeek() == '+' || yyPeek() == '-' || isdigit(yyPeek()))) - { // Real number - yyUnget(); - c= '.'; // Fool next test - } - // fall through - case STATE_INT_OR_REAL: // Compleat int or incompleat real + lex->next_state= MY_LEX_START; + return(IDENT_QUOTED); + } + case MY_LEX_INT_OR_REAL: // Compleat int or incompleat real if (c != '.') { // Found complete integer number. 
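
The quoted-identifier branch above counts doubled quote characters and, when any were seen, calls the new get_quoted_token() so that a quote character written twice inside the delimiters stands for one literal quote. The same collapsing in isolation, as a plain sketch rather than the server routine:

#include <string>
#include <iostream>

std::string unquote_ident(const std::string &tok, char quote) {
  std::string out;
  for (size_t i = 0; i < tok.size(); i++) {
    out += tok[i];
    if (tok[i] == quote && i + 1 < tok.size() && tok[i + 1] == quote)
      i++;                                  // skip the second quote of a pair
  }
  return out;
}

int main() {
  std::cout << unquote_ident("weird``name", '`') << '\n';  // prints weird`name
  return 0;
}
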
yylval->lex_str=get_token(lex,yyLength()); return int_token(yylval->lex_str.str,yylval->lex_str.length); } // fall through - case STATE_REAL: // Incomplete real number - while (isdigit(c = yyGet())) ; + case MY_LEX_REAL: // Incomplete real number + while (my_isdigit(cs,c = yyGet())) ; if (c == 'e' || c == 'E') { c = yyGet(); if (c == '-' || c == '+') c = yyGet(); // Skip sign - if (!isdigit(c)) + if (!my_isdigit(cs,c)) { // No digit after sign - state= STATE_CHAR; + state= MY_LEX_CHAR; break; } - while (isdigit(yyGet())) ; + while (my_isdigit(cs,yyGet())) ; yylval->lex_str=get_token(lex,yyLength()); return(FLOAT_NUM); } yylval->lex_str=get_token(lex,yyLength()); return(REAL_NUM); - case STATE_HEX_NUMBER: // Found x'hexstring' + case MY_LEX_HEX_NUMBER: // Found x'hexstring' yyGet(); // Skip ' - while (isxdigit((c = yyGet()))) ; + while (my_isxdigit(cs,(c = yyGet()))) ; length=(lex->ptr - lex->tok_start); // Length of hexnum+3 if (!(length & 1) || c != '\'') { @@ -723,64 +825,71 @@ int yylex(void *arg) lex->yytoklen-=3; return (HEX_NUM); - case STATE_CMP_OP: // Incomplete comparison operator - if (state_map[yyPeek()] == STATE_CMP_OP || - state_map[yyPeek()] == STATE_LONG_CMP_OP) + case MY_LEX_CMP_OP: // Incomplete comparison operator + if (state_map[yyPeek()] == MY_LEX_CMP_OP || + state_map[yyPeek()] == MY_LEX_LONG_CMP_OP) yySkip(); if ((tokval = find_keyword(lex,(uint) (lex->ptr - lex->tok_start),0))) { - lex->next_state= STATE_START; // Allow signed numbers + lex->next_state= MY_LEX_START; // Allow signed numbers return(tokval); } - state = STATE_CHAR; // Something fishy found + state = MY_LEX_CHAR; // Something fishy found break; - case STATE_LONG_CMP_OP: // Incomplete comparison operator - if (state_map[yyPeek()] == STATE_CMP_OP || - state_map[yyPeek()] == STATE_LONG_CMP_OP) + case MY_LEX_LONG_CMP_OP: // Incomplete comparison operator + if (state_map[yyPeek()] == MY_LEX_CMP_OP || + state_map[yyPeek()] == MY_LEX_LONG_CMP_OP) { yySkip(); - if (state_map[yyPeek()] == STATE_CMP_OP) + if (state_map[yyPeek()] == MY_LEX_CMP_OP) yySkip(); } if ((tokval = find_keyword(lex,(uint) (lex->ptr - lex->tok_start),0))) { - lex->next_state= STATE_START; // Found long op + lex->next_state= MY_LEX_START; // Found long op return(tokval); } - state = STATE_CHAR; // Something fishy found + state = MY_LEX_CHAR; // Something fishy found break; - case STATE_BOOL: + case MY_LEX_BOOL: if (c != yyPeek()) { - state=STATE_CHAR; + state=MY_LEX_CHAR; break; } yySkip(); tokval = find_keyword(lex,2,0); // Is a bool operator - lex->next_state= STATE_START; // Allow signed numbers + lex->next_state= MY_LEX_START; // Allow signed numbers return(tokval); - case STATE_STRING: // Incomplete text string + case MY_LEX_STRING_OR_DELIMITER: + if (((THD *) yythd)->variables.sql_mode & MODE_ANSI_QUOTES) + { + state= MY_LEX_USER_VARIABLE_DELIMITER; + break; + } + /* " used for strings */ + case MY_LEX_STRING: // Incomplete text string if (!(yylval->lex_str.str = get_text(lex))) { - state= STATE_CHAR; // Read char by char + state= MY_LEX_CHAR; // Read char by char break; } yylval->lex_str.length=lex->yytoklen; return(TEXT_STRING); - case STATE_COMMENT: // Comment + case MY_LEX_COMMENT: // Comment lex->select_lex.options|= OPTION_FOUND_COMMENT; while ((c = yyGet()) != '\n' && c) ; yyUnget(); // Safety against eof - state = STATE_START; // Try again + state = MY_LEX_START; // Try again break; - case STATE_LONG_COMMENT: /* Long C comment? */ + case MY_LEX_LONG_COMMENT: /* Long C comment? 
*/ if (yyPeek() != '*') { - state=STATE_CHAR; // Probable division + state=MY_LEX_CHAR; // Probable division break; } yySkip(); // Skip '*' @@ -789,8 +898,8 @@ int yylex(void *arg) { ulong version=MYSQL_VERSION_ID; yySkip(); - state=STATE_START; - if (isdigit(yyPeek())) + state=MY_LEX_START; + if (my_isdigit(cs,yyPeek())) { // Version number version=strtol((char*) lex->ptr,(char**) &lex->ptr,10); } @@ -808,89 +917,110 @@ int yylex(void *arg) } if (lex->ptr != lex->end_of_query) yySkip(); // remove last '/' - state = STATE_START; // Try again + state = MY_LEX_START; // Try again break; - case STATE_END_LONG_COMMENT: + case MY_LEX_END_LONG_COMMENT: if (lex->in_comment && yyPeek() == '/') { yySkip(); lex->in_comment=0; - state=STATE_START; + state=MY_LEX_START; } else - state=STATE_CHAR; // Return '*' + state=MY_LEX_CHAR; // Return '*' break; - case STATE_SET_VAR: // Check if ':=' + case MY_LEX_SET_VAR: // Check if ':=' if (yyPeek() != '=') { - state=STATE_CHAR; // Return ':' + state=MY_LEX_CHAR; // Return ':' break; } yySkip(); return (SET_VAR); - case STATE_COLON: // optional line terminator + case MY_LEX_SEMICOLON: // optional line terminator if (yyPeek()) { - state=STATE_CHAR; // Return ';' + THD* thd= (THD*)yythd; + if ((thd->client_capabilities & CLIENT_MULTI_STATEMENTS) && + (thd->command != COM_PREPARE)) + { + lex->safe_to_cache_query=0; + lex->found_colon=(char*)lex->ptr; + thd->server_status |= SERVER_MORE_RESULTS_EXISTS; + lex->next_state=MY_LEX_END; + return(END_OF_INPUT); + } + else + state=MY_LEX_CHAR; // Return ';' break; } /* fall true */ - case STATE_EOL: - lex->next_state=STATE_END; // Mark for next loop - return(END_OF_INPUT); - case STATE_END: - lex->next_state=STATE_END; + case MY_LEX_EOL: + if (lex->ptr >= lex->end_of_query) + { + lex->next_state=MY_LEX_END; // Mark for next loop + return(END_OF_INPUT); + } + state=MY_LEX_CHAR; + break; + case MY_LEX_END: + lex->next_state=MY_LEX_END; return(0); // We found end of input last time - - // Actually real shouldn't start - // with . but allow them anyhow - case STATE_REAL_OR_POINT: - if (isdigit(yyPeek())) - state = STATE_REAL; // Real + + /* Actually real shouldn't start with . but allow them anyhow */ + case MY_LEX_REAL_OR_POINT: + if (my_isdigit(cs,yyPeek())) + state = MY_LEX_REAL; // Real else { - state = STATE_CHAR; // return '.' - lex->next_state=STATE_IDENT_START;// Next is an ident (not a keyword) + state= MY_LEX_IDENT_SEP; // return '.' + yyUnget(); // Put back '.' } break; - case STATE_USER_END: // end '@' of user@hostname + case MY_LEX_USER_END: // end '@' of user@hostname switch (state_map[yyPeek()]) { - case STATE_STRING: - case STATE_USER_VARIABLE_DELIMITER: + case MY_LEX_STRING: + case MY_LEX_USER_VARIABLE_DELIMITER: + case MY_LEX_STRING_OR_DELIMITER: break; - case STATE_USER_END: - lex->next_state=STATE_SYSTEM_VAR; + case MY_LEX_USER_END: + lex->next_state=MY_LEX_SYSTEM_VAR; break; default: - lex->next_state=STATE_HOSTNAME; + lex->next_state=MY_LEX_HOSTNAME; break; } yylval->lex_str.str=(char*) lex->ptr; yylval->lex_str.length=1; return((int) '@'); - case STATE_HOSTNAME: // end '@' of user@hostname - for (c=yyGet() ; - isalnum(c) || c == '.' || c == '_' || c == '$'; + case MY_LEX_HOSTNAME: // end '@' of user@hostname + for (c=yyGet() ; + my_isalnum(cs,c) || c == '.' 
|| c == '_' || c == '$'; c= yyGet()) ; yylval->lex_str=get_token(lex,yyLength()); return(LEX_HOSTNAME); - case STATE_SYSTEM_VAR: + case MY_LEX_SYSTEM_VAR: yylval->lex_str.str=(char*) lex->ptr; yylval->lex_str.length=1; - lex->next_state=STATE_IDENT_OR_KEYWORD; yySkip(); // Skip '@' + lex->next_state= (state_map[yyPeek()] == + MY_LEX_USER_VARIABLE_DELIMITER ? + MY_LEX_OPERATOR_OR_IDENT : + MY_LEX_IDENT_OR_KEYWORD); return((int) '@'); - case STATE_IDENT_OR_KEYWORD: + case MY_LEX_IDENT_OR_KEYWORD: /* We come here when we have found two '@' in a row. We should now be able to handle: [(global | local | session) .]variable_name */ - - while (state_map[c=yyGet()] == STATE_IDENT || - state_map[c] == STATE_NUMBER_IDENT) ; + + for (result_state= 0; ident_map[c= yyGet()]; result_state|= c); + /* If there were non-ASCII characters, mark that we must convert */ + result_state= result_state & 0x80 ? IDENT_QUOTED : IDENT; + if (c == '.') - lex->next_state=STATE_IDENT_SEP; + lex->next_state=MY_LEX_IDENT_SEP; length= (uint) (lex->ptr - lex->tok_start)-1; if ((tokval= find_keyword(lex,length,0))) { @@ -898,9 +1028,787 @@ int yylex(void *arg) return(tokval); // Was keyword } yylval->lex_str=get_token(lex,length); - if (lex->convert_set) - lex->convert_set->convert((char*) yylval->lex_str.str,lex->yytoklen); - return(IDENT); + return(result_state); } } } + +/* + st_select_lex structures initialisations +*/ + +void st_select_lex_node::init_query() +{ + options= 0; + linkage= UNSPECIFIED_TYPE; + no_error= no_table_names_allowed= 0; + uncacheable= 0; +} + +void st_select_lex_node::init_select() +{ +} + +void st_select_lex_unit::init_query() +{ + st_select_lex_node::init_query(); + linkage= GLOBAL_OPTIONS_TYPE; + global_parameters= first_select(); + select_limit_cnt= HA_POS_ERROR; + offset_limit_cnt= 0; + union_distinct= 0; + prepared= optimized= executed= 0; + item= 0; + union_result= 0; + table= 0; + fake_select_lex= 0; + cleaned= 0; + item_list.empty(); + describe= 0; +} + +void st_select_lex::init_query() +{ + st_select_lex_node::init_query(); + table_list.empty(); + item_list.empty(); + join= 0; + where= 0; + olap= UNSPECIFIED_OLAP_TYPE; + having_fix_field= 0; + resolve_mode= NOMATTER_MODE; + cond_count= with_wild= 0; + ref_pointer_array= 0; + select_n_having_items= 0; + prep_where= 0; + subquery_in_having= explicit_limit= 0; + parsing_place= NO_MATTER; + is_item_list_lookup= 0; +} + +void st_select_lex::init_select() +{ + st_select_lex_node::init_select(); + group_list.empty(); + type= db= db1= table1= db2= table2= 0; + having= 0; + use_index_ptr= ignore_index_ptr= 0; + table_join_options= 0; + in_sum_expr= with_wild= 0; + options= 0; + braces= 0; + when_list.empty(); + expr_list.empty(); + interval_list.empty(); + use_index.empty(); + ftfunc_list_alloc.empty(); + ftfunc_list= &ftfunc_list_alloc; + linkage= UNSPECIFIED_TYPE; + order_list.elements= 0; + order_list.first= 0; + order_list.next= (byte**) &order_list.first; + select_limit= HA_POS_ERROR; + offset_limit= 0; + with_sum_func= 0; + +} + +/* + st_select_lex structures linking +*/ + +/* include on level down */ +void st_select_lex_node::include_down(st_select_lex_node *upper) +{ + if ((next= upper->slave)) + next->prev= &next; + prev= &upper->slave; + upper->slave= this; + master= upper; + slave= 0; +} + +/* + include on level down (but do not link) + + SYNOPSYS + st_select_lex_node::include_standalone() + upper - reference on node underr which this node should be included + ref - references on reference on this node +*/ +void 
st_select_lex_node::include_standalone(st_select_lex_node *upper, + st_select_lex_node **ref) +{ + next= 0; + prev= ref; + master= upper; + slave= 0; +} + +/* include neighbour (on same level) */ +void st_select_lex_node::include_neighbour(st_select_lex_node *before) +{ + if ((next= before->next)) + next->prev= &next; + prev= &before->next; + before->next= this; + master= before->master; + slave= 0; +} + +/* including in global SELECT_LEX list */ +void st_select_lex_node::include_global(st_select_lex_node **plink) +{ + if ((link_next= *plink)) + link_next->link_prev= &link_next; + link_prev= plink; + *plink= this; +} + +//excluding from global list (internal function) +void st_select_lex_node::fast_exclude() +{ + if (link_prev) + { + if ((*link_prev= link_next)) + link_next->link_prev= link_prev; + } + // Remove slave structure + for (; slave; slave= slave->next) + slave->fast_exclude(); + +} + +/* + excluding select_lex structure (except first (first select can't be + deleted, because it is most upper select)) +*/ +void st_select_lex_node::exclude() +{ + //exclude from global list + fast_exclude(); + //exclude from other structures + if ((*prev= next)) + next->prev= prev; + /* + We do not need following statements, because prev pointer of first + list element point to master->slave + if (master->slave == this) + master->slave= next; + */ +} + + +/* + Exclude level of current unit from tree of SELECTs + + SYNOPSYS + st_select_lex_unit::exclude_level() + + NOTE: units which belong to current will be brought up on level of + currernt unit +*/ +void st_select_lex_unit::exclude_level() +{ + SELECT_LEX_UNIT *units= 0, **units_last= &units; + for (SELECT_LEX *sl= first_select(); sl; sl= sl->next_select()) + { + // unlink current level from global SELECTs list + if (sl->link_prev && (*sl->link_prev= sl->link_next)) + sl->link_next->link_prev= sl->link_prev; + + // bring up underlay levels + SELECT_LEX_UNIT **last= 0; + for (SELECT_LEX_UNIT *u= sl->first_inner_unit(); u; u= u->next_unit()) + { + u->master= master; + last= (SELECT_LEX_UNIT**)&(u->next); + } + if (last) + { + (*units_last)= sl->first_inner_unit(); + units_last= last; + } + } + if (units) + { + // include brought up levels in place of current + (*prev)= units; + (*units_last)= (SELECT_LEX_UNIT*)next; + if (next) + next->prev= (SELECT_LEX_NODE**)units_last; + units->prev= prev; + } + else + { + // exclude currect unit from list of nodes + (*prev)= next; + if (next) + next->prev= prev; + } +} + + +/* + Exclude subtree of current unit from tree of SELECTs + + SYNOPSYS + st_select_lex_unit::exclude_tree() +*/ +void st_select_lex_unit::exclude_tree() +{ + for (SELECT_LEX *sl= first_select(); sl; sl= sl->next_select()) + { + // unlink current level from global SELECTs list + if (sl->link_prev && (*sl->link_prev= sl->link_next)) + sl->link_next->link_prev= sl->link_prev; + + // unlink underlay levels + for (SELECT_LEX_UNIT *u= sl->first_inner_unit(); u; u= u->next_unit()) + { + u->exclude_level(); + } + } + // exclude currect unit from list of nodes + (*prev)= next; + if (next) + next->prev= prev; +} + + +/* + st_select_lex_node::mark_as_dependent mark all st_select_lex struct from + this to 'last' as dependent + + SYNOPSIS + last - pointer to last st_select_lex struct, before wich all + st_select_lex have to be marked as dependent + + NOTE + 'last' should be reachable from this st_select_lex_node +*/ + +void st_select_lex::mark_as_dependent(SELECT_LEX *last) +{ + /* + Mark all selects from resolved to 1 before select where was + found 
table as depended (of select where was found table) + */ + for (SELECT_LEX *s= this; + s && s != last; + s= s->outer_select()) + if (!(s->uncacheable & UNCACHEABLE_DEPENDENT)) + { + // Select is dependent of outer select + s->uncacheable|= UNCACHEABLE_DEPENDENT; + SELECT_LEX_UNIT *munit= s->master_unit(); + munit->uncacheable|= UNCACHEABLE_DEPENDENT; + } +} + +bool st_select_lex_node::set_braces(bool value) { return 1; } +bool st_select_lex_node::inc_in_sum_expr() { return 1; } +uint st_select_lex_node::get_in_sum_expr() { return 0; } +TABLE_LIST* st_select_lex_node::get_table_list() { return 0; } +List<Item>* st_select_lex_node::get_item_list() { return 0; } +List<String>* st_select_lex_node::get_use_index() { return 0; } +List<String>* st_select_lex_node::get_ignore_index() { return 0; } +TABLE_LIST *st_select_lex_node::add_table_to_list(THD *thd, Table_ident *table, + LEX_STRING *alias, + ulong table_join_options, + thr_lock_type flags, + List<String> *use_index, + List<String> *ignore_index, + LEX_STRING *option) +{ + return 0; +} +ulong st_select_lex_node::get_table_join_options() +{ + return 0; +} + +/* + prohibit using LIMIT clause +*/ +bool st_select_lex::test_limit() +{ + if (select_limit != HA_POS_ERROR) + { + my_error(ER_NOT_SUPPORTED_YET, MYF(0), + "LIMIT & IN/ALL/ANY/SOME subquery"); + return(1); + } + // We need only 1 row to determinate existence + select_limit= 1; + // no sense in ORDER BY without LIMIT + order_list.empty(); + return(0); +} + +/* + Interface method of table list creation for query + + SYNOPSIS + st_select_lex_unit::create_total_list() + thd THD pointer + result pointer on result list of tables pointer + check_derived force derived table chacking (used for creating + table list for derived query) + DESCRIPTION + This is used for UNION & subselect to create a new table list of all used + tables. + The table_list->table entry in all used tables are set to point + to the entries in this list. + + RETURN + 0 - OK + !0 - error +*/ +bool st_select_lex_unit::create_total_list(THD *thd_arg, st_lex *lex, + TABLE_LIST **result_arg) +{ + *result_arg= 0; + if (!(res= create_total_list_n_last_return(thd_arg, lex, &result_arg))) + { + /* + If time zone tables were used implicitly in statement we should add + them to global table list. + */ + if (lex->time_zone_tables_used) + { + /* + Altough we are modifying lex data, it won't raise any problem in + case when this lex belongs to some prepared statement or stored + procedure: such modification does not change any invariants imposed + by requirement to reuse the same lex for multiple executions. + */ + if ((lex->time_zone_tables_used= my_tz_get_table_list(thd)) != + &fake_time_zone_tables_list) + { + *result_arg= lex->time_zone_tables_used; + } + else + { + send_error(thd, 0); + res= 1; + } + } + } + return res; +} + +/* + Table list creation for query + + SYNOPSIS + st_select_lex_unit::create_total_list() + thd THD pointer + lex pointer on LEX stricture + result pointer on pointer on result list of tables pointer + + DESCRIPTION + This is used for UNION & subselect to create a new table list of all used + tables. + The table_list->table_list in all tables of global list are set to point + to the local SELECT_LEX entries. 
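
mark_as_dependent() above walks outward through the enclosing selects, stopping before the select that owns the referenced field, and flags each level it passes (and that level's unit) with UNCACHEABLE_DEPENDENT. A stripped-down sketch of the walk; the node type is a stand-in and the per-unit flag is left out:

struct Node {
  Node *outer;             // enclosing select, 0 at the top
  unsigned flags;          // bit field of "uncacheable" reasons
};

const unsigned DEPENDENT = 1 << 0;   // placeholder for UNCACHEABLE_DEPENDENT

void mark_as_dependent(Node *from, Node *last) {
  for (Node *s = from; s && s != last; s = s->outer)
    if (!(s->flags & DEPENDENT))
      s->flags |= DEPENDENT;         // this level now depends on an outer one
}
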
+ + RETURN + 0 - OK + !0 - error +*/ +bool st_select_lex_unit:: +create_total_list_n_last_return(THD *thd_arg, + st_lex *lex, + TABLE_LIST ***result_arg) +{ + TABLE_LIST *slave_list_first=0, **slave_list_last= &slave_list_first; + TABLE_LIST **new_table_list= *result_arg, *aux; + SELECT_LEX *sl= (SELECT_LEX*)slave; + + /* + iterate all inner selects + fake_select (if exists), + fake_select->next_select() always is 0 + */ + for (; + sl; + sl= (sl->next_select() ? + sl->next_select() : + (sl == fake_select_lex ? + 0 : + fake_select_lex))) + { + // check usage of ORDER BY in union + if (sl->order_list.first && sl->next_select() && !sl->braces && + sl->linkage != GLOBAL_OPTIONS_TYPE) + { + net_printf(thd_arg,ER_WRONG_USAGE,"UNION","ORDER BY"); + return 1; + } + + for (SELECT_LEX_UNIT *inner= sl->first_inner_unit(); + inner; + inner= inner->next_unit()) + { + if (inner->create_total_list_n_last_return(thd, lex, + &slave_list_last)) + return 1; + } + + if ((aux= (TABLE_LIST*) sl->table_list.first)) + { + TABLE_LIST *next_table; + for (; aux; aux= next_table) + { + TABLE_LIST *cursor; + next_table= aux->next; + /* Add to the total table list */ + if (!(cursor= (TABLE_LIST *) thd->memdup((char*) aux, + sizeof(*aux)))) + { + send_error(thd,0); + return 1; + } + *new_table_list= cursor; + cursor->table_list= aux; + new_table_list= &cursor->next; + *new_table_list= 0; // end result list + aux->table_list= cursor; + } + } + } + + if (slave_list_first) + { + *new_table_list= slave_list_first; + new_table_list= slave_list_last; + } + *result_arg= new_table_list; + return 0; +} + + +st_select_lex_unit* st_select_lex_unit::master_unit() +{ + return this; +} + + +st_select_lex* st_select_lex_unit::outer_select() +{ + return (st_select_lex*) master; +} + + +bool st_select_lex::add_order_to_list(THD *thd, Item *item, bool asc) +{ + return add_to_list(thd, order_list, item, asc); +} + + +bool st_select_lex::add_item_to_list(THD *thd, Item *item) +{ + return item_list.push_back(item); +} + + +bool st_select_lex::add_group_to_list(THD *thd, Item *item, bool asc) +{ + return add_to_list(thd, group_list, item, asc); +} + + +bool st_select_lex::add_ftfunc_to_list(Item_func_match *func) +{ + return !func || ftfunc_list->push_back(func); // end of memory? 
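
create_total_list_n_last_return() above builds one flat table list by carrying a pointer to the current tail slot (new_table_list), so each copied entry is linked in with a constant-time write and the list stays terminated. The same tail-pointer idiom in isolation, with a throwaway node type:

struct TL { int id; TL *next; };

// Appends node at the slot *tail points to and advances the tail.
void append(TL ***tail, TL *node) {
  **tail = node;           // write node into the current tail slot
  node->next = 0;          // keep the list terminated
  *tail = &node->next;     // the tail slot is now node's next pointer
}

// Usage:
//   TL a = {1, 0}, b = {2, 0};
//   TL *head = 0; TL **tail = &head;
//   append(&tail, &a); append(&tail, &b);   // head -> a -> b -> 0
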
+} + + +st_select_lex_unit* st_select_lex::master_unit() +{ + return (st_select_lex_unit*) master; +} + + +st_select_lex* st_select_lex::outer_select() +{ + return (st_select_lex*) master->get_master(); +} + + +bool st_select_lex::set_braces(bool value) +{ + braces= value; + return 0; +} + + +bool st_select_lex::inc_in_sum_expr() +{ + in_sum_expr++; + return 0; +} + + +uint st_select_lex::get_in_sum_expr() +{ + return in_sum_expr; +} + + +TABLE_LIST* st_select_lex::get_table_list() +{ + return (TABLE_LIST*) table_list.first; +} + +List<Item>* st_select_lex::get_item_list() +{ + return &item_list; +} + + +List<String>* st_select_lex::get_use_index() +{ + return use_index_ptr; +} + + +List<String>* st_select_lex::get_ignore_index() +{ + return ignore_index_ptr; +} + + +ulong st_select_lex::get_table_join_options() +{ + return table_join_options; +} + + +bool st_select_lex::setup_ref_array(THD *thd, uint order_group_num) +{ + if (ref_pointer_array) + return 0; + + /* + We have to create array in prepared statement memory if it is + prepared statement + */ + Item_arena *arena= thd->current_arena; + return (ref_pointer_array= + (Item **)arena->alloc(sizeof(Item*) * + (item_list.elements + + select_n_having_items + + order_group_num)* 5)) == 0; +} + + +/* + Find db.table which will be updated in this unit + + SYNOPSIS + st_select_lex_unit::check_updateable() + db - data base name + table - real table name + + RETURN + 1 - found + 0 - OK (table did not found) +*/ + +bool st_select_lex_unit::check_updateable(char *db, char *table) +{ + for (SELECT_LEX *sl= first_select(); sl; sl= sl->next_select()) + if (sl->check_updateable(db, table)) + return 1; + return 0; +} + + +/* + Find db.table which will be updated in this select and + underlying ones (except derived tables) + + SYNOPSIS + st_select_lex::check_updateable() + db - data base name + table - real table name + + RETURN + 1 - found + 0 - OK (table did not found) +*/ + +bool st_select_lex::check_updateable(char *db, char *table) +{ + if (find_real_table_in_list(get_table_list(), db, table)) + return 1; + + return check_updateable_in_subqueries(db, table); +} + +/* + Find db.table which will be updated in underlying subqueries + + SYNOPSIS + st_select_lex::check_updateable_in_subqueries() + db - data base name + table - real table name + + RETURN + 1 - found + 0 - OK (table did not found) +*/ + +bool st_select_lex::check_updateable_in_subqueries(char *db, char *table) +{ + for (SELECT_LEX_UNIT *un= first_inner_unit(); + un; + un= un->next_unit()) + { + if (un->first_select()->linkage != DERIVED_TABLE_TYPE && + un->check_updateable(db, table)) + return 1; + } + return 0; +} + + +void st_select_lex_unit::print(String *str) +{ + for (SELECT_LEX *sl= first_select(); sl; sl= sl->next_select()) + { + if (sl != first_select()) + { + str->append(" union ", 7); + if (!union_distinct) + str->append("all ", 4); + } + if (sl->braces) + str->append('('); + sl->print(thd, str); + if (sl->braces) + str->append(')'); + } + if (fake_select_lex == global_parameters) + { + if (fake_select_lex->order_list.elements) + { + str->append(" order by ", 10); + fake_select_lex->print_order(str, + (ORDER *) fake_select_lex-> + order_list.first); + } + fake_select_lex->print_limit(thd, str); + } +} + + +void st_select_lex::print_order(String *str, ORDER *order) +{ + for (; order; order= order->next) + { + (*order->item)->print(str); + if (!order->asc) + str->append(" desc", 5); + if (order->next) + str->append(','); + } +} + + +void st_select_lex::print_limit(THD *thd, 
String *str) +{ + if (explicit_limit) + { + str->append(" limit ", 7); + char buff[20]; + // latin1 is good enough for numbers + String st(buff, sizeof(buff), &my_charset_latin1); + st.set((ulonglong)select_limit, &my_charset_latin1); + str->append(st); + if (offset_limit) + { + str->append(','); + st.set((ulonglong)select_limit, &my_charset_latin1); + str->append(st); + } + } +} + + +st_lex::st_lex() + :result(0) +{} + + +/* + Unlink first table from global table list and first table from outer select + list (lex->select_lex) + + SYNOPSIS + unlink_first_table() + tables Global table list + global_first Save first global table here + local_first Save first local table here + + NOTES + This function assumes that outer select list is non-empty. + + RETURN + global list without first table + +*/ +TABLE_LIST *st_lex::unlink_first_table(TABLE_LIST *tables, + TABLE_LIST **global_first, + TABLE_LIST **local_first) +{ + DBUG_ASSERT(select_lex.table_list.first != 0); + /* + Save pointers to first elements of global table list and list + of tables used in outer select. It does not harm if these lists + are the same. + */ + *global_first= tables; + *local_first= (TABLE_LIST*)select_lex.table_list.first; + + /* Exclude first elements from these lists */ + select_lex.table_list.first= (byte*) (*local_first)->next; + tables= tables->next; + (*global_first)->next= 0; + return tables; +} + + +/* + Link table which was unlinked with unlink_first_table() back. + + SYNOPSIS + link_first_table_back() + tables Global table list + global_first Saved first global table + local_first Saved first local table + + RETURN + global list +*/ +TABLE_LIST *st_lex::link_first_table_back(TABLE_LIST *tables, + TABLE_LIST *global_first, + TABLE_LIST *local_first) +{ + global_first->next= tables; + select_lex.table_list.first= (byte*) local_first; + return global_first; +} + +/* + There are st_select_lex::add_table_to_list & + st_select_lex::set_lock_for_tables are in sql_parse.cc + + st_select_lex::print is in sql_select.h + + st_select_lex_unit::prepare, st_select_lex_unit::exec, + st_select_lex_unit::cleanup, st_select_lex_unit::reinit_exec_mechanism, + st_select_lex_unit::change_result + are in sql_union.cc +*/ diff --git a/sql/sql_lex.h b/sql/sql_lex.h index d4b20c69bf2..bd79a194122 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -37,6 +37,11 @@ class LEX_COLUMN; #define LEX_YYSTYPE YYSTYPE * #endif +/* + When a command is added here, be sure it's also added in mysqld.cc + in "struct show_var_st status_vars[]= {" ... 
+*/ + enum enum_sql_command { SQLCOM_SELECT, SQLCOM_CREATE_TABLE, SQLCOM_CREATE_INDEX, SQLCOM_ALTER_TABLE, SQLCOM_UPDATE, SQLCOM_INSERT, SQLCOM_INSERT_SELECT, @@ -44,41 +49,42 @@ enum enum_sql_command { SQLCOM_SHOW_DATABASES, SQLCOM_SHOW_TABLES, SQLCOM_SHOW_FIELDS, SQLCOM_SHOW_KEYS, SQLCOM_SHOW_VARIABLES, SQLCOM_SHOW_LOGS, SQLCOM_SHOW_STATUS, - SQLCOM_SHOW_INNODB_STATUS, + SQLCOM_SHOW_INNODB_STATUS,SQLCOM_SHOW_NDBCLUSTER_STATUS, SQLCOM_SHOW_PROCESSLIST, SQLCOM_SHOW_MASTER_STAT, SQLCOM_SHOW_SLAVE_STAT, - SQLCOM_SHOW_GRANTS, SQLCOM_SHOW_CREATE, + SQLCOM_SHOW_GRANTS, SQLCOM_SHOW_CREATE, SQLCOM_SHOW_CHARSETS, + SQLCOM_SHOW_COLLATIONS, SQLCOM_SHOW_CREATE_DB, SQLCOM_LOAD,SQLCOM_SET_OPTION,SQLCOM_LOCK_TABLES,SQLCOM_UNLOCK_TABLES, - SQLCOM_GRANT, SQLCOM_CHANGE_DB, SQLCOM_CREATE_DB, SQLCOM_DROP_DB, - SQLCOM_REPAIR, SQLCOM_REPLACE, SQLCOM_REPLACE_SELECT, + SQLCOM_GRANT, + SQLCOM_CHANGE_DB, SQLCOM_CREATE_DB, SQLCOM_DROP_DB, SQLCOM_ALTER_DB, + SQLCOM_REPAIR, SQLCOM_REPLACE, SQLCOM_REPLACE_SELECT, SQLCOM_CREATE_FUNCTION, SQLCOM_DROP_FUNCTION, - SQLCOM_REVOKE,SQLCOM_OPTIMIZE, SQLCOM_CHECK, + SQLCOM_REVOKE,SQLCOM_OPTIMIZE, SQLCOM_CHECK, + SQLCOM_ASSIGN_TO_KEYCACHE, SQLCOM_PRELOAD_KEYS, SQLCOM_FLUSH, SQLCOM_KILL, SQLCOM_ANALYZE, SQLCOM_ROLLBACK, SQLCOM_ROLLBACK_TO_SAVEPOINT, SQLCOM_COMMIT, SQLCOM_SAVEPOINT, SQLCOM_SLAVE_START, SQLCOM_SLAVE_STOP, SQLCOM_BEGIN, SQLCOM_LOAD_MASTER_TABLE, SQLCOM_CHANGE_MASTER, SQLCOM_RENAME_TABLE, SQLCOM_BACKUP_TABLE, SQLCOM_RESTORE_TABLE, - SQLCOM_RESET, SQLCOM_PURGE, SQLCOM_SHOW_BINLOGS, + SQLCOM_RESET, SQLCOM_PURGE, SQLCOM_PURGE_BEFORE, SQLCOM_SHOW_BINLOGS, SQLCOM_SHOW_OPEN_TABLES, SQLCOM_LOAD_MASTER_DATA, SQLCOM_HA_OPEN, SQLCOM_HA_CLOSE, SQLCOM_HA_READ, - SQLCOM_SHOW_SLAVE_HOSTS, SQLCOM_DELETE_MULTI, SQLCOM_MULTI_UPDATE, + SQLCOM_SHOW_SLAVE_HOSTS, SQLCOM_DELETE_MULTI, SQLCOM_UPDATE_MULTI, SQLCOM_SHOW_BINLOG_EVENTS, SQLCOM_SHOW_NEW_MASTER, SQLCOM_DO, - SQLCOM_EMPTY_QUERY, + SQLCOM_SHOW_WARNS, SQLCOM_EMPTY_QUERY, SQLCOM_SHOW_ERRORS, + SQLCOM_SHOW_COLUMN_TYPES, SQLCOM_SHOW_STORAGE_ENGINES, SQLCOM_SHOW_PRIVILEGES, + SQLCOM_HELP, SQLCOM_DROP_USER, SQLCOM_REVOKE_ALL, SQLCOM_CHECKSUM, + + SQLCOM_PREPARE, SQLCOM_EXECUTE, SQLCOM_DEALLOCATE_PREPARE, + /* This should be the last !!! */ SQLCOM_END }; -enum lex_states -{ - STATE_START, STATE_CHAR, STATE_IDENT, STATE_IDENT_SEP, STATE_IDENT_START, - STATE_FOUND_IDENT, STATE_SIGNED_NUMBER, STATE_REAL, STATE_HEX_NUMBER, - STATE_CMP_OP, STATE_LONG_CMP_OP, STATE_STRING, STATE_COMMENT, STATE_END, - STATE_OPERATOR_OR_IDENT, STATE_NUMBER_IDENT, STATE_INT_OR_REAL, - STATE_REAL_OR_POINT, STATE_BOOL, STATE_EOL, STATE_ESCAPE, STATE_LONG_COMMENT, - STATE_END_LONG_COMMENT, STATE_COLON, STATE_SET_VAR, STATE_USER_END, - STATE_HOSTNAME, STATE_SKIP, STATE_USER_VARIABLE_DELIMITER, STATE_SYSTEM_VAR, - STATE_IDENT_OR_KEYWORD -}; +// describe/explain types +#define DESCRIBE_NORMAL 1 +#define DESCRIBE_EXTENDED 2 + typedef List<Item> List_item; @@ -88,6 +94,13 @@ typedef struct st_lex_master_info uint port, connect_retry; ulonglong pos; ulong server_id; + /* + Variable for MASTER_SSL option. 
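
The new comment on enum_sql_command warns that every added command needs a matching entry in the status_vars[] table in mysqld.cc, with SQLCOM_END kept last as a sentinel. One way a trailing sentinel can turn such drift into a compile-time error, sketched here with invented names rather than the server's:

enum Command { CMD_SELECT, CMD_INSERT, CMD_UPDATE, CMD_END /* keep last */ };

static const char *command_names[] = { "select", "insert", "update" };

// This typedef fails to compile (negative array size) if a command is added
// above without also extending command_names[].
typedef char names_cover_all_commands
    [(sizeof(command_names) / sizeof(command_names[0]) == CMD_END) ? 1 : -1];
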
+    MASTER_SSL=0 in CHANGE MASTER TO corresponds to SSL_DISABLE
+    MASTER_SSL=1 corresponds to SSL_ENABLE
+  */
+  enum {SSL_UNCHANGED=0, SSL_DISABLE, SSL_ENABLE} ssl;
+  char *ssl_key, *ssl_cert, *ssl_ca, *ssl_capath, *ssl_cipher;
   char *relay_log_name;
   ulong relay_log_pos;
 } LEX_MASTER_INFO;
@@ -95,7 +108,8 @@ typedef struct st_lex_master_info
 enum sub_select_type
 {
-  UNSPECIFIED_TYPE, UNION_TYPE, INTERSECT_TYPE, EXCEPT_TYPE, OLAP_TYPE, NOT_A_SELECT
+  UNSPECIFIED_TYPE,UNION_TYPE, INTERSECT_TYPE,
+  EXCEPT_TYPE, GLOBAL_OPTIONS_TYPE, DERIVED_TABLE_TYPE, OLAP_TYPE
 };
 enum olap_type
@@ -103,66 +117,517 @@ enum olap_type
   UNSPECIFIED_OLAP_TYPE, CUBE_TYPE, ROLLUP_TYPE
 };
-/* The state of the lex parsing for selects */
-
-typedef struct st_select_lex
+enum tablespace_op_type
 {
+  NO_TABLESPACE_OP, DISCARD_TABLESPACE, IMPORT_TABLESPACE
+};
+
+/*
+  The state of the lex parsing for selects
+
+  master and slave are pointers to select_lex:
+  master is a pointer to the upper level node;
+  slave is a pointer to the lower level node.
+  select_lex is a SELECT without a union.
+  unit is a container of either
+  - one SELECT, or
+  - a UNION of selects.
+  select_lex and unit are both inherited from select_lex_node.
+  neighbors are two select_lex'es or units on the same level.
+
+  All select-describing structures are linked with the following pointers:
+  - list of neighbors (next/prev) (prev of the first element points to the
+    slave pointer of the upper structure)
+    - for a select this is the list of UNION'ed selects (or a one-element list)
+    - for a unit this is the list of subqueries of the upper level select
+
+  - pointer to the master (master), which is
+    - for a unit: a pointer to the outer select_lex
+    - for a select_lex: a pointer to the outer unit structure of the select
+
+  - pointer to the slave (slave), which is
+    - for a unit: the first SELECT that belongs to this unit
+    - for a select_lex: the first unit that belongs to this SELECT
+      (subqueries or derived tables)
+
+  - list of all select_lex (link_next/link_prev)
+    This is to be used for things like derived table creation, where we
+    go through this list and create the derived tables.
+
+  If a unit contains several selects (UNION now, INTERSECT etc. later)
+  then it has a special select_lex called fake_select_lex. It is used for
+  storing global parameters (like ORDER BY, LIMIT) and for executing the
+  union. Subqueries used in the global ORDER BY clause will be attached to
+  this fake_select_lex, which allows them to correctly resolve fields of
+  the 'upper' UNION and of outer selects.
+
+  For example, for the following query:
+
+  select *
+    from table1
+    where table1.field IN (select * from table1_1_1 union
+                           select * from table1_1_2)
+  union
+  select *
+    from table2
+    where table2.field=(select (select f1 from table2_1_1_1_1
+                                  where table2_1_1_1_1.f2=table2_1_1.f3)
+                        from table2_1_1
+                        where table2_1_1.f1=table2.f2)
+  union
+  select * from table3;
+
+  we will have the following structure:
+
+  select1: (select * from table1 ...)
+  select2: (select * from table2 ...)
+  select3: (select * from table3)
+  select1.1.1: (select * from table1_1_1)
+  ...
+ + main unit + fake0 + select1 select2 select3 + |^^ |^ + s||| ||master + l||| |+---------------------------------+ + a||| +---------------------------------+| + v|||master slave || + e||+-------------------------+ || + V| neighbor | V| + unit1.1<+==================>unit1.2 unit2.1 + fake1.1 + select1.1.1 select 1.1.2 select1.2.1 select2.1.1 + |^ + || + V| + unit2.1.1.1 + select2.1.1.1.1 + + + relation in main unit will be following: + (bigger picture for: + main unit + fake0 + select1 select2 select3 + in the above picture) + + main unit + |^^^^|fake_select_lex + |||||+--------------------------------------------+ + ||||+--------------------------------------------+| + |||+------------------------------+ || + ||+--------------+ | || + slave||master | | || + V| neighbor | neighbor | master|V + select1<========>select2<========>select3 fake0 + + list of all select_lex will be following (as it will be constructed by + parser): + + select1->select2->select3->select2.1.1->select 2.1.2->select2.1.1.1.1-+ + | + +---------------------------------------------------------------------+ + | + +->select1.1.1->select1.1.2 + +*/ + +/* + Base class for st_select_lex (SELECT_LEX) & + st_select_lex_unit (SELECT_LEX_UNIT) +*/ +struct st_lex; +class st_select_lex; +class st_select_lex_unit; +class st_select_lex_node { +protected: + st_select_lex_node *next, **prev, /* neighbor list */ + *master, *slave, /* vertical links */ + *link_next, **link_prev; /* list of whole SELECT_LEX */ +public: + + ulong options; + /* + result of this query can't be cached, bit field, can be : + UNCACHEABLE_DEPENDENT + UNCACHEABLE_RAND + UNCACHEABLE_SIDEEFFECT + UNCACHEABLE_EXPLAIN + UNCACHEABLE_PREPARE + */ + uint8 uncacheable; enum sub_select_type linkage; + bool no_table_names_allowed; /* used for global order by */ + bool no_error; /* suppress error message (convert it to warnings) */ + + static void *operator new(size_t size) + { + return (void*) sql_alloc((uint) size); + } + static void *operator new(size_t size, MEM_ROOT *mem_root) + { return (void*) alloc_root(mem_root, (uint) size); } + static void operator delete(void *ptr,size_t size) {} + static void operator delete(void *ptr, MEM_ROOT *mem_root) {} + st_select_lex_node(): linkage(UNSPECIFIED_TYPE) {} + virtual ~st_select_lex_node() {} + inline st_select_lex_node* get_master() { return master; } + virtual void init_query(); + virtual void init_select(); + void include_down(st_select_lex_node *upper); + void include_neighbour(st_select_lex_node *before); + void include_standalone(st_select_lex_node *sel, st_select_lex_node **ref); + void include_global(st_select_lex_node **plink); + void exclude(); + + virtual st_select_lex_unit* master_unit()= 0; + virtual st_select_lex* outer_select()= 0; + virtual st_select_lex* return_after_parsing()= 0; + + virtual bool set_braces(bool value); + virtual bool inc_in_sum_expr(); + virtual uint get_in_sum_expr(); + virtual TABLE_LIST* get_table_list(); + virtual List<Item>* get_item_list(); + virtual List<String>* get_use_index(); + virtual List<String>* get_ignore_index(); + virtual ulong get_table_join_options(); + virtual TABLE_LIST *add_table_to_list(THD *thd, Table_ident *table, + LEX_STRING *alias, + ulong table_options, + thr_lock_type flags= TL_UNLOCK, + List<String> *use_index= 0, + List<String> *ignore_index= 0, + LEX_STRING *option= 0); + virtual void set_lock_for_tables(thr_lock_type lock_type) {} + + friend class st_select_lex_unit; + friend bool mysql_new_select(struct st_lex *lex, bool move_down); +private: + void 
fast_exclude();
+};
+typedef class st_select_lex_node SELECT_LEX_NODE;
+
+/*
+  SELECT_LEX_UNIT - unit of selects (UNION, INTERSECT, ...), i.e. a group
+  of SELECT_LEXs
+*/
+struct st_lex;
+class THD;
+class select_result;
+class JOIN;
+class select_union;
+class Procedure;
+class st_select_lex_unit: public st_select_lex_node {
+protected:
+  TABLE_LIST result_table_list;
+  select_union *union_result;
+  TABLE *table; /* temporary table used for appending UNION results */
+
+  select_result *result;
+  int res;
+  ulong found_rows_for_union;
+  bool  prepared, // prepare phase already performed for UNION (unit)
+    optimized, // optimize phase already performed for UNION (unit)
+    executed, // already executed
+    cleaned;
+
+public:
+  // list of fields which point to the temporary table for the union
+  List<Item> item_list;
+  /*
+    list of types of items inside union (used for union & derived tables)
+
+    The Item_type_holders this list consists of may have pointers to Field;
+    these pointers are valid only after preparing the SELECTs of this unit
+    and before execution of any SELECT of this unit
+  */
+  List<Item> types;
+  /*
+    Pointer to the 'last' select, or pointer to the unit where the
+    global parameters for the union are stored
+  */
+  st_select_lex *global_parameters;
+  // node to which we should return current_select after parsing a subquery
+  st_select_lex *return_to;
+  /* LIMIT clause runtime counters */
+  ha_rows select_limit_cnt, offset_limit_cnt;
+  /* not NULL if the unit is used in a subselect; points to the subselect item */
+  Item_subselect *item;
+  /* thread handler */
+  THD *thd;
+  /*
+    SELECT_LEX for the hidden SELECT in a union which processes the global
+    ORDER BY and LIMIT
+  */
+  st_select_lex *fake_select_lex;
+
+  st_select_lex *union_distinct; /* pointer to the last UNION DISTINCT */
+  bool describe; /* union exec() called for EXPLAIN */
+  Procedure *last_procedure;     /* Pointer to procedure, if such exists */
+
+  void init_query();
+  bool create_total_list(THD *thd, st_lex *lex, TABLE_LIST **result);
+  st_select_lex_unit* master_unit();
+  st_select_lex* outer_select();
+  st_select_lex* first_select()
+  {
+    return my_reinterpret_cast(st_select_lex*)(slave);
+  }
+  st_select_lex* first_select_in_union()
+  {
+    return my_reinterpret_cast(st_select_lex*)(slave);
+  }
+  st_select_lex_unit* next_unit()
+  {
+    return my_reinterpret_cast(st_select_lex_unit*)(next);
+  }
+  st_select_lex* return_after_parsing() { return return_to; }
+  void exclude_level();
+  void exclude_tree();
+
+  /* UNION methods */
+  int prepare(THD *thd, select_result *result, ulong additional_options,
+              const char *tmp_table_alias);
+  int exec();
+  int cleanup();
+  inline void unclean() { cleaned= 0; }
+  void reinit_exec_mechanism();
+
+  bool check_updateable(char *db, char *table);
+  void print(String *str);
+
+  bool add_fake_select_lex(THD *thd);
+  ulong init_prepare_fake_select_lex(THD *thd);
+  int change_result(select_subselect *result, select_subselect *old_result);
+  inline bool is_prepared() { return prepared; }
+
+  friend void lex_start(THD *thd, uchar *buf, uint length);
+  friend int subselect_union_engine::exec();
+private:
+  bool create_total_list_n_last_return(THD *thd, st_lex *lex,
+                                       TABLE_LIST ***result);
+};
+typedef class st_select_lex_unit SELECT_LEX_UNIT;
+
+/*
+  SELECT_LEX - stores information about a parsed SELECT statement
+*/
+class st_select_lex: public st_select_lex_node
+{
+public:
+  char *db, *db1, *table1, *db2, *table2; /* For outer join using .. */
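A minimal caller-side sketch of the lifecycle behind the prepared/optimized/executed/cleaned flags and the UNION methods declared above. It is not code from the patch; only the method signatures come from the class, while the argument values passed to prepare() and the overall control flow are assumptions.

    // Hedged sketch: driving one st_select_lex_unit through its lifecycle.
    int run_unit(THD *thd, SELECT_LEX_UNIT *unit, select_result *result)
    {
      int res= unit->prepare(thd, result, 0, "");  // no-op if already prepared
      if (!res)
        res= unit->exec();            // executes member SELECTs, merges rows
      if (unit->cleanup())            // frees per-execution structures
        res= 1;
      return res;
    }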
+  Item *where, *having;   /* WHERE & HAVING clauses */
+  Item *prep_where; /* saved WHERE clause for prepared statement processing */
   enum olap_type olap;
-  char *db,*db1,*table1,*db2,*table2; /* For outer join using .. */
-  Item *where,*having;
-  ha_rows select_limit,offset_limit;
-  ulong options, table_join_options;
-  List<List_item> expr_list;
-  List<List_item> when_list;
-  SQL_LIST order_list,table_list,group_list;
-  List<Item> item_list;
-  List<String> interval_list,use_index, *use_index_ptr,
+  SQL_LIST table_list, group_list; /* FROM & GROUP BY clauses */
+  List<Item> item_list;  /* list of fields & expressions */
+  List<String> interval_list, use_index, *use_index_ptr,
     ignore_index, *ignore_index_ptr;
-  List<Item_func_match> ftfunc_list;
-  uint in_sum_expr, sort_default;
-  bool create_refs, braces, is_item_list_lookup;
-  st_select_lex *next;
-} SELECT_LEX;
+  bool is_item_list_lookup;
+  /*
+    Usually this is a pointer to ftfunc_list_alloc, but in a union it is used
+    to create a fake select_lex for calling mysql_select under the results of
+    the union
+  */
+  List<Item_func_match> *ftfunc_list;
+  List<Item_func_match> ftfunc_list_alloc;
+  JOIN *join; /* after JOIN::prepare it is a pointer to the corresponding JOIN */
+  const char *type; /* type of select for EXPLAIN */
+
+  SQL_LIST order_list;                /* ORDER clause */
+  List<List_item> expr_list;
+  List<List_item> when_list;          /* WHEN clause (expression) */
+  SQL_LIST *gorder_list;
+  ha_rows select_limit, offset_limit; /* LIMIT clause parameters */
+  // Arrays of pointers to top elements of the all_fields list
+  Item **ref_pointer_array;
+
+  /*
+    number of items in the select_list and the HAVING clause, used to get an
+    upper bound on the number of entries that will be added to the all-items
+    list during split_sum_func
+  */
+  uint select_n_having_items;
+  uint cond_count;    /* number of arguments of and/or/xor in where/having */
+  enum_parsing_place parsing_place; /* where we are parsing the expression */
+  bool with_sum_func;   /* sum function indicator */
+
+  ulong table_join_options;
+  uint in_sum_expr;
+  uint select_number; /* number of select (used for EXPLAIN) */
+  uint with_wild; /* item list contains '*' */
+  bool braces;    /* SELECT ... UNION (SELECT ... ) <- these braces */
+  /* TRUE when fix_fields has been called for the HAVING clause of this SELECT */
+  bool having_fix_field;
+  /* explicit LIMIT clause was used */
+  bool explicit_limit;
+  /*
+    there is a subquery in the HAVING clause => we can't close tables before
+    query processing ends, even if we use a temporary table
+  */
+  bool subquery_in_having;
+
+  /*
+    SELECT for the st_select_lex of a SELECT command. Used to prevent scanning
+    the item_list of a non-SELECT st_select_lex (there is no sense in looking
+    for a reference in it; all references should be in tables, and it is
+    dangerous due to the order of fix_fields calls for non-SELECT commands
+    (the item list may not be fix_field'ed)). This value will be assigned for
+    the primary select (sql_yacc.yy) and for any subquery and
+    UNION SELECT (sql_parse.cc mysql_new_select())
+
+
+    INSERT for the primary st_select_lex structure of a simple INSERT/REPLACE
+    (used for name resolution, see Item_field & Item_ref fix_fields;
+    FALSE for INSERT/REPLACE ...
SELECT, because it's + st_select_lex->table_list will be preprocessed (first table removed) + before passing to handle_select) + + NOMATTER for other + */ + enum {NOMATTER_MODE, SELECT_MODE, INSERT_MODE} resolve_mode; + void init_query(); + void init_select(); + st_select_lex_unit* master_unit(); + st_select_lex_unit* first_inner_unit() + { + return (st_select_lex_unit*) slave; + } + st_select_lex* outer_select(); + st_select_lex* next_select() { return (st_select_lex*) next; } + st_select_lex* next_select_in_list() + { + return (st_select_lex*) link_next; + } + st_select_lex_node** next_select_in_list_addr() + { + return &link_next; + } + st_select_lex* return_after_parsing() + { + return master_unit()->return_after_parsing(); + } + + void mark_as_dependent(st_select_lex *last); + + bool set_braces(bool value); + bool inc_in_sum_expr(); + uint get_in_sum_expr(); + + bool add_item_to_list(THD *thd, Item *item); + bool add_group_to_list(THD *thd, Item *item, bool asc); + bool add_ftfunc_to_list(Item_func_match *func); + bool add_order_to_list(THD *thd, Item *item, bool asc); + TABLE_LIST* add_table_to_list(THD *thd, Table_ident *table, + LEX_STRING *alias, + ulong table_options, + thr_lock_type flags= TL_UNLOCK, + List<String> *use_index= 0, + List<String> *ignore_index= 0, + LEX_STRING *option= 0); + TABLE_LIST* get_table_list(); + List<Item>* get_item_list(); + List<String>* get_use_index(); + List<String>* get_ignore_index(); + ulong get_table_join_options(); + void set_lock_for_tables(thr_lock_type lock_type); + inline void init_order() + { + order_list.elements= 0; + order_list.first= 0; + order_list.next= (byte**) &order_list.first; + } + + bool test_limit(); + + friend void lex_start(THD *thd, uchar *buf, uint length); + st_select_lex() {} + void make_empty_select() + { + init_query(); + init_select(); + } + bool setup_ref_array(THD *thd, uint order_group_num); + bool check_updateable(char *db, char *table); + bool check_updateable_in_subqueries(char *db, char *table); + void print(THD *thd, String *str); + static void print_order(String *str, ORDER *order); + void print_limit(THD *thd, String *str); +}; +typedef class st_select_lex SELECT_LEX; + +#define ALTER_ADD_COLUMN 1 +#define ALTER_DROP_COLUMN 2 +#define ALTER_CHANGE_COLUMN 4 +#define ALTER_ADD_INDEX 8 +#define ALTER_DROP_INDEX 16 +#define ALTER_RENAME 32 +#define ALTER_ORDER 64 +#define ALTER_OPTIONS 128 + +typedef struct st_alter_info +{ + List<Alter_drop> drop_list; + List<Alter_column> alter_list; + uint flags; + enum enum_enable_or_disable keys_onoff; + enum tablespace_op_type tablespace_op; + bool is_simple; + + st_alter_info(){clear();} + void clear(){keys_onoff= LEAVE_AS_IS;tablespace_op= NO_TABLESPACE_OP;} + void reset(){drop_list.empty();alter_list.empty();clear();} +} ALTER_INFO; + /* The state of the lex parsing. 
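The master/slave/neighbour links described in the long comment earlier map directly onto the accessors declared above. A small traversal sketch, assuming nothing beyond those accessors and leaving the per-node processing open:

    // Hedged sketch: visit every SELECT_LEX reachable from a unit,
    // descending into subqueries and derived tables via the slave links.
    void walk_unit(st_select_lex_unit *unit)
    {
      for (st_select_lex *sl= unit->first_select(); sl; sl= sl->next_select())
      {
        // ... inspect sl here (e.g. sl->select_number) ...
        for (st_select_lex_unit *inner= sl->first_inner_unit();
             inner;
             inner= inner->next_unit())
          walk_unit(inner);
      }
    }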
This is saved in the THD struct */ typedef struct st_lex { uint yylineno,yytoklen; /* Simulate lex */ LEX_YYSTYPE yylval; - SELECT_LEX select_lex, *select, *last_selects; + SELECT_LEX_UNIT unit; /* most upper unit */ + SELECT_LEX select_lex; /* first SELECT_LEX */ + /* current SELECT_LEX in parsing */ + SELECT_LEX *current_select; + /* list of all SELECT_LEX */ + SELECT_LEX *all_selects_list; uchar *ptr,*tok_start,*tok_end,*end_of_query; char *length,*dec,*change,*name; + char *help_arg; char *backup_dir; /* For RESTORE/BACKUP */ char* to_log; /* For PURGE MASTER LOGS TO */ + time_t purge_time; /* For PURGE MASTER LOGS BEFORE */ char* x509_subject,*x509_issuer,*ssl_cipher; - enum SSL_type ssl_type; /* defined in violite.h */ + char* found_colon; /* For multi queries - next query */ String *wild; sql_exchange *exchange; + select_result *result; + Item *default_value, *on_update_value; + LEX_STRING *comment, name_and_length; + LEX_USER *grant_user; + gptr yacc_yyss,yacc_yyvs; + THD *thd; + CHARSET_INFO *charset; List<key_part_spec> col_list; - List<Alter_drop> drop_list; - List<Alter_column> alter_list; + List<key_part_spec> ref_list; List<String> interval_list; List<LEX_USER> users_list; List<LEX_COLUMN> columns; List<Key> key_list; List<create_field> create_list; - List<Item> *insert_list,field_list,value_list; + List<Item> *insert_list,field_list,value_list,update_list; List<List_item> many_values; List<set_var_base> var_list; + List<Item_param> param_list; SQL_LIST proc_list, auxilliary_table_list, save_list; - TYPELIB *interval; create_field *last_field; - char* savepoint_name; // Transaction savepoint id - Item *default_value; - CONVERT *convert_set; - CONVERT *thd_convert_set; // Set with SET CHAR SET - LEX_USER *grant_user; - gptr yacc_yyss,yacc_yyvs; - THD *thd; + char *savepoint_name; // Transaction savepoint id udf_func udf; HA_CHECK_OPT check_opt; // check/repair options HA_CREATE_INFO create_info; @@ -170,28 +635,77 @@ typedef struct st_lex USER_RESOURCES mqh; ulong thread_id,type; enum_sql_command sql_command; - enum lex_states next_state; + thr_lock_type lock_option, multi_lock_option; + enum SSL_type ssl_type; /* defined in violite.h */ + enum my_lex_states next_state; enum enum_duplicates duplicates; enum enum_tx_isolation tx_isolation; enum enum_ha_read_modes ha_read_mode; enum ha_rkey_function ha_rkey_mode; - enum enum_enable_or_disable alter_keys_onoff; enum enum_var_type option_type; - uint grant,grant_tot_col,which_columns, union_option; - thr_lock_type lock_option; - bool drop_primary, drop_if_exists, drop_temporary, local_file, olap; - bool in_comment,ignore_space,verbose,simple_alter; - uint slave_thd_opt; + uint uint_geom_type; + uint grant, grant_tot_col, which_columns; + uint fk_delete_opt, fk_update_opt, fk_match_option; + uint slave_thd_opt, start_transaction_opt; + uint8 describe; + bool drop_if_exists, drop_temporary, local_file, one_shot_set; + bool in_comment, ignore_space, verbose, no_write_to_binlog; + bool derived_tables; + bool safe_to_cache_query; + bool subqueries, ignore; + ALTER_INFO alter_info; + /* Prepared statements SQL syntax:*/ + LEX_STRING prepared_stmt_name; /* Statement name (in all queries) */ + /* + Prepared statement query text or name of variable that holds the + prepared statement (in PREPARE ... 
queries) + */ + LEX_STRING prepared_stmt_code; + /* If true, prepared_stmt_code is a name of variable that holds the query */ + bool prepared_stmt_code_is_varref; + /* Names of user variables holding parameters (in EXECUTE) */ + List<LEX_STRING> prepared_stmt_params; + /* + If points to fake_time_zone_tables_list indicates that time zone + tables are implicitly used by statement, also is used for holding + list of those tables after they are opened. + */ + TABLE_LIST *time_zone_tables_used; + st_lex(); + inline void uncacheable(uint8 cause) + { + safe_to_cache_query= 0; + + /* + There are no sense to mark select_lex and union fields of LEX, + but we should merk all subselects as uncacheable from current till + most upper + */ + SELECT_LEX *sl; + SELECT_LEX_UNIT *un; + for (sl= current_select, un= sl->master_unit(); + un != &unit; + sl= sl->outer_select(), un= sl->master_unit()) + { + sl->uncacheable|= cause; + un->uncacheable|= cause; + } + } + TABLE_LIST *unlink_first_table(TABLE_LIST *tables, + TABLE_LIST **global_first, + TABLE_LIST **local_first); + TABLE_LIST *link_first_table_back(TABLE_LIST *tables, + TABLE_LIST *global_first, + TABLE_LIST *local_first); } LEX; +extern TABLE_LIST fake_time_zone_tables_list; void lex_init(void); void lex_free(void); -LEX *lex_start(THD *thd, uchar *buf,uint length); +void lex_start(THD *thd, uchar *buf,uint length); void lex_end(LEX *lex); extern pthread_key(LEX*,THR_LEX); -extern LEX_STRING tmp_table_alias; - -#define current_lex (¤t_thd->lex) +#define current_lex (current_thd->lex) diff --git a/sql/sql_list.cc b/sql/sql_list.cc index c99cfb8c918..d57a7dfe4e3 100644 --- a/sql/sql_list.cc +++ b/sql/sql_list.cc @@ -15,7 +15,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_IMPLEMENTATION #pragma implementation // gcc: Class implementation #endif diff --git a/sql/sql_list.h b/sql/sql_list.h index 9e62b7ce730..e799ecf3d6e 100644 --- a/sql/sql_list.h +++ b/sql/sql_list.h @@ -15,17 +15,35 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_INTERFACE #pragma interface /* gcc class implementation */ #endif -/* mysql standard class memoryallocator */ +/* mysql standard class memory allocator */ + +#ifdef SAFEMALLOC +#define TRASH(XX,YY) bfill((XX), (YY), 0x8F) +#else +#define TRASH(XX,YY) /* no-op */ +#endif class Sql_alloc { public: - static void *operator new(size_t size) {return (void*) sql_alloc((uint) size); } - static void operator delete(void *ptr, size_t size) {} /*lint -e715 */ + static void *operator new(size_t size) + { + return (void*) sql_alloc((uint) size); + } + static void *operator new[](size_t size) + { + return (void*) sql_alloc((uint) size); + } + static void *operator new(size_t size, MEM_ROOT *mem_root) + { return (void*) alloc_root(mem_root, (uint) size); } + static void operator delete(void *ptr, size_t size) { TRASH(ptr, size); } + static void operator delete(void *ptr, MEM_ROOT *mem_root) + { /* never called */ } + static void operator delete[](void *ptr, size_t size) { TRASH(ptr, size); } #ifdef HAVE_purify bool dummy; inline Sql_alloc() :dummy(0) {} @@ -84,6 +102,7 @@ public: first=tmp.first; last=tmp.last; } + inline base_list(bool error) { } inline bool push_back(void *info) { if (((*last)=new list_node(info, &end_of_list))) @@ -137,6 +156,15 @@ public: *prev=node; elements--; } + inline void concat(base_list *list) + { + if (!list->is_empty()) + { + *last= list->first; + last= list->last; + elements+= 
list->elements; + } + } inline void *pop(void) { if (first == &end_of_list) return 0; @@ -146,11 +174,15 @@ public: last= &first; return tmp->info; } + inline list_node* last_node() { return *last; } + inline list_node* first_node() { return first;} inline void *head() { return first->info; } inline void **head_ref() { return first != &end_of_list ? &first->info : 0; } inline bool is_empty() { return first == &end_of_list ; } inline list_node *last_ref() { return &end_of_list; } friend class base_list_iterator; + friend class error_list; + friend class error_list_iterator; #ifdef LIST_EXTRA_DEBUG /* @@ -214,12 +246,20 @@ protected: class base_list_iterator { +protected: base_list *list; list_node **el,**prev,*current; + void sublist(base_list &ls, uint elm) + { + ls.first= *el; + ls.last= list->last; + ls.elements= elm; + } public: - base_list_iterator(base_list &list_par) :list(&list_par),el(&list_par.first), - prev(0),current(0) + base_list_iterator(base_list &list_par) + :list(&list_par), el(&list_par.first), prev(0), current(0) {} + inline void *next(void) { prev=el; @@ -241,6 +281,7 @@ public: inline void *replace(void *element) { // Return old element void *tmp=current->info; + DBUG_ASSERT(current->info != 0); current->info=element; return tmp; } @@ -278,9 +319,9 @@ public: { return el == &list->last_ref()->next; } + friend class error_list_iterator; }; - template <class T> class List :public base_list { public: @@ -326,9 +367,13 @@ protected: inline T** ref(void) { return (T**) 0; } public: - List_iterator_fast(List<T> &a) : base_list_iterator(a) {} + inline List_iterator_fast(List<T> &a) : base_list_iterator(a) {} inline T* operator++(int) { return (T*) base_list_iterator::next_fast(); } inline void rewind(void) { base_list_iterator::rewind(); } + void sublist(List<T> &list_arg, uint el_arg) + { + base_list_iterator::sublist(list_arg, el_arg); + } }; @@ -370,7 +415,8 @@ class base_ilist { public: struct ilink *first,last; - base_ilist() { first= &last; last.prev= &first; } + inline void empty() { first= &last; last.prev= &first; } + base_ilist() { empty(); } inline bool is_empty() { return first == &last; } inline void append(ilink *a) { @@ -423,6 +469,7 @@ class I_List :private base_ilist { public: I_List() :base_ilist() {} + inline void empty() { base_ilist::empty(); } inline bool is_empty() { return base_ilist::is_empty(); } inline void append(T* a) { base_ilist::append(a); } inline void push_back(T* a) { base_ilist::push_back(a); } diff --git a/sql/sql_load.cc b/sql/sql_load.cc index 501852b5de8..4e6c458cc43 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -41,8 +41,9 @@ public: bool error,line_cuted,found_null,enclosed; byte *row_start, /* Found row starts here */ *row_end; /* Found row ends here */ + CHARSET_INFO *read_charset; - READ_INFO(File file,uint tot_length, + READ_INFO(File file,uint tot_length,CHARSET_INFO *cs, String &field_term,String &line_start,String &line_term, String &enclosed,int escape,bool get_it_from_net, bool is_fifo); ~READ_INFO(); @@ -77,9 +78,9 @@ static int read_sep_field(THD *thd,COPY_INFO &info,TABLE *table, List<Item> &fields, READ_INFO &read_info, String &enclosed, ulong skip_lines); - int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, List<Item> &fields, enum enum_duplicates handle_duplicates, + bool ignore, bool read_file_from_client,thr_lock_type lock_type) { char name[FN_REFLEN]; @@ -89,7 +90,9 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, String *field_term=ex->field_term,*escaped=ex->escaped; String 
*enclosed=ex->enclosed; bool is_fifo=0; +#ifndef EMBEDDED_LIBRARY LOAD_FILE_INFO lf_info; +#endif char *db = table_list->db; // This is never null /* If path for file is not defined, we will use the current database. @@ -111,6 +114,10 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, MYF(0)); DBUG_RETURN(-1); } + /* + This needs to be done before external_lock + */ + ha_enable_transaction(thd, FALSE); if (!(table = open_ltable(thd,table_list,lock_type))) DBUG_RETURN(-1); transactional_table= table->file->has_transactions(); @@ -125,7 +132,8 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, else { // Part field list thd->dupp_field=0; - if (setup_tables(table_list) || setup_fields(thd,table_list,fields,1,0,0)) + if (setup_tables(table_list) || + setup_fields(thd, 0, table_list, fields, 1, 0, 0)) DBUG_RETURN(-1); if (thd->dupp_field) { @@ -162,28 +170,30 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, /* We can't give an error in the middle when using LOCAL files */ if (read_file_from_client && handle_duplicates == DUP_ERROR) - handle_duplicates=DUP_IGNORE; + ignore= 1; +#ifndef EMBEDDED_LIBRARY if (read_file_from_client) { (void)net_request_file(&thd->net,ex->file_name); file = -1; } else +#endif { #ifdef DONT_ALLOW_FULL_LOAD_DATA_PATHS ex->file_name+=dirname_length(ex->file_name); #endif - if (!dirname_length(ex->file_name) && - strlen(ex->file_name)+strlen(mysql_data_home)+strlen(tdb)+3 < - FN_REFLEN) + if (!dirname_length(ex->file_name)) { - (void) sprintf(name,"%s/%s/%s",mysql_data_home,tdb,ex->file_name); - unpack_filename(name,name); /* Convert to system format */ + strxnmov(name, FN_REFLEN, mysql_real_data_home, tdb, NullS); + (void) fn_format(name, ex->file_name, name, "", + MY_RELATIVE_PATH | MY_UNPACK_FILENAME); } else { - unpack_filename(name,ex->file_name); + (void) fn_format(name, ex->file_name, mysql_real_data_home, "", + MY_RELATIVE_PATH | MY_UNPACK_FILENAME); #if !defined(__WIN__) && !defined(OS2) && ! defined(__NETWARE__) MY_STAT stat_info; if (!my_stat(name,&stat_info,MYF(MY_WME))) @@ -211,11 +221,12 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, COPY_INFO info; bzero((char*) &info,sizeof(info)); + info.ignore= ignore; info.handle_duplicates=handle_duplicates; info.escape_char=escaped->length() ? 
(*escaped)[0] : INT_MAX; - READ_INFO read_info(file,tot_length,*field_term, - *ex->line_start, *ex->line_term, *enclosed, + READ_INFO read_info(file,tot_length,thd->variables.collation_database, + *field_term,*ex->line_start, *ex->line_term, *enclosed, info.escape_char, read_file_from_client, is_fifo); if (read_info.error) { @@ -224,6 +235,7 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, DBUG_RETURN(-1); // Can't allocate buffers } +#ifndef EMBEDDED_LIBRARY if (mysql_bin_log.is_open()) { lf_info.thd = thd; @@ -231,15 +243,18 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, lf_info.db = db; lf_info.table_name = table_list->real_name; lf_info.fields = &fields; + lf_info.ignore= ignore; lf_info.handle_dup = handle_duplicates; lf_info.wrote_create_file = 0; lf_info.last_pos_in_file = HA_POS_ERROR; lf_info.log_delayed= log_delayed; read_info.set_io_cache_arg((void*) &lf_info); } - restore_record(table,2); +#endif /*!EMBEDDED_LIBRARY*/ - thd->count_cuted_fields=1; /* calc cuted fields */ + restore_record(table,default_values); + + thd->count_cuted_fields= CHECK_FIELD_WARN; /* calc cuted fields */ thd->cuted_fields=0L; /* Skip lines if there is a line terminator */ if (ex->line_term->length()) @@ -255,17 +270,14 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, if (!(error=test(read_info.error))) { - uint save_time_stamp=table->time_stamp; if (use_timestamp) - table->time_stamp=0; + table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; + table->next_number_field=table->found_next_number_field; - VOID(table->file->extra_opt(HA_EXTRA_WRITE_CACHE, - thd->variables.read_buff_size)); - table->bulk_insert= 1; - if (handle_duplicates == DUP_IGNORE || + if (ignore || handle_duplicates == DUP_REPLACE) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); - table->file->deactivate_non_unique_index((ha_rows) 0); + table->file->start_bulk_insert((ha_rows) 0); table->copy_blobs=1; if (!field_term->length() && !enclosed->length()) error=read_fixed_length(thd,info,table,fields,read_info, @@ -273,27 +285,37 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, else error=read_sep_field(thd,info,table,fields,read_info,*enclosed, skip_lines); - if (table->file->extra(HA_EXTRA_NO_CACHE)) - error=1; /* purecov: inspected */ - if (table->file->activate_all_index(thd)) - error=1; /* purecov: inspected */ + if (table->file->end_bulk_insert() && !error) + { + table->file->print_error(my_errno, MYF(0)); + error= 1; + } table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); - table->time_stamp=save_time_stamp; table->next_number_field=0; } + ha_enable_transaction(thd, TRUE); if (file >= 0) my_close(file,MYF(0)); free_blobs(table); /* if pack_blob was used */ table->copy_blobs=0; - thd->count_cuted_fields=0; /* Don`t calc cuted fields */ + thd->count_cuted_fields= CHECK_FIELD_IGNORE; + + /* + We must invalidate the table in query cache before binlog writing and + ha_autocommit_... 
+ */ + query_cache_invalidate3(thd, table_list, 0); if (error) { if (transactional_table) ha_autocommit_or_rollback(thd,error); + if (read_file_from_client) while (!read_info.next_line()) ; + +#ifndef EMBEDDED_LIBRARY if (mysql_bin_log.is_open()) { /* @@ -325,18 +347,20 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, mysql_bin_log.write(&d); } } +#endif /*!EMBEDDED_LIBRARY*/ error= -1; // Error on read goto err; } - sprintf(name,ER(ER_LOAD_INFO),info.records,info.deleted, - info.records-info.copied,thd->cuted_fields); - send_ok(&thd->net,info.copied+info.deleted,0L,name); + sprintf(name, ER(ER_LOAD_INFO), (ulong) info.records, (ulong) info.deleted, + (ulong) (info.records - info.copied), (ulong) thd->cuted_fields); + send_ok(thd,info.copied+info.deleted,0L,name); // on the slave thd->query is never initialized if (!thd->slave_thread) mysql_update_log.write(thd,thd->query,thd->query_length); if (!log_delayed) thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; +#ifndef EMBEDDED_LIBRARY if (mysql_bin_log.is_open()) { /* @@ -351,10 +375,9 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, mysql_bin_log.write(&e); } } +#endif /*!EMBEDDED_LIBRARY*/ if (transactional_table) error=ha_autocommit_or_rollback(thd,error); - query_cache_invalidate3(thd, table_list, 0); - err: if (thd->lock) { @@ -364,7 +387,6 @@ err: DBUG_RETURN(error); } - /**************************************************************************** ** Read of rows of fixed size + optional garage + optonal newline ****************************************************************************/ @@ -378,7 +400,8 @@ read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table,List<Item> &fields, ulonglong id; DBUG_ENTER("read_fixed_length"); - id=0; + id= 0; + /* No fields can be null in this format. 
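The read_fixed_length() and read_sep_field() hunks that follow repeat one pattern whenever a row has too few or too many values: increment thd->cuted_fields and push a warning that carries the current row number. A sketch of that pattern, with an invented helper name, for orientation only:

    /* Hedged sketch of the recurring pattern; warn_code would be
       ER_WARN_TOO_FEW_RECORDS or ER_WARN_TOO_MANY_RECORDS. */
    static void count_and_warn(THD *thd, uint warn_code)
    {
      thd->cuted_fields++;                       // reported in the OK packet
      push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
                          warn_code, ER(warn_code), thd->row_count);
    }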
mark all fields as not null */ while ((sql_field= (Item_field*) it++)) sql_field->field->set_notnull(); @@ -408,28 +431,38 @@ read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table,List<Item> &fields, #endif while ((sql_field= (Item_field*) it++)) { - Field *field=sql_field->field; + Field *field= sql_field->field; if (pos == read_info.row_end) { - thd->cuted_fields++; /* Not enough fields */ - field->reset(); + thd->cuted_fields++; /* Not enough fields */ + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_TOO_FEW_RECORDS, + ER(ER_WARN_TOO_FEW_RECORDS), thd->row_count); + field->reset(); } else { uint length; byte save_chr; + if (field == table->next_number_field) + table->auto_increment_field_not_null= TRUE; if ((length=(uint) (read_info.row_end-pos)) > field->field_length) length=field->field_length; save_chr=pos[length]; pos[length]='\0'; // Safeguard aganst malloc - field->store((char*) pos,length); + field->store((char*) pos,length,read_info.read_charset); pos[length]=save_chr; if ((pos+=length) > read_info.row_end) pos= read_info.row_end; /* Fills rest with space */ } } if (pos != read_info.row_end) + { thd->cuted_fields++; /* To long row */ + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_TOO_MANY_RECORDS, + ER(ER_WARN_TOO_MANY_RECORDS), thd->row_count); + } if (write_record(table,&info)) DBUG_RETURN(1); /* @@ -445,7 +478,13 @@ read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table,List<Item> &fields, if (read_info.next_line()) // Skip to next line break; if (read_info.line_cuted) + { thd->cuted_fields++; /* To long row */ + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_TOO_MANY_RECORDS, + ER(ER_WARN_TOO_MANY_RECORDS), thd->row_count); + } + thd->row_count++; } if (id && !read_info.error) thd->insert_id(id); // For binary/update log @@ -466,8 +505,8 @@ read_sep_field(THD *thd,COPY_INFO &info,TABLE *table, DBUG_ENTER("read_sep_field"); enclosed_length=enclosed.length(); - id=0; - + id= 0; + for (;;it.rewind()) { if (thd->killed) @@ -496,14 +535,17 @@ read_sep_field(THD *thd,COPY_INFO &info,TABLE *table, { if (field->type() == FIELD_TYPE_TIMESTAMP) ((Field_timestamp*) field)->set_time(); - else if (field != table->next_number_field) - thd->cuted_fields++; + else if (field != table->next_number_field) + field->set_warning((uint) MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_NULL_TO_NOTNULL, 1); } continue; } + if (field == table->next_number_field) + table->auto_increment_field_not_null= TRUE; field->set_notnull(); read_info.row_end[0]=0; // Safe to change end marker - field->store((char*) read_info.row_start,length); + field->store((char*) read_info.row_start,length,read_info.read_charset); } if (read_info.error) break; @@ -522,6 +564,9 @@ read_sep_field(THD *thd,COPY_INFO &info,TABLE *table, sql_field->field->set_null(); sql_field->field->reset(); thd->cuted_fields++; + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_TOO_FEW_RECORDS, + ER(ER_WARN_TOO_FEW_RECORDS), thd->row_count); } } if (write_record(table,&info)) @@ -539,7 +584,13 @@ read_sep_field(THD *thd,COPY_INFO &info,TABLE *table, if (read_info.next_line()) // Skip to next line break; if (read_info.line_cuted) + { thd->cuted_fields++; /* To long row */ + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_TOO_MANY_RECORDS, ER(ER_WARN_TOO_MANY_RECORDS), + thd->row_count); + } + thd->row_count++; } if (id && !read_info.error) thd->insert_id(id); // For binary/update log @@ -573,12 +624,13 @@ READ_INFO::unescape(char chr) */ -READ_INFO::READ_INFO(File 
file_par, uint tot_length, String &field_term, - String &line_start, String &line_term, +READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs, + String &field_term, String &line_start, String &line_term, String &enclosed_par, int escape, bool get_it_from_net, bool is_fifo) :file(file_par),escape_char(escape) { + read_charset= cs; field_term_ptr=(char*) field_term.ptr(); field_term_length= field_term.length(); line_term_ptr=(char*) line_term.ptr(); @@ -635,13 +687,16 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, String &field_term, mysys/mf_iocache.c. So we work around the problem with a manual assignment */ + need_end_io_cache = 1; + +#ifndef EMBEDDED_LIBRARY if (get_it_from_net) cache.read_function = _my_b_net_read; - need_end_io_cache = 1; if (mysql_bin_log.is_open()) cache.pre_read = cache.pre_close = (IO_CACHE_CALLBACK) log_loaded_block; +#endif } } } @@ -723,13 +778,12 @@ int READ_INFO::read_field() { chr = GET; #ifdef USE_MB - if (use_mb(default_charset_info) && - my_ismbhead(default_charset_info, chr) && - to+my_mbcharlen(default_charset_info, chr) <= end_of_buff) + if ((my_mbcharlen(read_charset, chr) > 1) && + to+my_mbcharlen(read_charset, chr) <= end_of_buff) { uchar* p = (uchar*)to; *to++ = chr; - int ml = my_mbcharlen(default_charset_info, chr); + int ml = my_mbcharlen(read_charset, chr); int i; for (i=1; i<ml; i++) { chr = GET; @@ -737,7 +791,7 @@ int READ_INFO::read_field() goto found_eof; *to++ = chr; } - if (my_ismbchar(default_charset_info, + if (my_ismbchar(read_charset, (const char *)p, (const char *)to)) continue; @@ -755,8 +809,20 @@ int READ_INFO::read_field() *to++= (byte) escape_char; goto found_eof; } - *to++ = (byte) unescape((char) chr); - continue; + /* + When escape_char == enclosed_char, we treat it like we do for + handling quotes in SQL parsing -- you can double-up the + escape_char to include it literally, but it doesn't do escapes + like \n. This allows: LOAD DATA ... 
ENCLOSED BY '"' ESCAPED BY '"' + with data like: "fie""ld1", "field2" + */ + if (escape_char != enclosed_char || chr == escape_char) + { + *to++ = (byte) unescape((char) chr); + continue; + } + PUSH(chr); + chr= escape_char; } #ifdef ALLOW_LINESEPARATOR_IN_STRINGS if (chr == line_term_char) @@ -921,10 +987,10 @@ int READ_INFO::next_line() { int chr = GET; #ifdef USE_MB - if (use_mb(default_charset_info) && my_ismbhead(default_charset_info, chr)) + if (my_mbcharlen(read_charset, chr) > 1) { for (int i=1; - chr != my_b_EOF && i<my_mbcharlen(default_charset_info, chr); + chr != my_b_EOF && i<my_mbcharlen(read_charset, chr); i++) chr = GET; if (chr == escape_char) @@ -969,7 +1035,7 @@ bool READ_INFO::find_start_of_fields() { // Can't be line_start PUSH(chr); while (--ptr != line_start_ptr) - { // Restart with next char + { // Restart with next char PUSH((uchar) *ptr); } goto try_again; diff --git a/sql/sql_map.cc b/sql/sql_map.cc index e7e24f957c6..aac44949d89 100644 --- a/sql/sql_map.cc +++ b/sql/sql_map.cc @@ -15,7 +15,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_IMPLEMENTATION #pragma implementation // gcc: Class implementation #endif diff --git a/sql/sql_map.h b/sql/sql_map.h index 632eb6e4f64..bfa6011ac54 100644 --- a/sql/sql_map.h +++ b/sql/sql_map.h @@ -17,7 +17,7 @@ /* interface for memory mapped files */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_INTERFACE #pragma interface /* gcc class implementation */ #endif diff --git a/sql/sql_olap.cc b/sql/sql_olap.cc index 6eb4fbcaaf6..024abb6c74b 100644 --- a/sql/sql_olap.cc +++ b/sql/sql_olap.cc @@ -28,7 +28,7 @@ #ifdef DISABLED_UNTIL_REWRITTEN_IN_4_1 -#ifdef __GNUC__ +#ifdef USE_PRAGMA_IMPLEMENTATION #pragma implementation // gcc: Class implementation #endif @@ -62,9 +62,9 @@ static int make_new_olap_select(LEX *lex, SELECT_LEX *select_lex, List<Item> new List_iterator<Item> list_it(select_lex->item_list); List_iterator<Item> new_it(new_fields); - while((item=list_it++)) + while ((item=list_it++)) { - bool not_found=true; + bool not_found= TRUE; if (item->type()==Item::FIELD_ITEM) { Item_field *iif = (Item_field *)item; @@ -109,15 +109,15 @@ static int olap_combos(List<Item> old_fields, List<Item> new_fields, Item *item int num_new_fields) { int sl_return = 0; - if(position == num_new_fields) + if (position == num_new_fields) { - if(item) + if (item) new_fields.push_front(item); sl_return = make_new_olap_select(lex, select_lex, new_fields); } else { - if(item) + if (item) new_fields.push_front(item); while ((num_fields - num_new_fields >= selection - position) && !sl_return) { @@ -143,17 +143,6 @@ int handle_olaps(LEX *lex, SELECT_LEX *select_lex) int count=select_lex->group_list.elements; int sl_return=0; -// a fix for UNION's - for (TABLE_LIST *cursor= (TABLE_LIST *)select_lex->table_list.first; - cursor; - cursor=cursor->next) - { - if (cursor->do_redirect) - { - cursor->table= ((TABLE_LIST*) cursor->table)->table; - cursor->do_redirect= 0; - } - } lex->last_selects=select_lex; @@ -164,18 +153,20 @@ int handle_olaps(LEX *lex, SELECT_LEX *select_lex) if (setup_tables((TABLE_LIST *)select_lex->table_list.first) || - setup_fields(lex->thd,(TABLE_LIST *)select_lex->table_list.first,select_lex->item_list,1,&all_fields,1) || - setup_fields(lex->thd,(TABLE_LIST *)select_lex->table_list.first,item_list_copy,1,&all_fields,1)) + setup_fields(lex->thd, 0, (TABLE_LIST *)select_lex->table_list.first, + select_lex->item_list, 1, &all_fields,1) || + setup_fields(lex->thd, 0, (TABLE_LIST 
*)select_lex->table_list.first, + item_list_copy, 1, &all_fields, 1)) return -1; if (select_lex->olap == CUBE_TYPE) { - for( int i=count-1; i>=0 && !sl_return; i--) + for ( int i=count-1; i>=0 && !sl_return; i--) sl_return=olap_combos(item_list_copy, new_item_list, (Item *)0, lex, select_lex, 0, 0, count, i); } else if (select_lex->olap == ROLLUP_TYPE) { - for( int i=count-1; i>=0 && !sl_return; i--) + for ( int i=count-1; i>=0 && !sl_return; i--) { Item *item; item_list_copy.pop(); diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 93f696f6d49..3fc71351d74 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -15,18 +15,20 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "mysql_priv.h" -#include "sql_acl.h" #include "sql_repl.h" #include "repl_failsafe.h" #include <m_ctype.h> #include <myisam.h> #include <my_dir.h> -#include <assert.h> #ifdef HAVE_INNOBASE_DB #include "ha_innodb.h" #endif +#ifdef HAVE_NDBCLUSTER_DB +#include "ha_ndbcluster.h" +#endif + #ifdef HAVE_OPENSSL /* Without SSL the handshake consists of one packet. This packet @@ -44,42 +46,40 @@ #else #define MIN_HANDSHAKE_SIZE 6 #endif /* HAVE_OPENSSL */ -#define SCRAMBLE_LENGTH 8 -extern int yyparse(void); -extern "C" pthread_mutex_t THR_LOCK_keycache; #ifdef SOLARIS extern "C" int gethostname(char *name, int namelen); #endif -static int check_for_max_user_connections(USER_CONN *uc); +static void time_out_user_resource_limits(THD *thd, USER_CONN *uc); +#ifndef NO_EMBEDDED_ACCESS_CHECKS +static int check_for_max_user_connections(THD *thd, USER_CONN *uc); +#endif static void decrease_user_connections(USER_CONN *uc); static bool check_db_used(THD *thd,TABLE_LIST *tables); -static bool check_merge_table_access(THD *thd, char *db, TABLE_LIST *tables); static bool check_multi_update_lock(THD *thd, TABLE_LIST *tables, - List<Item> *fields); -static void mysql_init_query(THD *thd); + List<Item> *fields, SELECT_LEX *select_lex); static void remove_escape(char *name); static void refresh_status(void); static bool append_file_to_dir(THD *thd, const char **filename_ptr, const char *table_name); -static bool create_total_list(THD *thd, LEX *lex, - TABLE_LIST **result, bool skip_first); -static bool check_one_table_access(THD *thd, ulong want_access, - TABLE_LIST *table, bool no_errors); - + +static TABLE_LIST* get_table_by_alias(TABLE_LIST* tl, const char* db, + const char* alias); const char *any_db="*any*"; // Special symbol for check_access const char *command_name[]={ "Sleep", "Quit", "Init DB", "Query", "Field List", "Create DB", "Drop DB", "Refresh", "Shutdown", "Statistics", "Processlist", - "Connect","Kill","Debug","Ping","Time","Delayed_insert","Change user", + "Connect","Kill","Debug","Ping","Time","Delayed insert","Change user", "Binlog Dump","Table Dump", "Connect Out", "Register Slave", + "Prepare", "Execute", "Long Data", "Close stmt", + "Reset stmt", "Set option", "Error" // Last command number }; -bool volatile abort_slave = 0; +static char empty_c_string[1]= {0}; // Used for not defined 'db' #ifdef __WIN__ static void test_signal(int sig_ptr) @@ -126,31 +126,32 @@ static bool end_active_trans(THD *thd) } +#ifdef HAVE_REPLICATION +/* + Returns true if all tables should be ignored +*/ inline bool all_tables_not_ok(THD *thd, TABLE_LIST *tables) { - return (table_rules_on && tables && !tables_ok(thd,tables) && - ((thd->lex.sql_command != SQLCOM_DELETE_MULTI) || - !tables_ok(thd,(TABLE_LIST *)thd->lex.auxilliary_table_list.first))); + return (table_rules_on && tables && 
!tables_ok(thd,tables)); } +#endif static HASH hash_user_connections; -extern pthread_mutex_t LOCK_user_conn; static int get_or_create_user_conn(THD *thd, const char *user, const char *host, - USER_RESOURCES *mqh) + USER_RESOURCES *mqh) { - int return_val=0; - uint temp_len, user_len, host_len; + int return_val= 0; + uint temp_len, user_len; char temp_user[USERNAME_LENGTH+HOSTNAME_LENGTH+2]; struct user_conn *uc; DBUG_ASSERT(user != 0); DBUG_ASSERT(host != 0); - user_len=strlen(user); - host_len=strlen(host); + user_len= strlen(user); temp_len= (strmov(strmov(temp_user, user)+1, host) - temp_user)+1; (void) pthread_mutex_lock(&LOCK_user_conn); if (!(uc = (struct user_conn *) hash_search(&hash_user_connections, @@ -161,26 +162,24 @@ static int get_or_create_user_conn(THD *thd, const char *user, my_malloc(sizeof(struct user_conn) + temp_len+1, MYF(MY_WME))))) { - send_error(¤t_thd->net, 0, NullS); // Out of memory - return_val=1; + send_error(thd, 0, NullS); // Out of memory + return_val= 1; goto end; } uc->user=(char*) (uc+1); memcpy(uc->user,temp_user,temp_len+1); uc->user_len= user_len; - uc->host=uc->user + uc->user_len + 1; - uc->len = temp_len; - uc->connections = 0; - uc->questions=uc->updates=uc->conn_per_hour=0; - uc->user_resources=*mqh; - if (max_user_connections && mqh->connections > max_user_connections) - uc->user_resources.connections = max_user_connections; - uc->intime=thd->thr_create_time; - if (hash_insert(&hash_user_connections, (byte*) uc)) + uc->host= uc->user + uc->user_len + 1; + uc->len= temp_len; + uc->connections= 0; + uc->questions= uc->updates= uc->conn_per_hour=0; + uc->user_resources= *mqh; + uc->intime= thd->thr_create_time; + if (my_hash_insert(&hash_user_connections, (byte*) uc)) { my_free((char*) uc,0); - send_error(¤t_thd->net, 0, NullS); // Out of memory - return_val=1; + send_error(thd, 0, NullS); // Out of memory + return_val= 1; goto end; } } @@ -189,104 +188,208 @@ static int get_or_create_user_conn(THD *thd, const char *user, end: (void) pthread_mutex_unlock(&LOCK_user_conn); return return_val; - + } /* - Check if user is ok - Updates: - thd->{user,master_access,priv_user,priv_host,db,db_access} + Check if user exist and password supplied is correct. + SYNOPSIS + check_user() + thd thread handle, thd->{host,user,ip} are used + command originator of the check: now check_user is called + during connect and change user procedures; used for + logging. + passwd scrambled password recieved from client + passwd_len length of scrambled password + db database name to connect to, may be NULL + check_count dont know exactly + + Note, that host, user and passwd may point to communication buffer. + Current implementation does not depened on that, but future changes + should be done with this in mind; 'thd' is INOUT, all other params + are 'IN'. 
+ + RETURN VALUE + 0 OK; thd->user, thd->master_access, thd->priv_user, thd->db and + thd->db_access are updated; OK is sent to client; + -1 access denied or handshake error; error is sent to client; + >0 error, not sent to client */ -static bool check_user(THD *thd,enum_server_command command, const char *user, - const char *passwd, const char *db, bool check_count) +int check_user(THD *thd, enum enum_server_command command, + const char *passwd, uint passwd_len, const char *db, + bool check_count) { - NET *net= &thd->net; - thd->db=0; - thd->db_length=0; - USER_RESOURCES ur; - char tmp_passwd[SCRAMBLE_LENGTH + 1]; + DBUG_ENTER("check_user"); + +#ifdef NO_EMBEDDED_ACCESS_CHECKS + thd->master_access= GLOBAL_ACLS; // Full rights + /* Change database if necessary: OK or FAIL is sent in mysql_change_db */ + if (db && db[0]) + { + thd->db= 0; + thd->db_length= 0; + if (mysql_change_db(thd, db)) + { + if (thd->user_connect) + decrease_user_connections(thd->user_connect); + DBUG_RETURN(-1); + } + } + else + send_ok(thd); + DBUG_RETURN(0); +#else - if (passwd[0] && strlen(passwd) != SCRAMBLE_LENGTH) - return 1; + my_bool opt_secure_auth_local; + pthread_mutex_lock(&LOCK_global_system_variables); + opt_secure_auth_local= opt_secure_auth; + pthread_mutex_unlock(&LOCK_global_system_variables); + /* - Move password to temporary buffer as it may be stored in communication - buffer + If the server is running in secure auth mode, short scrambles are + forbidden. */ - strmov(tmp_passwd, passwd); - passwd= tmp_passwd; // Use local copy + if (opt_secure_auth_local && passwd_len == SCRAMBLE_LENGTH_323) + { + net_printf(thd, ER_NOT_SUPPORTED_AUTH_MODE); + mysql_log.write(thd, COM_CONNECT, ER(ER_NOT_SUPPORTED_AUTH_MODE)); + DBUG_RETURN(-1); + } + if (passwd_len != 0 && + passwd_len != SCRAMBLE_LENGTH && + passwd_len != SCRAMBLE_LENGTH_323) + DBUG_RETURN(ER_HANDSHAKE_ERROR); - if (!(thd->user = my_strdup(user, MYF(0)))) + /* + Clear thd->db as it points to something, that will be freed when + connection is closed. We don't want to accidently free a wrong pointer + if connect failed. Also in case of 'CHANGE USER' failure, current + database will be switched to 'no database selected'. + */ + thd->db= 0; + thd->db_length= 0; + + USER_RESOURCES ur; + int res= acl_getroot(thd, &ur, passwd, passwd_len); +#ifndef EMBEDDED_LIBRARY + if (res == -1) { - send_error(net,ER_OUT_OF_RESOURCES); - return 1; + /* + This happens when client (new) sends password scrambled with + scramble(), but database holds old value (scrambled with + scramble_323()). Here we please client to send scrambled_password + in old format. 
+ */ + NET *net= &thd->net; + if (opt_secure_auth_local) + { + net_printf(thd, ER_SERVER_IS_IN_SECURE_AUTH_MODE, + thd->user, thd->host_or_ip); + mysql_log.write(thd, COM_CONNECT, ER(ER_SERVER_IS_IN_SECURE_AUTH_MODE), + thd->user, thd->host_or_ip); + DBUG_RETURN(-1); + } + if (send_old_password_request(thd) || + my_net_read(net) != SCRAMBLE_LENGTH_323 + 1) // We have to read very + { // specific packet size + inc_host_errors(&thd->remote.sin_addr); + DBUG_RETURN(ER_HANDSHAKE_ERROR); + } + /* Final attempt to check the user based on reply */ + /* So as passwd is short, errcode is always >= 0 */ + res= acl_getroot(thd, &ur, (char *) net->read_pos, SCRAMBLE_LENGTH_323); } - thd->master_access=acl_getroot(thd, thd->host, thd->ip, thd->user, - passwd, thd->scramble, - &thd->priv_user, thd->priv_host, - protocol_version == 9 || - !(thd->client_capabilities & - CLIENT_LONG_PASSWORD),&ur); - DBUG_PRINT("info", - ("Capabilities: %d packet_length: %ld Host: '%s' Login user: '%s' Priv_user: '%s' Using password: %s Access: %u db: '%s'", - thd->client_capabilities, thd->max_client_packet_length, - thd->host_or_ip, thd->user, thd->priv_user, - passwd[0] ? "yes": "no", - thd->master_access, thd->db ? thd->db : "*none*")); - if (thd->master_access & NO_ACCESS) - { - net_printf(net, ER_ACCESS_DENIED_ERROR, - thd->user, - thd->host_or_ip, - passwd[0] ? ER(ER_YES) : ER(ER_NO)); - mysql_log.write(thd,COM_CONNECT,ER(ER_ACCESS_DENIED_ERROR), - thd->user, - thd->host_or_ip, - passwd[0] ? ER(ER_YES) : ER(ER_NO)); - return(1); // Error already given - } - if (check_count) - { - VOID(pthread_mutex_lock(&LOCK_thread_count)); - bool tmp=(thread_count - delayed_insert_threads >= max_connections && - !(thd->master_access & SUPER_ACL)); - VOID(pthread_mutex_unlock(&LOCK_thread_count)); - if (tmp) - { // Too many connections - send_error(net, ER_CON_COUNT_ERROR); - return(1); - } - } - mysql_log.write(thd,command, - (thd->priv_user == thd->user ? - (char*) "%s@%s on %s" : - (char*) "%s@%s as anonymous on %s"), - user, - thd->host_or_ip, - db ? db : (char*) ""); - thd->db_access=0; - /* Don't allow user to connect if he has done too many queries */ - if ((ur.questions || ur.updates || ur.connections || max_user_connections) && - get_or_create_user_conn(thd,user,thd->host_or_ip,&ur)) - return -1; - if (thd->user_connect && (thd->user_connect->user_resources.connections || - max_user_connections) && - check_for_max_user_connections(thd->user_connect)) - return -1; - if (db && db[0]) +#endif /*EMBEDDED_LIBRARY*/ + /* here res is always >= 0 */ + if (res == 0) { - bool error=test(mysql_change_db(thd,db)); - if (error && thd->user_connect) - decrease_user_connections(thd->user_connect); - return error; + if (!(thd->master_access & NO_ACCESS)) // authentification is OK + { + DBUG_PRINT("info", + ("Capabilities: %d packet_length: %ld Host: '%s' " + "Login user: '%s' Priv_user: '%s' Using password: %s " + "Access: %u db: '%s'", + thd->client_capabilities, thd->max_client_packet_length, + thd->host_or_ip, thd->user, thd->priv_user, + passwd_len ? "yes": "no", + thd->master_access, thd->db ? thd->db : "*none*")); + + if (check_count) + { + VOID(pthread_mutex_lock(&LOCK_thread_count)); + bool count_ok= thread_count < max_connections + delayed_insert_threads + || (thd->master_access & SUPER_ACL); + VOID(pthread_mutex_unlock(&LOCK_thread_count)); + if (!count_ok) + { // too many connections + send_error(thd, ER_CON_COUNT_ERROR); + DBUG_RETURN(-1); + } + } + + /* Why logging is performed before all checks've passed? 
*/ + mysql_log.write(thd,command, + (thd->priv_user == thd->user ? + (char*) "%s@%s on %s" : + (char*) "%s@%s as anonymous on %s"), + thd->user, thd->host_or_ip, + db ? db : (char*) ""); + + /* + This is the default access rights for the current database. It's + set to 0 here because we don't have an active database yet (and we + may not have an active database to set. + */ + thd->db_access=0; + + /* Don't allow user to connect if he has done too many queries */ + if ((ur.questions || ur.updates || ur.connections || + max_user_connections) && + get_or_create_user_conn(thd,thd->user,thd->host_or_ip,&ur)) + DBUG_RETURN(-1); + if (thd->user_connect && + (thd->user_connect->user_resources.connections || + max_user_connections) && + check_for_max_user_connections(thd, thd->user_connect)) + DBUG_RETURN(-1); + + /* Change database if necessary: OK or FAIL is sent in mysql_change_db */ + if (db && db[0]) + { + if (mysql_change_db(thd, db)) + { + if (thd->user_connect) + decrease_user_connections(thd->user_connect); + DBUG_RETURN(-1); + } + } + else + send_ok(thd); + thd->password= test(passwd_len); // remember for error messages + /* Ready to handle queries */ + DBUG_RETURN(0); + } + } + else if (res == 2) // client gave short hash, server has long hash + { + net_printf(thd, ER_NOT_SUPPORTED_AUTH_MODE); + mysql_log.write(thd,COM_CONNECT,ER(ER_NOT_SUPPORTED_AUTH_MODE)); + DBUG_RETURN(-1); } - send_ok(net); // Ready to handle questions - thd->password= test(passwd[0]); // Remember for error messages - return 0; // ok + net_printf(thd, ER_ACCESS_DENIED_ERROR, + thd->user, + thd->host_or_ip, + passwd_len ? ER(ER_YES) : ER(ER_NO)); + mysql_log.write(thd, COM_CONNECT, ER(ER_ACCESS_DENIED_ERROR), + thd->user, + thd->host_or_ip, + passwd_len ? ER(ER_YES) : ER(ER_NO)); + DBUG_RETURN(-1); +#endif /* NO_EMBEDDED_ACCESS_CHECKS */ } - /* Check for maximum allowable user connections, if the mysqld server is started with corresponding variable that is greater then 0. @@ -304,44 +407,84 @@ extern "C" void free_user(struct user_conn *uc) my_free((char*) uc,MYF(0)); } -void init_max_user_conn(void) +void init_max_user_conn(void) { - (void) hash_init(&hash_user_connections,max_connections,0,0, + (void) hash_init(&hash_user_connections,system_charset_info,max_connections, + 0,0, (hash_get_key) get_key_conn, (hash_free_key) free_user, 0); } -static int check_for_max_user_connections(USER_CONN *uc) +/* + check if user has already too many connections + + SYNOPSIS + check_for_max_user_connections() + thd Thread handle + uc User connect object + + NOTES + If check fails, we decrease user connection count, which means one + shouldn't call decrease_user_connections() after this function. 
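A brief sketch of the caller contract stated in the NOTES above, mirroring how check_user() uses the function earlier in this file; the surrounding control flow is simplified and assumed:

    /* Hedged sketch: the failure path must not decrement again, because the
       function has already rolled the count back before returning 1. */
    if (check_for_max_user_connections(thd, thd->user_connect))
      return -1;                        // error already sent to the client
    /* ... connection runs ... */
    decrease_user_connections(thd->user_connect);  // normal disconnect path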
+ + RETURN + 0 ok + 1 error +*/ + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + +static int check_for_max_user_connections(THD *thd, USER_CONN *uc) { int error=0; DBUG_ENTER("check_for_max_user_connections"); (void) pthread_mutex_lock(&LOCK_user_conn); if (max_user_connections && - max_user_connections < uc->connections) + max_user_connections < (uint) uc->connections) { - net_printf(&(current_thd->net),ER_TOO_MANY_USER_CONNECTIONS, uc->user); + net_printf(thd,ER_TOO_MANY_USER_CONNECTIONS, uc->user); error=1; goto end; } + time_out_user_resource_limits(thd, uc); if (uc->user_resources.connections && uc->user_resources.connections <= uc->conn_per_hour) { - net_printf(¤t_thd->net, ER_USER_LIMIT_REACHED, uc->user, - "max_connections", + net_printf(thd, ER_USER_LIMIT_REACHED, uc->user, + "max_connections_per_hour", (long) uc->user_resources.connections); error=1; goto end; } uc->conn_per_hour++; -end: + + end: if (error) uc->connections--; // no need for decrease_user_connections() here (void) pthread_mutex_unlock(&LOCK_user_conn); DBUG_RETURN(error); } +#endif /* NO_EMBEDDED_ACCESS_CHECKS */ + +/* + Decrease user connection count + + SYNOPSIS + decrease_user_connections() + uc User connection object + NOTES + If there is a n user connection object for a connection + (which only happens if 'max_user_connections' is defined or + if someone has created a resource grant for a user), then + the connection count is always incremented on connect. + + The user connect object is not freed if some users has + 'max connections per hour' defined as we need to be able to hold + count over the lifetime of the connection. +*/ static void decrease_user_connections(USER_CONN *uc) { @@ -367,12 +510,17 @@ void free_max_user_conn(void) /* Mark all commands that somehow changes a table This is used to check number of updates / hour + + sql_command is actually set to SQLCOM_END sometimes + so we need the +1 to include it in the array. */ -char uc_update_queries[SQLCOM_END]; +char uc_update_queries[SQLCOM_END+1]; void init_update_queries(void) { + bzero((gptr) &uc_update_queries, sizeof(uc_update_queries)); + uc_update_queries[SQLCOM_CREATE_TABLE]=1; uc_update_queries[SQLCOM_CREATE_INDEX]=1; uc_update_queries[SQLCOM_ALTER_TABLE]=1; @@ -392,43 +540,69 @@ void init_update_queries(void) uc_update_queries[SQLCOM_RESTORE_TABLE]=1; uc_update_queries[SQLCOM_DELETE_MULTI]=1; uc_update_queries[SQLCOM_DROP_INDEX]=1; - uc_update_queries[SQLCOM_MULTI_UPDATE]=1; + uc_update_queries[SQLCOM_UPDATE_MULTI]=1; } +bool is_update_query(enum enum_sql_command command) +{ + DBUG_ASSERT(command >= 0 && command <= SQLCOM_END); + return uc_update_queries[command]; +} /* - Check if maximum queries per hour limit has been reached - returns 0 if OK. + Reset per-hour user resource limits when it has been more than + an hour since they were last checked - In theory we would need a mutex in the USER_CONN structure for this to - be 100 % safe, but as the worst scenario is that we would miss counting - a couple of queries, this isn't critical. -*/ + SYNOPSIS: + time_out_user_resource_limits() + thd Thread handler + uc User connection details + NOTE: + This assumes that the LOCK_user_conn mutex has been acquired, so it is + safe to test and modify members of the USER_CONN structure. +*/ -static bool check_mqh(THD *thd, uint check_command) +static void time_out_user_resource_limits(THD *thd, USER_CONN *uc) { - bool error=0; time_t check_time = thd->start_time ? 
thd->start_time : time(NULL); - USER_CONN *uc=thd->user_connect; - DBUG_ENTER("check_mqh"); - DBUG_ASSERT(uc != 0); + DBUG_ENTER("time_out_user_resource_limits"); /* If more than a hour since last check, reset resource checking */ if (check_time - uc->intime >= 3600) { - (void) pthread_mutex_lock(&LOCK_user_conn); uc->questions=1; uc->updates=0; uc->conn_per_hour=0; uc->intime=check_time; - (void) pthread_mutex_unlock(&LOCK_user_conn); } + + DBUG_VOID_RETURN; +} + + +/* + Check if maximum queries per hour limit has been reached + returns 0 if OK. +*/ + +static bool check_mqh(THD *thd, uint check_command) +{ +#ifndef NO_EMBEDDED_ACCESS_CHECKS + bool error= 0; + USER_CONN *uc=thd->user_connect; + DBUG_ENTER("check_mqh"); + DBUG_ASSERT(uc != 0); + + (void) pthread_mutex_lock(&LOCK_user_conn); + + time_out_user_resource_limits(thd, uc); + /* Check that we have not done too many questions / hour */ if (uc->user_resources.questions && uc->questions++ >= uc->user_resources.questions) { - net_printf(&thd->net, ER_USER_LIMIT_REACHED, uc->user, "max_questions", + net_printf(thd, ER_USER_LIMIT_REACHED, uc->user, "max_questions", (long) uc->user_resources.questions); error=1; goto end; @@ -439,22 +613,26 @@ static bool check_mqh(THD *thd, uint check_command) if (uc->user_resources.updates && uc_update_queries[check_command] && uc->updates++ >= uc->user_resources.updates) { - net_printf(&thd->net, ER_USER_LIMIT_REACHED, uc->user, "max_updates", + net_printf(thd, ER_USER_LIMIT_REACHED, uc->user, "max_updates", (long) uc->user_resources.updates); error=1; goto end; } } end: + (void) pthread_mutex_unlock(&LOCK_user_conn); DBUG_RETURN(error); +#else + return (0); +#endif /* NO_EMBEDDED_ACCESS_CHECKS */ } static void reset_mqh(THD *thd, LEX_USER *lu, bool get_them= 0) { - +#ifndef NO_EMBEDDED_ACCESS_CHECKS (void) pthread_mutex_lock(&LOCK_user_conn); - if (lu) // for GRANT + if (lu) // for GRANT { USER_CONN *uc; uint temp_len=lu->user.length+lu->host.length+2; @@ -476,7 +654,8 @@ static void reset_mqh(THD *thd, LEX_USER *lu, bool get_them= 0) { for (uint idx=0;idx < hash_user_connections.records; idx++) { - USER_CONN *uc=(struct user_conn *) hash_element(&hash_user_connections, idx); + USER_CONN *uc=(struct user_conn *) hash_element(&hash_user_connections, + idx); if (get_them) get_mqh(uc->user,uc->host,uc); uc->questions=0; @@ -485,26 +664,33 @@ static void reset_mqh(THD *thd, LEX_USER *lu, bool get_them= 0) } } (void) pthread_mutex_unlock(&LOCK_user_conn); +#endif /* NO_EMBEDDED_ACCESS_CHECKS */ } - /* - Check connnetion and get priviliges - Returns 0 on ok, -1 < if error is given > 0 on error. + Perform handshake, authorize client and update thd ACL variables. + SYNOPSIS + check_connection() + thd thread handle + + RETURN + 0 success, OK is sent to user, thd is updated. 
+ -1 error, which is sent to user + > 0 error code (not sent to user) */ -static int -check_connections(THD *thd) +#ifndef EMBEDDED_LIBRARY +static int check_connection(THD *thd) { - uint connect_errors=0; + uint connect_errors= 0; NET *net= &thd->net; - /* Store the connection details */ - DBUG_PRINT("info", (("check_connections called by thread %d"), - thd->thread_id)); - DBUG_PRINT("info",("New connection received on %s", - vio_description(net->vio))); + ulong pkt_len= 0; + char *end; + + DBUG_PRINT("info", + ("New connection received on %s", vio_description(net->vio))); #ifdef SIGNAL_WITH_VIO_CLOSE - thd->set_active_vio(net->vio); + thd->set_active_vio(net->vio); #endif if (!thd->host) // If TCP/IP connection @@ -513,32 +699,23 @@ check_connections(THD *thd) if (vio_peer_addr(net->vio, ip, &thd->peer_port)) return (ER_BAD_HOST_ERROR); - if (!(thd->ip = my_strdup(ip,MYF(0)))) + if (!(thd->ip= my_strdup(ip,MYF(0)))) return (ER_OUT_OF_RESOURCES); - thd->host_or_ip=thd->ip; - vio_in_addr(net->vio, &thd->remote.sin_addr); -#if !defined(HAVE_SYS_UN_H) || defined(HAVE_mit_thread) - /* Fast local hostname resolve for Win32 */ - if (!strcmp(thd->ip,"127.0.0.1")) - { - thd->host= (char*) localhost; - thd->host_or_ip= localhost; - } - else -#endif + thd->host_or_ip= thd->ip; + vio_in_addr(net->vio,&thd->remote.sin_addr); + if (!(specialflag & SPECIAL_NO_RESOLVE)) { - if (!(specialflag & SPECIAL_NO_RESOLVE)) + vio_in_addr(net->vio,&thd->remote.sin_addr); + thd->host=ip_to_hostname(&thd->remote.sin_addr,&connect_errors); + /* Cut very long hostnames to avoid possible overflows */ + if (thd->host) { - thd->host=ip_to_hostname(&thd->remote.sin_addr,&connect_errors); - /* Cut very long hostnames to avoid possible overflows */ - if (thd->host) - { + if (thd->host != my_localhost) thd->host[min(strlen(thd->host), HOSTNAME_LENGTH)]= 0; - thd->host_or_ip= thd->host; - } - if (connect_errors > max_connect_errors) - return(ER_HOST_IS_BLOCKED); + thd->host_or_ip= thd->host; } + if (connect_errors > max_connect_errors) + return(ER_HOST_IS_BLOCKED); } DBUG_PRINT("info",("Host: %s ip: %s", thd->host ? thd->host : "unknown host", @@ -555,12 +732,11 @@ check_connections(THD *thd) bzero((char*) &thd->remote, sizeof(thd->remote)); } vio_keepalive(net->vio, TRUE); - - ulong pkt_len=0; { /* buff[] needs to big enough to hold the server_version variable */ - char buff[SERVER_VERSION_LENGTH + SCRAMBLE_LENGTH+32],*end; - int client_flags = CLIENT_LONG_FLAG | CLIENT_CONNECT_WITH_DB; + char buff[SERVER_VERSION_LENGTH + SCRAMBLE_LENGTH + 64]; + ulong client_flags = (CLIENT_LONG_FLAG | CLIENT_CONNECT_WITH_DB | + CLIENT_PROTOCOL_41 | CLIENT_SECURE_CONNECTION); if (opt_using_transactions) client_flags|=CLIENT_TRANSACTIONS; @@ -572,19 +748,36 @@ check_connections(THD *thd) client_flags |= CLIENT_SSL; /* Wow, SSL is avalaible! */ #endif /* HAVE_OPENSSL */ - end=strnmov(buff,server_version,SERVER_VERSION_LENGTH)+1; - int4store((uchar*) end,thd->thread_id); - end+=4; - memcpy(end,thd->scramble,SCRAMBLE_LENGTH+1); - end+=SCRAMBLE_LENGTH +1; - int2store(end,client_flags); - end[2]=(char) MY_CHARSET_CURRENT; - int2store(end+3,thd->server_status); - bzero(end+5,13); - end+=18; - if (net_write_command(net,(uchar) protocol_version, buff, + end= strnmov(buff, server_version, SERVER_VERSION_LENGTH) + 1; + int4store((uchar*) end, thd->thread_id); + end+= 4; + /* + So as check_connection is the only entry point to authorization + procedure, scramble is set here. This gives us new scramble for + each handshake. 
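Stepping back to the per-user limit helpers earlier in this hunk (check_for_max_user_connections, time_out_user_resource_limits and check_mqh), this is a compact sketch of how the hourly window and the quota tests fit together; a limit of 0 means "unlimited", and all names are illustrative stand-ins.

  #include <ctime>

  struct UserLimits   { unsigned max_questions, max_updates, max_conn_per_hour; }; // 0 = unlimited
  struct UserCounters {
    unsigned questions, updates, conn_per_hour;  // usage inside the current window
    time_t   intime;                             // start of the current one-hour window
  };

  // time_out_user_resource_limits(): restart the window once an hour has passed.
  void roll_window(UserCounters &uc, time_t now)
  {
    if (now - uc.intime >= 3600)
    {
      uc.questions    = 1;          // the statement being checked is counted
      uc.updates      = 0;
      uc.conn_per_hour= 0;
      uc.intime       = now;
    }
  }

  // check_mqh()-style test: count this statement and report whether a cap was hit.
  bool over_quota(const UserLimits &lim, UserCounters &uc, bool is_update, time_t now)
  {
    roll_window(uc, now);
    if (lim.max_questions && uc.questions++ >= lim.max_questions)
      return true;                                   // "max_questions" exceeded
    if (is_update && lim.max_updates && uc.updates++ >= lim.max_updates)
      return true;                                   // "max_updates" exceeded
    return false;
  }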
+ */ + create_random_string(thd->scramble, SCRAMBLE_LENGTH, &thd->rand); + /* + Old clients does not understand long scrambles, but can ignore packet + tail: that's why first part of the scramble is placed here, and second + part at the end of packet. + */ + end= strmake(end, thd->scramble, SCRAMBLE_LENGTH_323) + 1; + + int2store(end, client_flags); + /* write server characteristics: up to 16 bytes allowed */ + end[2]=(char) default_charset_info->number; + int2store(end+3, thd->server_status); + bzero(end+5, 13); + end+= 18; + /* write scramble tail */ + end= strmake(end, thd->scramble + SCRAMBLE_LENGTH_323, + SCRAMBLE_LENGTH - SCRAMBLE_LENGTH_323) + 1; + + /* At this point we write connection message and read reply */ + if (net_write_command(net, (uchar) protocol_version, "", 0, buff, (uint) (end-buff)) || - (pkt_len= my_net_read(net)) == packet_error || + (pkt_len= my_net_read(net)) == packet_error || pkt_len < MIN_HANDSHAKE_SIZE) { inc_host_errors(&thd->remote.sin_addr); @@ -600,8 +793,60 @@ check_connections(THD *thd) return(ER_OUT_OF_RESOURCES); thd->client_capabilities=uint2korr(net->read_pos); +#ifdef TO_BE_REMOVED_IN_4_1_RELEASE + /* + This is just a safety check against any client that would use the old + CLIENT_CHANGE_USER flag + */ + if ((thd->client_capabilities & CLIENT_PROTOCOL_41) && + !(thd->client_capabilities & (CLIENT_RESERVED | + CLIENT_SECURE_CONNECTION | + CLIENT_MULTI_RESULTS))) + thd->client_capabilities&= ~CLIENT_PROTOCOL_41; +#endif + if (thd->client_capabilities & CLIENT_PROTOCOL_41) + { + thd->client_capabilities|= ((ulong) uint2korr(net->read_pos+2)) << 16; + thd->max_client_packet_length= uint4korr(net->read_pos+4); + DBUG_PRINT("info", ("client_character_set: %d", (uint) net->read_pos[8])); + /* + Use server character set and collation if + - opt_character_set_client_handshake is not set + - client has not specified a character set + - client character set is the same as the servers + - client character set doesn't exists in server + */ + if (!opt_character_set_client_handshake || + !(thd->variables.character_set_client= + get_charset((uint) net->read_pos[8], MYF(0))) || + !my_strcasecmp(&my_charset_latin1, + global_system_variables.character_set_client->name, + thd->variables.character_set_client->name)) + { + thd->variables.character_set_client= + global_system_variables.character_set_client; + thd->variables.collation_connection= + global_system_variables.collation_connection; + thd->variables.character_set_results= + global_system_variables.character_set_results; + } + else + { + thd->variables.character_set_results= + thd->variables.collation_connection= + thd->variables.character_set_client; + } + thd->update_charset(); + end= (char*) net->read_pos+32; + } + else + { + thd->max_client_packet_length= uint3korr(net->read_pos+2); + end= (char*) net->read_pos+5; + } + if (thd->client_capabilities & CLIENT_IGNORE_SPACE) - thd->sql_mode|= MODE_IGNORE_SPACE; + thd->variables.sql_mode|= MODE_IGNORE_SPACE; #ifdef HAVE_OPENSSL DBUG_PRINT("info", ("client capabilities: %d", thd->client_capabilities)); if (thd->client_capabilities & CLIENT_SSL) @@ -618,10 +863,10 @@ check_connections(THD *thd) DBUG_PRINT("error", ("Failed to read user information (pkt_len= %lu)", pkt_len)); inc_host_errors(&thd->remote.sin_addr); - return(ER_HANDSHAKE_ERROR); + return(ER_HANDSHAKE_ERROR); } DBUG_PRINT("info", ("Reading user information over SSL layer")); - if ((pkt_len=my_net_read(net)) == packet_error || + if ((pkt_len= my_net_read(net)) == packet_error || pkt_len < 
NORMAL_HANDSHAKE_SIZE) { DBUG_PRINT("error", ("Failed to read user information (pkt_len= %lu)", @@ -630,37 +875,95 @@ check_connections(THD *thd) return(ER_HANDSHAKE_ERROR); } } - else - { - DBUG_PRINT("info", ("Leaving IO layer intact")); - if (pkt_len < NORMAL_HANDSHAKE_SIZE) - { - inc_host_errors(&thd->remote.sin_addr); - return ER_HANDSHAKE_ERROR; - } - } #endif - thd->max_client_packet_length=uint3korr(net->read_pos+2); - char *user= (char*) net->read_pos+5; - char *passwd= strend(user)+1; - char *db=0; - if (thd->client_capabilities & CLIENT_CONNECT_WITH_DB) - db=strend(passwd)+1; - if (strend(db ? db : passwd) - (char*)net->read_pos > pkt_len) + if (end >= (char*) net->read_pos+ pkt_len +2) { inc_host_errors(&thd->remote.sin_addr); - return ER_HANDSHAKE_ERROR; + return(ER_HANDSHAKE_ERROR); } + if (thd->client_capabilities & CLIENT_INTERACTIVE) thd->variables.net_wait_timeout= thd->variables.net_interactive_timeout; if ((thd->client_capabilities & CLIENT_TRANSACTIONS) && opt_using_transactions) - thd->net.return_status= &thd->server_status; + net->return_status= &thd->server_status; net->read_timeout=(uint) thd->variables.net_read_timeout; - if (check_user(thd,COM_CONNECT, user, passwd, db, 1)) - return (-1); - return 0; + + char *user= end; + char *passwd= strend(user)+1; + char *db= passwd; + char db_buff[NAME_LEN+1]; // buffer to store db in utf8 + char user_buff[USERNAME_LENGTH+1]; // buffer to store user in utf8 + uint dummy_errors; + + /* + Old clients send null-terminated string as password; new clients send + the size (1 byte) + string (not null-terminated). Hence in case of empty + password both send '\0'. + */ + uint passwd_len= thd->client_capabilities & CLIENT_SECURE_CONNECTION ? + *passwd++ : strlen(passwd); + db= thd->client_capabilities & CLIENT_CONNECT_WITH_DB ? + db + passwd_len + 1 : 0; + uint db_len= db ? strlen(db) : 0; + + if (passwd + passwd_len + db_len > net->read_pos + pkt_len) + { + inc_host_errors(&thd->remote.sin_addr); + return ER_HANDSHAKE_ERROR; + } + + /* Since 4.1 all database names are stored in utf8 */ + if (db) + { + db_buff[copy_and_convert(db_buff, sizeof(db_buff)-1, + system_charset_info, + db, db_len, + thd->charset(), &dummy_errors)]= 0; + db= db_buff; + } + + user_buff[copy_and_convert(user_buff, sizeof(user_buff)-1, + system_charset_info, user, strlen(user), + thd->charset(), &dummy_errors)]= '\0'; + user= user_buff; + + if (thd->user) + x_free(thd->user); + if (!(thd->user= my_strdup(user, MYF(0)))) + return (ER_OUT_OF_RESOURCES); + return check_user(thd, COM_CONNECT, passwd, passwd_len, db, TRUE); +} + + +void execute_init_command(THD *thd, sys_var_str *init_command_var, + rw_lock_t *var_mutex) +{ + Vio* save_vio; + ulong save_client_capabilities; + + thd->proc_info= "Execution of init_command"; + /* + We need to lock init_command_var because + during execution of init_command_var query + values of init_command_var can't be changed + */ + rw_rdlock(var_mutex); + thd->query= init_command_var->value; + thd->query_length= init_command_var->value_length; + save_client_capabilities= thd->client_capabilities; + thd->client_capabilities|= CLIENT_MULTI_QUERIES; + /* + We don't need return result of execution to client side. + To forbid this we should set thd->net.vio to 0. 
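The comment above describes how the credentials are laid out in the client's reply: old clients send a NUL-terminated password, new (CLIENT_SECURE_CONNECTION) clients send a one-byte length followed by that many bytes, and the database name, if present, follows. A self-contained sketch of that parsing, assuming the packet has already been NUL-terminated by the reader (struct and parameter names are illustrative):

  #include <cstddef>
  #include <cstring>

  struct AuthReply { const char *user, *passwd, *db; size_t passwd_len; };

  // secure_connection == CLIENT_SECURE_CONNECTION, with_db == CLIENT_CONNECT_WITH_DB
  bool parse_auth_reply(const char *pos, const char *pkt_end,
                        bool secure_connection, bool with_db, AuthReply *out)
  {
    out->user = pos;
    const char *passwd = pos + std::strlen(pos) + 1;     // skip user + '\0'
    // Old clients: NUL-terminated password. New clients: 1 length byte + bytes.
    out->passwd_len = secure_connection ? (size_t) (unsigned char) *passwd++
                                        : std::strlen(passwd);
    out->passwd = passwd;
    out->db = !with_db ? nullptr
            : secure_connection ? passwd + out->passwd_len       // no trailing '\0'
                                : passwd + out->passwd_len + 1;  // skip the '\0'
    size_t db_len = out->db ? std::strlen(out->db) : 0;
    // Reject replies whose declared fields run past the received packet.
    return out->passwd + out->passwd_len + db_len <= pkt_end;
  }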
+ */ + save_vio= thd->net.vio; + thd->net.vio= 0; + dispatch_command(COM_QUERY, thd, thd->query, thd->query_length+1); + rw_unlock(var_mutex); + thd->client_capabilities= save_client_capabilities; + thd->net.vio= save_vio; } @@ -678,7 +981,7 @@ pthread_handler_decl(handle_one_connection,arg) // The following calls needs to be done before we call DBUG_ macros if (!(test_flags & TEST_NO_THREADS) & my_thread_init()) { - close_connection(&thd->net,ER_OUT_OF_RESOURCES); + close_connection(thd, ER_OUT_OF_RESOURCES, 1); statistic_increment(aborted_connects,&LOCK_status); end_thread(thd,0); return 0; @@ -705,7 +1008,7 @@ pthread_handler_decl(handle_one_connection,arg) #endif if (thd->store_globals()) { - close_connection(&thd->net,ER_OUT_OF_RESOURCES); + close_connection(thd, ER_OUT_OF_RESOURCES, 1); statistic_increment(aborted_connects,&LOCK_status); end_thread(thd,0); return 0; @@ -717,13 +1020,13 @@ pthread_handler_decl(handle_one_connection,arg) NET *net= &thd->net; thd->thread_stack= (char*) &thd; - if ((error=check_connections(thd))) + if ((error=check_connection(thd))) { // Wrong permissions if (error > 0) - net_printf(net,error,thd->host_or_ip); + net_printf(thd,error,thd->host_or_ip); #ifdef __NT__ if (vio_type(net->vio) == VIO_TYPE_NAMEDPIPE) - sleep(1); /* must wait after eof() */ + my_sleep(1000); /* must wait after eof() */ #endif statistic_increment(aborted_connects,&LOCK_status); goto end_thread; @@ -736,16 +1039,16 @@ pthread_handler_decl(handle_one_connection,arg) if (thd->client_capabilities & CLIENT_COMPRESS) net->compress=1; // Use compression - thd->proc_info=0; // Remove 'login' - thd->command=COM_SLEEP; - thd->version=refresh_version; + thd->version= refresh_version; + thd->proc_info= 0; thd->set_time(); - init_sql_alloc(&thd->mem_root, thd->variables.query_alloc_block_size, - thd->variables.query_prealloc_size); - init_sql_alloc(&thd->transaction.mem_root, - thd->variables.trans_alloc_block_size, - thd->variables.trans_prealloc_size); - + thd->init_for_queries(); + if (sys_init_connect.value_length && !(thd->master_access & SUPER_ACL)) + { + execute_init_command(thd, &sys_init_connect, &LOCK_sys_init_connect); + if (thd->query_error) + thd->killed= 1; + } while (!net->error && net->vio != 0 && !thd->killed) { if (do_command(thd)) @@ -753,17 +1056,17 @@ pthread_handler_decl(handle_one_connection,arg) } if (thd->user_connect) decrease_user_connections(thd->user_connect); - free_root(&thd->mem_root,MYF(0)); - if (net->error && net->vio != 0) + free_root(thd->mem_root,MYF(0)); + if (net->error && net->vio != 0 && net->report_error) { if (!thd->killed && thd->variables.log_warnings > 1) - sql_print_error(ER(ER_NEW_ABORTING_CONNECTION), - thd->thread_id,(thd->db ? thd->db : "unconnected"), - thd->user ? thd->user : "unauthenticated", - thd->host_or_ip, - (net->last_errno ? ER(net->last_errno) : - ER(ER_UNKNOWN_ERROR))); - send_error(net,net->last_errno,NullS); + sql_print_warning(ER(ER_NEW_ABORTING_CONNECTION), + thd->thread_id,(thd->db ? thd->db : "unconnected"), + thd->user ? thd->user : "unauthenticated", + thd->host_or_ip, + (net->last_errno ? 
ER(net->last_errno) : + ER(ER_UNKNOWN_ERROR))); + send_error(thd,net->last_errno,NullS); statistic_increment(aborted_threads,&LOCK_status); } else if (thd->killed) @@ -772,7 +1075,7 @@ pthread_handler_decl(handle_one_connection,arg) } end_thread: - close_connection(net); + close_connection(thd, 0, 1); end_thread(thd,1); /* If end_thread returns, we are either running with --one-thread @@ -784,6 +1087,8 @@ end_thread: return(0); /* purecov: deadcode */ } +#endif /* EMBEDDED_LIBRARY */ + /* Execute commands from bootstrap_file. Used when creating the initial grant tables @@ -798,12 +1103,15 @@ extern "C" pthread_handler_decl(handle_bootstrap,arg) /* The following must be called before DBUG_ENTER */ if (my_thread_init() || thd->store_globals()) { - close_connection(&thd->net,ER_OUT_OF_RESOURCES); - thd->fatal_error=1; +#ifndef EMBEDDED_LIBRARY + close_connection(thd, ER_OUT_OF_RESOURCES, 1); +#endif + thd->fatal_error(); goto end; } DBUG_ENTER("handle_bootstrap"); +#ifndef EMBEDDED_LIBRARY pthread_detach_this_thread(); thd->thread_stack= (char*) &thd; #if !defined(__WIN__) && !defined(OS2) && !defined(__NETWARE__) @@ -811,6 +1119,7 @@ extern "C" pthread_handler_decl(handle_bootstrap,arg) VOID(sigemptyset(&set)); // Get mask in use VOID(pthread_sigmask(SIG_UNBLOCK,&set,&thd->block_signals)); #endif +#endif /* EMBEDDED_LIBRARY */ if (thd->variables.max_join_size == HA_POS_ERROR) thd->options |= OPTION_BIG_SELECTS; @@ -820,18 +1129,32 @@ extern "C" pthread_handler_decl(handle_bootstrap,arg) thd->priv_user=thd->user=(char*) my_strdup("boot", MYF(MY_WME)); buff= (char*) thd->net.buff; - init_sql_alloc(&thd->mem_root, thd->variables.query_alloc_block_size, - thd->variables.query_prealloc_size); - init_sql_alloc(&thd->transaction.mem_root, - thd->variables.trans_alloc_block_size, - thd->variables.trans_prealloc_size); + thd->init_for_queries(); while (fgets(buff, thd->net.max_packet, file)) { - uint length=(uint) strlen(buff); - while (length && (isspace(buff[length-1]) || buff[length-1] == ';')) + ulong length= (ulong) strlen(buff); + while (buff[length-1] != '\n' && !feof(file)) + { + /* + We got only a part of the current string. Will try to increase + net buffer then read the rest of the current string. 
+ */ + if (net_realloc(&(thd->net), 2 * thd->net.max_packet)) + { + send_error(thd, thd->net.last_errno, NullS); + thd->is_fatal_error= 1; + break; + } + buff= (char*) thd->net.buff; + fgets(buff + length, thd->net.max_packet - length, file); + length+= (ulong) strlen(buff + length); + } + if (thd->is_fatal_error) + break; + while (length && (my_isspace(thd->charset(), buff[length-1]) || + buff[length-1] == ';')) length--; buff[length]=0; - thd->current_tablenr=0; thd->query_length=length; thd->query= thd->memdup_w_gap(buff, length+1, thd->db_length+1); thd->query[length] = '\0'; @@ -840,34 +1163,44 @@ extern "C" pthread_handler_decl(handle_bootstrap,arg) { thd->net.error = 0; close_thread_tables(thd); // Free tables - free_root(&thd->mem_root,MYF(MY_KEEP_PREALLOC)); + free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC)); break; } mysql_parse(thd,thd->query,length); close_thread_tables(thd); // Free tables - if (thd->fatal_error) + if (thd->is_fatal_error) break; - free_root(&thd->mem_root,MYF(MY_KEEP_PREALLOC)); + free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC)); free_root(&thd->transaction.mem_root,MYF(MY_KEEP_PREALLOC)); } /* thd->fatal_error should be set in case something went wrong */ end: +#ifndef EMBEDDED_LIBRARY (void) pthread_mutex_lock(&LOCK_thread_count); thread_count--; (void) pthread_mutex_unlock(&LOCK_thread_count); (void) pthread_cond_broadcast(&COND_thread_count); my_thread_end(); pthread_exit(0); +#endif DBUG_RETURN(0); // Never reached } + /* This works because items are allocated with sql_alloc() */ -inline void free_items(THD *thd) +void free_items(Item *item) { + for (; item ; item=item->next) + item->delete_self(); +} + /* This works because items are allocated with sql_alloc() */ - for (Item *item=thd->free_list ; item ; item=item->next) - delete item; + +void cleanup_items(Item *item) +{ + for (; item ; item=item->next) + item->cleanup(); } int mysql_table_dump(THD* thd, char* db, char* tbl_name, int fd) @@ -886,29 +1219,29 @@ int mysql_table_dump(THD* thd, char* db, char* tbl_name, int fd) if (!db || check_db_name(db)) { - net_printf(&thd->net,ER_WRONG_DB_NAME, db ? db : "NULL"); + net_printf(thd,ER_WRONG_DB_NAME, db ? db : "NULL"); goto err; } if (lower_case_table_names) - casedn_str(tbl_name); - remove_escape(tbl_name); + my_casedn_str(files_charset_info, tbl_name); + remove_escape(table_list->real_name); if (!(table=open_ltable(thd, table_list, TL_READ_NO_INSERT))) DBUG_RETURN(1); - if (check_one_table_access(thd, SELECT_ACL, table_list, 0)) + if (check_one_table_access(thd, SELECT_ACL, table_list)) goto err; thd->free_list = 0; thd->query_length=(uint) strlen(tbl_name); thd->query = tbl_name; if ((error = mysqld_dump_create_info(thd, table, -1))) { - my_error(ER_GET_ERRNO, MYF(0)); + my_error(ER_GET_ERRNO, MYF(0), my_errno); goto err; } net_flush(&thd->net); if ((error= table->file->dump(thd,fd))) - my_error(ER_GET_ERRNO, MYF(0)); + my_error(ER_GET_ERRNO, MYF(0), error); err: close_thread_tables(thd); @@ -916,7 +1249,17 @@ err: } - /* Execute one command from socket (query or simple command) */ +#ifndef EMBEDDED_LIBRARY + +/* + Read one command from socket and execute it (query or simple command). + This function is called in loop from thread function. 
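A standalone sketch of the grow-until-the-line-fits loop used by the bootstrap reader above, with std::vector standing in for the net buffer and net_realloc():

  #include <cstdio>
  #include <cstring>
  #include <vector>

  // Reads one '\n'-terminated line of arbitrary length; returns false on EOF.
  bool read_full_line(std::FILE *file, std::vector<char> &buf)
  {
    if (!std::fgets(buf.data(), (int) buf.size(), file))
      return false;
    size_t length = std::strlen(buf.data());
    while (length > 0 && buf[length - 1] != '\n' && !std::feof(file))
    {
      buf.resize(buf.size() * 2);                     // double, like net_realloc
      if (!std::fgets(buf.data() + length, (int) (buf.size() - length), file))
        break;
      length += std::strlen(buf.data() + length);
    }
    return true;
  }

Typical use would pre-size the buffer to a reasonable default, e.g. std::vector<char> buf(4096), and call read_full_line() in a loop.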
+ SYNOPSIS + do_command() + RETURN VALUE + 0 success + 1 request of thread shutdown (see dispatch_command() description) +*/ bool do_command(THD *thd) { @@ -928,14 +1271,17 @@ bool do_command(THD *thd) DBUG_ENTER("do_command"); net= &thd->net; - thd->current_tablenr=0; + /* + indicator of uninitialized lex => normal flow of errors handling + (see my_message_sql) + */ + thd->lex->current_select= 0; packet=0; old_timeout=net->read_timeout; // Wait max for 8 hours net->read_timeout=(uint) thd->variables.net_wait_timeout; - net->last_error[0]=0; // Clear error message - net->last_errno=0; + thd->clear_error(); // Clear error message net_new_transaction(net); if ((packet_length=my_net_read(net)) == packet_error) @@ -949,7 +1295,7 @@ bool do_command(THD *thd) statistic_increment(aborted_threads,&LOCK_status); DBUG_RETURN(TRUE); // We have to close it. } - send_error(net,net->last_errno,NullS); + send_error(thd,net->last_errno,NullS); net->error= 0; DBUG_RETURN(FALSE); } @@ -964,147 +1310,263 @@ bool do_command(THD *thd) command_name[command])); } net->read_timeout=old_timeout; // restore it + /* + packet_length contains length of data, as it was stored in packet + header. In case of malformed header, packet_length can be zero. + If packet_length is not zero, my_net_read ensures that this number + of bytes was actually read from network. Additionally my_net_read + sets packet[packet_length]= 0 (thus if packet_length == 0, + command == packet[0] == COM_SLEEP). + In dispatch_command packet[packet_length] points beyond the end of packet. + */ DBUG_RETURN(dispatch_command(command,thd, packet+1, (uint) packet_length)); } +#endif /* EMBEDDED_LIBRARY */ +/* + Perform one connection-level (COM_XXXX) command. + SYNOPSIS + dispatch_command() + thd connection handle + command type of command to perform + packet data for the command, packet is always null-terminated + packet_length length of packet + 1 (to show that data is + null-terminated) except for COM_SLEEP, where it + can be zero. + RETURN VALUE + 0 ok + 1 request of thread shutdown, i. e. if command is + COM_QUIT/COM_SHUTDOWN +*/ bool dispatch_command(enum enum_server_command command, THD *thd, char* packet, uint packet_length) { NET *net= &thd->net; - bool error=0; - /* - Commands which will always take a long time should be marked with - this so that they will not get logged to the slow query log - */ - bool slow_command=FALSE; + bool error= 0; DBUG_ENTER("dispatch_command"); thd->command=command; + /* + Commands which always take a long time are logged into + the slow log only if opt_log_slow_admin_statements is set. 
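The comment above spells out how a client packet is framed: the first byte is the COM_* code and the reader NUL-terminates the data, so a zero-length (malformed) packet decodes as COM_SLEEP. A toy version of that split; the enum values and the length convention used here are illustrative, not the server's exact definitions:

  #include <cstddef>

  enum Command { CMD_SLEEP = 0, CMD_QUERY, CMD_PING, CMD_QUIT };   // illustrative values

  struct Request { Command cmd; const char *arg; size_t arg_len; };

  Request split_packet(const char *packet, size_t packet_length)
  {
    Request r;
    // packet[0] is always readable: the reader stores a 0 at packet[packet_length],
    // so packet_length == 0 yields command 0 (COM_SLEEP in this sketch).
    r.cmd     = (Command) (unsigned char) packet[0];
    r.arg     = packet + 1;                             // payload starts after the command byte
    r.arg_len = packet_length ? packet_length - 1 : 0;  // payload bytes after the command byte
    return r;
  }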
+ */ + thd->enable_slow_log= TRUE; thd->set_time(); VOID(pthread_mutex_lock(&LOCK_thread_count)); thd->query_id=query_id; if (command != COM_STATISTICS && command != COM_PING) query_id++; thread_running++; + /* TODO: set thd->lex->sql_command to SQLCOM_END here */ VOID(pthread_mutex_unlock(&LOCK_thread_count)); - thd->lex.select_lex.options=0; // We store status here + thd->server_status&= + ~(SERVER_QUERY_NO_INDEX_USED | SERVER_QUERY_NO_GOOD_INDEX_USED); switch (command) { case COM_INIT_DB: + { + LEX_STRING tmp; statistic_increment(com_stat[SQLCOM_CHANGE_DB],&LOCK_status); - if (!mysql_change_db(thd,packet)) + thd->convert_string(&tmp, system_charset_info, + packet, strlen(packet), thd->charset()); + if (!mysql_change_db(thd, tmp.str)) mysql_log.write(thd,command,"%s",thd->db); break; + } +#ifdef HAVE_REPLICATION case COM_REGISTER_SLAVE: { if (!register_slave(thd, (uchar*)packet, packet_length)) - send_ok(&thd->net); + send_ok(thd); break; } +#endif case COM_TABLE_DUMP: + { + char *db, *tbl_name; + uint db_len= *(uchar*) packet; + if (db_len >= packet_length || db_len > NAME_LEN) { - statistic_increment(com_other, &LOCK_status); - slow_command = TRUE; - uint db_len = *(uchar*)packet; - if (db_len >= packet_length || db_len > NAME_LEN) - { - send_error(&thd->net, ER_UNKNOWN_COM_ERROR); - break; - } - uint tbl_len = *(uchar*)(packet + db_len + 1); - if (db_len+tbl_len+2 > packet_length || tbl_len > NAME_LEN) - { - send_error(&thd->net, ER_UNKNOWN_COM_ERROR); - break; - } - char* db = thd->alloc(db_len + tbl_len + 2); - memcpy(db, packet + 1, db_len); - char* tbl_name = db + db_len; - *tbl_name++ = 0; - memcpy(tbl_name, packet + db_len + 2, tbl_len); - tbl_name[tbl_len] = 0; - if (mysql_table_dump(thd, db, tbl_name, -1)) - send_error(&thd->net); // dump to NET + send_error(&thd->net, ER_UNKNOWN_COM_ERROR); + break; + } + uint tbl_len= *(uchar*) (packet + db_len + 1); + if (db_len+tbl_len+2 > packet_length || tbl_len > NAME_LEN) + { + send_error(&thd->net, ER_UNKNOWN_COM_ERROR); break; } + + statistic_increment(com_other, &LOCK_status); + thd->enable_slow_log= opt_log_slow_admin_statements; + db= thd->alloc(db_len + tbl_len + 2); + tbl_name= strmake(db, packet + 1, db_len)+1; + strmake(tbl_name, packet + db_len + 2, tbl_len); + if (mysql_table_dump(thd, db, tbl_name, -1)) + send_error(thd); // dump to NET + break; + } case COM_CHANGE_USER: { thd->change_user(); - clear_error_message(thd); // If errors from rollback + thd->clear_error(); // if errors from rollback - statistic_increment(com_other,&LOCK_status); - char *user= (char*) packet; + statistic_increment(com_other, &LOCK_status); + char *user= (char*) packet; char *passwd= strend(user)+1; - char *db= strend(passwd)+1; + /* + Old clients send null-terminated string ('\0' for empty string) for + password. New clients send the size (1 byte) + string (not null + terminated, so also '\0' for empty string). + */ + char db_buff[NAME_LEN+1]; // buffer to store db in utf8 + char *db= passwd; + uint passwd_len= thd->client_capabilities & CLIENT_SECURE_CONNECTION ? 
+ *passwd++ : strlen(passwd); + db+= passwd_len + 1; +#ifndef EMBEDDED_LIBRARY + /* Small check for incomming packet */ + if ((uint) ((uchar*) db - net->read_pos) > packet_length) + { + send_error(thd, ER_UNKNOWN_COM_ERROR); + break; + } +#endif + /* Convert database name to utf8 */ + uint dummy_errors; + db_buff[copy_and_convert(db_buff, sizeof(db_buff)-1, + system_charset_info, db, strlen(db), + thd->charset(), &dummy_errors)]= 0; + db= db_buff; /* Save user and privileges */ - uint save_master_access=thd->master_access; - uint save_db_access= thd->db_access; - uint save_db_length= thd->db_length; - char *save_user= thd->user; - char *save_priv_user= thd->priv_user; - char *save_db= thd->db; + uint save_master_access= thd->master_access; + uint save_db_access= thd->db_access; + uint save_db_length= thd->db_length; + char *save_user= thd->user; + char *save_priv_user= thd->priv_user; + char *save_db= thd->db; USER_CONN *save_user_connect= thd->user_connect; - - if ((uint) ((uchar*) db - net->read_pos) > packet_length) - { // Check if protocol is ok - send_error(net, ER_UNKNOWN_COM_ERROR); + + if (!(thd->user= my_strdup(user, MYF(0)))) + { + thd->user= save_user; + send_error(thd, ER_OUT_OF_RESOURCES); break; } /* Clear variables that are allocated */ - thd->user= 0; thd->user_connect= 0; - if (check_user(thd, COM_CHANGE_USER, user, passwd, db, 0)) - { // Restore old user + int res= check_user(thd, COM_CHANGE_USER, passwd, passwd_len, db, FALSE); + + if (res) + { + /* authentification failure, we shall restore old user */ + if (res > 0) + send_error(thd, ER_UNKNOWN_COM_ERROR); x_free(thd->user); - thd->master_access=save_master_access; - thd->db_access=save_db_access; - thd->db=save_db; - thd->db_length=save_db_length; - thd->user=save_user; - thd->priv_user=save_priv_user; + thd->user= save_user; + thd->priv_user= save_priv_user; thd->user_connect= save_user_connect; - break; + thd->master_access= save_master_access; + thd->db_access= save_db_access; + thd->db= save_db; + thd->db_length= save_db_length; + } + else + { + /* we've authenticated new user */ + if (save_user_connect) + decrease_user_connections(save_user_connect); + x_free((gptr) save_db); + x_free((gptr) save_user); } - if (save_user_connect) - decrease_user_connections(save_user_connect); - x_free((gptr) save_db); - x_free((gptr) save_user); break; } - + case COM_EXECUTE: + { + mysql_stmt_execute(thd, packet, packet_length); + break; + } + case COM_LONG_DATA: + { + mysql_stmt_get_longdata(thd, packet, packet_length); + break; + } + case COM_PREPARE: + { + mysql_stmt_prepare(thd, packet, packet_length); + break; + } + case COM_CLOSE_STMT: + { + mysql_stmt_free(thd, packet); + break; + } + case COM_RESET_STMT: + { + mysql_stmt_reset(thd, packet); + break; + } case COM_QUERY: { - packet_length--; // Remove end null - /* Remove garage at start and end of query */ - while (isspace(packet[0]) && packet_length > 0) - { - packet++; - packet_length--; - } - char *pos=packet+packet_length; // Point at end null - while (packet_length > 0 && (pos[-1] == ';' || isspace(pos[-1]))) - { - pos--; - packet_length--; - } - /* We must allocate some extra memory for query cache */ - thd->query_length= 0; // Extra safety: Avoid races - if (!(thd->query= (char*) thd->memdup_w_gap((gptr) (packet), - packet_length, - thd->db_length+2+ - sizeof(ha_rows)))) - break; - thd->query[packet_length]=0; - thd->packet.shrink(thd->variables.net_buffer_length);// Reclaim some memory - if (!(specialflag & SPECIAL_NO_PRIOR)) - 
my_pthread_setprio(pthread_self(),QUERY_PRIOR); + if (alloc_query(thd, packet, packet_length)) + break; // fatal error is set + char *packet_end= thd->query + thd->query_length; mysql_log.write(thd,command,"%s",thd->query); DBUG_PRINT("query",("%-.4096s",thd->query)); - /* thd->query_length is set by mysql_parse() */ - mysql_parse(thd,thd->query,packet_length); + mysql_parse(thd,thd->query, thd->query_length); + + while (!thd->killed && !thd->is_fatal_error && thd->lex->found_colon) + { + char *packet= thd->lex->found_colon; + /* + Multiple queries exits, execute them individually + in embedded server - just store them to be executed later + */ +#ifndef EMBEDDED_LIBRARY + if (thd->lock || thd->open_tables || thd->derived_tables) + close_thread_tables(thd); +#endif + ulong length= (ulong)(packet_end-packet); + + log_slow_statement(thd); + + /* Remove garbage at start of query */ + while (my_isspace(thd->charset(), *packet) && length > 0) + { + packet++; + length--; + } + VOID(pthread_mutex_lock(&LOCK_thread_count)); + thd->query_length= length; + thd->query= packet; + thd->query_id= query_id++; + thd->set_time(); /* Reset the query start time. */ + /* TODO: set thd->lex->sql_command to SQLCOM_END here */ + VOID(pthread_mutex_unlock(&LOCK_thread_count)); +#ifndef EMBEDDED_LIBRARY + mysql_parse(thd, packet, length); +#else + /* + 'packet' can point inside the query_rest's buffer + so we have to do memmove here + */ + if (thd->query_rest.length() > length) + { + memmove(thd->query_rest.c_ptr(), packet, length); + thd->query_rest.length(length); + } + else + thd->query_rest.copy(packet, length, thd->query_rest.charset()); + + thd->server_status&= ~ (SERVER_QUERY_NO_INDEX_USED | + SERVER_QUERY_NO_GOOD_INDEX_USED); + break; +#endif /*EMBEDDED_LIBRARY*/ + } + if (!(specialflag & SPECIAL_NO_PRIOR)) my_pthread_setprio(pthread_self(),WAIT_PRIOR); DBUG_PRINT("info",("query ready")); @@ -1112,36 +1574,44 @@ bool dispatch_command(enum enum_server_command command, THD *thd, } case COM_FIELD_LIST: // This isn't actually needed #ifdef DONT_ALLOW_SHOW_COMMANDS - send_error(&thd->net,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ + send_error(thd,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ break; #else { - char *fields; + char *fields, *pend; TABLE_LIST table_list; + LEX_STRING conv_name; + statistic_increment(com_stat[SQLCOM_SHOW_FIELDS],&LOCK_status); bzero((char*) &table_list,sizeof(table_list)); if (!(table_list.db=thd->db)) { - send_error(net,ER_NO_DB_ERROR); + send_error(thd,ER_NO_DB_ERROR); break; } thd->free_list=0; - table_list.alias= table_list.real_name= thd->strdup(packet); - packet=strend(packet)+1; + pend= strend(packet); + thd->convert_string(&conv_name, system_charset_info, + packet, (uint) (pend-packet), thd->charset()); + table_list.alias= table_list.real_name= conv_name.str; + packet= pend+1; thd->query_length= strlen(packet); // for simplicity: don't optimize if (!(thd->query=fields=thd->memdup(packet,thd->query_length+1))) break; mysql_log.write(thd,command,"%s %s",table_list.real_name,fields); if (lower_case_table_names) - casedn_str(table_list.real_name); + my_casedn_str(files_charset_info, table_list.real_name); remove_escape(table_list.real_name); // This can't have wildcards - if (check_access(thd,SELECT_ACL,table_list.db,&table_list.grant.privilege)) + if (check_access(thd,SELECT_ACL,table_list.db,&table_list.grant.privilege, + 0, 0)) break; - if (grant_option && check_grant(thd,SELECT_ACL,&table_list,2)) + if (grant_option && + check_grant(thd, SELECT_ACL, &table_list, 2, 
UINT_MAX, 0)) break; mysqld_list_fields(thd,&table_list,fields); - free_items(thd); + free_items(thd->free_list); + thd->free_list= 0; break; } #endif @@ -1155,20 +1625,22 @@ bool dispatch_command(enum enum_server_command command, THD *thd, case COM_CREATE_DB: // QQ: To be removed { char *db=thd->strdup(packet), *alias; + HA_CREATE_INFO create_info; statistic_increment(com_stat[SQLCOM_CREATE_DB],&LOCK_status); // null test to handle EOM if (!db || !(alias= thd->strdup(db)) || check_db_name(db)) { - net_printf(&thd->net,ER_WRONG_DB_NAME, db ? db : "NULL"); + net_printf(thd,ER_WRONG_DB_NAME, db ? db : "NULL"); break; } - if (check_access(thd,CREATE_ACL,db,0,1)) + if (check_access(thd,CREATE_ACL,db,0,1,0)) break; mysql_log.write(thd,command,packet); + bzero(&create_info, sizeof(create_info)); if (mysql_create_db(thd, (lower_case_table_names == 2 ? alias : db), - 0, 0) < 0) - send_error(&thd->net, thd->killed ? ER_SERVER_SHUTDOWN : 0); + &create_info, 0) < 0) + send_error(thd, thd->killed ? ER_SERVER_SHUTDOWN : 0); break; } case COM_DROP_DB: // QQ: To be removed @@ -1178,33 +1650,34 @@ bool dispatch_command(enum enum_server_command command, THD *thd, // null test to handle EOM if (!db || !(alias= thd->strdup(db)) || check_db_name(db)) { - net_printf(&thd->net,ER_WRONG_DB_NAME, db ? db : "NULL"); + net_printf(thd,ER_WRONG_DB_NAME, db ? db : "NULL"); break; } - if (check_access(thd,DROP_ACL,db,0,1)) + if (check_access(thd,DROP_ACL,db,0,1,0)) break; if (thd->locked_tables || thd->active_transaction()) { - send_error(&thd->net,ER_LOCK_OR_ACTIVE_TRANSACTION); + send_error(thd,ER_LOCK_OR_ACTIVE_TRANSACTION); break; } mysql_log.write(thd,command,db); if (mysql_rm_db(thd, (lower_case_table_names == 2 ? alias : db), 0, 0) < 0) - send_error(&thd->net, thd->killed ? ER_SERVER_SHUTDOWN : 0); + send_error(thd, thd->killed ? 
ER_SERVER_SHUTDOWN : 0); break; } +#ifndef EMBEDDED_LIBRARY case COM_BINLOG_DUMP: { + ulong pos; + ushort flags; + uint32 slave_server_id; + statistic_increment(com_other,&LOCK_status); - slow_command = TRUE; + thd->enable_slow_log= opt_log_slow_admin_statements; if (check_global_access(thd, REPL_SLAVE_ACL)) break; - mysql_log.write(thd,command, 0); - ulong pos; - ushort flags; - uint32 slave_server_id; /* TODO: The following has to be changed to an 8 byte integer */ pos = uint4korr(packet); flags = uint2korr(packet + 4); @@ -1212,6 +1685,9 @@ bool dispatch_command(enum enum_server_command command, THD *thd, if ((slave_server_id= uint4korr(packet+6))) // mysqlbinlog.server_id==0 kill_zombie_dump_threads(slave_server_id); thd->server_id = slave_server_id; + + mysql_log.write(thd, command, "Log: '%s' Pos: %ld", packet+10, + (long) pos); mysql_binlog_send(thd, thd->strdup(packet + 10), (my_off_t) pos, flags); unregister_slave(thd,1,1); // fake COM_QUIT -- if we get here, the thread needs to terminate @@ -1219,6 +1695,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, net->error = 0; break; } +#endif case COM_REFRESH: { statistic_increment(com_stat[SQLCOM_FLUSH],&LOCK_status); @@ -1226,36 +1703,62 @@ bool dispatch_command(enum enum_server_command command, THD *thd, if (check_global_access(thd,RELOAD_ACL)) break; mysql_log.write(thd,command,NullS); - /* error sending is deferred to reload_acl_and_cache */ - reload_acl_and_cache(thd, options, (TABLE_LIST*) 0) ; + if (reload_acl_and_cache(thd, options, (TABLE_LIST*) 0, NULL)) + send_error(thd, 0); + else + send_ok(thd); break; } +#ifndef EMBEDDED_LIBRARY case COM_SHUTDOWN: + { statistic_increment(com_other,&LOCK_status); if (check_global_access(thd,SHUTDOWN_ACL)) break; /* purecov: inspected */ - DBUG_PRINT("quit",("Got shutdown command")); + /* + If the client is < 4.1.3, it is going to send us no argument; then + packet_length is 1, packet[0] is the end 0 of the packet. Note that + SHUTDOWN_DEFAULT is 0. If client is >= 4.1.3, the shutdown level is in + packet[0]. 
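A sketch of the COM_BINLOG_DUMP payload decoded just above — 4-byte start position, 2-byte flags, 4-byte slave server id, then the log file name at offset 10 — using plain memcpy loads in place of uint4korr/uint2korr (little-endian byte order assumed, as on the wire):

  #include <cstdint>
  #include <cstring>
  #include <string>

  struct BinlogDumpRequest {
    uint32_t    pos;              // start position in the binary log
    uint16_t    flags;
    uint32_t    slave_server_id;
    std::string log_name;         // NUL-terminated name starting at byte 10
  };

  BinlogDumpRequest parse_binlog_dump(const char *packet)
  {
    BinlogDumpRequest r;
    std::memcpy(&r.pos,             packet,     4);
    std::memcpy(&r.flags,           packet + 4, 2);
    std::memcpy(&r.slave_server_id, packet + 6, 4);
    r.log_name.assign(packet + 10);
    return r;
  }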
+ */ + enum mysql_enum_shutdown_level level= + (enum mysql_enum_shutdown_level) (uchar) packet[0]; + DBUG_PRINT("quit",("Got shutdown command for level %u", level)); + if (level == SHUTDOWN_DEFAULT) + level= SHUTDOWN_WAIT_ALL_BUFFERS; // soon default will be configurable + else if (level != SHUTDOWN_WAIT_ALL_BUFFERS) + { + my_error(ER_NOT_SUPPORTED_YET, MYF(0), "this shutdown level"); + send_error(thd); + break; + } + DBUG_PRINT("quit",("Got shutdown command for level %u", level)); mysql_log.write(thd,command,NullS); - send_eof(net); + send_eof(thd); #ifdef __WIN__ sleep(1); // must wait after eof() #endif #ifndef OS2 - send_eof(net); // This is for 'quit request' + send_eof(thd); // This is for 'quit request' #endif - close_connection(net); + close_connection(thd, 0, 1); close_thread_tables(thd); // Free before kill - free_root(&thd->mem_root,MYF(0)); + free_root(thd->mem_root,MYF(0)); free_root(&thd->transaction.mem_root,MYF(0)); kill_mysql(); error=TRUE; break; - + } +#endif case COM_STATISTICS: { mysql_log.write(thd,command,NullS); statistic_increment(com_stat[SQLCOM_SHOW_STATUS],&LOCK_status); +#ifndef EMBEDDED_LIBRARY char buff[200]; +#else + char *buff= thd->net.last_error; +#endif ulong uptime = (ulong) (thd->start_time - start_time); sprintf((char*) buff, "Uptime: %ld Threads: %d Questions: %lu Slow queries: %ld Opens: %ld Flush tables: %ld Open tables: %u Queries per second avg: %.3f", @@ -1268,22 +1771,25 @@ bool dispatch_command(enum enum_server_command command, THD *thd, sprintf(strend(buff), " Memory in use: %ldK Max memory used: %ldK", (sf_malloc_cur_memory+1023L)/1024L, (sf_malloc_max_memory+1023L)/1024L); - #endif +#endif +#ifndef EMBEDDED_LIBRARY VOID(my_net_write(net, buff,(uint) strlen(buff))); VOID(net_flush(net)); +#endif break; } case COM_PING: statistic_increment(com_other,&LOCK_status); - send_ok(net); // Tell client we are alive + send_ok(thd); // Tell client we are alive break; case COM_PROCESS_INFO: statistic_increment(com_stat[SQLCOM_SHOW_PROCESSLIST],&LOCK_status); if (!thd->priv_user[0] && check_global_access(thd,PROCESS_ACL)) break; mysql_log.write(thd,command,NullS); - mysqld_list_processes(thd,thd->master_access & PROCESS_ACL ? NullS : - thd->priv_user,0); + mysqld_list_processes(thd, + thd->master_access & PROCESS_ACL ? 
+ NullS : thd->priv_user, 0); break; case COM_PROCESS_KILL: { @@ -1292,13 +1798,32 @@ bool dispatch_command(enum enum_server_command command, THD *thd, kill_one_thread(thd,id); break; } + case COM_SET_OPTION: + { + statistic_increment(com_stat[SQLCOM_SET_OPTION], &LOCK_status); + enum_mysql_set_option command= (enum_mysql_set_option) uint2korr(packet); + switch (command) { + case MYSQL_OPTION_MULTI_STATEMENTS_ON: + thd->client_capabilities|= CLIENT_MULTI_STATEMENTS; + send_eof(thd); + break; + case MYSQL_OPTION_MULTI_STATEMENTS_OFF: + thd->client_capabilities&= ~CLIENT_MULTI_STATEMENTS; + send_eof(thd); + break; + default: + send_error(thd, ER_UNKNOWN_COM_ERROR); + break; + } + break; + } case COM_DEBUG: statistic_increment(com_other,&LOCK_status); if (check_global_access(thd, SUPER_ACL)) break; /* purecov: inspected */ mysql_print_status(thd); mysql_log.write(thd,command,NullS); - send_eof(net); + send_eof(thd); break; case COM_SLEEP: case COM_CONNECT: // Impossible here @@ -1306,99 +1831,193 @@ bool dispatch_command(enum enum_server_command command, THD *thd, case COM_DELAYED_INSERT: case COM_END: default: - send_error(net, ER_UNKNOWN_COM_ERROR); + send_error(thd, ER_UNKNOWN_COM_ERROR); break; } - if (thd->lock || thd->open_tables) + if (thd->lock || thd->open_tables || thd->derived_tables) { thd->proc_info="closing tables"; close_thread_tables(thd); /* Free tables */ } - if (thd->fatal_error) - send_error(net,0); // End of memory ? + if (thd->is_fatal_error) + send_error(thd,0); // End of memory ? + + log_slow_statement(thd); + thd->proc_info="cleaning up"; + VOID(pthread_mutex_lock(&LOCK_thread_count)); // For process list + thd->proc_info=0; + thd->command=COM_SLEEP; + thd->query=0; + thd->query_length=0; + thread_running--; + VOID(pthread_mutex_unlock(&LOCK_thread_count)); + thd->packet.shrink(thd->variables.net_buffer_length); // Reclaim some memory + free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC)); + DBUG_RETURN(error); +} + + +void log_slow_statement(THD *thd) +{ time_t start_of_query=thd->start_time; thd->end_time(); // Set start time - /* If not reading from backup and if the query took too long */ - if (!slow_command && !thd->user_time) // do not log 'slow_command' queries + /* + Do not log administrative statements unless the appropriate option is + set; do not log into slow log if reading from backup. 
+ */ + if (thd->enable_slow_log && !thd->user_time) { thd->proc_info="logging slow query"; if ((ulong) (thd->start_time - thd->time_after_lock) > thd->variables.long_query_time || - ((thd->lex.select_lex.options & - (QUERY_NO_INDEX_USED | QUERY_NO_GOOD_INDEX_USED)) && - (specialflag & SPECIAL_LONG_LOG_FORMAT))) + ((thd->server_status & + (SERVER_QUERY_NO_INDEX_USED | SERVER_QUERY_NO_GOOD_INDEX_USED)) && + (specialflag & SPECIAL_LOG_QUERIES_NOT_USING_INDEXES))) { long_query_count++; mysql_slow_log.write(thd, thd->query, thd->query_length, start_of_query); } } - thd->proc_info="cleaning up"; - VOID(pthread_mutex_lock(&LOCK_thread_count)); // For process list - thd->proc_info=0; - thd->command=COM_SLEEP; - thd->query=0; - thd->query_length=0; - thread_running--; - VOID(pthread_mutex_unlock(&LOCK_thread_count)); - thd->packet.shrink(thd->variables.net_buffer_length); // Reclaim some memory - free_root(&thd->mem_root,MYF(MY_KEEP_PREALLOC)); - DBUG_RETURN(error); } + +/* + Read query from packet and store in thd->query + Used in COM_QUERY and COM_PREPARE + + DESCRIPTION + Sets the following THD variables: + query + query_length + + RETURN VALUES + 0 ok + 1 error; In this case thd->fatal_error is set +*/ + +bool alloc_query(THD *thd, char *packet, ulong packet_length) +{ + packet_length--; // Remove end null + /* Remove garbage at start and end of query */ + while (my_isspace(thd->charset(),packet[0]) && packet_length > 0) + { + packet++; + packet_length--; + } + char *pos=packet+packet_length; // Point at end null + while (packet_length > 0 && + (pos[-1] == ';' || my_isspace(thd->charset() ,pos[-1]))) + { + pos--; + packet_length--; + } + /* We must allocate some extra memory for query cache */ + thd->query_length= 0; // Extra safety: Avoid races + if (!(thd->query= (char*) thd->memdup_w_gap((gptr) (packet), + packet_length, + thd->db_length+ 1 + + QUERY_CACHE_FLAGS_SIZE))) + return 1; + thd->query[packet_length]=0; + thd->query_length= packet_length; + + /* Reclaim some memory */ + thd->packet.shrink(thd->variables.net_buffer_length); + thd->convert_buffer.shrink(thd->variables.net_buffer_length); + + if (!(specialflag & SPECIAL_NO_PRIOR)) + my_pthread_setprio(pthread_self(),QUERY_PRIOR); + return 0; +} + +static void reset_one_shot_variables(THD *thd) +{ + thd->variables.character_set_client= + global_system_variables.character_set_client; + thd->variables.collation_connection= + global_system_variables.collation_connection; + thd->variables.collation_database= + global_system_variables.collation_database; + thd->variables.collation_server= + global_system_variables.collation_server; + thd->update_charset(); + thd->variables.time_zone= + global_system_variables.time_zone; + thd->one_shot_set= 0; +} + + /**************************************************************************** ** mysql_execute_command ** Execute command saved in thd and current_lex->sql_command ****************************************************************************/ void -mysql_execute_command(void) -{ - int res=0; - THD *thd=current_thd; - LEX *lex= &thd->lex; - TABLE_LIST *tables=(TABLE_LIST*) lex->select_lex.table_list.first; - SELECT_LEX *select_lex = lex->select; - bool slave_fake_lock= 0; - MYSQL_LOCK *fake_prev_lock= 0; +mysql_execute_command(THD *thd) +{ + int res= 0; + LEX *lex= thd->lex; + SELECT_LEX *select_lex= &lex->select_lex; + TABLE_LIST *tables= (TABLE_LIST*) select_lex->table_list.first; + SELECT_LEX_UNIT *unit= &lex->unit; DBUG_ENTER("mysql_execute_command"); + /* + Reset warning count for each query 
that uses tables + A better approach would be to reset this for any commands + that is not a SHOW command or a select that only access local + variables, but for now this is probably good enough. + */ + if (tables || &lex->select_lex != lex->all_selects_list || + lex->time_zone_tables_used) + mysql_reset_errors(thd); + + /* + When subselects or time_zone info is used in a query + we create a new TABLE_LIST containing all referenced tables + and set local variable 'tables' to point to this list. + */ + if ((&lex->select_lex != lex->all_selects_list || + lex->time_zone_tables_used) && + lex->unit.create_total_list(thd, lex, &tables)) + DBUG_VOID_RETURN; + +#ifdef HAVE_REPLICATION if (thd->slave_thread) { - if (lex->sql_command == SQLCOM_MULTI_UPDATE) - { - DBUG_PRINT("info",("need faked locked tables")); - - if (check_multi_update_lock(thd, tables, &select_lex->item_list)) - goto error; - - /* Fix for replication, the tables are opened and locked, - now we pretend that we have performed a LOCK TABLES action */ - - fake_prev_lock= thd->locked_tables; - if (thd->lock) - thd->locked_tables= thd->lock; - thd->lock= 0; - slave_fake_lock= 1; - } - /* - Skip if we are in the slave thread, some table rules have been - given and the table list says the query should not be replicated + /* + Check if statment should be skipped because of slave filtering + rules + + Exceptions are: + - UPDATE MULTI: For this statement, we want to check the filtering + rules later in the code + - SET: we always execute it (Not that many SET commands exists in + the binary log anyway -- only 4.1 masters write SET statements, + in 5.0 there are no SET statements in the binary log) + - DROP TEMPORARY TABLE IF EXISTS: we always execute it (otherwise we + have stale files on slave caused by exclusion of one tmp table). */ - if (all_tables_not_ok(thd,tables)) + if (!(lex->sql_command == SQLCOM_UPDATE_MULTI) && + !(lex->sql_command == SQLCOM_SET_OPTION) && + !(lex->sql_command == SQLCOM_DROP_TABLE && + lex->drop_temporary && lex->drop_if_exists) && + all_tables_not_ok(thd,tables)) { /* we warn the slave SQL thread */ my_error(ER_SLAVE_IGNORED_TABLE, MYF(0)); + reset_one_shot_variables(thd); DBUG_VOID_RETURN; } #ifndef TO_BE_DELETED /* - This is a workaround to deal with the shortcoming in 3.23.44-3.23.46 - masters in RELEASE_LOCK() logging. We re-write SELECT RELEASE_LOCK() - as DO RELEASE_LOCK() + This is a workaround to deal with the shortcoming in 3.23.44-3.23.46 + masters in RELEASE_LOCK() logging. We re-write SELECT RELEASE_LOCK() + as DO RELEASE_LOCK() */ if (lex->sql_command == SQLCOM_SELECT) { @@ -1407,12 +2026,8 @@ mysql_execute_command(void) } #endif } - - if (lex->select_lex.next && - create_total_list(thd,lex,&tables, - (lex->sql_command == SQLCOM_CREATE_TABLE))) - DBUG_VOID_RETURN; - +#endif /* HAVE_REPLICATION */ + /* When option readonly is set deny operations which change tables. Except for the replication thread and the 'super' users. 
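alloc_query(), shown a little earlier in this hunk, trims the raw COM_QUERY text before copying it into the statement memroot. A standalone version of that trimming, with std::string standing in for the memdup'ed buffer (the real copy also reserves extra room for query-cache flags):

  #include <cctype>
  #include <cstddef>
  #include <string>

  std::string trim_query(const char *packet, size_t length)
  {
    const char *begin = packet, *end = packet + length;
    while (begin < end && std::isspace((unsigned char) *begin))
      begin++;                                          // drop leading garbage
    while (end > begin &&
           (end[-1] == ';' || std::isspace((unsigned char) end[-1])))
      end--;                                            // drop trailing ';' and spaces
    return std::string(begin, end);                     // copy of the clean text
  }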
@@ -1421,7 +2036,7 @@ mysql_execute_command(void) !(thd->slave_thread || (thd->master_access & SUPER_ACL)) && (uc_update_queries[lex->sql_command] > 0)) { - send_error(&thd->net,ER_CANT_UPDATE_WITH_READLOCK); + net_printf(thd, ER_OPTION_PREVENTS_STATEMENT, "--read-only"); DBUG_VOID_RETURN; } @@ -1429,111 +2044,263 @@ mysql_execute_command(void) switch (lex->sql_command) { case SQLCOM_SELECT: { - select_result *result; - if (select_lex->options & SELECT_DESCRIBE) - lex->exchange=0; + /* assign global limit variable if limit is not given */ + { + SELECT_LEX *param= lex->unit.global_parameters; + if (!param->explicit_limit) + param->select_limit= thd->variables.select_limit; + } + + select_result *result=lex->result; if (tables) { res=check_table_access(thd, lex->exchange ? SELECT_ACL | FILE_ACL : SELECT_ACL, - tables); + tables,0); } else res=check_access(thd, lex->exchange ? SELECT_ACL | FILE_ACL : SELECT_ACL, - any_db); + any_db,0,0,0); if (res) { res=0; break; // Error message is given } - - thd->offset_limit=select_lex->offset_limit; - thd->select_limit=select_lex->select_limit+select_lex->offset_limit; - if (thd->select_limit < select_lex->select_limit) - thd->select_limit= HA_POS_ERROR; // no limit - if (thd->select_limit == HA_POS_ERROR) + /* + In case of single SELECT unit->global_parameters points on first SELECT + TODO: move counters to SELECT_LEX + */ + unit->offset_limit_cnt= (ha_rows) unit->global_parameters->offset_limit; + unit->select_limit_cnt= (ha_rows) (unit->global_parameters->select_limit+ + unit->global_parameters->offset_limit); + if (unit->select_limit_cnt < + (ha_rows) unit->global_parameters->select_limit) + unit->select_limit_cnt= HA_POS_ERROR; // no limit + if (unit->select_limit_cnt == HA_POS_ERROR && !select_lex->next_select()) select_lex->options&= ~OPTION_FOUND_ROWS; - if (lex->exchange) + if (!(res=open_and_lock_tables(thd,tables))) { - if (lex->exchange->dumpfile) + if (lex->describe) { - if (!(result=new select_dump(lex->exchange))) + if (!(result= new select_send())) { - res= -1; - break; + send_error(thd, ER_OUT_OF_RESOURCES); + DBUG_VOID_RETURN; + } + else + thd->send_explain_fields(result); + res= mysql_explain_union(thd, &thd->lex->unit, result); + if (lex->describe & DESCRIBE_EXTENDED) + { + char buff[1024]; + String str(buff,(uint32) sizeof(buff), system_charset_info); + str.length(0); + thd->lex->unit.print(&str); + str.append('\0'); + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_YES, str.ptr()); } + result->send_eof(); + delete result; } else { - if (!(result=new select_export(lex->exchange))) + if (!result && !(result= new select_send())) { res= -1; break; } + query_cache_store_query(thd, tables); + res= handle_select(thd, lex, result); + if (result != lex->result) + delete result; } } - else if (!(result=new select_send())) + break; + } + case SQLCOM_PREPARE: + { + char *query_str; + uint query_len; + if (lex->prepared_stmt_code_is_varref) { - res= -1; -#ifdef DELETE_ITEMS - delete select_lex->having; - delete select_lex->where; -#endif - break; + /* This is PREPARE stmt FROM @var. */ + String str; + CHARSET_INFO *to_cs= thd->variables.collation_connection; + bool need_conversion; + user_var_entry *entry; + String *pstr= &str; + uint32 unused; + /* + Convert @var contents to string in connection character set. Although + it is known that int/real/NULL value cannot be a valid query we still + convert it for error messages to uniform. 
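A tiny sketch of the limit arithmetic in the SQLCOM_SELECT branch above: the effective row cap is select_limit plus the offset, and an unsigned wrap-around is treated as "no limit" (the maximum value plays the role of HA_POS_ERROR; the typedef is a stand-in for ha_rows):

  #include <limits>

  typedef unsigned long long ha_rows_t;                                     // stand-in for ha_rows
  static const ha_rows_t NO_LIMIT = std::numeric_limits<ha_rows_t>::max();  // ~ HA_POS_ERROR

  ha_rows_t effective_limit(ha_rows_t select_limit, ha_rows_t offset)
  {
    ha_rows_t cap = select_limit + offset;
    return cap < select_limit ? NO_LIMIT : cap;   // overflow means "no limit"
  }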
+ */ + if ((entry= + (user_var_entry*)hash_search(&thd->user_vars, + (byte*)lex->prepared_stmt_code.str, + lex->prepared_stmt_code.length)) + && entry->value) + { + my_bool is_var_null; + pstr= entry->val_str(&is_var_null, &str, NOT_FIXED_DEC); + /* + NULL value of variable checked early as entry->value so here + we can't get NULL in normal conditions + */ + DBUG_ASSERT(!is_var_null); + if (!pstr) + { + res= -1; + break; // EOM (error should be reported by allocator) + } + } + else + { + /* + variable absent or equal to NULL, so we need to set variable to + something reasonable to get readable error message during parsing + */ + str.set("NULL", 4, &my_charset_latin1); + } + + need_conversion= + String::needs_conversion(pstr->length(), pstr->charset(), + to_cs, &unused); + + query_len= need_conversion? (pstr->length() * to_cs->mbmaxlen) : + pstr->length(); + if (!(query_str= alloc_root(thd->mem_root, query_len+1))) + { + res= -1; + break; // EOM (error should be reported by allocator) + } + + if (need_conversion) + { + uint dummy_errors; + query_len= copy_and_convert(query_str, query_len, to_cs, + pstr->ptr(), pstr->length(), + pstr->charset(), &dummy_errors); + } + else + memcpy(query_str, pstr->ptr(), pstr->length()); + query_str[query_len]= 0; } else { - /* - Normal select: - Change lock if we are using SELECT HIGH PRIORITY, - FOR UPDATE or IN SHARE MODE - - TODO: Delete the following loop when locks is set by sql_yacc - */ - TABLE_LIST *table; - for (table = tables ; table ; table=table->next) - table->lock_type= lex->lock_option; + query_str= lex->prepared_stmt_code.str; + query_len= lex->prepared_stmt_code.length; + DBUG_PRINT("info", ("PREPARE: %.*s FROM '%.*s' \n", + lex->prepared_stmt_name.length, + lex->prepared_stmt_name.str, + query_len, query_str)); } - - if (!(res=open_and_lock_tables(thd,tables))) + thd->command= COM_PREPARE; + if (!mysql_stmt_prepare(thd, query_str, query_len + 1, + &lex->prepared_stmt_name)) + send_ok(thd, 0L, 0L, "Statement prepared"); + break; + } + case SQLCOM_EXECUTE: + { + DBUG_PRINT("info", ("EXECUTE: %.*s\n", + lex->prepared_stmt_name.length, + lex->prepared_stmt_name.str)); + mysql_sql_stmt_execute(thd, &lex->prepared_stmt_name); + lex->prepared_stmt_params.empty(); + break; + } + case SQLCOM_DEALLOCATE_PREPARE: + { + Statement* stmt; + DBUG_PRINT("info", ("DEALLOCATE PREPARE: %.*s\n", + lex->prepared_stmt_name.length, + lex->prepared_stmt_name.str)); + /* We account deallocate in the same manner as mysql_stmt_close */ + statistic_increment(com_stmt_close, &LOCK_status); + if ((stmt= thd->stmt_map.find_by_name(&lex->prepared_stmt_name))) { - query_cache_store_query(thd, tables); - res=handle_select(thd, lex, result); + thd->stmt_map.erase(stmt); + send_ok(thd); } else - delete result; + { + res= -1; + my_error(ER_UNKNOWN_STMT_HANDLER, MYF(0), + lex->prepared_stmt_name.length, lex->prepared_stmt_name.str, + "DEALLOCATE PREPARE"); + } break; } case SQLCOM_DO: - res=mysql_do(thd, *lex->insert_list); + if (tables && ((res= check_table_access(thd, SELECT_ACL, tables,0)) || + (res= open_and_lock_tables(thd,tables)))) + break; + + res= mysql_do(thd, *lex->insert_list); + if (thd->net.report_error) + res= -1; break; case SQLCOM_EMPTY_QUERY: - send_ok(&thd->net); + send_ok(thd); break; + case SQLCOM_HELP: + res= mysqld_help(thd,lex->help_arg); + break; + +#ifndef EMBEDDED_LIBRARY case SQLCOM_PURGE: { if (check_global_access(thd, SUPER_ACL)) goto error; + // PURGE MASTER LOGS TO 'file' res = purge_master_logs(thd, lex->to_log); break; } + case 
SQLCOM_PURGE_BEFORE: + { + if (check_global_access(thd, SUPER_ACL)) + goto error; + // PURGE MASTER LOGS BEFORE 'data' + res = purge_master_logs_before_date(thd, lex->purge_time); + break; + } +#endif + case SQLCOM_SHOW_WARNS: + { + res= mysqld_show_warnings(thd, (ulong) + ((1L << (uint) MYSQL_ERROR::WARN_LEVEL_NOTE) | + (1L << (uint) MYSQL_ERROR::WARN_LEVEL_WARN) | + (1L << (uint) MYSQL_ERROR::WARN_LEVEL_ERROR) + )); + break; + } + case SQLCOM_SHOW_ERRORS: + { + res= mysqld_show_warnings(thd, (ulong) + (1L << (uint) MYSQL_ERROR::WARN_LEVEL_ERROR)); + break; + } case SQLCOM_SHOW_NEW_MASTER: { if (check_global_access(thd, REPL_SLAVE_ACL)) goto error; /* This query don't work now. See comment in repl_failsafe.cc */ #ifndef WORKING_NEW_MASTER - net_printf(&thd->net, ER_NOT_SUPPORTED_YET, "SHOW NEW MASTER"); + net_printf(thd, ER_NOT_SUPPORTED_YET, "SHOW NEW MASTER"); res= 1; #else res = show_new_master(thd); #endif break; } + +#ifdef HAVE_REPLICATION case SQLCOM_SHOW_SLAVE_HOSTS: { if (check_global_access(thd, REPL_SLAVE_ACL)) @@ -1548,12 +2315,15 @@ mysql_execute_command(void) res = show_binlog_events(thd); break; } +#endif + case SQLCOM_BACKUP_TABLE: { if (check_db_used(thd,tables) || - check_table_access(thd,SELECT_ACL, tables) || + check_table_access(thd,SELECT_ACL, tables,0) || check_global_access(thd, FILE_ACL)) goto error; /* purecov: inspected */ + thd->enable_slow_log= opt_log_slow_admin_statements; res = mysql_backup_table(thd, tables); break; @@ -1561,13 +2331,31 @@ mysql_execute_command(void) case SQLCOM_RESTORE_TABLE: { if (check_db_used(thd,tables) || - check_table_access(thd, INSERT_ACL, tables) || + check_table_access(thd, INSERT_ACL, tables,0) || check_global_access(thd, FILE_ACL)) goto error; /* purecov: inspected */ + thd->enable_slow_log= opt_log_slow_admin_statements; res = mysql_restore_table(thd, tables); break; } - + case SQLCOM_ASSIGN_TO_KEYCACHE: + { + if (check_db_used(thd, tables) || + check_access(thd, INDEX_ACL, tables->db, + &tables->grant.privilege, 0, 0)) + goto error; + res= mysql_assign_to_keycache(thd, tables, &lex->name_and_length); + break; + } + case SQLCOM_PRELOAD_KEYS: + { + if (check_db_used(thd, tables) || + check_access(thd, INDEX_ACL, tables->db, + &tables->grant.privilege, 0, 0)) + goto error; + res = mysql_preload_keys(thd, tables); + break; + } #ifdef HAVE_REPLICATION case SQLCOM_CHANGE_MASTER: { @@ -1596,7 +2384,7 @@ mysql_execute_command(void) res = show_binlog_info(thd); break; } - + case SQLCOM_LOAD_MASTER_DATA: // sync with master if (check_global_access(thd, SUPER_ACL)) goto error; @@ -1605,9 +2393,14 @@ mysql_execute_command(void) else res = load_master_data(thd); break; - #endif /* HAVE_REPLICATION */ - +#ifdef HAVE_NDBCLUSTER_DB + case SQLCOM_SHOW_NDBCLUSTER_STATUS: + { + res = ndbcluster_show_status(thd); + break; + } +#endif #ifdef HAVE_INNOBASE_DB case SQLCOM_SHOW_INNODB_STATUS: { @@ -1617,24 +2410,24 @@ mysql_execute_command(void) break; } #endif - #ifdef HAVE_REPLICATION case SQLCOM_LOAD_MASTER_TABLE: { if (!tables->db) tables->db=thd->db; - if (check_access(thd,CREATE_ACL,tables->db,&tables->grant.privilege)) + if (check_access(thd,CREATE_ACL,tables->db,&tables->grant.privilege,0,0)) goto error; /* purecov: inspected */ if (grant_option) { /* Check that the first table has CREATE privilege */ - TABLE_LIST *tmp_table_list=tables->next; - tables->next=0; - bool error=check_grant(thd,CREATE_ACL,tables); - tables->next=tmp_table_list; - if (error) + if (check_grant(thd, CREATE_ACL, tables, 0, 1, 0)) goto error; } + if 
(strlen(tables->real_name) > NAME_LEN) + { + net_printf(thd,ER_WRONG_TABLE_NAME, tables->real_name); + break; + } pthread_mutex_lock(&LOCK_active_mi); /* fetch_master_table will send the error to the client on failure. @@ -1643,7 +2436,7 @@ mysql_execute_command(void) if (!fetch_master_table(thd, tables->db, tables->real_name, active_mi, 0, 0)) { - send_ok(&thd->net); + send_ok(thd); } pthread_mutex_unlock(&LOCK_active_mi); break; @@ -1652,46 +2445,55 @@ mysql_execute_command(void) case SQLCOM_CREATE_TABLE: { - ulong want_priv= ((lex->create_info.options & HA_LEX_CREATE_TMP_TABLE) ? - CREATE_TMP_ACL : CREATE_ACL); - if (!tables->db) - tables->db=thd->db; - lex->create_info.alias= tables->alias; - if (check_access(thd,want_priv,tables->db,&tables->grant.privilege) || - check_merge_table_access(thd, tables->db, - (TABLE_LIST *) - lex->create_info.merge_list.first)) - goto error; /* purecov: inspected */ - if (grant_option && want_priv != CREATE_TMP_ACL) + /* If CREATE TABLE of non-temporary table, do implicit commit */ + if (!(lex->create_info.options & HA_LEX_CREATE_TMP_TABLE)) { - /* Check that the first table has CREATE privilege */ - TABLE_LIST *tmp_table_list=tables->next; - tables->next=0; - bool error=check_grant(thd, want_priv, tables); - tables->next=tmp_table_list; - if (error) - goto error; + if (end_active_trans(thd)) + { + res= -1; + break; + } } - if (strlen(tables->real_name) > NAME_LEN) + else { - net_printf(&thd->net, ER_WRONG_TABLE_NAME, tables->alias); - res=0; - break; + /* So that CREATE TEMPORARY TABLE gets to binlog at commit/rollback */ + thd->options|= OPTION_STATUS_NO_TRANS_UPDATE; } + /* Skip first table, which is the table we are creating */ + TABLE_LIST *create_table, *create_table_local; + tables= lex->unlink_first_table(tables, &create_table, + &create_table_local); + + if ((res= create_table_precheck(thd, tables, create_table))) + goto unsent_create_error; + #ifndef HAVE_READLINK lex->create_info.data_file_name=lex->create_info.index_file_name=0; #else /* Fix names if symlinked tables */ if (append_file_to_dir(thd, &lex->create_info.data_file_name, - tables->real_name) || + create_table->real_name) || append_file_to_dir(thd,&lex->create_info.index_file_name, - tables->real_name)) + create_table->real_name)) { res=-1; - break; + goto unsent_create_error; } #endif /* + If we are using SET CHARSET without DEFAULT, add an implicite + DEFAULT to not confuse old users. (This may change). + */ + if ((lex->create_info.used_fields & + (HA_CREATE_USED_DEFAULT_CHARSET | HA_CREATE_USED_CHARSET)) == + HA_CREATE_USED_CHARSET) + { + lex->create_info.used_fields&= ~HA_CREATE_USED_CHARSET; + lex->create_info.used_fields|= HA_CREATE_USED_DEFAULT_CHARSET; + lex->create_info.default_table_charset= lex->create_info.table_charset; + lex->create_info.table_charset= 0; + } + /* The create-select command will open and read-lock the select table and then create, open and write-lock the new table. If a global read lock steps in, we get a deadlock. 
The write lock waits for @@ -1707,82 +2509,74 @@ mysql_execute_command(void) if (wait_if_global_read_lock(thd, 0, 1)) { res= -1; - break; + goto unsent_create_error; } if (select_lex->item_list.elements) // With select { select_result *result; - if (!(lex->create_info.options & HA_LEX_CREATE_TMP_TABLE) && - check_dup(tables->db, tables->real_name, tables->next)) - { - net_printf(&thd->net,ER_INSERT_TABLE_USED,tables->real_name); - goto error1; - } - if (lex->create_info.used_fields & HA_CREATE_USED_UNION) - { - TABLE_LIST *tab; - for (tab= tables; tab; tab= tab->next) - { - if (check_dup(tables->db, tab->real_name, - (TABLE_LIST*)lex->create_info.merge_list.first)) - { - net_printf(&thd->net, ER_INSERT_TABLE_USED, tab->real_name); - goto error1; - } - } - } - if (tables->next) - { - TABLE_LIST *table; - if (check_table_access(thd, SELECT_ACL, tables->next)) - goto error1; // Error message is given - /* TODO: Delete the following loop when locks is set by sql_yacc */ - for (table = tables->next ; table ; table=table->next) - table->lock_type= lex->lock_option; - } select_lex->options|= SELECT_NO_UNLOCK; - thd->offset_limit=select_lex->offset_limit; - thd->select_limit=select_lex->select_limit+select_lex->offset_limit; - if (thd->select_limit < select_lex->select_limit) - thd->select_limit= HA_POS_ERROR; // No limit - - /* Skip first table, which is the table we are creating */ - lex->select_lex.table_list.first= - (byte*) (((TABLE_LIST *) lex->select_lex.table_list.first)->next); - if (!(res=open_and_lock_tables(thd,tables->next))) + unit->offset_limit_cnt= select_lex->offset_limit; + unit->select_limit_cnt= select_lex->select_limit+ + select_lex->offset_limit; + if (unit->select_limit_cnt < select_lex->select_limit) + unit->select_limit_cnt= HA_POS_ERROR; // No limit + + if (!(res=open_and_lock_tables(thd,tables))) { - if ((result=new select_create(tables->db ? tables->db : thd->db, - tables->real_name, + res= -1; // If error + if ((result=new select_create(create_table->db, + create_table->real_name, &lex->create_info, lex->create_list, lex->key_list, - select_lex->item_list,lex->duplicates))) + select_lex->item_list, lex->duplicates, + lex->ignore))) + { + /* + CREATE from SELECT give its SELECT_LEX for SELECT, + and item_list belong to SELECT + */ + select_lex->resolve_mode= SELECT_LEX::SELECT_MODE; res=handle_select(thd, lex, result); - else - res= -1; + select_lex->resolve_mode= SELECT_LEX::NOMATTER_MODE; + } + //reset for PS + lex->create_list.empty(); + lex->key_list.empty(); } } else // regular create { - res = mysql_create_table(thd,tables->db ? tables->db : thd->db, - tables->real_name, - &lex->create_info, - lex->create_list, - lex->key_list,0); + if (lex->name) + res= mysql_create_like_table(thd, create_table, &lex->create_info, + (Table_ident *)lex->name); + else + { + res= mysql_create_table(thd,create_table->db, + create_table->real_name, &lex->create_info, + lex->create_list, + lex->key_list,0,0); + } if (!res) - send_ok(&thd->net); + send_ok(thd); } /* Release the protection against the global read lock and wake everyone, who might want to set a global read lock. 
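
A few lines up in this CREATE TABLE branch, a CHARACTER SET given without DEFAULT is silently promoted to a DEFAULT CHARACTER SET by shuffling two used_fields bits. A minimal sketch of that normalization; the flag values and struct below are illustrative stand-ins, not the real HA_CREATE_USED_* constants:

#include <stdio.h>

#define USED_DEFAULT_CHARSET (1U << 0)   /* stand-in flag values */
#define USED_CHARSET         (1U << 1)

struct create_info_sketch
{
  unsigned    used_fields;
  const char *table_charset;             /* CHARACTER SET ...         */
  const char *default_table_charset;     /* DEFAULT CHARACTER SET ... */
};

/* CHARSET given without DEFAULT: promote it to the table default. */
static void normalize_charset_flags(struct create_info_sketch *ci)
{
  if ((ci->used_fields & (USED_DEFAULT_CHARSET | USED_CHARSET)) == USED_CHARSET)
  {
    ci->used_fields&= ~USED_CHARSET;
    ci->used_fields|= USED_DEFAULT_CHARSET;
    ci->default_table_charset= ci->table_charset;
    ci->table_charset= 0;
  }
}

int main(void)
{
  struct create_info_sketch ci= { USED_CHARSET, "latin1", 0 };
  normalize_charset_flags(&ci);
  printf("%u %s\n", ci.used_fields, ci.default_table_charset);   /* 1 latin1 */
  return 0;
}
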
*/ start_waiting_global_read_lock(thd); + +unsent_create_error: + // put tables back for PS rexecuting + tables= lex->link_first_table_back(tables, create_table, + create_table_local); break; } case SQLCOM_CREATE_INDEX: - if (check_one_table_access(thd, INDEX_ACL, tables, 0)) + if (check_one_table_access(thd, INDEX_ACL, tables)) goto error; /* purecov: inspected */ + thd->enable_slow_log= opt_log_slow_admin_statements; if (end_active_trans(thd)) res= -1; else @@ -1811,9 +2605,9 @@ mysql_execute_command(void) To prevent that, refuse SLAVE STOP if the client thread has locked tables */ - if (thd->locked_tables || thd->active_transaction()) + if (thd->locked_tables || thd->active_transaction() || thd->global_read_lock) { - send_error(&thd->net,ER_LOCK_OR_ACTIVE_TRANSACTION); + send_error(thd,ER_LOCK_OR_ACTIVE_TRANSACTION); break; } { @@ -1826,28 +2620,45 @@ mysql_execute_command(void) case SQLCOM_ALTER_TABLE: #if defined(DONT_ALLOW_SHOW_COMMANDS) - send_error(&thd->net,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ + send_error(thd,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ break; #else { ulong priv=0; if (lex->name && (!lex->name[0] || strlen(lex->name) > NAME_LEN)) { - net_printf(&thd->net,ER_WRONG_TABLE_NAME,lex->name); + net_printf(thd, ER_WRONG_TABLE_NAME, lex->name); res= 1; break; } if (!select_lex->db) - select_lex->db=tables->db; - if (check_access(thd,ALTER_ACL,tables->db,&tables->grant.privilege) || - check_access(thd,INSERT_ACL | CREATE_ACL,select_lex->db,&priv) || - check_merge_table_access(thd, tables->db, + { + /* + In the case of ALTER TABLE ... RENAME we should supply the + default database if the new name is not explicitly qualified + by a database. (Bug #11493) + */ + if (lex->alter_info.flags & ALTER_RENAME) + { + if (! thd->db) + { + send_error(thd,ER_NO_DB_ERROR); + goto error; + } + select_lex->db= thd->db; + } + else + select_lex->db=tables->db; + } + if (check_access(thd,ALTER_ACL,tables->db,&tables->grant.privilege,0,0) || + check_access(thd,INSERT_ACL | CREATE_ACL,select_lex->db,&priv,0,0)|| + check_merge_table_access(thd, tables->db, (TABLE_LIST *) lex->create_info.merge_list.first)) goto error; /* purecov: inspected */ if (grant_option) { - if (check_grant(thd,ALTER_ACL,tables)) + if (check_grant(thd, ALTER_ACL, tables, 0, UINT_MAX, 0)) goto error; if (lex->name && !test_all_bits(priv,INSERT_ACL | CREATE_ACL)) { // Rename of table @@ -1856,7 +2667,8 @@ mysql_execute_command(void) tmp_table.real_name=lex->name; tmp_table.db=select_lex->db; tmp_table.grant.privilege=priv; - if (check_grant(thd,INSERT_ACL | CREATE_ACL, &tmp_table)) + if (check_grant(thd, INSERT_ACL | CREATE_ACL, &tmp_table, 0, + UINT_MAX, 0)) goto error; } } @@ -1867,17 +2679,18 @@ mysql_execute_command(void) res= -1; else { + thd->enable_slow_log= opt_log_slow_admin_statements; res= mysql_alter_table(thd, select_lex->db, lex->name, &lex->create_info, tables, lex->create_list, - lex->key_list, lex->drop_list, lex->alter_list, + lex->key_list, + select_lex->order_list.elements, (ORDER *) select_lex->order_list.first, - lex->drop_primary, lex->duplicates, - lex->alter_keys_onoff, lex->simple_alter); + lex->duplicates, lex->ignore, &lex->alter_info); } break; } -#endif +#endif /*DONT_ALLOW_SHOW_COMMANDS*/ case SQLCOM_RENAME_TABLE: { TABLE_LIST *table; @@ -1886,20 +2699,25 @@ mysql_execute_command(void) for (table=tables ; table ; table=table->next->next) { if (check_access(thd, ALTER_ACL | DROP_ACL, table->db, - &table->grant.privilege) || + &table->grant.privilege,0,0) || check_access(thd, 
INSERT_ACL | CREATE_ACL, table->next->db, - &table->next->grant.privilege)) + &table->next->grant.privilege,0,0)) goto error; if (grant_option) { TABLE_LIST old_list,new_list; + /* + we do not need initialize old_list and new_list because we will + come table[0] and table->next[0] there + */ old_list=table[0]; new_list=table->next[0]; old_list.next=new_list.next=0; - if (check_grant(thd,ALTER_ACL,&old_list) || + if (check_grant(thd, ALTER_ACL, &old_list, 0, UINT_MAX, 0) || (!test_all_bits(table->next->grant.privilege, INSERT_ACL | CREATE_ACL) && - check_grant(thd,INSERT_ACL | CREATE_ACL, &new_list))) + check_grant(thd, INSERT_ACL | CREATE_ACL, &new_list, 0, + UINT_MAX, 0))) goto error; } } @@ -1910,9 +2728,10 @@ mysql_execute_command(void) res= -1; break; } +#ifndef EMBEDDED_LIBRARY case SQLCOM_SHOW_BINLOGS: #ifdef DONT_ALLOW_SHOW_COMMANDS - send_error(&thd->net,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ + send_error(thd,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ DBUG_VOID_RETURN; #else { @@ -1921,168 +2740,172 @@ mysql_execute_command(void) res = show_binlogs(thd); break; } -#endif +#endif +#endif /* EMBEDDED_LIBRARY */ case SQLCOM_SHOW_CREATE: #ifdef DONT_ALLOW_SHOW_COMMANDS - send_error(&thd->net,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ + send_error(thd,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ DBUG_VOID_RETURN; #else { if (check_db_used(thd, tables) || check_access(thd, SELECT_ACL | EXTRA_ACL, tables->db, - &tables->grant.privilege)) + &tables->grant.privilege,0,0)) goto error; - res = mysqld_show_create(thd, tables); + if (grant_option && check_grant(thd, SELECT_ACL, tables, 2, UINT_MAX, 0)) + goto error; + res= mysqld_show_create(thd, tables); break; } #endif + case SQLCOM_CHECKSUM: + { + if (check_db_used(thd,tables) || + check_table_access(thd, SELECT_ACL | EXTRA_ACL , tables,0)) + goto error; /* purecov: inspected */ + res = mysql_checksum_table(thd, tables, &lex->check_opt); + break; + } case SQLCOM_REPAIR: { if (check_db_used(thd,tables) || - check_table_access(thd,SELECT_ACL | INSERT_ACL, tables)) + check_table_access(thd,SELECT_ACL | INSERT_ACL, tables,0)) goto error; /* purecov: inspected */ + thd->enable_slow_log= opt_log_slow_admin_statements; res = mysql_repair_table(thd, tables, &lex->check_opt); + /* ! we write after unlocking the table */ + if (!res && !lex->no_write_to_binlog) + { + mysql_update_log.write(thd, thd->query, thd->query_length); + if (mysql_bin_log.is_open()) + { + thd->clear_error(); // No binlog error generated + Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); + mysql_bin_log.write(&qinfo); + } + } break; } case SQLCOM_CHECK: { if (check_db_used(thd,tables) || - check_table_access(thd, SELECT_ACL | EXTRA_ACL , tables)) + check_table_access(thd, SELECT_ACL | EXTRA_ACL , tables,0)) goto error; /* purecov: inspected */ + thd->enable_slow_log= opt_log_slow_admin_statements; res = mysql_check_table(thd, tables, &lex->check_opt); break; } case SQLCOM_ANALYZE: { if (check_db_used(thd,tables) || - check_table_access(thd,SELECT_ACL | INSERT_ACL, tables)) + check_table_access(thd,SELECT_ACL | INSERT_ACL, tables,0)) goto error; /* purecov: inspected */ + thd->enable_slow_log= opt_log_slow_admin_statements; res = mysql_analyze_table(thd, tables, &lex->check_opt); + /* ! 
we write after unlocking the table */ + if (!res && !lex->no_write_to_binlog) + { + mysql_update_log.write(thd, thd->query, thd->query_length); + if (mysql_bin_log.is_open()) + { + thd->clear_error(); // No binlog error generated + Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); + mysql_bin_log.write(&qinfo); + } + } break; } case SQLCOM_OPTIMIZE: { - HA_CREATE_INFO create_info; if (check_db_used(thd,tables) || - check_table_access(thd,SELECT_ACL | INSERT_ACL, tables)) + check_table_access(thd,SELECT_ACL | INSERT_ACL, tables,0)) goto error; /* purecov: inspected */ - if (specialflag & (SPECIAL_SAFE_MODE | SPECIAL_NO_NEW_FUNC)) - { - /* Use ALTER TABLE */ - lex->create_list.empty(); - lex->key_list.empty(); - lex->col_list.empty(); - lex->drop_list.empty(); - lex->alter_list.empty(); - bzero((char*) &create_info,sizeof(create_info)); - create_info.db_type=DB_TYPE_DEFAULT; - create_info.row_type=ROW_TYPE_DEFAULT; - res= mysql_alter_table(thd, NullS, NullS, &create_info, - tables, lex->create_list, - lex->key_list, lex->drop_list, lex->alter_list, - (ORDER *) 0, - 0,DUP_ERROR); + thd->enable_slow_log= opt_log_slow_admin_statements; + res= (specialflag & (SPECIAL_SAFE_MODE | SPECIAL_NO_NEW_FUNC)) ? + mysql_recreate_table(thd, tables, 1) : + mysql_optimize_table(thd, tables, &lex->check_opt); + /* ! we write after unlocking the table */ + if (!res && !lex->no_write_to_binlog) + { + mysql_update_log.write(thd, thd->query, thd->query_length); + if (mysql_bin_log.is_open()) + { + thd->clear_error(); // No binlog error generated + Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); + mysql_bin_log.write(&qinfo); + } } - else - res = mysql_optimize_table(thd, tables, &lex->check_opt); break; } case SQLCOM_UPDATE: - if (check_db_used(thd,tables)) - goto error; - if (select_lex->item_list.elements != lex->value_list.elements) - { - send_error(&thd->net,ER_WRONG_VALUE_COUNT); - DBUG_VOID_RETURN; - } - if (check_one_table_access(thd, UPDATE_ACL, tables, 0)) - goto error; /* purecov: inspected */ - - + if (update_precheck(thd, tables)) + break; res= mysql_update(thd,tables, - select_lex->item_list, - lex->value_list, - select_lex->where, - (ORDER *) select_lex->order_list.first, - select_lex->select_limit, - lex->duplicates); - break; - case SQLCOM_MULTI_UPDATE: - if (check_db_used(thd,tables)) - goto error; - if (select_lex->item_list.elements != lex->value_list.elements) - { - send_error(&thd->net,ER_WRONG_VALUE_COUNT); - goto error; - } - { - const char *msg= 0; - TABLE_LIST *table; - lex->sql_command= SQLCOM_MULTI_UPDATE; + select_lex->item_list, + lex->value_list, + select_lex->where, + select_lex->order_list.elements, + (ORDER *) select_lex->order_list.first, + select_lex->select_limit, + lex->duplicates, lex->ignore); + if (thd->net.report_error) + res= -1; + break; + case SQLCOM_UPDATE_MULTI: + { + if ((res= multi_update_precheck(thd, tables))) + break; - /* - Ensure that we have UPDATE or SELECT privilege for each table - The exact privilege is checked in mysql_multi_update() - */ - for (table= tables ; table ; table= table->next) - { - TABLE_LIST *save= table->next; - table->next= 0; - if (check_one_table_access(thd, UPDATE_ACL, table, 1) && - check_one_table_access(thd, SELECT_ACL, table, 0)) - goto error; - table->next= save; - } - if (select_lex->order_list.elements) - msg="ORDER BY"; - else if (select_lex->select_limit && select_lex->select_limit != - HA_POS_ERROR) - msg="LIMIT"; - if (msg) + res= mysql_multi_update_lock(thd, tables, 
&select_lex->item_list, + select_lex); +#ifdef HAVE_REPLICATION + /* Check slave filtering rules */ + if (thd->slave_thread) + if (all_tables_not_ok(thd,tables)) { - net_printf(&thd->net, ER_WRONG_USAGE, "UPDATE", msg); - res= 1; + if (res!= 0) + { + res= 0; /* don't care of prev failure */ + thd->clear_error(); /* filters are of highest prior */ + } + /* we warn the slave SQL thread */ + my_error(ER_SLAVE_IGNORED_TABLE, MYF(0)); break; } - res= mysql_multi_update(thd,tables, - &select_lex->item_list, - &lex->value_list, - select_lex->where, - select_lex->options, - lex->duplicates); - } - break; - case SQLCOM_INSERT: - if (check_one_table_access(thd, INSERT_ACL, tables, 0)) - goto error; /* purecov: inspected */ - res = mysql_insert(thd,tables,lex->field_list,lex->many_values, - lex->duplicates); +#endif /* HAVE_REPLICATION */ + if (res) + break; + + res= mysql_multi_update(thd,tables, + &select_lex->item_list, + &lex->value_list, + select_lex->where, + select_lex->options, + lex->duplicates, lex->ignore, unit, select_lex); break; + } case SQLCOM_REPLACE: - if (check_one_table_access(thd, INSERT_ACL | DELETE_ACL, tables, 0)) - goto error; /* purecov: inspected */ - res = mysql_insert(thd,tables,lex->field_list,lex->many_values, - DUP_REPLACE); + case SQLCOM_INSERT: + { + if ((res= insert_precheck(thd, tables))) + break; + res= mysql_insert(thd,tables,lex->field_list,lex->many_values, + lex->update_list, lex->value_list, + lex->duplicates, lex->ignore); + if (thd->net.report_error) + res= -1; break; + } case SQLCOM_REPLACE_SELECT: case SQLCOM_INSERT_SELECT: { - { - /* - Check that we have modify privileges for the first table and - select privileges for the rest - */ - ulong privilege= (lex->sql_command == SQLCOM_INSERT_SELECT ? - INSERT_ACL : INSERT_ACL | DELETE_ACL); - TABLE_LIST *save_next=tables->next; - tables->next=0; - if (check_one_table_access(thd, privilege, tables, 0)) - goto error; - tables->next=save_next; - if ((res=check_table_access(thd, SELECT_ACL, save_next))) - goto error; - } + TABLE_LIST *first_local_table= (TABLE_LIST *) select_lex->table_list.first; + TABLE_LIST dup_tables; + TABLE *insert_table; + if ((res= insert_precheck(thd, tables))) + break; + /* Fix lock for first table */ if (tables->lock_type == TL_WRITE_DELAYED) tables->lock_type= TL_WRITE; @@ -2091,122 +2914,154 @@ mysql_execute_command(void) select_lex->options|= SELECT_NO_UNLOCK; select_result *result; - thd->offset_limit=select_lex->offset_limit; - thd->select_limit=select_lex->select_limit+select_lex->offset_limit; - if (thd->select_limit < select_lex->select_limit) - thd->select_limit= HA_POS_ERROR; // No limit + unit->offset_limit_cnt= select_lex->offset_limit; + unit->select_limit_cnt= select_lex->select_limit+select_lex->offset_limit; + if (unit->select_limit_cnt < select_lex->select_limit) + unit->select_limit_cnt= HA_POS_ERROR; // No limit + + if ((res= open_and_lock_tables(thd, tables))) + break; + insert_table= tables->table; + /* MERGE sub-tables can only be detected after open. 
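
The OPTION_BUFFER_RESULT flag set just below forces the SELECT part of INSERT ... SELECT to be materialized before any row is written, so a statement whose source and target are the same table (possibly hidden behind a MERGE table, hence the check only after open) never re-reads rows it has just inserted. A loose standalone analogy using a container instead of a table:

#include <stdio.h>
#include <vector>

int main()
{
  std::vector<int> t;                          /* plays the role of the table */
  t.push_back(1); t.push_back(2); t.push_back(3);

  /* "Buffered result": snapshot the SELECT rows first, then insert them. */
  std::vector<int> snapshot(t.begin(), t.end());
  for (size_t i= 0; i < snapshot.size(); i++)
    t.push_back(snapshot[i] * 10);             /* never re-reads the new rows */

  for (size_t i= 0; i < t.size(); i++)
    printf("%d ", t[i]);                       /* 1 2 3 10 20 30 */
  printf("\n");
  return 0;
}
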
*/ + if (mysql_lock_have_duplicate(thd, insert_table, tables->next)) { - /* TODO: Delete the following loop when locks is set by sql_yacc */ - TABLE_LIST *table; - for (table = tables->next ; table ; table=table->next) - table->lock_type= lex->lock_option; + /* Using same table for INSERT and SELECT */ + select_lex->options |= OPTION_BUFFER_RESULT; } /* Skip first table, which is the table we are inserting in */ - lex->select_lex.table_list.first= - (byte*) (((TABLE_LIST *) lex->select_lex.table_list.first)->next); - if (!(res=open_and_lock_tables(thd, tables))) + select_lex->table_list.first= (byte*) first_local_table->next; + tables= (TABLE_LIST *) select_lex->table_list.first; + dup_tables= *first_local_table; + first_local_table->next= 0; + if (select_lex->group_list.elements != 0) { - /* MERGE sub-tables can only be detected after open. */ - if (mysql_lock_have_duplicate(thd, tables->table, tables->next)) - { - /* Using same table for INSERT and SELECT */ - select_lex->options |= OPTION_BUFFER_RESULT; - } - if ((result=new select_insert(tables->table,&lex->field_list, - lex->duplicates))) - res=handle_select(thd,lex,result); + /* + When we are using GROUP BY we can't refere to other tables in the + ON DUPLICATE KEY part + */ + dup_tables.next= 0; + } + + if (!(res= mysql_prepare_insert(thd, tables, first_local_table, + &dup_tables, insert_table, + lex->field_list, 0, + lex->update_list, lex->value_list, + lex->duplicates)) && + (result= new select_insert(insert_table, first_local_table, + &dup_tables, &lex->field_list, + &lex->update_list, &lex->value_list, + lex->duplicates, lex->ignore))) + { + /* + insert/replace from SELECT give its SELECT_LEX for SELECT, + and item_list belong to SELECT + */ + lex->select_lex.resolve_mode= SELECT_LEX::SELECT_MODE; + res= handle_select(thd, lex, result); + /* revert changes for SP */ + lex->select_lex.resolve_mode= SELECT_LEX::INSERT_MODE; + delete result; + if (thd->net.report_error) + res= -1; } else res= -1; + insert_table->insert_values= 0; // Set by mysql_prepare_insert() + first_local_table->next= tables; + lex->select_lex.table_list.first= (byte*) first_local_table; break; } case SQLCOM_TRUNCATE: - if (check_one_table_access(thd, DELETE_ACL, tables, 0)) - goto error; /* purecov: inspected */ + if (end_active_trans(thd)) + { + res= -1; + break; + } + if (check_one_table_access(thd, DELETE_ACL, tables)) + goto error; /* Don't allow this within a transaction because we want to use re-generate table */ if (thd->locked_tables || thd->active_transaction()) { - send_error(&thd->net,ER_LOCK_OR_ACTIVE_TRANSACTION,NullS); + send_error(thd,ER_LOCK_OR_ACTIVE_TRANSACTION,NullS); goto error; } res=mysql_truncate(thd, tables, 0); break; case SQLCOM_DELETE: { - if (check_one_table_access(thd, DELETE_ACL, tables, 0)) - goto error; - // Set privilege for the WHERE clause - tables->grant.want_privilege=(SELECT_ACL & ~tables->grant.privilege); + if ((res= delete_precheck(thd, tables))) + break; res = mysql_delete(thd,tables, select_lex->where, - (ORDER*) select_lex->order_list.first, + &select_lex->order_list, select_lex->select_limit, select_lex->options); + if (thd->net.report_error) + res= -1; break; } case SQLCOM_DELETE_MULTI: { - TABLE_LIST *aux_tables=(TABLE_LIST *)thd->lex.auxilliary_table_list.first; - TABLE_LIST *auxi; - uint table_count=0; + TABLE_LIST *aux_tables= + (TABLE_LIST *)thd->lex->auxilliary_table_list.first; + TABLE_LIST *target_tbl; + uint table_count; multi_delete *result; - /* sql_yacc guarantees that tables and aux_tables are not zero 
*/ - if (check_db_used(thd, tables) || check_db_used(thd,aux_tables) || - check_table_access(thd,SELECT_ACL, tables) || - check_table_access(thd,DELETE_ACL, aux_tables)) - goto error; - if ((thd->options & OPTION_SAFE_UPDATES) && !select_lex->where) - { - send_error(&thd->net,ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE); - goto error; - } - for (auxi=(TABLE_LIST*) aux_tables ; auxi ; auxi=auxi->next) - { - table_count++; - /* All tables in aux_tables must be found in FROM PART */ - TABLE_LIST *walk; - for (walk=(TABLE_LIST*) tables ; walk ; walk=walk->next) - { - if (!strcmp(auxi->real_name,walk->real_name) && - !strcmp(walk->db,auxi->db)) - break; - } - if (!walk) - { - net_printf(&thd->net,ER_NONUNIQ_TABLE,auxi->real_name); - goto error; - } - walk->lock_type= auxi->lock_type; - // Store address to table as we need it later - auxi->table= my_reinterpret_cast(TABLE *) (walk); - } - if (add_item_to_list(new Item_null())) + if ((res= multi_delete_precheck(thd, tables, &table_count))) + break; + + /* condition will be TRUE on SP re-excuting */ + if (select_lex->item_list.elements != 0) + select_lex->item_list.empty(); + if (add_item_to_list(thd, new Item_null())) { res= -1; break; } - tables->grant.want_privilege=(SELECT_ACL & ~tables->grant.privilege); + thd->proc_info="init"; if ((res=open_and_lock_tables(thd,tables))) break; /* Fix tables-to-be-deleted-from list to point at opened tables */ - for (auxi=(TABLE_LIST*) aux_tables ; auxi ; auxi=auxi->next) - auxi->table= (my_reinterpret_cast(TABLE_LIST*) (auxi->table))->table; - - if (!thd->fatal_error && (result= new multi_delete(thd,aux_tables, - table_count))) - { - res=mysql_select(thd,tables,select_lex->item_list, - select_lex->where, - (ORDER *)NULL,(ORDER *)NULL,(Item *)NULL, - (ORDER *)NULL, - select_lex->options | thd->options | - SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK, - result); + for (target_tbl= (TABLE_LIST*) aux_tables; + target_tbl; + target_tbl= target_tbl->next) + { + TABLE_LIST *orig= target_tbl->table_list; + target_tbl->table= orig->table; + /* + Multi-delete can't be constructed over-union => we always have + single SELECT on top and have to check underlying SELECTs of it + */ + if (lex->select_lex.check_updateable_in_subqueries(orig->db, + orig->real_name)) + { + my_error(ER_UPDATE_TABLE_USED, MYF(0), + orig->real_name); + res= -1; + break; + } + } + + if (!thd->is_fatal_error && (result= new multi_delete(thd,aux_tables, + table_count))) + { + res= mysql_select(thd, &select_lex->ref_pointer_array, + select_lex->get_table_list(), + select_lex->with_wild, + select_lex->item_list, + select_lex->where, + 0, (ORDER *)NULL, (ORDER *)NULL, (Item *)NULL, + (ORDER *)NULL, + select_lex->options | thd->options | + SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK, + result, unit, select_lex); + if (thd->net.report_error) + res= -1; delete result; } else @@ -2216,35 +3071,46 @@ mysql_execute_command(void) } case SQLCOM_DROP_TABLE: { - if (check_table_access(thd,DROP_ACL,tables)) - goto error; /* purecov: inspected */ - /* - If this is a slave thread, we may sometimes execute some - DROP / * 40005 TEMPORARY * / TABLE - that come from parts of binlogs (likely if we use RESET SLAVE or CHANGE - MASTER TO), while the temporary table has already been dropped. - To not generate such irrelevant "table does not exist errors", - we silently add IF EXISTS if TEMPORARY was used. 
- */ - if (thd->slave_thread && lex->drop_temporary) - lex->drop_if_exists= 1; - if (end_active_trans(thd)) - res= -1; + if (!lex->drop_temporary) + { + if (check_table_access(thd,DROP_ACL,tables,0)) + goto error; /* purecov: inspected */ + if (end_active_trans(thd)) + { + res= -1; + break; + } + } else - res = mysql_rm_table(thd,tables,lex->drop_if_exists); + { + /* + If this is a slave thread, we may sometimes execute some + DROP / * 40005 TEMPORARY * / TABLE + that come from parts of binlogs (likely if we use RESET SLAVE or CHANGE + MASTER TO), while the temporary table has already been dropped. + To not generate such irrelevant "table does not exist errors", + we silently add IF EXISTS if TEMPORARY was used. + */ + if (thd->slave_thread) + lex->drop_if_exists= 1; + + /* So that DROP TEMPORARY TABLE gets to binlog at commit/rollback */ + thd->options|= OPTION_STATUS_NO_TRANS_UPDATE; + } + res= mysql_rm_table(thd,tables,lex->drop_if_exists, lex->drop_temporary); } break; case SQLCOM_DROP_INDEX: - if (check_one_table_access(thd, INDEX_ACL, tables, 0)) - goto error; /* purecov: inspected */ + if (check_one_table_access(thd, INDEX_ACL, tables)) + goto error; /* purecov: inspected */ if (end_active_trans(thd)) res= -1; else - res = mysql_drop_index(thd, tables, lex->drop_list); + res = mysql_drop_index(thd, tables, &lex->alter_info); break; case SQLCOM_SHOW_DATABASES: #if defined(DONT_ALLOW_SHOW_COMMANDS) - send_error(&thd->net,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ + send_error(thd,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ DBUG_VOID_RETURN; #else if ((specialflag & SPECIAL_SKIP_SHOW_DB) && @@ -2256,9 +3122,19 @@ mysql_execute_command(void) case SQLCOM_SHOW_PROCESSLIST: if (!thd->priv_user[0] && check_global_access(thd,PROCESS_ACL)) break; - mysqld_list_processes(thd,thd->master_access & PROCESS_ACL ? NullS : + mysqld_list_processes(thd, + thd->master_access & PROCESS_ACL ? NullS : thd->priv_user,lex->verbose); break; + case SQLCOM_SHOW_STORAGE_ENGINES: + res= mysqld_show_storage_engines(thd); + break; + case SQLCOM_SHOW_PRIVILEGES: + res= mysqld_show_privileges(thd); + break; + case SQLCOM_SHOW_COLUMN_TYPES: + res= mysqld_show_column_types(thd); + break; case SQLCOM_SHOW_STATUS: res= mysqld_show(thd,(lex->wild ? lex->wild->ptr() : NullS),status_vars, OPT_GLOBAL, &LOCK_status); @@ -2269,41 +3145,48 @@ mysql_execute_command(void) &LOCK_global_system_variables); break; case SQLCOM_SHOW_LOGS: - { - res= mysqld_show_logs(thd); - break; - } +#ifdef DONT_ALLOW_SHOW_COMMANDS + send_error(thd,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ + DBUG_VOID_RETURN; +#else + { + if (grant_option && check_access(thd, FILE_ACL, any_db,0,0,0)) + goto error; + res= mysqld_show_logs(thd); + break; + } +#endif case SQLCOM_SHOW_TABLES: /* FALL THROUGH */ #ifdef DONT_ALLOW_SHOW_COMMANDS - send_error(&thd->net,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ + send_error(thd,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ DBUG_VOID_RETURN; #else { char *db=select_lex->db ? 
select_lex->db : thd->db; if (!db) { - send_error(&thd->net,ER_NO_DB_ERROR); /* purecov: inspected */ + send_error(thd,ER_NO_DB_ERROR); /* purecov: inspected */ goto error; /* purecov: inspected */ } remove_escape(db); // Fix escaped '_' if (check_db_name(db)) { - net_printf(&thd->net,ER_WRONG_DB_NAME, db); + net_printf(thd,ER_WRONG_DB_NAME, db); goto error; } - if (check_access(thd,SELECT_ACL,db,&thd->col_access)) + if (check_access(thd,SELECT_ACL,db,&thd->col_access,0,0)) goto error; /* purecov: inspected */ if (!thd->col_access && check_grant_db(thd,db)) { - net_printf(&thd->net,ER_DBACCESS_DENIED_ERROR, + net_printf(thd, ER_DBACCESS_DENIED_ERROR, thd->priv_user, thd->priv_host, db); goto error; } /* grant is checked in mysqld_show_tables */ - if (select_lex->options & SELECT_DESCRIBE) + if (lex->describe) res= mysqld_extend_show_tables(thd,db, (lex->wild ? lex->wild->ptr() : NullS)); else @@ -2315,9 +3198,15 @@ mysql_execute_command(void) case SQLCOM_SHOW_OPEN_TABLES: res= mysqld_show_open_tables(thd,(lex->wild ? lex->wild->ptr() : NullS)); break; + case SQLCOM_SHOW_CHARSETS: + res= mysqld_show_charsets(thd,(lex->wild ? lex->wild->ptr() : NullS)); + break; + case SQLCOM_SHOW_COLLATIONS: + res= mysqld_show_collations(thd,(lex->wild ? lex->wild->ptr() : NullS)); + break; case SQLCOM_SHOW_FIELDS: #ifdef DONT_ALLOW_SHOW_COMMANDS - send_error(&thd->net,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ + send_error(thd,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ DBUG_VOID_RETURN; #else { @@ -2325,9 +3214,9 @@ mysql_execute_command(void) remove_escape(db); // Fix escaped '_' remove_escape(tables->real_name); if (check_access(thd,SELECT_ACL | EXTRA_ACL,db, - &tables->grant.privilege)) + &tables->grant.privilege, 0, 0)) goto error; /* purecov: inspected */ - if (grant_option && check_grant(thd,SELECT_ACL,tables,2)) + if (grant_option && check_grant(thd, SELECT_ACL, tables, 2, UINT_MAX, 0)) goto error; res= mysqld_show_fields(thd,tables, (lex->wild ? lex->wild->ptr() : NullS), @@ -2337,16 +3226,17 @@ mysql_execute_command(void) #endif case SQLCOM_SHOW_KEYS: #ifdef DONT_ALLOW_SHOW_COMMANDS - send_error(&thd->net,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ + send_error(thd,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ DBUG_VOID_RETURN; #else { char *db=tables->db; remove_escape(db); // Fix escaped '_' remove_escape(tables->real_name); - if (check_access(thd,SELECT_ACL,db,&tables->grant.privilege)) - goto error; /* purecov: inspected */ - if (grant_option && check_grant(thd,SELECT_ACL,tables,2)) + if (check_access(thd,SELECT_ACL | EXTRA_ACL,db, + &tables->grant.privilege, 0, 0)) + goto error; /* purecov: inspected */ + if (grant_option && check_grant(thd, SELECT_ACL, tables, 2, UINT_MAX, 0)) goto error; res= mysqld_show_keys(thd,tables); break; @@ -2355,14 +3245,15 @@ mysql_execute_command(void) case SQLCOM_CHANGE_DB: mysql_change_db(thd,select_lex->db); break; + case SQLCOM_LOAD: { uint privilege= (lex->duplicates == DUP_REPLACE ? - INSERT_ACL | UPDATE_ACL | DELETE_ACL : INSERT_ACL); + INSERT_ACL | DELETE_ACL : INSERT_ACL); if (!lex->local_file) { - if (check_access(thd,privilege | FILE_ACL,tables->db)) + if (check_access(thd,privilege | FILE_ACL,tables->db,0,0,0)) goto error; } else @@ -2370,21 +3261,51 @@ mysql_execute_command(void) if (!(thd->client_capabilities & CLIENT_LOCAL_FILES) || ! 
opt_local_infile) { - send_error(&thd->net,ER_NOT_ALLOWED_COMMAND); + send_error(thd,ER_NOT_ALLOWED_COMMAND); goto error; } - if (check_one_table_access(thd, privilege, tables, 0)) + if (check_one_table_access(thd, privilege, tables)) goto error; } res=mysql_load(thd, lex->exchange, tables, lex->field_list, - lex->duplicates, (bool) lex->local_file, lex->lock_option); + lex->duplicates, lex->ignore, (bool) lex->local_file, lex->lock_option); break; } + case SQLCOM_SET_OPTION: - if (!(res=sql_set_variables(thd, &lex->var_list))) - send_ok(&thd->net); + { + List<set_var_base> *lex_var_list= &lex->var_list; + if (tables && ((res= check_table_access(thd, SELECT_ACL, tables,0)) || + (res= open_and_lock_tables(thd,tables)))) + break; + if (lex->one_shot_set && not_all_support_one_shot(lex_var_list)) + { + my_printf_error(0, "The SET ONE_SHOT syntax is reserved for \ +purposes internal to the MySQL server", MYF(0)); + res= -1; + break; + } + if (!(res= sql_set_variables(thd, lex_var_list))) + { + /* + If the previous command was a SET ONE_SHOT, we don't want to forget + about the ONE_SHOT property of that SET. So we use a |= instead of = . + */ + thd->one_shot_set|= lex->one_shot_set; + send_ok(thd); + } + if (thd->net.report_error) + res= -1; break; + } + case SQLCOM_UNLOCK_TABLES: + /* + It is critical for mysqldump --single-transaction --master-data that + UNLOCK TABLES does not implicitely commit a connection which has only + done FLUSH TABLES WITH READ LOCK + BEGIN. If this assumption becomes + false, mysqldump will not work. + */ unlock_locked_tables(thd); if (thd->options & OPTION_TABLE_LOCK) { @@ -2393,13 +3314,13 @@ mysql_execute_command(void) } if (thd->global_read_lock) unlock_global_read_lock(thd); - send_ok(&thd->net); + send_ok(thd); break; case SQLCOM_LOCK_TABLES: unlock_locked_tables(thd); if (check_db_used(thd,tables) || end_active_trans(thd)) goto error; - if (check_table_access(thd, LOCK_TABLES_ACL | SELECT_ACL, tables)) + if (check_table_access(thd, LOCK_TABLES_ACL | SELECT_ACL, tables,0)) goto error; thd->in_lock_tables=1; thd->options|= OPTION_TABLE_LOCK; @@ -2411,7 +3332,7 @@ mysql_execute_command(void) #endif /*HAVE_QUERY_CACHE*/ thd->locked_tables=thd->lock; thd->lock=0; - send_ok(&thd->net); + send_ok(thd); } else thd->options&= ~(ulong) (OPTION_TABLE_LOCK); @@ -2419,10 +3340,15 @@ mysql_execute_command(void) break; case SQLCOM_CREATE_DB: { + if (end_active_trans(thd)) + { + res= -1; + break; + } char *alias; if (!(alias=thd->strdup(lex->name)) || check_db_name(lex->name)) { - net_printf(&thd->net,ER_WRONG_DB_NAME, lex->name); + net_printf(thd,ER_WRONG_DB_NAME, lex->name); break; } /* @@ -2432,6 +3358,7 @@ mysql_execute_command(void) do_db/ignore_db. And as this query involves no tables, tables_ok() above was not called. So we have to check rules again here. */ +#ifdef HAVE_REPLICATION if (thd->slave_thread && (!db_ok(lex->name, replicate_do_db, replicate_ignore_db) || !db_ok_with_wild_table(lex->name))) @@ -2439,19 +3366,24 @@ mysql_execute_command(void) my_error(ER_SLAVE_IGNORED_TABLE, MYF(0)); break; } - - if (check_access(thd,CREATE_ACL,lex->name,0,1)) +#endif + if (check_access(thd,CREATE_ACL,lex->name,0,1,0)) break; - res=mysql_create_db(thd,(lower_case_table_names == 2 ? alias : lex->name), - lex->create_info.options,0); + res= mysql_create_db(thd,(lower_case_table_names == 2 ? 
alias : lex->name), + &lex->create_info, 0); break; } case SQLCOM_DROP_DB: { + if (end_active_trans(thd)) + { + res= -1; + break; + } char *alias; if (!(alias=thd->strdup(lex->name)) || check_db_name(lex->name)) { - net_printf(&thd->net,ER_WRONG_DB_NAME, lex->name); + net_printf(thd, ER_WRONG_DB_NAME, lex->name); break; } /* @@ -2461,6 +3393,7 @@ mysql_execute_command(void) do_db/ignore_db. And as this query involves no tables, tables_ok() above was not called. So we have to check rules again here. */ +#ifdef HAVE_REPLICATION if (thd->slave_thread && (!db_ok(lex->name, replicate_do_db, replicate_ignore_db) || !db_ok_with_wild_table(lex->name))) @@ -2468,44 +3401,130 @@ mysql_execute_command(void) my_error(ER_SLAVE_IGNORED_TABLE, MYF(0)); break; } - if (check_access(thd,DROP_ACL,lex->name,0,1)) +#endif + if (check_access(thd,DROP_ACL,lex->name,0,1,0)) break; if (thd->locked_tables || thd->active_transaction()) { - send_error(&thd->net,ER_LOCK_OR_ACTIVE_TRANSACTION); + send_error(thd,ER_LOCK_OR_ACTIVE_TRANSACTION); goto error; } res=mysql_rm_db(thd, (lower_case_table_names == 2 ? alias : lex->name), lex->drop_if_exists, 0); break; } + case SQLCOM_ALTER_DB: + { + char *db= lex->name ? lex->name : thd->db; + if (!db) + { + send_error(thd, ER_NO_DB_ERROR); + goto error; + } + if (!strip_sp(db) || check_db_name(db)) + { + net_printf(thd, ER_WRONG_DB_NAME, db); + break; + } + /* + If in a slave thread : + ALTER DATABASE DB may not be preceded by USE DB. + For that reason, maybe db_ok() in sql/slave.cc did not check the + do_db/ignore_db. And as this query involves no tables, tables_ok() + above was not called. So we have to check rules again here. + */ +#ifdef HAVE_REPLICATION + if (thd->slave_thread && + (!db_ok(db, replicate_do_db, replicate_ignore_db) || + !db_ok_with_wild_table(db))) + { + my_error(ER_SLAVE_IGNORED_TABLE, MYF(0)); + break; + } +#endif + if (check_access(thd, ALTER_ACL, db, 0, 1, 0)) + break; + if (thd->locked_tables || thd->active_transaction()) + { + send_error(thd,ER_LOCK_OR_ACTIVE_TRANSACTION); + goto error; + } + res= mysql_alter_db(thd, db, &lex->create_info); + break; + } + case SQLCOM_SHOW_CREATE_DB: + { + if (!strip_sp(lex->name) || check_db_name(lex->name)) + { + net_printf(thd,ER_WRONG_DB_NAME, lex->name); + break; + } + if (check_access(thd,SELECT_ACL,lex->name,0,1,0)) + break; + res=mysqld_show_create_db(thd,lex->name,&lex->create_info); + break; + } case SQLCOM_CREATE_FUNCTION: - if (check_access(thd,INSERT_ACL,"mysql",0,1)) + if (check_access(thd,INSERT_ACL,"mysql",0,1,0)) break; #ifdef HAVE_DLOPEN if (!(res = mysql_create_function(thd,&lex->udf))) - send_ok(&thd->net); + send_ok(thd); #else + net_printf(thd, ER_CANT_OPEN_LIBRARY, lex->udf.dl, 0, "feature disabled"); res= -1; #endif break; case SQLCOM_DROP_FUNCTION: - if (check_access(thd,DELETE_ACL,"mysql",0,1)) + if (check_access(thd,DELETE_ACL,"mysql",0,1,0)) break; #ifdef HAVE_DLOPEN - if (!(res = mysql_drop_function(thd,lex->udf.name))) - send_ok(&thd->net); + if (!(res = mysql_drop_function(thd,&lex->udf.name))) + send_ok(thd); #else res= -1; #endif break; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + case SQLCOM_DROP_USER: + { + if (check_access(thd, GRANT_ACL,"mysql",0,1,0)) + break; + if (!(res= mysql_drop_user(thd, lex->users_list))) + { + mysql_update_log.write(thd, thd->query, thd->query_length); + if (mysql_bin_log.is_open()) + { + Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); + mysql_bin_log.write(&qinfo); + } + send_ok(thd); + } + break; + } + case SQLCOM_REVOKE_ALL: + { + if 
(check_access(thd, GRANT_ACL ,"mysql",0,1,0)) + break; + if (!(res = mysql_revoke_all(thd, lex->users_list))) + { + mysql_update_log.write(thd, thd->query, thd->query_length); + if (mysql_bin_log.is_open()) + { + Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); + mysql_bin_log.write(&qinfo); + } + send_ok(thd); + } + break; + } case SQLCOM_REVOKE: case SQLCOM_GRANT: { if (check_access(thd, lex->grant | lex->grant_tot_col | GRANT_ACL, - tables && tables->db ? tables->db : select_lex->db, + tables ? tables->db : select_lex->db, tables ? &tables->grant.privilege : 0, - tables ? 0 : 1)) + tables ? 0 : 1, 0)) goto error; /* @@ -2522,20 +3541,37 @@ mysql_execute_command(void) if (user->password.str && (strcmp(thd->user,user->user.str) || user->host.str && - my_strcasecmp(user->host.str, thd->host_or_ip))) + my_strcasecmp(&my_charset_latin1, + user->host.str, thd->host_or_ip))) { - if (check_access(thd, UPDATE_ACL, "mysql",0,1)) + if (check_access(thd, UPDATE_ACL, "mysql", 0, 1, 1)) + { + send_error(thd, ER_PASSWORD_NOT_ALLOWED); goto error; - break; // We are allowed to do changes + } + break; // We are allowed to do global changes } } } + if (specialflag & SPECIAL_NO_RESOLVE) + { + LEX_USER *user; + List_iterator <LEX_USER> user_list(lex->users_list); + while ((user=user_list++)) + { + if (hostname_requires_resolving(user->host.str)) + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_HOSTNAME_WONT_WORK, + ER(ER_WARN_HOSTNAME_WONT_WORK), + user->host.str); + } + } if (tables) { if (grant_option && check_grant(thd, (lex->grant | lex->grant_tot_col | GRANT_ACL), - tables)) + tables, 0, UINT_MAX, 0)) goto error; if (!(res = mysql_table_grant(thd,tables,lex->users_list, lex->columns, lex->grant, @@ -2545,7 +3581,7 @@ mysql_execute_command(void) if (mysql_bin_log.is_open()) { thd->clear_error(); - Query_log_event qinfo(thd, thd->query, thd->query_length, 0); + Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); mysql_bin_log.write(&qinfo); } } @@ -2554,7 +3590,7 @@ mysql_execute_command(void) { if (lex->columns.elements) { - send_error(&thd->net,ER_ILLEGAL_GRANT_FOR_TABLE); + send_error(thd,ER_ILLEGAL_GRANT_FOR_TABLE); res=1; } else @@ -2566,7 +3602,7 @@ mysql_execute_command(void) if (mysql_bin_log.is_open()) { thd->clear_error(); - Query_log_event qinfo(thd, thd->query, thd->query_length, 0); + Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); mysql_bin_log.write(&qinfo); } if (mqh_used && lex->sql_command == SQLCOM_GRANT) @@ -2580,28 +3616,60 @@ mysql_execute_command(void) } break; } - case SQLCOM_FLUSH: +#endif /*!NO_EMBEDDED_ACCESS_CHECKS*/ case SQLCOM_RESET: + /* + RESET commands are never written to the binary log, so we have to + initialize this variable because RESET shares the same code as FLUSH + */ + lex->no_write_to_binlog= 1; + case SQLCOM_FLUSH: + { if (check_global_access(thd,RELOAD_ACL) || check_db_used(thd, tables)) goto error; - /* error sending is deferred to reload_acl_and_cache */ - reload_acl_and_cache(thd, lex->type, tables) ; + /* + reload_acl_and_cache() will tell us if we are allowed to write to the + binlog or not. + */ + bool write_to_binlog; + if (reload_acl_and_cache(thd, lex->type, tables, &write_to_binlog)) + send_error(thd, 0); + else + { + /* + We WANT to write and we CAN write. + ! we write after unlocking the table. 
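
This is the double gate the comment describes: the statement reaches the binary log only when the user did not suppress logging (RESET always sets lex->no_write_to_binlog; FLUSH may set it too) and reload_acl_and_cache() reported the operation as loggable. A tiny illustrative model, not the server's code:

#include <stdio.h>

/* TRUE only when logging was not suppressed and the operation is loggable. */
static bool should_binlog(bool user_suppressed, bool op_is_loggable)
{
  return !user_suppressed && op_is_loggable;
}

int main()
{
  printf("%d\n", should_binlog(false, true));   /* 1: plain FLUSH, loggable op  */
  printf("%d\n", should_binlog(true,  true));   /* 0: RESET, or flag was set    */
  printf("%d\n", should_binlog(false, false));  /* 0: op must not be replicated */
  return 0;
}
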
+ */ + if (!lex->no_write_to_binlog && write_to_binlog) + { + mysql_update_log.write(thd, thd->query, thd->query_length); + if (mysql_bin_log.is_open()) + { + Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); + mysql_bin_log.write(&qinfo); + } + } + send_ok(thd); + } break; + } case SQLCOM_KILL: kill_one_thread(thd,lex->thread_id); break; +#ifndef NO_EMBEDDED_ACCESS_CHECKS case SQLCOM_SHOW_GRANTS: res=0; if ((thd->priv_user && !strcmp(thd->priv_user,lex->grant_user->user.str)) || - !check_access(thd, SELECT_ACL, "mysql",0,1)) + !check_access(thd, SELECT_ACL, "mysql",0,1,0)) { res = mysql_show_grants(thd,lex->grant_user); } break; +#endif case SQLCOM_HA_OPEN: if (check_db_used(thd,tables) || - check_table_access(thd,SELECT_ACL, tables)) + check_table_access(thd,SELECT_ACL, tables,0)) goto error; res = mysql_ha_open(thd, tables); break; @@ -2639,7 +3707,9 @@ mysql_execute_command(void) thd->options= ((thd->options & (ulong) ~(OPTION_STATUS_NO_TRANS_UPDATE)) | OPTION_BEGIN); thd->server_status|= SERVER_STATUS_IN_TRANS; - send_ok(&thd->net); + if (!(lex->start_transaction_opt & MYSQL_START_TRANS_OPT_WITH_CONS_SNAPSHOT) || + !(res= ha_start_consistent_snapshot(thd))) + send_ok(thd); } break; case SQLCOM_COMMIT: @@ -2653,7 +3723,7 @@ mysql_execute_command(void) thd->server_status&= ~SERVER_STATUS_IN_TRANS; if (!ha_commit(thd)) { - send_ok(&thd->net); + send_ok(thd); } else res= -1; @@ -2673,9 +3743,9 @@ mysql_execute_command(void) message in the error log, so we don't send it. */ if ((thd->options & OPTION_STATUS_NO_TRANS_UPDATE) && !thd->slave_thread) - send_warning(&thd->net,ER_WARNING_NOT_COMPLETE_ROLLBACK,0); + send_warning(thd,ER_WARNING_NOT_COMPLETE_ROLLBACK,0); else - send_ok(&thd->net); + send_ok(thd); } else res= -1; @@ -2685,45 +3755,77 @@ mysql_execute_command(void) if (!ha_rollback_to_savepoint(thd, lex->savepoint_name)) { if ((thd->options & OPTION_STATUS_NO_TRANS_UPDATE) && !thd->slave_thread) - send_warning(&thd->net,ER_WARNING_NOT_COMPLETE_ROLLBACK,0); + send_warning(thd, ER_WARNING_NOT_COMPLETE_ROLLBACK, 0); else - send_ok(&thd->net); + send_ok(thd); } else res= -1; break; case SQLCOM_SAVEPOINT: if (!ha_savepoint(thd, lex->savepoint_name)) - send_ok(&thd->net); + send_ok(thd); else res= -1; break; default: /* Impossible */ - send_ok(&thd->net); + send_ok(thd); break; } thd->proc_info="query end"; // QQ + + /* + Reset system variables temporarily modified by SET ONE SHOT. + + Exception: If this is a SET, do nothing. This is to allow + mysqlbinlog to print many SET commands (in this case we want the + charset temp setting to live until the real query). This is also + needed so that SET CHARACTER_SET_CLIENT... does not cancel itself + immediately. + */ + if (thd->one_shot_set && lex->sql_command != SQLCOM_SET_OPTION) + reset_one_shot_variables(thd); + if (res < 0) - send_error(&thd->net,thd->killed ? ER_SERVER_SHUTDOWN : 0); + send_error(thd,thd->killed ? ER_SERVER_SHUTDOWN : 0); error: - if (unlikely(slave_fake_lock)) - { - DBUG_PRINT("info",("undoing faked lock")); - thd->lock= thd->locked_tables; - thd->locked_tables= fake_prev_lock; - if (thd->lock == thd->locked_tables) - thd->lock= 0; - } DBUG_VOID_RETURN; +} - error1: - /* - Release the protection against the global read lock and wake - everyone, who might want to set a global read lock. - */ - start_waiting_global_read_lock(thd); - DBUG_VOID_RETURN; + +/* + Check grants for commands which work only with one table and all other + tables belonging to subselects or implicitly opened tables. 
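
check_one_table_access() and check_access() below both reduce to one bit test: the wanted privileges must be covered by the union of the global (master) access and the per-database access. A simplified standalone model that ignores EXTRA_ACL and table/column grants; the bit layout is hypothetical:

#include <stdio.h>

typedef unsigned long acl_t;
static const acl_t SELECT_BIT= 1UL << 0;   /* illustrative bits only */
static const acl_t INSERT_BIT= 1UL << 1;
static const acl_t UPDATE_BIT= 1UL << 2;

/* Wanted bits not covered by global access must be covered by db access. */
static bool access_ok(acl_t want, acl_t master_access, acl_t db_access)
{
  acl_t missing= want & ~master_access;
  return (missing & ~db_access) == 0;
}

int main()
{
  printf("%d\n", access_ok(SELECT_BIT | INSERT_BIT, SELECT_BIT, INSERT_BIT)); /* 1 */
  printf("%d\n", access_ok(UPDATE_BIT, SELECT_BIT, INSERT_BIT));              /* 0 */
  return 0;
}
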
+ + SYNOPSIS + check_one_table_access() + thd Thread handler + privilege requested privelage + tables table list of command + + RETURN + 0 - OK + 1 - access denied, error is sent to client +*/ + +int check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *tables) +{ + if (check_access(thd, privilege, tables->db, &tables->grant.privilege,0,0)) + return 1; + + /* Show only 1 table for check_grant */ + if (grant_option && check_grant(thd, privilege, tables, 0, 1, 0)) + return 1; + + /* Check rights on tables of subselects and implictly opened tables */ + TABLE_LIST *subselects_tables; + if ((subselects_tables= tables->next)) + { + if ((check_table_access(thd, SELECT_ACL, subselects_tables,0))) + return 1; + } + return 0; } @@ -2751,10 +3853,13 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv, bool dont_check_global_grants, bool no_errors) { DBUG_ENTER("check_access"); - DBUG_PRINT("enter",("want_access: %lu master_access: %lu", want_access, - thd->master_access)); - ulong db_access,dummy; + DBUG_PRINT("enter",("db: '%s' want_access: %lu master_access: %lu", + db ? db : "", want_access, thd->master_access)); +#ifndef NO_EMBEDDED_ACCESS_CHECKS + ulong db_access; bool db_is_pattern= test(want_access & GRANT_ACL); +#endif + ulong dummy; if (save_priv) *save_priv=0; else @@ -2763,10 +3868,13 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv, if ((!db || !db[0]) && !thd->db && !dont_check_global_grants) { if (!no_errors) - send_error(&thd->net,ER_NO_DB_ERROR); /* purecov: tested */ + send_error(thd,ER_NO_DB_ERROR); /* purecov: tested */ DBUG_RETURN(TRUE); /* purecov: tested */ } +#ifdef NO_EMBEDDED_ACCESS_CHECKS + DBUG_RETURN(0); +#else if ((thd->master_access & want_access) == want_access) { /* @@ -2777,8 +3885,7 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv, db_access= thd->db_access; if (!(thd->master_access & SELECT_ACL) && (db && (!thd->db || db_is_pattern || strcmp(db,thd->db)))) - db_access=acl_get(thd->host, thd->ip, (char*) &thd->remote.sin_addr, - thd->priv_user, db, db_is_pattern); + db_access=acl_get(thd->host, thd->ip, thd->priv_user, db, db_is_pattern); *save_priv=thd->master_access | db_access; DBUG_RETURN(FALSE); } @@ -2786,7 +3893,7 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv, ! db && dont_check_global_grants) { // We can never grant this if (!no_errors) - net_printf(&thd->net,ER_ACCESS_DENIED_ERROR, + net_printf(thd,ER_ACCESS_DENIED_ERROR, thd->priv_user, thd->priv_host, thd->password ? 
ER(ER_YES) : ER(ER_NO));/* purecov: tested */ @@ -2797,11 +3904,11 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv, DBUG_RETURN(FALSE); // Allow select on anything if (db && (!thd->db || db_is_pattern || strcmp(db,thd->db))) - db_access=acl_get(thd->host, thd->ip, (char*) &thd->remote.sin_addr, - thd->priv_user, db, db_is_pattern); + db_access=acl_get(thd->host, thd->ip, thd->priv_user, db, db_is_pattern); else db_access=thd->db_access; - // Remove SHOW attribute and access rights we already have + DBUG_PRINT("info",("db_access: %lu", db_access)); + /* Remove SHOW attribute and access rights we already have */ want_access &= ~(thd->master_access | EXTRA_ACL); db_access= ((*save_priv=(db_access | thd->master_access)) & want_access); @@ -2811,11 +3918,12 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv, !(want_access & ~(db_access | TABLE_ACLS)))) DBUG_RETURN(FALSE); /* Ok */ if (!no_errors) - net_printf(&thd->net,ER_DBACCESS_DENIED_ERROR, + net_printf(thd,ER_DBACCESS_DENIED_ERROR, thd->priv_user, thd->priv_host, db ? db : thd->db ? thd->db : "unknown"); /* purecov: tested */ DBUG_RETURN(TRUE); /* purecov: tested */ +#endif /* NO_EMBEDDED_ACCESS_CHECKS */ } @@ -2840,13 +3948,17 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv, bool check_global_access(THD *thd, ulong want_access) { +#ifdef NO_EMBEDDED_ACCESS_CHECKS + return 0; +#else char command[128]; if ((thd->master_access & want_access)) return 0; get_privilege_desc(command, sizeof(command), want_access); - net_printf(&thd->net,ER_SPECIFIC_ACCESS_DENIED_ERROR, + net_printf(thd,ER_SPECIFIC_ACCESS_DENIED_ERROR, command); return 1; +#endif /* NO_EMBEDDED_ACCESS_CHECKS */ } @@ -2864,6 +3976,11 @@ check_table_access(THD *thd, ulong want_access,TABLE_LIST *tables, TABLE_LIST *org_tables=tables; for (; tables ; tables=tables->next) { + if (tables->derived || + (tables->table && (int)tables->table->tmp_table) || + my_tz_check_n_skip_implicit_tables(&tables, + thd->lex->time_zone_tables_used)) + continue; if ((thd->master_access & want_access) == (want_access & ~EXTRA_ACL) && thd->db) tables->grant.privilege= want_access; @@ -2886,20 +4003,27 @@ check_table_access(THD *thd, ulong want_access,TABLE_LIST *tables, } if (grant_option) return check_grant(thd,want_access & ~EXTRA_ACL,org_tables, - test(want_access & EXTRA_ACL), no_errors); + test(want_access & EXTRA_ACL), UINT_MAX, no_errors); return FALSE; } - -static bool -check_one_table_access(THD *thd, ulong want_access, TABLE_LIST *table, - bool no_errors) +bool check_merge_table_access(THD *thd, char *db, + TABLE_LIST *table_list) { - if (check_access(thd, want_access, table->db, &table->grant.privilege, 0, - no_errors)) - return 1; - return (grant_option && check_grant(thd, want_access, table, 0, - no_errors)); + int error=0; + if (table_list) + { + /* Check that all tables use the current database */ + TABLE_LIST *tmp; + for (tmp=table_list; tmp ; tmp=tmp->next) + { + if (!tmp->db || !tmp->db[0]) + tmp->db=db; + } + error=check_table_access(thd, SELECT_ACL | UPDATE_ACL | DELETE_ACL, + table_list,0); + } + return error; } @@ -2911,7 +4035,7 @@ static bool check_db_used(THD *thd,TABLE_LIST *tables) { if (!(tables->db=thd->db)) { - send_error(&thd->net,ER_NO_DB_ERROR); /* purecov: tested */ + send_error(thd,ER_NO_DB_ERROR); /* purecov: tested */ return TRUE; /* purecov: tested */ } } @@ -2919,32 +4043,6 @@ static bool check_db_used(THD *thd,TABLE_LIST *tables) return FALSE; } - -static bool 
check_merge_table_access(THD *thd, char *db, - TABLE_LIST *table_list) -{ - int error=0; - if (table_list) - { - /* Check that all tables use the current database */ - TABLE_LIST *tmp; - for (tmp=table_list; tmp ; tmp=tmp->next) - { - if (!tmp->db || !tmp->db[0]) - tmp->db=db; - else if (strcmp(tmp->db,db)) - { - send_error(&thd->net,ER_UNION_TABLES_IN_DIFFERENT_DIR); - return 1; - } - } - error=check_table_access(thd, SELECT_ACL | UPDATE_ACL | DELETE_ACL, - table_list); - } - return error; -} - - /**************************************************************************** Check stack size; Send error if there isn't enough stack to continue ****************************************************************************/ @@ -2955,6 +4053,10 @@ static bool check_merge_table_access(THD *thd, char *db, #define used_stack(A,B) (long) (B - A) #endif +#ifndef DBUG_OFF +long max_stack_used; +#endif + #ifndef EMBEDDED_LIBRARY bool check_stack_overrun(THD *thd,char *buf __attribute__((unused))) { @@ -2964,9 +4066,12 @@ bool check_stack_overrun(THD *thd,char *buf __attribute__((unused))) { sprintf(errbuff[0],ER(ER_STACK_OVERRUN),stack_used,thread_stack); my_message(ER_STACK_OVERRUN,errbuff[0],MYF(0)); - thd->fatal_error=1; + thd->fatal_error(); return 1; } +#ifndef DBUG_OFF + max_stack_used= max(max_stack_used, stack_used); +#endif return 0; } #endif /* EMBEDDED_LIBRARY */ @@ -3002,82 +4107,202 @@ bool my_yyoverflow(short **yyss, YYSTYPE **yyvs, ulong *yystacksize) return 0; } - /**************************************************************************** - Initialize global thd variables needed for query + Initialize global thd variables needed for query ****************************************************************************/ -static void -mysql_init_query(THD *thd) +void +mysql_init_query(THD *thd, uchar *buf, uint length) { DBUG_ENTER("mysql_init_query"); - thd->lex.select_lex.item_list.empty(); - thd->lex.value_list.empty(); - thd->lex.select_lex.table_list.elements=0; - thd->free_list=0; thd->lex.union_option=0; - thd->lex.select = &thd->lex.select_lex; - thd->lex.select_lex.table_list.first=0; - thd->lex.select_lex.table_list.next= (byte**) &thd->lex.select_lex.table_list.first; - thd->lex.select_lex.next=0; - /* - select_lex.options is also inited in dispatch_command(), but for - replication (which bypasses dispatch_command() and calls mysql_parse() - directly) we must do it here. - */ - thd->lex.select_lex.options=0; - thd->lex.olap=0; - thd->lex.select->olap= UNSPECIFIED_OLAP_TYPE; - thd->fatal_error=0; // Safety - thd->last_insert_id_used=thd->query_start_used=thd->insert_id_used=0; - thd->rand_used=0; - thd->sent_row_count=thd->examined_row_count=0; - thd->safe_to_cache_query=1; + lex_start(thd, buf, length); + mysql_reset_thd_for_next_command(thd); DBUG_VOID_RETURN; } + +/* + Reset THD part responsible for command processing state. + + DESCRIPTION + This needs to be called before execution of every statement + (prepared or conventional). + + TODO + Make it a method of THD and align its name with the rest of + reset/end/start/init methods. + Call it after we use THD for queries, not before. 
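
check_stack_overrun() above estimates stack usage as the distance between the thread's recorded stack base and the address of a local variable (the used_stack() macro picks the growth direction at compile time). A rough standalone sketch of the same idea; it assumes a downward-growing stack and is only an approximation:

#include <stdio.h>

static long deepest= 0;

static int probe(int depth, char *base)
{
  char marker;
  long used= (long) (base - &marker);     /* downward-growing stack assumed     */
  if (used > deepest)
    deepest= used;
  if (depth == 0)
    return 0;
  return probe(depth - 1, base) + 1;      /* "+ 1" prevents tail-call collapse  */
}

int main()
{
  char base_marker;
  probe(100, &base_marker);
  printf("approx deepest stack use: %ld bytes\n", deepest);
  return 0;
}
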
+*/ + +void mysql_reset_thd_for_next_command(THD *thd) +{ + DBUG_ENTER("mysql_reset_thd_for_next_command"); + thd->free_list= 0; + thd->select_number= 1; + thd->total_warn_count= 0; // Warnings for this query + thd->last_insert_id_used= thd->query_start_used= thd->insert_id_used=0; + thd->sent_row_count= thd->examined_row_count= 0; + thd->is_fatal_error= thd->rand_used= thd->time_zone_used= 0; + thd->server_status&= ~ (SERVER_MORE_RESULTS_EXISTS | + SERVER_QUERY_NO_INDEX_USED | + SERVER_QUERY_NO_GOOD_INDEX_USED); + thd->tmp_table_used= 0; + if (opt_bin_log) + reset_dynamic(&thd->user_var_events); + thd->clear_error(); + DBUG_VOID_RETURN; +} + + void mysql_init_select(LEX *lex) { - SELECT_LEX *select_lex = lex->select; - select_lex->where=select_lex->having=0; - select_lex->select_limit= lex->thd->variables.select_limit; - select_lex->offset_limit=0; - select_lex->options=0; - select_lex->linkage=UNSPECIFIED_TYPE; - select_lex->olap= UNSPECIFIED_OLAP_TYPE; - lex->exchange = 0; - lex->proc_list.first=0; - select_lex->order_list.empty(); - select_lex->group_list.empty(); - select_lex->next = (SELECT_LEX *)NULL; + SELECT_LEX *select_lex= lex->current_select; + select_lex->init_select(); + select_lex->select_limit= HA_POS_ERROR; + if (select_lex == &lex->select_lex) + { + DBUG_ASSERT(lex->result == 0); + lex->exchange= 0; + } } bool -mysql_new_select(LEX *lex) +mysql_new_select(LEX *lex, bool move_down) { - SELECT_LEX *select_lex = (SELECT_LEX *) lex->thd->calloc(sizeof(SELECT_LEX)); - if (!select_lex) + SELECT_LEX *select_lex; + if (!(select_lex= new(lex->thd->mem_root) SELECT_LEX())) return 1; - lex->select->next=select_lex; - lex->select=select_lex; - select_lex->table_list.next= (byte**) &select_lex->table_list.first; - select_lex->item_list.empty(); - select_lex->when_list.empty(); - select_lex->expr_list.empty(); - select_lex->interval_list.empty(); - select_lex->use_index.empty(); - select_lex->ftfunc_list.empty(); + select_lex->select_number= ++lex->thd->select_number; + select_lex->init_query(); + select_lex->init_select(); + /* + Don't evaluate this subquery during statement prepare even if + it's a constant one. The flag is switched off in the end of + mysql_stmt_prepare. + */ + if (lex->thd->current_arena->is_stmt_prepare()) + select_lex->uncacheable|= UNCACHEABLE_PREPARE; + + if (move_down) + { + lex->subqueries= TRUE; + /* first select_lex of subselect or derived table */ + SELECT_LEX_UNIT *unit; + if (!(unit= new(lex->thd->mem_root) SELECT_LEX_UNIT())) + return 1; + + unit->init_query(); + unit->init_select(); + unit->thd= lex->thd; + unit->include_down(lex->current_select); + unit->link_next= 0; + unit->link_prev= 0; + unit->return_to= lex->current_select; + select_lex->include_down(unit); + // TODO: assign resolve_mode for fake subquery after merging with new tree + } + else + { + select_lex->include_neighbour(lex->current_select); + if (!select_lex->master_unit()->fake_select_lex && + select_lex->master_unit()->add_fake_select_lex(lex->thd)) + return 1; + } + + select_lex->master_unit()->global_parameters= select_lex; + select_lex->include_global((st_select_lex_node**)&lex->all_selects_list); + lex->current_select= select_lex; + select_lex->resolve_mode= SELECT_LEX::SELECT_MODE; return 0; } +/* + Create a select to return the same output as 'SELECT @@var_name'. 
+ + SYNOPSIS + create_select_for_variable() + var_name Variable name + + DESCRIPTION + Used for SHOW COUNT(*) [ WARNINGS | ERROR] + + This will crash with a core dump if the variable doesn't exist +*/ + +void create_select_for_variable(const char *var_name) +{ + THD *thd; + LEX *lex; + LEX_STRING tmp, null_lex_string; + Item *var; + char buff[MAX_SYS_VAR_LENGTH*2+4+8], *end; + DBUG_ENTER("create_select_for_variable"); + + thd= current_thd; + lex= thd->lex; + mysql_init_select(lex); + lex->sql_command= SQLCOM_SELECT; + tmp.str= (char*) var_name; + tmp.length=strlen(var_name); + bzero((char*) &null_lex_string.str, sizeof(null_lex_string)); + /* + We set the name of Item to @@session.var_name because that then is used + as the column name in the output. + */ + if ((var= get_system_var(thd, OPT_SESSION, tmp, null_lex_string))) + { + end= strxmov(buff, "@@session.", var_name, NullS); + var->set_name(buff, end-buff, system_charset_info); + add_item_to_list(thd, var); + } + DBUG_VOID_RETURN; +} + +static TABLE_LIST* get_table_by_alias(TABLE_LIST* tl, const char* db, + const char* alias) +{ + for (;tl;tl= tl->next) + { + if (!strcmp(db,tl->db) && + tl->alias && !my_strcasecmp(table_alias_charset,tl->alias,alias)) + return tl; + } + + return 0; +} + +/* Sets up lex->auxilliary_table_list */ +void fix_multi_delete_lex(LEX* lex) +{ + TABLE_LIST *tl; + TABLE_LIST *good_list= (TABLE_LIST*)lex->select_lex.table_list.first; + + for (tl= (TABLE_LIST*)lex->auxilliary_table_list.first; tl; tl= tl->next) + { + TABLE_LIST* good_table= get_table_by_alias(good_list,tl->db,tl->alias); + if (good_table && !good_table->derived) + { + /* + real_name points to a member of Table_ident which is + allocated via thd->strmake() from THD memroot + */ + tl->real_name= good_table->real_name; + tl->real_name_length= good_table->real_name_length; + good_table->updating= tl->updating; + } + } +} void mysql_init_multi_delete(LEX *lex) { - lex->sql_command = SQLCOM_DELETE_MULTI; + lex->sql_command= SQLCOM_DELETE_MULTI; mysql_init_select(lex); - lex->select->select_limit=lex->thd->select_limit=HA_POS_ERROR; - lex->select->table_list.save_and_clear(&lex->auxilliary_table_list); + lex->select_lex.select_limit= lex->unit.select_limit_cnt= + HA_POS_ERROR; + lex->select_lex.table_list.save_and_clear(&lex->auxilliary_table_list); + lex->lock_option= using_update_log ? TL_READ_NO_INSERT : TL_READ; } @@ -3085,41 +4310,64 @@ void mysql_init_multi_delete(LEX *lex) When you modify mysql_parse(), you may need to modify mysql_test_parse_for_slave() in this same file. */ + void mysql_parse(THD *thd, char *inBuf, uint length) { DBUG_ENTER("mysql_parse"); - thd->query_length = length; - mysql_init_query(thd); + mysql_init_query(thd, (uchar*) inBuf, length); if (query_cache_send_result_to_client(thd, inBuf, length) <= 0) { - LEX *lex=lex_start(thd, (uchar*) inBuf, length); - if (!yyparse() && ! thd->fatal_error) + LEX *lex= thd->lex; + if (!yyparse((void *)thd) && ! thd->is_fatal_error) { +#ifndef NO_EMBEDDED_ACCESS_CHECKS if (mqh_used && thd->user_connect && - check_mqh(thd, thd->lex.sql_command)) + check_mqh(thd, lex->sql_command)) { thd->net.error = 0; } else +#endif { - mysql_execute_command(); - query_cache_end_of_result(&thd->net); + if (thd->net.report_error) + send_error(thd, 0, NullS); + else + { + /* + Binlog logs a string starting from thd->query and having length + thd->query_length; so we set thd->query_length correctly (to not + log several statements in one event, when we executed only first).
+ We set it to not see the ';' (otherwise it would get into binlog + and Query_log_event::print() would give ';;' output). + This also helps display only the current query in SHOW + PROCESSLIST. + Note that we don't need LOCK_thread_count to modify query_length. + */ + if (lex->found_colon && + (thd->query_length= (ulong)(lex->found_colon - thd->query))) + thd->query_length--; + /* Actually execute the query */ + mysql_execute_command(thd); + query_cache_end_of_result(thd); + } } } else { DBUG_PRINT("info",("Command aborted. Fatal_error: %d", - thd->fatal_error)); + thd->is_fatal_error)); query_cache_abort(&thd->net); } thd->proc_info="freeing items"; - free_items(thd); /* Free strings used by items */ - lex_end(lex); + thd->end_statement(); + DBUG_ASSERT(thd->change_list.is_empty()); } DBUG_VOID_RETURN; } + +#ifdef HAVE_REPLICATION /* Usable by the replication SQL thread only: just parse a query to know if it can be ignored because of replicate-*-table rules. @@ -3131,19 +4379,17 @@ void mysql_parse(THD *thd, char *inBuf, uint length) bool mysql_test_parse_for_slave(THD *thd, char *inBuf, uint length) { - LEX *lex; + LEX *lex= thd->lex; bool error= 0; - mysql_init_query(thd); - lex= lex_start(thd, (uchar*) inBuf, length); - if (!yyparse() && ! thd->fatal_error && + mysql_init_query(thd, (uchar*) inBuf, length); + if (!yyparse((void*) thd) && ! thd->is_fatal_error && all_tables_not_ok(thd,(TABLE_LIST*) lex->select_lex.table_list.first)) error= 1; /* Ignore question */ - free_items(thd); /* Free strings used by items */ - lex_end(lex); - + thd->end_statement(); return error; } +#endif /***************************************************************************** @@ -3151,71 +4397,85 @@ bool mysql_test_parse_for_slave(THD *thd, char *inBuf, uint length) ** Return 0 if ok ******************************************************************************/ -bool add_field_to_list(char *field_name, enum_field_types type, +bool add_field_to_list(THD *thd, char *field_name, enum_field_types type, char *length, char *decimals, - uint type_modifier, Item *default_value,char *change, - TYPELIB *interval) + uint type_modifier, + Item *default_value, Item *on_update_value, + LEX_STRING *comment, + char *change, + List<String> *interval_list, CHARSET_INFO *cs, + uint uint_geom_type) { register create_field *new_field; - THD *thd=current_thd; - LEX *lex= &thd->lex; + LEX *lex= thd->lex; uint allowed_type_modifier=0; + char warn_buff[MYSQL_ERRMSG_SIZE]; DBUG_ENTER("add_field_to_list"); if (strlen(field_name) > NAME_LEN) { - net_printf(&thd->net, ER_TOO_LONG_IDENT, field_name); /* purecov: inspected */ + net_printf(thd, ER_TOO_LONG_IDENT, field_name); /* purecov: inspected */ DBUG_RETURN(1); /* purecov: inspected */ } if (type_modifier & PRI_KEY_FLAG) { lex->col_list.push_back(new key_part_spec(field_name,0)); - lex->key_list.push_back(new Key(Key::PRIMARY,NullS, - lex->col_list)); + lex->key_list.push_back(new Key(Key::PRIMARY, NullS, HA_KEY_ALG_UNDEF, + 0, lex->col_list)); lex->col_list.empty(); } if (type_modifier & (UNIQUE_FLAG | UNIQUE_KEY_FLAG)) { lex->col_list.push_back(new key_part_spec(field_name,0)); - lex->key_list.push_back(new Key(Key::UNIQUE,NullS, + lex->key_list.push_back(new Key(Key::UNIQUE, NullS, HA_KEY_ALG_UNDEF, 0, lex->col_list)); lex->col_list.empty(); } if (default_value) { - /* - We allow specifying value for first TIMESTAMP column - altough it is silently ignored. 
This should be fixed in 4.1 - (by proper warning or real support for default values) + /* + Default value should be literal => basic constants => + no need fix_fields() + + We allow only one function as part of default value - + NOW() as default for TIMESTAMP type. */ - if (default_value->type() == Item::NULL_ITEM) + if (default_value->type() == Item::FUNC_ITEM && + !(((Item_func*)default_value)->functype() == Item_func::NOW_FUNC && + type == FIELD_TYPE_TIMESTAMP)) + { + net_printf(thd, ER_INVALID_DEFAULT, field_name); + DBUG_RETURN(1); + } + else if (default_value->type() == Item::NULL_ITEM) { - default_value=0; + default_value= 0; if ((type_modifier & (NOT_NULL_FLAG | AUTO_INCREMENT_FLAG)) == NOT_NULL_FLAG) { - net_printf(&thd->net,ER_INVALID_DEFAULT,field_name); + net_printf(thd,ER_INVALID_DEFAULT,field_name); DBUG_RETURN(1); } } -#ifdef MYSQL41000 else if (type_modifier & AUTO_INCREMENT_FLAG) { - net_printf(&thd->net, ER_INVALID_DEFAULT, field_name); + net_printf(thd, ER_INVALID_DEFAULT, field_name); DBUG_RETURN(1); } -#endif } + + if (on_update_value && type != FIELD_TYPE_TIMESTAMP) + { + net_printf(thd, ER_INVALID_ON_UPDATE, field_name); + DBUG_RETURN(1); + } + if (!(new_field=new create_field())) DBUG_RETURN(1); new_field->field=0; new_field->field_name=field_name; -#ifdef MYSQL41000 new_field->def= default_value; -#else - new_field->def= (type_modifier & AUTO_INCREMENT_FLAG ? 0 : default_value); -#endif new_field->flags= type_modifier; new_field->unireg_check= (type_modifier & AUTO_INCREMENT_FLAG ? Field::NEXT_NUMBER : Field::NONE); @@ -3223,9 +4483,24 @@ bool add_field_to_list(char *field_name, enum_field_types type, NOT_FIXED_DEC-1) : 0; new_field->sql_type=type; new_field->length=0; + new_field->char_length= 0; new_field->change=change; new_field->interval=0; new_field->pack_length=0; + new_field->charset=cs; + new_field->geom_type= (Field::geometry_type) uint_geom_type; + + if (!comment) + { + new_field->comment.str=0; + new_field->comment.length=0; + } + else + { + /* In this case comment is always of type Item_string */ + new_field->comment.str= (char*) comment->str; + new_field->comment.length=comment->length; + } if (length && !(new_field->length= (uint) atoi(length))) length=0; /* purecov: inspected */ uint sign_len=type_modifier & UNSIGNED_FLAG ? 
0 : 1; @@ -3237,29 +4512,26 @@ bool add_field_to_list(char *field_name, enum_field_types type, switch (type) { case FIELD_TYPE_TINY: - if (!length) new_field->length=3+sign_len; + if (!length) new_field->length=MAX_TINYINT_WIDTH+sign_len; allowed_type_modifier= AUTO_INCREMENT_FLAG; break; case FIELD_TYPE_SHORT: - if (!length) new_field->length=5+sign_len; + if (!length) new_field->length=MAX_SMALLINT_WIDTH+sign_len; allowed_type_modifier= AUTO_INCREMENT_FLAG; break; case FIELD_TYPE_INT24: - if (!length) new_field->length=8+sign_len; + if (!length) new_field->length=MAX_MEDIUMINT_WIDTH+sign_len; allowed_type_modifier= AUTO_INCREMENT_FLAG; break; case FIELD_TYPE_LONG: - if (!length) new_field->length=10+sign_len; + if (!length) new_field->length=MAX_INT_WIDTH+sign_len; allowed_type_modifier= AUTO_INCREMENT_FLAG; break; case FIELD_TYPE_LONGLONG: - if (!length) new_field->length=20; + if (!length) new_field->length=MAX_BIGINT_WIDTH; allowed_type_modifier= AUTO_INCREMENT_FLAG; break; - case FIELD_TYPE_STRING: - case FIELD_TYPE_VAR_STRING: case FIELD_TYPE_NULL: - case FIELD_TYPE_GEOMETRY: break; case FIELD_TYPE_DECIMAL: if (!length) @@ -3276,17 +4548,43 @@ bool add_field_to_list(char *field_name, enum_field_types type, new_field->length++; } break; + case FIELD_TYPE_STRING: + case FIELD_TYPE_VAR_STRING: + if (new_field->length <= MAX_FIELD_CHARLENGTH || default_value) + break; + /* Convert long CHAR() and VARCHAR columns to TEXT or BLOB */ + new_field->sql_type= FIELD_TYPE_BLOB; + sprintf(warn_buff, ER(ER_AUTO_CONVERT), field_name, "CHAR", + (cs == &my_charset_bin) ? "BLOB" : "TEXT"); + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_AUTO_CONVERT, + warn_buff); + /* fall through */ case FIELD_TYPE_BLOB: case FIELD_TYPE_TINY_BLOB: case FIELD_TYPE_LONG_BLOB: case FIELD_TYPE_MEDIUM_BLOB: + case FIELD_TYPE_GEOMETRY: + if (new_field->length) + { + /* The user has given a length to the blob column */ + if (new_field->length < 256) + type= FIELD_TYPE_TINY_BLOB; + else if (new_field->length < 65536) + type= FIELD_TYPE_BLOB; + else if (new_field->length < 256L*256L*256L) + type= FIELD_TYPE_MEDIUM_BLOB; + else + type= FIELD_TYPE_LONG_BLOB; + new_field->length= 0; + } + new_field->sql_type= type; if (default_value) // Allow empty as default value { String str,*res; res=default_value->val_str(&str); if (res->length()) { - net_printf(&thd->net,ER_BLOB_CANT_HAVE_DEFAULT,field_name); /* purecov: inspected */ + net_printf(thd,ER_BLOB_CANT_HAVE_DEFAULT,field_name); /* purecov: inspected */ DBUG_RETURN(1); /* purecov: inspected */ } new_field->def=0; @@ -3306,7 +4604,7 @@ bool add_field_to_list(char *field_name, enum_field_types type, uint tmp_length=new_field->length; if (tmp_length > PRECISION_FOR_DOUBLE) { - net_printf(&thd->net,ER_WRONG_FIELD_SPEC,field_name); + net_printf(thd,ER_WRONG_FIELD_SPEC,field_name); DBUG_RETURN(1); } else if (tmp_length > PRECISION_FOR_FLOAT) @@ -3334,18 +4632,7 @@ bool add_field_to_list(char *field_name, enum_field_types type, } break; case FIELD_TYPE_TIMESTAMP: -#if MYSQL_VERSION_ID < 40100 - /* - When in in --new mode, we should create TIMESTAMP(19) fields by default; - otherwise we will have problems with ALTER TABLE changing lengths of - existing TIMESTAMP fields to 19 and adding new fields with length 14. 
- */ - if (thd->variables.new_mode) - new_field->length= 19; - else if (!length) -#else if (!length) -#endif new_field->length= 14; // Full date YYYYMMDDHHMMSS else if (new_field->length != 19) { @@ -3356,7 +4643,46 @@ bool add_field_to_list(char *field_name, enum_field_types type, new_field->length=((new_field->length+1)/2)*2; /* purecov: inspected */ new_field->length= min(new_field->length,14); /* purecov: inspected */ } - new_field->flags|= ZEROFILL_FLAG | UNSIGNED_FLAG | NOT_NULL_FLAG; + new_field->flags|= ZEROFILL_FLAG | UNSIGNED_FLAG; + if (default_value) + { + /* Grammar allows only NOW() value for ON UPDATE clause */ + if (default_value->type() == Item::FUNC_ITEM && + ((Item_func*)default_value)->functype() == Item_func::NOW_FUNC) + { + new_field->unireg_check= (on_update_value?Field::TIMESTAMP_DNUN_FIELD: + Field::TIMESTAMP_DN_FIELD); + /* + We don't need the default value any longer; moreover, it is dangerous. + Everything handled by unireg_check further. + */ + new_field->def= 0; + } + else + new_field->unireg_check= (on_update_value?Field::TIMESTAMP_UN_FIELD: + Field::NONE); + } + else + { + /* + If we have a TIMESTAMP NOT NULL column without explicit DEFAULT + or ON UPDATE values then for the sake of compatibility we should treat + this column as having DEFAULT NOW() ON UPDATE NOW() (when we don't + have another TIMESTAMP column with auto-set option before this one) + or DEFAULT 0 (in other cases). + So here we are setting TIMESTAMP_OLD_FIELD only temporarily, and will + replace this value by TIMESTAMP_DNUN_FIELD or NONE later when + information about all TIMESTAMP fields in the table is available. + + If we have a TIMESTAMP NULL column without explicit DEFAULT value + we treat it as having a DEFAULT NULL attribute. + */ + new_field->unireg_check= on_update_value ? + Field::TIMESTAMP_UN_FIELD : + (new_field->flags & NOT_NULL_FLAG ? + Field::TIMESTAMP_OLD_FIELD: + Field::NONE); + } break; case FIELD_TYPE_DATE: // Old date type if (protocol_version != PROTOCOL_VERSION-1) @@ -3373,73 +4699,53 @@ bool add_field_to_list(char *field_name, enum_field_types type, break; case FIELD_TYPE_SET: { - if (interval->count > sizeof(longlong)*8) - { - net_printf(&thd->net,ER_TOO_BIG_SET,field_name); /* purecov: inspected */ - DBUG_RETURN(1); /* purecov: inspected */ - } - new_field->pack_length=(interval->count+7)/8; - if (new_field->pack_length > 4) - new_field->pack_length=8; - new_field->interval=interval; - new_field->length=0; - for (const char **pos=interval->type_names; *pos ; pos++) - { - new_field->length+=(uint) strip_sp((char*) *pos)+1; - } - new_field->length--; - set_if_smaller(new_field->length,MAX_FIELD_WIDTH-1); - if (default_value) + if (interval_list->elements > sizeof(longlong)*8) { - thd->cuted_fields=0; - String str,*res; - res=default_value->val_str(&str); - (void) find_set(interval,res->ptr(),res->length()); - if (thd->cuted_fields) - { - net_printf(&thd->net,ER_INVALID_DEFAULT,field_name); - DBUG_RETURN(1); - } + net_printf(thd,ER_TOO_BIG_SET,field_name); /* purecov: inspected */ + DBUG_RETURN(1); /* purecov: inspected */ } + new_field->pack_length= get_set_pack_length(interval_list->elements); + + List_iterator<String> it(*interval_list); + String *tmp; + while ((tmp= it++)) + new_field->interval_list.push_back(tmp); + /* + Set fake length to 1 to pass the below conditions.
+ Real length will be set in mysql_prepare_table() + when we know the character set of the column + */ + new_field->length= 1; } break; case FIELD_TYPE_ENUM: { - new_field->interval=interval; - new_field->pack_length=interval->count < 256 ? 1 : 2; // Should be safe - new_field->length=(uint) strip_sp((char*) interval->type_names[0]); - for (const char **pos=interval->type_names+1; *pos ; pos++) - { - uint length=(uint) strip_sp((char*) *pos); - set_if_bigger(new_field->length,length); - } - set_if_smaller(new_field->length,MAX_FIELD_WIDTH-1); - if (default_value) - { - String str,*res; - res=default_value->val_str(&str); - if (!find_enum(interval,res->ptr(),res->length())) - { - net_printf(&thd->net,ER_INVALID_DEFAULT,field_name); - DBUG_RETURN(1); - } - } - break; + // Should be safe + new_field->pack_length= get_enum_pack_length(interval_list->elements); + + List_iterator<String> it(*interval_list); + String *tmp; + while ((tmp= it++)) + new_field->interval_list.push_back(tmp); + new_field->length= 1; // See comment for FIELD_TYPE_SET above. } + break; } - if (new_field->length >= MAX_FIELD_WIDTH || + if ((new_field->length > MAX_FIELD_CHARLENGTH && type != FIELD_TYPE_SET && + type != FIELD_TYPE_ENUM) || (!new_field->length && !(new_field->flags & BLOB_FLAG) && - type != FIELD_TYPE_STRING && type != FIELD_TYPE_VAR_STRING)) + type != FIELD_TYPE_STRING && + type != FIELD_TYPE_VAR_STRING && type != FIELD_TYPE_GEOMETRY)) { - net_printf(&thd->net,ER_TOO_BIG_FIELDLENGTH,field_name, - MAX_FIELD_WIDTH-1); /* purecov: inspected */ + net_printf(thd,ER_TOO_BIG_FIELDLENGTH,field_name, + MAX_FIELD_CHARLENGTH); /* purecov: inspected */ DBUG_RETURN(1); /* purecov: inspected */ } type_modifier&= AUTO_INCREMENT_FLAG; if ((~allowed_type_modifier) & type_modifier) { - net_printf(&thd->net,ER_WRONG_FIELD_SPEC,field_name); + net_printf(thd,ER_WRONG_FIELD_SPEC,field_name); DBUG_RETURN(1); } if (!new_field->pack_length) @@ -3448,6 +4754,7 @@ bool add_field_to_list(char *field_name, enum_field_types type, FIELD_TYPE_STRING : new_field->sql_type, new_field->length); + new_field->char_length= new_field->length; lex->create_list.push_back(new_field); lex->last_field=new_field; DBUG_RETURN(0); @@ -3466,13 +4773,13 @@ add_proc_to_list(THD* thd, Item *item) ORDER *order; Item **item_ptr; - if (!(order = (ORDER *) sql_alloc(sizeof(ORDER)+sizeof(Item*)))) + if (!(order = (ORDER *) thd->alloc(sizeof(ORDER)+sizeof(Item*)))) return 1; item_ptr = (Item**) (order+1); *item_ptr= item; order->item=item_ptr; order->free_me=0; - thd->lex.proc_list.link_in_list((byte*) order,(byte**) &order->next); + thd->lex->proc_list.link_in_list((byte*) order,(byte**) &order->next); return 0; } @@ -3492,8 +4799,8 @@ static void remove_escape(char *name) #ifdef USE_MB int l; /* if ((l = ismbchar(name, name+MBMAXLEN))) { Wei He: I think it's wrong */ - if (use_mb(default_charset_info) && - (l = my_ismbchar(default_charset_info, name, strend))) + if (use_mb(system_charset_info) && + (l = my_ismbchar(system_charset_info, name, strend))) { while (l--) *to++ = *name++; @@ -3513,16 +4820,14 @@ static void remove_escape(char *name) ****************************************************************************/ -bool add_to_list(SQL_LIST &list,Item *item,bool asc) +bool add_to_list(THD *thd, SQL_LIST &list,Item *item,bool asc) { ORDER *order; - Item **item_ptr; DBUG_ENTER("add_to_list"); - if (!(order = (ORDER *) sql_alloc(sizeof(ORDER)+sizeof(Item*)))) + if (!(order = (ORDER *) thd->alloc(sizeof(ORDER)))) DBUG_RETURN(1); - item_ptr = (Item**) 
(order+1); - *item_ptr=item; - order->item= item_ptr; + order->item_ptr= item; + order->item= &order->item_ptr; order->asc = asc; order->free_me=0; order->used=0; @@ -3550,14 +4855,16 @@ bool add_to_list(SQL_LIST &list,Item *item,bool asc) # Pointer to TABLE_LIST element added to the total table list */ -TABLE_LIST *add_table_to_list(Table_ident *table, LEX_STRING *alias, - ulong table_options, - thr_lock_type lock_type, - List<String> *use_index, - List<String> *ignore_index) +TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, + Table_ident *table, + LEX_STRING *alias, + ulong table_options, + thr_lock_type lock_type, + List<String> *use_index_arg, + List<String> *ignore_index_arg, + LEX_STRING *option) { register TABLE_LIST *ptr; - THD *thd=current_thd; char *alias_str; DBUG_ENTER("add_table_to_list"); @@ -3567,17 +4874,23 @@ TABLE_LIST *add_table_to_list(Table_ident *table, LEX_STRING *alias, if (check_table_name(table->table.str,table->table.length) || table->db.str && check_db_name(table->db.str)) { - net_printf(&thd->net,ER_WRONG_TABLE_NAME,table->table.str); + net_printf(thd, ER_WRONG_TABLE_NAME, table->table.str); DBUG_RETURN(0); } if (!alias) /* Alias is case sensitive */ + { + if (table->sel) + { + net_printf(thd,ER_DERIVED_MUST_HAVE_ALIAS); + DBUG_RETURN(0); + } if (!(alias_str=thd->memdup(alias_str,table->table.length+1))) DBUG_RETURN(0); - + } if (!(ptr = (TABLE_LIST *) thd->calloc(sizeof(TABLE_LIST)))) DBUG_RETURN(0); /* purecov: inspected */ - if (table->db.str) + if (table->db.str) { ptr->db= table->db.str; ptr->db_length= table->db.length; @@ -3589,42 +4902,51 @@ TABLE_LIST *add_table_to_list(Table_ident *table, LEX_STRING *alias, } else { - ptr->db= (char*) ""; + /* The following can't be "" as we may do 'casedn_str()' on it */ + ptr->db= empty_c_string; ptr->db_length= 0; } - + if (thd->current_arena->is_stmt_prepare()) + ptr->db= thd->strdup(ptr->db); + ptr->alias= alias_str; - table_case_convert(table->table.str, table->table.length); + if (lower_case_table_names && table->table.length) + my_casedn_str(files_charset_info, table->table.str); ptr->real_name=table->table.str; ptr->real_name_length=table->table.length; - ptr->lock_type= lock_type; + ptr->lock_type= lock_type; ptr->updating= test(table_options & TL_OPTION_UPDATING); ptr->force_index= test(table_options & TL_OPTION_FORCE_INDEX); - if (use_index) - ptr->use_index=(List<String> *) thd->memdup((gptr) use_index, - sizeof(*use_index)); - if (ignore_index) - ptr->ignore_index=(List<String> *) thd->memdup((gptr) ignore_index, - sizeof(*ignore_index)); - + ptr->ignore_leaves= test(table_options & TL_OPTION_IGNORE_LEAVES); + ptr->derived= table->sel; + ptr->cacheable_table= 1; + if (use_index_arg) + ptr->use_index=(List<String> *) thd->memdup((gptr) use_index_arg, + sizeof(*use_index_arg)); + if (ignore_index_arg) + ptr->ignore_index=(List<String> *) thd->memdup((gptr) ignore_index_arg, + sizeof(*ignore_index_arg)); + ptr->option= option ? 
option->str : 0; /* check that used name is unique */ if (lock_type != TL_IGNORE) { - for (TABLE_LIST *tables=(TABLE_LIST*) thd->lex.select->table_list.first ; + for (TABLE_LIST *tables=(TABLE_LIST*) table_list.first ; tables ; tables=tables->next) { - if (!strcmp(alias_str,tables->alias) && !strcmp(ptr->db, tables->db)) + if (!my_strcasecmp(table_alias_charset, alias_str, tables->alias) && + !strcmp(ptr->db, tables->db)) { - net_printf(&thd->net,ER_NONUNIQ_TABLE,alias_str); /* purecov: tested */ + net_printf(thd,ER_NONUNIQ_TABLE,alias_str); /* purecov: tested */ DBUG_RETURN(0); /* purecov: tested */ } } } - thd->lex.select->table_list.link_in_list((byte*) ptr,(byte**) &ptr->next); + table_list.link_in_list((byte*) ptr, (byte**) &ptr->next); DBUG_RETURN(ptr); } + /* Set lock for all tables in current select level @@ -3638,15 +4960,14 @@ TABLE_LIST *add_table_to_list(Table_ident *table, LEX_STRING *alias, query */ -void set_lock_for_tables(thr_lock_type lock_type) +void st_select_lex::set_lock_for_tables(thr_lock_type lock_type) { - THD *thd=current_thd; bool for_update= lock_type >= TL_READ_NO_INSERT; DBUG_ENTER("set_lock_for_tables"); DBUG_PRINT("enter", ("lock_type: %d for_update: %d", lock_type, for_update)); - for (TABLE_LIST *tables= (TABLE_LIST*) thd->lex.select->table_list.first ; + for (TABLE_LIST *tables= (TABLE_LIST*) table_list.first ; tables ; tables=tables->next) { @@ -3658,70 +4979,59 @@ void set_lock_for_tables(thr_lock_type lock_type) /* -** This is used for UNION to create a new table list of all used tables -** The table_list->table entry in all used tables are set to point -** to the entries in this list. -*/ + Create a fake SELECT_LEX for a unit -static bool create_total_list(THD *thd, LEX *lex, - TABLE_LIST **result, bool skip_first) -{ - /* Handle the case when we are not using union */ - if (!lex->select_lex.next) - { - *result= (TABLE_LIST*) lex->select_lex.table_list.first; - return 0; - } + SYNOPSIS: + add_fake_select_lex() + thd thread handle + + DESCRIPTION + The method create a fake SELECT_LEX object for a unit. + This object is created for any union construct containing a union + operation and also for any single select union construct of the form + (SELECT ... ORDER BY order_list [LIMIT n]) ORDER BY ... + or of the form + (SELECT ... ORDER BY LIMIT n) ORDER BY ... + + NOTES + The object is used to retrieve rows from the temporary table + where the result on the union is obtained. 
- /* We should skip first table if SQL command is SQLCOM_CREATE_TABLE */ - SELECT_LEX *sl; - TABLE_LIST **new_table_list= result, *aux; + RETURN VALUES + 1 on failure to create the object + 0 on success +*/ - *new_table_list=0; // end result list - for (sl= &lex->select_lex; sl; sl=sl->next) +bool st_select_lex_unit::add_fake_select_lex(THD *thd) +{ + SELECT_LEX *first_sl= first_select(); + DBUG_ENTER("add_fake_select_lex"); + DBUG_ASSERT(!fake_select_lex); + + if (!(fake_select_lex= new (thd->mem_root) SELECT_LEX())) + DBUG_RETURN(1); + fake_select_lex->include_standalone(this, + (SELECT_LEX_NODE**)&fake_select_lex); + fake_select_lex->select_number= INT_MAX; + fake_select_lex->make_empty_select(); + fake_select_lex->linkage= GLOBAL_OPTIONS_TYPE; + fake_select_lex->select_limit= HA_POS_ERROR; + + if (!first_sl->next_select()) { - if (sl->order_list.first && sl->next && !sl->braces) - { - net_printf(&thd->net,ER_WRONG_USAGE,"UNION","ORDER BY"); - return 1; - } - if ((aux= (TABLE_LIST*) sl->table_list.first)) - { - TABLE_LIST *next; - for (; aux; aux=next) - { - TABLE_LIST *cursor= *result; - if (skip_first && cursor) - cursor= cursor->next; - next= aux->next; - for ( ; cursor; cursor=cursor->next) - if (!strcmp(cursor->db,aux->db) && - !strcmp(cursor->real_name,aux->real_name) && - !strcmp(cursor->alias, aux->alias)) - break; - if (!cursor) - { - /* Add not used table to the total table list */ - if (!(cursor = (TABLE_LIST *) thd->memdup((char*) aux, - sizeof(*aux)))) - { - send_error(&thd->net,0); - return 1; - } - *new_table_list= cursor; - new_table_list= &cursor->next; - *new_table_list=0; // end result list - } - else - aux->shared=1; // Mark that it's used twice - aux->table= my_reinterpret_cast(TABLE *) (cursor); - } - } + /* + This works only for + (SELECT ... ORDER BY list [LIMIT n]) ORDER BY order_list [LIMIT m], + (SELECT ... LIMIT n) ORDER BY order_list [LIMIT m] + just before the parser starts processing order_list + */ + global_parameters= fake_select_lex; + fake_select_lex->no_table_names_allowed= 1; + thd->lex->current_select= fake_select_lex; } - return 0; + DBUG_RETURN(0); } - void add_join_on(TABLE_LIST *b,Item *expr) { if (expr) @@ -3761,47 +5071,87 @@ void add_join_natural(TABLE_LIST *a,TABLE_LIST *b) b->natural_join=a; } - /* Check if name is used in table list */ - -bool check_dup(const char *db, const char *name, TABLE_LIST *tables) -{ - for (; tables ; tables=tables->next) - if (!strcmp(name,tables->real_name) && !strcmp(db,tables->db)) - return 1; - return 0; -} +/* + Reload/resets privileges and the different caches. + SYNOPSIS + reload_acl_and_cache() + thd Thread handler + options What should be reset/reloaded (tables, privileges, + slave...) + tables Tables to flush (if any) + write_to_binlog Depending on 'options', it may be very bad to write the + query to the binlog (e.g. FLUSH SLAVE); this is a + pointer where, if it is not NULL, reload_acl_and_cache() + will put 0 if it thinks we really should not write to + the binlog. Otherwise it will put 1. 
-/* - Reload/resets privileges and the different caches + RETURN + 0 ok + !=0 error */ -bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables) +bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, + bool *write_to_binlog) { bool result=0; - bool error_already_sent=0; select_errors=0; /* Write if more errors */ + bool tmp_write_to_binlog= 1; +#ifndef NO_EMBEDDED_ACCESS_CHECKS if (options & REFRESH_GRANT) { - acl_reload(thd); - grant_reload(thd); - if (mqh_used) - reset_mqh(thd,(LEX_USER *) NULL,true); + THD *tmp_thd= 0; + /* + If reload_acl_and_cache() is called from SIGHUP handler we have to + allocate temporary THD for execution of acl_reload()/grant_reload(). + */ + if (!thd && (thd= (tmp_thd= new THD))) + thd->store_globals(); + if (thd) + { + (void)acl_reload(thd); + (void)grant_reload(thd); + if (mqh_used) + reset_mqh(thd, (LEX_USER *) NULL, TRUE); + } + if (tmp_thd) + { + delete tmp_thd; + /* Remember that we don't have a THD */ + my_pthread_setspecific_ptr(THR_THD, 0); + thd= 0; + } } +#endif if (options & REFRESH_LOG) { /* Flush the normal query log, the update log, the binary log, the slow query log, and the relay log (if it exists). */ + + /* + Writing this command to the binlog may result in infinite loops when + doing mysqlbinlog|mysql, and anyway it does not really make sense to + log it automatically (would cause more trouble to users than it would + help them) + */ + tmp_write_to_binlog= 0; mysql_log.new_file(1); mysql_update_log.new_file(1); mysql_bin_log.new_file(1); mysql_slow_log.new_file(1); +#ifdef HAVE_REPLICATION + if (mysql_bin_log.is_open() && expire_logs_days) + { + long purge_time= time(0) - expire_logs_days*24*60*60; + if (purge_time >= 0) + mysql_bin_log.purge_logs_before_date(purge_time); + } pthread_mutex_lock(&LOCK_active_mi); rotate_relay_log(active_mi); pthread_mutex_unlock(&LOCK_active_mi); - +#endif if (ha_flush_logs()) result=1; if (flush_error_log()) @@ -3818,10 +5168,38 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables) query_cache.flush(); // RESET QUERY CACHE } #endif /*HAVE_QUERY_CACHE*/ - if (options & (REFRESH_TABLES | REFRESH_READ_LOCK)) + /* + Note that if REFRESH_READ_LOCK bit is set then REFRESH_TABLES is set too + (see sql_yacc.yy) + */ + if (options & (REFRESH_TABLES | REFRESH_READ_LOCK)) { if ((options & REFRESH_READ_LOCK) && thd) { + /* + We must not try to aspire a global read lock if we have a write + locked table. This would lead to a deadlock when trying to + reopen (and re-lock) the table after the flush. + */ + if (thd->locked_tables) + { + THR_LOCK_DATA **lock_p= thd->locked_tables->locks; + THR_LOCK_DATA **end_p= lock_p + thd->locked_tables->lock_count; + + for (; lock_p < end_p; lock_p++) + { + if ((*lock_p)->type == TL_WRITE) + { + my_error(ER_LOCK_OR_ACTIVE_TRANSACTION, MYF(0)); + return 1; + } + } + } + /* + Writing to the binlog could cause deadlocks, as we don't log + UNLOCK TABLES + */ + tmp_write_to_binlog= 0; if (lock_global_read_lock(thd)) return 1; result=close_cached_tables(thd,(options & REFRESH_FAST) ? 0 : 1, @@ -3830,6 +5208,7 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables) } else result=close_cached_tables(thd,(options & REFRESH_FAST) ? 
0 : 1, tables); + my_dbopt_cleanup(); } if (options & REFRESH_HOSTS) hostname_cache_refresh(); @@ -3837,9 +5216,14 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables) refresh_status(); if (options & REFRESH_THREADS) flush_thread_cache(); +#ifdef HAVE_REPLICATION if (options & REFRESH_MASTER) + { + tmp_write_to_binlog= 0; if (reset_master(thd)) result=1; + } +#endif #ifdef OPENSSL if (options & REFRESH_DES_KEY_FILE) { @@ -3847,37 +5231,23 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables) result=load_des_key_file(des_key_file); } #endif +#ifdef HAVE_REPLICATION if (options & REFRESH_SLAVE) { + tmp_write_to_binlog= 0; pthread_mutex_lock(&LOCK_active_mi); if (reset_slave(thd, active_mi)) - { result=1; - /* - reset_slave() sends error itself. - If it didn't, one would either change reset_slave()'s prototype, to - pass *errorcode and *errmsg to it when it's called or - change reset_slave to use my_error() to register the error. - */ - error_already_sent=1; - } pthread_mutex_unlock(&LOCK_active_mi); } +#endif if (options & REFRESH_USER_RESOURCES) reset_mqh(thd,(LEX_USER *) NULL); - - if (thd && !error_already_sent) - { - if (result) - send_error(&thd->net,0); - else - send_ok(&thd->net); - } - + if (write_to_binlog) + *write_to_binlog= tmp_write_to_binlog; return result; } - /* kill on thread @@ -3919,24 +5289,25 @@ void kill_one_thread(THD *thd, ulong id) } if (!error) - send_ok(&thd->net); + send_ok(thd); else - net_printf(&thd->net,error,id); + net_printf(thd,error,id); } + /* Clear most status variables */ static void refresh_status(void) { - pthread_mutex_lock(&THR_LOCK_keycache); pthread_mutex_lock(&LOCK_status); for (struct show_var_st *ptr=status_vars; ptr->name; ptr++) { if (ptr->type == SHOW_LONG) - *(ulong*) ptr->value=0; + *(ulong*) ptr->value= 0; } + /* Reset the counters of all key caches (default and named). 
*/ + process_key_caches(reset_key_cache_counters); pthread_mutex_unlock(&LOCK_status); - pthread_mutex_unlock(&THR_LOCK_keycache); } @@ -3966,6 +5337,7 @@ static bool append_file_to_dir(THD *thd, const char **filename_ptr, return 0; } + /* Check if the select is a simple select (not an union) @@ -3980,64 +5352,502 @@ static bool append_file_to_dir(THD *thd, const char **filename_ptr, bool check_simple_select() { THD *thd= current_thd; - if (thd->lex.select != &thd->lex.select_lex) + if (thd->lex->current_select != &thd->lex->select_lex) { char command[80]; - strmake(command, thd->lex.yylval->symbol.str, - min(thd->lex.yylval->symbol.length, sizeof(command)-1)); - net_printf(&thd->net, ER_CANT_USE_OPTION_HERE, command); + strmake(command, thd->lex->yylval->symbol.str, + min(thd->lex->yylval->symbol.length, sizeof(command)-1)); + net_printf(thd, ER_CANT_USE_OPTION_HERE, command); return 1; } return 0; } + +Comp_creator *comp_eq_creator(bool invert) +{ + return invert?(Comp_creator *)&ne_creator:(Comp_creator *)&eq_creator; +} + + +Comp_creator *comp_ge_creator(bool invert) +{ + return invert?(Comp_creator *)<_creator:(Comp_creator *)&ge_creator; +} + + +Comp_creator *comp_gt_creator(bool invert) +{ + return invert?(Comp_creator *)&le_creator:(Comp_creator *)>_creator; +} + + +Comp_creator *comp_le_creator(bool invert) +{ + return invert?(Comp_creator *)>_creator:(Comp_creator *)&le_creator; +} + + +Comp_creator *comp_lt_creator(bool invert) +{ + return invert?(Comp_creator *)&ge_creator:(Comp_creator *)<_creator; +} + + +Comp_creator *comp_ne_creator(bool invert) +{ + return invert?(Comp_creator *)&eq_creator:(Comp_creator *)&ne_creator; +} + + /* - Setup locking for multi-table updates. Used by the replication slave. - Replication slave SQL thread examines (all_tables_not_ok()) the - locking state of referenced tables to determine if the query has to - be executed or ignored. Since in multi-table update, the - 'default' lock is read-only, this lock is corrected early enough by - calling this function, before the slave decides to execute/ignore. + Construct ALL/ANY/SOME subquery Item SYNOPSIS - check_multi_update_lock() - thd Current thread - tables List of user-supplied tables - fields List of fields requiring update + all_any_subquery_creator() + left_expr - pointer to left expression + cmp - compare function creator + all - true if we create ALL subquery + select_lex - pointer on parsed subquery structure + + RETURN VALUE + constructed Item (or 0 if out of memory) +*/ +Item * all_any_subquery_creator(Item *left_expr, + chooser_compare_func_creator cmp, + bool all, + SELECT_LEX *select_lex) +{ + if ((cmp == &comp_eq_creator) && !all) // = ANY <=> IN + return new Item_in_subselect(left_expr, select_lex); - RETURN VALUES - 0 ok - 1 error + if ((cmp == &comp_ne_creator) && all) // <> ALL <=> NOT IN + return new Item_func_not(new Item_in_subselect(left_expr, select_lex)); + + Item_allany_subselect *it= + new Item_allany_subselect(left_expr, (*cmp)(all), select_lex, all); + if (all) + return it->upper_item= new Item_func_not_all(it); /* ALL */ + + return it->upper_item= new Item_func_nop_all(it); /* ANY/SOME */ +} + + +/* + CREATE INDEX and DROP INDEX are implemented by calling ALTER TABLE with + the proper arguments. This isn't very fast but it should work for most + cases. + + In the future ALTER TABLE will notice that only added indexes + and create these one by one for the existing table without having to do + a full rebuild. 
+ + One should normally create all indexes with CREATE TABLE or ALTER TABLE. */ -static bool check_multi_update_lock(THD *thd, TABLE_LIST *tables, - List<Item> *fields) + +int mysql_create_index(THD *thd, TABLE_LIST *table_list, List<Key> &keys) { - bool res= 1; + List<create_field> fields; + ALTER_INFO alter_info; + alter_info.flags= ALTER_ADD_INDEX; + alter_info.is_simple= 0; + HA_CREATE_INFO create_info; + DBUG_ENTER("mysql_create_index"); + bzero((char*) &create_info,sizeof(create_info)); + create_info.db_type=DB_TYPE_DEFAULT; + create_info.default_table_charset= thd->variables.collation_database; + DBUG_RETURN(mysql_alter_table(thd,table_list->db,table_list->real_name, + &create_info, table_list, + fields, keys, 0, (ORDER*)0, + DUP_ERROR, 0, &alter_info)); +} + + +int mysql_drop_index(THD *thd, TABLE_LIST *table_list, ALTER_INFO *alter_info) +{ + List<create_field> fields; + List<Key> keys; + HA_CREATE_INFO create_info; + DBUG_ENTER("mysql_drop_index"); + bzero((char*) &create_info,sizeof(create_info)); + create_info.db_type=DB_TYPE_DEFAULT; + create_info.default_table_charset= thd->variables.collation_database; + alter_info->clear(); + alter_info->flags= ALTER_DROP_INDEX; + alter_info->is_simple= 0; + DBUG_RETURN(mysql_alter_table(thd,table_list->db,table_list->real_name, + &create_info, table_list, + fields, keys, 0, (ORDER*)0, + DUP_ERROR, 0, alter_info)); +} + + +/* + Multi update query pre-check + + SYNOPSIS + multi_update_precheck() + thd Thread handler + tables Global table list + + RETURN VALUE + 0 OK + 1 Error (message is sent to user) + -1 Error (message is not sent to user) +*/ + +int multi_update_precheck(THD *thd, TABLE_LIST *tables) +{ + DBUG_ENTER("multi_update_precheck"); + const char *msg= 0; TABLE_LIST *table; - DBUG_ENTER("check_multi_update_lock"); - - if (check_db_used(thd, tables)) - goto error; + LEX *lex= thd->lex; + SELECT_LEX *select_lex= &lex->select_lex; + TABLE_LIST *update_list= (TABLE_LIST*)select_lex->table_list.first; + if (select_lex->item_list.elements != lex->value_list.elements) + { + my_error(ER_WRONG_VALUE_COUNT, MYF(0)); + DBUG_RETURN(-1); + } /* Ensure that we have UPDATE or SELECT privilege for each table The exact privilege is checked in mysql_multi_update() */ - for (table= tables ; table ; table= table->next) + for (table= update_list; table; table= table->next) { - TABLE_LIST *save= table->next; - table->next= 0; - if (check_one_table_access(thd, UPDATE_ACL, table, 1) && - check_one_table_access(thd, SELECT_ACL, table, 0)) - goto error; - table->next= save; + if (table->derived) + table->grant.privilege= SELECT_ACL; + else if ((check_access(thd, UPDATE_ACL, table->db, + &table->grant.privilege, 0, 1) || + grant_option && + check_grant(thd, UPDATE_ACL, table, 0, 1, 1)) && + (check_access(thd, SELECT_ACL, table->db, + &table->grant.privilege, 0, 0) || + grant_option && check_grant(thd, SELECT_ACL, table, 0, 1, 0))) + DBUG_RETURN(1); + + /* + We assign following flag only to copy of table, because it will + be checked only if query contains subqueries i.e. only if copy exists + */ + if (table->table_list) + table->table_list->table_in_update_from_clause= 1; + } + /* + Is there tables of subqueries? 
+ */ + if (&lex->select_lex != lex->all_selects_list || lex->time_zone_tables_used) + { + DBUG_PRINT("info",("Checking sub query list")); + for (table= tables; table; table= table->next) + { + if (my_tz_check_n_skip_implicit_tables(&table, + lex->time_zone_tables_used)) + continue; + else if (table->table_in_update_from_clause) + { + /* + If we check table by local TABLE_LIST copy then we should copy + grants to global table list, because it will be used for table + opening. + */ + if (table->table_list) + table->grant= table->table_list->grant; + } + else if (!table->derived) + { + if (check_access(thd, SELECT_ACL, table->db, + &table->grant.privilege, 0, 0) || + grant_option && check_grant(thd, SELECT_ACL, table, 0, 1, 0)) + DBUG_RETURN(1); + } + } } + + if (select_lex->order_list.elements) + msg= "ORDER BY"; + else if (select_lex->select_limit && select_lex->select_limit != + HA_POS_ERROR) + msg= "LIMIT"; + if (msg) + { + my_error(ER_WRONG_USAGE, MYF(0), "UPDATE", msg); + DBUG_RETURN(-1); + } + DBUG_RETURN(0); +} + +/* + Multi delete query pre-check + + SYNOPSIS + multi_delete_precheck() + thd Thread handler + tables Global table list + table_count Pointer to table counter + + RETURN VALUE + 0 OK + 1 error (message is sent to user) + -1 error (message is not sent to user) +*/ + +int multi_delete_precheck(THD *thd, TABLE_LIST *tables, uint *table_count) +{ + DBUG_ENTER("multi_delete_precheck"); + SELECT_LEX *select_lex= &thd->lex->select_lex; + TABLE_LIST *aux_tables= + (TABLE_LIST *)thd->lex->auxilliary_table_list.first; + TABLE_LIST *delete_tables= (TABLE_LIST *)select_lex->table_list.first; + TABLE_LIST *target_tbl; + + *table_count= 0; + + /* sql_yacc guarantees that tables and aux_tables are not zero */ + DBUG_ASSERT(aux_tables != 0); + if (check_db_used(thd, tables) || check_db_used(thd,aux_tables) || + check_table_access(thd,SELECT_ACL, tables,0) || + check_table_access(thd,DELETE_ACL, aux_tables,0)) + DBUG_RETURN(1); + if ((thd->options & OPTION_SAFE_UPDATES) && !select_lex->where) + { + my_error(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE, MYF(0)); + DBUG_RETURN(-1); + } + for (target_tbl= aux_tables; target_tbl; target_tbl= target_tbl->next) + { + (*table_count)++; + /* All tables in aux_tables must be found in FROM PART */ + TABLE_LIST *walk; + walk= get_table_by_alias(delete_tables,target_tbl->db,target_tbl->alias); + if (!walk) + { + my_error(ER_UNKNOWN_TABLE, MYF(0), target_tbl->real_name, + "MULTI DELETE"); + DBUG_RETURN(-1); + } + if (walk->derived) + { + my_error(ER_NON_UPDATABLE_TABLE, MYF(0), target_tbl->real_name, + "DELETE"); + DBUG_RETURN(-1); + } + walk->lock_type= target_tbl->lock_type; + target_tbl->table_list= walk; // Remember corresponding table - if (mysql_multi_update_lock(thd, tables, fields)) - goto error; - - res= 0; - -error: - DBUG_RETURN(res); + /* in case of subselects, we need to set lock_type in + * corresponding table in list of all tables */ + if (walk->table_list) + { + target_tbl->table_list= walk->table_list; + walk->table_list->lock_type= walk->lock_type; + } + } + DBUG_RETURN(0); +} + + +/* + simple UPDATE query pre-check + + SYNOPSIS + update_precheck() + thd Thread handler + tables Global table list + + RETURN VALUE + 0 OK + 1 Error (message is sent to user) + -1 Error (message is not sent to user) +*/ + +int update_precheck(THD *thd, TABLE_LIST *tables) +{ + DBUG_ENTER("update_precheck"); + if (thd->lex->select_lex.item_list.elements != thd->lex->value_list.elements) + { + my_error(ER_WRONG_VALUE_COUNT, MYF(0)); + DBUG_RETURN(-1); + } + 
DBUG_RETURN((check_db_used(thd, tables) || + check_one_table_access(thd, UPDATE_ACL, tables)) ? 1 : 0); +} + + +/* + simple DELETE query pre-check + + SYNOPSIS + delete_precheck() + thd Thread handler + tables Global table list + + RETURN VALUE + 0 OK + 1 error (message is sent to user) + -1 error (message is not sent to user) +*/ + +int delete_precheck(THD *thd, TABLE_LIST *tables) +{ + DBUG_ENTER("delete_precheck"); + if (check_one_table_access(thd, DELETE_ACL, tables)) + DBUG_RETURN(1); + /* Set privilege for the WHERE clause */ + tables->grant.want_privilege=(SELECT_ACL & ~tables->grant.privilege); + DBUG_RETURN(0); +} + + +/* + simple INSERT query pre-check + + SYNOPSIS + insert_precheck() + thd Thread handler + tables Global table list + + RETURN VALUE + 0 OK + 1 error (message is sent to user) + -1 error (message is not sent to user) +*/ + +int insert_precheck(THD *thd, TABLE_LIST *tables) +{ + LEX *lex= thd->lex; + DBUG_ENTER("insert_precheck"); + + /* + Check that we have modify privileges for the first table and + select privileges for the rest + */ + ulong privilege= INSERT_ACL | + (lex->duplicates == DUP_REPLACE ? DELETE_ACL : 0) | + (lex->duplicates == DUP_UPDATE ? UPDATE_ACL : 0); + + if (check_one_table_access(thd, privilege, tables)) + DBUG_RETURN(1); + + if (lex->update_list.elements != lex->value_list.elements) + { + my_error(ER_WRONG_VALUE_COUNT, MYF(0)); + DBUG_RETURN(-1); + } + DBUG_RETURN(0); +} + + +/* + CREATE TABLE query pre-check + + SYNOPSIS + create_table_precheck() + thd Thread handler + tables Global table list + create_table Table which will be created + + RETURN VALUE + 0 OK + 1 Error (message is sent to user) +*/ + +int create_table_precheck(THD *thd, TABLE_LIST *tables, + TABLE_LIST *create_table) +{ + LEX *lex= thd->lex; + SELECT_LEX *select_lex= &lex->select_lex; + ulong want_priv; + int error= 1; // Error message is given + DBUG_ENTER("create_table_precheck"); + + want_priv= ((lex->create_info.options & HA_LEX_CREATE_TMP_TABLE) ? + CREATE_TMP_ACL : CREATE_ACL); + lex->create_info.alias= create_table->alias; + if (check_access(thd, want_priv, create_table->db, + &create_table->grant.privilege, 0, 0) || + check_merge_table_access(thd, create_table->db, + (TABLE_LIST *) + lex->create_info.merge_list.first)) + goto err; + if (grant_option && want_priv != CREATE_TMP_ACL && + check_grant(thd, want_priv, create_table, 0, UINT_MAX, 0)) + goto err; + + if (select_lex->item_list.elements) + { + /* Check permissions for used tables in CREATE TABLE ... SELECT */ + + /* + For temporary tables or PREPARED STATEMENTS we don't have to check + if the created table exists + */ + if (!(lex->create_info.options & HA_LEX_CREATE_TMP_TABLE) && + ! thd->current_arena->is_stmt_prepare() && + find_real_table_in_list(tables, create_table->db, + create_table->real_name)) + { + net_printf(thd,ER_UPDATE_TABLE_USED, create_table->real_name); + + goto err; + } + if (lex->create_info.used_fields & HA_CREATE_USED_UNION) + { + TABLE_LIST *tab; + for (tab= tables; tab; tab= tab->next) + { + if (find_real_table_in_list((TABLE_LIST*) lex->create_info.
+ merge_list.first, + tables->db, tab->real_name)) + { + net_printf(thd, ER_UPDATE_TABLE_USED, tab->real_name); + goto err; + } + } + } + + if (tables && check_table_access(thd, SELECT_ACL, tables,0)) + goto err; + } + error= 0; + +err: + DBUG_RETURN(error); +} + + +/* + Negate the given expression + + SYNOPSIS + negate_expression() + thd thread handler + expr expression for negation + + RETURN + negated expression +*/ + +Item *negate_expression(THD *thd, Item *expr) +{ + Item *negated; + if (expr->type() == Item::FUNC_ITEM && + ((Item_func *) expr)->functype() == Item_func::NOT_FUNC) + { + /* it is NOT(NOT( ... )) */ + Item *arg= ((Item_func *) expr)->arguments()[0]; + enum_parsing_place place= thd->lex->current_select->parsing_place; + if (arg->is_bool_func() || place == IN_WHERE || place == IN_HAVING) + return arg; + /* + if it is not a boolean function then we have to emulate the value of + not(not(a)); it will be a != 0 + */ + return new Item_func_ne(arg, new Item_int((char*) "0", 0, 1)); + } + + if ((negated= expr->neg_transformer(thd)) != 0) + return negated; + return new Item_func_not(expr); } diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc new file mode 100644 index 00000000000..741d84eab44 --- /dev/null +++ b/sql/sql_prepare.cc @@ -0,0 +1,2145 @@ +/* Copyright (C) 1995-2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/********************************************************************** +This file contains the implementation of prepare and execute. + +Prepare: + + - Server gets the query from the client with command 'COM_PREPARE', + in the following format: + [COM_PREPARE:1] [query] + - Parse the query and recognize any parameter markers '?' and + store their information in lex->param_list + - Allocate a new statement for this prepare and keep it in the + 'thd->prepared_statements' pool. + - Without executing the query, return to the client the total + number of parameters along with result-set metadata information + (if any) in the following format: + [STMT_ID:4] + [Column_count:2] + [Param_count:2] + [Columns meta info] (if Column_count > 0) + [Params meta info] (if Param_count > 0 ) (TODO : 4.1.1) + +Prepare-execute: + + - Server gets the command 'COM_EXECUTE' to execute the + previously prepared query. If there are any param markers, then the client + will send the data in the following format: + [COM_EXECUTE:1] + [STMT_ID:4] + [NULL_BITS:(param_count+7)/8] + [TYPES_SUPPLIED_BY_CLIENT(0/1):1] + [[length]data] + [[length]data] .. [[length]data]. + (Note: except for string/binary types, all other types will not be + supplied with a length field) + - Replace the param items with this new data. If it is the first execute, + or the types were altered by the client, then set up the conversion routines.
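    As an illustrative sketch only (not taken from a real client trace, and
    assuming the little-endian layout implied by int4store/sint4korr), a
    repeated execution of statement id 1 with two INT parameters, the first
    NULL and the second 1003, might be laid out as:
      [COM_EXECUTE]              1 byte
      [STMT_ID]                  01 00 00 00    (statement id 1)
      [NULL_BITS]                01             (bit 0 set => first param is NULL)
      [TYPES_SUPPLIED_BY_CLIENT] 00             (types not re-sent)
      [data of second param]     eb 03 00 00    (INT value 1003, no length prefix)
    No data bytes would follow for the NULL parameter.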
+ - Execute the query without re-parsing and send back the results + to the client + +Long data handling: + + - Server gets the long data in pieces with command type 'COM_LONG_DATA'. + - The packet received will have the following format: + [COM_LONG_DATA:1][STMT_ID:4][parameter_number:2][data] + - Data from the packet is appended to the long data value buffer for this + placeholder. + - It's up to the client to decide when all data has been sent; the server + neither checks for this nor acknowledges to the client that it received + the data. If there is any error, it will be returned during execute. + +***********************************************************************/ + +#include "mysql_priv.h" +#include "sql_select.h" // for JOIN +#include <m_ctype.h> // for isspace() +#ifdef EMBEDDED_LIBRARY +/* include MYSQL_BIND headers */ +#include <mysql.h> +#endif + +/****************************************************************************** + Prepared_statement: statement which can contain placeholders +******************************************************************************/ + +class Prepared_statement: public Statement +{ +public: + THD *thd; + Item_param **param_array; + uint param_count; + uint last_errno; + char last_error[MYSQL_ERRMSG_SIZE]; +#ifndef EMBEDDED_LIBRARY + bool (*set_params)(Prepared_statement *st, uchar *data, uchar *data_end, + uchar *read_pos, String *expanded_query); +#else + bool (*set_params_data)(Prepared_statement *st, String *expanded_query); +#endif + bool (*set_params_from_vars)(Prepared_statement *stmt, + List<LEX_STRING>& varnames, + String *expanded_query); +public: + Prepared_statement(THD *thd_arg); + virtual ~Prepared_statement(); + void setup_set_params(); + virtual Item_arena::Type type() const; +}; + +static void execute_stmt(THD *thd, Prepared_statement *stmt, + String *expanded_query, bool set_context); + +/****************************************************************************** + Implementation +******************************************************************************/ + + +inline bool is_param_null(const uchar *pos, ulong param_no) +{ + return pos[param_no/8] & (1 << (param_no & 7)); +} + +enum { STMT_QUERY_LOG_LENGTH= 8192 }; + +enum enum_send_error { DONT_SEND_ERROR= 0, SEND_ERROR }; + +/* + Seek prepared statement in statement map by id: returns zero if statement + was not found, pointer otherwise.
+*/ + +static Prepared_statement * +find_prepared_statement(THD *thd, ulong id, const char *where, + enum enum_send_error se) +{ + Statement *stmt= thd->stmt_map.find(id); + + if (stmt == 0 || stmt->type() != Item_arena::PREPARED_STATEMENT) + { + char llbuf[22]; + my_error(ER_UNKNOWN_STMT_HANDLER, MYF(0), 22, llstr(id, llbuf), where); + if (se == SEND_ERROR) + send_error(thd); + return 0; + } + return (Prepared_statement *) stmt; +} + + +/* + Send prepared stmt info to client after prepare +*/ + +#ifndef EMBEDDED_LIBRARY +static bool send_prep_stmt(Prepared_statement *stmt, uint columns) +{ + NET *net= &stmt->thd->net; + char buff[9]; + DBUG_ENTER("send_prep_stmt"); + + buff[0]= 0; /* OK packet indicator */ + int4store(buff+1, stmt->id); + int2store(buff+5, columns); + int2store(buff+7, stmt->param_count); + /* + Send types and names of placeholders to the client + XXX: fix this nasty upcast from List<Item_param> to List<Item> + */ + DBUG_RETURN(my_net_write(net, buff, sizeof(buff)) || + (stmt->param_count && + stmt->thd->protocol_simple.send_fields((List<Item> *) + &stmt->lex->param_list, + 0))); +} +#else +static bool send_prep_stmt(Prepared_statement *stmt, + uint columns __attribute__((unused))) +{ + THD *thd= stmt->thd; + + thd->client_stmt_id= stmt->id; + thd->client_param_count= stmt->param_count; + thd->net.last_errno= 0; + + return 0; +} +#endif /*!EMBEDDED_LIBRARY*/ + + +/* + Read the length of the parameter data and return it to the + caller, positioning the pointer at the param data. +*/ + +#ifndef EMBEDDED_LIBRARY +static ulong get_param_length(uchar **packet, ulong len) +{ + reg1 uchar *pos= *packet; + if (len < 1) + return 0; + if (*pos < 251) + { + (*packet)++; + return (ulong) *pos; + } + if (len < 3) + return 0; + if (*pos == 252) + { + (*packet)+=3; + return (ulong) uint2korr(pos+1); + } + if (len < 4) + return 0; + if (*pos == 253) + { + (*packet)+=4; + return (ulong) uint3korr(pos+1); + } + if (len < 5) + return 0; + (*packet)+=9; // Must be 254 when here + /* + In our client-server protocol all numbers bigger than 2^24 are + stored as 8 bytes with uint8korr. Here we always know that the + parameter length is less than 2^32, so we don't look at the second + 4 bytes. But we still need to obey the protocol, hence the 9 in the + assignment above. + */ + return (ulong) uint4korr(pos+1); +} +#else +#define get_param_length(packet, len) len +#endif /*!EMBEDDED_LIBRARY*/ + + /* + Data conversion routines + SYNOPSIS + set_param_xx() + param parameter item + pos input data buffer + len length of data in the buffer + + All these functions read the data from pos, convert it to the requested + type and assign it to param; pos is advanced by the predefined length. + + Note that NULL handling is examined at first execution + (i.e. when input types are altered) and for all subsequent executions + we don't read any values for this. + + RETURN VALUE + none +*/ + +static void set_param_tiny(Item_param *param, uchar **pos, ulong len) +{ +#ifndef EMBEDDED_LIBRARY + if (len < 1) + return; +#endif + int8 value= (int8) **pos; + param->set_int(param->unsigned_flag ? (longlong) ((uint8) value) : + (longlong) value, 4); + *pos+= 1; +} + +static void set_param_short(Item_param *param, uchar **pos, ulong len) +{ + int16 value; +#ifndef EMBEDDED_LIBRARY + if (len < 2) + return; + value= sint2korr(*pos); +#else + shortget(value, *pos); +#endif + param->set_int(param->unsigned_flag ?
(longlong) ((uint16) value) : + (longlong) value, 6); + *pos+= 2; +} + +static void set_param_int32(Item_param *param, uchar **pos, ulong len) +{ + int32 value; +#ifndef EMBEDDED_LIBRARY + if (len < 4) + return; + value= sint4korr(*pos); +#else + longget(value, *pos); +#endif + param->set_int(param->unsigned_flag ? (longlong) ((uint32) value) : + (longlong) value, 11); + *pos+= 4; +} + +static void set_param_int64(Item_param *param, uchar **pos, ulong len) +{ + longlong value; +#ifndef EMBEDDED_LIBRARY + if (len < 8) + return; + value= (longlong) sint8korr(*pos); +#else + longlongget(value, *pos); +#endif + param->set_int(value, 21); + *pos+= 8; +} + +static void set_param_float(Item_param *param, uchar **pos, ulong len) +{ + float data; +#ifndef EMBEDDED_LIBRARY + if (len < 4) + return; + float4get(data,*pos); +#else + floatget(data, *pos); +#endif + param->set_double((double) data); + *pos+= 4; +} + +static void set_param_double(Item_param *param, uchar **pos, ulong len) +{ + double data; +#ifndef EMBEDDED_LIBRARY + if (len < 8) + return; + float8get(data,*pos); +#else + doubleget(data, *pos); +#endif + param->set_double((double) data); + *pos+= 8; +} + +#ifndef EMBEDDED_LIBRARY + +/* + Read date/time/datetime parameter values from network (binary + protocol). See writing counterparts of these functions in + libmysql.c (store_param_{time,date,datetime}). +*/ + +static void set_param_time(Item_param *param, uchar **pos, ulong len) +{ + MYSQL_TIME tm; + ulong length= get_param_length(pos, len); + + if (length >= 8) + { + uchar *to= *pos; + uint day; + + tm.neg= (bool) to[0]; + day= (uint) sint4korr(to+1); + tm.hour= (uint) to[5] + day * 24; + tm.minute= (uint) to[6]; + tm.second= (uint) to[7]; + tm.second_part= (length > 8) ? (ulong) sint4korr(to+8) : 0; + if (tm.hour > 838) + { + /* TODO: add warning 'Data truncated' here */ + tm.hour= 838; + tm.minute= 59; + tm.second= 59; + } + tm.day= tm.year= tm.month= 0; + } + else + set_zero_time(&tm, MYSQL_TIMESTAMP_TIME); + param->set_time(&tm, MYSQL_TIMESTAMP_TIME, + MAX_TIME_WIDTH * MY_CHARSET_BIN_MB_MAXLEN); + *pos+= length; +} + +static void set_param_datetime(Item_param *param, uchar **pos, ulong len) +{ + MYSQL_TIME tm; + ulong length= get_param_length(pos, len); + + if (length >= 4) + { + uchar *to= *pos; + + tm.neg= 0; + tm.year= (uint) sint2korr(to); + tm.month= (uint) to[2]; + tm.day= (uint) to[3]; + if (length > 4) + { + tm.hour= (uint) to[4]; + tm.minute= (uint) to[5]; + tm.second= (uint) to[6]; + } + else + tm.hour= tm.minute= tm.second= 0; + + tm.second_part= (length > 7) ? 
(ulong) sint4korr(to+7) : 0; + } + else + set_zero_time(&tm, MYSQL_TIMESTAMP_DATETIME); + param->set_time(&tm, MYSQL_TIMESTAMP_DATETIME, + MAX_DATETIME_WIDTH * MY_CHARSET_BIN_MB_MAXLEN); + *pos+= length; +} + +static void set_param_date(Item_param *param, uchar **pos, ulong len) +{ + MYSQL_TIME tm; + ulong length= get_param_length(pos, len); + + if (length >= 4) + { + uchar *to= *pos; + + tm.year= (uint) sint2korr(to); + tm.month= (uint) to[2]; + tm.day= (uint) to[3]; + + tm.hour= tm.minute= tm.second= 0; + tm.second_part= 0; + tm.neg= 0; + } + else + set_zero_time(&tm, MYSQL_TIMESTAMP_DATE); + param->set_time(&tm, MYSQL_TIMESTAMP_DATE, + MAX_DATE_WIDTH * MY_CHARSET_BIN_MB_MAXLEN); + *pos+= length; +} + +#else/*!EMBEDDED_LIBRARY*/ +void set_param_time(Item_param *param, uchar **pos, ulong len) +{ + MYSQL_TIME tm= *((MYSQL_TIME*)*pos); + tm.hour+= tm.day * 24; + tm.day= tm.year= tm.month= 0; + if (tm.hour > 838) + { + /* TODO: add warning 'Data truncated' here */ + tm.hour= 838; + tm.minute= 59; + tm.second= 59; + } + param->set_time(&tm, MYSQL_TIMESTAMP_TIME, + MAX_TIME_WIDTH * MY_CHARSET_BIN_MB_MAXLEN); + +} + +void set_param_datetime(Item_param *param, uchar **pos, ulong len) +{ + MYSQL_TIME *to= (MYSQL_TIME*)*pos; + + param->set_time(to, MYSQL_TIMESTAMP_DATETIME, + MAX_DATETIME_WIDTH * MY_CHARSET_BIN_MB_MAXLEN); +} + +void set_param_date(Item_param *param, uchar **pos, ulong len) +{ + MYSQL_TIME *to= (MYSQL_TIME*)*pos; + + param->set_time(to, MYSQL_TIMESTAMP_DATE, + MAX_DATE_WIDTH * MY_CHARSET_BIN_MB_MAXLEN); +} +#endif /*!EMBEDDED_LIBRARY*/ + + +static void set_param_str(Item_param *param, uchar **pos, ulong len) +{ + ulong length= get_param_length(pos, len); + param->set_str((const char *)*pos, length); + *pos+= length; +} + + +#undef get_param_length + +static void setup_one_conversion_function(THD *thd, Item_param *param, + uchar param_type) +{ + switch (param_type) { + case MYSQL_TYPE_TINY: + param->set_param_func= set_param_tiny; + param->item_type= Item::INT_ITEM; + param->item_result_type= INT_RESULT; + break; + case MYSQL_TYPE_SHORT: + param->set_param_func= set_param_short; + param->item_type= Item::INT_ITEM; + param->item_result_type= INT_RESULT; + break; + case MYSQL_TYPE_LONG: + param->set_param_func= set_param_int32; + param->item_type= Item::INT_ITEM; + param->item_result_type= INT_RESULT; + break; + case MYSQL_TYPE_LONGLONG: + param->set_param_func= set_param_int64; + param->item_type= Item::INT_ITEM; + param->item_result_type= INT_RESULT; + break; + case MYSQL_TYPE_FLOAT: + param->set_param_func= set_param_float; + param->item_type= Item::REAL_ITEM; + param->item_result_type= REAL_RESULT; + break; + case MYSQL_TYPE_DOUBLE: + param->set_param_func= set_param_double; + param->item_type= Item::REAL_ITEM; + param->item_result_type= REAL_RESULT; + break; + case MYSQL_TYPE_TIME: + param->set_param_func= set_param_time; + param->item_type= Item::STRING_ITEM; + param->item_result_type= STRING_RESULT; + break; + case MYSQL_TYPE_DATE: + param->set_param_func= set_param_date; + param->item_type= Item::STRING_ITEM; + param->item_result_type= STRING_RESULT; + break; + case MYSQL_TYPE_DATETIME: + case MYSQL_TYPE_TIMESTAMP: + param->set_param_func= set_param_datetime; + param->item_type= Item::STRING_ITEM; + param->item_result_type= STRING_RESULT; + break; + case MYSQL_TYPE_TINY_BLOB: + case MYSQL_TYPE_MEDIUM_BLOB: + case MYSQL_TYPE_LONG_BLOB: + case MYSQL_TYPE_BLOB: + param->set_param_func= set_param_str; + param->value.cs_info.character_set_of_placeholder= &my_charset_bin; + 
param->value.cs_info.character_set_client= + thd->variables.character_set_client; + param->value.cs_info.final_character_set_of_str_value= &my_charset_bin; + param->item_type= Item::STRING_ITEM; + param->item_result_type= STRING_RESULT; + break; + default: + /* + The client library ensures that we won't get any other typecodes + except typecodes above and typecodes for string types. Marking + label as 'default' lets us to handle malformed packets as well. + */ + { + CHARSET_INFO *fromcs= thd->variables.character_set_client; + CHARSET_INFO *tocs= thd->variables.collation_connection; + uint32 dummy_offset; + + param->value.cs_info.character_set_of_placeholder= fromcs; + param->value.cs_info.character_set_client= fromcs; + + /* + Setup source and destination character sets so that they + are different only if conversion is necessary: this will + make later checks easier. + */ + param->value.cs_info.final_character_set_of_str_value= + String::needs_conversion(0, fromcs, tocs, &dummy_offset) ? + tocs : fromcs; + param->set_param_func= set_param_str; + /* + Exact value of max_length is not known unless data is converted to + charset of connection, so we have to set it later. + */ + param->item_type= Item::STRING_ITEM; + param->item_result_type= STRING_RESULT; + } + } + param->param_type= (enum enum_field_types) param_type; +} + +#ifndef EMBEDDED_LIBRARY +/* + Update the parameter markers by reading data from client packet + and if binary/update log is set, generate the valid query. +*/ + +static bool insert_params_withlog(Prepared_statement *stmt, uchar *null_array, + uchar *read_pos, uchar *data_end, + String *query) +{ + THD *thd= stmt->thd; + Item_param **begin= stmt->param_array; + Item_param **end= begin + stmt->param_count; + uint32 length= 0; + String str; + const String *res; + DBUG_ENTER("insert_params_withlog"); + + if (query->copy(stmt->query, stmt->query_length, default_charset_info)) + DBUG_RETURN(1); + + for (Item_param **it= begin; it < end; ++it) + { + Item_param *param= *it; + if (param->state != Item_param::LONG_DATA_VALUE) + { + if (is_param_null(null_array, it - begin)) + param->set_null(); + else + { + if (read_pos >= data_end) + DBUG_RETURN(1); + param->set_param_func(param, &read_pos, data_end - read_pos); + } + } + res= param->query_val_str(&str); + if (param->convert_str_value(thd)) + DBUG_RETURN(1); /* out of memory */ + + if (query->replace(param->pos_in_query+length, 1, *res)) + DBUG_RETURN(1); + + length+= res->length()-1; + } + DBUG_RETURN(0); +} + + +static bool insert_params(Prepared_statement *stmt, uchar *null_array, + uchar *read_pos, uchar *data_end, + String *expanded_query) +{ + Item_param **begin= stmt->param_array; + Item_param **end= begin + stmt->param_count; + + DBUG_ENTER("insert_params"); + + for (Item_param **it= begin; it < end; ++it) + { + Item_param *param= *it; + if (param->state != Item_param::LONG_DATA_VALUE) + { + if (is_param_null(null_array, it - begin)) + param->set_null(); + else + { + if (read_pos >= data_end) + DBUG_RETURN(1); + param->set_param_func(param, &read_pos, data_end - read_pos); + } + } + if (param->convert_str_value(stmt->thd)) + DBUG_RETURN(1); /* out of memory */ + } + DBUG_RETURN(0); +} + + +static bool setup_conversion_functions(Prepared_statement *stmt, + uchar **data, uchar *data_end) +{ + /* skip null bits */ + uchar *read_pos= *data + (stmt->param_count+7) / 8; + + DBUG_ENTER("setup_conversion_functions"); + + if (*read_pos++) //types supplied / first execute + { + /* + First execute or types altered by the client, 
setup the + conversion routines for all parameters (one time) + */ + Item_param **it= stmt->param_array; + Item_param **end= it + stmt->param_count; + THD *thd= stmt->thd; + for (; it < end; ++it) + { + ushort typecode; + const uint signed_bit= 1 << 15; + + if (read_pos >= data_end) + DBUG_RETURN(1); + + typecode= sint2korr(read_pos); + read_pos+= 2; + (**it).unsigned_flag= test(typecode & signed_bit); + setup_one_conversion_function(thd, *it, (uchar) (typecode & ~signed_bit)); + } + } + *data= read_pos; + DBUG_RETURN(0); +} + +#else + +static bool emb_insert_params(Prepared_statement *stmt, String *expanded_query) +{ + THD *thd= stmt->thd; + Item_param **it= stmt->param_array; + Item_param **end= it + stmt->param_count; + MYSQL_BIND *client_param= stmt->thd->client_params; + + DBUG_ENTER("emb_insert_params"); + + for (; it < end; ++it, ++client_param) + { + Item_param *param= *it; + setup_one_conversion_function(thd, param, client_param->buffer_type); + if (param->state != Item_param::LONG_DATA_VALUE) + { + if (*client_param->is_null) + param->set_null(); + else + { + uchar *buff= (uchar*) client_param->buffer; + param->unsigned_flag= client_param->is_unsigned; + param->set_param_func(param, &buff, + client_param->length ? + *client_param->length : + client_param->buffer_length); + } + } + if (param->convert_str_value(thd)) + DBUG_RETURN(1); /* out of memory */ + } + DBUG_RETURN(0); +} + + +static bool emb_insert_params_withlog(Prepared_statement *stmt, String *query) +{ + THD *thd= stmt->thd; + Item_param **it= stmt->param_array; + Item_param **end= it + stmt->param_count; + MYSQL_BIND *client_param= thd->client_params; + + String str; + const String *res; + uint32 length= 0; + + DBUG_ENTER("emb_insert_params_withlog"); + + if (query->copy(stmt->query, stmt->query_length, default_charset_info)) + DBUG_RETURN(1); + + for (; it < end; ++it, ++client_param) + { + Item_param *param= *it; + setup_one_conversion_function(thd, param, client_param->buffer_type); + if (param->state != Item_param::LONG_DATA_VALUE) + { + if (*client_param->is_null) + param->set_null(); + else + { + uchar *buff= (uchar*)client_param->buffer; + param->unsigned_flag= client_param->is_unsigned; + param->set_param_func(param, &buff, + client_param->length ? + *client_param->length : + client_param->buffer_length); + } + } + res= param->query_val_str(&str); + if (param->convert_str_value(thd)) + DBUG_RETURN(1); /* out of memory */ + + if (query->replace(param->pos_in_query+length, 1, *res)) + DBUG_RETURN(1); + + length+= res->length()-1; + } + DBUG_RETURN(0); +} + +#endif /*!EMBEDDED_LIBRARY*/ + + +/* + Set prepared statement parameters from user variables. + SYNOPSIS + insert_params_from_vars() + stmt Statement + varnames List of variables. 
Caller must ensure that number of variables + in the list is equal to number of statement parameters + query Ignored +*/ + +static bool insert_params_from_vars(Prepared_statement *stmt, + List<LEX_STRING>& varnames, + String *query __attribute__((unused))) +{ + Item_param **begin= stmt->param_array; + Item_param **end= begin + stmt->param_count; + user_var_entry *entry; + LEX_STRING *varname; + List_iterator<LEX_STRING> var_it(varnames); + DBUG_ENTER("insert_params_from_vars"); + + for (Item_param **it= begin; it < end; ++it) + { + Item_param *param= *it; + varname= var_it++; + entry= (user_var_entry*)hash_search(&stmt->thd->user_vars, + (byte*) varname->str, + varname->length); + if (param->set_from_user_var(stmt->thd, entry) || + param->convert_str_value(stmt->thd)) + DBUG_RETURN(1); + } + DBUG_RETURN(0); +} + + +/* + Do the same as insert_params_from_vars but also construct query text for + binary log. + SYNOPSIS + insert_params_from_vars() + stmt Statement + varnames List of variables. Caller must ensure that number of variables + in the list is equal to number of statement parameters + query The query with parameter markers replaced with their values +*/ + +static bool insert_params_from_vars_with_log(Prepared_statement *stmt, + List<LEX_STRING>& varnames, + String *query) +{ + Item_param **begin= stmt->param_array; + Item_param **end= begin + stmt->param_count; + user_var_entry *entry; + LEX_STRING *varname; + DBUG_ENTER("insert_params_from_vars"); + + List_iterator<LEX_STRING> var_it(varnames); + String buf; + const String *val; + uint32 length= 0; + if (query->copy(stmt->query, stmt->query_length, default_charset_info)) + DBUG_RETURN(1); + + for (Item_param **it= begin; it < end; ++it) + { + Item_param *param= *it; + varname= var_it++; + if (get_var_with_binlog(stmt->thd, *varname, &entry)) + DBUG_RETURN(1); + + if (param->set_from_user_var(stmt->thd, entry)) + DBUG_RETURN(1); + /* Insert @'escaped-varname' instead of parameter in the query */ + if (entry) + { + char *begin, *ptr; + buf.length(0); + if (buf.reserve(entry->name.length*2+3)) + DBUG_RETURN(1); + + begin= ptr= buf.c_ptr_quick(); + *ptr++= '@'; + *ptr++= '\''; + ptr+= escape_string_for_mysql(&my_charset_utf8_general_ci, + ptr, entry->name.str, entry->name.length); + *ptr++= '\''; + buf.length(ptr - begin); + val= &buf; + } + else + val= &my_null_string; + + if (param->convert_str_value(stmt->thd)) + DBUG_RETURN(1); /* out of memory */ + + if (query->replace(param->pos_in_query+length, 1, *val)) + DBUG_RETURN(1); + length+= val->length()-1; + } + DBUG_RETURN(0); +} + +/* + Validate INSERT statement: + + SYNOPSIS + mysql_test_insert() + stmt prepared statemen handler + tables list of tables queries + + RETURN VALUE + 0 ok + 1 error, sent to the client + -1 error, not sent to client +*/ +static int mysql_test_insert(Prepared_statement *stmt, + TABLE_LIST *table_list, + List<Item> &fields, + List<List_item> &values_list, + List<Item> &update_fields, + List<Item> &update_values, + enum_duplicates duplic) +{ + THD *thd= stmt->thd; + LEX *lex= stmt->lex; + List_iterator_fast<List_item> its(values_list); + List_item *values; + int res= -1; + TABLE_LIST *insert_table_list= + (TABLE_LIST*) lex->select_lex.table_list.first; + DBUG_ENTER("mysql_test_insert"); + + if ((res= insert_precheck(thd, table_list))) + DBUG_RETURN(res); + + /* + open temporary memory pool for temporary data allocated by derived + tables & preparation procedure + Note that this is done without locks (should not be needed as we will not + access any data here) 
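In the logging variants of the binders (insert_params_withlog earlier in the file and insert_params_from_vars_with_log here), the query text written to the binary log is produced by splicing each parameter's printable value over its one-character '?' marker. Since every splice shifts the positions of the later markers, the running length delta is added to each recorded pos_in_query. A stand-alone sketch of that bookkeeping, with std::string in place of String and the values already rendered as text:

/*
  Sketch of the offset bookkeeping used by insert_params_withlog() and
  insert_params_from_vars_with_log(): marker positions were recorded
  against the original text, so each replacement is shifted by the
  growth accumulated so far.
*/
#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

int main()
{
  std::string query= "SELECT * FROM t WHERE a=? AND b=?";
  size_t marker_pos[]= { 24, 32 };                  /* pos_in_query of each '?' */
  std::vector<std::string> values= { "10", "'abc'" };

  size_t shift= 0;                                  /* the 'length' accumulator */
  for (size_t i= 0; i < values.size(); i++)
  {
    query.replace(marker_pos[i] + shift, 1, values[i]);  /* 1 == length of "?" */
    shift+= values[i].size() - 1;
  }
  printf("%s\n", query.c_str());  /* SELECT * FROM t WHERE a=10 AND b='abc' */
  return 0;
}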
+ If we would use locks, then we have to ensure we are not using + TL_WRITE_DELAYED as having two such locks can cause table corruption. + */ + if (open_normal_and_derived_tables(thd, table_list)) + { + DBUG_RETURN(-1); + } + + if ((values= its++)) + { + uint value_count; + ulong counter= 0; + + table_list->table->insert_values=(byte *)1; // don't allocate insert_values + if ((res= mysql_prepare_insert(thd, table_list, insert_table_list, + insert_table_list, + table_list->table, fields, values, + update_fields, update_values, duplic))) + goto error; + + value_count= values->elements; + its.rewind(); + + while ((values= its++)) + { + counter++; + if (values->elements != value_count) + { + my_printf_error(ER_WRONG_VALUE_COUNT_ON_ROW, + ER(ER_WRONG_VALUE_COUNT_ON_ROW), + MYF(0), counter); + goto error; + } + if (setup_fields(thd, 0, insert_table_list, *values, 0, 0, 0)) + goto error; + } + } + + res= 0; +error: + lex->unit.cleanup(); + table_list->table->insert_values=0; + DBUG_RETURN(res); +} + + +/* + Validate UPDATE statement + + SYNOPSIS + mysql_test_update() + stmt prepared statemen handler + tables list of tables queries + + RETURN VALUE + 0 success + 1 error, sent to client + -1 error, not sent to client +*/ +static int mysql_test_update(Prepared_statement *stmt, + TABLE_LIST *table_list) +{ + int res; + THD *thd= stmt->thd; + SELECT_LEX *select= &stmt->lex->select_lex; + DBUG_ENTER("mysql_test_update"); + + if ((res= update_precheck(thd, table_list))) + DBUG_RETURN(res); + + if (open_and_lock_tables(thd, table_list)) + res= -1; + else + { + TABLE_LIST *update_table_list= (TABLE_LIST *)select->table_list.first; + if (!(res= mysql_prepare_update(thd, table_list, + update_table_list, + &select->where, + select->order_list.elements, + (ORDER *) select->order_list.first))) + { + if (setup_fields(thd, 0, update_table_list, + select->item_list, 1, 0, 0) || + setup_fields(thd, 0, update_table_list, + stmt->lex->value_list, 0, 0, 0)) + res= -1; + } + stmt->lex->unit.cleanup(); + } + /* TODO: here we should send types of placeholders to the client. */ + DBUG_RETURN(res); +} + + +/* + Validate DELETE statement + + SYNOPSIS + mysql_test_delete() + stmt prepared statemen handler + tables list of tables queries + + RETURN VALUE + 0 success + 1 error, sent to client + -1 error, not sent to client +*/ +static int mysql_test_delete(Prepared_statement *stmt, + TABLE_LIST *table_list) +{ + int res; + THD *thd= stmt->thd; + LEX *lex= stmt->lex; + DBUG_ENTER("mysql_test_delete"); + + if ((res= delete_precheck(thd, table_list))) + DBUG_RETURN(res); + + if (open_and_lock_tables(thd, table_list)) + res= -1; + else + { + res= mysql_prepare_delete(thd, table_list, &lex->select_lex.where); + lex->unit.cleanup(); + } + /* TODO: here we should send types of placeholders to the client. */ + DBUG_RETURN(res); +} + + +/* + Validate SELECT statement. + In case of success, if this query is not EXPLAIN, send column list info + back to client. + + SYNOPSIS + mysql_test_select() + stmt prepared statemen handler + tables list of tables queries + + RETURN VALUE + 0 success + 1 error, sent to client + -1 error, not sent to client +*/ + +static int mysql_test_select(Prepared_statement *stmt, + TABLE_LIST *tables, bool text_protocol) +{ + THD *thd= stmt->thd; + LEX *lex= stmt->lex; + SELECT_LEX_UNIT *unit= &lex->unit; + int result= 1; + DBUG_ENTER("mysql_test_select"); + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + ulong privilege= lex->exchange ? 
SELECT_ACL | FILE_ACL : SELECT_ACL; + if (tables) + { + if (check_table_access(thd, privilege, tables,0)) + DBUG_RETURN(1); + } + else if (check_access(thd, privilege, any_db,0,0,0)) + DBUG_RETURN(1); +#endif + + if (!lex->result && !(lex->result= new (stmt->mem_root) select_send)) + { + send_error(thd); + goto err; + } + + if (open_and_lock_tables(thd, tables)) + { + send_error(thd); + goto err; + } + + thd->used_tables= 0; // Updated by setup_fields + + // JOIN::prepare calls + if (unit->prepare(thd, 0, 0, "")) + { + send_error(thd); + goto err_prep; + } + if (!text_protocol) + { + if (lex->describe) + { + if (send_prep_stmt(stmt, 0) || thd->protocol->flush()) + goto err_prep; + } + else + { + /* Make copy of item list, as change_columns may change it */ + List<Item> fields(lex->select_lex.item_list); + + /* Change columns if a procedure like analyse() */ + if (unit->last_procedure && + unit->last_procedure->change_columns(fields)) + goto err_prep; + + /* + We can use lex->result as it should've been + prepared in unit->prepare call above. + */ + if (send_prep_stmt(stmt, lex->result->field_count(fields)) || + lex->result->send_fields(fields, 0) || + thd->protocol->flush()) + goto err_prep; + } + } + result= 0; // ok + +err_prep: + unit->cleanup(); +err: + DBUG_RETURN(result); +} + + +/* + Validate and prepare for execution DO statement expressions + + SYNOPSIS + mysql_test_do_fields() + stmt prepared statemen handler + tables list of tables queries + values list of expressions + + RETURN VALUE + 0 success + 1 error, sent to client + -1 error, not sent to client +*/ + +static int mysql_test_do_fields(Prepared_statement *stmt, + TABLE_LIST *tables, + List<Item> *values) +{ + DBUG_ENTER("mysql_test_do_fields"); + THD *thd= stmt->thd; + int res= 0; + if (tables && (res= check_table_access(thd, SELECT_ACL, tables, 0))) + DBUG_RETURN(res); + + if (tables && (res= open_and_lock_tables(thd, tables))) + { + DBUG_RETURN(res); + } + res= setup_fields(thd, 0, 0, *values, 0, 0, 0); + stmt->lex->unit.cleanup(); + if (res) + DBUG_RETURN(-1); + DBUG_RETURN(0); +} + + +/* + Validate and prepare for execution SET statement expressions + + SYNOPSIS + mysql_test_set_fields() + stmt prepared statemen handler + tables list of tables queries + values list of expressions + + RETURN VALUE + 0 success + 1 error, sent to client + -1 error, not sent to client +*/ +static int mysql_test_set_fields(Prepared_statement *stmt, + TABLE_LIST *tables, + List<set_var_base> *var_list) +{ + DBUG_ENTER("mysql_test_set_fields"); + List_iterator_fast<set_var_base> it(*var_list); + THD *thd= stmt->thd; + set_var_base *var; + int res= 0; + + if (tables && (res= check_table_access(thd, SELECT_ACL, tables, 0))) + DBUG_RETURN(res); + + if (tables && (res= open_and_lock_tables(thd, tables))) + goto error; + while ((var= it++)) + { + if (var->light_check(thd)) + { + stmt->lex->unit.cleanup(); + res= -1; + goto error; + } + } +error: + stmt->lex->unit.cleanup(); + DBUG_RETURN(res); +} + + +/* + Check internal SELECT of the prepared command + + SYNOPSIS + select_like_statement_test() + stmt - prepared table handler + tables - global list of tables + + RETURN VALUE + 0 success + 1 error, sent to client + -1 error, not sent to client +*/ +static int select_like_statement_test(Prepared_statement *stmt, + TABLE_LIST *tables) +{ + DBUG_ENTER("select_like_statement_test"); + THD *thd= stmt->thd; + LEX *lex= stmt->lex; + int res= 0; + + if (tables && (res= open_and_lock_tables(thd, tables))) + goto end; + + thd->used_tables= 0; // Updated by 
setup_fields + + // JOIN::prepare calls + if (lex->unit.prepare(thd, 0, 0, "")) + { + res= thd->net.report_error ? -1 : 1; + } +end: + lex->unit.cleanup(); + DBUG_RETURN(res); +} + + +/* + Validate and prepare for execution CREATE TABLE statement + + SYNOPSIS + mysql_test_create_table() + stmt prepared statemen handler + tables list of tables queries + + RETURN VALUE + 0 success + 1 error, sent to client + -1 error, not sent to client +*/ +static int mysql_test_create_table(Prepared_statement *stmt, + TABLE_LIST *tables) +{ + DBUG_ENTER("mysql_test_create_table"); + THD *thd= stmt->thd; + LEX *lex= stmt->lex; + SELECT_LEX *select_lex= &lex->select_lex; + int res= 0; + + /* Skip first table, which is the table we are creating */ + TABLE_LIST *create_table, *create_table_local; + tables= lex->unlink_first_table(tables, &create_table, + &create_table_local); + + if (!(res= create_table_precheck(thd, tables, create_table)) && + select_lex->item_list.elements) + { + select_lex->resolve_mode= SELECT_LEX::SELECT_MODE; + res= select_like_statement_test(stmt, tables); + select_lex->resolve_mode= SELECT_LEX::NOMATTER_MODE; + } + + /* put tables back for PS rexecuting */ + tables= lex->link_first_table_back(tables, create_table, + create_table_local); + DBUG_RETURN(res); +} + + +/* + Validate and prepare for execution multi update statement + + SYNOPSIS + mysql_test_multiupdate() + stmt prepared statemen handler + tables list of tables queries + + RETURN VALUE + 0 success + 1 error, sent to client + -1 error, not sent to client +*/ +static int mysql_test_multiupdate(Prepared_statement *stmt, + TABLE_LIST *tables) +{ + int res; + if ((res= multi_update_precheck(stmt->thd, tables))) + return res; + return select_like_statement_test(stmt, tables); +} + + +/* + Validate and prepare for execution multi delete statement + + SYNOPSIS + mysql_test_multidelete() + stmt prepared statemen handler + tables list of tables queries + + RETURN VALUE + 0 success + 1 error, sent to client + -1 error, not sent to client +*/ +static int mysql_test_multidelete(Prepared_statement *stmt, + TABLE_LIST *tables) +{ + int res; + stmt->thd->lex->current_select= &stmt->thd->lex->select_lex; + if (add_item_to_list(stmt->thd, new Item_null())) + return -1; + + uint fake_counter; + if ((res= multi_delete_precheck(stmt->thd, tables, &fake_counter))) + return res; + return select_like_statement_test(stmt, tables); +} + + +/* + Validate and prepare for execution INSERT ... 
SELECT statement + + SYNOPSIS + mysql_test_insert_select() + stmt prepared statemen handler + tables list of tables queries + + RETURN VALUE + 0 success + 1 error, sent to client + -1 error, not sent to client +*/ +static int mysql_test_insert_select(Prepared_statement *stmt, + TABLE_LIST *tables) +{ + int res; + LEX *lex= stmt->lex; + if ((res= insert_precheck(stmt->thd, tables))) + return res; + TABLE_LIST *first_local_table= + (TABLE_LIST *)lex->select_lex.table_list.first; + /* Skip first table, which is the table we are inserting in */ + lex->select_lex.table_list.first= (byte*) first_local_table->next; + /* + insert/replace from SELECT give its SELECT_LEX for SELECT, + and item_list belong to SELECT + */ + lex->select_lex.resolve_mode= SELECT_LEX::SELECT_MODE; + res= select_like_statement_test(stmt, tables); + /* revert changes*/ + lex->select_lex.table_list.first= (byte*) first_local_table; + lex->select_lex.resolve_mode= SELECT_LEX::INSERT_MODE; + return res; +} + + +/* + Send the prepare query results back to client + SYNOPSIS + send_prepare_results() + stmt prepared statement + RETURN VALUE + 0 success + 1 error, sent to client +*/ +static int send_prepare_results(Prepared_statement *stmt, bool text_protocol) +{ + THD *thd= stmt->thd; + LEX *lex= stmt->lex; + SELECT_LEX *select_lex= &lex->select_lex; + TABLE_LIST *tables=(TABLE_LIST*) select_lex->table_list.first; + enum enum_sql_command sql_command= lex->sql_command; + int res= 0; + DBUG_ENTER("send_prepare_results"); + DBUG_PRINT("enter",("command: %d, param_count: %ld", + sql_command, stmt->param_count)); + + if ((&lex->select_lex != lex->all_selects_list || + lex->time_zone_tables_used) && + lex->unit.create_total_list(thd, lex, &tables)) + DBUG_RETURN(1); + + switch (sql_command) { + case SQLCOM_REPLACE: + case SQLCOM_INSERT: + res= mysql_test_insert(stmt, tables, lex->field_list, + lex->many_values, + select_lex->item_list, lex->value_list, + lex->duplicates); + break; + + case SQLCOM_UPDATE: + res= mysql_test_update(stmt, tables); + break; + + case SQLCOM_DELETE: + res= mysql_test_delete(stmt, tables); + break; + + case SQLCOM_SELECT: + if ((res= mysql_test_select(stmt, tables, text_protocol))) + goto error; + /* Statement and field info has already been sent */ + DBUG_RETURN(0); + + case SQLCOM_CREATE_TABLE: + res= mysql_test_create_table(stmt, tables); + break; + + case SQLCOM_DO: + res= mysql_test_do_fields(stmt, tables, lex->insert_list); + break; + + case SQLCOM_SET_OPTION: + res= mysql_test_set_fields(stmt, tables, &lex->var_list); + break; + + case SQLCOM_DELETE_MULTI: + res= mysql_test_multidelete(stmt, tables); + break; + + case SQLCOM_UPDATE_MULTI: + res= mysql_test_multiupdate(stmt, tables); + break; + + case SQLCOM_INSERT_SELECT: + case SQLCOM_REPLACE_SELECT: + res= mysql_test_insert_select(stmt, tables); + break; + + case SQLCOM_SHOW_DATABASES: + case SQLCOM_SHOW_PROCESSLIST: + case SQLCOM_SHOW_STORAGE_ENGINES: + case SQLCOM_SHOW_PRIVILEGES: + case SQLCOM_SHOW_COLUMN_TYPES: + case SQLCOM_SHOW_STATUS: + case SQLCOM_SHOW_VARIABLES: + case SQLCOM_SHOW_LOGS: + case SQLCOM_SHOW_TABLES: + case SQLCOM_SHOW_OPEN_TABLES: + case SQLCOM_SHOW_CHARSETS: + case SQLCOM_SHOW_COLLATIONS: + case SQLCOM_SHOW_FIELDS: + case SQLCOM_SHOW_KEYS: + case SQLCOM_SHOW_CREATE_DB: + case SQLCOM_SHOW_GRANTS: + case SQLCOM_DROP_TABLE: + case SQLCOM_RENAME_TABLE: + break; + + default: + /* + All other is not supported yet + */ + res= -1; + my_error(ER_UNSUPPORTED_PS, MYF(0)); + goto error; + } + if (res == 0) + DBUG_RETURN(text_protocol? 
0 : (send_prep_stmt(stmt, 0) || + thd->protocol->flush())); +error: + if (res < 0) + send_error(thd, thd->killed ? ER_SERVER_SHUTDOWN : 0); + DBUG_RETURN(1); +} + +/* + Initialize array of parameters in statement from LEX. + (We need to have quick access to items by number in mysql_stmt_get_longdata). + This is to avoid using malloc/realloc in the parser. +*/ + +static bool init_param_array(Prepared_statement *stmt) +{ + LEX *lex= stmt->lex; + THD *thd= stmt->thd; + if ((stmt->param_count= lex->param_list.elements)) + { + if (stmt->param_count > (uint) UINT_MAX16) + { + /* Error code to be defined in 5.0 */ + send_error(thd, ER_UNKNOWN_ERROR, + "Prepared statement contains too many placeholders."); + return 1; + } + Item_param **to; + List_iterator<Item_param> param_iterator(lex->param_list); + /* Use thd->mem_root as it points at statement mem_root */ + stmt->param_array= (Item_param **) + alloc_root(stmt->thd->mem_root, + sizeof(Item_param*) * stmt->param_count); + if (!stmt->param_array) + { + send_error(thd, ER_OUT_OF_RESOURCES); + return 1; + } + for (to= stmt->param_array; + to < stmt->param_array + stmt->param_count; + ++to) + { + *to= param_iterator++; + } + } + return 0; +} + +/* + Given a query string with parameter markers, create a Prepared Statement + from it and send PS info back to the client. + + SYNOPSIS + mysql_stmt_prepare() + packet query to be prepared + packet_length query string length, including ignored trailing NULL or + quote char. + name NULL or statement name. For unnamed statements binary PS + protocol is used, for named statements text protocol is + used. + RETURN + 0 OK, statement prepared successfully + other Error + + NOTES + This function parses the query and sends the total number of parameters + and resultset metadata information back to client (if any), without + executing the query i.e. without any log/disk writes. This allows the + queries to be re-executed without re-parsing during execute. + + If parameter markers are found in the query, then store the information + using Item_param along with maintaining a list in lex->param_array, so + that a fast and direct retrieval can be made without going through all + field items. + +*/ + +int mysql_stmt_prepare(THD *thd, char *packet, uint packet_length, + LEX_STRING *name) +{ + LEX *lex; + Prepared_statement *stmt= new Prepared_statement(thd); + int error; + DBUG_ENTER("mysql_stmt_prepare"); + + DBUG_PRINT("prep_query", ("%s", packet)); + + /* + If this is an SQLCOM_PREPARE, we also increase Com_prepare_sql. + However, it seems handy if com_stmt_prepare is increased always, + no matter what kind of prepare is processed. + */ + statistic_increment(com_stmt_prepare, &LOCK_status); + + if (stmt == 0) + { + send_error(thd, ER_OUT_OF_RESOURCES); + DBUG_RETURN(1); + } + + if (name) + { + stmt->name.length= name->length; + if (!(stmt->name.str= memdup_root(stmt->mem_root, (char*)name->str, + name->length))) + { + delete stmt; + send_error(thd, ER_OUT_OF_RESOURCES); + DBUG_RETURN(1); + } + } + + if (thd->stmt_map.insert(thd, stmt)) + { + /* + The error is sent in the insert. The statement itself + will be also deleted there (this is how the hash works). 
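init_param_array() above refuses more than UINT_MAX16 placeholders because send_prep_stmt(), near the top of this file, reports the parameter count in a two-byte field of the prepare reply. A sketch of that nine-byte header, with hand-rolled store2()/store4() standing in for the int2store()/int4store() macros; build_prep_ok() is an illustrative name, not a function in this patch.

/*
  Sketch of the prepare-OK header laid out by send_prep_stmt():
  [0x00][statement id:4][number of columns:2][number of parameters:2]
*/
typedef unsigned char uchar;

static void store2(uchar *to, unsigned v)
{ to[0]= (uchar) v; to[1]= (uchar) (v >> 8); }

static void store4(uchar *to, unsigned long v)
{ store2(to, (unsigned) v); store2(to + 2, (unsigned) (v >> 16)); }

void build_prep_ok(uchar buff[9], unsigned long stmt_id,
                   unsigned columns, unsigned params)
{
  buff[0]= 0;                 /* OK packet indicator */
  store4(buff + 1, stmt_id);  /* statement handle returned to the client */
  store2(buff + 5, columns);  /* columns in the result set metadata */
  store2(buff + 7, params);   /* placeholder count; must fit in 16 bits */
}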
+ */ + DBUG_RETURN(1); + } + + thd->set_n_backup_statement(stmt, &thd->stmt_backup); + thd->set_n_backup_item_arena(stmt, &thd->stmt_backup); + + if (alloc_query(thd, packet, packet_length)) + { + thd->restore_backup_statement(stmt, &thd->stmt_backup); + thd->restore_backup_item_arena(stmt, &thd->stmt_backup); + /* Statement map deletes statement on erase */ + thd->stmt_map.erase(stmt); + send_error(thd, ER_OUT_OF_RESOURCES); + DBUG_RETURN(1); + } + + mysql_log.write(thd, thd->command, "[%lu] %s", stmt->id, packet); + + thd->current_arena= stmt; + mysql_init_query(thd, (uchar *) thd->query, thd->query_length); + /* Reset warnings from previous command */ + mysql_reset_errors(thd); + lex= thd->lex; + lex->safe_to_cache_query= 0; + + error= yyparse((void *)thd) || thd->is_fatal_error || + thd->net.report_error || init_param_array(stmt); + /* + While doing context analysis of the query (in send_prepare_results) we + allocate a lot of additional memory: for open tables, JOINs, derived + tables, etc. Let's save a snapshot of current parse tree to the + statement and restore original THD. In cases when some tree + transformation can be reused on execute, we set again thd->mem_root from + stmt->mem_root (see setup_wild for one place where we do that). + */ + thd->restore_backup_item_arena(stmt, &thd->stmt_backup); + + if (!error) + error= send_prepare_results(stmt, test(name)); + + /* restore to WAIT_PRIOR: QUERY_PRIOR is set inside alloc_query */ + if (!(specialflag & SPECIAL_NO_PRIOR)) + my_pthread_setprio(pthread_self(),WAIT_PRIOR); + lex_end(lex); + thd->restore_backup_statement(stmt, &thd->stmt_backup); + cleanup_items(stmt->free_list); + close_thread_tables(thd); + free_items(thd->free_list); + thd->rollback_item_tree_changes(); + thd->free_list= 0; + thd->current_arena= thd; + + if (error) + { + /* Statement map deletes statement on erase */ + thd->stmt_map.erase(stmt); + stmt= NULL; + if (thd->net.report_error) + send_error(thd); + /* otherwise the error is sent inside yyparse/send_prepare_results */ + } + else + { + stmt->setup_set_params(); + SELECT_LEX *sl= stmt->lex->all_selects_list; + for (; sl; sl= sl->next_select_in_list()) + { + /* + Save WHERE clause pointers, because they may be changed + during query optimisation. + */ + sl->prep_where= sl->where; + /* + Switch off a temporary flag that prevents evaluation of + subqueries in statement prepare. + */ + sl->uncacheable&= ~UNCACHEABLE_PREPARE; + } + stmt->state= Item_arena::PREPARED; + } + + DBUG_RETURN(!stmt); +} + +/* Reinit statement before execution */ + +static void reset_stmt_for_execute(Prepared_statement *stmt) +{ + THD *thd= stmt->thd; + LEX *lex= stmt->lex; + SELECT_LEX *sl= lex->all_selects_list; + + for (; sl; sl= sl->next_select_in_list()) + { + /* remove option which was put by mysql_explain_union() */ + sl->options&= ~SELECT_DESCRIBE; + /* + Copy WHERE clause pointers to avoid damaging they by optimisation + */ + if (sl->prep_where) + { + sl->where= sl->prep_where->copy_andor_structure(thd); + sl->where->cleanup(); + } + DBUG_ASSERT(sl->join == 0); + ORDER *order; + /* Fix GROUP list */ + for (order= (ORDER *)sl->group_list.first; order; order= order->next) + order->item= &order->item_ptr; + /* Fix ORDER list */ + for (order= (ORDER *)sl->order_list.first; order; order= order->next) + order->item= &order->item_ptr; + + /* + TODO: When the new table structure is ready, then have a status bit + to indicate the table is altered, and re-do the setup_* + and open the tables back. 
+ */ + for (TABLE_LIST *tables= (TABLE_LIST*) sl->table_list.first; + tables; + tables= tables->next) + { + /* + Reset old pointers to TABLEs: they are not valid since the tables + were closed in the end of previous prepare or execute call. + */ + tables->table= 0; + tables->table_list= 0; + } + + { + SELECT_LEX_UNIT *unit= sl->master_unit(); + unit->unclean(); + unit->types.empty(); + /* for derived tables & PS (which can't be reset by Item_subquery) */ + unit->reinit_exec_mechanism(); + } + } + lex->current_select= &lex->select_lex; + if (lex->result) + lex->result->cleanup(); +} + + +/* + Clears parameters from data left from previous execution or long data + + SYNOPSIS + reset_stmt_params() + stmt - prepared statement for which parameters should be reset +*/ + +static void reset_stmt_params(Prepared_statement *stmt) +{ + Item_param **item= stmt->param_array; + Item_param **end= item + stmt->param_count; + for (;item < end ; ++item) + (**item).reset(); +} + + +/* + Executes previously prepared query. + If there is any parameters, then replace markers with the data supplied + from client, and then execute the query. + SYNOPSIS + mysql_stmt_execute() + thd Current thread + packet Query string + packet_length Query string length, including terminator character. +*/ + +void mysql_stmt_execute(THD *thd, char *packet, uint packet_length) +{ + ulong stmt_id= uint4korr(packet); + /* + Query text for binary log, or empty string if the query is not put into + binary log. + */ + String expanded_query; +#ifndef EMBEDDED_LIBRARY + uchar *packet_end= (uchar *) packet + packet_length - 1; +#endif + Prepared_statement *stmt; + DBUG_ENTER("mysql_stmt_execute"); + + packet+= 9; /* stmt_id + 5 bytes of flags */ + + statistic_increment(com_stmt_execute, &LOCK_status); + if (!(stmt= find_prepared_statement(thd, stmt_id, "mysql_stmt_execute", + SEND_ERROR))) + DBUG_VOID_RETURN; + + DBUG_PRINT("exec_query:", ("%s", stmt->query)); + + /* Check if we got an error when sending long data */ + if (stmt->state == Item_arena::ERROR) + { + send_error(thd, stmt->last_errno, stmt->last_error); + DBUG_VOID_RETURN; + } + + DBUG_ASSERT(thd->free_list == NULL); + mysql_reset_thd_for_next_command(thd); +#ifndef EMBEDDED_LIBRARY + if (stmt->param_count) + { + uchar *null_array= (uchar *) packet; + if (setup_conversion_functions(stmt, (uchar **) &packet, packet_end) || + stmt->set_params(stmt, null_array, (uchar *) packet, packet_end, + &expanded_query)) + goto set_params_data_err; + } +#else + /* + In embedded library we re-install conversion routines each time + we set params, and also we don't need to parse packet. + So we do it in one function. + */ + if (stmt->param_count && stmt->set_params_data(stmt, &expanded_query)) + goto set_params_data_err; +#endif + thd->protocol= &thd->protocol_prep; // Switch to binary protocol + execute_stmt(thd, stmt, &expanded_query, TRUE); + thd->protocol= &thd->protocol_simple; // Use normal protocol + DBUG_VOID_RETURN; + +set_params_data_err: + reset_stmt_params(stmt); + my_error(ER_WRONG_ARGUMENTS, MYF(0), "mysql_stmt_execute"); + send_error(thd); + DBUG_VOID_RETURN; +} + + +/* + Execute prepared statement using parameter values from + lex->prepared_stmt_params and send result to the client using text protocol. +*/ + +void mysql_sql_stmt_execute(THD *thd, LEX_STRING *stmt_name) +{ + Prepared_statement *stmt; + /* + Query text for binary log, or empty string if the query is not put into + binary log. 
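mysql_stmt_execute() above skips nine header bytes (the statement id plus the bytes it treats as flags) and hands the remainder of the packet to setup_conversion_functions() and the stmt->set_params binder. Pulling together the pieces defined earlier in this file, the parameter section has roughly the shape walked by the sketch below; the walker is only illustrative and does not decode the values themselves.

/*
  Rough layout of the parameter section consumed above:
    [null bitmap:(param_count+7)/8]                 -- tested by is_param_null()
    [new-params-bound flag:1]                       -- setup_conversion_functions()
    [typecode:2 per param, sign flag in bit 15]     -- present if the flag is 1
    [packed parameter values...]                    -- read by set_param_xxx()
*/
#include <cstdio>

typedef unsigned char uchar;

void describe_param_block(const uchar *p, unsigned param_count)
{
  const uchar *null_bits= p;
  p+= (param_count + 7) / 8;              /* skip the NULL bitmap */
  int types_follow= *p++;                 /* non-zero on first execute or when types change */

  for (unsigned i= 0; i < param_count; i++)
  {
    int is_null= null_bits[i / 8] & (1 << (i & 7));
    if (types_follow)
    {
      unsigned typecode= p[0] | (p[1] << 8);   /* little-endian, as sint2korr() */
      p+= 2;
      printf("param %u: type=%u unsigned=%d null=%d\n",
             i, typecode & 0x7fff, (typecode & 0x8000) != 0, is_null != 0);
    }
    else
      printf("param %u: null=%d (types unchanged)\n", i, is_null != 0);
  }
  /* the packed values for non-NULL, non-long-data parameters follow here */
}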
+ */ + String expanded_query; + DBUG_ENTER("mysql_sql_stmt_execute"); + + /* See comment for statistics_increment in mysql_stmt_prepare */ + statistic_increment(com_stmt_execute, &LOCK_status); + if (!(stmt= (Prepared_statement*)thd->stmt_map.find_by_name(stmt_name))) + { + my_error(ER_UNKNOWN_STMT_HANDLER, MYF(0), stmt_name->length, + stmt_name->str, "EXECUTE"); + send_error(thd); + DBUG_VOID_RETURN; + } + + if (stmt->param_count != thd->lex->prepared_stmt_params.elements) + { + my_error(ER_WRONG_ARGUMENTS, MYF(0), "EXECUTE"); + send_error(thd); + DBUG_VOID_RETURN; + } + + DBUG_ASSERT(thd->free_list == NULL); + /* Must go before setting variables, as it clears thd->user_var_events */ + mysql_reset_thd_for_next_command(thd); + thd->set_n_backup_statement(stmt, &thd->stmt_backup); + if (stmt->set_params_from_vars(stmt, + thd->stmt_backup.lex->prepared_stmt_params, + &expanded_query)) + { + my_error(ER_WRONG_ARGUMENTS, MYF(0), "EXECUTE"); + send_error(thd); + } + thd->command= COM_EXECUTE; /* For nice messages in general log */ + execute_stmt(thd, stmt, &expanded_query, FALSE); + DBUG_VOID_RETURN; +} + + +/* + Execute prepared statement. + SYNOPSIS + execute_stmt() + thd Current thread + stmt Statement to execute + expanded_query If binary log is enabled, query string with parameter + placeholders replaced with actual values. Otherwise empty + string. + NOTES + Caller must set parameter values and thd::protocol. + thd->free_list is assumed to be garbage. +*/ + +static void execute_stmt(THD *thd, Prepared_statement *stmt, + String *expanded_query, bool set_context) +{ + DBUG_ENTER("execute_stmt"); + if (set_context) + thd->set_n_backup_statement(stmt, &thd->stmt_backup); + reset_stmt_for_execute(stmt); + + if (expanded_query->length() && + alloc_query(thd, (char *)expanded_query->ptr(), + expanded_query->length()+1)) + { + my_error(ER_OUTOFMEMORY, 0, expanded_query->length()); + DBUG_VOID_RETURN; + } + mysql_log.write(thd, thd->command, "[%lu] %s", stmt->id, thd->query); + /* + At first execution of prepared statement we will perform logical + transformations of the query tree (i.e. negations elimination). + This should be done permanently on the parse tree of this statement. + */ + thd->current_arena= stmt; + + if (!(specialflag & SPECIAL_NO_PRIOR)) + my_pthread_setprio(pthread_self(),QUERY_PRIOR); + mysql_execute_command(thd); + thd->lex->unit.cleanup(); + if (!(specialflag & SPECIAL_NO_PRIOR)) + my_pthread_setprio(pthread_self(), WAIT_PRIOR); + /* + 'start_time' is set in dispatch_command, but THD::query will + be freed when we return from this function. So let's log the slow + query here. + */ + log_slow_statement(thd); + /* Prevent from second logging in the end of dispatch_command */ + thd->enable_slow_log= FALSE; + + /* Free Items that were created during this execution of the PS. */ + free_items(thd->free_list); + thd->free_list= 0; + if (stmt->state == Item_arena::PREPARED) + stmt->state= Item_arena::EXECUTED; + thd->current_arena= thd; + cleanup_items(stmt->free_list); + thd->rollback_item_tree_changes(); + reset_stmt_params(stmt); + close_thread_tables(thd); // to close derived tables + thd->set_statement(&thd->stmt_backup); + DBUG_VOID_RETURN; +} + + +/* + Reset a prepared statement in case there was a recoverable error. + SYNOPSIS + mysql_stmt_reset() + thd Thread handle + packet Packet with stmt id + + DESCRIPTION + This function resets statement to the state it was right after prepare. 
+ It can be used to: + - clear an error happened during mysql_stmt_send_long_data + - cancel long data stream for all placeholders without + having to call mysql_stmt_execute. + Sends 'OK' packet in case of success (statement was reset) + or 'ERROR' packet (unrecoverable error/statement not found/etc). +*/ + +void mysql_stmt_reset(THD *thd, char *packet) +{ + /* There is always space for 4 bytes in buffer */ + ulong stmt_id= uint4korr(packet); + Prepared_statement *stmt; + + DBUG_ENTER("mysql_stmt_reset"); + + statistic_increment(com_stmt_reset, &LOCK_status); + if (!(stmt= find_prepared_statement(thd, stmt_id, "mysql_stmt_reset", + SEND_ERROR))) + DBUG_VOID_RETURN; + + stmt->state= Item_arena::PREPARED; + + /* + Clear parameters from data which could be set by + mysql_stmt_send_long_data() call. + */ + reset_stmt_params(stmt); + + mysql_reset_thd_for_next_command(thd); + send_ok(thd); + + DBUG_VOID_RETURN; +} + + +/* + Delete a prepared statement from memory. + Note: we don't send any reply to that command. +*/ + +void mysql_stmt_free(THD *thd, char *packet) +{ + /* There is always space for 4 bytes in packet buffer */ + ulong stmt_id= uint4korr(packet); + Prepared_statement *stmt; + + DBUG_ENTER("mysql_stmt_free"); + + statistic_increment(com_stmt_close, &LOCK_status); + if (!(stmt= find_prepared_statement(thd, stmt_id, "mysql_stmt_close", + DONT_SEND_ERROR))) + DBUG_VOID_RETURN; + + /* Statement map deletes statement on erase */ + thd->stmt_map.erase(stmt); + DBUG_VOID_RETURN; +} + + +/* + Long data in pieces from client + + SYNOPSIS + mysql_stmt_get_longdata() + thd Thread handle + pos String to append + packet_length Length of string + + DESCRIPTION + Get a part of a long data. + To make the protocol efficient, we are not sending any return packages + here. + If something goes wrong, then we will send the error on 'execute' + + We assume that the client takes care of checking that all parts are sent + to the server. 
(No checking that we get a 'end of column' in the server) +*/ + +void mysql_stmt_get_longdata(THD *thd, char *packet, ulong packet_length) +{ + ulong stmt_id; + uint param_number; + Prepared_statement *stmt; + Item_param *param; + char *packet_end= packet + packet_length - 1; + + DBUG_ENTER("mysql_stmt_get_longdata"); + + statistic_increment(com_stmt_send_long_data, &LOCK_status); +#ifndef EMBEDDED_LIBRARY + /* Minimal size of long data packet is 6 bytes */ + if ((ulong) (packet_end - packet) < MYSQL_LONG_DATA_HEADER) + { + my_error(ER_WRONG_ARGUMENTS, MYF(0), "mysql_stmt_send_long_data"); + DBUG_VOID_RETURN; + } +#endif + + stmt_id= uint4korr(packet); + packet+= 4; + + if (!(stmt=find_prepared_statement(thd, stmt_id, "mysql_stmt_send_long_data", + DONT_SEND_ERROR))) + DBUG_VOID_RETURN; + + param_number= uint2korr(packet); + packet+= 2; +#ifndef EMBEDDED_LIBRARY + if (param_number >= stmt->param_count) + { + /* Error will be sent in execute call */ + stmt->state= Item_arena::ERROR; + stmt->last_errno= ER_WRONG_ARGUMENTS; + sprintf(stmt->last_error, ER(ER_WRONG_ARGUMENTS), + "mysql_stmt_send_long_data"); + DBUG_VOID_RETURN; + } +#endif + + param= stmt->param_array[param_number]; + +#ifndef EMBEDDED_LIBRARY + if (param->set_longdata(packet, (ulong) (packet_end - packet))) +#else + if (param->set_longdata(thd->extra_data, thd->extra_length)) +#endif + { + stmt->state= Item_arena::ERROR; + stmt->last_errno= ER_OUTOFMEMORY; + sprintf(stmt->last_error, ER(ER_OUTOFMEMORY), 0); + } + DBUG_VOID_RETURN; +} + + +Prepared_statement::Prepared_statement(THD *thd_arg) + :Statement(thd_arg), + thd(thd_arg), + param_array(0), + param_count(0), + last_errno(0) +{ + *last_error= '\0'; +} + + +void Prepared_statement::setup_set_params() +{ + /* Setup binary logging */ + if (mysql_bin_log.is_open() && is_update_query(lex->sql_command) || + mysql_log.is_open() || mysql_slow_log.is_open()) + { + set_params_from_vars= insert_params_from_vars_with_log; +#ifndef EMBEDDED_LIBRARY + set_params= insert_params_withlog; +#else + set_params_data= emb_insert_params_withlog; +#endif + } + else + { + set_params_from_vars= insert_params_from_vars; +#ifndef EMBEDDED_LIBRARY + set_params= insert_params; +#else + set_params_data= emb_insert_params; +#endif + } +} + + +Prepared_statement::~Prepared_statement() +{ + free_items(free_list); + delete lex->result; +} + + +Item_arena::Type Prepared_statement::type() const +{ + return PREPARED_STATEMENT; +} + diff --git a/sql/sql_rename.cc b/sql/sql_rename.cc index c560b96a615..388034e0f1a 100644 --- a/sql/sql_rename.cc +++ b/sql/sql_rename.cc @@ -24,6 +24,8 @@ static TABLE_LIST *rename_tables(THD *thd, TABLE_LIST *table_list, bool skip_error); +static TABLE_LIST *reverse_table_list(TABLE_LIST *table_list); + /* Every second entry in the table_list is the original name and every second entry is the new name. 
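The comment just above notes that for RENAME TABLE the table_list alternates old and new names. The next hunk adds reverse_table_list() and uses it to undo a partially completed rename: reverse the list, skip the pair that failed, rename the remaining pairs back, then reverse once more so a re-executed prepared statement sees the original order. A toy model of that pairing and of the undo walk, with plain strings instead of TABLE_LIST and a hard-coded failure point:

/* Toy model of the (old, new) pairing and of the reverse-then-undo walk. */
#include <cstdio>

struct Name_node { const char *name; Name_node *next; };

static Name_node *reverse_list(Name_node *list)   /* same shape as reverse_table_list() */
{
  Name_node *prev= 0;
  while (list)
  {
    Name_node *next= list->next;
    list->next= prev;
    prev= list;
    list= next;
  }
  return prev;
}

int main()
{
  /* RENAME TABLE a TO b, c TO d  ->  list a, b, c, d (old/new alternating) */
  Name_node d= { "d", 0 }, c= { "c", &d }, b= { "b", &c }, a= { "a", &b };
  Name_node *list= &a;

  /* pretend the second rename (c TO d) failed after a TO b succeeded */
  list= reverse_list(list);            /* now d, c, b, a */
  list= list->next->next;              /* skip the failed pair; it was never renamed */
  for (Name_node *p= list; p; p= p->next->next)
    printf("rename %s back to %s\n", p->name, p->next->name);
  return 0;
}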
@@ -46,6 +48,8 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list) DBUG_RETURN(1); } + if (wait_if_global_read_lock(thd,0,1)) + DBUG_RETURN(1); VOID(pthread_mutex_lock(&LOCK_open)); if (lock_table_names(thd, table_list)) goto err; @@ -54,17 +58,10 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list) if ((ren_table=rename_tables(thd,table_list,0))) { /* Rename didn't succeed; rename back the tables in reverse order */ - TABLE_LIST *prev=0,*table; - /* Reverse the table list */ + TABLE_LIST *table; - while (table_list) - { - TABLE_LIST *next=table_list->next; - table_list->next=prev; - prev=table_list; - table_list=next; - } - table_list=prev; + /* Reverse the table list */ + table_list= reverse_table_list(table_list); /* Find the last renamed table */ for (table=table_list ; @@ -73,6 +70,10 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list) table=table->next->next; // Skip error table /* Revert to old names */ rename_tables(thd, table, 1); + + /* Revert the table list (for prepared statements) */ + table_list= reverse_table_list(table_list); + error= 1; } @@ -83,21 +84,47 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list) if (mysql_bin_log.is_open()) { thd->clear_error(); - Query_log_event qinfo(thd, thd->query, thd->query_length, 0); + Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); mysql_bin_log.write(&qinfo); } - send_ok(&thd->net); + send_ok(thd); } unlock_table_names(thd,table_list); err: pthread_mutex_unlock(&LOCK_open); + start_waiting_global_read_lock(thd); DBUG_RETURN(error); } /* + reverse table list + + SYNOPSIS + reverse_table_list() + table_list pointer to table _list + + RETURN + pointer to new (reversed) list +*/ +static TABLE_LIST *reverse_table_list(TABLE_LIST *table_list) +{ + TABLE_LIST *prev= 0; + + while (table_list) + { + TABLE_LIST *next= table_list->next; + table_list->next= prev; + prev= table_list; + table_list= next; + } + return (prev); +} + + +/* Rename all tables in list; Return pointer to wrong entry if something goes wrong. Note that the table_list may be empty! 
*/ diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index ed2d477fc5d..963c4ccf5a6 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -15,14 +15,11 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "mysql_priv.h" +#ifdef HAVE_REPLICATION + #include "sql_repl.h" -#include "sql_acl.h" #include "log_event.h" -#include "mini_client.h" #include <my_dir.h> -#include <assert.h> - -extern const char* any_db; int max_binlog_dump_events = 0; // unlimited my_bool opt_sporadic_binlog_dump_fail = 0; @@ -155,7 +152,8 @@ File open_binlog(IO_CACHE *log, const char *log_file_name, File file; DBUG_ENTER("open_binlog"); - if ((file = my_open(log_file_name, O_RDONLY | O_BINARY, MYF(MY_WME))) < 0) + if ((file = my_open(log_file_name, O_RDONLY | O_BINARY | O_SHARE, + MYF(MY_WME))) < 0) { sql_print_error("Failed to open log (\ file '%s', errno %d)", log_file_name, my_errno); @@ -249,7 +247,7 @@ bool log_in_use(const char* log_name) if ((linfo = tmp->current_linfo)) { pthread_mutex_lock(&linfo->lock); - result = !memcmp(log_name, linfo->log_file_name, log_name_len); + result = !bcmp(log_name, linfo->log_file_name, log_name_len); pthread_mutex_unlock(&linfo->lock); if (result) break; @@ -260,44 +258,61 @@ bool log_in_use(const char* log_name) return result; } - -int purge_master_logs(THD* thd, const char* to_log) +int purge_error_message(THD* thd, int res) { - char search_file_name[FN_REFLEN]; - const char* errmsg = 0; - int res; - - if (!mysql_bin_log.is_open()) - goto end; - - mysql_bin_log.make_log_name(search_file_name, to_log); - res = mysql_bin_log.purge_logs(thd, search_file_name); + const char *errmsg= 0; - switch(res) { + switch (res) { case 0: break; - case LOG_INFO_EOF: errmsg = "Target log not found in binlog index"; break; - case LOG_INFO_IO: errmsg = "I/O error reading log index file"; break; - case LOG_INFO_INVALID: errmsg = "Server configuration does not permit \ -binlog purge"; break; - case LOG_INFO_SEEK: errmsg = "Failed on fseek()"; break; - case LOG_INFO_MEM: errmsg = "Out of memory"; break; - case LOG_INFO_FATAL: errmsg = "Fatal error during purge"; break; - case LOG_INFO_IN_USE: errmsg = "A purgeable log is in use, will not purge"; + case LOG_INFO_EOF: errmsg= "Target log not found in binlog index"; break; + case LOG_INFO_IO: errmsg= "I/O error reading log index file"; break; + case LOG_INFO_INVALID: + errmsg= "Server configuration does not permit binlog purge"; break; + case LOG_INFO_SEEK: errmsg= "Failed on fseek()"; break; + case LOG_INFO_MEM: errmsg= "Out of memory"; break; + case LOG_INFO_FATAL: errmsg= "Fatal error during purge"; break; + case LOG_INFO_IN_USE: errmsg= "A purgeable log is in use, will not purge"; break; - default: errmsg = "Unknown error during purge"; break; + default: errmsg= "Unknown error during purge"; break; } if (errmsg) { - send_error(&thd->net, 0, errmsg); + send_error(thd, 0, errmsg); return 1; } - -end: - send_ok(&thd->net); + send_ok(thd); return 0; } + +int purge_master_logs(THD* thd, const char* to_log) +{ + char search_file_name[FN_REFLEN]; + if (!mysql_bin_log.is_open()) + { + send_ok(thd); + return 0; + } + + mysql_bin_log.make_log_name(search_file_name, to_log); + return purge_error_message(thd, + mysql_bin_log.purge_logs(search_file_name, 0, 1, + 1, NULL)); +} + + +int purge_master_logs_before_date(THD* thd, time_t purge_time) +{ + if (!mysql_bin_log.is_open()) + { + send_ok(thd); + return 0; + } + return purge_error_message(thd, + mysql_bin_log.purge_logs_before_date(purge_time)); +} + /* TODO: Clean up loop to 
only have one call to send_file() */ @@ -378,32 +393,33 @@ impossible position"; We need to start a packet with something other than 255 to distiquish it from error */ - packet->set("\0", 1); + packet->set("\0", 1, &my_charset_bin); /* Before 4.0.14 we called fake_rotate_event below only if (pos == BIN_LOG_HEADER_SIZE), because if this is false then the slave already knows the binlog's name. Now we always call fake_rotate_event; if the slave already knew the log's - name (ex: CHANGE MASTER TO MASTER_LOG_FILE=...) this is useless but does not - harm much. It is nice for 3.23 (>=.58) slaves which test Rotate events + name (ex: CHANGE MASTER TO MASTER_LOG_FILE=...) this is useless but does + not harm much. It is nice for 3.23 (>=.58) slaves which test Rotate events to see if the master is 4.0 (then they choose to stop because they can't - replicate 4.0); by always calling fake_rotate_event we are sure that 3.23.58 - and newer will detect the problem as soon as replication starts (BUG#198). + replicate 4.0); by always calling fake_rotate_event we are sure that + 3.23.58 and newer will detect the problem as soon as replication starts + (BUG#198). Always calling fake_rotate_event makes sending of normal - (=from-binlog) Rotate events a priori unneeded, but it is not so simple: the - 2 Rotate events are not equivalent, the normal one is before the Stop event, - the fake one is after. If we don't send the normal one, then the Stop event - will be interpreted (by existing 4.0 slaves) as "the master stopped", which - is wrong. So for safety, given that we want minimum modification of 4.0, we - send the normal and fake Rotates. + (=from-binlog) Rotate events a priori unneeded, but it is not so simple: + the 2 Rotate events are not equivalent, the normal one is before the Stop + event, the fake one is after. If we don't send the normal one, then the + Stop event will be interpreted (by existing 4.0 slaves) as "the master + stopped", which is wrong. So for safety, given that we want minimum + modification of 4.0, we send the normal and fake Rotates. 
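The packet->set("\0", 1, &my_charset_bin) calls in this hunk reserve a leading 0x00 byte in front of every event sent to the slave, so the receiving side can tell event data from an error packet, which starts with 255. A sketch of that dispatch on the first byte; classify_reply() and the EOF convention shown are illustrative stand-ins, not libmysql code.

/*
  Illustrative dispatch on the first byte of a binlog-dump reply:
  0x00 precedes an event, 0xff marks an error packet, a short 0xfe
  packet marks end of stream.
*/
typedef unsigned char uchar;

enum Reply_kind { REPLY_EVENT, REPLY_ERROR, REPLY_EOF };

Reply_kind classify_reply(const uchar *packet, unsigned long len)
{
  if (len == 0)
    return REPLY_ERROR;        /* treat an empty packet as an error */
  if (packet[0] == 0xff)
    return REPLY_ERROR;        /* error packet: errno and message follow */
  if (packet[0] == 0xfe && len < 9)
    return REPLY_EOF;          /* EOF packet */
  return REPLY_EVENT;          /* 0x00 followed by a serialized Log_event */
}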
*/ if (fake_rotate_event(net, packet, log_file_name, pos, &errmsg)) { my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG; goto err; } - packet->set("\0", 1); + packet->set("\0", 1, &my_charset_bin); while (!net->error && net->vio != 0 && !thd->killed) { @@ -437,7 +453,7 @@ impossible position"; goto err; } } - packet->set("\0", 1); + packet->set("\0", 1, &my_charset_bin); } /* TODO: now that we are logging the offset, check to make sure @@ -562,7 +578,7 @@ Increase max_allowed_packet on master"; goto err; } } - packet->set("\0", 1); + packet->set("\0", 1, &my_charset_bin); /* No need to net_flush because we will get to flush later when we hit EOF pretty quick @@ -612,7 +628,7 @@ Increase max_allowed_packet on master"; goto err; } packet->length(0); - packet->append("\0",1); + packet->append('\0'); } } @@ -620,7 +636,7 @@ end: end_io_cache(&log); (void)my_close(file, MYF(MY_WME)); - send_eof(&thd->net); + send_eof(thd); thd->proc_info = "Waiting to finalize termination"; pthread_mutex_lock(&LOCK_thread_count); thd->current_linfo = 0; @@ -642,83 +658,178 @@ err: pthread_mutex_unlock(&LOCK_thread_count); if (file >= 0) (void) my_close(file, MYF(MY_WME)); - send_error(&thd->net, my_errno, errmsg); + send_error(thd, my_errno, errmsg); DBUG_VOID_RETURN; } int start_slave(THD* thd , MASTER_INFO* mi, bool net_report) { - int slave_errno = 0; - if (!thd) thd = current_thd; - NET* net = &thd->net; + int slave_errno= 0; int thread_mask; DBUG_ENTER("start_slave"); - if (check_access(thd, SUPER_ACL, any_db)) + if (check_access(thd, SUPER_ACL, any_db,0,0,0)) DBUG_RETURN(1); lock_slave_threads(mi); // this allows us to cleanly read slave_running + // Get a mask of _stopped_ threads init_thread_mask(&thread_mask,mi,1 /* inverse */); - if (thd->lex.slave_thd_opt) - thread_mask &= thd->lex.slave_thd_opt; - if (thread_mask) + /* + Below we will start all stopped threads. + But if the user wants to start only one thread, do as if the other thread + was running (as we don't wan't to touch the other thread), so set the + bit to 0 for the other thread + */ + if (thd->lex->slave_thd_opt) + thread_mask&= thd->lex->slave_thd_opt; + if (thread_mask) //some threads are stopped, start them { if (init_master_info(mi,master_info_file,relay_log_info_file, 0, thread_mask)) slave_errno=ER_MASTER_INFO; else if (server_id_supplied && *mi->host) - slave_errno = start_slave_threads(0 /*no mutex */, + { + /* + If we will start SQL thread we will care about UNTIL options + If not and they are specified we will ignore them and warn user + about this fact. + */ + if (thread_mask & SLAVE_SQL) + { + pthread_mutex_lock(&mi->rli.data_lock); + + if (thd->lex->mi.pos) + { + mi->rli.until_condition= RELAY_LOG_INFO::UNTIL_MASTER_POS; + mi->rli.until_log_pos= thd->lex->mi.pos; + /* + We don't check thd->lex->mi.log_file_name for NULL here + since it is checked in sql_yacc.yy + */ + strmake(mi->rli.until_log_name, thd->lex->mi.log_file_name, + sizeof(mi->rli.until_log_name)-1); + } + else if (thd->lex->mi.relay_log_pos) + { + mi->rli.until_condition= RELAY_LOG_INFO::UNTIL_RELAY_POS; + mi->rli.until_log_pos= thd->lex->mi.relay_log_pos; + strmake(mi->rli.until_log_name, thd->lex->mi.relay_log_name, + sizeof(mi->rli.until_log_name)-1); + } + else + clear_until_condition(&mi->rli); + + if (mi->rli.until_condition != RELAY_LOG_INFO::UNTIL_NONE) + { + /* Preparing members for effective until condition checking */ + const char *p= fn_ext(mi->rli.until_log_name); + char *p_end; + if (*p) + { + //p points to '.' 
+ mi->rli.until_log_name_extension= strtoul(++p,&p_end, 10); + /* + p_end points to the first invalid character. If it equals + to p, no digits were found, error. If it contains '\0' it + means conversion went ok. + */ + if (p_end==p || *p_end) + slave_errno=ER_BAD_SLAVE_UNTIL_COND; + } + else + slave_errno=ER_BAD_SLAVE_UNTIL_COND; + + /* mark the cached result of the UNTIL comparison as "undefined" */ + mi->rli.until_log_names_cmp_result= + RELAY_LOG_INFO::UNTIL_LOG_NAMES_CMP_UNKNOWN; + + /* Issuing warning then started without --skip-slave-start */ + if (!opt_skip_slave_start) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_MISSING_SKIP_SLAVE, + ER(ER_MISSING_SKIP_SLAVE)); + } + + pthread_mutex_unlock(&mi->rli.data_lock); + } + else if (thd->lex->mi.pos || thd->lex->mi.relay_log_pos) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_UNTIL_COND_IGNORED, + ER(ER_UNTIL_COND_IGNORED)); + + + if (!slave_errno) + slave_errno = start_slave_threads(0 /*no mutex */, 1 /* wait for start */, mi, master_info_file,relay_log_info_file, thread_mask); + } else slave_errno = ER_BAD_SLAVE; } else - slave_errno = ER_SLAVE_MUST_STOP; + //no error if all threads are already started, only a warning + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_SLAVE_WAS_RUNNING, + ER(ER_SLAVE_WAS_RUNNING)); unlock_slave_threads(mi); if (slave_errno) { if (net_report) - send_error(net, slave_errno); + send_error(thd, slave_errno); DBUG_RETURN(1); } else if (net_report) - send_ok(net); + send_ok(thd); DBUG_RETURN(0); } + int stop_slave(THD* thd, MASTER_INFO* mi, bool net_report ) { - int slave_errno = 0; - if (!thd) thd = current_thd; - NET* net = &thd->net; + int slave_errno; + if (!thd) + thd = current_thd; - if (check_access(thd, SUPER_ACL, any_db)) + if (check_access(thd, SUPER_ACL, any_db,0,0,0)) return 1; thd->proc_info = "Killing slave"; int thread_mask; lock_slave_threads(mi); + // Get a mask of _running_ threads init_thread_mask(&thread_mask,mi,0 /* not inverse*/); - if (thd->lex.slave_thd_opt) - thread_mask &= thd->lex.slave_thd_opt; - slave_errno = (thread_mask) ? - terminate_slave_threads(mi,thread_mask, - 1 /*skip lock */) : ER_SLAVE_NOT_RUNNING; + /* + Below we will stop all running threads. + But if the user wants to stop only one thread, do as if the other thread + was stopped (as we don't wan't to touch the other thread), so set the + bit to 0 for the other thread + */ + if (thd->lex->slave_thd_opt) + thread_mask &= thd->lex->slave_thd_opt; + + if (thread_mask) + { + slave_errno= terminate_slave_threads(mi,thread_mask, + 1 /*skip lock */); + } + else + { + //no error if both threads are already stopped, only a warning + slave_errno= 0; + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_SLAVE_WAS_NOT_RUNNING, + ER(ER_SLAVE_WAS_NOT_RUNNING)); + } unlock_slave_threads(mi); thd->proc_info = 0; if (slave_errno) { if (net_report) - send_error(net, slave_errno); + send_error(thd, slave_errno); return 1; } else if (net_report) - send_ok(net); + send_ok(thd); return 0; } @@ -732,16 +843,9 @@ int stop_slave(THD* thd, MASTER_INFO* mi, bool net_report ) thd Thread handler mi Master info for the slave - - NOTES - We don't send ok in this functions as this is called from - reload_acl_and_cache() which may have done other tasks, which may - have failed for which we want to send and error. 
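The strtoul() validation added above for START SLAVE UNTIL splits the numeric extension off the log name and rejects anything that is not purely digits. A standalone sketch of that parsing rule, assuming a fn_ext()-like helper that returns the last '.' of the name (illustrative, not the server's code):

#include <cstdio>
#include <cstdlib>
#include <cstring>

// Rough stand-in for fn_ext(): pointer to the final '.' of the name,
// or to the trailing '\0' if there is none.
static const char *log_name_ext(const char *name)
{
  const char *dot = std::strrchr(name, '.');
  return dot ? dot : name + std::strlen(name);
}

// Parse "mysql-bin.000042" -> 42; return false for names the UNTIL check
// would reject (no extension, or an extension with non-digit characters).
static bool parse_until_log_name(const char *name, unsigned long *ext)
{
  const char *p = log_name_ext(name);
  if (*p == '\0')
    return false;                       // no '.' at all
  char *p_end;
  *ext = std::strtoul(p + 1, &p_end, 10);
  /* p_end points to the first invalid character: if it equals p+1 no digits
     were found; if it is not '\0' there was trailing garbage. */
  return p_end != p + 1 && *p_end == '\0';
}

int main()
{
  unsigned long ext;
  std::printf("%d\n", parse_until_log_name("mysql-bin.000042", &ext)); // 1, ext == 42
  std::printf("%d\n", parse_until_log_name("mysql-bin.bad42", &ext));  // 0
  std::printf("%d\n", parse_until_log_name("mysql-bin", &ext));        // 0
  return 0;
}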
- RETURN 0 ok 1 error - In this case error is sent to the client with send_error() */ @@ -762,7 +866,7 @@ int reset_slave(THD *thd, MASTER_INFO* mi) error=1; goto err; } - //delete relay logs, clear relay log coordinates + // delete relay logs, clear relay log coordinates if ((error= purge_relay_logs(&mi->rli, thd, 1 /* just reset */, &errmsg))) @@ -776,16 +880,23 @@ int reset_slave(THD *thd, MASTER_INFO* mi) STATUS; before doing START SLAVE; */ init_master_info_with_options(mi); - clear_last_slave_error(&mi->rli); - //close master_info_file, relay_log_info_file, set mi->inited=rli->inited=0 + /* + Reset errors (the idea is that we forget about the + old master). + */ + clear_slave_error(&mi->rli); + clear_until_condition(&mi->rli); + + // close master_info_file, relay_log_info_file, set mi->inited=rli->inited=0 end_master_info(mi); - //and delete these two files + // and delete these two files fn_format(fname, master_info_file, mysql_data_home, "", 4+32); if (my_stat(fname, &stat_area, MYF(0)) && my_delete(fname, MYF(MY_WME))) { error=1; goto err; } + // delete relay_log_info_file fn_format(fname, relay_log_info_file, mysql_data_home, "", 4+32); if (my_stat(fname, &stat_area, MYF(0)) && my_delete(fname, MYF(MY_WME))) { @@ -795,8 +906,8 @@ int reset_slave(THD *thd, MASTER_INFO* mi) err: unlock_slave_threads(mi); - if (thd && error) - send_error(&thd->net, sql_errno, errmsg); + if (error) + my_error(sql_errno, MYF(0), errmsg); DBUG_RETURN(error); } @@ -860,18 +971,18 @@ int change_master(THD* thd, MASTER_INFO* mi) init_thread_mask(&thread_mask,mi,0 /*not inverse*/); if (thread_mask) // We refuse if any slave thread is running { - net_printf(&thd->net,ER_SLAVE_MUST_STOP); + net_printf(thd,ER_SLAVE_MUST_STOP); unlock_slave_threads(mi); DBUG_RETURN(1); } thd->proc_info = "Changing master"; - LEX_MASTER_INFO* lex_mi = &thd->lex.mi; + LEX_MASTER_INFO* lex_mi= &thd->lex->mi; // TODO: see if needs re-write if (init_master_info(mi, master_info_file, relay_log_info_file, 0, thread_mask)) { - send_error(&thd->net, ER_MASTER_INFO); + send_error(thd, ER_MASTER_INFO); unlock_slave_threads(mi); DBUG_RETURN(1); } @@ -891,7 +1002,6 @@ int change_master(THD* thd, MASTER_INFO* mi) { mi->master_log_name[0] = 0; mi->master_log_pos= BIN_LOG_HEADER_SIZE; - mi->rli.pending = 0; } if (lex_mi->log_file_name) @@ -900,7 +1010,6 @@ int change_master(THD* thd, MASTER_INFO* mi) if (lex_mi->pos) { mi->master_log_pos= lex_mi->pos; - mi->rli.pending = 0; } DBUG_PRINT("info", ("master_log_pos: %d", (ulong) mi->master_log_pos)); @@ -914,18 +1023,39 @@ int change_master(THD* thd, MASTER_INFO* mi) mi->port = lex_mi->port; if (lex_mi->connect_retry) mi->connect_retry = lex_mi->connect_retry; + + if (lex_mi->ssl != LEX_MASTER_INFO::SSL_UNCHANGED) + mi->ssl= (lex_mi->ssl == LEX_MASTER_INFO::SSL_ENABLE); + if (lex_mi->ssl_ca) + strmake(mi->ssl_ca, lex_mi->ssl_ca, sizeof(mi->ssl_ca)-1); + if (lex_mi->ssl_capath) + strmake(mi->ssl_capath, lex_mi->ssl_capath, sizeof(mi->ssl_capath)-1); + if (lex_mi->ssl_cert) + strmake(mi->ssl_cert, lex_mi->ssl_cert, sizeof(mi->ssl_cert)-1); + if (lex_mi->ssl_cipher) + strmake(mi->ssl_cipher, lex_mi->ssl_cipher, sizeof(mi->ssl_cipher)-1); + if (lex_mi->ssl_key) + strmake(mi->ssl_key, lex_mi->ssl_key, sizeof(mi->ssl_key)-1); +#ifndef HAVE_OPENSSL + if (lex_mi->ssl || lex_mi->ssl_ca || lex_mi->ssl_capath || + lex_mi->ssl_cert || lex_mi->ssl_cipher || lex_mi->ssl_key ) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_SLAVE_IGNORED_SSL_PARAMS, ER(ER_SLAVE_IGNORED_SSL_PARAMS)); +#endif if 
(lex_mi->relay_log_name) { need_relay_log_purge= 0; - strmake(mi->rli.relay_log_name,lex_mi->relay_log_name, - sizeof(mi->rli.relay_log_name)-1); + strmake(mi->rli.group_relay_log_name,lex_mi->relay_log_name, + sizeof(mi->rli.group_relay_log_name)-1); + strmake(mi->rli.event_relay_log_name,lex_mi->relay_log_name, + sizeof(mi->rli.event_relay_log_name)-1); } if (lex_mi->relay_log_pos) { need_relay_log_purge= 0; - mi->rli.relay_log_pos=lex_mi->relay_log_pos; + mi->rli.group_relay_log_pos= mi->rli.event_relay_log_pos= lex_mi->relay_log_pos; } /* @@ -953,21 +1083,25 @@ int change_master(THD* thd, MASTER_INFO* mi) of replication is not 100% clear, so we guard against problems using max(). */ - mi->master_log_pos = max(BIN_LOG_HEADER_SIZE, mi->rli.master_log_pos); - strmake(mi->master_log_name,mi->rli.master_log_name, + mi->master_log_pos = max(BIN_LOG_HEADER_SIZE, + mi->rli.group_master_log_pos); + strmake(mi->master_log_name, mi->rli.group_master_log_name, sizeof(mi->master_log_name)-1); } - + /* + Relay log's IO_CACHE may not be inited, if rli->inited==0 (server was never + a slave before). + */ flush_master_info(mi, 0); if (need_relay_log_purge) { - mi->rli.skip_log_purge= 0; + relay_log_purge= 1; thd->proc_info="Purging old relay logs"; if (purge_relay_logs(&mi->rli, thd, 0 /* not only reset, but also reinit */, &errmsg)) { - net_printf(&thd->net, 0, "Failed purging old relay logs: %s",errmsg); + net_printf(thd, 0, "Failed purging old relay logs: %s",errmsg); unlock_slave_threads(mi); DBUG_RETURN(1); } @@ -975,19 +1109,20 @@ int change_master(THD* thd, MASTER_INFO* mi) else { const char* msg; - mi->rli.skip_log_purge= 1; + relay_log_purge= 0; /* Relay log is already initialized */ if (init_relay_log_pos(&mi->rli, - mi->rli.relay_log_name, - mi->rli.relay_log_pos, + mi->rli.group_relay_log_name, + mi->rli.group_relay_log_pos, 0 /*no data lock*/, &msg)) { - net_printf(&thd->net,0,"Failed initializing relay log position: %s",msg); + net_printf(thd,0,"Failed initializing relay log position: %s",msg); unlock_slave_threads(mi); DBUG_RETURN(1); } } + mi->rli.group_master_log_pos = mi->master_log_pos; DBUG_PRINT("info", ("master_log_pos: %d", (ulong) mi->master_log_pos)); /* @@ -1000,17 +1135,18 @@ int change_master(THD* thd, MASTER_INFO* mi) ''/0: we have lost all copies of the original good coordinates. That's why we always save good coords in rli. */ - mi->rli.master_log_pos = mi->master_log_pos; - strmake(mi->rli.master_log_name,mi->master_log_name, - sizeof(mi->rli.master_log_name)-1); + mi->rli.group_master_log_pos= mi->master_log_pos; + strmake(mi->rli.group_master_log_name,mi->master_log_name, + sizeof(mi->rli.group_master_log_name)-1); - if (!mi->rli.master_log_name[0]) // uninitialized case - mi->rli.master_log_pos=0; + if (!mi->rli.group_master_log_name[0]) // uninitialized case + mi->rli.group_master_log_pos=0; pthread_mutex_lock(&mi->rli.data_lock); mi->rli.abort_pos_wait++; /* for MASTER_POS_WAIT() to abort */ - /* Clear the error, for a clean start. */ - clear_last_slave_error(&mi->rli); + /* Clear the errors, for a clean start */ + clear_slave_error(&mi->rli); + clear_until_condition(&mi->rli); /* If we don't write new coordinates to disk now, then old will remain in relay-log.info until START SLAVE is issued; but if mysqld is shutdown @@ -1018,13 +1154,13 @@ int change_master(THD* thd, MASTER_INFO* mi) in-memory value at restart (thus causing errors, as the old relay log does not exist anymore). 
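The flush_relay_log_info() call that follows this comment exists precisely so the new group coordinates reach disk before any shutdown; otherwise the slave would restart from stale file contents. A generic sketch of that write-then-sync idea using plain POSIX calls; the file name, format, and rename step are illustrative, not the server's relay-log.info handling:

#include <cstdio>
#include <string>
#include <unistd.h>   // fsync

// Persist "<relay log name>\n<position>\n" durably: write a temp file,
// fsync it, then rename over the old one so a crash never leaves a torn file.
static bool save_coordinates(const std::string &path,
                             const std::string &log_name,
                             unsigned long long pos)
{
  std::string tmp = path + ".tmp";
  FILE *f = std::fopen(tmp.c_str(), "w");
  if (!f)
    return false;
  bool ok = std::fprintf(f, "%s\n%llu\n", log_name.c_str(), pos) > 0;
  ok = ok && std::fflush(f) == 0 && fsync(fileno(f)) == 0;
  ok = std::fclose(f) == 0 && ok;
  return ok && std::rename(tmp.c_str(), path.c_str()) == 0;
}

int main()
{
  return save_coordinates("relay-log.info.example",
                          "slave-relay-bin.000003", 4ULL) ? 0 : 1;
}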
*/ - flush_relay_log_info(&mi->rli); + flush_relay_log_info(&mi->rli); pthread_cond_broadcast(&mi->data_cond); pthread_mutex_unlock(&mi->rli.data_lock); unlock_slave_threads(mi); thd->proc_info = 0; - send_ok(&thd->net); + send_ok(thd); DBUG_RETURN(0); } @@ -1058,19 +1194,20 @@ int cmp_master_pos(const char* log_file_name1, ulonglong log_pos1, int show_binlog_events(THD* thd) { + Protocol *protocol= thd->protocol; DBUG_ENTER("show_binlog_events"); List<Item> field_list; - const char* errmsg = 0; + const char *errmsg = 0; IO_CACHE log; File file = -1; Log_event::init_show_field_list(&field_list); - if (send_fields(thd, field_list, 1)) + if (protocol-> send_fields(&field_list, 1)) DBUG_RETURN(-1); if (mysql_bin_log.is_open()) { - LEX_MASTER_INFO *lex_mi = &thd->lex.mi; + LEX_MASTER_INFO *lex_mi= &thd->lex->mi; ha_rows event_count, limit_start, limit_end; my_off_t pos = max(BIN_LOG_HEADER_SIZE, lex_mi->pos); // user-friendly char search_file_name[FN_REFLEN], *name; @@ -1079,8 +1216,8 @@ int show_binlog_events(THD* thd) LOG_INFO linfo; Log_event* ev; - limit_start = thd->lex.select->offset_limit; - limit_end = thd->lex.select->select_limit + limit_start; + limit_start= thd->lex->current_select->offset_limit; + limit_end= thd->lex->current_select->select_limit + limit_start; name= search_file_name; if (log_file_name) @@ -1107,7 +1244,7 @@ int show_binlog_events(THD* thd) (ev = Log_event::read_log_event(&log,(pthread_mutex_t*)0,0)); ) { if (event_count >= limit_start && - ev->net_send(thd, linfo.log_file_name, pos)) + ev->net_send(protocol, linfo.log_file_name, pos)) { errmsg = "Net error"; delete ev; @@ -1146,7 +1283,7 @@ err: DBUG_RETURN(-1); } - send_eof(&thd->net); + send_eof(thd); pthread_mutex_lock(&LOCK_thread_count); thd->current_linfo = 0; pthread_mutex_unlock(&LOCK_thread_count); @@ -1156,31 +1293,32 @@ err: int show_binlog_info(THD* thd) { + Protocol *protocol= thd->protocol; DBUG_ENTER("show_binlog_info"); List<Item> field_list; field_list.push_back(new Item_empty_string("File", FN_REFLEN)); - field_list.push_back(new Item_empty_string("Position",20)); - field_list.push_back(new Item_empty_string("Binlog_do_db",20)); - field_list.push_back(new Item_empty_string("Binlog_ignore_db",20)); + field_list.push_back(new Item_return_int("Position",20, + MYSQL_TYPE_LONGLONG)); + field_list.push_back(new Item_empty_string("Binlog_Do_DB",255)); + field_list.push_back(new Item_empty_string("Binlog_Ignore_DB",255)); - if (send_fields(thd, field_list, 1)) + if (protocol->send_fields(&field_list, 1)) DBUG_RETURN(-1); - String* packet = &thd->packet; - packet->length(0); + protocol->prepare_for_resend(); if (mysql_bin_log.is_open()) { LOG_INFO li; mysql_bin_log.get_current_log(&li); int dir_len = dirname_length(li.log_file_name); - net_store_data(packet, li.log_file_name + dir_len); - net_store_data(packet, (longlong)li.pos); - net_store_data(packet, &binlog_do_db); - net_store_data(packet, &binlog_ignore_db); - if (my_net_write(&thd->net, (char*)thd->packet.ptr(), packet->length())) + protocol->store(li.log_file_name + dir_len, &my_charset_bin); + protocol->store((ulonglong) li.pos); + protocol->store(&binlog_do_db); + protocol->store(&binlog_ignore_db); + if (protocol->write()) DBUG_RETURN(-1); } - send_eof(&thd->net); + send_eof(thd); DBUG_RETURN(0); } @@ -1200,50 +1338,76 @@ int show_binlog_info(THD* thd) int show_binlogs(THD* thd) { IO_CACHE *index_file; + LOG_INFO cur; + File file; char fname[FN_REFLEN]; - NET* net = &thd->net; List<Item> field_list; - String *packet = &thd->packet; uint 
length; + int cur_dir_len; + Protocol *protocol= thd->protocol; + DBUG_ENTER("show_binlogs"); if (!mysql_bin_log.is_open()) { //TODO: Replace with ER() error message - send_error(net, 0, "You are not using binary logging"); + send_error(thd, 0, "You are not using binary logging"); return 1; } field_list.push_back(new Item_empty_string("Log_name", 255)); - if (send_fields(thd, field_list, 1)) - return 1; + field_list.push_back(new Item_return_int("File_size", 20, + MYSQL_TYPE_LONGLONG)); + if (protocol->send_fields(&field_list, 1)) + DBUG_RETURN(1); mysql_bin_log.lock_index(); index_file=mysql_bin_log.get_index_file(); - + + mysql_bin_log.get_current_log(&cur); + cur_dir_len= dirname_length(cur.log_file_name); + reinit_io_cache(index_file, READ_CACHE, (my_off_t) 0, 0, 0); /* The file ends with EOF or empty line */ while ((length=my_b_gets(index_file, fname, sizeof(fname))) > 1) { - int dir_len = dirname_length(fname); - packet->length(0); - /* The -1 is for removing newline from fname */ - net_store_data(packet, fname + dir_len, length-1-dir_len); - if (my_net_write(net, (char*) packet->ptr(), packet->length())) + int dir_len; + ulonglong file_length= 0; // Length if open fails + fname[--length] = '\0'; // remove the newline + + protocol->prepare_for_resend(); + dir_len= dirname_length(fname); + length-= dir_len; + protocol->store(fname + dir_len, length, &my_charset_bin); + + if (!(strncmp(fname+dir_len, cur.log_file_name+cur_dir_len, length))) + file_length= cur.pos; /* The active log, use the active position */ + else + { + /* this is an old log, open it and find the size */ + if ((file= my_open(fname, O_RDONLY | O_SHARE | O_BINARY, + MYF(0))) >= 0) + { + file_length= (ulonglong) my_seek(file, 0L, MY_SEEK_END, MYF(0)); + my_close(file, MYF(0)); + } + } + protocol->store(file_length); + if (protocol->write()) goto err; } mysql_bin_log.unlock_index(); - send_eof(net); - return 0; + send_eof(thd); + DBUG_RETURN(0); err: mysql_bin_log.unlock_index(); - return 1; + DBUG_RETURN(1); } int log_loaded_block(IO_CACHE* file) { - LOAD_FILE_INFO* lf_info; + LOAD_FILE_INFO *lf_info; uint block_len ; /* file->request_pos contains position where we started last read */ @@ -1265,7 +1429,7 @@ int log_loaded_block(IO_CACHE* file) { Create_file_log_event c(lf_info->thd,lf_info->ex,lf_info->db, lf_info->table_name, *lf_info->fields, - lf_info->handle_dup, buffer, + lf_info->handle_dup, lf_info->ignore, buffer, block_len, lf_info->log_delayed); mysql_bin_log.write(&c); lf_info->wrote_create_file = 1; @@ -1273,3 +1437,7 @@ int log_loaded_block(IO_CACHE* file) } return 0; } + +#endif /* HAVE_REPLICATION */ + + diff --git a/sql/sql_repl.h b/sql/sql_repl.h index 5eac754c25c..21b3d2955f7 100644 --- a/sql/sql_repl.h +++ b/sql/sql_repl.h @@ -14,6 +14,7 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#ifdef HAVE_REPLICATION #include "slave.h" typedef struct st_slave_info @@ -49,6 +50,7 @@ int cmp_master_pos(const char* log_file_name1, ulonglong log_pos1, int reset_slave(THD *thd, MASTER_INFO* mi); int reset_master(THD* thd); int purge_master_logs(THD* thd, const char* to_log); +int purge_master_logs_before_date(THD* thd, time_t purge_time); bool log_in_use(const char* log_name); void adjust_linfo_offsets(my_off_t purge_offset); int show_binlogs(THD* thd); @@ -65,7 +67,10 @@ typedef struct st_load_file_info enum enum_duplicates handle_dup; char* db; char* table_name; - bool wrote_create_file, log_delayed; + bool 
wrote_create_file, log_delayed, ignore; } LOAD_FILE_INFO; int log_loaded_block(IO_CACHE* file); + +#endif /* HAVE_REPLICATION */ + diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 32658f92416..46dba61cfc5 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -1,4 +1,4 @@ -/* Copyright (C) 2000-2003 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2004 MySQL AB & MySQL Finland AB & TCX DataKonsult AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -17,28 +17,29 @@ /* mysql_select and join optimization */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_IMPLEMENTATION #pragma implementation // gcc: Class implementation #endif #include "mysql_priv.h" #include "sql_select.h" -#include "opt_ft.h" - #include <m_ctype.h> #include <hash.h> #include <ft_global.h> -#include <assert.h> const char *join_type_str[]={ "UNKNOWN","system","const","eq_ref","ref", - "MAYBE_REF","ALL","range","index","fulltext" }; + "MAYBE_REF","ALL","range","index","fulltext", + "ref_or_null","unique_subquery","index_subquery" +}; +static void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array); static bool make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, DYNAMIC_ARRAY *keyuse); static bool update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse, JOIN_TAB *join_tab, - uint tables,COND *conds,table_map table_map); + uint tables, COND *conds, + table_map table_map, SELECT_LEX *select_lex); static int sort_keyuse(KEYUSE *a,KEYUSE *b); static void set_position(JOIN *join,uint index,JOIN_TAB *table,KEYUSE *key); static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse, @@ -56,18 +57,18 @@ static store_key *get_store_key(THD *thd, static bool make_simple_join(JOIN *join,TABLE *tmp_table); static bool make_join_select(JOIN *join,SQL_SELECT *select,COND *item); static void make_join_readinfo(JOIN *join,uint options); -static void join_free(JOIN *join); -static bool only_eq_ref_tables(JOIN *join,ORDER *order,table_map tables); +static bool only_eq_ref_tables(JOIN *join, ORDER *order, table_map tables); static void update_depend_map(JOIN *join); static void update_depend_map(JOIN *join, ORDER *order); static ORDER *remove_const(JOIN *join,ORDER *first_order,COND *cond, - bool *simple_order); + bool change_list, bool *simple_order); static int return_zero_rows(JOIN *join, select_result *res,TABLE_LIST *tables, List<Item> &fields, bool send_row, uint select_options, const char *info, - Item *having, Procedure *proc); -static COND *optimize_cond(COND *conds,Item::cond_result *cond_value); -static COND *remove_eq_conds(COND *cond,Item::cond_result *cond_value); + Item *having, Procedure *proc, + SELECT_LEX_UNIT *unit); +static COND *optimize_cond(THD *thd, COND *conds, + Item::cond_result *cond_value); static bool const_expression_in_where(COND *conds,Item *item, Item **comp_item); static bool open_tmp_table(TABLE *table); static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param, @@ -76,7 +77,7 @@ static int do_select(JOIN *join,List<Item> *fields,TABLE *tmp_table, Procedure *proc); static int sub_select_cache(JOIN *join,JOIN_TAB *join_tab,bool end_of_records); static int sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records); -static int flush_cached_records(JOIN *join,JOIN_TAB *join_tab,bool skipp_last); +static int flush_cached_records(JOIN *join,JOIN_TAB *join_tab,bool skip_last); static int end_send(JOIN *join, JOIN_TAB *join_tab, bool end_of_records); static int 
end_send_group(JOIN *join, JOIN_TAB *join_tab,bool end_of_records); static int end_write(JOIN *join, JOIN_TAB *join_tab, bool end_of_records); @@ -105,50 +106,60 @@ static int join_read_prev_same(READ_RECORD *info); static int join_read_prev(READ_RECORD *info); static int join_ft_read_first(JOIN_TAB *tab); static int join_ft_read_next(READ_RECORD *info); +static int join_read_always_key_or_null(JOIN_TAB *tab); +static int join_read_next_same_or_null(READ_RECORD *info); static COND *make_cond_for_table(COND *cond,table_map table, table_map used_table); static Item* part_of_refkey(TABLE *form,Field *field); -static uint find_shortest_key(TABLE *table, key_map usable_keys); +static uint find_shortest_key(TABLE *table, const key_map *usable_keys); static bool test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order, ha_rows select_limit, bool no_changes); -static int create_sort_index(JOIN_TAB *tab,ORDER *order,ha_rows filesort_limit, - ha_rows select_limit); +static int create_sort_index(THD *thd, JOIN *join, ORDER *order, + ha_rows filesort_limit, ha_rows select_limit); static int remove_duplicates(JOIN *join,TABLE *entry,List<Item> &fields, Item *having); static int remove_dup_with_compare(THD *thd, TABLE *entry, Field **field, ulong offset,Item *having); -static int remove_dup_with_hash_index(THD *thd, TABLE *table, +static int remove_dup_with_hash_index(THD *thd,TABLE *table, uint field_count, Field **first_field, ulong key_length,Item *having); static int join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count); static ulong used_blob_length(CACHE_FIELD **ptr); static bool store_record_in_cache(JOIN_CACHE *cache); -static void reset_cache(JOIN_CACHE *cache); +static void reset_cache_read(JOIN_CACHE *cache); +static void reset_cache_write(JOIN_CACHE *cache); static void read_cached_record(JOIN_TAB *tab); static bool cmp_buffer_with_ref(JOIN_TAB *tab); -static int setup_group(THD *thd,TABLE_LIST *tables,List<Item> &fields, - List<Item> &all_fields, ORDER *order, bool *hidden); static bool setup_new_fields(THD *thd,TABLE_LIST *tables,List<Item> &fields, List<Item> &all_fields,ORDER *new_order); -static ORDER *create_distinct_group(THD *thd, ORDER *order, - List<Item> &fields, +static ORDER *create_distinct_group(THD *thd, Item **ref_pointer_array, + ORDER *order, List<Item> &fields, bool *all_order_by_fields_used); static bool test_if_subpart(ORDER *a,ORDER *b); static TABLE *get_sort_by_table(ORDER *a,ORDER *b,TABLE_LIST *tables); static void calc_group_buffer(JOIN *join,ORDER *group); +static bool make_group_fields(JOIN *main_join, JOIN *curr_join); static bool alloc_group_fields(JOIN *join,ORDER *group); -static bool make_sum_func_list(JOIN *join,List<Item> &fields); -static bool change_to_use_tmp_fields(List<Item> &func); -static bool change_refs_to_tmp_fields(THD *thd, List<Item> &func); +// Create list for using with tempory table +static bool change_to_use_tmp_fields(THD *thd, Item **ref_pointer_array, + List<Item> &new_list1, + List<Item> &new_list2, + uint elements, List<Item> &items); +// Create list for using with tempory table +static bool change_refs_to_tmp_fields(THD *thd, Item **ref_pointer_array, + List<Item> &new_list1, + List<Item> &new_list2, + uint elements, List<Item> &items); static void init_tmptable_sum_functions(Item_sum **func); static void update_tmptable_sum_func(Item_sum **func,TABLE *tmp_table); -static void copy_sum_funcs(Item_sum **func_ptr); +static void copy_sum_funcs(Item_sum **func_ptr, Item_sum **end); static bool add_ref_to_table_cond(THD *thd, JOIN_TAB 
*join_tab); -static void init_sum_functions(Item_sum **func); +static bool init_sum_functions(Item_sum **func, Item_sum **end); static bool update_sum_func(Item_sum **func); static void select_describe(JOIN *join, bool need_tmp_table,bool need_order, bool distinct, const char *message=NullS); -static void describe_info(JOIN *join, const char *info); +static Item *remove_additional_cond(Item* conds); + /* This handles SELECT with and without UNION @@ -158,135 +169,160 @@ int handle_select(THD *thd, LEX *lex, select_result *result) { int res; register SELECT_LEX *select_lex = &lex->select_lex; + DBUG_ENTER("handle_select"); -#ifdef DISABLED_UNTIL_REWRITTEN_IN_4_1 - if (lex->olap) - { - SELECT_LEX *sl, *sl_next; - int error; - for (sl= &select_lex; sl; sl=sl_next) - { - sl_next=sl->next; // Save if sl->next changes - if (sl->olap != UNSPECIFIED_OLAP_TYPE) - { - if ((error=handle_olaps(lex,sl))) - return error; - lex->last_selects->next=sl_next; - } - } - lex->select = select_lex; - } -#endif /* DISABLED_UNTIL_REWRITTEN_IN_4_1 */ - if (select_lex->next) - res=mysql_union(thd,lex,result); + if (select_lex->next_select() || select_lex->master_unit()->fake_select_lex) + res=mysql_union(thd, lex, result, &lex->unit); else - res=mysql_select(thd,(TABLE_LIST*) select_lex->table_list.first, - select_lex->item_list, - select_lex->where, - (ORDER*) select_lex->order_list.first, - (ORDER*) select_lex->group_list.first, - select_lex->having, - (ORDER*) lex->proc_list.first, - select_lex->options | thd->options, - result); - if (res && result) + res= mysql_select(thd, &select_lex->ref_pointer_array, + (TABLE_LIST*) select_lex->table_list.first, + select_lex->with_wild, select_lex->item_list, + select_lex->where, + select_lex->order_list.elements + + select_lex->group_list.elements, + (ORDER*) select_lex->order_list.first, + (ORDER*) select_lex->group_list.first, + select_lex->having, + (ORDER*) lex->proc_list.first, + select_lex->options | thd->options, + result, &(lex->unit), &(lex->select_lex)); + + /* Don't set res if it's -1 as we may want this later */ + DBUG_PRINT("info",("res: %d report_error: %d", res, + thd->net.report_error)); + if (thd->net.report_error || res<0) + { + result->send_error(0, NullS); result->abort(); - delete result; - return res; + res= 1; // Error sent to client + } + DBUG_RETURN(res); } +/* + Function to setup clauses without sum functions +*/ +inline int setup_without_group(THD *thd, Item **ref_pointer_array, + TABLE_LIST *tables, + List<Item> &fields, + List<Item> &all_fields, + COND **conds, + ORDER *order, + ORDER *group, bool *hidden_group_fields) +{ + bool save_allow_sum_func; + int res; + DBUG_ENTER("setup_without_group"); + + save_allow_sum_func= thd->allow_sum_func; + thd->allow_sum_func= 0; + res= (setup_conds(thd, tables, conds) || + setup_order(thd, ref_pointer_array, tables, fields, all_fields, + order) || + setup_group(thd, ref_pointer_array, tables, fields, all_fields, + group, hidden_group_fields)); + thd->allow_sum_func= save_allow_sum_func; + DBUG_RETURN(res); +} + /***************************************************************************** Check fields, find best join, do the select and output fields. mysql_select assumes that all tables are already opened *****************************************************************************/ +/* + Prepare of whole select (including sub queries in future). 
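This rewrite splits the old monolithic mysql_select() into explicit JOIN::prepare() / JOIN::optimize() / JOIN::exec() phases, with an "optimized" flag so EXPLAIN and re-executed units do not run the optimizer twice. A toy sketch of that phase structure and guard; the class and its members are illustrative, not the real JOIN:

#include <cstdio>

// Toy three-phase query object mirroring the prepare/optimize/exec split.
class ToyJoin {
public:
  int prepare()                 // name resolution etc.; idempotent
  {
    prepared = true;
    return 0;
  }
  int optimize()                // plan choice; guarded so it cannot run twice
  {
    if (!prepared) return 1;
    if (optimized) return 0;    // already planned (e.g. EXPLAIN re-entry)
    optimized = true;
    return 0;
  }
  void exec()                   // run the chosen plan
  {
    if (!optimized) return;
    std::puts(executions++ ? "re-executing cached plan" : "executing plan");
  }
private:
  bool prepared = false, optimized = false;
  int executions = 0;
};

int main()
{
  ToyJoin j;
  j.prepare();
  j.optimize();
  j.exec();        // first run
  j.optimize();    // no-op thanks to the guard
  j.exec();        // re-execution, as a subquery would do
  return 0;
}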
+ return -1 on error + 0 on success +*/ int -mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds, - ORDER *order, ORDER *group,Item *having,ORDER *proc_param, - ulong select_options,select_result *result) -{ - TABLE *tmp_table; - int error, tmp_error; - bool need_tmp; - bool hidden_group_fields; - /* - simple_xxxxx is set if ORDER/GROUP BY doesn't include any references - to other tables than the first non-constant table in the JOIN. - It's also set if ORDER/GROUP BY is empty. - */ - bool simple_order, simple_group; - /* - Is set only in case if we have a GROUP BY clause - and no ORDER BY after constant elimination of 'order'. - */ - bool no_order; - /* Is set if we have a GROUP BY and we have ORDER BY on a constant. */ - bool skip_sort_order; - /* We cannot always prepare the result before selecting. */ - bool is_result_prepared; - ha_rows select_limit; - Item::cond_result cond_value; - SQL_SELECT *select; - DYNAMIC_ARRAY keyuse; - JOIN join; - Procedure *procedure; - List<Item> all_fields(fields); - bool select_distinct; - SELECT_LEX *cur_sel = thd->lex.select; - DBUG_ENTER("mysql_select"); +JOIN::prepare(Item ***rref_pointer_array, + TABLE_LIST *tables_init, + uint wild_num, COND *conds_init, uint og_num, + ORDER *order_init, ORDER *group_init, + Item *having_init, + ORDER *proc_param_init, SELECT_LEX *select_lex_arg, + SELECT_LEX_UNIT *unit_arg) +{ + DBUG_ENTER("JOIN::prepare"); - /* Check that all tables, fields, conds and order are ok */ + // to prevent double initialization on EXPLAIN + if (optimized) + DBUG_RETURN(0); - select_distinct=test(select_options & SELECT_DISTINCT); - tmp_table=0; - select=0; - no_order=skip_sort_order=0; - bzero((char*) &keyuse,sizeof(keyuse)); - thd->proc_info="init"; - thd->used_tables=0; // Updated by setup_fields - /* select_limit is used to decide if we are likely to scan the whole table */ - select_limit= thd->select_limit; - if (having || (select_options & OPTION_FOUND_ROWS)) - select_limit= HA_POS_ERROR; + conds= conds_init; + order= order_init; + group_list= group_init; + having= having_init; + proc_param= proc_param_init; + tables_list= tables_init; + select_lex= select_lex_arg; + select_lex->join= this; + union_part= (unit_arg->first_select()->next_select() != 0); + + /* Check that all tables, fields, conds and order are ok */ - if (setup_tables(tables) || - setup_fields(thd,tables,fields,1,&all_fields,1) || - setup_conds(thd,tables,&conds) || - setup_order(thd,tables,fields,all_fields,order) || - setup_group(thd,tables,fields,all_fields,group,&hidden_group_fields)) + if (setup_tables(tables_list) || + setup_wild(thd, tables_list, fields_list, &all_fields, wild_num) || + select_lex->setup_ref_array(thd, og_num) || + setup_fields(thd, (*rref_pointer_array), tables_list, fields_list, 1, + &all_fields, 1) || + setup_without_group(thd, (*rref_pointer_array), tables_list, fields_list, + all_fields, &conds, order, group_list, + &hidden_group_fields)) DBUG_RETURN(-1); /* purecov: inspected */ + ref_pointer_array= *rref_pointer_array; + if (having) { thd->where="having clause"; thd->allow_sum_func=1; - if (having->fix_fields(thd,tables) || thd->fatal_error) + select_lex->having_fix_field= 1; + bool having_fix_rc= (!having->fixed && + (having->fix_fields(thd, tables_list, &having) || + having->check_cols(1))); + select_lex->having_fix_field= 0; + if (having_fix_rc || thd->net.report_error) DBUG_RETURN(-1); /* purecov: inspected */ if (having->with_sum_func) - having->split_sum_func(all_fields); + having->split_sum_func2(thd, 
ref_pointer_array, all_fields, &having); + } + + // Is it subselect + { + Item_subselect *subselect; + if ((subselect= select_lex->master_unit()->item)) + { + Item_subselect::trans_res res; + if ((res= subselect->select_transformer(this)) != + Item_subselect::RES_OK) + DBUG_RETURN((res == Item_subselect::RES_ERROR)); + } } - if (setup_ftfuncs(thd)) /* should be after having->fix_fields */ + + if (setup_ftfuncs(select_lex)) /* should be after having->fix_fields */ DBUG_RETURN(-1); + + /* Check if one one uses a not constant column with group functions and no GROUP BY. TODO: Add check of calculation of GROUP functions and fields: SELECT COUNT(*)+table.col1 from table1; */ - join.table=0; - join.tables=0; { - if (!group) + if (!group_list) { uint flag=0; - List_iterator_fast<Item> it(fields); + List_iterator_fast<Item> it(fields_list); Item *item; while ((item= it++)) { if (item->with_sum_func) flag|=1; - else if (!(flag & 2) && !item->const_item()) + else if (!(flag & 2) && !item->const_during_execution()) flag|=2; } if (flag == 3) @@ -295,71 +331,66 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds, DBUG_RETURN(-1); } } - TABLE_LIST *table; - for (table=tables ; table ; table=table->next) - join.tables++; + TABLE_LIST *table_ptr; + for (table_ptr= tables_list ; table_ptr ; table_ptr= table_ptr->next) + tables++; + } + { + /* Caclulate the number of groups */ + send_group_parts= 0; + for (ORDER *group_tmp= group_list ; group_tmp ; group_tmp= group_tmp->next) + send_group_parts++; } - procedure=setup_procedure(thd,proc_param,result,fields,&error); + + procedure= setup_procedure(thd, proc_param, result, fields_list, &error); if (error) - DBUG_RETURN(-1); /* purecov: inspected */ + goto err; /* purecov: inspected */ if (procedure) { - if (setup_new_fields(thd,tables,fields,all_fields,procedure->param_fields)) - { /* purecov: inspected */ - delete procedure; /* purecov: inspected */ - DBUG_RETURN(-1); /* purecov: inspected */ - } + if (setup_new_fields(thd, tables_list, fields_list, all_fields, + procedure->param_fields)) + goto err; /* purecov: inspected */ if (procedure->group) { - if (!test_if_subpart(procedure->group,group)) + if (!test_if_subpart(procedure->group,group_list)) { /* purecov: inspected */ my_message(0,"Can't handle procedures with differents groups yet", MYF(0)); /* purecov: inspected */ - delete procedure; /* purecov: inspected */ - DBUG_RETURN(-1); /* purecov: inspected */ + goto err; /* purecov: inspected */ } } #ifdef NOT_NEEDED - else if (!group && procedure->flags & PROC_GROUP) + else if (!group_list && procedure->flags & PROC_GROUP) { my_message(0,"Select must have a group with this procedure",MYF(0)); - delete procedure; - DBUG_RETURN(-1); + goto err; } #endif if (order && (procedure->flags & PROC_NO_SORT)) - { /* purecov: inspected */ + { /* purecov: inspected */ my_message(0,"Can't use order with this procedure",MYF(0)); /* purecov: inspected */ - delete procedure; /* purecov: inspected */ - DBUG_RETURN(-1); /* purecov: inspected */ + goto err; /* purecov: inspected */ } } /* Init join struct */ - join.thd=thd; - join.lock=thd->lock; - join.join_tab=0; - join.tmp_table_param.copy_field=0; - join.sum_funcs=0; - join.send_records=join.found_records=join.examined_rows=0; - join.tmp_table_param.end_write_records= HA_POS_ERROR; - join.first_record=join.sort_and_group=0; - join.select_options=select_options; - join.result=result; - count_field_types(&join.tmp_table_param,all_fields,0); - join.const_tables=0; - join.having=0; - join.do_send_rows = 
1; - join.group= group != 0; - join.row_limit= ((select_distinct || order || group) ? HA_POS_ERROR : - thd->select_limit); + count_field_types(&tmp_table_param, all_fields, 0); + ref_pointer_array_size= all_fields.elements*sizeof(Item*); + this->group= group_list != 0; + row_limit= ((select_distinct || order || group_list) ? HA_POS_ERROR : + unit_arg->select_limit_cnt); + /* select_limit is used to decide if we are likely to scan the whole table */ + select_limit= unit_arg->select_limit_cnt; + if (having || (select_options & OPTION_FOUND_ROWS)) + select_limit= HA_POS_ERROR; + do_send_rows = (unit_arg->select_limit_cnt) ? 1 : 0; + unit= unit_arg; #ifdef RESTRICTED_GROUP - if (join.sum_func_count && !group && (join.func_count || join.field_count)) + if (sum_func_count && !group_list && (func_count || field_count)) { my_message(ER_WRONG_SUM_SELECT,ER(ER_WRONG_SUM_SELECT),MYF(0)); - delete procedure; - DBUG_RETURN(-1); + goto err; } #endif /* @@ -370,184 +401,204 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds, indexes on the result table, which may be used during the select, if it is the same table (Bug #6034). Do the preparation after the select phase. */ - if ((is_result_prepared= (! procedure && - ! test(select_options & OPTION_BUFFER_RESULT))) && - result->prepare(fields)) - { /* purecov: inspected */ - DBUG_RETURN(-1); /* purecov: inspected */ + if (! procedure && ! test(select_options & OPTION_BUFFER_RESULT) && + result && result->prepare(fields_list, unit_arg)) + goto err; /* purecov: inspected */ + + if (select_lex->olap == ROLLUP_TYPE && rollup_init()) + goto err; + if (alloc_func_list()) + goto err; + + DBUG_RETURN(0); // All OK + +err: + delete procedure; /* purecov: inspected */ + procedure= 0; + DBUG_RETURN(-1); /* purecov: inspected */ +} + +/* + test if it is known for optimisation IN subquery + + SYNOPSYS + JOIN::test_in_subselect + where - pointer for variable in which conditions should be + stored if subquery is known + + RETURN + 1 - known + 0 - unknown +*/ + +bool JOIN::test_in_subselect(Item **where) +{ + if (conds->type() == Item::FUNC_ITEM && + ((Item_func *)this->conds)->functype() == Item_func::EQ_FUNC && + ((Item_func *)conds)->arguments()[0]->type() == Item::REF_ITEM && + ((Item_func *)conds)->arguments()[1]->type() == Item::FIELD_ITEM) + { + join_tab->info= "Using index"; + *where= 0; + return 1; } + if (conds->type() == Item::COND_ITEM && + ((class Item_func *)this->conds)->functype() == + Item_func::COND_AND_FUNC) + { + if ((*where= remove_additional_cond(conds))) + join_tab->info= "Using index; Using where"; + else + join_tab->info= "Using index"; + return 1; + } + return 0; +} + +/* + global select optimisation. + return 0 - success + 1 - error + error code saved in field 'error' +*/ +int +JOIN::optimize() +{ + DBUG_ENTER("JOIN::optimize"); + // to prevent double initialization on EXPLAIN + if (optimized) + DBUG_RETURN(0); + optimized= 1; + + // Ignore errors of execution if option IGNORE present + if (thd->lex->ignore) + thd->lex->current_select->no_error= 1; #ifdef HAVE_REF_TO_FIELDS // Not done yet /* Add HAVING to WHERE if possible */ - if (having && !group && ! 
join.sum_func_count) + if (having && !group_list && !sum_func_count) { if (!conds) { - conds=having; - having=0; + conds= having; + having= 0; } else if ((conds=new Item_cond_and(conds,having))) { - conds->fix_fields(thd,tables); - conds->change_ref_to_fields(thd,tables); + conds->fix_fields(thd, tables_list, &conds); + conds->change_ref_to_fields(thd, tables_list); conds->top_level_item(); - having=0; + having= 0; } } #endif - conds=optimize_cond(conds,&cond_value); - if (thd->fatal_error) // Out of memory + conds= optimize_cond(thd, conds, &cond_value); + if (thd->net.report_error) { - delete procedure; - DBUG_RETURN(0); + error= 1; + DBUG_PRINT("error",("Error from optimize_cond")); + DBUG_RETURN(1); } - if (cond_value == Item::COND_FALSE || !thd->select_limit) - { /* Impossible cond */ - if (! is_result_prepared && ! procedure && result->prepare(fields)) - goto err; - error=return_zero_rows(&join, result, tables, fields, - join.tmp_table_param.sum_func_count != 0 && !group, - select_options,"Impossible WHERE",having, - procedure); - delete procedure; - DBUG_RETURN(error); + + if (cond_value == Item::COND_FALSE || + (!unit->select_limit_cnt && !(select_options & OPTION_FOUND_ROWS))) + { /* Impossible cond */ + zero_result_cause= "Impossible WHERE"; + error= 0; + DBUG_RETURN(0); } /* Optimize count(*), min() and max() */ - if (tables && join.tmp_table_param.sum_func_count && ! group) + if (tables_list && tmp_table_param.sum_func_count && ! group_list) { int res; /* - opt_sum_query returns -1 if no rows match to the WHERE conditions, - or 1 if all items were resolved, or 0, or an error number HA_ERR_... + opt_sum_query() returns -1 if no rows match to the WHERE conditions, + or 1 if all items were resolved, or 0, or an error number HA_ERR_... */ - if ((res=opt_sum_query(tables, all_fields, conds))) + if ((res=opt_sum_query(tables_list, all_fields, conds))) { if (res > 1) { - delete procedure; - DBUG_RETURN(-1); + DBUG_RETURN(1); } if (res < 0) { - if (! is_result_prepared && ! procedure && result->prepare(fields)) - goto err; - error=return_zero_rows(&join, result, tables, fields, !group, - select_options,"No matching min/max row", - having,procedure); - delete procedure; - DBUG_RETURN(error); - } - if (select_options & SELECT_DESCRIBE) - { - describe_info(&join, "Select tables optimized away"); - delete procedure; - DBUG_RETURN(error); + zero_result_cause= "No matching min/max row"; + error=0; + DBUG_RETURN(0); } - tables=0; // All tables resolved + zero_result_cause= "Select tables optimized away"; + tables_list= 0; // All tables resolved } } - if (!tables) - { // Only test of functions - error=0; - if (select_options & SELECT_DESCRIBE) - describe_info(&join, "No tables used"); - else - { - if (! is_result_prepared && ! 
procedure && result->prepare(fields)) - goto err; - result->send_fields(fields,1); - if (!having || having->val_int()) - { - if (join.do_send_rows && result->send_data(fields)) - { - result->send_error(0,NullS); /* purecov: inspected */ - error=1; - } - else - error=(int) result->send_eof(); - } - else - error=(int) result->send_eof(); - } - delete procedure; - DBUG_RETURN(error); + if (!tables_list) + { + error= 0; + DBUG_RETURN(0); } - - error = -1; - join.sort_by_table=get_sort_by_table(order,group,tables); + error= -1; // Error is sent to client + sort_by_table= get_sort_by_table(order, group_list, tables_list); /* Calculate how to do the join */ - thd->proc_info="statistics"; - if (make_join_statistics(&join,tables,conds,&keyuse) || thd->fatal_error) - goto err; - - thd->proc_info="preparing"; - - select_distinct= select_distinct && (join.const_tables != join.tables); + thd->proc_info= "statistics"; + if (make_join_statistics(this, tables_list, conds, &keyuse) || + thd->is_fatal_error) + { + DBUG_PRINT("error",("Error: make_join_statistics() failed")); + DBUG_RETURN(1); + } - if (result->initialize_tables(&join)) - goto err; - if (join.const_table_map != join.found_const_table_map && - !(select_options & SELECT_DESCRIBE)) + /* Remove distinct if only const tables */ + select_distinct= select_distinct && (const_tables != tables); + thd->proc_info= "preparing"; + if (result->initialize_tables(this)) { - if (! is_result_prepared && ! procedure && result->prepare(fields)) - goto err; - error=return_zero_rows(&join,result,tables,fields, - join.tmp_table_param.sum_func_count != 0 && - !group,0,"no matching row in const table",having, - procedure); - goto err; + DBUG_PRINT("error",("Error: initialize_tables() failed")); + DBUG_RETURN(1); // error == -1 + } + if (const_table_map != found_const_table_map && + !(select_options & SELECT_DESCRIBE) && + (!conds || + !(conds->used_tables() & RAND_TABLE_BIT) || + select_lex->master_unit() == &thd->lex->unit)) // upper level SELECT + { + zero_result_cause= "no matching row in const table"; + DBUG_PRINT("error",("Error: %s", zero_result_cause)); + error= 0; + DBUG_RETURN(0); } if (!(thd->options & OPTION_BIG_SELECTS) && - join.best_read > (double) thd->variables.max_join_size && + best_read > (double) thd->variables.max_join_size && !(select_options & SELECT_DESCRIBE)) { /* purecov: inspected */ - result->send_error(ER_TOO_BIG_SELECT,ER(ER_TOO_BIG_SELECT)); /* purecov: inspected */ - error= 1; /* purecov: inspected */ - goto err; /* purecov: inspected */ + my_message(ER_TOO_BIG_SELECT, ER(ER_TOO_BIG_SELECT), MYF(0)); + error= -1; + DBUG_RETURN(1); } - if (join.const_tables && !thd->locked_tables && + if (const_tables && !thd->locked_tables && !(select_options & SELECT_NO_UNLOCK)) - { - TABLE **table, **end; - for (table=join.table, end=table + join.const_tables ; - table != end; - table++) - { - /* BDB tables require that we call index_end() before doing an unlock */ - if ((*table)->key_read) - { - (*table)->key_read=0; - (*table)->file->extra(HA_EXTRA_NO_KEYREAD); - } - (*table)->file->index_end(); - } - mysql_unlock_some_tables(thd, join.table,join.const_tables); - } - if (!conds && join.outer_join) + mysql_unlock_some_tables(thd, table, const_tables); + + if (!conds && outer_join) { /* Handle the case where we have an OUTER JOIN without a WHERE */ conds=new Item_int((longlong) 1,1); // Always true } - select=make_select(*join.table, join.const_table_map, - join.const_table_map,conds,&error); + select=make_select(*table, const_table_map, + 
const_table_map, conds, &error); if (error) - { /* purecov: inspected */ - error= -1; /* purecov: inspected */ - goto err; /* purecov: inspected */ + { /* purecov: inspected */ + error= -1; /* purecov: inspected */ + DBUG_PRINT("error",("Error: make_select() failed")); + DBUG_RETURN(1); } - if (make_join_select(&join,select,conds)) + if (make_join_select(this, select, conds)) { - if (! is_result_prepared && ! procedure && result->prepare(fields)) - goto err; - error=return_zero_rows(&join, result, tables, fields, - join.tmp_table_param.sum_func_count != 0 && !group, - select_options, - "Impossible WHERE noticed after reading const tables", - having,procedure); - goto err; + zero_result_cause= + "Impossible WHERE noticed after reading const tables"; + DBUG_RETURN(0); // error == 0 } error= -1; /* if goto err */ @@ -555,7 +606,7 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds, /* Optimize distinct away if possible */ { ORDER *org_order= order; - order=remove_const(&join,order,conds,&simple_order); + order=remove_const(this, order,conds,1, &simple_order); /* If we are using ORDER BY NULL or ORDER BY const_expression, return result in any order (even if we are using a GROUP BY) @@ -563,12 +614,12 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds, if (!order && org_order) skip_sort_order= 1; } - if (group || join.tmp_table_param.sum_func_count) + if (group_list || tmp_table_param.sum_func_count) { - if (! hidden_group_fields) + if (! hidden_group_fields && rollup.state == ROLLUP::STATE_NONE) select_distinct=0; } - else if (select_distinct && join.tables - join.const_tables == 1) + else if (select_distinct && tables - const_tables == 1) { /* We are only using one table. In this case we change DISTINCT to a @@ -585,15 +636,16 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds, because in this case we can just create a temporary table that holds LIMIT rows and stop when this table is full. */ - JOIN_TAB *tab= &join.join_tab[join.const_tables]; + JOIN_TAB *tab= &join_tab[const_tables]; bool all_order_fields_used; if (order) skip_sort_order= test_if_skip_sort_order(tab, order, select_limit, 1); - if ((group=create_distinct_group(thd, order, fields, - &all_order_fields_used))) + if ((group_list=create_distinct_group(thd, select_lex->ref_pointer_array, + order, fields_list, + &all_order_fields_used))) { bool skip_group= (skip_sort_order && - test_if_skip_sort_order(tab, group, select_limit, + test_if_skip_sort_order(tab, group_list, select_limit, 1) != 0); if ((skip_group && all_order_fields_used) || select_limit == HA_POS_ERROR || @@ -609,43 +661,52 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds, /* Force MySQL to read the table in sorted order to get result in ORDER BY order. 
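remove_const(), used above on both ORDER BY and GROUP BY, drops elements that became constant after const-table elimination and reports whether what survives only touches the first non-const table (simple_order / simple_group). A small standalone sketch of that filtering, assuming a trivial element type with precomputed flags (illustrative only):

#include <cstdio>
#include <vector>

struct OrderElem {
  const char *name;
  bool is_const;          // constant after const-table elimination
  bool first_table_only;  // refers only to the first non-const table
};

// Keep the non-constant elements; *simple_order stays true only if every
// surviving element can be resolved from the first table alone.
static std::vector<OrderElem> remove_const_elems(const std::vector<OrderElem> &order,
                                                 bool *simple_order)
{
  std::vector<OrderElem> kept;
  *simple_order = true;
  for (const OrderElem &e : order)
  {
    if (e.is_const)
      continue;                       // ORDER BY const contributes nothing
    if (!e.first_table_only)
      *simple_order = false;          // sorting must wait until after the join
    kept.push_back(e);
  }
  return kept;
}

int main()
{
  std::vector<OrderElem> order = { {"t1.a", false, true},
                                   {"3",    true,  true},
                                   {"t2.b", false, false} };
  bool simple;
  std::printf("%zu elements kept, simple_order=%d\n",
              remove_const_elems(order, &simple).size(), simple);
  return 0;
}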
- */ - join.tmp_table_param.quick_group=0; + */ + tmp_table_param.quick_group=0; } order=0; - } - join.group=1; // For end_write_group + } + group=1; // For end_write_group } else - group= 0; - } else if (thd->fatal_error) // End of memory - goto err; + group_list= 0; + } + else if (thd->is_fatal_error) // End of memory + DBUG_RETURN(1); + } + simple_group= 0; + { + ORDER *old_group_list; + group_list= remove_const(this, (old_group_list= group_list), conds, + rollup.state == ROLLUP::STATE_NONE, + &simple_group); + if (old_group_list && !group_list) + select_distinct= 0; } - group=remove_const(&join,group,conds,&simple_group); - if (!group && join.group) + if (!group_list && group) { order=0; // The output has only one row simple_order=1; select_distinct= 0; // No need in distinct for 1 row } - calc_group_buffer(&join,group); - join.send_group_parts=join.tmp_table_param.group_parts; /* Save org parts */ + calc_group_buffer(this, group_list); + send_group_parts= tmp_table_param.group_parts; /* Save org parts */ if (procedure && procedure->group) { - group=procedure->group=remove_const(&join,procedure->group,conds, - &simple_group); - calc_group_buffer(&join,group); + group_list= procedure->group= remove_const(this, procedure->group, conds, + 1, &simple_group); + calc_group_buffer(this, group_list); } - if (test_if_subpart(group,order) || - (!group && join.tmp_table_param.sum_func_count)) + if (test_if_subpart(group_list, order) || + (!group_list && tmp_table_param.sum_func_count)) order=0; // Can't use sort on head table if using row cache - if (join.full_join) + if (full_join) { - if (group) + if (group_list) simple_group=0; if (order) simple_order=0; @@ -660,17 +721,87 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds, - We are using different ORDER BY and GROUP BY orders - The user wants us to buffer the result. */ - need_tmp= (join.const_tables != join.tables && + need_tmp= (const_tables != tables && ((select_distinct || !simple_order || !simple_group) || - (group && order) || + (group_list && order) || test(select_options & OPTION_BUFFER_RESULT))); // No cache for MATCH - make_join_readinfo(&join, + make_join_readinfo(this, (select_options & (SELECT_DESCRIBE | SELECT_NO_JOIN_CACHE)) | - (cur_sel->ftfunc_list.elements ? SELECT_NO_JOIN_CACHE : - 0)); + (select_lex->ftfunc_list->elements ? + SELECT_NO_JOIN_CACHE : 0)); + + /* Perform FULLTEXT search before all regular searches */ + if (!(select_options & SELECT_DESCRIBE)) + init_ftfuncs(thd, select_lex, test(order)); + + /* + is this simple IN subquery? 
+ */ + if (!group_list && !order && + unit->item && unit->item->substype() == Item_subselect::IN_SUBS && + tables == 1 && conds && + !unit->first_select()->next_select()) + { + if (!having) + { + Item *where= 0; + if (join_tab[0].type == JT_EQ_REF && + join_tab[0].ref.items[0]->name == in_left_expr_name) + { + if (test_in_subselect(&where)) + { + join_tab[0].type= JT_UNIQUE_SUBQUERY; + error= 0; + DBUG_RETURN(unit->item-> + change_engine(new + subselect_uniquesubquery_engine(thd, + join_tab, + unit->item, + where))); + } + } + else if (join_tab[0].type == JT_REF && + join_tab[0].ref.items[0]->name == in_left_expr_name) + { + if (test_in_subselect(&where)) + { + join_tab[0].type= JT_INDEX_SUBQUERY; + error= 0; + DBUG_RETURN(unit->item-> + change_engine(new + subselect_indexsubquery_engine(thd, + join_tab, + unit->item, + where, + 0))); + } + } + } else if (join_tab[0].type == JT_REF_OR_NULL && + join_tab[0].ref.items[0]->name == in_left_expr_name && + having->type() == Item::FUNC_ITEM && + ((Item_func *) having)->functype() == + Item_func::ISNOTNULLTEST_FUNC) + { + join_tab[0].type= JT_INDEX_SUBQUERY; + error= 0; + + if ((conds= remove_additional_cond(conds))) + join_tab->info= "Using index; Using where"; + else + join_tab->info= "Using index"; + + DBUG_RETURN(unit->item-> + change_engine(new subselect_indexsubquery_engine(thd, + join_tab, + unit->item, + conds, + 1))); + } + + } /* Need to tell Innobase that to play it safe, it should fetch all columns of the tables: this is because MySQL may build row @@ -680,72 +811,52 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds, */ #ifdef HAVE_INNOBASE_DB - if (need_tmp || select_distinct || group || order) + if (need_tmp || select_distinct || group_list || order) { - for (uint i_h = join.const_tables; i_h < join.tables; i_h++) + for (uint i_h = const_tables; i_h < tables; i_h++) { - TABLE* table_h = join.join_tab[i_h].table; - if (table_h->db_type == DB_TYPE_INNODB) - table_h->file->extra(HA_EXTRA_DONT_USE_CURSOR_TO_UPDATE); + TABLE* table_h = join_tab[i_h].table; + table_h->file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY); } } #endif - DBUG_EXECUTE("info",TEST_join(&join);); + DBUG_EXECUTE("info",TEST_join(this);); /* Because filesort always does a full table scan or a quick range scan we must add the removed reference to the select for the table. We only need to do this when we have a simple_order or simple_group as in other cases the join is done before the sort. 
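The JT_UNIQUE_SUBQUERY / JT_INDEX_SUBQUERY engines chosen a few hunks above replace a full subquery execution with one index probe per outer row: "x IN (SELECT key FROM t ...)" becomes "look x up in t's index on key". A hedged standalone sketch of that idea, using a hash set as the stand-in index; these are not the server's engine classes:

#include <cstdio>
#include <unordered_set>

// Stand-in for a unique index on the subquery table's key column.
struct UniqueIndex {
  std::unordered_set<long> keys;
  bool lookup(long key) const { return keys.count(key) != 0; }
};

// "Unique subquery engine": answer `outer_value IN (SELECT key FROM t)`
// with a single index probe instead of re-running the subquery.
static bool in_subquery(const UniqueIndex &idx, long outer_value)
{
  return idx.lookup(outer_value);
}

int main()
{
  UniqueIndex idx{{1, 5, 42}};
  for (long v : {5L, 7L})
    std::printf("%ld IN (...): %s\n", v, in_subquery(idx, v) ? "yes" : "no");
  return 0;
}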
- */ - if ((order || group) && join.join_tab[join.const_tables].type != JT_ALL && - join.join_tab[join.const_tables].type != JT_FT && - (order && simple_order || group && simple_group)) - { - if (add_ref_to_table_cond(thd,&join.join_tab[join.const_tables])) - goto err; + */ + if (const_tables != tables && + (order || group_list) && + join_tab[const_tables].type != JT_ALL && + join_tab[const_tables].type != JT_FT && + join_tab[const_tables].type != JT_REF_OR_NULL && + (order && simple_order || group_list && simple_group)) + { + if (add_ref_to_table_cond(thd,&join_tab[const_tables])) + DBUG_RETURN(1); } if (!(select_options & SELECT_BIG_RESULT) && - ((group && join.const_tables != join.tables && + ((group_list && const_tables != tables && (!simple_group || - !test_if_skip_sort_order(&join.join_tab[join.const_tables], group, - thd->select_limit,0))) || + !test_if_skip_sort_order(&join_tab[const_tables], group_list, + unit->select_limit_cnt, 0))) || select_distinct) && - join.tmp_table_param.quick_group && !procedure) + tmp_table_param.quick_group && !procedure) { need_tmp=1; simple_order=simple_group=0; // Force tmp table without sort } + tmp_having= having; if (select_options & SELECT_DESCRIBE) { - /* - Check if we managed to optimize ORDER BY away and don't use temporary - table to resolve ORDER BY: in that case, we only may need to do - filesort for GROUP BY. - */ - if (!order && !no_order && (!skip_sort_order || !need_tmp)) - { - /* Reset 'order' to 'group' and reinit variables describing 'order' */ - order= group; - simple_order= simple_group; - skip_sort_order= 0; - } - if (order && - (join.const_tables == join.tables || - ((simple_order || skip_sort_order) && - test_if_skip_sort_order(&join.join_tab[join.const_tables], order, - select_limit, 0)))) - order=0; - select_describe(&join,need_tmp, - order != 0 && !skip_sort_order, - select_distinct); - error=0; - goto err; + error= 0; + DBUG_RETURN(0); } - - /* Perform FULLTEXT search before all regular searches */ - init_ftfuncs(thd,test(order)); + having= 0; /* Create a tmp table if distinct or if the sort is too complicated */ if (need_tmp) @@ -753,111 +864,368 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds, DBUG_PRINT("info",("Creating tmp table")); thd->proc_info="Creating tmp table"; - join.tmp_table_param.hidden_field_count= (all_fields.elements - - fields.elements); - if (!(tmp_table = - create_tmp_table(thd,&join.tmp_table_param,all_fields, + init_items_ref_array(); + + tmp_table_param.hidden_field_count= (all_fields.elements - + fields_list.elements); + if (!(exec_tmp_table1 = + create_tmp_table(thd, &tmp_table_param, all_fields, ((!simple_group && !procedure && !(test_flags & TEST_NO_KEY_GROUP)) ? - group : (ORDER*) 0), - group ? 0 : select_distinct, - group && simple_group, - (order == 0 || skip_sort_order) && - select_limit != HA_POS_ERROR, - join.select_options))) - goto err; /* purecov: inspected */ + group_list : (ORDER*) 0), + group_list ? 0 : select_distinct, + group_list && simple_group, + select_options, + (order == 0 || skip_sort_order) ? select_limit : + HA_POS_ERROR, + (char *) ""))) + DBUG_RETURN(1); - if (having && (join.sort_and_group || (tmp_table->distinct && !group))) - join.having=having; + /* + We don't have to store rows in temp table that doesn't match HAVING if: + - we are sorting the table and writing complete group rows to the + temp table. + - We are using DISTINCT without resolving the distinct as a GROUP BY + on all columns. 
+ + If having is not handled here, it will be checked before the row + is sent to the client. + */ + if (tmp_having && + (sort_and_group || (exec_tmp_table1->distinct && !group_list))) + having= tmp_having; /* if group or order on first table, sort first */ - if (group && simple_group) + if (group_list && simple_group) { DBUG_PRINT("info",("Sorting for group")); thd->proc_info="Sorting for group"; - if (create_sort_index(&join.join_tab[join.const_tables],group, + if (create_sort_index(thd, this, group_list, HA_POS_ERROR, HA_POS_ERROR) || - make_sum_func_list(&join,all_fields) || - alloc_group_fields(&join,group)) - goto err; - group=0; + alloc_group_fields(this, group_list) || + make_sum_func_list(all_fields, fields_list, 1)) + DBUG_RETURN(1); + group_list=0; } else { - if (make_sum_func_list(&join,all_fields)) - goto err; - if (!group && ! tmp_table->distinct && order && simple_order) + if (make_sum_func_list(all_fields, fields_list, 0)) + DBUG_RETURN(1); + if (!group_list && ! exec_tmp_table1->distinct && order && simple_order) { DBUG_PRINT("info",("Sorting for order")); thd->proc_info="Sorting for order"; - if (create_sort_index(&join.join_tab[join.const_tables],order, - HA_POS_ERROR, HA_POS_ERROR)) - goto err; /* purecov: inspected */ + if (create_sort_index(thd, this, order, + HA_POS_ERROR, HA_POS_ERROR)) + DBUG_RETURN(1); order=0; } } - + /* Optimize distinct when used on some of the tables SELECT DISTINCT t1.a FROM t1,t2 WHERE t1.b=t2.b In this case we can stop scanning t2 when we have found one t1.a */ - if (tmp_table->distinct) + if (exec_tmp_table1->distinct) { table_map used_tables= thd->used_tables; - JOIN_TAB *join_tab=join.join_tab+join.tables-1; + JOIN_TAB *last_join_tab= join_tab+tables-1; do { - if (used_tables & join_tab->table->map) + if (used_tables & last_join_tab->table->map) break; - join_tab->not_used_in_distinct=1; - } while (join_tab-- != join.join_tab); + last_join_tab->not_used_in_distinct=1; + } while (last_join_tab-- != join_tab); /* Optimize "select distinct b from t1 order by key_part_1 limit #" */ if (order && skip_sort_order) { - /* Should always succeed */ - if (test_if_skip_sort_order(&join.join_tab[join.const_tables], - order, thd->select_limit,0)) + /* Should always succeed */ + if (test_if_skip_sort_order(&join_tab[const_tables], + order, unit->select_limit_cnt, 0)) order=0; } } + + if (thd->lex->subqueries) + { + if (!(tmp_join= (JOIN*)thd->alloc(sizeof(JOIN)))) + DBUG_RETURN(-1); + error= 0; // Ensure that tmp_join.error= 0 + restore_tmp(); + } + } - /* Copy data to the temporary table */ - thd->proc_info="Copying to tmp table"; - if ((tmp_error=do_select(&join,(List<Item> *) 0,tmp_table,0))) + error= 0; + DBUG_RETURN(0); +} + + +/* + Restore values in temporary join +*/ +void JOIN::restore_tmp() +{ + memcpy(tmp_join, this, (size_t) sizeof(JOIN)); +} + + +int +JOIN::reinit() +{ + DBUG_ENTER("JOIN::reinit"); + /* TODO move to unit reinit */ + unit->offset_limit_cnt =select_lex->offset_limit; + unit->select_limit_cnt =select_lex->select_limit+select_lex->offset_limit; + if (unit->select_limit_cnt < select_lex->select_limit) + unit->select_limit_cnt= HA_POS_ERROR; // no limit + if (unit->select_limit_cnt == HA_POS_ERROR) + select_lex->options&= ~OPTION_FOUND_ROWS; + + if (!optimized && setup_tables(tables_list)) + DBUG_RETURN(1); + + /* Reset of sum functions */ + first_record= 0; + + if (exec_tmp_table1) + { + exec_tmp_table1->file->extra(HA_EXTRA_RESET_STATE); + exec_tmp_table1->file->delete_all_rows(); + free_io_cache(exec_tmp_table1); + 
filesort_free_buffers(exec_tmp_table1); + } + if (exec_tmp_table2) + { + exec_tmp_table2->file->extra(HA_EXTRA_RESET_STATE); + exec_tmp_table2->file->delete_all_rows(); + free_io_cache(exec_tmp_table2); + filesort_free_buffers(exec_tmp_table2); + } + if (items0) + set_items_ref_array(items0); + + if (join_tab_save) + memcpy(join_tab, join_tab_save, sizeof(JOIN_TAB) * tables); + + if (tmp_join) + restore_tmp(); + + if (sum_funcs) + { + Item_sum *func, **func_ptr= sum_funcs; + while ((func= *(func_ptr++))) + func->clear(); + } + + DBUG_RETURN(0); +} + + +bool +JOIN::save_join_tab() +{ + if (!join_tab_save && select_lex->master_unit()->uncacheable) + { + if (!(join_tab_save= (JOIN_TAB*)thd->memdup((gptr) join_tab, + sizeof(JOIN_TAB) * tables))) + return 1; + } + return 0; +} + + +/* + Exec select +*/ +void +JOIN::exec() +{ + List<Item> *columns_list= &fields_list; + int tmp_error; + DBUG_ENTER("JOIN::exec"); + + error= 0; + if (procedure) + { + procedure_fields_list= fields_list; + if (procedure->change_columns(procedure_fields_list) || + result->prepare(procedure_fields_list, unit)) { - error=tmp_error; - goto err; /* purecov: inspected */ + thd->limit_found_rows= thd->examined_row_count= 0; + DBUG_VOID_RETURN; } - if (join.having) - join.having=having=0; // Allready done + columns_list= &procedure_fields_list; + } + else if (test(select_options & OPTION_BUFFER_RESULT) && + result && result->prepare(fields_list, unit)) + { + error= 1; + thd->limit_found_rows= thd->examined_row_count= 0; + DBUG_VOID_RETURN; + } + if (!tables_list) + { // Only test of functions + if (select_options & SELECT_DESCRIBE) + select_describe(this, FALSE, FALSE, FALSE, + (zero_result_cause?zero_result_cause:"No tables used")); + else + { + result->send_fields(*columns_list, 1); + /* + We have to test for 'conds' here as the WHERE may not be constant + even if we don't have any tables for prepared statements or if + conds uses something like 'rand()'. + */ + if (cond_value != Item::COND_FALSE && + (!conds || conds->val_int()) && + (!having || having->val_int())) + { + if (do_send_rows && + (procedure ? (procedure->send_row(procedure_fields_list) || + procedure->end_of_records()) : result->send_data(fields_list))) + error= 1; + else + { + error= (int) result->send_eof(); + send_records= ((select_options & OPTION_FOUND_ROWS) ? 1 : + thd->sent_row_count); + } + } + else + { + error=(int) result->send_eof(); + send_records= 0; + } + } + /* Single select (without union) always returns 0 or 1 row */ + thd->limit_found_rows= send_records; + thd->examined_row_count= 0; + DBUG_VOID_RETURN; + } + thd->limit_found_rows= thd->examined_row_count= 0; + + if (zero_result_cause) + { + (void) return_zero_rows(this, result, tables_list, *columns_list, + send_row_on_empty_set(), + select_options, + zero_result_cause, + having, procedure, + unit); + DBUG_VOID_RETURN; + } + + if (select_options & SELECT_DESCRIBE) + { + /* + Check if we managed to optimize ORDER BY away and don't use temporary + table to resolve ORDER BY: in that case, we only may need to do + filesort for GROUP BY. 
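The JOIN::save_join_tab()/JOIN::reinit() pair shown earlier in this hunk keeps a pristine copy of the join_tab array so that an uncacheable (e.g. correlated) subquery can be re-executed from the original plan after execution has modified it. A minimal sketch of that save-once, restore-per-execution pattern follows; the PlanEntry/Plan names are invented for illustration and are not the server's types.

  #include <cstdio>
  #include <cstring>
  #include <vector>

  // Stand-in for one element of the execution plan (JOIN_TAB in the patch).
  struct PlanEntry {
    int table_no;
    int access_type;   // mutated while the query runs, e.g. downgraded to a scan
  };

  struct Plan {
    std::vector<PlanEntry> tabs;       // live plan, clobbered by execution
    std::vector<PlanEntry> tabs_save;  // pristine copy, taken at most once

    // Taken only when the plan can run more than once (the patch guards this
    // with select_lex->master_unit()->uncacheable).
    void save_plan() { if (tabs_save.empty()) tabs_save = tabs; }

    // Before every re-execution, restore the saved plan wholesale, the same
    // way JOIN::reinit() copies join_tab_save back over join_tab.
    void reinit() {
      if (!tabs_save.empty())
        std::memcpy(tabs.data(), tabs_save.data(),
                    sizeof(PlanEntry) * tabs.size());
    }
  };

  int main() {
    Plan p;
    p.tabs = { {0, 1}, {1, 2} };
    p.save_plan();
    p.tabs[1].access_type = 9;   // execution changed the access method
    p.reinit();                  // the next execution sees the original plan
    std::printf("restored access_type=%d\n", p.tabs[1].access_type);  // prints 2
    return 0;
  }
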
+ */ + if (!order && !no_order && (!skip_sort_order || !need_tmp)) + { + /* + Reset 'order' to 'group_list' and reinit variables describing + 'order' + */ + order= group_list; + simple_order= simple_group; + skip_sort_order= 0; + } + if (order && + (const_tables == tables || + ((simple_order || skip_sort_order) && + test_if_skip_sort_order(&join_tab[const_tables], order, + select_limit, 0)))) + order=0; + having= tmp_having; + select_describe(this, need_tmp, + order != 0 && !skip_sort_order, + select_distinct); + DBUG_VOID_RETURN; + } + + JOIN *curr_join= this; + List<Item> *curr_all_fields= &all_fields; + List<Item> *curr_fields_list= &fields_list; + TABLE *curr_tmp_table= 0; + + /* Create a tmp table if distinct or if the sort is too complicated */ + if (need_tmp) + { + if (tmp_join) + curr_join= tmp_join; + curr_tmp_table= exec_tmp_table1; + + /* Copy data to the temporary table */ + thd->proc_info= "Copying to tmp table"; + + if ((tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table, 0))) + { + error= tmp_error; + DBUG_VOID_RETURN; + } + curr_tmp_table->file->info(HA_STATUS_VARIABLE); + + if (curr_join->having) + curr_join->having= curr_join->tmp_having= 0; // Allready done + /* Change sum_fields reference to calculated fields in tmp_table */ - if (join.sort_and_group || tmp_table->group) + curr_join->all_fields= *curr_all_fields; + if (!items1) { - if (change_to_use_tmp_fields(all_fields)) - goto err; - join.tmp_table_param.field_count+=join.tmp_table_param.sum_func_count+ - join.tmp_table_param.func_count; - join.tmp_table_param.sum_func_count=join.tmp_table_param.func_count=0; + items1= items0 + all_fields.elements; + if (sort_and_group || curr_tmp_table->group) + { + if (change_to_use_tmp_fields(thd, items1, + tmp_fields_list1, tmp_all_fields1, + fields_list.elements, all_fields)) + DBUG_VOID_RETURN; + } + else + { + if (change_refs_to_tmp_fields(thd, items1, + tmp_fields_list1, tmp_all_fields1, + fields_list.elements, all_fields)) + DBUG_VOID_RETURN; + } + curr_join->tmp_all_fields1= tmp_all_fields1; + curr_join->tmp_fields_list1= tmp_fields_list1; + curr_join->items1= items1; + } + curr_all_fields= &tmp_all_fields1; + curr_fields_list= &tmp_fields_list1; + curr_join->set_items_ref_array(items1); + + if (sort_and_group || curr_tmp_table->group) + { + curr_join->tmp_table_param.field_count+= + curr_join->tmp_table_param.sum_func_count+ + curr_join->tmp_table_param.func_count; + curr_join->tmp_table_param.sum_func_count= + curr_join->tmp_table_param.func_count= 0; } else { - if (change_refs_to_tmp_fields(thd,all_fields)) - goto err; - join.tmp_table_param.field_count+=join.tmp_table_param.func_count; - join.tmp_table_param.func_count=0; + curr_join->tmp_table_param.field_count+= + curr_join->tmp_table_param.func_count; + curr_join->tmp_table_param.func_count= 0; } + + // procedure can't be used inside subselect => we do nothing special for it if (procedure) procedure->update_refs(); - if (tmp_table->group) + + if (curr_tmp_table->group) { // Already grouped - if (!order && !no_order && !skip_sort_order) - order=group; /* order by group */ - group=0; + if (!curr_join->order && !curr_join->no_order && !skip_sort_order) + curr_join->order= curr_join->group_list; /* order by group */ + curr_join->group_list= 0; } - + /* If we have different sort & group then we must sort the data by group and copy it to another tmp table @@ -866,193 +1234,411 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds, like SEC_TO_TIME(SUM(...)). 
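The comment above, together with the test_if_subpart(group_list, order) check that follows it, decides whether the grouped rows must be copied into a second temporary table. As far as this hunk shows, the test is in effect asking whether ORDER BY is a leading prefix of GROUP BY, in which case the sort performed for grouping already yields the requested order. A standalone sketch of that prefix test, using a hypothetical is_leading_prefix() helper over plain column names rather than the server's ORDER list walk:

  #include <iostream>
  #include <string>
  #include <vector>

  // True when 'candidate' is a leading prefix of 'full'.  Used here the way
  // the patch uses test_if_subpart(group_list, order): if ORDER BY is a
  // prefix of GROUP BY, the sort done for grouping already delivers the
  // requested ordering and no extra temporary-table pass is required.
  static bool is_leading_prefix(const std::vector<std::string> &candidate,
                                const std::vector<std::string> &full)
  {
    if (candidate.size() > full.size())
      return false;
    for (size_t i = 0; i < candidate.size(); i++)
      if (candidate[i] != full[i])
        return false;
    return true;
  }

  int main()
  {
    std::vector<std::string> group_by = {"a", "b"};
    std::cout << is_leading_prefix({"a"}, group_by) << ' '        // 1: reuse sort
              << is_leading_prefix({"b", "a"}, group_by) << '\n'; // 0: extra pass
    return 0;
  }
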
*/ - if (group && (!test_if_subpart(group,order) || select_distinct) || - (select_distinct && - join.tmp_table_param.using_indirect_summary_function)) + if (curr_join->group_list && (!test_if_subpart(curr_join->group_list, + curr_join->order) || + curr_join->select_distinct) || + (curr_join->select_distinct && + curr_join->tmp_table_param.using_indirect_summary_function)) { /* Must copy to another table */ - TABLE *tmp_table2; DBUG_PRINT("info",("Creating group table")); - + /* Free first data from old join */ - join_free(&join); - if (make_simple_join(&join,tmp_table)) - goto err; - calc_group_buffer(&join,group); - count_field_types(&join.tmp_table_param,all_fields, - select_distinct && !group); - join.tmp_table_param.hidden_field_count=(all_fields.elements- - fields.elements); - - /* group data to new table */ - if (!(tmp_table2 = create_tmp_table(thd,&join.tmp_table_param,all_fields, - (ORDER*) 0, - select_distinct && !group, - 1, 0, - join.select_options))) - goto err; /* purecov: inspected */ - if (group) + curr_join->join_free(0); + if (make_simple_join(curr_join, curr_tmp_table)) + DBUG_VOID_RETURN; + calc_group_buffer(curr_join, group_list); + count_field_types(&curr_join->tmp_table_param, + curr_join->tmp_all_fields1, + curr_join->select_distinct && !curr_join->group_list); + curr_join->tmp_table_param.hidden_field_count= + (curr_join->tmp_all_fields1.elements- + curr_join->tmp_fields_list1.elements); + + + if (exec_tmp_table2) + curr_tmp_table= exec_tmp_table2; + else + { + /* group data to new table */ + if (!(curr_tmp_table= + exec_tmp_table2= create_tmp_table(thd, + &curr_join->tmp_table_param, + *curr_all_fields, + (ORDER*) 0, + curr_join->select_distinct && + !curr_join->group_list, + 1, curr_join->select_options, + HA_POS_ERROR, + (char *) ""))) + DBUG_VOID_RETURN; + curr_join->exec_tmp_table2= exec_tmp_table2; + } + if (curr_join->group_list) { - thd->proc_info="Creating sort index"; - if (create_sort_index(join.join_tab,group,HA_POS_ERROR, HA_POS_ERROR) || - alloc_group_fields(&join,group)) + thd->proc_info= "Creating sort index"; + if (curr_join->join_tab == join_tab && save_join_tab()) { - free_tmp_table(thd,tmp_table2); /* purecov: inspected */ - goto err; /* purecov: inspected */ + DBUG_VOID_RETURN; + } + if (create_sort_index(thd, curr_join, curr_join->group_list, + HA_POS_ERROR, HA_POS_ERROR) || + make_group_fields(this, curr_join)) + { + DBUG_VOID_RETURN; } - group=0; } + thd->proc_info="Copying to group table"; tmp_error= -1; - if (make_sum_func_list(&join,all_fields) || - (tmp_error=do_select(&join,(List<Item> *) 0,tmp_table2,0))) + if (curr_join != this) { - error=tmp_error; - free_tmp_table(thd,tmp_table2); - goto err; /* purecov: inspected */ + if (sum_funcs2) + { + curr_join->sum_funcs= sum_funcs2; + curr_join->sum_funcs_end= sum_funcs_end2; + } + else + { + curr_join->alloc_func_list(); + sum_funcs2= curr_join->sum_funcs; + sum_funcs_end2= curr_join->sum_funcs_end; + } } - end_read_record(&join.join_tab->read_record); - free_tmp_table(thd,tmp_table); - join.const_tables=join.tables; // Mark free for join_free() - tmp_table=tmp_table2; - join.join_tab[0].table=0; // Table is freed - - if (change_to_use_tmp_fields(all_fields)) // No sum funcs anymore - goto err; - join.tmp_table_param.field_count+=join.tmp_table_param.sum_func_count; - join.tmp_table_param.sum_func_count=0; - } - - if (tmp_table->distinct) - select_distinct=0; /* Each row is unique */ - - join_free(&join); /* Free quick selects */ - if (select_distinct && ! 
group) + if (curr_join->make_sum_func_list(*curr_all_fields, *curr_fields_list, + 1)) + DBUG_VOID_RETURN; + curr_join->group_list= 0; + if ((tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table, + 0))) + { + error= tmp_error; + DBUG_VOID_RETURN; + } + end_read_record(&curr_join->join_tab->read_record); + curr_join->const_tables= curr_join->tables; // Mark free for join_free() + curr_join->join_tab[0].table= 0; // Table is freed + + // No sum funcs anymore + if (!items2) + { + items2= items1 + all_fields.elements; + if (change_to_use_tmp_fields(thd, items2, + tmp_fields_list2, tmp_all_fields2, + fields_list.elements, tmp_all_fields1)) + DBUG_VOID_RETURN; + curr_join->tmp_fields_list2= tmp_fields_list2; + curr_join->tmp_all_fields2= tmp_all_fields2; + } + curr_fields_list= &curr_join->tmp_fields_list2; + curr_all_fields= &curr_join->tmp_all_fields2; + curr_join->set_items_ref_array(items2); + curr_join->tmp_table_param.field_count+= + curr_join->tmp_table_param.sum_func_count; + curr_join->tmp_table_param.sum_func_count= 0; + } + if (curr_tmp_table->distinct) + curr_join->select_distinct=0; /* Each row is unique */ + + curr_join->join_free(0); /* Free quick selects */ + if (curr_join->select_distinct && ! curr_join->group_list) { thd->proc_info="Removing duplicates"; - if (having) - having->update_used_tables(); - if (remove_duplicates(&join,tmp_table,fields, having)) - goto err; /* purecov: inspected */ - having=0; - select_distinct=0; - } - tmp_table->reginfo.lock_type=TL_UNLOCK; - if (make_simple_join(&join,tmp_table)) - goto err; - calc_group_buffer(&join,group); - count_field_types(&join.tmp_table_param,all_fields,0); + if (curr_join->tmp_having) + curr_join->tmp_having->update_used_tables(); + if (remove_duplicates(curr_join, curr_tmp_table, + *curr_fields_list, curr_join->tmp_having)) + DBUG_VOID_RETURN; + curr_join->tmp_having=0; + curr_join->select_distinct=0; + } + curr_tmp_table->reginfo.lock_type= TL_UNLOCK; + if (make_simple_join(curr_join, curr_tmp_table)) + DBUG_VOID_RETURN; + calc_group_buffer(curr_join, curr_join->group_list); + count_field_types(&curr_join->tmp_table_param, *curr_all_fields, 0); + } if (procedure) - { - if (procedure->change_columns(fields) || - result->prepare(fields)) - goto err; - count_field_types(&join.tmp_table_param,all_fields,0); - } - else if (! 
is_result_prepared && result->prepare(fields)) - goto err; - if (join.group || join.tmp_table_param.sum_func_count || + count_field_types(&curr_join->tmp_table_param, *curr_all_fields, 0); + + if (curr_join->group || curr_join->tmp_table_param.sum_func_count || (procedure && (procedure->flags & PROC_GROUP))) { - if (alloc_group_fields(&join,group) || - setup_copy_fields(thd, &join.tmp_table_param,all_fields) || - make_sum_func_list(&join,all_fields) || thd->fatal_error) - goto err; /* purecov: inspected */ + if (make_group_fields(this, curr_join)) + { + DBUG_VOID_RETURN; + } + if (!items3) + { + if (!items0) + init_items_ref_array(); + items3= ref_pointer_array + (all_fields.elements*4); + setup_copy_fields(thd, &curr_join->tmp_table_param, + items3, tmp_fields_list3, tmp_all_fields3, + curr_fields_list->elements, *curr_all_fields); + tmp_table_param.save_copy_funcs= curr_join->tmp_table_param.copy_funcs; + tmp_table_param.save_copy_field= curr_join->tmp_table_param.copy_field; + tmp_table_param.save_copy_field_end= + curr_join->tmp_table_param.copy_field_end; + curr_join->tmp_all_fields3= tmp_all_fields3; + curr_join->tmp_fields_list3= tmp_fields_list3; + } + else + { + curr_join->tmp_table_param.copy_funcs= tmp_table_param.save_copy_funcs; + curr_join->tmp_table_param.copy_field= tmp_table_param.save_copy_field; + curr_join->tmp_table_param.copy_field_end= + tmp_table_param.save_copy_field_end; + } + curr_fields_list= &tmp_fields_list3; + curr_all_fields= &tmp_all_fields3; + curr_join->set_items_ref_array(items3); + + if (curr_join->make_sum_func_list(*curr_all_fields, *curr_fields_list, + 1) || thd->is_fatal_error) + DBUG_VOID_RETURN; } - if (group || order) + if (curr_join->group_list || curr_join->order) { DBUG_PRINT("info",("Sorting for send_fields")); thd->proc_info="Sorting result"; /* If we have already done the group, add HAVING to sorted table */ - if (having && ! group && ! join.sort_and_group) - { - having->update_used_tables(); // Some tables may have been const - JOIN_TAB *table=&join.join_tab[join.const_tables]; - table_map used_tables= join.const_table_map | table->table->map; - - Item* sort_table_cond=make_cond_for_table(having,used_tables,used_tables); + if (curr_join->tmp_having && ! curr_join->group_list && + ! 
curr_join->sort_and_group) + { + // Some tables may have been const + curr_join->tmp_having->update_used_tables(); + JOIN_TAB *curr_table= &curr_join->join_tab[curr_join->const_tables]; + table_map used_tables= (curr_join->const_table_map | + curr_table->table->map); + + Item* sort_table_cond= make_cond_for_table(curr_join->tmp_having, + used_tables, + used_tables); if (sort_table_cond) { - if (!table->select) - if (!(table->select=new SQL_SELECT)) - goto err; - if (!table->select->cond) - table->select->cond=sort_table_cond; + if (!curr_table->select) + if (!(curr_table->select= new SQL_SELECT)) + DBUG_VOID_RETURN; + if (!curr_table->select->cond) + curr_table->select->cond= sort_table_cond; else // This should never happen - if (!(table->select->cond=new Item_cond_and(table->select->cond, - sort_table_cond))) - goto err; - table->select_cond=table->select->cond; - table->select_cond->top_level_item(); - DBUG_EXECUTE("where",print_where(table->select->cond, + { + if (!(curr_table->select->cond= + new Item_cond_and(curr_table->select->cond, + sort_table_cond))) + DBUG_VOID_RETURN; + /* + Item_cond_and do not need fix_fields for execution, its parameters + are fixed or do not need fix_fields, too + */ + curr_table->select->cond->quick_fix_field(); + } + curr_table->select_cond= curr_table->select->cond; + curr_table->select_cond->top_level_item(); + DBUG_EXECUTE("where",print_where(curr_table->select->cond, "select and having");); - having=make_cond_for_table(having,~ (table_map) 0,~used_tables); - DBUG_EXECUTE("where",print_where(conds,"having after sort");); + curr_join->tmp_having= make_cond_for_table(curr_join->tmp_having, + ~ (table_map) 0, + ~used_tables); + DBUG_EXECUTE("where",print_where(curr_join->tmp_having, + "having after sort");); } } - if (group) - select_limit= HA_POS_ERROR; - else { - /* - We can abort sorting after thd->select_limit rows if we there is no - WHERE clause for any tables after the sorted one. - */ - JOIN_TAB *table= &join.join_tab[join.const_tables+1]; - JOIN_TAB *end_table= &join.join_tab[join.tables]; - for (; table < end_table ; table++) + if (group) + curr_join->select_limit= HA_POS_ERROR; + else { /* - table->keyuse is set in the case there was an original WHERE clause - on the table that was optimized away. - table->on_expr tells us that it was a LEFT JOIN and there will be - at least one row generated from the table. + We can abort sorting after thd->select_limit rows if we there is no + WHERE clause for any tables after the sorted one. */ - if (table->select_cond || (table->keyuse && !table->on_expr)) + JOIN_TAB *curr_table= &curr_join->join_tab[curr_join->const_tables+1]; + JOIN_TAB *end_table= &curr_join->join_tab[curr_join->tables]; + for (; curr_table < end_table ; curr_table++) { - /* We have to sort all rows */ - select_limit= HA_POS_ERROR; - break; + /* + table->keyuse is set in the case there was an original WHERE clause + on the table that was optimized away. + table->on_expr tells us that it was a LEFT JOIN and there will be + at least one row generated from the table. + */ + if (curr_table->select_cond || + (curr_table->keyuse && !curr_table->on_expr)) + { + /* We have to sort all rows */ + curr_join->select_limit= HA_POS_ERROR; + break; + } } } + if (curr_join->join_tab == join_tab && save_join_tab()) + { + DBUG_VOID_RETURN; + } + /* + Here we sort rows for ORDER BY/GROUP BY clause, if the optimiser + chose FILESORT to be faster than INDEX SCAN or there is no + suitable index present. 
+ Note, that create_sort_index calls test_if_skip_sort_order and may + finally replace sorting with index scan if there is a LIMIT clause in + the query. XXX: it's never shown in EXPLAIN! + OPTION_FOUND_ROWS supersedes LIMIT and is taken into account. + */ + if (create_sort_index(thd, curr_join, + curr_join->group_list ? + curr_join->group_list : curr_join->order, + curr_join->select_limit, + (select_options & OPTION_FOUND_ROWS ? + HA_POS_ERROR : unit->select_limit_cnt))) + DBUG_VOID_RETURN; } - /* - Here we sort rows for ORDER BY/GROUP BY clause, if the optimiser - chose FILESORT to be faster than INDEX SCAN or there is no - suitable index present. - Note, that create_sort_index calls test_if_skip_sort_order and may - finally replace sorting with index scan if there is a LIMIT clause in - the query. XXX: it's never shown in EXPLAIN! - OPTION_FOUND_ROWS supersedes LIMIT and is taken into account. - */ - if (create_sort_index(&join.join_tab[join.const_tables], - group ? group : order, - select_limit, - join.select_options & OPTION_FOUND_ROWS ? - HA_POS_ERROR : thd->select_limit)) - goto err; } - join.having=having; // Actually a parameter + curr_join->having= curr_join->tmp_having; thd->proc_info="Sending data"; - error=do_select(&join,&fields,NULL,procedure); + error= thd->net.report_error ? -1 : + do_select(curr_join, curr_fields_list, NULL, procedure); + thd->limit_found_rows= curr_join->send_records; + thd->examined_row_count= curr_join->examined_rows; + DBUG_VOID_RETURN; +} -err: - thd->limit_found_rows = join.send_records; - thd->examined_row_count = join.examined_rows; - thd->proc_info="end"; - join.lock=0; // It's faster to unlock later - join_free(&join); - thd->proc_info="end2"; // QQ - if (tmp_table) - free_tmp_table(thd,tmp_table); - thd->proc_info="end3"; // QQ + +/* + Clean up join. Return error that hold JOIN. 
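Two of the checks above feed the limit handed to create_sort_index(): sorting may stop after select_limit rows only when no later table still carries a condition that can reject joined rows, and OPTION_FOUND_ROWS (set by SQL_CALC_FOUND_ROWS) forces a full sort by passing HA_POS_ERROR, i.e. no limit. A condensed sketch of that decision, with stand-in names for the constants involved:

  #include <cstdint>
  #include <cstdio>

  typedef std::uint64_t rows_t;                 // stand-in for ha_rows
  static const rows_t NO_LIMIT = ~(rows_t) 0;   // stand-in for HA_POS_ERROR

  enum { OPT_FOUND_ROWS = 1 };                  // stand-in for OPTION_FOUND_ROWS

  // How many rows filesort may stop after: the LIMIT when it can be applied
  // during the sort, otherwise "all rows" (SQL_CALC_FOUND_ROWS in use, or a
  // later table's condition may still reject rows, so the limit can't be
  // pushed into the sort).
  static rows_t sort_limit(unsigned options, rows_t select_limit,
                           bool later_table_filters_rows)
  {
    if ((options & OPT_FOUND_ROWS) || later_table_filters_rows)
      return NO_LIMIT;
    return select_limit;
  }

  int main()
  {
    std::printf("%llu\n", (unsigned long long) sort_limit(0, 10, false));   // 10
    std::printf("%d\n", sort_limit(OPT_FOUND_ROWS, 10, false) == NO_LIMIT); // 1
    return 0;
  }
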
+*/ + +int +JOIN::cleanup() +{ + DBUG_ENTER("JOIN::cleanup"); + select_lex->join= 0; + + if (tmp_join) + { + if (join_tab != tmp_join->join_tab) + { + JOIN_TAB *tab, *end; + for (tab= join_tab, end= tab+tables ; tab != end ; tab++) + { + tab->cleanup(); + } + } + tmp_join->tmp_join= 0; + tmp_table_param.copy_field=0; + DBUG_RETURN(tmp_join->cleanup()); + } + + lock=0; // It's faster to unlock later + join_free(1); + if (exec_tmp_table1) + free_tmp_table(thd, exec_tmp_table1); + if (exec_tmp_table2) + free_tmp_table(thd, exec_tmp_table2); delete select; delete_dynamic(&keyuse); delete procedure; - thd->proc_info="end4"; // QQ + for (SELECT_LEX_UNIT *lex_unit= select_lex->first_inner_unit(); + lex_unit != 0; + lex_unit= lex_unit->next_unit()) + { + error|= lex_unit->cleanup(); + } DBUG_RETURN(error); } + +int +mysql_select(THD *thd, Item ***rref_pointer_array, + TABLE_LIST *tables, uint wild_num, List<Item> &fields, + COND *conds, uint og_num, ORDER *order, ORDER *group, + Item *having, ORDER *proc_param, ulong select_options, + select_result *result, SELECT_LEX_UNIT *unit, + SELECT_LEX *select_lex) +{ + int err; + bool free_join= 1; + DBUG_ENTER("mysql_select"); + + JOIN *join; + if (select_lex->join != 0) + { + join= select_lex->join; + // is it single SELECT in derived table, called in derived table creation + if (select_lex->linkage != DERIVED_TABLE_TYPE || + (select_options & SELECT_DESCRIBE)) + { + if (select_lex->linkage != GLOBAL_OPTIONS_TYPE) + { + //here is EXPLAIN of subselect or derived table + if (join->change_result(result)) + { + DBUG_RETURN(-1); + } + } + else + { + if (join->prepare(rref_pointer_array, tables, wild_num, + conds, og_num, order, group, having, proc_param, + select_lex, unit)) + { + goto err; + } + } + } + free_join= 0; + join->select_options= select_options; + } + else + { + if (!(join= new JOIN(thd, fields, select_options, result))) + DBUG_RETURN(-1); + thd->proc_info="init"; + thd->used_tables=0; // Updated by setup_fields + if (join->prepare(rref_pointer_array, tables, wild_num, + conds, og_num, order, group, having, proc_param, + select_lex, unit)) + { + goto err; + } + } + + if ((err= join->optimize())) + { + goto err; // 1 + } + + if (thd->lex->describe & DESCRIBE_EXTENDED) + { + join->conds_history= join->conds; + join->having_history= (join->having?join->having:join->tmp_having); + } + + if (thd->net.report_error) + goto err; + + join->exec(); + + if (thd->lex->describe & DESCRIBE_EXTENDED) + { + select_lex->where= join->conds_history; + select_lex->having= join->having_history; + } + +err: + if (free_join) + { + thd->proc_info="end"; + err= join->cleanup(); + if (thd->net.report_error) + err= -1; + delete join; + DBUG_RETURN(err); + } + DBUG_RETURN(join->error); +} + /***************************************************************************** Create JOIN_TABS, make a guess about the table types, Approximate how many records will be used in each table @@ -1060,7 +1646,7 @@ err: static ha_rows get_quick_record_count(THD *thd, SQL_SELECT *select, TABLE *table, - key_map keys,ha_rows limit) + const key_map *keys,ha_rows limit) { int error; DBUG_ENTER("get_quick_record_count"); @@ -1068,8 +1654,8 @@ static ha_rows get_quick_record_count(THD *thd, SQL_SELECT *select, { select->head=table; table->reginfo.impossible_range=0; - if ((error=select->test_quick_select(thd, keys,(table_map) 0,limit)) - == 1) + if ((error= select->test_quick_select(thd, *(key_map *)keys,(table_map) 0, + limit, 0)) == 1) DBUG_RETURN(select->quick->records); if (error == -1) { @@ 
-1089,14 +1675,15 @@ static ha_rows get_quick_record_count(THD *thd, SQL_SELECT *select, 0 ok 1 Fatal error */ - + static bool make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, DYNAMIC_ARRAY *keyuse_array) { int error; - uint i,table_count,const_count,found_ref,refs,key,const_ref,eq_part; - table_map found_const_table_map,all_table_map; + uint i,table_count,const_count,key; + table_map found_const_table_map, all_table_map, found_ref, refs; + key_map const_ref, eq_part; TABLE **table_vector; JOIN_TAB *stat,*stat_end,*s,**stat_ref; KEYUSE *keyuse,*start_keyuse; @@ -1114,16 +1701,20 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, join->best_ref=stat_vector; stat_end=stat+table_count; - found_const_table_map=all_table_map=0; + found_const_table_map= all_table_map=0; const_count=0; for (s=stat,i=0 ; tables ; s++,tables=tables->next,i++) { TABLE *table; stat_vector[i]=s; + s->keys.init(); + s->const_keys.init(); + s->checked_keys.init(); + s->needed_reg.init(); table_vector[i]=s->table=table=tables->table; table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);// record count - table->quick_keys=0; + table->quick_keys.clear_all(); table->reginfo.join_tab=s; table->reginfo.not_exists_optimize=0; bzero((char*) table->const_key_parts, sizeof(key_part_map)*table->keys); @@ -1200,8 +1791,8 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, } if (conds || outer_join) - if (update_ref_and_keys(join->thd,keyuse_array,stat,join->tables, - conds,~outer_join)) + if (update_ref_and_keys(join->thd, keyuse_array, stat, join->tables, + conds, ~outer_join, join->select_lex)) DBUG_RETURN(1); /* Read tables with 0 or 1 rows (system tables) */ @@ -1269,23 +1860,25 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, { start_keyuse=keyuse; key=keyuse->key; - s->keys|= (key_map) 1 << key; // QQ: remove this ? + s->keys.set_bit(key); // QQ: remove this ? - refs=const_ref=eq_part=0; + refs=0; + const_ref.clear_all(); + eq_part.clear_all(); do { - if (keyuse->val->type() != Item::NULL_ITEM) + if (keyuse->val->type() != Item::NULL_ITEM && !keyuse->optimize) { if (!((~found_const_table_map) & keyuse->used_tables)) - const_ref|= (key_map) 1 << keyuse->keypart; + const_ref.set_bit(keyuse->keypart); else refs|=keyuse->used_tables; - eq_part|= (uint) 1 << keyuse->keypart; + eq_part.set_bit(keyuse->keypart); } keyuse++; } while (keyuse->table == table && keyuse->key == key); - if (eq_part == PREV_BITS(uint,table->key_info[key].key_parts) && + if (eq_part.is_prefix(table->key_info[key].key_parts) && ((table->key_info[key].flags & (HA_NOSAME | HA_END_SPACE_KEY)) == HA_NOSAME) && !table->fulltext_searched) @@ -1342,7 +1935,7 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, if (s->worst_seeks < 2.0) // Fix for small tables s->worst_seeks=2.0; - if (s->const_keys) + if (! s->const_keys.is_clear_all()) { ha_rows records; SQL_SELECT *select; @@ -1351,7 +1944,7 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, s->on_expr ? 
s->on_expr : conds, &error); records= get_quick_record_count(join->thd, select, s->table, - s->const_keys, join->row_limit); + &s->const_keys, join->row_limit); s->quick=select->quick; s->needed_reg=select->needed_reg; select->quick=0; @@ -1392,7 +1985,10 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, join->found_const_table_map=found_const_table_map; if (join->const_tables != join->tables) + { + optimize_keyuse(join, keyuse_array); find_best_combination(join,all_table_map & ~join->const_table_map); + } else { memcpy((gptr) join->best_positions,(gptr) join->positions, @@ -1414,13 +2010,37 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, typedef struct key_field_t { // Used when finding key fields Field *field; Item *val; // May be empty if diff constant - uint level,const_level; // QQ: Remove const_level + uint level; + uint optimize; bool eq_func; - bool exists_optimize; + /* + If true, the condition this struct represents will not be satisfied + when val IS NULL. + */ + bool null_rejecting; } KEY_FIELD; +/* Values in optimize */ +#define KEY_OPTIMIZE_EXISTS 1 +#define KEY_OPTIMIZE_REF_OR_NULL 2 + +/* + Merge new key definitions to old ones, remove those not used in both + + This is called for OR between different levels + + To be able to do 'ref_or_null' we merge a comparison of a column + and 'column IS NULL' to one test. This is useful for sub select queries + that are internally transformed to something like: -/* merge new key definitions to old ones, remove those not used in both */ + SELECT * FROM t1 WHERE t1.key=outer_ref_field or t1.key IS NULL + + KEY_FIELD::null_rejecting is processed as follows: + result has null_rejecting=true if it is set for both ORed references. + for example: + (t2.key = t1.field OR t2.key = t1.field) -> null_rejecting=true + (t2.key = t1.field OR t2.key <=> t1.field) -> null_rejecting=false +*/ static KEY_FIELD * merge_key_fields(KEY_FIELD *start,KEY_FIELD *new_fields,KEY_FIELD *end, @@ -1442,20 +2062,52 @@ merge_key_fields(KEY_FIELD *start,KEY_FIELD *new_fields,KEY_FIELD *end, { if (new_fields->val->used_tables()) { + /* + If the value matches, we can use the key reference. 
+ If not, we keep it until we have examined all new values + */ if (old->val->eq(new_fields->val, old->field->binary())) { - old->level=old->const_level=and_level; - old->exists_optimize&=new_fields->exists_optimize; + old->level= and_level; + old->optimize= ((old->optimize & new_fields->optimize & + KEY_OPTIMIZE_EXISTS) | + ((old->optimize | new_fields->optimize) & + KEY_OPTIMIZE_REF_OR_NULL)); + old->null_rejecting= (old->null_rejecting && + new_fields->null_rejecting); } } - else if (old->val->eq(new_fields->val, old->field->binary()) && - old->eq_func && new_fields->eq_func) + else if (old->eq_func && new_fields->eq_func && + old->val->eq(new_fields->val, old->field->binary())) + { - old->level=old->const_level=and_level; - old->exists_optimize&=new_fields->exists_optimize; + old->level= and_level; + old->optimize= ((old->optimize & new_fields->optimize & + KEY_OPTIMIZE_EXISTS) | + ((old->optimize | new_fields->optimize) & + KEY_OPTIMIZE_REF_OR_NULL)); + old->null_rejecting= (old->null_rejecting && + new_fields->null_rejecting); } - else // Impossible; remove it + else if (old->eq_func && new_fields->eq_func && + (old->val->is_null() || new_fields->val->is_null())) { + /* field = expression OR field IS NULL */ + old->level= and_level; + old->optimize= KEY_OPTIMIZE_REF_OR_NULL; + /* Remember the NOT NULL value */ + if (old->val->is_null()) + old->val= new_fields->val; + /* The referred expression can be NULL: */ + old->null_rejecting= 0; + } + else + { + /* + We are comparing two different const. In this case we can't + use a key-lookup on this so it's better to remove the value + and let the range optimzier handle it + */ if (old == --first_free) // If last item break; *old= *first_free; // Remove old value @@ -1467,7 +2119,7 @@ merge_key_fields(KEY_FIELD *start,KEY_FIELD *new_fields,KEY_FIELD *end, /* Remove all not used items */ for (KEY_FIELD *old=start ; old != first_free ;) { - if (old->level != and_level && old->const_level != and_level) + if (old->level != and_level) { // Not used in all levels if (old == --first_free) break; @@ -1480,19 +2132,39 @@ merge_key_fields(KEY_FIELD *start,KEY_FIELD *new_fields,KEY_FIELD *end, } +/* + Add a possible key to array of possible keys if it's usable as a key + + SYNPOSIS + add_key_field() + key_fields Pointer to add key, if usable + and_level And level, to be stored in KEY_FIELD + field Field used in comparision + eq_func True if we used =, <=> or IS NULL + value Value used for comparison with field + usable_tables Tables which can be used for key optimization + + NOTES + If we are doing a NOT NULL comparison on a NOT NULL field in a outer join + table, we store this to be able to do not exists optimization later. + + RETURN + *key_fields is incremented if we stored a key in the array +*/ + static void -add_key_field(KEY_FIELD **key_fields,uint and_level, +add_key_field(KEY_FIELD **key_fields,uint and_level, Item_func *cond, Field *field,bool eq_func,Item **value, uint num_values, table_map usable_tables) { - bool exists_optimize=0; + uint exists_optimize= 0; if (!(field->flags & PART_KEY_FLAG)) { // Don't remove column IS NULL on a LEFT JOIN table if (!eq_func || (*value)->type() != Item::NULL_ITEM || !field->table->maybe_null || field->null_ptr) return; // Not a key. 
Skip it - exists_optimize=1; + exists_optimize= KEY_OPTIMIZE_EXISTS; } else { @@ -1511,22 +2183,23 @@ add_key_field(KEY_FIELD **key_fields,uint and_level, if (!eq_func || (*value)->type() != Item::NULL_ITEM || !field->table->maybe_null || field->null_ptr) return; // Can't use left join optimize - exists_optimize=1; + exists_optimize= KEY_OPTIMIZE_EXISTS; } else { JOIN_TAB *stat=field->table->reginfo.join_tab; - key_map possible_keys= (field->key_start & - field->table->keys_in_use_for_query); - stat[0].keys|= possible_keys; // Add possible keys - - /* Save the following cases: - Field op constant - Field LIKE constant where constant doesn't start with a wildcard - Field = field2 where field2 is in a different table - Field op formula - Field IS NULL - Field IS NOT NULL + key_map possible_keys=field->key_start; + possible_keys.intersect(field->table->keys_in_use_for_query); + stat[0].keys.merge(possible_keys); // Add possible keys + + /* + Save the following cases: + Field op constant + Field LIKE constant where constant doesn't start with a wildcard + Field = field2 where field2 is in a different table + Field op formula + Field IS NULL + Field IS NOT NULL Field BETWEEN ... Field IN ... */ @@ -1534,19 +2207,40 @@ add_key_field(KEY_FIELD **key_fields,uint and_level, bool is_const=1; for (uint i=0; i<num_values; i++) - is_const&= (*value)->const_item(); + is_const&= value[i]->const_item(); if (is_const) - stat[0].const_keys |= possible_keys; - - /* We can't always use indexes when comparing a string index to a - number. cmp_type() is checked to allow compare of dates to numbers - also eq_func is NEVER true when num_values > 1 + stat[0].const_keys.merge(possible_keys); + /* + We can't always use indexes when comparing a string index to a + number. cmp_type() is checked to allow compare of dates to numbers. + eq_func is NEVER true when num_values > 1 */ - if (!eq_func || - field->result_type() == STRING_RESULT && - (*value)->result_type() != STRING_RESULT && - field->cmp_type() != (*value)->result_type()) - return; + if (!eq_func) + return; + if (field->result_type() == STRING_RESULT) + { + if ((*value)->result_type() != STRING_RESULT) + { + if (field->cmp_type() != (*value)->result_type()) + return; + } + else + { + /* + We can't use indexes if the effective collation + of the operation differ from the field collation. + + We can also not used index on a text column, as the column may + contain 'x' 'x\t' 'x ' and 'read_next_same' will stop after + 'x' when searching for WHERE col='x ' + */ + if (field->cmp_type() == STRING_RESULT && + (((Field_str*)field)->charset() != cond->compare_collation() || + ((*value)->type() != Item::NULL_ITEM && + (field->flags & BLOB_FLAG) && !field->binary()))) + return; + } + } } } DBUG_ASSERT(num_values == 1); @@ -1556,17 +2250,36 @@ add_key_field(KEY_FIELD **key_fields,uint and_level, */ DBUG_ASSERT(eq_func); /* Store possible eq field */ - (*key_fields)->field=field; - (*key_fields)->eq_func=eq_func; - (*key_fields)->val= *value; - (*key_fields)->level=(*key_fields)->const_level=and_level; - (*key_fields)->exists_optimize=exists_optimize; + (*key_fields)->field= field; + (*key_fields)->eq_func= eq_func; + (*key_fields)->val= *value; + (*key_fields)->level= and_level; + (*key_fields)->optimize= exists_optimize; + /* + If the condition has form "tbl.keypart = othertbl.field" and + othertbl.field can be NULL, there will be no matches if othertbl.field + has NULL value. 
+ We use null_rejecting in add_not_null_conds() to add + 'othertbl.field IS NOT NULL' to tab->select_cond. + */ + (*key_fields)->null_rejecting= ((cond->functype() == Item_func::EQ_FUNC) && + ((*value)->type() == Item::FIELD_ITEM) && + ((Item_field*)*value)->field->maybe_null()); (*key_fields)++; } - +/* + SYNOPSIS + add_key_fields() + key_fields Add KEY_FIELD entries to this array (and move the + pointer) + and_level AND-level (a value that is different for every n-way + AND operation) + cond Condition to analyze + usable_tables Value to pass to add_key_field +*/ static void -add_key_fields(JOIN_TAB *stat,KEY_FIELD **key_fields,uint *and_level, +add_key_fields(KEY_FIELD **key_fields,uint *and_level, COND *cond, table_map usable_tables) { if (cond->type() == Item_func::COND_ITEM) @@ -1578,25 +2291,20 @@ add_key_fields(JOIN_TAB *stat,KEY_FIELD **key_fields,uint *and_level, { Item *item; while ((item=li++)) - add_key_fields(stat,key_fields,and_level,item,usable_tables); + add_key_fields(key_fields,and_level,item,usable_tables); for (; org_key_fields != *key_fields ; org_key_fields++) - { - if (org_key_fields->const_level == org_key_fields->level) - org_key_fields->const_level=org_key_fields->level= *and_level; - else - org_key_fields->const_level= *and_level; - } + org_key_fields->level= *and_level; } else { (*and_level)++; - add_key_fields(stat,key_fields,and_level,li++,usable_tables); + add_key_fields(key_fields,and_level,li++,usable_tables); Item *item; while ((item=li++)) { KEY_FIELD *start_key_fields= *key_fields; (*and_level)++; - add_key_fields(stat,key_fields,and_level,item,usable_tables); + add_key_fields(key_fields,and_level,item,usable_tables); *key_fields=merge_key_fields(org_key_fields,start_key_fields, *key_fields,++(*and_level)); } @@ -1612,19 +2320,15 @@ add_key_fields(JOIN_TAB *stat,KEY_FIELD **key_fields,uint *and_level, case Item_func::OPTIMIZE_NONE: break; case Item_func::OPTIMIZE_KEY: - // BETWEEN or IN - if (cond_func->key_item()->type() == Item::FIELD_ITEM) - add_key_field(key_fields,*and_level, - ((Item_field*) (cond_func->key_item()))->field, -#ifndef TO_BE_REMOVED_IN_4_1 - /* special treatment for IN. 
Not necessary in 4.1 */ - cond_func->argument_count() == 1, - cond_func->arguments() + (cond_func->functype() != Item_func::IN_FUNC), - cond_func->argument_count() - (cond_func->functype() != Item_func::IN_FUNC), -#else - cond_func->argument_count() == 2, + // BETWEEN, IN, NOT + if (cond_func->key_item()->real_item()->type() == Item::FIELD_ITEM && + !(cond_func->used_tables() & OUTER_REF_TABLE_BIT)) + add_key_field(key_fields,*and_level,cond_func, + ((Item_field*)(cond_func->key_item()->real_item()))->field, + cond_func->argument_count() == 2 && + cond_func->functype() == Item_func::IN_FUNC && + !((Item_func_in*)cond_func)->negated, cond_func->arguments()+1, cond_func->argument_count()-1, -#endif usable_tables); break; case Item_func::OPTIMIZE_OP: @@ -1632,18 +2336,22 @@ add_key_fields(JOIN_TAB *stat,KEY_FIELD **key_fields,uint *and_level, bool equal_func=(cond_func->functype() == Item_func::EQ_FUNC || cond_func->functype() == Item_func::EQUAL_FUNC); - if (cond_func->arguments()[0]->type() == Item::FIELD_ITEM) + if (cond_func->arguments()[0]->real_item()->type() == Item::FIELD_ITEM && + !(cond_func->arguments()[0]->used_tables() & OUTER_REF_TABLE_BIT)) { - add_key_field(key_fields,*and_level, - ((Item_field*) (cond_func->arguments()[0]))->field, + add_key_field(key_fields,*and_level,cond_func, + ((Item_field*) (cond_func->arguments()[0])->real_item()) + ->field, equal_func, cond_func->arguments()+1, 1, usable_tables); } - if (cond_func->arguments()[1]->type() == Item::FIELD_ITEM && - cond_func->functype() != Item_func::LIKE_FUNC) + if (cond_func->arguments()[1]->real_item()->type() == Item::FIELD_ITEM && + cond_func->functype() != Item_func::LIKE_FUNC && + !(cond_func->arguments()[1]->used_tables() & OUTER_REF_TABLE_BIT)) { - add_key_field(key_fields,*and_level, - ((Item_field*) (cond_func->arguments()[1]))->field, + add_key_field(key_fields,*and_level,cond_func, + ((Item_field*) (cond_func->arguments()[1])->real_item()) + ->field, equal_func, cond_func->arguments(),1,usable_tables); } @@ -1651,13 +2359,15 @@ add_key_fields(JOIN_TAB *stat,KEY_FIELD **key_fields,uint *and_level, } case Item_func::OPTIMIZE_NULL: /* column_name IS [NOT] NULL */ - if (cond_func->arguments()[0]->type() == Item::FIELD_ITEM) + if (cond_func->arguments()[0]->real_item()->type() == Item::FIELD_ITEM && + !(cond_func->used_tables() & OUTER_REF_TABLE_BIT)) { Item *tmp=new Item_null; - if (!tmp) // Should never be true + if (unlikely(!tmp)) // Should never be true return; - add_key_field(key_fields,*and_level, - ((Item_field*) (cond_func->arguments()[0]))->field, + add_key_field(key_fields,*and_level,cond_func, + ((Item_field*) (cond_func->arguments()[0])->real_item()) + ->field, cond_func->functype() == Item_func::ISNULL_FUNC, &tmp, 1, usable_tables); } @@ -1672,14 +2382,13 @@ add_key_fields(JOIN_TAB *stat,KEY_FIELD **key_fields,uint *and_level, */ static uint -max_part_bit(key_map bits) +max_part_bit(key_part_map bits) { uint found; for (found=0; bits & 1 ; found++,bits>>=1) ; return found; } - static void add_key_part(DYNAMIC_ARRAY *keyuse_array,KEY_FIELD *key_field) { @@ -1687,11 +2396,11 @@ add_key_part(DYNAMIC_ARRAY *keyuse_array,KEY_FIELD *key_field) TABLE *form= field->table; KEYUSE keyuse; - if (key_field->eq_func && !key_field->exists_optimize) + if (key_field->eq_func && !(key_field->optimize & KEY_OPTIMIZE_EXISTS)) { for (uint key=0 ; key < form->keys ; key++) { - if (!(form->keys_in_use_for_query & (((key_map) 1) << key))) + if (!(form->keys_in_use_for_query.is_set(key))) continue; if 
(form->key_info[key].flags & HA_FULLTEXT) continue; // ToDo: ft-keys in non-ft queries. SerG @@ -1705,7 +2414,10 @@ add_key_part(DYNAMIC_ARRAY *keyuse_array,KEY_FIELD *key_field) keyuse.val = key_field->val; keyuse.key = key; keyuse.keypart=part; + keyuse.keypart_map= (key_part_map) 1 << part; keyuse.used_tables=key_field->val->used_tables(); + keyuse.optimize= key_field->optimize & KEY_OPTIMIZE_REF_OR_NULL; + keyuse.null_rejecting= key_field->null_rejecting; VOID(insert_dynamic(keyuse_array,(gptr) &keyuse)); } } @@ -1771,44 +2483,71 @@ add_ft_keys(DYNAMIC_ARRAY *keyuse_array, keyuse.key = cond_func->key; keyuse.keypart= FT_KEYPART; keyuse.used_tables=cond_func->key_item()->used_tables(); + keyuse.optimize= 0; + keyuse.keypart_map= 0; VOID(insert_dynamic(keyuse_array,(gptr) &keyuse)); } + static int sort_keyuse(KEYUSE *a,KEYUSE *b) { + int res; if (a->table->tablenr != b->table->tablenr) return (int) (a->table->tablenr - b->table->tablenr); if (a->key != b->key) return (int) (a->key - b->key); if (a->keypart != b->keypart) return (int) (a->keypart - b->keypart); - return test(a->used_tables) - test(b->used_tables); // Place const first + // Place const values before other ones + if ((res= test((a->used_tables & ~OUTER_REF_TABLE_BIT)) - + test((b->used_tables & ~OUTER_REF_TABLE_BIT)))) + return res; + /* Place rows that are not 'OPTIMIZE_REF_OR_NULL' first */ + return (int) ((a->optimize & KEY_OPTIMIZE_REF_OR_NULL) - + (b->optimize & KEY_OPTIMIZE_REF_OR_NULL)); } /* Update keyuse array with all possible keys we can use to fetch rows - join_tab is a array in tablenr_order - stat is a reference array in 'prefered' order. + + SYNOPSIS + update_ref_and_keys() + thd + keyuse OUT Put here ordered array of KEYUSE structures + join_tab Array in tablenr_order + tables Number of tables in join + cond WHERE condition (note that the function analyzes + join_tab[i]->on_expr too) + normal_tables tables not inner w.r.t some outer join (ones for which + we can make ref access based the WHERE clause) + select_lex current SELECT + + RETURN + 0 - OK + 1 - Out of memory. */ static bool update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, - uint tables, COND *cond, table_map normal_tables) + uint tables, COND *cond, table_map normal_tables, + SELECT_LEX *select_lex) { uint and_level,i,found_eq_constant; KEY_FIELD *key_fields, *end, *field; if (!(key_fields=(KEY_FIELD*) - thd->alloc(sizeof(key_fields[0])*(thd->cond_count+1)*2))) + thd->alloc(sizeof(key_fields[0])* + (thd->lex->current_select->cond_count+1)*2))) return TRUE; /* purecov: inspected */ - and_level=0; field=end=key_fields; + and_level= 0; + field= end= key_fields; if (my_init_dynamic_array(keyuse,sizeof(KEYUSE),20,64)) return TRUE; if (cond) { - add_key_fields(join_tab,&end,&and_level,cond,normal_tables); + add_key_fields(&end,&and_level,cond,normal_tables); for (; field != end ; field++) { add_key_part(keyuse,field); @@ -1822,23 +2561,24 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, { if (join_tab[i].on_expr) { - add_key_fields(join_tab,&end,&and_level,join_tab[i].on_expr, + add_key_fields(&end,&and_level,join_tab[i].on_expr, join_tab[i].table->map); } } /* fill keyuse with found key parts */ - for (; field != end ; field++) + for ( ; field != end ; field++) add_key_part(keyuse,field); - if (thd->lex.select->ftfunc_list.elements) + if (select_lex->ftfunc_list->elements) { add_ft_keys(keyuse,join_tab,cond,normal_tables); } /* - Remove ref if there is a keypart which is a ref and a const. 
- Remove keyparts without previous keyparts. Special treatment for ft-keys. + Remove the following things from KEYUSE: + - ref if there is a keypart which is a ref and a const. + - keyparts without previous keyparts. */ if (keyuse->elements) { @@ -1856,8 +2596,7 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, for (i=0 ; i < keyuse->elements-1 ; i++,use++) { if (!use->used_tables) - use->table->const_key_parts[use->key] |= - (key_part_map) 1 << use->keypart; + use->table->const_key_parts[use->key]|= use->keypart_map; if (use->keypart != FT_KEYPART) { if (use->key == prev->key && use->table == prev->table) @@ -1876,7 +2615,7 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, /* Save ptr to first use */ if (!use->table->reginfo.join_tab->keyuse) use->table->reginfo.join_tab->keyuse=save_pos; - use->table->reginfo.join_tab->checked_keys|= (key_map) 1 << use->key; + use->table->reginfo.join_tab->checked_keys.set_bit(use->key); save_pos++; } i=(uint) (save_pos-(KEYUSE*) keyuse->buffer); @@ -1886,6 +2625,47 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, return FALSE; } +/* + Update some values in keyuse for faster find_best_combination() loop +*/ + +static void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array) +{ + KEYUSE *end,*keyuse= dynamic_element(keyuse_array, 0, KEYUSE*); + + for (end= keyuse+ keyuse_array->elements ; keyuse < end ; keyuse++) + { + table_map map; + /* + If we find a ref, assume this table matches a proportional + part of this table. + For example 100 records matching a table with 5000 records + gives 5000/100 = 50 records per key + Constant tables are ignored. + To avoid bad matches, we don't make ref_table_rows less than 100. + */ + keyuse->ref_table_rows= ~(ha_rows) 0; // If no ref + if (keyuse->used_tables & + (map= (keyuse->used_tables & ~join->const_table_map & + ~OUTER_REF_TABLE_BIT))) + { + uint tablenr; + for (tablenr=0 ; ! 
(map & 1) ; map>>=1, tablenr++) ; + if (map == 1) // Only one table + { + TABLE *tmp_table=join->all_tables[tablenr]; + keyuse->ref_table_rows= max(tmp_table->file->records, 100); + } + } + /* + Outer reference (external field) is constant for single executing + of subquery + */ + if (keyuse->used_tables == OUTER_REF_TABLE_BIT) + keyuse->ref_table_rows= 1; + } +} + /***************************************************************************** Go through all combinations of not marked tables and find the one @@ -1931,8 +2711,6 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, ha_rows rec; double tmp; THD *thd= join->thd; - if (thd->killed) // Abort - return; if (!rest_tables) { @@ -1979,50 +2757,47 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, rec= s->records/MATCHING_ROWS_IN_OTHER_TABLE; // Assumed records/key for (keyuse=s->keyuse ; keyuse->table == table ;) { - key_map found_part=0; + key_part_map found_part=0; table_map found_ref=0; uint key=keyuse->key; KEY *keyinfo=table->key_info+key; bool ft_key=(keyuse->keypart == FT_KEYPART); + uint found_ref_or_null= 0; + /* Calculate how many key segments of the current key we can use */ start_key=keyuse; do { uint keypart=keyuse->keypart; + table_map best_part_found_ref= 0; + double best_prev_record_reads= DBL_MAX; do { - if (!ft_key) - { - table_map map; - if (!(rest_tables & keyuse->used_tables)) - { - found_part|= (key_part_map) 1 << keypart; - found_ref|= keyuse->used_tables; - } + if (!(rest_tables & keyuse->used_tables) && + !(found_ref_or_null & keyuse->optimize)) + { + found_part|=keyuse->keypart_map; + double tmp= prev_record_reads(join, + (found_ref | + keyuse->used_tables)); + if (tmp < best_prev_record_reads) + { + best_part_found_ref= keyuse->used_tables; + best_prev_record_reads= tmp; + } + if (rec > keyuse->ref_table_rows) + rec= keyuse->ref_table_rows; /* - If we find a ref, assume this table matches a proportional - part of this table. - For example 100 records matching a table with 5000 records - gives 5000/100 = 50 records per key - Constant tables are ignored and to avoid bad matches, - we don't make rec less than 100. + If there is one 'key_column IS NULL' expression, we can + use this ref_or_null optimisation of this field */ - if (keyuse->used_tables & - (map=(keyuse->used_tables & ~join->const_table_map))) - { - uint tablenr; - for (tablenr=0 ; ! 
(map & 1) ; map>>=1, tablenr++) ; - if (map == 1) // Only one table - { - TABLE *tmp_table=join->all_tables[tablenr]; - if (rec > tmp_table->file->records && rec > 100) - rec=max(tmp_table->file->records,100); - } - } + found_ref_or_null|= (keyuse->optimize & + KEY_OPTIMIZE_REF_OR_NULL); } keyuse++; } while (keyuse->table == table && keyuse->key == key && keyuse->keypart == keypart); + found_ref|= best_part_found_ref; } while (keyuse->table == table && keyuse->key == key); /* @@ -2030,8 +2805,8 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, */ if (!found_part && !ft_key) continue; // Nothing usable found - if (rec == 0) - rec=1L; // Fix for small tables + if (rec < MATCHING_ROWS_IN_OTHER_TABLE) + rec= MATCHING_ROWS_IN_OTHER_TABLE; // Fix for small tables /* ft-keys require special treatment @@ -2051,7 +2826,8 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, /* Check if we found full key */ - if (found_part == PREV_BITS(uint,keyinfo->key_parts)) + if (found_part == PREV_BITS(uint,keyinfo->key_parts) && + !found_ref_or_null) { /* use eq key */ max_key_part= (uint) ~0; if ((keyinfo->flags & (HA_NOSAME | HA_NULL_PART_KEY | @@ -2064,7 +2840,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, { if (!found_ref) { // We found a const key - if (table->quick_keys & ((key_map) 1 << key)) + if (table->quick_keys.is_set(key)) records= (double) table->quick_rows[key]; else { @@ -2088,7 +2864,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, /* Limit the number of matched rows */ tmp= records; set_if_smaller(tmp, (double) thd->variables.max_seeks_for_key); - if (table->used_keys & ((key_map) 1 << key)) + if (table->used_keys.is_set(key)) { /* we can use only index tree */ uint keys_per_block= table->file->block_size/2/ @@ -2107,14 +2883,15 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, Set tmp to (previous record count) * (records / combination) */ if ((found_part & 1) && - !(table->file->index_flags(key) & HA_ONLY_WHOLE_INDEX)) + (!(table->file->index_flags(key,0,0) & HA_ONLY_WHOLE_INDEX) || + found_part == PREV_BITS(uint,keyinfo->key_parts))) { max_key_part=max_part_bit(found_part); /* Check if quick_range could determinate how many rows we will match */ - if (table->quick_keys & ((key_map) 1 << key) && + if (table->quick_keys.is_set(key) && table->quick_key_parts[key] == max_key_part) tmp=records= (double) table->quick_rows[key]; else @@ -2142,7 +2919,6 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, rec_per_key= keyinfo->rec_per_key[keyinfo->key_parts-1] ? (double) keyinfo->rec_per_key[keyinfo->key_parts-1] : (double) s->records/rec+1; - if (!s->records) tmp=0; else if (rec_per_key/(double) s->records >= 0.01) @@ -2161,14 +2937,20 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, If quick_select was used on a part of this key, we know the maximum number of rows that the key can match. 
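Around this point find_best() turns index statistics into a cost guess: rec_per_key gives the expected rows per ref lookup, a known quick-range bound can tighten it, the ref_or_null strategy (in the lines that follow) doubles both the row and read estimates because it probes the key twice, once for the value and once for NULL, and max_seeks_for_key caps the result. A simplified, self-contained paraphrase of that arithmetic, not the server's exact cost function:

  #include <algorithm>
  #include <cstdio>

  struct RefCost {
    double rows;    // estimated rows matched per ref lookup
    double reads;   // estimated record reads charged for this table
  };

  // rec_per_key:       average rows per key prefix, from index statistics
  // quick_rows:        bound found by the range optimizer, 0.0 if unknown
  // max_seeks_for_key: session cap on how expensive one lookup may appear
  // ref_or_null:       true when "col = expr OR col IS NULL" access is used
  static RefCost estimate_ref(double rec_per_key, double quick_rows,
                              double max_seeks_for_key, bool ref_or_null)
  {
    double rows  = rec_per_key;
    double reads = rows;
    if (quick_rows > 0.0 && quick_rows < rows)
      rows = reads = quick_rows;       // range optimizer gave a tighter bound
    else if (ref_or_null) {
      rows  *= 2.0;                    // two key searches: the value and NULL
      reads *= 2.0;
    }
    reads = std::min(reads, max_seeks_for_key);   // like set_if_smaller(...)
    return RefCost{rows, reads};
  }

  int main()
  {
    RefCost c = estimate_ref(50.0, 0.0, 1000.0, true);
    std::printf("rows=%.0f reads=%.0f\n", c.rows, c.reads);  // rows=100 reads=100
    return 0;
  }

The else-if mirrors the hunk: the ref_or_null doubling is applied only when the range optimizer could not supply a tighter row bound.
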
*/ - if (table->quick_keys & ((key_map) 1 << key) && + if (table->quick_keys.is_set(key) && table->quick_key_parts[key] <= max_key_part && records > (double) table->quick_rows[key]) tmp= records= (double) table->quick_rows[key]; + else if (found_ref_or_null) + { + /* We need to do two key searches to find key */ + tmp*= 2.0; + records*= 2.0; + } } /* Limit the number of matched rows */ set_if_smaller(tmp, (double) thd->variables.max_seeks_for_key); - if (table->used_keys & ((key_map) 1 << key)) + if (table->used_keys.is_set(key)) { /* we can use only index tree */ uint keys_per_block= table->file->block_size/2/ @@ -2207,7 +2989,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, !(s->quick && best_key && s->quick->index == best_key->key && best_max_key_part >= s->table->quick_key_parts[best_key->key]) && !((s->table->file->table_flags() & HA_TABLE_SCAN_ON_INDEX) && - s->table->used_keys && best_key) && + ! s->table->used_keys.is_clear_all() && best_key) && !(s->table->force_index && best_key)) { // Check full join ha_rows rnd_records= s->found_records; @@ -2291,7 +3073,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, join->positions[idx].table= s; if (!best_key && idx == join->const_tables && s->table == join->sort_by_table && - join->thd->select_limit >= records) + join->unit->select_limit_cnt >= records) join->sort_by_table= (TABLE*) 1; // Must use temporary table /* @@ -2311,10 +3093,12 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, best_record_count=current_record_count; best_read_time=current_read_time; } - swap(JOIN_TAB*,join->best_ref[idx],*pos); + swap_variables(JOIN_TAB*, join->best_ref[idx], *pos); find_best(join,rest_tables & ~real_table_bit,idx+1, current_record_count,current_read_time); - swap(JOIN_TAB*,join->best_ref[idx],*pos); + if (thd->killed) + return; + swap_variables(JOIN_TAB*, join->best_ref[idx], *pos); } if (join->select_options & SELECT_STRAIGHT_JOIN) break; // Don't test all combinations @@ -2386,7 +3170,7 @@ static double prev_record_reads(JOIN *join,table_map found_ref) { double found=1.0; - + found_ref&= ~OUTER_REF_TABLE_BIT; for (POSITION *pos=join->positions ; found_ref ; pos++) { if (pos->table->table->map & found_ref) @@ -2420,7 +3204,7 @@ get_best_combination(JOIN *join) join->full_join=0; - used_tables=0; + used_tables= OUTER_REF_TABLE_BIT; // Outer row is already read for (j=join_tab, tablenr=0 ; tablenr < table_count ; tablenr++,j++) { TABLE *form; @@ -2438,7 +3222,7 @@ get_best_combination(JOIN *join) if (j->type == JT_SYSTEM) continue; - if (!j->keys || !(keyuse= join->best_positions[tablenr].key)) + if (j->keys.is_clear_all() || !(keyuse= join->best_positions[tablenr].key)) { j->type=JT_ALL; if (tablenr != join->const_tables) @@ -2465,9 +3249,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse, TABLE *table; KEY *keyinfo; - /* - Use best key from find_best - */ + /* Use best key from find_best */ table=j->table; key=keyuse->key; keyinfo=table->key_info+key; @@ -2483,14 +3265,22 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse, else { keyparts=length=0; + uint found_part_ref_or_null= 0; + /* + Calculate length for the used key + Stop if there is a missing key part or when we find second key_part + with KEY_OPTIMIZE_REF_OR_NULL + */ do { - if (!((~used_tables) & keyuse->used_tables)) + if (!(~used_tables & keyuse->used_tables)) { - if (keyparts == keyuse->keypart) + if (keyparts == keyuse->keypart && + 
!(found_part_ref_or_null & keyuse->optimize)) { keyparts++; - length+=keyinfo->key_part[keyuse->keypart].store_length; + length+= keyinfo->key_part[keyuse->keypart].store_length; + found_part_ref_or_null|= keyuse->optimize; } } keyuse++; @@ -2511,10 +3301,12 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse, } j->ref.key_buff2=j->ref.key_buff+ALIGN_SIZE(length); j->ref.key_err=1; + j->ref.null_rejecting= 0; keyuse=org_keyuse; - store_key **ref_key=j->ref.key_copy; - byte *key_buff=j->ref.key_buff; + store_key **ref_key= j->ref.key_copy; + byte *key_buff=j->ref.key_buff, *null_ref_key= 0; + bool keyuse_uses_no_tables= TRUE; if (ftkey) { j->ref.items[0]=((Item_func*)(keyuse->val))->key_item(); @@ -2534,40 +3326,49 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse, uint maybe_null= test(keyinfo->key_part[i].null_bit); j->ref.items[i]=keyuse->val; // Save for cond removal + if (keyuse->null_rejecting) + j->ref.null_rejecting |= 1 << i; + keyuse_uses_no_tables= keyuse_uses_no_tables && !keyuse->used_tables; if (!keyuse->used_tables && !(join->select_options & SELECT_DESCRIBE)) { // Compare against constant - store_key_item *tmp=new store_key_item(thd, - keyinfo->key_part[i].field, - (char*)key_buff + - maybe_null, - maybe_null ? - (char*) key_buff : 0, - keyinfo->key_part[i].length, - keyuse->val); - if (thd->fatal_error) - { + store_key_item tmp(thd, keyinfo->key_part[i].field, + (char*)key_buff + maybe_null, + maybe_null ? (char*) key_buff : 0, + keyinfo->key_part[i].length, keyuse->val); + if (thd->is_fatal_error) return TRUE; - } - tmp->copy(); + tmp.copy(); } else *ref_key++= get_store_key(thd, keyuse,join->const_table_map, &keyinfo->key_part[i], (char*) key_buff,maybe_null); + /* + Remeber if we are going to use REF_OR_NULL + But only if field _really_ can be null i.e. we force JT_REF + instead of JT_REF_OR_NULL in case if field can't be null + */ + if ((keyuse->optimize & KEY_OPTIMIZE_REF_OR_NULL) && maybe_null) + null_ref_key= key_buff; key_buff+=keyinfo->key_part[i].store_length; } } /* not ftkey */ *ref_key=0; // end_marker - if (j->type == JT_FT) /* no-op */; - else if (j->type == JT_CONST) - j->table->const_table=1; + if (j->type == JT_FT) + return 0; + if (j->type == JT_CONST) + j->table->const_table= 1; else if (((keyinfo->flags & (HA_NOSAME | HA_NULL_PART_KEY | HA_END_SPACE_KEY)) != HA_NOSAME) || - keyparts != keyinfo->key_parts) - j->type=JT_REF; /* Must read with repeat */ - else if (ref_key == j->ref.key_copy) + keyparts != keyinfo->key_parts || null_ref_key) + { + /* Must read with repeat */ + j->type= null_ref_key ? 
JT_REF_OR_NULL : JT_REF; + j->ref.null_ref_key= null_ref_key; + } + else if (keyuse_uses_no_tables) { /* This happens if we are using a constant expression in the ON part @@ -2630,8 +3431,8 @@ store_val_in_field(Field *field,Item *item) store_val_in_field can be called from mysql_insert with select_insert, which makes count_cuted_fields= 1 */ - bool old_count_cuted_fields= thd->count_cuted_fields; - thd->count_cuted_fields=1; + enum_check_fields old_count_cuted_fields= thd->count_cuted_fields; + thd->count_cuted_fields= CHECK_FIELD_WARN; error= item->save_in_field(field, 1); thd->count_cuted_fields= old_count_cuted_fields; return error || cuted_fields != thd->cuted_fields; @@ -2656,19 +3457,19 @@ make_simple_join(JOIN *join,TABLE *tmp_table) join->tmp_table_param.func_count=0; join->tmp_table_param.copy_field=join->tmp_table_param.copy_field_end=0; join->first_record=join->sort_and_group=0; - join->sum_funcs=0; join->send_records=(ha_rows) 0; join->group=0; - join->do_send_rows = 1; - join->row_limit=join->thd->select_limit; + join->row_limit=join->unit->select_limit_cnt; + join->do_send_rows = (join->row_limit) ? 1 : 0; - join_tab->cache.buff=0; /* No cacheing */ + join_tab->cache.buff=0; /* No caching */ join_tab->table=tmp_table; join_tab->select=0; join_tab->select_cond=0; join_tab->quick=0; join_tab->type= JT_ALL; /* Map through all records */ - join_tab->keys= (uint) ~0; /* test everything in quick */ + join_tab->keys.init(); + join_tab->keys.set_all(); /* test everything in quick */ join_tab->info=0; join_tab->on_expr=0; join_tab->ref.key = -1; @@ -2682,16 +3483,133 @@ make_simple_join(JOIN *join,TABLE *tmp_table) } +inline void add_cond_and_fix(Item **e1, Item *e2) +{ + if (*e1) + { + Item *res; + if ((res= new Item_cond_and(*e1, e2))) + { + *e1= res; + res->quick_fix_field(); + } + } + else + *e1= e2; +} + + +/* + Add to join_tab->select_cond[i] "table.field IS NOT NULL" conditions we've + inferred from ref/eq_ref access performed. + + SYNOPSIS + add_not_null_conds() + join Join to process + + NOTES + This function is a part of "Early NULL-values filtering for ref access" + optimization. + + Example of this optimization: + For query SELECT * FROM t1,t2 WHERE t2.key=t1.field + and plan " any-access(t1), ref(t2.key=t1.field) " + add "t1.field IS NOT NULL" to t1's table condition. + Description of the optimization: + + We look through equalities chosen to perform ref/eq_ref access, + pick equalities that have form "tbl.part_of_key = othertbl.field" + (where othertbl is a non-const table and othertbl.field may be NULL) + and add them to conditions on corresponding tables (othertbl in this + example). + + Exception from that is the case when referred_tab->join != join. + I.e. don't add NOT NULL constraints from any embedded subquery. + Consider this query: + SELECT A.f2 FROM t1 LEFT JOIN t2 A ON A.f2 = f1 + WHERE A.f3=(SELECT MIN(f3) FROM t2 C WHERE A.f4 = C.f4) OR A.f3 IS NULL; + Here condition A.f3 IS NOT NULL is going to be added to the WHERE + condition of the embedding query. + Another example: + SELECT * FROM t10, t11 WHERE (t10.a < 10 OR t10.a IS NULL) + AND t11.b <=> t10.b AND (t11.a = (SELECT MAX(a) FROM t12 + WHERE t12.b = t10.a )); + Here condition t10.a IS NOT NULL is going to be added. + In both cases addition of NOT NULL condition will erroneously reject + some rows of the result set. + referred_tab->join != join constraint would disallow such additions. + + This optimization doesn't affect the choices that ref, range, or join + optimizer make. 
This was intentional because this was added after 4.1 + was GA. + + Implementation overview + 1. update_ref_and_keys() accumulates info about null-rejecting + predicates in KEY_FIELD::null_rejecting + 1.1 add_key_part saves these to KEYUSE. + 2. create_ref_for_key copies them to TABLE_REF. + 3. add_not_null_conds adds "x IS NOT NULL" to join_tab->select_cond of + appropriate JOIN_TAB members. +*/ + +static void add_not_null_conds(JOIN *join) +{ + DBUG_ENTER("add_not_null_conds"); + for (uint i=join->const_tables ; i < join->tables ; i++) + { + JOIN_TAB *tab=join->join_tab+i; + if ((tab->type == JT_REF || tab->type == JT_REF_OR_NULL) && + !tab->table->maybe_null) + { + for (uint keypart= 0; keypart < tab->ref.key_parts; keypart++) + { + if (tab->ref.null_rejecting & (1 << keypart)) + { + Item *item= tab->ref.items[keypart]; + Item *notnull; + DBUG_ASSERT(item->type() == Item::FIELD_ITEM); + Item_field *not_null_item= (Item_field*)item; + JOIN_TAB *referred_tab= not_null_item->field->table->reginfo.join_tab; + /* + For UPDATE queries such as: + UPDATE t1 SET t1.f2=(SELECT MAX(t2.f4) FROM t2 WHERE t2.f3=t1.f1); + not_null_item is the t1.f1, but its referred_tab is 0. + */ + if (!referred_tab || referred_tab->join != join) + continue; + if (!(notnull= new Item_func_isnotnull(not_null_item))) + DBUG_VOID_RETURN; + /* + We need to do full fix_fields() call here in order to have correct + notnull->const_item(). This is needed e.g. by test_quick_select + when it is called from make_join_select after this function is + called. + */ + if (notnull->fix_fields(join->thd, join->tables_list, &notnull)) + DBUG_VOID_RETURN; + DBUG_EXECUTE("where",print_where(notnull, + referred_tab->table->table_name);); + add_cond_and_fix(&referred_tab->select_cond, notnull); + } + } + } + } + DBUG_VOID_RETURN; +} + static bool make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) { DBUG_ENTER("make_join_select"); if (select) { + add_not_null_conds(join); table_map used_tables; if (join->tables > 1) cond->update_used_tables(); // Tablenr may have changed - if (join->const_tables == join->tables) + if (join->const_tables == join->tables && + join->thd->lex->current_select->master_unit() == + &join->thd->lex->unit) // not upper level SELECT join->const_table_map|=RAND_TABLE_BIT; { // Check const tables COND *const_cond= @@ -2703,7 +3621,8 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) DBUG_RETURN(1); // Impossible const condition } } - used_tables=(select->const_tables=join->const_table_map) | RAND_TABLE_BIT; + used_tables=((select->const_tables=join->const_table_map) | + OUTER_REF_TABLE_BIT | RAND_TABLE_BIT); for (uint i=join->const_tables ; i < join->tables ; i++) { JOIN_TAB *tab=join->join_tab+i; @@ -2713,7 +3632,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) It solves the problem with selects like SELECT * FROM t1 WHERE rand() > 0.5 */ if (i == join->tables-1) - current_map|= RAND_TABLE_BIT; + current_map|= OUTER_REF_TABLE_BIT | RAND_TABLE_BIT; bool use_quick_range=0; used_tables|=current_map; @@ -2725,6 +3644,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) tab->type=JT_ALL; use_quick_range=1; tab->use_quick=1; + tab->ref.key= -1; tab->ref.key_parts=0; // Don't use ref key. 
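The hunks above and below replace raw key_map bit arithmetic such as "table->quick_keys & ((key_map) 1 << key)" with method calls (is_set(), set_all(), clear_all(), is_clear_all(), is_subset(), merge()). A minimal sketch of the semantics those calls imply, assuming a plain 64-bit bitmap; the type and method names below are illustrative stand-ins mirroring the calls visible in the diff, not the server's actual key_map implementation:

struct KeyBitmap                                /* illustrative stand-in for key_map */
{
  unsigned long long map;
  void init()                     { map= 0; }
  void clear_all()                { map= 0; }
  void set_all()                  { map= ~0ULL; }
  void set_bit(unsigned key)      { map|= 1ULL << key; }
  bool is_set(unsigned key) const { return (map >> key) & 1ULL; }
  bool is_clear_all() const       { return map == 0; }
  bool is_subset(const KeyBitmap &other) const  /* every bit set here is also set in other */
  { return (map & ~other.map) == 0; }
  void merge(const KeyBitmap &other)            /* bitwise OR, as tab->keys.merge(...) above */
  { map|= other.map; }
};

With such an object, the old test "table->used_keys & ((key_map) 1 << key)" becomes "table->used_keys.is_set(key)", which is the form the rewritten hunks use throughout.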
join->best_positions[i].records_read= rows2double(tab->quick->records); } @@ -2740,24 +3660,25 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) } if (tmp) { - DBUG_EXECUTE("where",print_where(tmp,tab->table->table_name);); SQL_SELECT *sel=tab->select=(SQL_SELECT*) join->thd->memdup((gptr) select, sizeof(SQL_SELECT)); if (!sel) DBUG_RETURN(1); // End of memory - tab->select_cond=sel->cond=tmp; + add_cond_and_fix(&tab->select_cond, tmp); + sel->cond= tab->select_cond; sel->head=tab->table; + DBUG_EXECUTE("where",print_where(tmp,tab->table->table_name);); if (tab->quick) { /* Use quick key read if it's a constant and it's not used with key reading */ - if (tab->needed_reg == 0 && tab->type != JT_EQ_REF + if (tab->needed_reg.is_clear_all() && tab->type != JT_EQ_REF && tab->type != JT_FT && (tab->type != JT_REF || (uint) tab->ref.key == tab->quick->index)) { sel->quick=tab->quick; // Use value from get_quick_... - sel->quick_keys=0; - sel->needed_reg=0; + sel->quick_keys.clear_all(); + sel->needed_reg.clear_all(); } else { @@ -2768,12 +3689,13 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) uint ref_key=(uint) sel->head->reginfo.join_tab->ref.key+1; if (i == join->const_tables && ref_key) { - if (tab->const_keys && tab->table->reginfo.impossible_range) + if (!tab->const_keys.is_clear_all() && + tab->table->reginfo.impossible_range) DBUG_RETURN(1); } else if (tab->type == JT_ALL && ! use_quick_range) { - if (tab->const_keys && + if (!tab->const_keys.is_clear_all() && tab->table->reginfo.impossible_range) DBUG_RETURN(1); // Impossible range /* @@ -2783,22 +3705,37 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) the index if we are using limit and this is the first table */ - if ((tab->keys & ~ tab->const_keys && i > 0) || - (tab->const_keys && i == join->const_tables && - join->thd->select_limit < join->best_positions[i].records_read && + if ((!tab->keys.is_subset(tab->const_keys) && i > 0) || + (!tab->const_keys.is_clear_all() && i == join->const_tables && + join->unit->select_limit_cnt < + join->best_positions[i].records_read && !(join->select_options & OPTION_FOUND_ROWS))) { /* Join with outer join condition */ COND *orig_cond=sel->cond; - sel->cond=and_conds(sel->cond,tab->on_expr); + sel->cond= and_conds(sel->cond, tab->on_expr); + + /* + We can't call sel->cond->fix_fields, + as it will break tab->on_expr if it's AND condition + (fix_fields currently removes extra AND/OR levels). + Yet attributes of the just built condition are not needed. + Thus we call sel->cond->quick_fix_field for safety. + */ + if (sel->cond && !sel->cond->fixed) + sel->cond->quick_fix_field(); + if (sel->test_quick_select(join->thd, tab->keys, used_tables & ~ current_map, (join->select_options & OPTION_FOUND_ROWS ? HA_POS_ERROR : - join->thd->select_limit)) < 0) - { /* before reporting "Impossible WHERE" for the whole query - we have to check isn't it only "impossible ON" instead */ + join->unit->select_limit_cnt), 0) < 0) + { + /* + Before reporting "Impossible WHERE" for the whole query + we have to check isn't it only "impossible ON" instead + */ sel->cond=orig_cond; if (!tab->on_expr || sel->test_quick_select(join->thd, tab->keys, @@ -2806,8 +3743,8 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) (join->select_options & OPTION_FOUND_ROWS ? 
HA_POS_ERROR : - join->thd->select_limit)) < 0) - DBUG_RETURN(1); // Impossible WHERE + join->unit->select_limit_cnt),0) < 0) + DBUG_RETURN(1); // Impossible WHERE } else sel->cond=orig_cond; @@ -2819,13 +3756,15 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) else { sel->needed_reg=tab->needed_reg; - sel->quick_keys=0; + sel->quick_keys.clear_all(); } - if ((sel->quick_keys | sel->needed_reg) & ~tab->checked_keys) + if (!sel->quick_keys.is_subset(tab->checked_keys) || + !sel->needed_reg.is_subset(tab->checked_keys)) { - tab->keys=sel->quick_keys | sel->needed_reg; - tab->use_quick= (sel->needed_reg && - (!select->quick_keys || + tab->keys=sel->quick_keys; + tab->keys.merge(sel->needed_reg); + tab->use_quick= (!sel->needed_reg.is_clear_all() && + (select->quick_keys.is_clear_all() || (select->quick && (select->quick->records >= 100L)))) ? 2 : 1; @@ -2854,12 +3793,10 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) static void -make_join_readinfo(JOIN *join,uint options) +make_join_readinfo(JOIN *join, uint options) { uint i; - SELECT_LEX *select_lex = &(join->thd->lex.select_lex); bool statistics= test(!(join->select_options & SELECT_DESCRIBE)); - DBUG_ENTER("make_join_readinfo"); for (i=join->const_tables ; i < join->tables ; i++) @@ -2879,6 +3816,12 @@ make_join_readinfo(JOIN *join,uint options) table->status=STATUS_NO_RECORD; tab->read_first_record= join_read_const; tab->read_record.read_record= join_no_more_records; + if (table->used_keys.is_set(tab->ref.key) && + !table->no_keyread) + { + table->key_read=1; + table->file->extra(HA_EXTRA_KEYREAD); + } break; case JT_EQ_REF: table->status=STATUS_NO_RECORD; @@ -2889,16 +3832,16 @@ make_join_readinfo(JOIN *join,uint options) } delete tab->quick; tab->quick=0; - table->file->index_init(tab->ref.key); tab->read_first_record= join_read_key; tab->read_record.read_record= join_no_more_records; - if (table->used_keys & ((key_map) 1 << tab->ref.key) && + if (table->used_keys.is_set(tab->ref.key) && !table->no_keyread) { table->key_read=1; table->file->extra(HA_EXTRA_KEYREAD); } break; + case JT_REF_OR_NULL: case JT_REF: table->status=STATUS_NO_RECORD; if (tab->select) @@ -2908,19 +3851,25 @@ make_join_readinfo(JOIN *join,uint options) } delete tab->quick; tab->quick=0; - table->file->index_init(tab->ref.key); - tab->read_first_record= join_read_always_key; - tab->read_record.read_record= join_read_next_same; - if (table->used_keys & ((key_map) 1 << tab->ref.key) && + if (table->used_keys.is_set(tab->ref.key) && !table->no_keyread) { table->key_read=1; table->file->extra(HA_EXTRA_KEYREAD); } + if (tab->type == JT_REF) + { + tab->read_first_record= join_read_always_key; + tab->read_record.read_record= join_read_next_same; + } + else + { + tab->read_first_record= join_read_always_key_or_null; + tab->read_record.read_record= join_read_next_same_or_null; + } break; case JT_FT: table->status=STATUS_NO_RECORD; - table->file->index_init(tab->ref.key); tab->read_first_record= join_ft_read_first; tab->read_record.read_record= join_ft_read_next; break; @@ -2942,7 +3891,7 @@ make_join_readinfo(JOIN *join,uint options) /* These init changes read_record */ if (tab->use_quick == 2) { - select_lex->options|=QUERY_NO_GOOD_INDEX_USED; + join->thd->server_status|=SERVER_QUERY_NO_GOOD_INDEX_USED; tab->read_first_record= join_init_quick_read_record; if (statistics) statistic_increment(select_range_check_count, &LOCK_status); @@ -2959,7 +3908,7 @@ make_join_readinfo(JOIN *join,uint options) } else { - 
select_lex->options|=QUERY_NO_INDEX_USED; + join->thd->server_status|=SERVER_QUERY_NO_INDEX_USED; if (statistics) statistic_increment(select_scan_count, &LOCK_status); } @@ -2973,7 +3922,7 @@ } else { - select_lex->options|=QUERY_NO_INDEX_USED; + join->thd->server_status|=SERVER_QUERY_NO_INDEX_USED; if (statistics) statistic_increment(select_full_join_count, &LOCK_status); } @@ -2981,15 +3930,15 @@ if (!table->no_keyread) { if (tab->select && tab->select->quick && - table->used_keys & ((key_map) 1 << tab->select->quick->index)) + table->used_keys.is_set(tab->select->quick->index)) { table->key_read=1; table->file->extra(HA_EXTRA_KEYREAD); } - else if (table->used_keys && ! (tab->select && tab->select->quick)) + else if (!table->used_keys.is_clear_all() && + !(tab->select && tab->select->quick)) { // Only read index tree - tab->index=find_shortest_key(table, table->used_keys); - tab->table->file->index_init(tab->index); + tab->index=find_shortest_key(table, & table->used_keys); tab->read_first_record= join_read_first; tab->type=JT_NEXT; // Read with index_first / index_next } @@ -3041,53 +3990,139 @@ bool error_if_full_join(JOIN *join) } -static void -join_free(JOIN *join) +/* + cleanup JOIN_TAB + + SYNOPSIS + JOIN_TAB::cleanup() +*/ + +void JOIN_TAB::cleanup() +{ + delete select; + select= 0; + delete quick; + quick= 0; + x_free(cache.buff); + cache.buff= 0; + if (table) + { + if (table->key_read) + { + table->key_read= 0; + table->file->extra(HA_EXTRA_NO_KEYREAD); + } + table->file->ha_index_or_rnd_end(); + /* + We need to reset this for next select + (Tested in part_of_refkey) + */ + table->reginfo.join_tab= 0; + } + end_read_record(&read_record); +} + + +/* + Free resources of given join + + SYNOPSIS + JOIN::join_free() + full - true if we should free all resources; the call with full==1 should + be the last one, before it this function can be called with full==0 + + NOTE: with subqueries this function will definitely be called several times, + but even for a simple query it can be called more than once. +*/ +void +JOIN::join_free(bool full) { JOIN_TAB *tab,*end; - DBUG_ENTER("join_free"); + DBUG_ENTER("JOIN::join_free"); + + full= full || (!select_lex->uncacheable && + !thd->lex->subqueries && + !thd->lex->describe); // do not cleanup too early on EXPLAIN - if (join->table) + if (table) { /* Only a sorted table may be cached. 
This sorted table is always the first non const table in join->table */ - if (join->tables > join->const_tables) // Test for not-const tables - free_io_cache(join->table[join->const_tables]); - for (tab=join->join_tab,end=tab+join->tables ; tab != end ; tab++) + if (tables > const_tables) // Test for not-const tables { - delete tab->select; - delete tab->quick; - x_free(tab->cache.buff); - if (tab->table) + free_io_cache(table[const_tables]); + filesort_free_buffers(table[const_tables]); + } + + for (SELECT_LEX_UNIT *unit= select_lex->first_inner_unit(); unit; + unit= unit->next_unit()) + { + JOIN *join; + for (SELECT_LEX *sl= unit->first_select_in_union(); sl; + sl= sl->next_select()) + if ((join= sl->join)) + join->join_free(full); + } + + if (full) + { + for (tab= join_tab, end= tab+tables; tab != end; tab++) + tab->cleanup(); + table= 0; + tables= 0; + } + else + { + for (tab= join_tab, end= tab+tables; tab != end; tab++) { - if (tab->table->key_read) - { - tab->table->key_read=0; - tab->table->file->extra(HA_EXTRA_NO_KEYREAD); - } - /* Don't free index if we are using read_record */ - if (!tab->read_record.table) - tab->table->file->index_end(); + if (tab->table) + tab->table->file->ha_index_or_rnd_end(); } - end_read_record(&tab->read_record); } - join->table=0; } + /* We are not using tables anymore Unlock all tables. We may be in an INSERT .... SELECT statement. */ - if (join->lock && join->thd->lock && - !(join->select_options & SELECT_NO_UNLOCK)) + if (full && lock && thd->lock && !(select_options & SELECT_NO_UNLOCK) && + !select_lex->subquery_in_having) + { + // TODO: unlock tables even if the join isn't top level select in the tree + if (select_lex == (thd->lex->unit.fake_select_lex ? + thd->lex->unit.fake_select_lex : &thd->lex->select_lex)) + { + mysql_unlock_read_tables(thd, lock); // Don't free join->lock + lock=0; + } + } + + if (full) { - mysql_unlock_read_tables(join->thd, join->lock);// Don't free join->lock - join->lock=0; + group_fields.delete_elements(); + /* + We can't call delete_elements() on copy_funcs as this will cause + problems in free_elements() as some of the elements are then deleted. + */ + tmp_table_param.copy_funcs.empty(); + /* + If we have tmp_join and 'this' JOIN is not tmp_join and + tmp_table_param.copy_field's of them are equal then we have to remove + pointer to tmp_table_param.copy_field from tmp_join, because it will + be removed in tmp_table_param.cleanup(). 
+ */ + if (tmp_join && + tmp_join != this && + tmp_join->tmp_table_param.copy_field == + tmp_table_param.copy_field) + { + tmp_join->tmp_table_param.copy_field= + tmp_join->tmp_table_param.save_copy_field= 0; + } + tmp_table_param.cleanup(); } - join->group_fields.delete_elements(); - join->tmp_table_param.copy_funcs.delete_elements(); - join->tmp_table_param.cleanup(); DBUG_VOID_RETURN; } @@ -3187,7 +4222,8 @@ static void update_depend_map(JOIN *join) uint i; for (i=0 ; i < ref->key_parts ; i++,item++) depend_map|=(*item)->used_tables(); - ref->depend_map=depend_map; + ref->depend_map=depend_map & ~OUTER_REF_TABLE_BIT; + depend_map&= ~OUTER_REF_TABLE_BIT; for (JOIN_TAB **tab=join->map2table; depend_map ; tab++,depend_map>>=1 ) @@ -3208,7 +4244,8 @@ static void update_depend_map(JOIN *join, ORDER *order) table_map depend_map; order->item[0]->update_used_tables(); order->depend_map=depend_map=order->item[0]->used_tables(); - if (!(order->depend_map & RAND_TABLE_BIT)) // Not item_sum() or RAND() + // Not item_sum(), RAND() and no reference to table outside of sub select + if (!(order->depend_map & (OUTER_REF_TABLE_BIT | RAND_TABLE_BIT))) { for (JOIN_TAB **tab=join->map2table; depend_map ; @@ -3223,20 +4260,39 @@ static void update_depend_map(JOIN *join, ORDER *order) /* - simple_order is set to 1 if sort_order only uses fields from head table - and the head table is not a LEFT JOIN table + Remove all constants and check if ORDER only contains simple expressions + + SYNOPSIS + remove_const() + join Join handler + first_order List of SORT or GROUP order + cond WHERE statement + change_list Set to 1 if we should remove things from list + If this is not set, then only simple_order is + calculated + simple_order Set to 1 if we are only using simple expressions + + RETURN + Returns new sort order + + simple_order is set to 1 if sort_order only uses fields from head table + and the head table is not a LEFT JOIN table + */ static ORDER * -remove_const(JOIN *join,ORDER *first_order, COND *cond, bool *simple_order) +remove_const(JOIN *join,ORDER *first_order, COND *cond, + bool change_list, bool *simple_order) { if (join->tables == join->const_tables) - return 0; // No need to sort - DBUG_ENTER("remove_const"); + return change_list ? 0 : first_order; // No need to sort + ORDER *order,**prev_ptr; table_map first_table= join->join_tab[join->const_tables].table->map; table_map not_const_tables= ~join->const_table_map; table_map ref; + DBUG_ENTER("remove_const"); + prev_ptr= &first_order; *simple_order= join->join_tab[join->const_tables].on_expr ? 
0 : 1; @@ -3255,7 +4311,7 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond, bool *simple_order) } else { - if (order_tables & RAND_TABLE_BIT) + if (order_tables & (RAND_TABLE_BIT | OUTER_REF_TABLE_BIT)) *simple_order=0; else { @@ -3267,7 +4323,8 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond, bool *simple_order) } if ((ref=order_tables & (not_const_tables ^ first_table))) { - if (only_eq_ref_tables(join,first_order,ref)) + if (!(order_tables & first_table) && + only_eq_ref_tables(join,first_order, ref)) { DBUG_PRINT("info",("removing: %s", order->item[0]->full_name())); continue; @@ -3276,11 +4333,13 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond, bool *simple_order) } } } - *prev_ptr= order; // use this entry + if (change_list) + *prev_ptr= order; // use this entry prev_ptr= &order->next; } - *prev_ptr=0; - if (!first_order) // Nothing to sort/group + if (change_list) + *prev_ptr=0; + if (prev_ptr == &first_order) // Nothing to sort/group *simple_order=1; DBUG_PRINT("exit",("simple_order: %d",(int) *simple_order)); DBUG_RETURN(first_order); @@ -3290,20 +4349,19 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond, bool *simple_order) static int return_zero_rows(JOIN *join, select_result *result,TABLE_LIST *tables, List<Item> &fields, bool send_row, uint select_options, - const char *info, Item *having, Procedure *procedure) + const char *info, Item *having, Procedure *procedure, + SELECT_LEX_UNIT *unit) { DBUG_ENTER("return_zero_rows"); if (select_options & SELECT_DESCRIBE) { - describe_info(join, info); + select_describe(join, FALSE, FALSE, FALSE, info); DBUG_RETURN(0); } - if (procedure) - { - if (result->prepare(fields)) // This hasn't been done yet - DBUG_RETURN(-1); - } + + join->join_free(0); + if (send_row) { for (TABLE_LIST *table=tables; table ; table=table->next) @@ -3321,12 +4379,6 @@ return_zero_rows(JOIN *join, select_result *result,TABLE_LIST *tables, item->no_rows_in_result(); result->send_data(fields); } - if (tables) // Not from do_select() - { - /* Close open cursors */ - for (TABLE_LIST *table=tables; table ; table=table->next) - table->table->file->index_end(); - } result->send_eof(); // Should be safe } /* Update results for FOUND_ROWS */ @@ -3374,8 +4426,9 @@ template class List_iterator<Item_func_match>; */ static void -change_cond_ref_to_const(I_List<COND_CMP> *save_list,Item *and_father, - Item *cond, Item *field, Item *value) +change_cond_ref_to_const(THD *thd, I_List<COND_CMP> *save_list, + Item *and_father, Item *cond, + Item *field, Item *value) { if (cond->type() == Item::COND_ITEM) { @@ -3384,7 +4437,7 @@ change_cond_ref_to_const(I_List<COND_CMP> *save_list,Item *and_father, List_iterator<Item> li(*((Item_cond*) cond)->argument_list()); Item *item; while ((item=li++)) - change_cond_ref_to_const(save_list,and_level ? cond : item, item, + change_cond_ref_to_const(thd, save_list,and_level ? 
cond : item, item, field, value); return; } @@ -3392,19 +4445,20 @@ change_cond_ref_to_const(I_List<COND_CMP> *save_list,Item *and_father, return; // Not a boolean function Item_bool_func2 *func= (Item_bool_func2*) cond; - Item *left_item= func->arguments()[0]; - Item *right_item= func->arguments()[1]; + Item **args= func->arguments(); + Item *left_item= args[0]; + Item *right_item= args[1]; Item_func::Functype functype= func->functype(); if (right_item->eq(field,0) && left_item != value && (left_item->result_type() != STRING_RESULT || value->result_type() != STRING_RESULT || - left_item->binary == value->binary)) + left_item->collation.collation == value->collation.collation)) { Item *tmp=value->new_item(); if (tmp) { - func->arguments()[1] = tmp; + thd->change_item_tree(args + 1, tmp); func->update_used_tables(); if ((functype == Item_func::EQ_FUNC || functype == Item_func::EQUAL_FUNC) && and_father != cond && !left_item->const_item()) @@ -3414,40 +4468,72 @@ change_cond_ref_to_const(I_List<COND_CMP> *save_list,Item *and_father, if ((tmp2=new COND_CMP(and_father,func))) save_list->push_back(tmp2); } - func->set_cmp_func(item_cmp_type(func->arguments()[0]->result_type(), - func->arguments()[1]->result_type())); + func->set_cmp_func(); } } else if (left_item->eq(field,0) && right_item != value && (right_item->result_type() != STRING_RESULT || value->result_type() != STRING_RESULT || - right_item->binary == value->binary)) + right_item->collation.collation == value->collation.collation)) { Item *tmp=value->new_item(); if (tmp) { - func->arguments()[0] = value = tmp; + thd->change_item_tree(args, tmp); + value= tmp; func->update_used_tables(); if ((functype == Item_func::EQ_FUNC || functype == Item_func::EQUAL_FUNC) && and_father != cond && !right_item->const_item()) { - func->arguments()[0] = func->arguments()[1]; // For easy check - func->arguments()[1] = value; + args[0]= args[1]; // For easy check + thd->change_item_tree(args + 1, value); cond->marker=1; COND_CMP *tmp2; if ((tmp2=new COND_CMP(and_father,func))) save_list->push_back(tmp2); } - func->set_cmp_func(item_cmp_type(func->arguments()[0]->result_type(), - func->arguments()[1]->result_type())); + func->set_cmp_func(); } } } +/* + Remove additional condition inserted by IN/ALL/ANY transformation + + SYNOPSIS + remove_additional_cond() + conds - condition for processing + + RETURN VALUES + new conditions +*/ + +static Item *remove_additional_cond(Item* conds) +{ + if (conds->name == in_additional_cond) + return 0; + if (conds->type() == Item::COND_ITEM) + { + Item_cond *cnd= (Item_cond*) conds; + List_iterator<Item> li(*(cnd->argument_list())); + Item *item; + while ((item= li++)) + { + if (item->name == in_additional_cond) + { + li.remove(); + if (cnd->argument_list()->elements == 1) + return cnd->argument_list()->head(); + return conds; + } + } + } + return conds; +} static void -propagate_cond_constants(I_List<COND_CMP> *save_list,COND *and_father, - COND *cond) +propagate_cond_constants(THD *thd, I_List<COND_CMP> *save_list, + COND *and_father, COND *cond) { if (cond->type() == Item::COND_ITEM) { @@ -3458,18 +4544,19 @@ propagate_cond_constants(I_List<COND_CMP> *save_list,COND *and_father, I_List<COND_CMP> save; while ((item=li++)) { - propagate_cond_constants(&save,and_level ? cond : item, item); + propagate_cond_constants(thd, &save,and_level ? 
cond : item, item); } if (and_level) { // Handle other found items I_List_iterator<COND_CMP> cond_itr(save); COND_CMP *cond_cmp; while ((cond_cmp=cond_itr++)) - if (!cond_cmp->cmp_func->arguments()[0]->const_item()) - change_cond_ref_to_const(&save,cond_cmp->and_level, - cond_cmp->and_level, - cond_cmp->cmp_func->arguments()[0], - cond_cmp->cmp_func->arguments()[1]); + { + Item **args= cond_cmp->cmp_func->arguments(); + if (!args[0]->const_item()) + change_cond_ref_to_const(thd, &save,cond_cmp->and_level, + cond_cmp->and_level, args[0], args[1]); + } } } else if (and_father != cond && !cond->marker) // In a AND group @@ -3479,29 +4566,25 @@ propagate_cond_constants(I_List<COND_CMP> *save_list,COND *and_father, ((Item_func*) cond)->functype() == Item_func::EQUAL_FUNC)) { Item_func_eq *func=(Item_func_eq*) cond; - bool left_const= func->arguments()[0]->const_item(); - bool right_const=func->arguments()[1]->const_item(); + Item **args= func->arguments(); + bool left_const= args[0]->const_item(); + bool right_const= args[1]->const_item(); if (!(left_const && right_const) && - (func->arguments()[0]->result_type() == - (func->arguments()[1]->result_type()))) + args[0]->result_type() == args[1]->result_type()) { if (right_const) { - func->arguments()[1]=resolve_const_item(func->arguments()[1], - func->arguments()[0]); + resolve_const_item(thd, &args[1], args[0]); func->update_used_tables(); - change_cond_ref_to_const(save_list,and_father,and_father, - func->arguments()[0], - func->arguments()[1]); + change_cond_ref_to_const(thd, save_list, and_father, and_father, + args[0], args[1]); } else if (left_const) { - func->arguments()[0]=resolve_const_item(func->arguments()[0], - func->arguments()[1]); + resolve_const_item(thd, &args[0], args[1]); func->update_used_tables(); - change_cond_ref_to_const(save_list,and_father,and_father, - func->arguments()[1], - func->arguments()[0]); + change_cond_ref_to_const(thd, save_list, and_father, and_father, + args[1], args[0]); } } } @@ -3510,24 +4593,29 @@ propagate_cond_constants(I_List<COND_CMP> *save_list,COND *and_father, static COND * -optimize_cond(COND *conds,Item::cond_result *cond_value) +optimize_cond(THD *thd, COND *conds, Item::cond_result *cond_value) { - if (!conds) + SELECT_LEX *select= thd->lex->current_select; + DBUG_ENTER("optimize_cond"); + if (conds) + { + DBUG_EXECUTE("where", print_where(conds, "original");); + /* change field = field to field = const for each found field = const */ + propagate_cond_constants(thd, (I_List<COND_CMP> *) 0, conds, conds); + /* + Remove all instances of item == item + Remove all and-levels where CONST item != CONST item + */ + DBUG_EXECUTE("where", print_where(conds, "after const change");); + conds= remove_eq_conds(thd, conds, cond_value); + DBUG_EXECUTE("info", print_where(conds, "after remove");); + } + else { *cond_value= Item::COND_TRUE; - return conds; + select->prep_where= 0; } - /* change field = field to field = const for each found field = const */ - DBUG_EXECUTE("where",print_where(conds,"original");); - propagate_cond_constants((I_List<COND_CMP> *) 0,conds,conds); - /* - Remove all instances of item == item - Remove all and-levels where CONST item != CONST item - */ - DBUG_EXECUTE("where",print_where(conds,"after const change");); - conds=remove_eq_conds(conds,cond_value) ; - DBUG_EXECUTE("info",print_where(conds,"after remove");); - return conds; + DBUG_RETURN(conds); } @@ -3539,8 +4627,8 @@ optimize_cond(COND *conds,Item::cond_result *cond_value) COND_FALSE always false ( 1 = 2 ) */ -static COND * 
-remove_eq_conds(COND *cond,Item::cond_result *cond_value) +COND * +remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value) { if (cond->type() == Item::COND_ITEM) { @@ -3554,19 +4642,11 @@ remove_eq_conds(COND *cond,Item::cond_result *cond_value) Item *item; while ((item=li++)) { - Item *new_item=remove_eq_conds(item,&tmp_cond_value); + Item *new_item=remove_eq_conds(thd, item, &tmp_cond_value); if (!new_item) - { -#ifdef DELETE_ITEMS - delete item; // This may be shared -#endif li.remove(); - } else if (item != new_item) { -#ifdef DELETE_ITEMS - delete item; // This may be shared -#endif VOID(li.replace(new_item)); should_fix_fields=1; } @@ -3596,7 +4676,7 @@ remove_eq_conds(COND *cond,Item::cond_result *cond_value) } } if (should_fix_fields) - cond->fix_fields(current_thd,0); + cond->update_used_tables(); if (!((Item_cond*) cond)->argument_list()->elements || *cond_value != Item::COND_OK) @@ -3623,7 +4703,6 @@ remove_eq_conds(COND *cond,Item::cond_result *cond_value) Item_func_isnull *func=(Item_func_isnull*) cond; Item **args= func->arguments(); - THD *thd=current_thd; if (args[0]->type() == Item::FIELD_ITEM) { Field *field=((Item_field*) args[0])->field; @@ -3631,7 +4710,9 @@ remove_eq_conds(COND *cond,Item::cond_result *cond_value) (thd->options & OPTION_AUTO_IS_NULL) && thd->insert_id()) { +#ifdef HAVE_QUERY_CACHE query_cache_abort(&thd->net); +#endif COND *new_cond; if ((new_cond= new Item_func_eq(args[0], new Item_int("last_insert_id()", @@ -3639,7 +4720,7 @@ remove_eq_conds(COND *cond,Item::cond_result *cond_value) 21)))) { cond=new_cond; - cond->fix_fields(thd,0); + cond->fix_fields(thd, 0, &cond); } thd->insert_id(0); // Clear for next request } @@ -3653,7 +4734,7 @@ remove_eq_conds(COND *cond,Item::cond_result *cond_value) if ((new_cond= new Item_func_eq(args[0],new Item_int("0", 0, 2)))) { cond=new_cond; - cond->fix_fields(thd,0); + cond->fix_fields(thd, 0, &cond); } } } @@ -3742,6 +4823,138 @@ const_expression_in_where(COND *cond, Item *comp_item, Item **const_item) ****************************************************************************/ /* + Create field for temporary table from given field + + SYNOPSIS + create_tmp_field_from_field() + thd Thread handler + org_field field from which new field will be created + name New field name + item Item to create a field for + table Temporary table + item !=NULL if item->result_field should point to new field. + This is relevant for how fill_record() is going to work: + If item != NULL then fill_record() will update + the record in the original table. + If item == NULL then fill_record() will update + the temporary table + convert_blob_length If >0 create a varstring(convert_blob_length) field + instead of blob. 
+ + RETURN + 0 on error + new_created field +*/ + +static Field* create_tmp_field_from_field(THD *thd, Field* org_field, + const char *name, TABLE *table, + Item_field *item, + uint convert_blob_length) +{ + Field *new_field; + + if (convert_blob_length && org_field->flags & BLOB_FLAG) + new_field= new Field_varstring(convert_blob_length, org_field->maybe_null(), + org_field->field_name, table, + org_field->charset()); + else + new_field= org_field->new_field(thd->mem_root, table); + if (new_field) + { + if (item) + item->result_field= new_field; + else + new_field->field_name= name; + if (org_field->maybe_null() || (item && item->maybe_null)) + new_field->flags&= ~NOT_NULL_FLAG; // Because of outer join + if (org_field->type() == FIELD_TYPE_VAR_STRING) + table->db_create_options|= HA_OPTION_PACK_RECORD; + } + return new_field; +} + +/* + Create field for temporary table using type of given item + + SYNOPSIS + create_tmp_field_from_item() + thd Thread handler + item Item to create a field for + table Temporary table + copy_func If set and item is a function, store copy of item + in this array + modify_item 1 if item->result_field should point to new item. + This is relevent for how fill_record() is going to + work: + If modify_item is 1 then fill_record() will update + the record in the original table. + If modify_item is 0 then fill_record() will update + the temporary table + convert_blob_length If >0 create a varstring(convert_blob_length) field + instead of blob. + + RETURN + 0 on error + new_created field +*/ +static Field* create_tmp_field_from_item(THD *thd, Item *item, TABLE *table, + Item ***copy_func, bool modify_item, + uint convert_blob_length) +{ + bool maybe_null=item->maybe_null; + Field *new_field; + LINT_INIT(new_field); + + switch (item->result_type()) { + case REAL_RESULT: + new_field=new Field_double(item->max_length, maybe_null, + item->name, table, item->decimals); + break; + case INT_RESULT: + new_field=new Field_longlong(item->max_length, maybe_null, + item->name, table, item->unsigned_flag); + break; + case STRING_RESULT: + DBUG_ASSERT(item->collation.collation); + + enum enum_field_types type; + /* + DATE/TIME fields have STRING_RESULT result type. To preserve + type they needed to be handled separately. 
+ */ + if ((type= item->field_type()) == MYSQL_TYPE_DATETIME || + type == MYSQL_TYPE_TIME || type == MYSQL_TYPE_DATE) + new_field= item->tmp_table_field_from_field_type(table); + else if (item->max_length/item->collation.collation->mbmaxlen > + CONVERT_IF_BIGGER_TO_BLOB) + { + if (convert_blob_length) + new_field= new Field_varstring(convert_blob_length, maybe_null, + item->name, table, + item->collation.collation); + else + new_field= new Field_blob(item->max_length, maybe_null, item->name, + table, item->collation.collation); + } + else + new_field= new Field_string(item->max_length, maybe_null, item->name, + table, item->collation.collation); + break; + case ROW_RESULT: + default: + // This case should never be choosen + DBUG_ASSERT(0); + new_field= 0; // to satisfy compiler (uninitialized variable) + break; + } + if (copy_func && item->is_result_field()) + *((*copy_func)++) = item; // Save for copy_funcs + if (modify_item) + item->set_result_field(new_field); + return new_field; +} + +/* Create field for temporary table SYNOPSIS @@ -3752,6 +4965,8 @@ const_expression_in_where(COND *cond, Item *comp_item, Item **const_item) type Type of item (normally item->type) copy_func If set and item is a function, store copy of item in this array + from_field if field will be created using other field as example, + pointer example field will be written here group 1 if we are going to do a relative group by on result modify_item 1 if item->result_field should point to new item. This is relevent for how fill_record() is going to @@ -3760,15 +4975,18 @@ const_expression_in_where(COND *cond, Item *comp_item, Item **const_item) the record in the original table. If modify_item is 0 then fill_record() will update the temporary table - + convert_blob_length If >0 create a varstring(convert_blob_length) field + instead of blob. 
+ RETURN 0 on error new_created field */ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type, - Item ***copy_func, Field **from_field, - bool group, bool modify_item) + Item ***copy_func, Field **from_field, + bool group, bool modify_item, uint convert_blob_length, + bool make_copy_field) { switch (type) { case Item::SUM_FUNC_ITEM: @@ -3779,19 +4997,29 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type, case Item_sum::AVG_FUNC: /* Place for sum & count */ if (group) return new Field_string(sizeof(double)+sizeof(longlong), - maybe_null, item->name,table,1); + 0, item->name,table,&my_charset_bin); else return new Field_double(item_sum->max_length,maybe_null, item->name, table, item_sum->decimals); - case Item_sum::STD_FUNC: /* Place for sum & count */ + case Item_sum::VARIANCE_FUNC: /* Place for sum & count */ + case Item_sum::STD_FUNC: if (group) return new Field_string(sizeof(double)*2+sizeof(longlong), - maybe_null, item->name,table,1); + 0, item->name,table,&my_charset_bin); else return new Field_double(item_sum->max_length, maybe_null, item->name,table,item_sum->decimals); case Item_sum::UNIQUE_USERS_FUNC: return new Field_long(9,maybe_null,item->name,table,1); + case Item_sum::MIN_FUNC: + case Item_sum::MAX_FUNC: + if (item_sum->args[0]->type() == Item::FIELD_ITEM) + { + *from_field= ((Item_field*) item_sum->args[0])->field; + return create_tmp_field_from_field(thd, *from_field, item->name, table, + NULL, convert_blob_length); + } + /* fall through */ default: switch (item_sum->result_type()) { case REAL_RESULT: @@ -3801,39 +5029,43 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type, return new Field_longlong(item_sum->max_length,maybe_null, item->name,table,item->unsigned_flag); case STRING_RESULT: - if (item_sum->max_length > 255) - return new Field_blob(item_sum->max_length,maybe_null, - item->name,table,item->binary); + if (item_sum->max_length/item_sum->collation.collation->mbmaxlen > + CONVERT_IF_BIGGER_TO_BLOB) + { + if (convert_blob_length) + return new Field_varstring(convert_blob_length, maybe_null, + item->name, table, + item->collation.collation); + else + return new Field_blob(item_sum->max_length, maybe_null, item->name, + table, item->collation.collation); + } return new Field_string(item_sum->max_length,maybe_null, - item->name,table,item->binary); + item->name,table,item->collation.collation); + case ROW_RESULT: + default: + // This case should never be choosen + DBUG_ASSERT(0); + thd->fatal_error(); + return 0; } } - thd->fatal_error=1; - return 0; // Error + /* We never come here */ } case Item::FIELD_ITEM: + case Item::DEFAULT_VALUE_ITEM: { - Field *org_field=((Item_field*) item)->field,*new_field; - - *from_field=org_field; - // The following should always be true - if ((new_field= org_field->new_field(&thd->mem_root,table))) - { - if (modify_item) - ((Item_field*) item)->result_field= new_field; - else - new_field->field_name=item->name; - if (org_field->maybe_null()) - new_field->flags&= ~NOT_NULL_FLAG; // Because of outer join - if (org_field->type()==FIELD_TYPE_VAR_STRING) - table->db_create_options|= HA_OPTION_PACK_RECORD; - } - return new_field; + Item_field *field= (Item_field*) item; + return create_tmp_field_from_field(thd, (*from_field= field->field), + item->name, table, + modify_item ? 
(Item_field*) item : NULL, + convert_blob_length); } case Item::FUNC_ITEM: case Item::COND_ITEM: case Item::FIELD_AVG_ITEM: case Item::FIELD_STD_ITEM: + case Item::SUBSELECT_ITEM: /* The following can only happen with 'CREATE TABLE ... SELECT' */ case Item::PROC_ITEM: case Item::INT_ITEM: @@ -3842,35 +5074,16 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type, case Item::REF_ITEM: case Item::NULL_ITEM: case Item::VARBIN_ITEM: - { - bool maybe_null=item->maybe_null; - Field *new_field; - LINT_INIT(new_field); - - switch (item->result_type()) { - case REAL_RESULT: - new_field=new Field_double(item->max_length,maybe_null, - item->name,table,item->decimals); - break; - case INT_RESULT: - new_field=new Field_longlong(item->max_length,maybe_null, - item->name,table, item->unsigned_flag); - break; - case STRING_RESULT: - if (item->max_length > 255) - new_field= new Field_blob(item->max_length,maybe_null, - item->name,table,item->binary); - else - new_field= new Field_string(item->max_length,maybe_null, - item->name,table,item->binary); - break; + if (make_copy_field) + { + DBUG_ASSERT(((Item_result_field*)item)->result_field); + *from_field= ((Item_result_field*)item)->result_field; } - if (copy_func && item->is_result_field()) - *((*copy_func)++) = item; // Save for copy_funcs - if (modify_item) - item->set_result_field(new_field); - return new_field; - } + return create_tmp_field_from_item(thd, item, table, (make_copy_field ? 0 : + copy_func), modify_item, + convert_blob_length); + case Item::TYPE_HOLDER: + return ((Item_type_holder *)item)->make_field_by_type(table); default: // Dosen't have to be stored return 0; } @@ -3887,7 +5100,8 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type, TABLE * create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, ORDER *group, bool distinct, bool save_sum_fields, - bool allow_distinct_limit, ulong select_options) + ulong select_options, ha_rows rows_limit, + char *table_alias) { TABLE *table; uint i,field_count,reclength,null_count,null_pack_length, @@ -3905,11 +5119,11 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, Item **copy_func; MI_COLUMNDEF *recinfo; uint temp_pool_slot=MY_BIT_NONE; - + bool force_copy_fields= param->force_copy_fields; DBUG_ENTER("create_tmp_table"); - DBUG_PRINT("enter",("distinct: %d save_sum_fields: %d allow_distinct_limit: %d group: %d", + DBUG_PRINT("enter",("distinct: %d save_sum_fields: %d rows_limit: %lu group: %d", (int) distinct, (int) save_sum_fields, - (int) allow_distinct_limit,test(group))); + (ulong) rows_limit,test(group))); statistic_increment(created_tmp_tables, &LOCK_status); @@ -3917,12 +5131,17 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, temp_pool_slot = bitmap_set_next(&temp_pool); if (temp_pool_slot != MY_BIT_NONE) // we got a slot - sprintf(path, "%s%s_%lx_%i", mysql_tmpdir, tmp_file_prefix, + sprintf(path, "%s_%lx_%i", tmp_file_prefix, current_pid, temp_pool_slot); else // if we run out of slots or we are not using tempool - sprintf(path,"%s%s%lx_%lx_%x",mysql_tmpdir,tmp_file_prefix,current_pid, + sprintf(path,"%s%lx_%lx_%x", tmp_file_prefix,current_pid, thd->thread_id, thd->tmp_table++); + fn_format(path, path, mysql_tmpdir, "", MY_REPLACE_EXT|MY_UNPACK_FILENAME); + + if (lower_case_table_names) + my_casedn_str(files_charset_info, path); + if (group) { if (!param->quick_group) @@ -3976,10 +5195,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, table->field=reg_field; 
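The two sprintf() calls above derive the temporary table's base name either from a temp-pool slot or from the thread id and a per-thread counter, and fn_format() then places it under mysql_tmpdir. A rough, self-contained illustration of the shapes those names take (the prefix and numbers below are made up; the real prefix comes from tmp_file_prefix):

#include <cstdio>

int main()
{
  char path[64];
  const char *prefix= "#sql";        /* assumed value of tmp_file_prefix */
  unsigned long pid= 0x1a2b;         /* stand-in for current_pid */

  /* slot-based name, used when a temp-pool slot was acquired */
  std::snprintf(path, sizeof(path), "%s_%lx_%i", prefix, pid, 3);
  std::printf("%s\n", path);         /* prints "#sql_1a2b_3" */

  /* fallback name: thread id plus a per-thread tmp_table counter */
  std::snprintf(path, sizeof(path), "%s%lx_%lx_%x", prefix, pid, 0x7ful, 2u);
  std::printf("%s\n", path);         /* prints "#sql1a2b_7f_2" */
  return 0;
}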
table->blob_field= (Field_blob**) blob_field; table->real_name=table->path=tmpname; - /* - This must be "" as field may refer to it after tempory table is dropped - */ - table->table_name= (char*) ""; + table->table_name= table_alias; table->reginfo.lock_type=TL_WRITE; /* Will be updated */ table->db_stat=HA_OPEN_KEYFILE+HA_OPEN_RNDFILE; table->blob_ptr_size=mi_portable_sizeof_char_ptr; @@ -3988,6 +5204,13 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, table->db_low_byte_first=1; // True for HEAP and MyISAM table->temp_pool_slot = temp_pool_slot; table->copy_blobs= 1; + table->in_use= thd; + table->keys_for_keyread.init(); + table->keys_in_use.init(); + table->read_only_keys.init(); + table->quick_keys.init(); + table->used_keys.init(); + table->keys_in_use_for_query.init(); /* Calculate which type of fields we will store in the temporary table */ @@ -4020,25 +5243,34 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, ((Item_sum*) item)->result_field=0; for (i=0 ; i < ((Item_sum*) item)->arg_count ; i++) { - Item *arg= ((Item_sum*) item)->args[i]; + Item **argp= ((Item_sum*) item)->args + i; + Item *arg= *argp; if (!arg->const_item()) { Field *new_field= - create_tmp_field(thd, table,arg,arg->type(),&copy_func, - tmp_from_field, group != 0,not_all_columns); + create_tmp_field(thd, table, arg, arg->type(), &copy_func, + tmp_from_field, group != 0,not_all_columns, + param->convert_blob_length, 0); if (!new_field) goto err; // Should be OOM tmp_from_field++; *(reg_field++)= new_field; reclength+=new_field->pack_length(); - if (!(new_field->flags & NOT_NULL_FLAG)) - null_count++; if (new_field->flags & BLOB_FLAG) { *blob_field++= new_field; blob_count++; } - ((Item_sum*) item)->args[i]= new Item_field(new_field); + thd->change_item_tree(argp, new Item_field(new_field)); + if (!(new_field->flags & NOT_NULL_FLAG)) + { + null_count++; + /* + new_field->maybe_null() is still false, it will be + changed below. But we have to setup Item_field correctly + */ + (*argp)->maybe_null=1; + } } } } @@ -4054,12 +5286,15 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, We here distinguish between UNION and multi-table-updates by the fact that in the latter case group is set to the row pointer. 
*/ - Field *new_field=create_tmp_field(thd, table, item,type, &copy_func, - tmp_from_field, group != 0, - not_all_columns || group !=0); + Field *new_field= create_tmp_field(thd, table, item, type, &copy_func, + tmp_from_field, group != 0, + !force_copy_fields && + (not_all_columns || group !=0), + param->convert_blob_length, + force_copy_fields); if (!new_field) { - if (thd->fatal_error) + if (thd->is_fatal_error) goto err; // Got OOM continue; // Some kind of const item } @@ -4098,7 +5333,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, /* If result table is small; use a heap */ if (blob_count || using_unique_constraint || (select_options & (OPTION_BIG_TABLES | SELECT_SMALL_RESULT)) == - OPTION_BIG_TABLES) + OPTION_BIG_TABLES ||(select_options & TMP_TABLE_FORCE_MYISAM)) { table->file=get_new_handler(table,table->db_type=DB_TYPE_MYISAM); if (group && @@ -4137,7 +5372,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, if (!(table->record[0]= (byte *) my_malloc(alloc_length*3, MYF(MY_WME)))) goto err; table->record[1]= table->record[0]+alloc_length; - table->record[2]= table->record[1]+alloc_length; + table->default_values= table->record[1]+alloc_length; } copy_func[0]=0; // End marker @@ -4151,6 +5386,10 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, recinfo->length=null_pack_length; recinfo++; bfill(null_flags,null_pack_length,255); // Set null fields + + table->null_flags= (uchar*) table->record[0]; + table->null_fields= null_count+ hidden_null_count; + table->null_bytes= null_pack_length; } null_count= (blob_count == 0) ? 1 : 0; hidden_field_count=param->hidden_field_count; @@ -4207,11 +5446,14 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, recinfo->type=FIELD_NORMAL; if (!--hidden_field_count) null_count=(null_count+7) & ~7; // move to next byte + + // fix table name in field entry + field->table_name= table->table_name; } param->copy_field_end=copy; param->recinfo=recinfo; - store_record(table,2); // Make empty default record + store_record(table,default_values); // Make empty default record if (thd->variables.tmp_table_size == ~(ulong) 0) // No limit table->max_rows= ~(ha_rows) 0; @@ -4236,9 +5478,10 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, keyinfo->usable_key_parts=keyinfo->key_parts= param->group_parts; keyinfo->key_length=0; keyinfo->rec_per_key=0; + keyinfo->algorithm= HA_KEY_ALG_UNDEF; for (; group ; group=group->next,key_part_info++) { - Field *field=(*group->item)->tmp_table_field(); + Field *field=(*group->item)->get_tmp_table_field(); bool maybe_null=(*group->item)->maybe_null; key_part_info->null_bit=0; key_part_info->field= field; @@ -4252,7 +5495,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, if (!using_unique_constraint) { group->buff=(char*) group_buff; - if (!(group->field=field->new_field(&thd->mem_root,table))) + if (!(group->field=field->new_field(thd->mem_root,table))) goto err; /* purecov: inspected */ if (maybe_null) { @@ -4290,13 +5533,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, null_pack_length-=hidden_null_pack_length; keyinfo->key_parts= ((field_count-param->hidden_field_count)+ test(null_pack_length)); - if (allow_distinct_limit) - { - set_if_smaller(table->max_rows,thd->select_limit); - param->end_write_records=thd->select_limit; - } - else - param->end_write_records= HA_POS_ERROR; + set_if_smaller(table->max_rows, rows_limit); + param->end_write_records= rows_limit; 
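The record layout set up above stores one NULL bit per nullable column in null_bytes flag bytes at the front of record[0] (table->null_flags), and "(null_count+7) & ~7" rounds the running bit count up to a byte boundary once the hidden fields are done. A small stand-alone sketch of that packing arithmetic (the real row format also reserves bits for other purposes, which is ignored here):

#include <cstdio>
#include <cstring>

int main()
{
  const unsigned null_fields= 11;                   /* made-up number of nullable columns */
  const unsigned null_bytes= (null_fields + 7) / 8; /* flag bytes in front of the row data */
  unsigned char record[32];
  std::memset(record, 255, null_bytes);             /* like bfill(): start with every column NULL */
  std::memset(record + null_bytes, 0, sizeof(record) - null_bytes);

  unsigned col= 9;                                  /* clear the bit: column 9 now has a value */
  record[col / 8]&= (unsigned char) ~(1u << (col % 8));

  bool col_is_null= (record[col / 8] >> (col % 8)) & 1;
  std::printf("null_bytes=%u  column 9 NULL: %d\n", null_bytes, (int) col_is_null);
  return 0;
}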
table->distinct=1; table->keys=1; if (blob_count) @@ -4312,6 +5550,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, keyinfo->flags=HA_NOSAME | HA_NULL_ARE_EQUAL; keyinfo->key_length=(uint16) reclength; keyinfo->name=(char*) "tmp"; + keyinfo->algorithm= HA_KEY_ALG_UNDEF; + keyinfo->rec_per_key=0; if (null_pack_length) { key_part_info->null_bit=0; @@ -4322,7 +5562,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, (uchar*) 0, (uint) 0, Field::NONE, - NullS, table, (bool) 1); + NullS, table, &my_charset_bin); key_part_info->key_type=FIELDFLAG_BINARY; key_part_info->type= HA_KEYTYPE_BINARY; key_part_info++; @@ -4343,7 +5583,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, 0 : FIELDFLAG_BINARY; } } - if (thd->fatal_error) // If end of memory + if (thd->is_fatal_error) // If end of memory goto err; /* purecov: inspected */ table->db_record_offset=1; if (table->db_type == DB_TYPE_MYISAM) @@ -4351,8 +5591,6 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, if (create_myisam_tmp_table(table,param,select_options)) goto err; } - /* Set table_name for easier debugging */ - table->table_name= base_name(tmpname); if (!open_tmp_table(table)) DBUG_RETURN(table); @@ -4394,7 +5632,7 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param, if (table->keys) { // Get keys for ni_create bool using_unique_constraint=0; - MI_KEYSEG *seg= (MI_KEYSEG*) sql_calloc(sizeof(*seg) * + HA_KEYSEG *seg= (HA_KEYSEG*) sql_calloc(sizeof(*seg) * keyinfo->key_parts); if (!seg) goto err; @@ -4431,7 +5669,7 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param, { Field *field=keyinfo->key_part[i].field; seg->flag= 0; - seg->language= MY_CHARSET_CURRENT; + seg->language= field->charset()->number; seg->length= keyinfo->key_part[i].length; seg->start= keyinfo->key_part[i].offset; if (field->flags & BLOB_FLAG) @@ -4470,6 +5708,7 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param, } MI_CREATE_INFO create_info; bzero((char*) &create_info,sizeof(create_info)); + if ((options & (OPTION_BIG_TABLES | SELECT_SMALL_RESULT)) == OPTION_BIG_TABLES) create_info.data_file_length= ~(ulonglong) 0; @@ -4521,7 +5760,7 @@ free_tmp_table(THD *thd, TABLE *entry) /* free blobs */ for (Field **ptr=entry->field ; *ptr ; ptr++) - delete *ptr; + (*ptr)->free(); my_free((gptr) entry->record[0],MYF(0)); free_io_cache(entry); @@ -4559,12 +5798,14 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param, thd->proc_info="converting HEAP to MyISAM"; if (create_myisam_tmp_table(&new_table,param, - thd->lex.select_lex.options | thd->options)) + thd->lex->select_lex.options | thd->options)) goto err2; if (open_tmp_table(&new_table)) goto err1; - table->file->index_end(); - table->file->rnd_init(); + if (table->file->indexes_are_disabled()) + new_table.file->disable_indexes(HA_KEY_SWITCH_ALL); + table->file->ha_index_or_rnd_end(); + table->file->ha_rnd_init(1); if (table->no_rows) { new_table.file->extra(HA_EXTRA_NO_ROWS); @@ -4579,9 +5820,7 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param, table->file->info(HA_STATUS_VARIABLE); /* update table->file->records */ new_table.file->start_bulk_insert(table->file->records); #else - /* - HA_EXTRA_WRITE_CACHE can stay until close, no need to disable it explicitly. 
- */ + /* HA_EXTRA_WRITE_CACHE can stay until close, no need to disable it */ new_table.file->extra(HA_EXTRA_WRITE_CACHE); #endif @@ -4600,7 +5839,7 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param, } /* remove heap table and change to use myisam table */ - (void) table->file->rnd_end(); + (void) table->file->ha_rnd_end(); (void) table->file->close(); (void) table->file->delete_table(table->real_name); delete table->file; @@ -4614,7 +5853,7 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param, err: DBUG_PRINT("error",("Got error: %d",write_err)); table->file->print_error(error,MYF(0)); // Give table is full error - (void) table->file->rnd_end(); + (void) table->file->ha_rnd_end(); (void) new_table.file->close(); err1: new_table.file->delete_table(new_table.real_name); @@ -4635,23 +5874,23 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param, static int do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure) { - int error; + int error= 0; JOIN_TAB *join_tab; int (*end_select)(JOIN *, struct st_join_table *,bool); DBUG_ENTER("do_select"); - + List<Item> *columns_list= procedure ? &join->procedure_fields_list : fields; join->procedure=procedure; /* Tell the client how many fields there are in a row */ if (!table) - join->result->send_fields(*fields,1); + join->result->send_fields(*columns_list, 1); else { VOID(table->file->extra(HA_EXTRA_WRITE_CACHE)); empty_record(table); } - join->tmp_table=table; /* Save for easy recursion */ + join->tmp_table= table; /* Save for easy recursion */ join->fields= fields; /* Set up select_end */ @@ -4663,7 +5902,8 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure) { DBUG_PRINT("info",("Using end_update")); end_select=end_update; - table->file->index_init(0); + if (!table->file->inited) + table->file->ha_index_init(0); } else { @@ -4696,25 +5936,28 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure) join->send_records=0; if (join->tables == join->const_tables) { - if (!(error=(*end_select)(join,join_tab,0)) || error == -3) - error=(*end_select)(join,join_tab,1); + /* + HAVING will be chcked after processing aggregate functions, + But WHERE should checkd here (we alredy have read tables) + */ + if (!join->conds || join->conds->val_int()) + { + if (!(error=(*end_select)(join,join_tab,0)) || error == -3) + error=(*end_select)(join,join_tab,1); + } + else if (join->send_row_on_empty_set()) + error= join->result->send_data(*columns_list); } else { - error=sub_select(join,join_tab,0); + error= sub_select(join,join_tab,0); if (error >= 0) - error=sub_select(join,join_tab,1); + error= sub_select(join,join_tab,1); if (error == -3) - error=0; /* select_limit used */ + error= 0; /* select_limit used */ } - /* Return 1 if error is sent; -1 if error should be sent */ - if (error < 0) - { - join->result->send_error(0,NullS); /* purecov: inspected */ - error=1; // Error sent - } - else + if (error >= 0) { error=0; if (!table) // If sending data to client @@ -4723,7 +5966,7 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure) The following will unlock all cursors if the command wasn't an update command */ - join_free(join); + join->join_free(0); // Unlock all cursors if (join->result->send_eof()) error= 1; // Don't send error } @@ -4731,21 +5974,27 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure) } if (table) { - int tmp; + int tmp, new_errno= 0; if 
((tmp=table->file->extra(HA_EXTRA_NO_CACHE))) { - my_errno=tmp; - error= -1; + DBUG_PRINT("error",("extra(HA_EXTRA_NO_CACHE) failed")); + new_errno= tmp; } - if ((tmp=table->file->index_end())) + if ((tmp=table->file->ha_index_or_rnd_end())) { - my_errno=tmp; - error= -1; + DBUG_PRINT("error",("ha_index_or_rnd_end() failed")); + new_errno= tmp; } - if (error == -1) - table->file->print_error(my_errno,MYF(0)); + if (new_errno) + table->file->print_error(new_errno,MYF(0)); } - DBUG_RETURN(error); +#ifndef DBUG_OFF + if (error) + { + DBUG_PRINT("error",("Error: do_select() failed")); + } +#endif + DBUG_RETURN(join->thd->net.report_error ? -1 : error); } @@ -4789,6 +6038,7 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) int error; bool found=0; COND *on_expr=join_tab->on_expr, *select_cond=join_tab->select_cond; + my_bool *report_error= &(join->thd->net.report_error); if (!(error=(*join_tab->read_first_record)(join_tab))) { @@ -4797,6 +6047,7 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) ha_rows found_records=join->found_records; READ_RECORD *info= &join_tab->read_record; + join->thd->row_count= 0; do { if (join->thd->killed) // Aborted by user @@ -4805,6 +6056,7 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) return -2; /* purecov: inspected */ } join->examined_rows++; + join->thd->row_count++; if (!on_expr || on_expr->val_int()) { found=1; @@ -4823,16 +6075,24 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) return 0; } else + { + /* + This row failed selection, release lock on it. + XXX: There is no table handler in MySQL which makes use of this + call. It's kept from Gemini times. A lot of new code was added + recently (i. e. subselects) without having it in mind. + */ info->file->unlock_row(); + } } - } while (!(error=info->read_record(info))); + } while (!(error=info->read_record(info)) && !(*report_error)); } - if (error > 0) // Fatal error + if (error > 0 || (*report_error)) // Fatal error return -1; if (!found && on_expr) { // OUTER JOIN - restore_record(join_tab->table,2); // Make empty record + restore_record(join_tab->table,default_values); // Make empty record mark_as_null_row(join_tab->table); // For group by without error if (!select_cond || select_cond->val_int()) { @@ -4845,14 +6105,14 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) static int -flush_cached_records(JOIN *join,JOIN_TAB *join_tab,bool skipp_last) +flush_cached_records(JOIN *join,JOIN_TAB *join_tab,bool skip_last) { int error; READ_RECORD *info; if (!join_tab->cache.records) return 0; /* Nothing to do */ - if (skipp_last) + if (skip_last) (void) store_record_in_cache(&join_tab->cache); // Must save this for later if (join_tab->use_quick == 2) { @@ -4865,8 +6125,7 @@ flush_cached_records(JOIN *join,JOIN_TAB *join_tab,bool skipp_last) /* read through all records */ if ((error=join_init_read_record(join_tab))) { - reset_cache(&join_tab->cache); - join_tab->cache.records=0; join_tab->cache.ptr_record= (uint) ~0; + reset_cache_write(&join_tab->cache); return -error; /* No records or error */ } @@ -4886,24 +6145,26 @@ flush_cached_records(JOIN *join,JOIN_TAB *join_tab,bool skipp_last) } SQL_SELECT *select=join_tab->select; if (!error && (!join_tab->cache.select || - !join_tab->cache.select->skipp_record())) + !join_tab->cache.select->skip_record())) { uint i; - reset_cache(&join_tab->cache); - for (i=(join_tab->cache.records- (skipp_last ? 
1 : 0)) ; i-- > 0 ;) + reset_cache_read(&join_tab->cache); + for (i=(join_tab->cache.records- (skip_last ? 1 : 0)) ; i-- > 0 ;) { read_cached_record(join_tab); - if (!select || !select->skipp_record()) + if (!select || !select->skip_record()) if ((error=(join_tab->next_select)(join,join_tab+1,0)) < 0) + { + reset_cache_write(&join_tab->cache); return error; /* purecov: inspected */ + } } } } while (!(error=info->read_record(info))); - if (skipp_last) + if (skip_last) read_cached_record(join_tab); // Restore current record - reset_cache(&join_tab->cache); - join_tab->cache.records=0; join_tab->cache.ptr_record= (uint) ~0; + reset_cache_write(&join_tab->cache); if (error > 0) // Fatal error return -1; /* purecov: inspected */ for (JOIN_TAB *tmp2=join->join_tab; tmp2 != join_tab ; tmp2++) @@ -4916,6 +6177,40 @@ flush_cached_records(JOIN *join,JOIN_TAB *join_tab,bool skipp_last) The different ways to read a record Returns -1 if row was not found, 0 if row was found and 1 on errors *****************************************************************************/ + +/* Help function when we get some an error from the table handler */ + +int report_error(TABLE *table, int error) +{ + if (error == HA_ERR_END_OF_FILE || error == HA_ERR_KEY_NOT_FOUND) + { + table->status= STATUS_GARBAGE; + return -1; // key not found; ok + } + /* + Locking reads can legally return also these errors, do not + print them to the .err log + */ + if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT) + sql_print_error("Got error %d when reading table '%s'", + error, table->path); + table->file->print_error(error,MYF(0)); + return 1; +} + + +int safe_index_read(JOIN_TAB *tab) +{ + int error; + TABLE *table= tab->table; + if ((error=table->file->index_read(table->record[0], + tab->ref.key_buff, + tab->ref.key_length, HA_READ_KEY_EXACT))) + return report_error(table, error); + return 0; +} + + static int join_read_const_table(JOIN_TAB *tab, POSITION *pos) { @@ -4939,9 +6234,8 @@ join_read_const_table(JOIN_TAB *tab, POSITION *pos) } else { - if (!table->key_read && - (table->used_keys & ((key_map) 1 << tab->ref.key)) && - !table->no_keyread && + if (!table->key_read && table->used_keys.is_set(tab->ref.key) && + !table->no_keyread && (int) table->reginfo.lock_type <= (int) TL_READ_HIGH_PRIORITY) { table->key_read=1; @@ -4956,6 +6250,11 @@ join_read_const_table(JOIN_TAB *tab, POSITION *pos) if (!table->outer_join || error > 0) DBUG_RETURN(error); } + if (table->key_read) + { + table->key_read=0; + table->file->extra(HA_EXTRA_NO_KEYREAD); + } } if (tab->on_expr && !table->null_row) { @@ -4979,18 +6278,15 @@ join_read_system(JOIN_TAB *tab) table->primary_key))) { if (error != HA_ERR_END_OF_FILE) - { - table->file->print_error(error,MYF(0)); - return 1; - } + return report_error(table, error); mark_as_null_row(tab->table); empty_record(table); // Make empty record return -1; } - store_record(table,1); + store_record(table,record[1]); } else if (!table->status) // Only happens with left join - restore_record(table,1); // restore old record + restore_record(table,record[1]); // restore old record table->null_row=0; return table->status ? 
-1 : 0; } @@ -5003,7 +6299,7 @@ join_read_const(JOIN_TAB *tab) TABLE *table= tab->table; if (table->status & STATUS_GARBAGE) // If first read { - if (cp_buffer_from_ref(&tab->ref)) + if (cp_buffer_from_ref(tab->join->thd, &tab->ref)) error=HA_ERR_KEY_NOT_FOUND; else { @@ -5016,23 +6312,15 @@ join_read_const(JOIN_TAB *tab) mark_as_null_row(tab->table); empty_record(table); if (error != HA_ERR_KEY_NOT_FOUND) - { - /* Locking reads can legally return also these errors, do not - print them to the .err log */ - if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT) - sql_print_error("read_const: Got error %d when reading table %s", - error, table->path); - table->file->print_error(error,MYF(0)); - return 1; - } + return report_error(table, error); return -1; } - store_record(table,1); + store_record(table,record[1]); } else if (!(table->status & ~STATUS_NULL_ROW)) // Only happens with left join { table->status=0; - restore_record(table,1); // restore old record + restore_record(table,record[1]); // restore old record } table->null_row=0; return table->status ? -1 : 0; @@ -5045,6 +6333,8 @@ join_read_key(JOIN_TAB *tab) int error; TABLE *table= tab->table; + if (!table->file->inited) + table->file->ha_index_init(tab->ref.key); if (cmp_buffer_with_ref(tab) || (table->status & (STATUS_GARBAGE | STATUS_NO_PARENT | STATUS_NULL_ROW))) { @@ -5057,13 +6347,7 @@ join_read_key(JOIN_TAB *tab) tab->ref.key_buff, tab->ref.key_length,HA_READ_KEY_EXACT); if (error && error != HA_ERR_KEY_NOT_FOUND) - { - if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT) - sql_print_error("read_key: Got error %d when reading table '%s'",error, - table->path); - table->file->print_error(error,MYF(0)); - return 1; - } + return report_error(table, error); } table->null_row=0; return table->status ? 
-1 : 0; @@ -5076,25 +6360,27 @@ join_read_always_key(JOIN_TAB *tab) int error; TABLE *table= tab->table; - if (cp_buffer_from_ref(&tab->ref)) + for (uint i= 0 ; i < tab->ref.key_parts ; i++) + { + if ((tab->ref.null_rejecting & 1 << i) && tab->ref.items[i]->is_null()) + return -1; + } + if (!table->file->inited) + table->file->ha_index_init(tab->ref.key); + if (cp_buffer_from_ref(tab->join->thd, &tab->ref)) return -1; if ((error=table->file->index_read(table->record[0], tab->ref.key_buff, tab->ref.key_length,HA_READ_KEY_EXACT))) { if (error != HA_ERR_KEY_NOT_FOUND) - { - if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT) - sql_print_error("read_const: Got error %d when reading table %s",error, - table->path); - table->file->print_error(error,MYF(0)); - return 1; - } + return report_error(table, error); return -1; /* purecov: inspected */ } return 0; } + /* This function is used when optimizing away ORDER BY in SELECT * FROM t1 WHERE a=1 ORDER BY a DESC,b DESC @@ -5106,20 +6392,16 @@ join_read_last_key(JOIN_TAB *tab) int error; TABLE *table= tab->table; - if (cp_buffer_from_ref(&tab->ref)) + if (!table->file->inited) + table->file->ha_index_init(tab->ref.key); + if (cp_buffer_from_ref(tab->join->thd, &tab->ref)) return -1; if ((error=table->file->index_read_last(table->record[0], tab->ref.key_buff, tab->ref.key_length))) { if (error != HA_ERR_KEY_NOT_FOUND) - { - if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT) - sql_print_error("read_const: Got error %d when reading table %s",error, - table->path); - table->file->print_error(error,MYF(0)); - return 1; - } + return report_error(table, error); return -1; /* purecov: inspected */ } return 0; @@ -5146,19 +6428,14 @@ join_read_next_same(READ_RECORD *info) tab->ref.key_length))) { if (error != HA_ERR_END_OF_FILE) - { - if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT) - sql_print_error("read_next: Got error %d when reading table %s",error, - table->path); - table->file->print_error(error,MYF(0)); - return 1; - } + return report_error(table, error); table->status= STATUS_GARBAGE; return -1; } return 0; } + static int join_read_prev_same(READ_RECORD *info) { @@ -5167,23 +6444,9 @@ join_read_prev_same(READ_RECORD *info) JOIN_TAB *tab=table->reginfo.join_tab; if ((error=table->file->index_prev(table->record[0]))) - { - if (error != HA_ERR_END_OF_FILE) - { - if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT) - sql_print_error("read_next: Got error %d when reading table %s",error, - table->path); - table->file->print_error(error,MYF(0)); - error= 1; - } - else - { - table->status= STATUS_GARBAGE; - error= -1; - } - } - else if (key_cmp(table, tab->ref.key_buff, tab->ref.key, - tab->ref.key_length)) + return report_error(table, error); + if (key_cmp_if_same(table, tab->ref.key_buff, tab->ref.key, + tab->ref.key_length)) { table->status=STATUS_NOT_FOUND; error= -1; @@ -5207,7 +6470,7 @@ test_if_quick_select(JOIN_TAB *tab) delete tab->select->quick; tab->select->quick=0; return tab->select->test_quick_select(tab->join->thd, tab->keys, - (table_map) 0, HA_POS_ERROR); + (table_map) 0, HA_POS_ERROR, 0); } @@ -5221,12 +6484,13 @@ join_init_read_record(JOIN_TAB *tab) return (*tab->read_record.read_record)(&tab->read_record); } + static int join_read_first(JOIN_TAB *tab) { int error; TABLE *table=tab->table; - if (!table->key_read && (table->used_keys & ((key_map) 1 << tab->index)) && + if (!table->key_read && table->used_keys.is_set(tab->index) && !table->no_keyread) { 
table->key_read=1; @@ -5238,17 +6502,12 @@ join_read_first(JOIN_TAB *tab) tab->read_record.file=table->file; tab->read_record.index=tab->index; tab->read_record.record=table->record[0]; - error=tab->table->file->index_first(tab->table->record[0]); - if (error) + if (!table->file->inited) + table->file->ha_index_init(tab->index); + if ((error=tab->table->file->index_first(tab->table->record[0]))) { if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) - { - if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT) - sql_print_error("read_first_with_key: Got error %d when reading table", - error); - table->file->print_error(error,MYF(0)); - return 1; - } + report_error(table, error); return -1; } return 0; @@ -5258,29 +6517,19 @@ join_read_first(JOIN_TAB *tab) static int join_read_next(READ_RECORD *info) { - int error=info->file->index_next(info->record); - if (error) - { - if (error != HA_ERR_END_OF_FILE) - { - if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT) - sql_print_error( - "read_next_with_key: Got error %d when reading table %s", - error, info->table->path); - info->file->print_error(error,MYF(0)); - return 1; - } - return -1; - } + int error; + if ((error=info->file->index_next(info->record))) + return report_error(info->table, error); return 0; } + static int join_read_last(JOIN_TAB *tab) { TABLE *table=tab->table; int error; - if (!table->key_read && (table->used_keys & ((key_map) 1 << tab->index)) && + if (!table->key_read && table->used_keys.is_set(tab->index) && !table->no_keyread) { table->key_read=1; @@ -5292,19 +6541,10 @@ join_read_last(JOIN_TAB *tab) tab->read_record.file=table->file; tab->read_record.index=tab->index; tab->read_record.record=table->record[0]; - error=tab->table->file->index_last(tab->table->record[0]); - if (error) - { - if (error != HA_ERR_END_OF_FILE) - { - if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT) - sql_print_error("read_last_with_key: Got error %d when reading table", - error, table->path); - table->file->print_error(error,MYF(0)); - return 1; - } - return -1; - } + if (!table->file->inited) + table->file->ha_index_init(tab->index); + if ((error= tab->table->file->index_last(tab->table->record[0]))) + return report_error(table, error); return 0; } @@ -5312,20 +6552,9 @@ join_read_last(JOIN_TAB *tab) static int join_read_prev(READ_RECORD *info) { - int error=info->file->index_prev(info->record); - if (error) - { - if (error != HA_ERR_END_OF_FILE) - { - if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT) - sql_print_error( - "read_prev_with_key: Got error %d when reading table: %s", - error,info->table->path); - info->file->print_error(error,MYF(0)); - return 1; - } - return -1; - } + int error; + if ((error= info->file->index_prev(info->record))) + return report_error(info->table, error); return 0; } @@ -5336,48 +6565,65 @@ join_ft_read_first(JOIN_TAB *tab) int error; TABLE *table= tab->table; + if (!table->file->inited) + table->file->ha_index_init(tab->ref.key); #if NOT_USED_YET - if (cp_buffer_from_ref(&tab->ref)) // as ft-key doesn't use store_key's + if (cp_buffer_from_ref(tab->join->thd, &tab->ref)) // as ft-key doesn't use store_key's return -1; // see also FT_SELECT::init() #endif table->file->ft_init(); - error=table->file->ft_read(table->record[0]); - if (error) - { - if (error != HA_ERR_END_OF_FILE) - { - if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT) - sql_print_error("ft_read_first: Got error %d when reading table 
%s", - error, table->path); - table->file->print_error(error,MYF(0)); - return 1; - } - return -1; - } + if ((error= table->file->ft_read(table->record[0]))) + return report_error(table, error); return 0; } static int join_ft_read_next(READ_RECORD *info) { - int error=info->file->ft_read(info->table->record[0]); - if (error) - { - if (error != HA_ERR_END_OF_FILE) - { - if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT) - sql_print_error("ft_read_next: Got error %d when reading table %s", - error, info->table->path); - info->file->print_error(error,MYF(0)); - return 1; - } - return -1; - } + int error; + if ((error= info->file->ft_read(info->table->record[0]))) + return report_error(info->table, error); return 0; } +/* + Reading of key with key reference and one part that may be NULL +*/ + +static int +join_read_always_key_or_null(JOIN_TAB *tab) +{ + int res; + + /* First read according to key which is NOT NULL */ + *tab->ref.null_ref_key= 0; // Clear null byte + if ((res= join_read_always_key(tab)) >= 0) + return res; + + /* Then read key with null value */ + *tab->ref.null_ref_key= 1; // Set null byte + return safe_index_read(tab); +} + + +static int +join_read_next_same_or_null(READ_RECORD *info) +{ + int error; + if ((error= join_read_next_same(info)) >= 0) + return error; + JOIN_TAB *tab= info->table->reginfo.join_tab; + + /* Test if we have already done a read after null key */ + if (*tab->ref.null_ref_key) + return -1; // All keys read + *tab->ref.null_ref_key= 1; // Set null byte + return safe_index_read(tab); // then read null keys +} + + /***************************************************************************** The different end of select functions These functions returns < 0 when end is reached, 0 on ok and > 0 if a @@ -5397,12 +6643,12 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), DBUG_RETURN(0); // Didn't match having error=0; if (join->procedure) - error=join->procedure->send_row(*join->fields); + error=join->procedure->send_row(join->procedure_fields_list); else if (join->do_send_rows) error=join->result->send_data(*join->fields); if (error) DBUG_RETURN(-1); /* purecov: inspected */ - if (++join->send_records >= join->thd->select_limit && + if (++join->send_records >= join->unit->select_limit_cnt && join->do_send_rows) { if (join->select_options & OPTION_FOUND_ROWS) @@ -5418,11 +6664,11 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), TABLE *table=jt->table; join->select_options ^= OPTION_FOUND_ROWS; - if (table->record_pointers || - (table->io_cache && my_b_inited(table->io_cache))) + if (table->sort.record_pointers || + (table->sort.io_cache && my_b_inited(table->sort.io_cache))) { /* Using filesort */ - join->send_records= table->found_records; + join->send_records= table->sort.found_records; } else { @@ -5432,8 +6678,9 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), } else { - join->do_send_rows=0; - join->thd->select_limit = HA_POS_ERROR; + join->do_send_rows= 0; + if (join->unit->fake_select_lex) + join->unit->fake_select_lex->select_limit= HA_POS_ERROR; DBUG_RETURN(0); } } @@ -5471,8 +6718,12 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), { if (join->having && join->having->val_int() == 0) error= -1; // Didn't satisfy having - else if (join->do_send_rows) - error=join->procedure->send_row(*join->fields) ? 1 : 0; + else + { + if (join->do_send_rows) + error=join->procedure->send_row(*join->fields) ? 
1 : 0; + join->send_records++; + } if (end_of_records && join->procedure->end_of_records()) error= 1; // Fatal error } @@ -5481,30 +6732,33 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), if (!join->first_record) { /* No matching rows for group function */ - clear_tables(join); - copy_fields(&join->tmp_table_param); + join->clear(); } if (join->having && join->having->val_int() == 0) error= -1; // Didn't satisfy having - else if (join->do_send_rows) - error=join->result->send_data(*join->fields) ? 1 : 0; + else + { + if (join->do_send_rows) + error=join->result->send_data(*join->fields) ? 1 : 0; + join->send_records++; + } + if (join->rollup.state != ROLLUP::STATE_NONE && error <= 0) + { + if (join->rollup_send_data((uint) (idx+1))) + error= 1; + } } if (error > 0) DBUG_RETURN(-1); /* purecov: inspected */ if (end_of_records) - { - if (!error) - join->send_records++; DBUG_RETURN(0); - } - if (!error && - ++join->send_records >= join->thd->select_limit && + if (join->send_records >= join->unit->select_limit_cnt && join->do_send_rows) { if (!(join->select_options & OPTION_FOUND_ROWS)) DBUG_RETURN(-3); // Abort nicely join->do_send_rows=0; - join->thd->select_limit = HA_POS_ERROR; + join->unit->select_limit_cnt = HA_POS_ERROR; } } } @@ -5518,7 +6772,8 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), if (idx < (int) join->send_group_parts) { copy_fields(&join->tmp_table_param); - init_sum_functions(join->sum_funcs); + if (init_sum_functions(join->sum_funcs, join->sum_funcs_end[idx+1])) + DBUG_RETURN(-1); if (join->procedure) join->procedure->add(); DBUG_RETURN(0); @@ -5561,7 +6816,7 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), Item *item= *group->item; if (item->maybe_null) { - Field *field=item->tmp_table_field(); + Field *field=item->get_tmp_table_field(); field->ptr[-1]= (byte) (field->is_null() ? 1 : 0); } } @@ -5586,7 +6841,7 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), if (!(join->select_options & OPTION_FOUND_ROWS)) DBUG_RETURN(-3); join->do_send_rows=0; - join->thd->select_limit = HA_POS_ERROR; + join->unit->select_limit_cnt = HA_POS_ERROR; DBUG_RETURN(0); } } @@ -5622,16 +6877,18 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), { Item *item= *group->item; item->save_org_in_field(group->field); +#ifdef EMBEDDED_LIBRARY + join->thd->net.last_errno= 0; +#endif /* Store in the used key if the field was 0 */ if (item->maybe_null) group->buff[-1]=item->null_value ? 
1 : 0; } - // table->file->index_init(0); if (!table->file->index_read(table->record[1], join->tmp_table_param.group_buff,0, HA_READ_KEY_EXACT)) { /* Update old record */ - restore_record(table,1); + restore_record(table,record[1]); update_tmptable_sum_func(join->sum_funcs,table); if ((error=table->file->update_row(table->record[1], table->record[0]))) @@ -5657,7 +6914,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), error, 0)) DBUG_RETURN(-1); // Not a table_is_full error /* Change method to update rows */ - table->file->index_init(0); + table->file->ha_index_init(0); join->join_tab[join->tables-1].next_select=end_unique_update; } join->send_records++; @@ -5700,7 +6957,7 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), table->file->print_error(error,MYF(0)); /* purecov: inspected */ DBUG_RETURN(-1); /* purecov: inspected */ } - restore_record(table,1); + restore_record(table,record[1]); update_tmptable_sum_func(join->sum_funcs,table); if ((error=table->file->update_row(table->record[1], table->record[0]))) @@ -5735,26 +6992,37 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), { if (join->procedure) join->procedure->end_group(); - if (idx < (int) join->send_group_parts) + int send_group_parts= join->send_group_parts; + if (idx < send_group_parts) { if (!join->first_record) { /* No matching rows for group function */ - clear_tables(join); - copy_fields(&join->tmp_table_param); + join->clear(); } - copy_sum_funcs(join->sum_funcs); - if (!join->having || join->having->val_int()) + copy_sum_funcs(join->sum_funcs, + join->sum_funcs_end[send_group_parts]); + if (join->having && join->having->val_int() == 0) + error= -1; + else if ((error=table->file->write_row(table->record[0]))) { - if ((error=table->file->write_row(table->record[0]))) - { - if (create_myisam_from_heap(join->thd, table, - &join->tmp_table_param, error, 0)) - DBUG_RETURN(-1); // Not a table_is_full error - } - else - join->send_records++; + if (create_myisam_from_heap(join->thd, table, + &join->tmp_table_param, + error, 0)) + DBUG_RETURN(-1); + /* + If table->file->write_row() was failed because of 'out of memory' + and tmp table succesfully created, reset error. 
+ */ + error=0; + } + if (join->rollup.state != ROLLUP::STATE_NONE && error <= 0) + { + if (join->rollup_write_data((uint) (idx+1), table)) + error= 1; } + if (error > 0) + DBUG_RETURN(-1); if (end_of_records) DBUG_RETURN(0); } @@ -5770,7 +7038,8 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), { copy_fields(&join->tmp_table_param); copy_funcs(join->tmp_table_param.items_to_copy); - init_sum_functions(join->sum_funcs); + if (init_sum_functions(join->sum_funcs, join->sum_funcs_end[idx+1])) + DBUG_RETURN(-1); if (join->procedure) join->procedure->add(); DBUG_RETURN(0); @@ -5810,8 +7079,11 @@ static bool test_if_ref(Item_field *left_item,Item *right_item) /* We can remove binary fields and numerical fields except float, as float comparison isn't 100 % secure + We have to keep binary strings to be able to check for end spaces */ if (field->binary() && + field->real_type() != FIELD_TYPE_STRING && + field->real_type() != FIELD_TYPE_VAR_STRING && (field->type() != FIELD_TYPE_FLOAT || field->decimals() == 0)) { return !store_val_in_field(field,right_item); @@ -5824,7 +7096,7 @@ static bool test_if_ref(Item_field *left_item,Item *right_item) static COND * -make_cond_for_table(COND *cond,table_map tables,table_map used_table) +make_cond_for_table(COND *cond, table_map tables, table_map used_table) { if (used_table && !(cond->used_tables() & used_table)) return (COND*) 0; // Already checked @@ -5850,7 +7122,13 @@ make_cond_for_table(COND *cond,table_map tables,table_map used_table) case 1: return new_cond->argument_list()->head(); default: - new_cond->used_tables_cache=((Item_cond*) cond)->used_tables_cache & + /* + Item_cond_and do not need fix_fields for execution, its parameters + are fixed or do not need fix_fields, too + */ + new_cond->quick_fix_field(); + new_cond->used_tables_cache= + ((Item_cond_and*) cond)->used_tables_cache & tables; return new_cond; } @@ -5869,7 +7147,12 @@ make_cond_for_table(COND *cond,table_map tables,table_map used_table) return (COND*) 0; // Always true new_cond->argument_list()->push_back(fix); } - new_cond->used_tables_cache=((Item_cond_or*) cond)->used_tables_cache; + /* + Item_cond_and do not need fix_fields for execution, its parameters + are fixed or do not need fix_fields, too + */ + new_cond->quick_fix_field(); + new_cond->used_tables_cache= ((Item_cond_or*) cond)->used_tables_cache; new_cond->top_level_item(); return new_cond; } @@ -5910,6 +7193,9 @@ make_cond_for_table(COND *cond,table_map tables,table_map used_table) static Item * part_of_refkey(TABLE *table,Field *field) { + if (!table->reginfo.join_tab) + return (Item*) 0; // field from outer non-select (UPDATE,...) + uint ref_parts=table->reginfo.join_tab->ref.key_parts; if (ref_parts) { @@ -5927,10 +7213,23 @@ part_of_refkey(TABLE *table,Field *field) /***************************************************************************** Test if one can use the key to resolve ORDER BY - Returns: 1 if key is ok. - 0 if key can't be used - -1 if reverse key can be used - used_key_parts is set to key parts used if length != 0 + + SYNOPSIS + test_if_order_by_key() + order Sort order + table Table to sort + idx Index to check + used_key_parts Return value for used key parts. + + + NOTES + used_key_parts is set to correct key parts used if return value != 0 + (On other cases, used_key_part may be changed) + + RETURN + 1 key is ok. 
+ 0 Key can't be used + -1 Reverse key can be used *****************************************************************************/ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx, @@ -5941,6 +7240,7 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx, key_part_end=key_part+table->key_info[idx].key_parts; key_part_map const_key_parts=table->const_key_parts[idx]; int reverse=0; + DBUG_ENTER("test_if_order_by_key"); for (; order ; order=order->next, const_key_parts>>=1) { @@ -5951,43 +7251,115 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx, Skip key parts that are constants in the WHERE clause. These are already skipped in the ORDER BY by const_expression_in_where() */ - while (const_key_parts & 1) - { - key_part++; const_key_parts>>=1; - } + for (; const_key_parts & 1 ; const_key_parts>>= 1) + key_part++; + if (key_part == key_part_end || key_part->field != field) - return 0; + DBUG_RETURN(0); /* set flag to 1 if we can use read-next on key, else to -1 */ - flag=(order->asc == !(key_part->key_part_flag & HA_REVERSE_SORT)) - ? 1 : -1; + flag= ((order->asc == !(key_part->key_part_flag & HA_REVERSE_SORT)) ? + 1 : -1); if (reverse && flag != reverse) - return 0; + DBUG_RETURN(0); reverse=flag; // Remember if reverse key_part++; } *used_key_parts= (uint) (key_part - table->key_info[idx].key_part); - return reverse; + if (reverse == -1 && !(table->file->index_flags(idx, *used_key_parts-1, 1) & + HA_READ_PREV)) + reverse= 0; // Index can't be used + DBUG_RETURN(reverse); } -static uint find_shortest_key(TABLE *table, key_map usable_keys) + +static uint find_shortest_key(TABLE *table, const key_map *usable_keys) { uint min_length= (uint) ~0; uint best= MAX_KEY; - for (uint nr=0; usable_keys ; usable_keys>>=1, nr++) + if (!usable_keys->is_clear_all()) { - if (usable_keys & 1) + for (uint nr=0; nr < table->keys ; nr++) { - if (table->key_info[nr].key_length < min_length) + if (usable_keys->is_set(nr)) { - min_length=table->key_info[nr].key_length; - best=nr; + if (table->key_info[nr].key_length < min_length) + { + min_length=table->key_info[nr].key_length; + best=nr; + } } } } return best; } +/* + Test if a second key is the subkey of the first one. + + SYNOPSIS + is_subkey() + key_part First key parts + ref_key_part Second key parts + ref_key_part_end Last+1 part of the second key + + NOTE + Second key MUST be shorter than the first one. 
+ + RETURN + 1 is a subkey + 0 no sub key +*/ + +inline bool +is_subkey(KEY_PART_INFO *key_part, KEY_PART_INFO *ref_key_part, + KEY_PART_INFO *ref_key_part_end) +{ + for (; ref_key_part < ref_key_part_end; key_part++, ref_key_part++) + if (!key_part->field->eq(ref_key_part->field)) + return 0; + return 1; +} + +/* + Test if we can use one of the 'usable_keys' instead of 'ref' key for sorting + + SYNOPSIS + test_if_subkey() + ref Number of key, used for WHERE clause + usable_keys Keys for testing + + RETURN + MAX_KEY If we can't use other key + the number of found key Otherwise +*/ + +static uint +test_if_subkey(ORDER *order, TABLE *table, uint ref, uint ref_key_parts, + const key_map *usable_keys) +{ + uint nr; + uint min_length= (uint) ~0; + uint best= MAX_KEY; + uint not_used; + KEY_PART_INFO *ref_key_part= table->key_info[ref].key_part; + KEY_PART_INFO *ref_key_part_end= ref_key_part + ref_key_parts; + + for (nr= 0 ; nr < table->keys ; nr++) + { + if (usable_keys->is_set(nr) && + table->key_info[nr].key_length < min_length && + table->key_info[nr].key_parts >= ref_key_parts && + is_subkey(table->key_info[nr].key_part, ref_key_part, + ref_key_part_end) && + test_if_order_by_key(order, table, nr, ¬_used)) + { + min_length= table->key_info[nr].key_length; + best= nr; + } + } + return best; +} /* Test if we can skip the ORDER BY by using an index. @@ -6005,10 +7377,12 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, bool no_changes) { int ref_key; + uint ref_key_parts; TABLE *table=tab->table; SQL_SELECT *select=tab->select; key_map usable_keys; DBUG_ENTER("test_if_skip_sort_order"); + LINT_INIT(ref_key_parts); /* Check which keys can be used to resolve ORDER BY. @@ -6020,17 +7394,29 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, { if ((*tmp_order->item)->type() != Item::FIELD_ITEM) { - usable_keys=0; - break; + usable_keys.clear_all(); + DBUG_RETURN(0); } - usable_keys&=((Item_field*) (*tmp_order->item))->field->part_of_sortkey; + usable_keys.intersect(((Item_field*) (*tmp_order->item))-> + field->part_of_sortkey); + if (usable_keys.is_clear_all()) + DBUG_RETURN(0); // No usable keys } ref_key= -1; - if (tab->ref.key >= 0) // Constant range in WHERE - ref_key=tab->ref.key; + /* Test if constant range in WHERE */ + if (tab->ref.key >= 0 && tab->ref.key_parts) + { + ref_key= tab->ref.key; + ref_key_parts= tab->ref.key_parts; + if (tab->type == JT_REF_OR_NULL || tab->type == JT_FT) + DBUG_RETURN(0); + } else if (select && select->quick) // Range found by opt_range - ref_key=select->quick->index; + { + ref_key= select->quick->index; + ref_key_parts= select->quick->used_key_parts; + } if (ref_key >= 0) { @@ -6039,8 +7425,66 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, */ int order_direction; uint used_key_parts; + if (!usable_keys.is_set(ref_key)) + { + /* + We come here when ref_key is not among usable_keys + */ + uint new_ref_key; + /* + If using index only read, only consider other possible index only + keys + */ + if (table->used_keys.is_set(ref_key)) + usable_keys.intersect(table->used_keys); + if ((new_ref_key= test_if_subkey(order, table, ref_key, ref_key_parts, + &usable_keys)) < MAX_KEY) + { + /* Found key that can be used to retrieve data in sorted order */ + if (tab->ref.key >= 0) + { + /* + We'll use ref access method on key new_ref_key. In general case + the index search tuple for new_ref_key will be different (e.g. + when one index is defined as (part1, part2, ...) 
and another as + (part1, part2(N), ...) and the WHERE clause contains + "part1 = const1 AND part2=const2". + So we build tab->ref from scratch here. + */ + KEYUSE *keyuse= tab->keyuse; + while (keyuse->key != new_ref_key && keyuse->table == tab->table) + keyuse++; + if (create_ref_for_key(tab->join, tab, keyuse, + tab->join->const_table_map)) + DBUG_RETURN(0); + } + else + { + /* + The range optimizer constructed QUICK_RANGE for ref_key, and + we want to use instead new_ref_key as the index. We can't + just change the index of the quick select, because this may + result in an incosistent QUICK_SELECT object. Below we + create a new QUICK_SELECT from scratch so that all its + parameres are set correctly by the range optimizer. + */ + key_map new_ref_key_map; + new_ref_key_map.clear_all(); // Force the creation of quick select + new_ref_key_map.set_bit(new_ref_key); // only for new_ref_key. + + if (select->test_quick_select(tab->join->thd, new_ref_key_map, 0, + (tab->join->select_options & + OPTION_FOUND_ROWS) ? + HA_POS_ERROR : + tab->join->unit->select_limit_cnt,0) <= + 0) + DBUG_RETURN(0); + } + ref_key= new_ref_key; + } + } /* Check if we get the rows in requested sorted order by using the key */ - if ((usable_keys & ((key_map) 1 << ref_key)) && + if (usable_keys.is_set(ref_key) && (order_direction = test_if_order_by_key(order,table,ref_key, &used_key_parts))) { @@ -6054,8 +7498,6 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, */ if (!select->quick->reverse_sorted()) { - if (table->file->index_flags(ref_key) & HA_NOT_READ_PREFIX_LAST) - DBUG_RETURN(0); // Use filesort // ORDER BY range_key DESC QUICK_SELECT_DESC *tmp=new QUICK_SELECT_DESC(select->quick, used_key_parts); @@ -6076,13 +7518,13 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, Use a traversal function that starts by reading the last row with key part (A) and then traverse the index backwards. */ - if (table->file->index_flags(ref_key) & HA_NOT_READ_PREFIX_LAST) - DBUG_RETURN(0); // Use filesort tab->read_first_record= join_read_last_key; tab->read_record.read_record= join_read_prev_same; /* fall through */ } } + else if (select && select->quick) + select->quick->sorted= 1; DBUG_RETURN(1); /* No need to sort */ } } @@ -6091,7 +7533,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, /* check if we can use a key to resolve the group */ /* Tables using JT_NEXT are handled here */ uint nr; - key_map keys_to_use=~0,keys=usable_keys; + key_map keys; /* If not used with LIMIT, only use keys if the whole query can be @@ -6099,34 +7541,37 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, retrieving all rows through an index. */ if (select_limit >= table->file->records) - keys_to_use= (table->used_keys |table->file->keys_to_use_for_scanning()); + { + keys= *table->file->keys_to_use_for_scanning(); + keys.merge(table->used_keys); - /* - We are adding here also the index speified in FORCE INDEX clause, - if any. - This is to allow users to use index in ORDER BY. - */ - - if (table->force_index) - keys_to_use|= table->keys_in_use_for_query; - keys&= keys_to_use; + /* + We are adding here also the index specified in FORCE INDEX clause, + if any. + This is to allow users to use index in ORDER BY. 
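The loop that follows feeds each remaining candidate index to test_if_order_by_key(), which decides whether a plain index scan already returns rows in the requested order, letting the filesort in create_sort_index() be skipped. Its core is a walk over the ORDER BY list against the index key parts that insists on one consistent scan direction. The sketch below shows that check in isolation; names with a _like suffix are illustrative, and the real function additionally skips key parts that are constant in the WHERE clause and verifies HA_READ_PREV support before accepting a reverse scan.

    #include <cstddef>
    #include <string>
    #include <vector>

    struct OrderItem { std::string column; bool asc; };

    // Returns 1 if a forward index scan delivers the requested order,
    // -1 if a backward scan does, and 0 if the index can't be used.
    int test_if_order_by_key_like(const std::vector<OrderItem> &order,
                                  const std::vector<std::string> &index_columns) {
      std::size_t part = 0;
      int direction = 0;                     // 0 = not decided yet
      for (const OrderItem &item : order) {
        if (part == index_columns.size() || index_columns[part] != item.column)
          return 0;                          // ORDER BY column is not the next key part
        int flag = item.asc ? 1 : -1;
        if (direction && flag != direction)
          return 0;                          // mixed ASC/DESC: one scan can't do it
        direction = flag;
        ++part;
      }
      return direction ? direction : 1;
    }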
+ */ + if (table->force_index) + keys.merge(table->keys_in_use_for_query); + keys.intersect(usable_keys); + } + else + keys= usable_keys; - for (nr=0; keys ; keys>>=1, nr++) + for (nr=0; nr < table->keys ; nr++) { uint not_used; - if (keys & 1) + if (keys.is_set(nr)) { int flag; - if ((flag=test_if_order_by_key(order, table, nr, ¬_used))) + if ((flag= test_if_order_by_key(order, table, nr, ¬_used))) { if (!no_changes) { tab->index=nr; tab->read_first_record= (flag > 0 ? join_read_first: join_read_last); - table->file->index_init(nr); tab->type=JT_NEXT; // Read with index_first(), index_next() - if (table->used_keys & ((key_map) 1 << nr)) + if (table->used_keys.is_set(nr)) { table->key_read=1; table->file->extra(HA_EXTRA_KEYREAD); @@ -6141,31 +7586,58 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, } -/***************************************************************************** +/* If not selecting by given key, create an index how records should be read - return: 0 ok - -1 some fatal error - 1 no records -*****************************************************************************/ + + SYNOPSIS + create_sort_index() + thd Thread handler + tab Table to sort (in join structure) + order How table should be sorted + filesort_limit Max number of rows that needs to be sorted + select_limit Max number of rows in final output + Used to decide if we should use index or not + + + IMPLEMENTATION + - If there is an index that can be used, 'tab' is modified to use + this index. + - If no index, create with filesort() an index file that can be used to + retrieve rows in order (should be done with 'read_record'). + The sorted data is stored in tab->table and will be freed when calling + free_io_cache(tab->table). + + RETURN VALUES + 0 ok + -1 Some fatal error + 1 No records +*/ static int -create_sort_index(JOIN_TAB *tab, ORDER *order, ha_rows filesort_limit, - ha_rows select_limit) +create_sort_index(THD *thd, JOIN *join, ORDER *order, + ha_rows filesort_limit, ha_rows select_limit) { SORT_FIELD *sortorder; uint length; ha_rows examined_rows; - TABLE *table=tab->table; - SQL_SELECT *select=tab->select; + TABLE *table; + SQL_SELECT *select; + JOIN_TAB *tab; DBUG_ENTER("create_sort_index"); + if (join->tables == join->const_tables) + DBUG_RETURN(0); // One row, no need to sort + tab= join->join_tab + join->const_tables; + table= tab->table; + select= tab->select; + if (test_if_skip_sort_order(tab,order,select_limit,0)) DBUG_RETURN(0); if (!(sortorder=make_unireg_sortorder(order,&length))) goto err; /* purecov: inspected */ /* It's not fatal if the following alloc fails */ - table->io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE), - MYF(MY_WME | MY_ZEROFILL)); + table->sort.io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE), + MYF(MY_WME | MY_ZEROFILL)); table->status=0; // May be wrong if quick_select // If table has a range, move it to select @@ -6190,18 +7662,22 @@ create_sort_index(JOIN_TAB *tab, ORDER *order, ha_rows filesort_limit, For impossible ranges (like when doing a lookup on NULL on a NOT NULL field, quick will contain an empty record set. */ - if (!(select->quick=get_ft_or_quick_select_for_ref(tab->join->thd, - table, tab))) + if (!(select->quick= (tab->type == JT_FT ? 
+ new FT_SELECT(thd, table, tab->ref.key) : + get_quick_select_for_ref(thd, table, &tab->ref)))) goto err; } } if (table->tmp_table) table->file->info(HA_STATUS_VARIABLE); // Get record count - table->found_records=filesort(table,sortorder,length, - select, 0L, filesort_limit, &examined_rows); - tab->records=table->found_records; // For SQL_CALC_ROWS - delete select; // filesort did select - tab->select=0; + table->sort.found_records=filesort(thd, table,sortorder, length, + select, filesort_limit, &examined_rows); + tab->records= table->sort.found_records; // For SQL_CALC_ROWS + if (select) + { + select->cleanup(); // filesort did select + tab->select= 0; + } tab->select_cond=0; tab->type=JT_ALL; // Read with normal read_record tab->read_first_record= join_init_read_record; @@ -6211,7 +7687,7 @@ create_sort_index(JOIN_TAB *tab, ORDER *order, ha_rows filesort_limit, table->key_read=0; table->file->extra(HA_EXTRA_NO_KEYREAD); } - DBUG_RETURN(table->found_records == HA_POS_ERROR); + DBUG_RETURN(table->sort.found_records == HA_POS_ERROR); err: DBUG_RETURN(-1); } @@ -6237,8 +7713,10 @@ static bool fix_having(JOIN *join, Item **having) if (!table->select->cond) table->select->cond=sort_table_cond; else // This should never happen - if (!(table->select->cond=new Item_cond_and(table->select->cond, - sort_table_cond))) + if (!(table->select->cond= new Item_cond_and(table->select->cond, + sort_table_cond)) || + table->select->cond->fix_fields(join->thd, join->tables_list, + &table->select->cond)) return 1; table->select_cond=table->select->cond; table->select_cond->top_level_item(); @@ -6308,13 +7786,13 @@ remove_duplicates(JOIN *join, TABLE *entry,List<Item> &fields, Item *having) Item *item; while ((item=it++)) { - if (item->tmp_table_field() && ! item->const_item()) + if (item->get_tmp_table_field() && ! 
item->const_item()) field_count++; } if (!field_count && !(join->select_options & OPTION_FOUND_ROWS)) { // only const items with no OPTION_FOUND_ROWS - join->thd->select_limit=1; // Only send first row + join->unit->select_limit_cnt= 1; // Only send first row DBUG_RETURN(0); } Field **first_field=entry->field+entry->fields - field_count; @@ -6326,7 +7804,7 @@ remove_duplicates(JOIN *join, TABLE *entry,List<Item> &fields, Item *having) entry->file->info(HA_STATUS_VARIABLE); if (entry->db_type == DB_TYPE_HEAP || (!entry->blob_fields && - ((ALIGN_SIZE(reclength) +sizeof(HASH_LINK)) * entry->file->records < + ((ALIGN_SIZE(reclength) + HASH_OVERHEAD) * entry->file->records < thd->variables.sortbuff_size))) error=remove_dup_with_hash_index(join->thd, entry, field_count, first_field, @@ -6353,7 +7831,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, org_record=(char*) (record=table->record[0])+offset; new_record=(char*) table->record[1]+offset; - file->rnd_init(); + file->ha_rnd_init(1); error=file->rnd_next(record); for (;;) { @@ -6380,7 +7858,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, } if (copy_blobs(first_field)) { - my_error(ER_OUT_OF_SORTMEMORY,MYF(0)); + my_error(ER_OUTOFMEMORY,MYF(0)); error=0; goto err; } @@ -6438,8 +7916,8 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table, { byte *key_buffer, *key_pos, *record=table->record[0]; int error; - handler *file=table->file; - ulong extra_length=ALIGN_SIZE(key_length)-key_length; + handler *file= table->file; + ulong extra_length= ALIGN_SIZE(key_length)-key_length; uint *field_lengths,*field_length; HASH hash; DBUG_ENTER("remove_dup_with_hash_index"); @@ -6452,22 +7930,35 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table, (uint) (field_count*sizeof(*field_lengths)), NullS)) DBUG_RETURN(1); - if (hash_init(&hash, (uint) file->records, 0, key_length, - (hash_get_key) 0, 0, 0)) - { - my_free((char*) key_buffer,MYF(0)); - DBUG_RETURN(1); - } + { Field **ptr; + ulong total_length= 0; for (ptr= first_field, field_length=field_lengths ; *ptr ; ptr++) - (*field_length++)= (*ptr)->pack_length(); + { + uint length= (*ptr)->pack_length(); + (*field_length++)= length; + total_length+= length; + } + DBUG_PRINT("info",("field_count: %u key_length: %lu total_length: %lu", + field_count, key_length, total_length)); + DBUG_ASSERT(total_length <= key_length); + key_length= total_length; + extra_length= ALIGN_SIZE(key_length)-key_length; + } + + if (hash_init(&hash, &my_charset_bin, (uint) file->records, 0, + key_length, (hash_get_key) 0, 0, 0)) + { + my_free((char*) key_buffer,MYF(0)); + DBUG_RETURN(1); } - file->rnd_init(); + file->ha_rnd_init(1); key_pos=key_buffer; for (;;) { + byte *org_key_pos; if (thd->killed) { my_error(ER_SERVER_SHUTDOWN,MYF(0)); @@ -6490,6 +7981,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table, } /* copy fields to key buffer */ + org_key_pos= key_pos; field_length=field_lengths; for (Field **ptr= first_field ; *ptr ; ptr++) { @@ -6497,27 +7989,27 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table, key_pos+= *field_length++; } /* Check if it exists before */ - if (hash_search(&hash,key_pos-key_length,key_length)) + if (hash_search(&hash, org_key_pos, key_length)) { /* Duplicated found ; Remove the row */ if ((error=file->delete_row(record))) goto err; } else - (void) hash_insert(&hash, key_pos-key_length); + (void) my_hash_insert(&hash, org_key_pos); key_pos+=extra_length; } my_free((char*) 
key_buffer,MYF(0)); hash_free(&hash); file->extra(HA_EXTRA_NO_CACHE); - (void) file->rnd_end(); + (void) file->ha_rnd_end(); DBUG_RETURN(0); err: my_free((char*) key_buffer,MYF(0)); hash_free(&hash); file->extra(HA_EXTRA_NO_CACHE); - (void) file->rnd_end(); + (void) file->ha_rnd_end(); if (error) file->print_error(error,MYF(0)); DBUG_RETURN(1); @@ -6544,7 +8036,7 @@ SORT_FIELD *make_unireg_sortorder(ORDER *order, uint *length) pos->field= ((Item_field*) (*order->item))->field; else if (order->item[0]->type() == Item::SUM_FUNC_ITEM && !order->item[0]->const_item()) - pos->field= ((Item_sum*) order->item[0])->tmp_table_field(); + pos->field= ((Item_sum*) order->item[0])->get_tmp_table_field(); else if (order->item[0]->type() == Item::COPY_STR_ITEM) { // Blob patch pos->item= ((Item_copy_string*) (*order->item))->item; @@ -6644,7 +8136,6 @@ join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count) } } - cache->records=0; cache->ptr_record= (uint) ~0; cache->length=length+blobs*sizeof(char*); cache->blobs=blobs; *blob_ptr=0; /* End sequentel */ @@ -6652,7 +8143,7 @@ join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count) if (!(cache->buff=(uchar*) my_malloc(size,MYF(0)))) DBUG_RETURN(1); /* Don't use cache */ /* purecov: inspected */ cache->end=cache->buff+size; - reset_cache(cache); + reset_cache_write(cache); DBUG_RETURN(0); } @@ -6674,7 +8165,7 @@ used_blob_length(CACHE_FIELD **ptr) static bool store_record_in_cache(JOIN_CACHE *cache) { - ulong length; + uint length; uchar *pos; CACHE_FIELD *copy,*end_field; bool last_record; @@ -6698,12 +8189,14 @@ store_record_in_cache(JOIN_CACHE *cache) { if (last_record) { - copy->blob_field->get_image((char*) pos,copy->length+sizeof(char*)); + copy->blob_field->get_image((char*) pos,copy->length+sizeof(char*), + copy->blob_field->charset()); pos+=copy->length+sizeof(char*); } else { - copy->blob_field->get_image((char*) pos,copy->length); // blob length + copy->blob_field->get_image((char*) pos,copy->length, // blob length + copy->blob_field->charset()); memcpy(pos+copy->length,copy->str,copy->blob_length); // Blob data pos+=copy->length+copy->blob_length; } @@ -6717,9 +8210,9 @@ store_record_in_cache(JOIN_CACHE *cache) end > str && end[-1] == ' ' ; end--) ; length=(uint) (end-str); - memcpy(pos+1,str,length); - *pos=(uchar) length; - pos+=length+1; + memcpy(pos+2, str, length); + int2store(pos, length); + pos+= length+2; } else { @@ -6734,13 +8227,21 @@ store_record_in_cache(JOIN_CACHE *cache) static void -reset_cache(JOIN_CACHE *cache) +reset_cache_read(JOIN_CACHE *cache) { cache->record_nr=0; cache->pos=cache->buff; } +static void reset_cache_write(JOIN_CACHE *cache) +{ + reset_cache_read(cache); + cache->records= 0; + cache->ptr_record= (uint) ~0; +} + + static void read_cached_record(JOIN_TAB *tab) { @@ -6760,7 +8261,8 @@ read_cached_record(JOIN_TAB *tab) { if (last_record) { - copy->blob_field->set_image((char*) pos,copy->length+sizeof(char*)); + copy->blob_field->set_image((char*) pos,copy->length+sizeof(char*), + copy->blob_field->charset()); pos+=copy->length+sizeof(char*); } else @@ -6773,9 +8275,10 @@ read_cached_record(JOIN_TAB *tab) { if (copy->strip) { - memcpy(copy->str,pos+1,length=(uint) *pos); - memset(copy->str+length,' ',copy->length-length); - pos+=1+length; + length= uint2korr(pos); + memcpy(copy->str, pos+2, length); + memset(copy->str+length, ' ', copy->length-length); + pos+= 2 + length; } else { @@ -6797,7 +8300,8 @@ cmp_buffer_with_ref(JOIN_TAB *tab) { memcpy(tab->ref.key_buff2, tab->ref.key_buff, tab->ref.key_length); 
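cmp_buffer_with_ref() and cp_buffer_from_ref() shown here act as a one-entry cache for eq_ref access: the lookup key is rebuilt from the ref items for every outer row, but the index is only probed again when that key differs from the previous one (cp_buffer_from_ref() also saves and restores count_cuted_fields so building the key never raises truncation warnings). A stripped-down sketch of the "skip the lookup when the key is unchanged" idea, with a hypothetical lookup callback instead of handler::index_read():

    #include <optional>
    #include <string>

    struct RefCache {
      std::optional<std::string> last_key;   // plays the role of key_buff2
      std::string cached_row;
    };

    // 'lookup' is whatever actually probes the index (hypothetical callback).
    template <class Lookup>
    const std::string &read_keyed_row(RefCache &cache, const std::string &key,
                                      Lookup lookup) {
      if (!cache.last_key || *cache.last_key != key) {   // ~ cmp_buffer_with_ref()
        cache.cached_row = lookup(key);                  // ~ handler::index_read()
        cache.last_key   = key;
      }
      return cache.cached_row;                           // reuse the last record as-is
    }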
} - if ((tab->ref.key_err=cp_buffer_from_ref(&tab->ref)) || diff) + if ((tab->ref.key_err= cp_buffer_from_ref(tab->join->thd, &tab->ref)) || + diff) return 1; return memcmp(tab->ref.key_buff2, tab->ref.key_buff, tab->ref.key_length) != 0; @@ -6805,11 +8309,19 @@ cmp_buffer_with_ref(JOIN_TAB *tab) bool -cp_buffer_from_ref(TABLE_REF *ref) +cp_buffer_from_ref(THD *thd, TABLE_REF *ref) { + enum enum_check_fields save_count_cuted_fields= thd->count_cuted_fields; + thd->count_cuted_fields= CHECK_FIELD_IGNORE; for (store_key **copy=ref->key_copy ; *copy ; copy++) - if ((*copy)->copy()) - return 1; // Something went wrong + { + if ((*copy)->copy() & 1) + { + thd->count_cuted_fields= save_count_cuted_fields; + return 1; // Something went wrong + } + } + thd->count_cuted_fields= save_count_cuted_fields; return 0; } @@ -6825,79 +8337,122 @@ cp_buffer_from_ref(TABLE_REF *ref) */ static int -find_order_in_list(THD *thd,TABLE_LIST *tables,ORDER *order,List<Item> &fields, +find_order_in_list(THD *thd, Item **ref_pointer_array, + TABLE_LIST *tables,ORDER *order, List<Item> &fields, List<Item> &all_fields) { - if ((*order->item)->type() == Item::INT_ITEM) + Item *it= *order->item; + if (it->type() == Item::INT_ITEM) { /* Order by position */ - Item *item=0; - List_iterator<Item> li(fields); - - for (uint count= (uint) ((Item_int*) (*order->item))->value ; - count-- && (item=li++) ;) ; - if (!item) + uint count= (uint) it->val_int(); + if (!count || count > fields.elements) { my_printf_error(ER_BAD_FIELD_ERROR,ER(ER_BAD_FIELD_ERROR), - MYF(0),(*order->item)->full_name(), - thd->where); + MYF(0), it->full_name(), thd->where); return 1; } - order->item=li.ref(); - order->in_field_list=1; + order->item= ref_pointer_array + count-1; + order->in_field_list= 1; return 0; } - const char *save_where=thd->where; - thd->where=0; // No error if not found - Item **item=find_item_in_list(*order->item,fields); - thd->where=save_where; - if (item) + uint counter; + bool unaliased; + Item **item= find_item_in_list(it, fields, &counter, + REPORT_EXCEPT_NOT_FOUND, &unaliased); + if (!item) + return 1; + + if (item != (Item **)not_found_item) { - order->item=item; // use it + /* + If we have found field not by its alias in select list but by its + original field name, we should additionaly check if we have conflict + for this name (in case if we would perform lookup in all tables). + */ + if (unaliased && !it->fixed && it->fix_fields(thd, tables, order->item)) + return 1; + + order->item= ref_pointer_array + counter; order->in_field_list=1; return 0; } + order->in_field_list=0; - /* Allow lookup in select's item_list to find aliased fields */ - thd->lex.select_lex.is_item_list_lookup= 1; - if ((*order->item)->fix_fields(thd,tables) || thd->fatal_error) - { - thd->lex.select_lex.is_item_list_lookup= 0; - return 1; // Wrong field - } - thd->lex.select_lex.is_item_list_lookup= 0; - all_fields.push_front(*order->item); // Add new field to field list - order->item=(Item**) all_fields.head_ref(); + /* + We check it->fixed because Item_func_group_concat can put + arguments for which fix_fields already was called. + + 'it' reassigned in if condition because fix_field can change it. 
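The rewritten find_order_in_list() stops pointing ORDER BY entries at list nodes; every resolved item is placed in a slot of ref_pointer_array, and ORDER::item stores the address of that slot. The extra indirection means a later substitution (for example, replacing an expression with its tmp-table field) only has to overwrite one slot for ORDER BY, GROUP BY and the select list to all pick up the new item. A tiny, self-contained illustration, with Item and Order as simplified stand-ins:

    #include <cstdio>
    #include <vector>

    struct Item  { const char *name; };
    struct Order { Item **item; };              // points at a slot, not at an Item

    int main() {
      Item price   = {"price"};
      Item tmp_col = {"tmp_table.col0"};

      std::vector<Item *> ref_pointer_array = {&price};
      Order order_by = {&ref_pointer_array[0]};     // ORDER BY price

      ref_pointer_array[0] = &tmp_col;              // later: substitute tmp-table field

      std::printf("ORDER BY now uses: %s\n", (*order_by.item)->name);  // tmp_table.col0
      return 0;
    }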
+ */ + thd->lex->current_select->is_item_list_lookup= 1; + if (!it->fixed && + (it->fix_fields(thd, tables, order->item) || + (it= *order->item)->check_cols(1) || + thd->is_fatal_error)) + { + thd->lex->current_select->is_item_list_lookup= 0; + return 1; // Wrong field + } + thd->lex->current_select->is_item_list_lookup= 0; + uint el= all_fields.elements; + all_fields.push_front(it); // Add new field to field list + ref_pointer_array[el]= it; + order->item= ref_pointer_array + el; return 0; } - /* Change order to point at item in select list. If item isn't a number and doesn't exits in the select list, add it the the field list. */ -int setup_order(THD *thd,TABLE_LIST *tables,List<Item> &fields, - List<Item> &all_fields, ORDER *order) +int setup_order(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, + List<Item> &fields, List<Item> &all_fields, ORDER *order) { thd->where="order clause"; for (; order; order=order->next) { - if (find_order_in_list(thd,tables,order,fields,all_fields)) + if (find_order_in_list(thd, ref_pointer_array, tables, order, fields, + all_fields)) return 1; } return 0; } -static int -setup_group(THD *thd,TABLE_LIST *tables,List<Item> &fields, - List<Item> &all_fields, ORDER *order, bool *hidden_group_fields) +/* + Intitialize the GROUP BY list. + + SYNOPSIS + setup_group() + thd Thread handler + ref_pointer_array We store references to all fields that was not in + 'fields' here. + fields All fields in the select part. Any item in 'order' + that is part of these list is replaced by a pointer + to this fields. + all_fields Total list of all unique fields used by the select. + All items in 'order' that was not part of fields will + be added first to this list. + order The fields we should do GROUP BY on. + hidden_group_fields Pointer to flag that is set to 1 if we added any fields + to all_fields. + + RETURN + 0 ok + 1 error (probably out of memory) +*/ + +int +setup_group(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, + List<Item> &fields, List<Item> &all_fields, ORDER *order, + bool *hidden_group_fields) { *hidden_group_fields=0; if (!order) return 0; /* Everything is ok */ - if (thd->sql_mode & MODE_ONLY_FULL_GROUP_BY) + if (thd->variables.sql_mode & MODE_ONLY_FULL_GROUP_BY) { Item *item; List_iterator<Item> li(fields); @@ -6909,7 +8464,8 @@ setup_group(THD *thd,TABLE_LIST *tables,List<Item> &fields, thd->where="group statement"; for (; order; order=order->next) { - if (find_order_in_list(thd,tables,order,fields,all_fields)) + if (find_order_in_list(thd, ref_pointer_array, tables, order, fields, + all_fields)) return 1; (*order->item)->marker=1; /* Mark found */ if ((*order->item)->with_sum_func) @@ -6919,7 +8475,7 @@ setup_group(THD *thd,TABLE_LIST *tables,List<Item> &fields, return 1; } } - if (thd->sql_mode & MODE_ONLY_FULL_GROUP_BY) + if (thd->variables.sql_mode & MODE_ONLY_FULL_GROUP_BY) { /* Don't allow one to use fields that is not used in GROUP BY */ Item *item; @@ -6954,17 +8510,18 @@ setup_new_fields(THD *thd,TABLE_LIST *tables,List<Item> &fields, DBUG_ENTER("setup_new_fields"); thd->set_query_id=1; // Not really needed, but... 
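setup_group() a little further up enforces MODE_ONLY_FULL_GROUP_BY with a two-pass marking scheme: items named in GROUP BY get marker=1, and a second pass rejects any select-list item that is neither marked, nor a constant, nor under an aggregate function. The same rule in a few lines of standalone C++; SelectItem and the function name are illustrative:

    #include <set>
    #include <string>
    #include <vector>

    struct SelectItem {
      std::string name;
      bool is_aggregate;     // e.g. SUM(x)
      bool is_const;
    };

    // Returns the first offending column, or "" if the query passes the check.
    std::string check_only_full_group_by(const std::vector<SelectItem> &select_list,
                                         const std::set<std::string> &group_by) {
      for (const SelectItem &item : select_list) {
        bool marked = group_by.count(item.name) != 0;    // marker=1 in the patch
        if (!marked && !item.is_aggregate && !item.is_const)
          return item.name;                              // ~ ER_WRONG_FIELD_WITH_GROUP
      }
      return "";
    }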
- thd->where=0; // Don't give error - for (; new_field ; new_field=new_field->next) + uint counter; + bool not_used; + for (; new_field ; new_field= new_field->next) { - if ((item=find_item_in_list(*new_field->item,fields))) + if ((item= find_item_in_list(*new_field->item, fields, &counter, + IGNORE_ERRORS, ¬_used))) new_field->item=item; /* Change to shared Item */ else { thd->where="procedure list"; - if ((*new_field->item)->fix_fields(thd,tables)) + if ((*new_field->item)->fix_fields(thd, tables, new_field->item)) DBUG_RETURN(1); /* purecov: inspected */ - thd->where=0; all_fields.push_front(*new_field->item); new_field->item=all_fields.head_ref(); } @@ -6979,12 +8536,14 @@ setup_new_fields(THD *thd,TABLE_LIST *tables,List<Item> &fields, */ static ORDER * -create_distinct_group(THD *thd, ORDER *order_list, List<Item> &fields, +create_distinct_group(THD *thd, Item **ref_pointer_array, + ORDER *order_list, List<Item> &fields, bool *all_order_by_fields_used) { List_iterator<Item> li(fields); Item *item; ORDER *order,*group,**prev; + uint index= 0; *all_order_by_fields_used= 1; while ((item=li++)) @@ -7009,18 +8568,22 @@ create_distinct_group(THD *thd, ORDER *order_list, List<Item> &fields, li.rewind(); while ((item=li++)) { - if (item->const_item() || item->with_sum_func) - continue; - if (!item->marker) + if (!item->const_item() && !item->with_sum_func && !item->marker) { ORDER *ord=(ORDER*) thd->calloc(sizeof(ORDER)); if (!ord) return 0; - ord->item=li.ref(); + /* + We have here only field_list (not all_field_list), so we can use + simple indexing of ref_pointer_array (order in the array and in the + list are same) + */ + ord->item= ref_pointer_array + index; ord->asc=1; *prev=ord; prev= &ord->next; } + index++; } *prev=0; return group; @@ -7116,7 +8679,7 @@ get_sort_by_table(ORDER *a,ORDER *b,TABLE_LIST *tables) DBUG_RETURN(0); map|=a->item[0]->used_tables(); } - if (!map || (map & RAND_TABLE_BIT)) + if (!map || (map & (RAND_TABLE_BIT | OUTER_REF_TABLE_BIT))) DBUG_RETURN(0); for (; !(map & tables->table->map) ; tables=tables->next) ; @@ -7138,7 +8701,7 @@ calc_group_buffer(JOIN *join,ORDER *group) join->group= 1; for (; group ; group=group->next) { - Field *field=(*group->item)->tmp_table_field(); + Field *field=(*group->item)->get_tmp_table_field(); if (field) { if (field->type() == FIELD_TYPE_BLOB) @@ -7163,6 +8726,37 @@ calc_group_buffer(JOIN *join,ORDER *group) /* + allocate group fields or take prepared (cached) + + SYNOPSIS + make_group_fields() + main_join - join of current select + curr_join - current join (join of current select or temporary copy of it) + + RETURN + 0 - ok + 1 - failed +*/ + +static bool +make_group_fields(JOIN *main_join, JOIN *curr_join) +{ + if (main_join->group_fields_cache.elements) + { + curr_join->group_fields= main_join->group_fields_cache; + curr_join->sort_and_group= 1; + } + else + { + if (alloc_group_fields(curr_join, curr_join->group_list)) + return (1); + main_join->group_fields_cache= curr_join->group_fields; + } + return (0); +} + + +/* Get a list of buffers for saveing last group Groups are saved in reverse order for easyer check loop */ @@ -7174,7 +8768,7 @@ alloc_group_fields(JOIN *join,ORDER *group) { for (; group ; group=group->next) { - Item_buff *tmp=new_Item_buff(*group->item); + Item_buff *tmp=new_Item_buff(join->thd, *group->item); if (!tmp || join->group_fields.push_front(tmp)) return TRUE; } @@ -7187,6 +8781,7 @@ alloc_group_fields(JOIN *join,ORDER *group) static int test_if_group_changed(List<Item_buff> &list) { + 
DBUG_ENTER("test_if_group_changed"); List_iterator<Item_buff> li(list); int idx= -1,i; Item_buff *buff; @@ -7196,40 +8791,67 @@ test_if_group_changed(List<Item_buff> &list) if (buff->cmp()) idx=i; } - return idx; + DBUG_PRINT("info", ("idx: %d", idx)); + DBUG_RETURN(idx); } - /* Setup copy_fields to save fields at start of new group - Only FIELD_ITEM:s and FUNC_ITEM:s needs to be saved between groups. - Change old item_field to use a new field with points at saved fieldvalue - This function is only called before use of send_fields + + setup_copy_fields() + thd - THD pointer + param - temporary table parameters + ref_pointer_array - array of pointers to top elements of filed list + res_selected_fields - new list of items of select item list + res_all_fields - new list of all items + elements - number of elements in select item list + all_fields - all fields list + + DESCRIPTION + Setup copy_fields to save fields at start of new group + Only FIELD_ITEM:s and FUNC_ITEM:s needs to be saved between groups. + Change old item_field to use a new field with points at saved fieldvalue + This function is only called before use of send_fields + + RETURN + 0 - ok + !=0 - error */ bool -setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields) +setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param, + Item **ref_pointer_array, + List<Item> &res_selected_fields, List<Item> &res_all_fields, + uint elements, List<Item> &all_fields) { Item *pos; - List_iterator<Item> li(fields); - Copy_field *copy; + List_iterator_fast<Item> li(all_fields); + Copy_field *copy= NULL; + res_selected_fields.empty(); + res_all_fields.empty(); + List_iterator_fast<Item> itr(res_all_fields); + List<Item> extra_funcs; + uint i, border= all_fields.elements - elements; DBUG_ENTER("setup_copy_fields"); - if (!(copy=param->copy_field= new Copy_field[param->field_count])) + if (param->field_count && + !(copy=param->copy_field= new Copy_field[param->field_count])) goto err2; param->copy_funcs.empty(); - while ((pos=li++)) + for (i= 0; (pos= li++); i++) { if (pos->type() == Item::FIELD_ITEM) { - Item_field *item=(Item_field*) pos; + Item_field *item; + if (!(item= new Item_field(thd, ((Item_field*) pos)))) + goto err; + pos= item; if (item->field->flags & BLOB_FLAG) { - if (!(pos=new Item_copy_string(pos))) + if (!(pos= new Item_copy_string(pos))) goto err; - VOID(li.replace(pos)); /* Item_copy_string::copy for function can call Item_copy_string::val_int for blob via Item_ref. 
@@ -7241,20 +8863,26 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields) */ if (param->copy_funcs.push_front(pos)) goto err; - continue; } - - /* set up save buffer and change result_field to point at saved value */ - Field *field= item->field; - item->result_field=field->new_field(&thd->mem_root,field->table); - char *tmp=(char*) sql_alloc(field->pack_length()+1); - if (!tmp) - goto err; - copy->set(tmp, item->result_field); - item->result_field->move_field(copy->to_ptr,copy->to_null_ptr,1); - copy++; + else + { + /* + set up save buffer and change result_field to point at + saved value + */ + Field *field= item->field; + item->result_field=field->new_field(thd->mem_root,field->table); + char *tmp=(char*) sql_alloc(field->pack_length()+1); + if (!tmp) + goto err; + copy->set(tmp, item->result_field); + item->result_field->move_field(copy->to_ptr,copy->to_null_ptr,1); + copy++; + } } else if ((pos->type() == Item::FUNC_ITEM || + pos->type() == Item::SUBSELECT_ITEM || + pos->type() == Item::CACHE_ITEM || pos->type() == Item::COND_ITEM) && !pos->with_sum_func) { // Save for send fields @@ -7266,16 +8894,34 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields) */ if (!(pos=new Item_copy_string(pos))) goto err; - VOID(li.replace(pos)); - if (param->copy_funcs.push_back(pos)) + if (i < border) // HAVING, ORDER and GROUP BY + { + if (extra_funcs.push_back(pos)) + goto err; + } + else if (param->copy_funcs.push_back(pos)) goto err; } + res_all_fields.push_back(pos); + ref_pointer_array[((i < border)? all_fields.elements-i-1 : i-border)]= + pos; } param->copy_field_end= copy; + + for (i= 0; i < border; i++) + itr++; + itr.sublist(res_selected_fields, elements); + /* + Put elements from HAVING, ORDER BY and GROUP BY last to ensure that any + reference used in these will resolve to a item that is already calculated + */ + param->copy_funcs.concat(&extra_funcs); + DBUG_RETURN(0); err: - delete [] param->copy_field; // This is never 0 + if (copy) + delete [] param->copy_field; param->copy_field=0; err2: DBUG_RETURN(TRUE); @@ -7283,7 +8929,10 @@ err2: /* - Copy fields and null values between two tables + Make a copy of all simple SELECT'ed items + + This is done at the start of a new group so that we can retrieve + these later when the group changes. 
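/*
  Illustrative sketch, not part of this patch: the slot assignment used by
  setup_copy_fields()/change_to_use_tmp_fields() above. 'all_fields' keeps
  the hidden items (needed only by HAVING/ORDER/GROUP BY) in front of the
  'elements' selected items, while the pointer array puts the selected items
  first, in select-list order, and the hidden ones reversed at the end --
  the ((i < border) ? ...) expression in the hunk above.
*/
#include <cstdio>
#include <string>
#include <vector>

int main()
{
  // hidden items first, then the select list, as in 'all_fields'
  std::vector<std::string> all_fields = { "ORDER_EXPR", "HAVING_EXPR", "a", "b" };
  const size_t elements = 2;                         // size of the select list
  const size_t border = all_fields.size() - elements;

  std::vector<const std::string*> ref_pointer_array(all_fields.size());
  for (size_t i = 0; i < all_fields.size(); i++)
  {
    size_t slot = (i < border) ? all_fields.size() - i - 1 : i - border;
    ref_pointer_array[slot] = &all_fields[i];
  }

  // prints: slot 0 -> a, slot 1 -> b, slot 2 -> HAVING_EXPR, slot 3 -> ORDER_EXPR
  for (size_t slot = 0; slot < ref_pointer_array.size(); slot++)
    std::printf("slot %zu -> %s\n", slot, ref_pointer_array[slot]->c_str());
  return 0;
}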
*/ void @@ -7295,92 +8944,180 @@ copy_fields(TMP_TABLE_PARAM *param) for (; ptr != end; ptr++) (*ptr->do_copy)(ptr); - List_iterator_fast<Item> &it=param->copy_funcs_it; - it.rewind(); + List_iterator_fast<Item> it(param->copy_funcs); Item_copy_string *item; while ((item = (Item_copy_string*) it++)) item->copy(); } -/***************************************************************************** - Make an array of pointer to sum_functions to speed up sum_func calculation -*****************************************************************************/ +/* + Make an array of pointers to sum_functions to speed up sum_func calculation -static bool -make_sum_func_list(JOIN *join,List<Item> &fields) + SYNOPSIS + alloc_func_list() + + RETURN + 0 ok + 1 Error +*/ + +bool JOIN::alloc_func_list() +{ + uint func_count, group_parts; + DBUG_ENTER("alloc_func_list"); + + func_count= tmp_table_param.sum_func_count; + /* + If we are using rollup, we need a copy of the summary functions for + each level + */ + if (rollup.state != ROLLUP::STATE_NONE) + func_count*= (send_group_parts+1); + + group_parts= send_group_parts; + /* + If distinct, reserve memory for possible + disctinct->group_by optimization + */ + if (select_distinct) + group_parts+= fields_list.elements; + + /* This must use calloc() as rollup_make_fields depends on this */ + sum_funcs= (Item_sum**) thd->calloc(sizeof(Item_sum**) * (func_count+1) + + sizeof(Item_sum***) * (group_parts+1)); + sum_funcs_end= (Item_sum***) (sum_funcs+func_count+1); + DBUG_RETURN(sum_funcs == 0); +} + + +/* + Initialize 'sum_funcs' array with all Item_sum objects + + SYNOPSIS + make_sum_func_list() + field_list All items + send_fields Items in select list + before_group_by Set to 1 if this is called before GROUP BY handling + + NOTES + Calls ::setup() for all item_sum objects in field_list + + RETURN + 0 ok + 1 error +*/ + +bool JOIN::make_sum_func_list(List<Item> &field_list, List<Item> &send_fields, + bool before_group_by) { + List_iterator_fast<Item> it(field_list); + Item_sum **func; + Item *item; DBUG_ENTER("make_sum_func_list"); - Item_sum **func = - (Item_sum**) sql_alloc(sizeof(Item_sum*)* - (join->tmp_table_param.sum_func_count+1)); - if (!func) - DBUG_RETURN(TRUE); - List_iterator<Item> it(fields); - join->sum_funcs=func; - Item *field; - while ((field=it++)) + func= sum_funcs; + while ((item=it++)) { - if (field->type() == Item::SUM_FUNC_ITEM && !field->const_item()) + if (item->type() == Item::SUM_FUNC_ITEM && !item->const_item()) { - *func++=(Item_sum*) field; + *func++= (Item_sum*) item; /* let COUNT(DISTINCT) create the temporary table */ - if (((Item_sum*) field)->setup(join->thd)) + if (((Item_sum*) item)->setup(thd)) DBUG_RETURN(TRUE); } } + if (before_group_by && rollup.state == ROLLUP::STATE_INITED) + { + rollup.state= ROLLUP::STATE_READY; + if (rollup_make_fields(field_list, send_fields, &func)) + DBUG_RETURN(TRUE); // Should never happen + } + else if (rollup.state == ROLLUP::STATE_NONE) + { + for (uint i=0 ; i <= send_group_parts ;i++) + sum_funcs_end[i]= func; + } + else if (rollup.state == ROLLUP::STATE_READY) + DBUG_RETURN(FALSE); // Don't put end marker *func=0; // End marker DBUG_RETURN(FALSE); } /* - Change all funcs and sum_funcs to fields in tmp table + Change all funcs and sum_funcs to fields in tmp table, and create + new list of all items. 
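/*
  Illustrative sketch, not from the patch: what alloc_func_list() and
  make_sum_func_list() above boil down to -- the aggregate items are
  collected once into a flat, NULL-terminated array of pointers so the
  per-row update loop does not have to re-scan the whole field list.
  The Item struct here is an invented stand-in.
*/
#include <cstdio>
#include <vector>

struct Item { const char *name; bool is_sum_func; };

int main()
{
  std::vector<Item> fields = { {"a", false}, {"SUM(b)", true}, {"COUNT(*)", true} };

  std::vector<Item*> sum_funcs;
  for (Item &it : fields)
    if (it.is_sum_func)
      sum_funcs.push_back(&it);
  sum_funcs.push_back(nullptr);                // end marker, as in the patch

  for (Item **f = sum_funcs.data(); *f; f++)   // shape of the per-row update loop
    std::printf("update %s\n", (*f)->name);
  return 0;
}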
+ + change_to_use_tmp_fields() + thd - THD pointer + ref_pointer_array - array of pointers to top elements of filed list + res_selected_fields - new list of items of select item list + res_all_fields - new list of all items + elements - number of elements in select item list + all_fields - all fields list + + RETURN + 0 - ok + !=0 - error */ static bool -change_to_use_tmp_fields(List<Item> &items) +change_to_use_tmp_fields(THD *thd, Item **ref_pointer_array, + List<Item> &res_selected_fields, + List<Item> &res_all_fields, + uint elements, List<Item> &all_fields) { - List_iterator<Item> it(items); + List_iterator_fast<Item> it(all_fields); Item *item_field,*item; + res_selected_fields.empty(); + res_all_fields.empty(); - while ((item=it++)) + uint i, border= all_fields.elements - elements; + for (i= 0; (item= it++); i++) { Field *field; + if (item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM) - continue; - if (item->type() == Item::FIELD_ITEM) - { - ((Item_field*) item)->field= - ((Item_field*) item)->result_field; - } - else if ((field=item->tmp_table_field())) + item_field= item; + else { - if (item->type() == Item::SUM_FUNC_ITEM && field->table->group) - item_field=((Item_sum*) item)->result_item(field); - else - item_field=(Item*) new Item_field(field); - if (!item_field) - return TRUE; // Fatal error - item_field->name=item->name; /*lint -e613 */ -#ifndef DBUG_OFF - if (_db_on_ && !item_field->name) + if (item->type() == Item::FIELD_ITEM) { - char buff[256]; - String str(buff,sizeof(buff)); - str.length(0); - item->print(&str); - item_field->name=sql_strmake(str.ptr(),str.length()); + item_field= item->get_tmp_table_item(thd); } + else if ((field= item->get_tmp_table_field())) + { + if (item->type() == Item::SUM_FUNC_ITEM && field->table->group) + item_field= ((Item_sum*) item)->result_item(field); + else + item_field= (Item*) new Item_field(field); + if (!item_field) + return TRUE; // Fatal error + item_field->name= item->name; /*lint -e613 */ +#ifndef DBUG_OFF + if (_db_on_ && !item_field->name) + { + char buff[256]; + String str(buff,sizeof(buff),&my_charset_bin); + str.length(0); + item->print(&str); + item_field->name= sql_strmake(str.ptr(),str.length()); + } #endif -#ifdef DELETE_ITEMS - delete it.replace(item_field); /*lint -e613 */ -#else - (void) it.replace(item_field); /*lint -e613 */ -#endif + } + else + item_field= item; } + res_all_fields.push_back(item_field); + ref_pointer_array[((i < border)? 
all_fields.elements-i-1 : i-border)]= + item_field; } + + List_iterator_fast<Item> itr(res_all_fields); + for (i= 0; i < border; i++) + itr++; + itr.sublist(res_selected_fields, elements); return FALSE; } @@ -7388,56 +9125,45 @@ change_to_use_tmp_fields(List<Item> &items) /* Change all sum_func refs to fields to point at fields in tmp table Change all funcs to be fields in tmp table + + change_refs_to_tmp_fields() + thd - THD pointer + ref_pointer_array - array of pointers to top elements of filed list + res_selected_fields - new list of items of select item list + res_all_fields - new list of all items + elements - number of elements in select item list + all_fields - all fields list + + RETURN + 0 ok + 1 error */ static bool -change_refs_to_tmp_fields(THD *thd,List<Item> &items) +change_refs_to_tmp_fields(THD *thd, Item **ref_pointer_array, + List<Item> &res_selected_fields, + List<Item> &res_all_fields, uint elements, + List<Item> &all_fields) { - List_iterator<Item> it(items); - Item *item; + List_iterator_fast<Item> it(all_fields); + Item *item, *new_item; + res_selected_fields.empty(); + res_all_fields.empty(); - while ((item= it++)) + uint i, border= all_fields.elements - elements; + for (i= 0; (item= it++); i++) { - if (item->type() == Item::SUM_FUNC_ITEM) - { - if (!item->const_item()) - { - Item_sum *sum_item= (Item_sum*) item; - if (sum_item->result_field) // If not a const sum func - { - Field *result_field=sum_item->result_field; - for (uint i=0 ; i < sum_item->arg_count ; i++) - { - Item *arg= sum_item->args[i]; - if (!arg->const_item()) - { - if (arg->type() == Item::FIELD_ITEM) - ((Item_field*) arg)->field= result_field++; - else - sum_item->args[i]= new Item_field(result_field++); - } - } - } - } - } - else if (item->with_sum_func) - continue; - else if ((item->type() == Item::FUNC_ITEM || - item->type() == Item::COND_ITEM) && - !item->const_item()) - { /* All funcs are stored */ -#ifdef DELETE_ITEMS - delete it.replace(new Item_field(((Item_func*) item)->result_field)); -#else - (void) it.replace(new Item_field(((Item_func*) item)->result_field)); -#endif - } - else if (item->type() == Item::FIELD_ITEM) /* Change refs */ - { - ((Item_field*)item)->field=((Item_field*) item)->result_field; - } + res_all_fields.push_back(new_item= item->get_tmp_table_item(thd)); + ref_pointer_array[((i < border)? 
all_fields.elements-i-1 : i-border)]= + new_item; } - return thd->fatal_error; + + List_iterator_fast<Item> itr(res_all_fields); + for (i= 0; i < border; i++) + itr++; + itr.sublist(res_selected_fields, elements); + + return thd->is_fatal_error; } @@ -7470,21 +9196,29 @@ update_tmptable_sum_func(Item_sum **func_ptr, /* Copy result of sum functions to record in tmp_table */ static void -copy_sum_funcs(Item_sum **func_ptr) +copy_sum_funcs(Item_sum **func_ptr, Item_sum **end_ptr) { - Item_sum *func; - for (; (func = *func_ptr) ; func_ptr++) - (void) func->save_in_result_field(1); + for (; func_ptr != end_ptr ; func_ptr++) + (void) (*func_ptr)->save_in_result_field(1); return; } -static void -init_sum_functions(Item_sum **func_ptr) +static bool +init_sum_functions(Item_sum **func_ptr, Item_sum **end_ptr) { - Item_sum *func; - for (; (func= (Item_sum*) *func_ptr) ; func_ptr++) - func->reset(); + for (; func_ptr != end_ptr ;func_ptr++) + { + if ((*func_ptr)->reset()) + return 1; + } + /* If rollup, calculate the upper sum levels */ + for ( ; *func_ptr ; func_ptr++) + { + if ((*func_ptr)->add()) + return 1; + } + return 0; } @@ -7509,10 +9243,10 @@ copy_funcs(Item **func_ptr) } -/***************************************************************************** +/* Create a condition for a const reference and add this to the currenct select for the table -*****************************************************************************/ +*/ static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab) { @@ -7528,18 +9262,16 @@ static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab) for (uint i=0 ; i < join_tab->ref.key_parts ; i++) { - Field *field=table->field[table->key_info[join_tab->ref.key].key_part[i].fieldnr-1]; + Field *field=table->field[table->key_info[join_tab->ref.key].key_part[i]. + fieldnr-1]; Item *value=join_tab->ref.items[i]; - cond->add(new Item_func_equal(new Item_field(field),value)); + cond->add(new Item_func_equal(new Item_field(field), value)); } - if (thd->fatal_error) + if (thd->is_fatal_error) DBUG_RETURN(TRUE); - /* - Here we pass 0 as the first argument to fix_fields that don't need - to do any stack checking (This is already done in the initial fix_fields). - */ - cond->fix_fields((THD *) 0,(TABLE_LIST *) 0); + if (!cond->fixed) + cond->fix_fields(thd,(TABLE_LIST *) 0, (Item**)&cond); if (join_tab->select) { error=(int) cond->add(join_tab->select->cond); @@ -7551,7 +9283,427 @@ static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab) DBUG_RETURN(error ? TRUE : FALSE); } + +/* + Free joins of subselect of this select. + + free_underlaid_joins() + thd - THD pointer + select - pointer to st_select_lex which subselects joins we will free +*/ + +void free_underlaid_joins(THD *thd, SELECT_LEX *select) +{ + for (SELECT_LEX_UNIT *unit= select->first_inner_unit(); + unit; + unit= unit->next_unit()) + unit->cleanup(); +} + +/**************************************************************************** + ROLLUP handling +****************************************************************************/ + +/* + Replace occurences of group by fields in an expression by ref items + + SYNOPSIS + change_group_ref() + thd reference to the context + expr expression to make replacement + group_list list of references to group by items + changed out: returns 1 if item contains a replaced field item + + DESCRIPTION + The function replaces occurrences of group by fields in expr + by ref objects for these fields unless they are under aggregate + functions. 
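/*
  Illustrative sketch, not from the patch: the behaviour that the rollup
  branch of init_sum_functions()/copy_sum_funcs() above provides -- when a
  group ends, the aggregates of that level start over while the less
  detailed levels (here just a grand total) keep accumulating the same rows.
  Sum and the column names are invented for the example.
*/
#include <cstdio>
#include <vector>

struct Sum
{
  long value = 0;
  void reset() { value = 0; }
  void add(long v) { value += v; }
};

int main()
{
  // one aggregate per level: per-group SUM(c) and the grand-total SUM(c)
  Sum per_group, grand_total;

  struct Row { char a; long c; };
  std::vector<Row> rows = { {'x', 1}, {'x', 2}, {'y', 5} };   // ordered by 'a'

  char current = 0;
  for (const Row &r : rows)
  {
    if (r.a != current)
    {
      if (current)
        std::printf("a=%c SUM(c)=%ld\n", current, per_group.value);
      current = r.a;
      per_group.reset();        // the level that changed starts over...
    }
    per_group.add(r.c);
    grand_total.add(r.c);       // ...while upper levels keep accumulating
  }
  std::printf("a=%c SUM(c)=%ld\n", current, per_group.value);
  std::printf("ALL  SUM(c)=%ld\n", grand_total.value);
  return 0;
}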
+ The function also corrects value of the the maybe_null attribute + for the items of all subexpressions containing group by fields. + + IMPLEMENTATION + The function recursively traverses the tree of the expr expression, + looks for occurrences of the group by fields that are not under + aggregate functions and replaces them for the corresponding ref items. + + NOTES + This substitution is needed GROUP BY queries with ROLLUP if + SELECT list contains expressions over group by attributes. + + TODO: Some functions are not null-preserving. For those functions + updating of the maybe_null attribute is an overkill. + + EXAMPLES + SELECT a+1 FROM t1 GROUP BY a WITH ROLLUP + SELECT SUM(a)+a FROM t1 GROUP BY a WITH ROLLUP + + RETURN + 0 if ok + 1 on error +*/ + +static bool change_group_ref(THD *thd, Item_func *expr, ORDER *group_list, + bool *changed) +{ + if (expr->arg_count) + { + Item **arg,**arg_end; + bool arg_changed= FALSE; + for (arg= expr->arguments(), + arg_end= expr->arguments()+expr->arg_count; + arg != arg_end; arg++) + { + Item *item= *arg; + if (item->type() == Item::FIELD_ITEM || item->type() == Item::REF_ITEM) + { + ORDER *group_tmp; + for (group_tmp= group_list; group_tmp; group_tmp= group_tmp->next) + { + if (item->eq(*group_tmp->item,0)) + { + Item *new_item; + if(!(new_item= new Item_ref(group_tmp->item, 0, item->name))) + return 1; // fatal_error is set + thd->change_item_tree(arg, new_item); + arg_changed= TRUE; + } + } + } + else if (item->type() == Item::FUNC_ITEM) + { + if (change_group_ref(thd, (Item_func *) item, group_list, &arg_changed)) + return 1; + } + } + if (arg_changed) + { + expr->maybe_null= 1; + *changed= TRUE; + } + } + return 0; +} + + +/* Allocate memory needed for other rollup functions */ + +bool JOIN::rollup_init() +{ + uint i,j; + Item **ref_array; + + tmp_table_param.quick_group= 0; // Can't create groups in tmp table + rollup.state= ROLLUP::STATE_INITED; + + /* + Create pointers to the different sum function groups + These are updated by rollup_make_fields() + */ + tmp_table_param.group_parts= send_group_parts; + + if (!(rollup.null_items= (Item_null_result**) thd->alloc((sizeof(Item*) + + sizeof(Item**) + + sizeof(List<Item>) + + ref_pointer_array_size) + * send_group_parts ))) + return 1; + + rollup.fields= (List<Item>*) (rollup.null_items + send_group_parts); + rollup.ref_pointer_arrays= (Item***) (rollup.fields + send_group_parts); + ref_array= (Item**) (rollup.ref_pointer_arrays+send_group_parts); + + /* + Prepare space for field list for the different levels + These will be filled up in rollup_make_fields() + */ + for (i= 0 ; i < send_group_parts ; i++) + { + rollup.null_items[i]= new (thd->mem_root) Item_null_result(); + List<Item> *rollup_fields= &rollup.fields[i]; + rollup_fields->empty(); + rollup.ref_pointer_arrays[i]= ref_array; + ref_array+= all_fields.elements; + } + for (i= 0 ; i < send_group_parts; i++) + { + for (j=0 ; j < fields_list.elements ; j++) + rollup.fields[i].push_back(rollup.null_items[i]); + } + List_iterator_fast<Item> it(all_fields); + Item *item; + while ((item= it++)) + { + ORDER *group_tmp; + for (group_tmp= group_list; group_tmp; group_tmp= group_tmp->next) + { + if (*group_tmp->item == item) + item->maybe_null= 1; + } + if (item->type() == Item::FUNC_ITEM) + { + bool changed= FALSE; + if (change_group_ref(thd, (Item_func *) item, group_list, &changed)) + return 1; + /* + We have to prevent creation of a field in a temporary table for + an expression that contains GROUP BY attributes. 
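/*
  Illustrative sketch, not from the patch: the substitution change_group_ref()
  above performs, shown on a toy expression tree. Leaves naming a GROUP BY
  column are replaced by reference markers and the enclosing expression is
  marked maybe_null, since it becomes NULL in the ROLLUP super-aggregate rows.
  (The real function also skips anything under an aggregate; that detail is
  omitted here.) Node, leaf() and replace_group_cols() are invented names.
*/
#include <cstdio>
#include <memory>
#include <string>
#include <vector>

struct Node
{
  std::string name;                        // column, literal or function name
  bool is_ref = false;                     // replaced by a group-item reference
  bool maybe_null = false;
  std::vector<std::unique_ptr<Node>> args; // empty for leaves
};

static std::unique_ptr<Node> leaf(const std::string &name)
{
  auto n = std::make_unique<Node>();
  n->name = name;
  return n;
}

static bool replace_group_cols(Node &expr, const std::vector<std::string> &group_cols)
{
  bool changed = false;
  for (auto &arg : expr.args)
  {
    if (arg->args.empty())                 // a leaf: maybe a group column
    {
      for (const std::string &g : group_cols)
        if (arg->name == g)
        {
          arg->is_ref = true;              // point at the group item instead
          changed = true;
        }
    }
    else if (replace_group_cols(*arg, group_cols))  // nested function call
      changed = true;
  }
  if (changed)
    expr.maybe_null = true;                // NULL in the super-aggregate rows
  return changed;
}

int main()
{
  Node expr;                               // a + 1, as in "SELECT a+1 ... WITH ROLLUP"
  expr.name = "+";
  expr.args.push_back(leaf("a"));
  expr.args.push_back(leaf("1"));

  replace_group_cols(expr, { "a" });
  std::printf("'a' replaced by ref: %d, a+1 maybe_null: %d\n",
              (int) expr.args[0]->is_ref, (int) expr.maybe_null);
  return 0;
}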
+ Marking the expression item as 'with_sum_func' will ensure this. + */ + if (changed) + item->with_sum_func= 1; + } + } + return 0; +} + + +/* + Fill up rollup structures with pointers to fields to use + + SYNOPSIS + rollup_make_fields() + fields_arg List of all fields (hidden and real ones) + sel_fields Pointer to selected fields + func Store here a pointer to all fields + + IMPLEMENTATION: + Creates copies of item_sum items for each sum level + + RETURN + 0 if ok + In this case func is pointing to next not used element. + 1 on error +*/ + +bool JOIN::rollup_make_fields(List<Item> &fields_arg, List<Item> &sel_fields, + Item_sum ***func) +{ + List_iterator_fast<Item> it(fields_arg); + Item *first_field= sel_fields.head(); + uint level; + + /* + Create field lists for the different levels + + The idea here is to have a separate field list for each rollup level to + avoid all runtime checks of which columns should be NULL. + + The list is stored in reverse order to get sum function in such an order + in func that it makes it easy to reset them with init_sum_functions() + + Assuming: SELECT a, b, c SUM(b) FROM t1 GROUP BY a,b WITH ROLLUP + + rollup.fields[0] will contain list where a,b,c is NULL + rollup.fields[1] will contain list where b,c is NULL + ... + rollup.ref_pointer_array[#] points to fields for rollup.fields[#] + ... + sum_funcs_end[0] points to all sum functions + sum_funcs_end[1] points to all sum functions, except grand totals + ... + */ + + for (level=0 ; level < send_group_parts ; level++) + { + uint i; + uint pos= send_group_parts - level -1; + bool real_fields= 0; + Item *item; + List_iterator<Item> new_it(rollup.fields[pos]); + Item **ref_array_start= rollup.ref_pointer_arrays[pos]; + ORDER *start_group; + + /* Point to first hidden field */ + Item **ref_array= ref_array_start + fields_arg.elements-1; + + /* Remember where the sum functions ends for the previous level */ + sum_funcs_end[pos+1]= *func; + + /* Find the start of the group for this level */ + for (i= 0, start_group= group_list ; + i++ < pos ; + start_group= start_group->next) + ; + + it.rewind(); + while ((item= it++)) + { + if (item == first_field) + { + real_fields= 1; // End of hidden fields + ref_array= ref_array_start; + } + + if (item->type() == Item::SUM_FUNC_ITEM && !item->const_item()) + { + /* + This is a top level summary function that must be replaced with + a sum function that is reset for this level. + + NOTE: This code creates an object which is not that nice in a + sub select. Fortunately it's not common to have rollup in + sub selects. 
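/*
  Illustrative sketch, not from the patch: what the per-level field lists
  prepared by rollup_make_fields() amount to for GROUP BY a,b WITH ROLLUP --
  level 2 is the ordinary group row, level 1 shows NULL for 'b' (the per-'a'
  subtotal), level 0 shows NULL for both (the grand total). Names below are
  invented for the example.
*/
#include <cstdio>
#include <string>
#include <vector>

static void print_level_row(const std::vector<std::string> &group_values, unsigned level)
{
  for (unsigned i = 0; i < group_values.size(); i++)
    std::printf("%s%s", i < level ? group_values[i].c_str() : "NULL",
                i + 1 < group_values.size() ? "\t" : "\n");
}

int main()
{
  std::vector<std::string> group_values = { "x", "1" };   // current a, b
  for (unsigned level = (unsigned) group_values.size() + 1; level-- > 0; )
    print_level_row(group_values, level);
  // prints:  x 1  /  x NULL  /  NULL NULL
  return 0;
}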
+ */ + item= item->copy_or_same(thd); + ((Item_sum*) item)->make_unique(); + if (((Item_sum*) item)->setup(thd)) + return 1; + *(*func)= (Item_sum*) item; + (*func)++; + } + else + { + /* Check if this is something that is part of this group by */ + ORDER *group_tmp; + for (group_tmp= start_group, i= pos ; + group_tmp ; group_tmp= group_tmp->next, i++) + { + if (*group_tmp->item == item) + { + /* + This is an element that is used by the GROUP BY and should be + set to NULL in this level + */ + Item_null_result *null_item; + item->maybe_null= 1; // Value will be null sometimes + null_item= rollup.null_items[i]; + null_item->result_field= item->get_tmp_table_field(); + item= null_item; + break; + } + } + } + *ref_array= item; + if (real_fields) + { + (void) new_it++; // Point to next item + new_it.replace(item); // Replace previous + ref_array++; + } + else + ref_array--; + } + } + sum_funcs_end[0]= *func; // Point to last function + return 0; +} + +/* + Send all rollup levels higher than the current one to the client + + SYNOPSIS: + rollup_send_data() + idx Level we are on: + 0 = Total sum level + 1 = First group changed (a) + 2 = Second group changed (a,b) + + SAMPLE + SELECT a, b, c SUM(b) FROM t1 GROUP BY a,b WITH ROLLUP + + RETURN + 0 ok + 1 If send_data_failed() +*/ + +int JOIN::rollup_send_data(uint idx) +{ + uint i; + for (i= send_group_parts ; i-- > idx ; ) + { + /* Get reference pointers to sum functions in place */ + memcpy((char*) ref_pointer_array, + (char*) rollup.ref_pointer_arrays[i], + ref_pointer_array_size); + if ((!having || having->val_int())) + { + if (send_records < unit->select_limit_cnt && do_send_rows && + result->send_data(rollup.fields[i])) + return 1; + send_records++; + } + } + /* Restore ref_pointer_array */ + set_items_ref_array(current_ref_pointer_array); + return 0; +} + +/* + Write all rollup levels higher than the current one to a temp table + + SYNOPSIS: + rollup_write_data() + idx Level we are on: + 0 = Total sum level + 1 = First group changed (a) + 2 = Second group changed (a,b) + table reference to temp table + + SAMPLE + SELECT a, b, SUM(c) FROM t1 GROUP BY a,b WITH ROLLUP + + RETURN + 0 ok + 1 if write_data_failed() +*/ + +int JOIN::rollup_write_data(uint idx, TABLE *table) +{ + uint i; + for (i= send_group_parts ; i-- > idx ; ) + { + /* Get reference pointers to sum functions in place */ + memcpy((char*) ref_pointer_array, + (char*) rollup.ref_pointer_arrays[i], + ref_pointer_array_size); + if ((!having || having->val_int())) + { + int error; + Item *item; + List_iterator_fast<Item> it(rollup.fields[i]); + while ((item= it++)) + { + if (item->type() == Item::NULL_ITEM && item->is_result_field()) + item->save_in_result_field(1); + } + copy_sum_funcs(sum_funcs_end[i+1], sum_funcs_end[i]); + if ((error= table->file->write_row(table->record[0]))) + { + if (create_myisam_from_heap(thd, table, &tmp_table_param, + error, 0)) + return 1; + } + } + } + /* Restore ref_pointer_array */ + set_items_ref_array(current_ref_pointer_array); + return 0; +} + +/* + clear results if there are not rows found for group + (end_send_group/end_write_group) + + SYNOPSYS + JOIN::clear() +*/ + +void JOIN::clear() +{ + clear_tables(this); + copy_fields(&tmp_table_param); + + if (sum_funcs) + { + Item_sum *func, **func_ptr= sum_funcs; + while ((func= *(func_ptr++))) + func->clear(); + } +} + /**************************************************************************** + EXPLAIN handling + Send a description about what how the select will be done to stdout 
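/*
  Illustrative sketch, not from the patch: the contract of rollup_send_data()
  and rollup_write_data() above -- when the group column at position 'idx'
  changes (idx == 0 meaning "end of all rows"), every ROLLUP level from the
  most detailed one down to idx has a finished subtotal to emit. The loop
  shape mirrors "for (i= send_group_parts ; i-- > idx ; )".
*/
#include <cstdio>

static void emit_pending_levels(unsigned group_parts, unsigned idx)
{
  for (unsigned i = group_parts; i-- > idx; )
    std::printf("  emit rollup level %u (%u leading group column%s kept)\n",
                i, i, i == 1 ? "" : "s");
}

int main()
{
  const unsigned group_parts = 2;        // GROUP BY a,b WITH ROLLUP
  std::printf("column a changed:\n");
  emit_pending_levels(group_parts, 1);   // the per-'a' subtotal
  std::printf("end of rows:\n");
  emit_pending_levels(group_parts, 0);   // last per-'a' subtotal, then grand total
  return 0;
}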
****************************************************************************/ @@ -7559,46 +9711,94 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, bool distinct,const char *message) { List<Item> field_list; - Item *item; List<Item> item_list; THD *thd=join->thd; - MYSQL_LOCK *save_lock; - SELECT_LEX *select_lex = &(join->thd->lex.select_lex); select_result *result=join->result; Item *item_null= new Item_null(); + CHARSET_INFO *cs= system_charset_info; DBUG_ENTER("select_describe"); - + DBUG_PRINT("info", ("Select 0x%lx, type %s, message %s", + (ulong)join->select_lex, join->select_lex->type, + message ? message : "NULL")); /* Don't log this into the slow query log */ - select_lex->options&= ~(QUERY_NO_INDEX_USED | QUERY_NO_GOOD_INDEX_USED); - thd->offset_limit=0; - if (thd->lex.select == select_lex) - { - field_list.push_back(new Item_empty_string("table",NAME_LEN)); - field_list.push_back(new Item_empty_string("type",10)); - field_list.push_back(item=new Item_empty_string("possible_keys", - NAME_LEN*MAX_KEY)); - item->maybe_null=1; - field_list.push_back(item=new Item_empty_string("key",NAME_LEN)); - item->maybe_null=1; - field_list.push_back(item=new Item_int("key_len",0,3)); - item->maybe_null=1; - field_list.push_back(item=new Item_empty_string("ref", - NAME_LEN*MAX_REF_PARTS)); - item->maybe_null=1; - field_list.push_back(new Item_real("rows",0.0,0,10)); - field_list.push_back(new Item_empty_string("Extra",255)); - if (result->send_fields(field_list,1)) - return; - } + thd->server_status&= ~(SERVER_QUERY_NO_INDEX_USED | SERVER_QUERY_NO_GOOD_INDEX_USED); + join->unit->offset_limit_cnt= 0; if (message) { - Item *empty= new Item_empty_string("",0); + item_list.push_back(new Item_int((int32) + join->select_lex->select_number)); + item_list.push_back(new Item_string(join->select_lex->type, + strlen(join->select_lex->type), cs)); for (uint i=0 ; i < 7; i++) - item_list.push_back(empty); - item_list.push_back(new Item_string(message,strlen(message))); + item_list.push_back(item_null); + item_list.push_back(new Item_string(message,strlen(message),cs)); if (result->send_data(item_list)) - result->send_error(0,NullS); + join->error= 1; + } + else if (join->select_lex == join->unit->fake_select_lex) + { + /* + here we assume that the query will return at least two rows, so we + show "filesort" in EXPLAIN. 
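/*
  Illustrative sketch, not from the patch: a standalone version of the
  "<union1,2,...>" table-name formatting used for the UNION RESULT row in
  EXPLAIN above, including the fall-back to "...>" when the fixed-size name
  buffer would overflow. NAME_MAX_LEN and union_name() are invented names.
*/
#include <cstdio>
#include <cstring>
#include <vector>

enum { NAME_MAX_LEN = 64 };   // stand-in for the server's NAME_LEN

static void union_name(const std::vector<unsigned> &select_numbers, char *buf)
{
  size_t len = 6, lastop = 0;
  std::memcpy(buf, "<union", 6);
  size_t i = 0;
  for (; i < select_numbers.size() && len + lastop + 5 < NAME_MAX_LEN; i++)
  {
    len += lastop;
    lastop = (size_t) std::snprintf(buf + len, NAME_MAX_LEN - len, "%u,",
                                    select_numbers[i]);
  }
  if (i < select_numbers.size() || len + lastop >= NAME_MAX_LEN)
  {
    std::memcpy(buf + len, "...>", 5);         // truncated list
  }
  else
  {
    len += lastop;
    buf[len - 1] = '>';                        // replace trailing ',' with '>'
    buf[len] = '\0';
  }
}

int main()
{
  char buf[NAME_MAX_LEN + 8];
  union_name({1u, 2u, 3u}, buf);
  std::printf("%s\n", buf);                    // <union1,2,3>
  return 0;
}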
Of course, sometimes we'll be wrong + and no filesort will be actually done, but executing all selects in + the UNION to provide precise EXPLAIN information will hardly be + appreciated :) + */ + char table_name_buffer[NAME_LEN]; + item_list.empty(); + /* id */ + item_list.push_back(new Item_null); + /* select_type */ + item_list.push_back(new Item_string(join->select_lex->type, + strlen(join->select_lex->type), + cs)); + /* table */ + { + SELECT_LEX *sl= join->unit->first_select(); + uint len= 6, lastop= 0; + memcpy(table_name_buffer, "<union", 6); + for (; sl && len + lastop + 5 < NAME_LEN; sl= sl->next_select()) + { + len+= lastop; + lastop= my_snprintf(table_name_buffer + len, NAME_LEN - len, + "%u,", sl->select_number); + } + if (sl || len + lastop >= NAME_LEN) + { + memcpy(table_name_buffer + len, "...>", 5); + len+= 4; + } + else + { + len+= lastop; + table_name_buffer[len - 1]= '>'; // change ',' to '>' + } + item_list.push_back(new Item_string(table_name_buffer, len, cs)); + } + /* type */ + item_list.push_back(new Item_string(join_type_str[JT_ALL], + strlen(join_type_str[JT_ALL]), + cs)); + /* possible_keys */ + item_list.push_back(item_null); + /* key*/ + item_list.push_back(item_null); + /* key_len */ + item_list.push_back(item_null); + /* ref */ + item_list.push_back(item_null); + /* rows */ + item_list.push_back(item_null); + /* extra */ + if (join->unit->global_parameters->order_list.first) + item_list.push_back(new Item_string("Using filesort", + 14, cs)); + else + item_list.push_back(new Item_string("", 0, cs)); + + if (result->send_data(item_list)) + join->error= 1; } else { @@ -7608,53 +9808,82 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, JOIN_TAB *tab=join->join_tab+i; TABLE *table=tab->table; char buff[512],*buff_ptr=buff; - char buff1[512], buff2[512], buff3[512]; - String tmp1(buff1,sizeof(buff1)); - String tmp2(buff2,sizeof(buff2)); + char buff1[512], buff2[512]; + char table_name_buffer[NAME_LEN]; + String tmp1(buff1,sizeof(buff1),cs); + String tmp2(buff2,sizeof(buff2),cs); tmp1.length(0); tmp2.length(0); - item_list.empty(); + item_list.empty(); + /* id */ + item_list.push_back(new Item_uint((uint32) + join->select_lex->select_number)); + /* select_type */ + item_list.push_back(new Item_string(join->select_lex->type, + strlen(join->select_lex->type), + cs)); if (tab->type == JT_ALL && tab->select && tab->select->quick) tab->type= JT_RANGE; - item_list.push_back(new Item_string(table->table_name, - strlen(table->table_name))); + /* table */ + if (table->derived_select_number) + { + /* Derived table name generation */ + int len= my_snprintf(table_name_buffer, sizeof(table_name_buffer)-1, + "<derived%u>", + table->derived_select_number); + item_list.push_back(new Item_string(table_name_buffer, len, cs)); + } + else + item_list.push_back(new Item_string(table->table_name, + strlen(table->table_name), + cs)); + /* type */ item_list.push_back(new Item_string(join_type_str[tab->type], - strlen(join_type_str[tab->type]))); - key_map bits; + strlen(join_type_str[tab->type]), + cs)); uint j; - for (j=0,bits=tab->keys ; bits ; j++,bits>>=1) + /* possible_keys */ + if (!tab->keys.is_clear_all()) { - if (bits & 1) - { - if (tmp1.length()) - tmp1.append(','); - tmp1.append(table->key_info[j].name); - } + for (j=0 ; j < table->keys ; j++) + { + if (tab->keys.is_set(j)) + { + if (tmp1.length()) + tmp1.append(','); + tmp1.append(table->key_info[j].name, + strlen(table->key_info[j].name), + system_charset_info); + } + } } if (tmp1.length()) - 
item_list.push_back(new Item_string(tmp1.ptr(),tmp1.length())); + item_list.push_back(new Item_string(tmp1.ptr(),tmp1.length(),cs)); else item_list.push_back(item_null); + /* key key_len ref */ if (tab->ref.key_parts) { KEY *key_info=table->key_info+ tab->ref.key; item_list.push_back(new Item_string(key_info->name, - strlen(key_info->name))); + strlen(key_info->name), + system_charset_info)); item_list.push_back(new Item_int((int32) tab->ref.key_length)); for (store_key **ref=tab->ref.key_copy ; *ref ; ref++) { if (tmp2.length()) tmp2.append(','); - tmp2.append((*ref)->name()); + tmp2.append((*ref)->name(), strlen((*ref)->name()), + system_charset_info); } - item_list.push_back(new Item_string(tmp2.ptr(),tmp2.length())); + item_list.push_back(new Item_string(tmp2.ptr(),tmp2.length(),cs)); } else if (tab->type == JT_NEXT) { KEY *key_info=table->key_info+ tab->index; item_list.push_back(new Item_string(key_info->name, - strlen(key_info->name))); + strlen(key_info->name),cs)); item_list.push_back(new Item_int((int32) key_info->key_length)); item_list.push_back(item_null); } @@ -7662,8 +9891,9 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, { KEY *key_info=table->key_info+ tab->select->quick->index; item_list.push_back(new Item_string(key_info->name, - strlen(key_info->name))); - item_list.push_back(new Item_int((int32) tab->select->quick->max_used_key_length)); + strlen(key_info->name),cs)); + item_list.push_back(new Item_int((int32) tab->select->quick-> + max_used_key_length)); item_list.push_back(item_null); } else @@ -7672,23 +9902,27 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, item_list.push_back(item_null); item_list.push_back(item_null); } - sprintf(buff3,"%.0f",join->best_positions[i].records_read); - item_list.push_back(new Item_string(buff3,strlen(buff3))); + /* rows */ + item_list.push_back(new Item_int((longlong) (ulonglong) + join->best_positions[i]. 
records_read, + 21)); + /* extra */ my_bool key_read=table->key_read; if ((tab->type == JT_NEXT || tab->type == JT_CONST) && - ((table->used_keys & ((key_map) 1 << tab->index)))) + table->used_keys.is_set(tab->index)) key_read=1; - + if (tab->info) - item_list.push_back(new Item_string(tab->info,strlen(tab->info))); + item_list.push_back(new Item_string(tab->info,strlen(tab->info),cs)); else { if (tab->select) { if (tab->use_quick == 2) { - sprintf(buff_ptr,"; Range checked for each record (index map: %u)", - tab->keys); + char buf[MAX_KEY/8+1]; + sprintf(buff_ptr,"; Range checked for each record (index map: 0x%s)", + tab->keys.print(buf)); buff_ptr=strend(buff_ptr); } else @@ -7708,49 +9942,265 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, need_order=0; buff_ptr= strmov(buff_ptr,"; Using filesort"); } - if (distinct && test_all_bits(used_tables,thd->used_tables)) + if (distinct & test_all_bits(used_tables,thd->used_tables)) buff_ptr= strmov(buff_ptr,"; Distinct"); if (buff_ptr == buff) - buff_ptr+= 2; - item_list.push_back(new Item_string(buff+2,(uint) (buff_ptr - buff)-2)); + buff_ptr+= 2; // Skip inital "; " + item_list.push_back(new Item_string(buff+2,(uint) (buff_ptr - buff)-2, + cs)); } // For next iteration used_tables|=table->map; if (result->send_data(item_list)) - result->send_error(0,NullS); + join->error= 1; } } - if (!thd->lex.select->next) // Not union + for (SELECT_LEX_UNIT *unit= join->select_lex->first_inner_unit(); + unit; + unit= unit->next_unit()) { - save_lock=thd->lock; - thd->lock=(MYSQL_LOCK *)0; - result->send_eof(); - thd->lock=save_lock; + if (mysql_explain_union(thd, unit, result)) + DBUG_VOID_RETURN; } DBUG_VOID_RETURN; } -static void describe_info(JOIN *join, const char *info) +int mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result) { - THD *thd= join->thd; + DBUG_ENTER("mysql_explain_union"); + int res= 0; + SELECT_LEX *first= unit->first_select(); + + for (SELECT_LEX *sl= first; + sl; + sl= sl->next_select()) + { + // drop UNCACHEABLE_EXPLAIN, because it is for internal usage only + uint8 uncacheable= (sl->uncacheable & ~UNCACHEABLE_EXPLAIN); + sl->type= (((&thd->lex->select_lex)==sl)? + ((thd->lex->all_selects_list != sl) ? + primary_key_name : "SIMPLE"): + ((sl == first)? + ((sl->linkage == DERIVED_TABLE_TYPE) ? + "DERIVED": + ((uncacheable & UNCACHEABLE_DEPENDENT) ? + "DEPENDENT SUBQUERY": + (uncacheable?"UNCACHEABLE SUBQUERY": + "SUBQUERY"))): + ((uncacheable & UNCACHEABLE_DEPENDENT) ? 
+ "DEPENDENT UNION": + uncacheable?"UNCACHEABLE UNION": + "UNION"))); + sl->options|= SELECT_DESCRIBE; + } + if (first->next_select()) + { + unit->fake_select_lex->select_number= UINT_MAX; // jost for initialization + unit->fake_select_lex->type= "UNION RESULT"; + unit->fake_select_lex->options|= SELECT_DESCRIBE; + if (!(res= unit->prepare(thd, result, SELECT_NO_UNLOCK | SELECT_DESCRIBE, + ""))) + res= unit->exec(); + res|= unit->cleanup(); + } + else + { + thd->lex->current_select= first; + res= mysql_select(thd, &first->ref_pointer_array, + (TABLE_LIST*) first->table_list.first, + first->with_wild, first->item_list, + first->where, + first->order_list.elements + + first->group_list.elements, + (ORDER*) first->order_list.first, + (ORDER*) first->group_list.first, + first->having, + (ORDER*) thd->lex->proc_list.first, + first->options | thd->options | SELECT_DESCRIBE, + result, unit, first); + } + if (res > 0 || thd->net.report_error) + res= -1; // mysql_explain_select do not report error + DBUG_RETURN(res); +} + + +void st_select_lex::print(THD *thd, String *str) +{ + if (!thd) + thd= current_thd; - if (thd->lex.select_lex.next) /* If in UNION */ + str->append("select ", 7); + + //options + if (options & SELECT_STRAIGHT_JOIN) + str->append("straight_join ", 14); + if ((thd->lex->lock_option == TL_READ_HIGH_PRIORITY) && + (this == &thd->lex->select_lex)) + str->append("high_priority ", 14); + if (options & SELECT_DISTINCT) + str->append("distinct ", 9); + if (options & SELECT_SMALL_RESULT) + str->append("sql_small_result ", 17); + if (options & SELECT_BIG_RESULT) + str->append("sql_big_result ", 15); + if (options & OPTION_BUFFER_RESULT) + str->append("sql_buffer_result ", 18); + if (options & OPTION_FOUND_ROWS) + str->append("sql_calc_found_rows ", 20); + if (!thd->lex->safe_to_cache_query) + str->append("sql_no_cache ", 13); + if (options & OPTION_TO_QUERY_CACHE) + str->append("sql_cache ", 10); + + //Item List + bool first= 1; + List_iterator_fast<Item> it(item_list); + Item *item; + while ((item= it++)) { - select_describe(join,FALSE,FALSE,FALSE,info); - return; + if (first) + first= 0; + else + str->append(','); + item->print_item_w_name(str); } - List<Item> field_list; - String *packet= &thd->packet; - /* Don't log this into the slow query log */ - thd->lex.select_lex.options&= ~(QUERY_NO_INDEX_USED | - QUERY_NO_GOOD_INDEX_USED); - field_list.push_back(new Item_empty_string("Comment",80)); - if (send_fields(thd,field_list,1)) - return; /* purecov: inspected */ - packet->length(0); - net_store_data(packet,info); - if (!my_net_write(&thd->net,(char*) packet->ptr(),packet->length())) - send_eof(&thd->net); + /* + from clause + TODO: support USING/FORCE/IGNORE index + */ + if (table_list.elements) + { + str->append(" from ", 6); + Item *next_on= 0; + for (TABLE_LIST *table= (TABLE_LIST *) table_list.first; + table; + table= table->next) + { + if (table->derived) + { + str->append('('); + table->derived->print(str); + str->append(") "); + str->append(table->alias); + } + else + { + str->append(table->db); + str->append('.'); + str->append(table->real_name); + if (my_strcasecmp(table_alias_charset, table->real_name, table->alias)) + { + str->append(' '); + str->append(table->alias); + } + } + + if (table->on_expr && ((table->outer_join & JOIN_TYPE_LEFT) || + !(table->outer_join & JOIN_TYPE_RIGHT))) + next_on= table->on_expr; + + if (next_on) + { + str->append(" on(", 4); + next_on->print(str); + str->append(')'); + next_on= 0; + } + + TABLE_LIST *next_table; + if ((next_table= 
table->next)) + { + if (table->outer_join & JOIN_TYPE_RIGHT) + { + str->append(" right join ", 12); + if (!(table->outer_join & JOIN_TYPE_LEFT) && + table->on_expr) + next_on= table->on_expr; + } + else if (next_table->straight) + str->append(" straight_join ", 15); + else if (next_table->outer_join & JOIN_TYPE_LEFT) + str->append(" left join ", 11); + else + str->append(" join ", 6); + } + } + } + + // Where + Item *cur_where= where; + if (join) + cur_where= join->conds; + if (cur_where) + { + str->append(" where ", 7); + cur_where->print(str); + } + + // group by & olap + if (group_list.elements) + { + str->append(" group by ", 10); + print_order(str, (ORDER *) group_list.first); + switch (olap) + { + case CUBE_TYPE: + str->append(" with cube", 10); + break; + case ROLLUP_TYPE: + str->append(" with rollup", 12); + break; + default: + ; //satisfy compiler + } + } + + // having + Item *cur_having= having; + if (join) + cur_having= join->having; + + if (cur_having) + { + str->append(" having ", 8); + cur_having->print(str); + } + + if (order_list.elements) + { + str->append(" order by ", 10); + print_order(str, (ORDER *) order_list.first); + } + + // limit + print_limit(thd, str); + + // PROCEDURE unsupported here +} + + +/* + change select_result object of JOIN + + SYNOPSIS + JOIN::change_result() + res new select_result object + + RETURN + 0 - OK + -1 - error +*/ + +int JOIN::change_result(select_result *res) +{ + DBUG_ENTER("JOIN::change_result"); + result= res; + if (!procedure && result->prepare(fields_list, select_lex->master_unit())) + { + DBUG_RETURN(-1); + } + DBUG_RETURN(0); } diff --git a/sql/sql_select.h b/sql/sql_select.h index 5c987e74163..94cc8839466 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -17,7 +17,7 @@ /* classes to use when handling where clause */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_INTERFACE #pragma interface /* gcc class implementation */ #endif @@ -27,8 +27,15 @@ typedef struct keyuse_t { TABLE *table; Item *val; /* or value if no field */ - uint key,keypart; table_map used_tables; + uint key, keypart, optimize; + key_part_map keypart_map; + ha_rows ref_table_rows; + /* + If true, the comparison this value was created from will not be + satisfied if val has NULL 'value'. + */ + bool null_rejecting; } KEYUSE; class store_key; @@ -43,7 +50,14 @@ typedef struct st_table_ref byte *key_buff2; // key_buff+key_length store_key **key_copy; // Item **items; // val()'s for each keypart + /* + (null_rejecting & (1<<i)) means the condition is '=' and no matching + rows will be produced if items[i] IS NULL (see add_not_null_conds()) + */ + key_part_map null_rejecting; table_map depend_map; // Table depends on these tables. + byte *null_ref_key; // null byte position in the key_buf. + // used for REF_OR_NULL optimization. 
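/*
  Illustrative sketch, not from the patch: what the new null_ref_key /
  "REF_OR_NULL" access amounts to for a predicate like
  "t.key = const OR t.key IS NULL" -- probe the index once with the constant,
  then once more with the NULL key value. (null_rejecting is the
  complementary idea for plain '=' ref access: a NULL lookup value can be
  skipped because '=' never matches NULL.) Names below are invented.
*/
#include <cstdio>
#include <vector>

struct KeyedRow { bool key_is_null; int key; const char *payload; };

static void ref_or_null_lookup(const std::vector<KeyedRow> &index, int value)
{
  for (const KeyedRow &r : index)              // first probe: key = value
    if (!r.key_is_null && r.key == value)
      std::printf("key=%d    -> %s\n", r.key, r.payload);
  for (const KeyedRow &r : index)              // second probe: key IS NULL
    if (r.key_is_null)
      std::printf("key=NULL -> %s\n", r.payload);
}

int main()
{
  std::vector<KeyedRow> index = { {false, 1, "row1"}, {true, 0, "row2"}, {false, 2, "row3"} };
  ref_or_null_lookup(index, 1);                // emits row1, then the NULL-keyed row
  return 0;
}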
} TABLE_REF; /* @@ -73,7 +87,8 @@ typedef struct st_join_cache { */ enum join_type { JT_UNKNOWN,JT_SYSTEM,JT_CONST,JT_EQ_REF,JT_REF,JT_MAYBE_REF, - JT_ALL, JT_RANGE, JT_NEXT, JT_FT}; + JT_ALL, JT_RANGE, JT_NEXT, JT_FT, JT_REF_OR_NULL, + JT_UNIQUE_SUBQUERY, JT_INDEX_SUBQUERY}; class JOIN; @@ -92,9 +107,9 @@ typedef struct st_join_table { key_map const_keys; /* Keys with constant part */ key_map checked_keys; /* Keys checked in find_best */ key_map needed_reg; + key_map keys; /* all keys with can be used */ ha_rows records,found_records,read_time; table_map dependent,key_dependent; - uint keys; /* all keys with can be used */ uint use_quick,index; uint status; // Save status for cache uint used_fields,used_fieldlength,used_blobs; @@ -103,77 +118,212 @@ typedef struct st_join_table { TABLE_REF ref; JOIN_CACHE cache; JOIN *join; + + void cleanup(); } JOIN_TAB; -typedef struct st_position { /* Used in find_best */ +typedef struct st_position /* Used in find_best */ +{ double records_read; JOIN_TAB *table; KEYUSE *key; } POSITION; - -/* Param to create temporary tables when doing SELECT:s */ - -class TMP_TABLE_PARAM :public Sql_alloc +typedef struct st_rollup { - public: - List<Item> copy_funcs; - List_iterator_fast<Item> copy_funcs_it; - Copy_field *copy_field, *copy_field_end; - byte *group_buff; - Item **items_to_copy; /* Fields in tmp table */ - MI_COLUMNDEF *recinfo,*start_recinfo; - KEY *keyinfo; - ha_rows end_write_records; - uint field_count,sum_func_count,func_count; - uint hidden_field_count; - uint group_parts,group_length,group_null_parts; - uint quick_group; - bool using_indirect_summary_function; - - TMP_TABLE_PARAM() - :copy_funcs_it(copy_funcs), copy_field(0), group_parts(0), - group_length(0), group_null_parts(0) - {} - ~TMP_TABLE_PARAM() - { - cleanup(); - } - inline void cleanup(void) - { - if (copy_field) /* Fix for Intel compiler */ - { - delete [] copy_field; - copy_field=0; - } - } -}; + enum State { STATE_NONE, STATE_INITED, STATE_READY }; + State state; + Item_null_result **null_items; + Item ***ref_pointer_arrays; + List<Item> *fields; +} ROLLUP; -class JOIN { +class JOIN :public Sql_alloc +{ public: JOIN_TAB *join_tab,**best_ref,**map2table; + JOIN_TAB *join_tab_save; //saved join_tab for subquery reexecution TABLE **table,**all_tables,*sort_by_table; uint tables,const_tables; uint send_group_parts; bool sort_and_group,first_record,full_join,group, no_field_update; bool do_send_rows; table_map const_table_map,found_const_table_map,outer_join; - ha_rows send_records,found_records,examined_rows,row_limit; + ha_rows send_records,found_records,examined_rows,row_limit, select_limit; POSITION positions[MAX_TABLES+1],best_positions[MAX_TABLES+1]; double best_read; List<Item> *fields; - List<Item_buff> group_fields; + List<Item_buff> group_fields, group_fields_cache; TABLE *tmp_table; + // used to store 2 possible tmp table of SELECT + TABLE *exec_tmp_table1, *exec_tmp_table2; THD *thd; - Item_sum **sum_funcs; + Item_sum **sum_funcs, ***sum_funcs_end; + /* second copy of sumfuncs (for queries with 2 temporary tables */ + Item_sum **sum_funcs2, ***sum_funcs_end2; Procedure *procedure; Item *having; + Item *tmp_having; // To store having when processed temporary table + Item *having_history; // Store having for explain uint select_options; select_result *result; TMP_TABLE_PARAM tmp_table_param; MYSQL_LOCK *lock; + // unit structure (with global parameters) for this select + SELECT_LEX_UNIT *unit; + // select that processed + SELECT_LEX *select_lex; + + JOIN *tmp_join; // copy of 
this JOIN to be used with temporary tables + ROLLUP rollup; // Used with rollup + + bool select_distinct; // Set if SELECT DISTINCT + + /* + simple_xxxxx is set if ORDER/GROUP BY doesn't include any references + to other tables than the first non-constant table in the JOIN. + It's also set if ORDER/GROUP BY is empty. + */ + bool simple_order, simple_group; + /* + Is set only in case if we have a GROUP BY clause + and no ORDER BY after constant elimination of 'order'. + */ + bool no_order; + /* Is set if we have a GROUP BY and we have ORDER BY on a constant. */ + bool skip_sort_order; + + bool need_tmp, hidden_group_fields, buffer_result; + DYNAMIC_ARRAY keyuse; + Item::cond_result cond_value; + List<Item> all_fields; // to store all fields that used in query + //Above list changed to use temporary table + List<Item> tmp_all_fields1, tmp_all_fields2, tmp_all_fields3; + //Part, shared with list above, emulate following list + List<Item> tmp_fields_list1, tmp_fields_list2, tmp_fields_list3; + List<Item> &fields_list; // hold field list passed to mysql_select + List<Item> procedure_fields_list; + int error; + + ORDER *order, *group_list, *proc_param; //hold parameters of mysql_select + COND *conds; // ---"--- + Item *conds_history; // store WHERE for explain + TABLE_LIST *tables_list; //hold 'tables' parameter of mysql_selec + SQL_SELECT *select; //created in optimisation phase + Item **ref_pointer_array; //used pointer reference for this select + // Copy of above to be used with different lists + Item **items0, **items1, **items2, **items3, **current_ref_pointer_array; + uint ref_pointer_array_size; // size of above in bytes + const char *zero_result_cause; // not 0 if exec must return zero result + + bool union_part; // this subselect is part of union + bool optimized; // flag to avoid double optimization in EXPLAIN + + JOIN(THD *thd_arg, List<Item> &fields_arg, ulong select_options_arg, + select_result *result_arg) + :fields_list(fields_arg) + { + init(thd_arg, fields_arg, select_options_arg, result_arg); + } + + JOIN(JOIN &join) + :fields_list(join.fields_list) + { + init(join.thd, join.fields_list, join.select_options, + join.result); + } + + void init(THD *thd_arg, List<Item> &fields_arg, ulong select_options_arg, + select_result *result_arg) + { + join_tab= join_tab_save= 0; + table= 0; + tables= 0; + const_tables= 0; + sort_and_group= 0; + first_record= 0; + do_send_rows= 1; + send_records= 0; + found_records= 0; + examined_rows= 0; + exec_tmp_table1= 0; + exec_tmp_table2= 0; + thd= thd_arg; + sum_funcs= sum_funcs2= 0; + procedure= 0; + having= tmp_having= having_history= 0; + select_options= select_options_arg; + result= result_arg; + lock= thd_arg->lock; + select_lex= 0; //for safety + tmp_join= 0; + select_distinct= test(select_options & SELECT_DISTINCT); + no_order= 0; + simple_order= 0; + simple_group= 0; + skip_sort_order= 0; + need_tmp= 0; + hidden_group_fields= 0; /*safety*/ + buffer_result= test(select_options & OPTION_BUFFER_RESULT) && + !test(select_options & OPTION_FOUND_ROWS); + all_fields= fields_arg; + fields_list= fields_arg; + error= 0; + select= 0; + ref_pointer_array= items0= items1= items2= items3= 0; + ref_pointer_array_size= 0; + zero_result_cause= 0; + optimized= 0; + + fields_list= fields_arg; + bzero((char*) &keyuse,sizeof(keyuse)); + tmp_table_param.init(); + tmp_table_param.end_write_records= HA_POS_ERROR; + rollup.state= ROLLUP::STATE_NONE; + } + + int prepare(Item ***rref_pointer_array, TABLE_LIST *tables, uint wind_num, + COND *conds, uint og_num, ORDER 
*order, ORDER *group, + Item *having, ORDER *proc_param, SELECT_LEX *select, + SELECT_LEX_UNIT *unit); + int optimize(); + int reinit(); + void exec(); + int cleanup(); + void restore_tmp(); + bool alloc_func_list(); + bool make_sum_func_list(List<Item> &all_fields, List<Item> &send_fields, + bool before_group_by); + + inline void set_items_ref_array(Item **ptr) + { + memcpy((char*) ref_pointer_array, (char*) ptr, ref_pointer_array_size); + current_ref_pointer_array= ptr; + } + inline void init_items_ref_array() + { + items0= ref_pointer_array + all_fields.elements; + memcpy(items0, ref_pointer_array, ref_pointer_array_size); + current_ref_pointer_array= items0; + } + + bool rollup_init(); + bool rollup_make_fields(List<Item> &all_fields, List<Item> &fields, + Item_sum ***func); + int rollup_send_data(uint idx); + int rollup_write_data(uint idx, TABLE *table); + bool test_in_subselect(Item **where); + void join_free(bool full); + void clear(); + bool save_join_tab(); + bool send_row_on_empty_set() + { + return (do_send_rows && tmp_table_param.sum_func_count != 0 && + !group_list); + } + int change_result(select_result *result); }; @@ -188,46 +338,49 @@ void TEST_join(JOIN *join); bool store_val_in_field(Field *field,Item *val); TABLE *create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, ORDER *group, bool distinct, bool save_sum_fields, - bool allow_distinct_limit, ulong select_options); + ulong select_options, ha_rows rows_limit, + char* alias); void free_tmp_table(THD *thd, TABLE *entry); void count_field_types(TMP_TABLE_PARAM *param, List<Item> &fields, bool reset_with_sum_func); -bool setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,List<Item> &fields); +bool setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param, + Item **ref_pointer_array, + List<Item> &new_list1, List<Item> &new_list2, + uint elements, List<Item> &fields); void copy_fields(TMP_TABLE_PARAM *param); void copy_funcs(Item **func_ptr); -bool create_myisam_from_heap(THD *Thd, TABLE *table, TMP_TABLE_PARAM *param, +bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param, int error, bool ignore_last_dupp_error); /* functions from opt_sum.cc */ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds); - /* class to copying an field/item to a key struct */ class store_key :public Sql_alloc { protected: Field *to_field; // Store data here - Field *key_field; // Copy of key field char *null_ptr; char err; public: + enum store_key_result { STORE_KEY_OK, STORE_KEY_FATAL, STORE_KEY_CONV }; store_key(THD *thd, Field *field_arg, char *ptr, char *null, uint length) :null_ptr(null),err(0) { if (field_arg->type() == FIELD_TYPE_BLOB) to_field=new Field_varstring(ptr, length, (uchar*) null, 1, Field::NONE, field_arg->field_name, - field_arg->table, field_arg->binary()); + field_arg->table, field_arg->charset()); else { - to_field=field_arg->new_field(&thd->mem_root,field_arg->table); + to_field=field_arg->new_field(thd->mem_root,field_arg->table); if (to_field) to_field->move_field(ptr, (uchar*) null, 1); } } virtual ~store_key() {} /* Not actually needed */ - virtual bool copy()=0; + virtual enum store_key_result copy()=0; virtual const char *name() const=0; }; @@ -248,10 +401,10 @@ class store_key_field: public store_key copy_field.set(to_field,from_field,0); } } - bool copy() + enum store_key_result copy() { copy_field.do_copy(©_field); - return err != 0; + return err != 0 ? 
STORE_KEY_FATAL : STORE_KEY_OK; } const char *name() const { return field_name; } }; @@ -268,9 +421,11 @@ public: null_ptr_arg ? null_ptr_arg : item_arg->maybe_null ? &err : NullS, length), item(item_arg) {} - bool copy() + enum store_key_result copy() { - return item->save_in_field(to_field, 1) || err != 0; + int res= item->save_in_field(to_field, 1); + return (err != 0 || res > 2 ? STORE_KEY_FATAL : (store_key_result) res); + } const char *name() const { return "func"; } }; @@ -288,18 +443,25 @@ public: &err : NullS, length, item_arg), inited(0) { } - bool copy() + enum store_key_result copy() { + int res; if (!inited) { inited=1; - if (item->save_in_field(to_field, 1)) - err= 1; + if ((res= item->save_in_field(to_field, 1))) + { + if (!err) + err= res; + } } - return err != 0; + return (err > 2 ? STORE_KEY_FATAL : (store_key_result) err); } const char *name() const { return "const"; } }; -bool cp_buffer_from_ref(TABLE_REF *ref); +bool cp_buffer_from_ref(THD *thd, TABLE_REF *ref); bool error_if_full_join(JOIN *join); +int report_error(TABLE *table, int error); +int safe_index_read(JOIN_TAB *tab); +COND *remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value); diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 27246729162..268292022e4 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -1,4 +1,4 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2004 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -19,7 +19,6 @@ #include "mysql_priv.h" #include "sql_select.h" // For select_describe -#include "sql_acl.h" #include "repl_failsafe.h" #include <my_dir.h> @@ -31,32 +30,30 @@ static const char *grant_names[]={ "select","insert","update","delete","create","drop","reload","shutdown", "process","file","grant","references","index","alter"}; +#ifndef NO_EMBEDDED_ACCESS_CHECKS static TYPELIB grant_types = { sizeof(grant_names)/sizeof(char **), "grant_types", - grant_names}; + grant_names, NULL}; +#endif static int store_create_info(THD *thd, TABLE *table, String *packet); -static void -append_identifier(THD *thd, String *packet, const char *name); - -extern struct st_VioSSLAcceptorFd * ssl_acceptor_fd; - -/**************************************************************************** -** Send list of databases -** A database is a directory in the mysql_data_home directory -****************************************************************************/ +/* + Report list of databases + A database is a directory in the mysql_data_home directory +*/ int mysqld_show_dbs(THD *thd,const char *wild) { - Item_string *field=new Item_string("",0); + Item_string *field=new Item_string("",0,thd->charset()); List<Item> field_list; char *end; List<char> files; char *file_name; + Protocol *protocol= thd->protocol; DBUG_ENTER("mysqld_show_dbs"); field->name=(char*) thd->alloc(20+ (wild ? 
(uint) strlen(wild)+4: 0)); @@ -66,64 +63,66 @@ mysqld_show_dbs(THD *thd,const char *wild) strxmov(end," (",wild,")",NullS); field_list.push_back(field); - if (send_fields(thd,field_list,1)) + if (protocol->send_fields(&field_list,1)) DBUG_RETURN(1); if (mysql_find_files(thd,&files,NullS,mysql_data_home,wild,1)) DBUG_RETURN(1); List_iterator_fast<char> it(files); + while ((file_name=it++)) { +#ifndef NO_EMBEDDED_ACCESS_CHECKS if (thd->master_access & (DB_ACLS | SHOW_DB_ACL) || - acl_get(thd->host, thd->ip, (char*) &thd->remote.sin_addr, - thd->priv_user, file_name, 0) || + acl_get(thd->host, thd->ip, thd->priv_user, file_name,0) || (grant_option && !check_grant_db(thd, file_name))) +#endif { - thd->packet.length(0); - net_store_data(&thd->packet, thd->variables.convert_set, file_name); - if (my_net_write(&thd->net, (char*) thd->packet.ptr(), - thd->packet.length())) + protocol->prepare_for_resend(); + protocol->store(file_name, system_charset_info); + if (protocol->write()) DBUG_RETURN(-1); } } - send_eof(&thd->net); + send_eof(thd); DBUG_RETURN(0); } + /*************************************************************************** -** List all open tables in a database + List all open tables in a database ***************************************************************************/ int mysqld_show_open_tables(THD *thd,const char *wild) { List<Item> field_list; OPEN_TABLE_LIST *open_list; - CONVERT *convert=thd->variables.convert_set; + Protocol *protocol= thd->protocol; DBUG_ENTER("mysqld_show_open_tables"); field_list.push_back(new Item_empty_string("Database",NAME_LEN)); field_list.push_back(new Item_empty_string("Table",NAME_LEN)); - field_list.push_back(new Item_int("In_use",0, 4)); - field_list.push_back(new Item_int("Name_locked",0, 4)); + field_list.push_back(new Item_return_int("In_use", 1, MYSQL_TYPE_TINY)); + field_list.push_back(new Item_return_int("Name_locked", 4, MYSQL_TYPE_TINY)); - if (send_fields(thd,field_list,1)) + if (protocol->send_fields(&field_list,1)) DBUG_RETURN(1); - if (!(open_list=list_open_tables(thd,wild)) && thd->fatal_error) + if (!(open_list=list_open_tables(thd,wild)) && thd->is_fatal_error) DBUG_RETURN(-1); for (; open_list ; open_list=open_list->next) { - thd->packet.length(0); - net_store_data(&thd->packet,convert, open_list->db); - net_store_data(&thd->packet,convert, open_list->table); - net_store_data(&thd->packet,open_list->in_use); - net_store_data(&thd->packet,open_list->locked); - if (my_net_write(&thd->net,(char*) thd->packet.ptr(),thd->packet.length())) + protocol->prepare_for_resend(); + protocol->store(open_list->db, system_charset_info); + protocol->store(open_list->table, system_charset_info); + protocol->store_tiny((longlong) open_list->in_use); + protocol->store_tiny((longlong) open_list->locked); + if (protocol->write()) { DBUG_RETURN(-1); } } - send_eof(&thd->net); + send_eof(thd); DBUG_RETURN(0); } @@ -135,14 +134,16 @@ int mysqld_show_open_tables(THD *thd,const char *wild) int mysqld_show_tables(THD *thd,const char *db,const char *wild) { - Item_string *field=new Item_string("",0); + Item_string *field=new Item_string("",0,thd->charset()); List<Item> field_list; char path[FN_LEN],*end; List<char> files; char *file_name; + Protocol *protocol= thd->protocol; DBUG_ENTER("mysqld_show_tables"); - field->name=(char*) thd->alloc(20+(uint) strlen(db)+(wild ? (uint) strlen(wild)+4:0)); + field->name=(char*) thd->alloc(20+(uint) strlen(db)+ + (wild ? 
(uint) strlen(wild)+4:0)); end=strxmov(field->name,"Tables_in_",db,NullS); if (wild && wild[0]) strxmov(end," (",wild,")",NullS); @@ -150,19 +151,208 @@ int mysqld_show_tables(THD *thd,const char *db,const char *wild) (void) sprintf(path,"%s/%s",mysql_data_home,db); (void) unpack_dirname(path,path); field_list.push_back(field); - if (send_fields(thd,field_list,1)) + if (protocol->send_fields(&field_list,1)) DBUG_RETURN(1); if (mysql_find_files(thd,&files,db,path,wild,0)) DBUG_RETURN(-1); List_iterator_fast<char> it(files); while ((file_name=it++)) { - thd->packet.length(0); - net_store_data(&thd->packet, thd->variables.convert_set, file_name); - if (my_net_write(&thd->net,(char*) thd->packet.ptr(),thd->packet.length())) + protocol->prepare_for_resend(); + protocol->store(file_name, system_charset_info); + if (protocol->write()) + DBUG_RETURN(-1); + } + send_eof(thd); + DBUG_RETURN(0); +} + +/*************************************************************************** +** List all table types supported +***************************************************************************/ + +int mysqld_show_storage_engines(THD *thd) +{ + List<Item> field_list; + Protocol *protocol= thd->protocol; + DBUG_ENTER("mysqld_show_storage_engines"); + + field_list.push_back(new Item_empty_string("Engine",10)); + field_list.push_back(new Item_empty_string("Support",10)); + field_list.push_back(new Item_empty_string("Comment",80)); + + if (protocol->send_fields(&field_list,1)) + DBUG_RETURN(1); + + const char *default_type_name= + ha_get_storage_engine((enum db_type)thd->variables.table_type); + + show_table_type_st *types; + for (types= sys_table_types; types->type; types++) + { + protocol->prepare_for_resend(); + protocol->store(types->type, system_charset_info); + const char *option_name= show_comp_option_name[(int) *types->value]; + + if (*types->value == SHOW_OPTION_YES && + !my_strcasecmp(system_charset_info, default_type_name, types->type)) + option_name= "DEFAULT"; + protocol->store(option_name, system_charset_info); + protocol->store(types->comment, system_charset_info); + if (protocol->write()) + DBUG_RETURN(-1); + } + send_eof(thd); + DBUG_RETURN(0); +} + + +/*************************************************************************** + List all privileges supported +***************************************************************************/ + +struct show_privileges_st { + const char *privilege; + const char *context; + const char *comment; +}; + +static struct show_privileges_st sys_privileges[]= +{ + {"Alter", "Tables", "To alter the table"}, + {"Create temporary tables","Databases","To use CREATE TEMPORARY TABLE"}, + {"Create", "Databases,Tables,Indexes", "To create new databases and tables"}, + {"Delete", "Tables", "To delete existing rows"}, + {"Drop", "Databases,Tables", "To drop databases and tables"}, + {"File", "File access on server", "To read and write files on the server"}, + {"Grant option", "Databases,Tables", "To give to other users those privileges you possess"}, + {"Index", "Tables", "To create or drop indexes"}, + {"Insert", "Tables", "To insert data into tables"}, + {"Lock tables","Databases","To use LOCK TABLES (together with SELECT privilege)"}, + {"Process", "Server Admin", "To view the plain text of currently executing queries"}, + {"References", "Databases,Tables", "To have references on tables"}, + {"Reload", "Server Admin", "To reload or refresh tables, logs and privileges"}, + {"Replication client","Server Admin","To ask where the slave or master servers are"}, + 
{"Replication slave","Server Admin","To read binary log events from the master"}, + {"Select", "Tables", "To retrieve rows from table"}, + {"Show databases","Server Admin","To see all databases with SHOW DATABASES"}, + {"Shutdown","Server Admin", "To shutdown the server"}, + {"Super","Server Admin","To use KILL thread, SET GLOBAL, CHANGE MASTER, etc."}, + {"Update", "Tables", "To update existing rows"}, + {"Usage","Server Admin","No privileges - allow connect only"}, + {NullS, NullS, NullS} +}; + +int mysqld_show_privileges(THD *thd) +{ + List<Item> field_list; + Protocol *protocol= thd->protocol; + DBUG_ENTER("mysqld_show_privileges"); + + field_list.push_back(new Item_empty_string("Privilege",10)); + field_list.push_back(new Item_empty_string("Context",15)); + field_list.push_back(new Item_empty_string("Comment",NAME_LEN)); + + if (protocol->send_fields(&field_list,1)) + DBUG_RETURN(1); + + show_privileges_st *privilege= sys_privileges; + for (privilege= sys_privileges; privilege->privilege ; privilege++) + { + protocol->prepare_for_resend(); + protocol->store(privilege->privilege, system_charset_info); + protocol->store(privilege->context, system_charset_info); + protocol->store(privilege->comment, system_charset_info); + if (protocol->write()) + DBUG_RETURN(-1); + } + send_eof(thd); + DBUG_RETURN(0); +} + + +/*************************************************************************** + List all column types +***************************************************************************/ + +struct show_column_type_st +{ + const char *type; + uint size; + const char *min_value; + const char *max_value; + uint precision; + uint scale; + const char *nullable; + const char *auto_increment; + const char *unsigned_attr; + const char *zerofill; + const char *searchable; + const char *case_sensitivity; + const char *default_value; + const char *comment; +}; + +/* TODO: Add remaning types */ + +static struct show_column_type_st sys_column_types[]= +{ + {"tinyint", + 1, "-128", "127", 0, 0, "YES", "YES", + "NO", "YES", "YES", "NO", "NULL,0", + "A very small integer"}, + {"tinyint unsigned", + 1, "0" , "255", 0, 0, "YES", "YES", + "YES", "YES", "YES", "NO", "NULL,0", + "A very small integer"}, +}; + +int mysqld_show_column_types(THD *thd) +{ + List<Item> field_list; + Protocol *protocol= thd->protocol; + DBUG_ENTER("mysqld_show_column_types"); + + field_list.push_back(new Item_empty_string("Type",30)); + field_list.push_back(new Item_int("Size",(longlong) 1,21)); + field_list.push_back(new Item_empty_string("Min_Value",20)); + field_list.push_back(new Item_empty_string("Max_Value",20)); + field_list.push_back(new Item_return_int("Prec", 4, MYSQL_TYPE_SHORT)); + field_list.push_back(new Item_return_int("Scale", 4, MYSQL_TYPE_SHORT)); + field_list.push_back(new Item_empty_string("Nullable",4)); + field_list.push_back(new Item_empty_string("Auto_Increment",4)); + field_list.push_back(new Item_empty_string("Unsigned",4)); + field_list.push_back(new Item_empty_string("Zerofill",4)); + field_list.push_back(new Item_empty_string("Searchable",4)); + field_list.push_back(new Item_empty_string("Case_Sensitive",4)); + field_list.push_back(new Item_empty_string("Default",NAME_LEN)); + field_list.push_back(new Item_empty_string("Comment",NAME_LEN)); + + if (protocol->send_fields(&field_list,1)) + DBUG_RETURN(1); + + /* TODO: Change the loop to not use 'i' */ + for (uint i=0; i < sizeof(sys_column_types)/sizeof(sys_column_types[0]); i++) + { + protocol->prepare_for_resend(); + 
protocol->store(sys_column_types[i].type, system_charset_info); + protocol->store((ulonglong) sys_column_types[i].size); + protocol->store(sys_column_types[i].min_value, system_charset_info); + protocol->store(sys_column_types[i].max_value, system_charset_info); + protocol->store_short((longlong) sys_column_types[i].precision); + protocol->store_short((longlong) sys_column_types[i].scale); + protocol->store(sys_column_types[i].nullable, system_charset_info); + protocol->store(sys_column_types[i].auto_increment, system_charset_info); + protocol->store(sys_column_types[i].unsigned_attr, system_charset_info); + protocol->store(sys_column_types[i].zerofill, system_charset_info); + protocol->store(sys_column_types[i].searchable, system_charset_info); + protocol->store(sys_column_types[i].case_sensitivity, system_charset_info); + protocol->store(sys_column_types[i].default_value, system_charset_info); + protocol->store(sys_column_types[i].comment, system_charset_info); + if (protocol->write()) DBUG_RETURN(-1); } - send_eof(&thd->net); + send_eof(thd); DBUG_RETURN(0); } @@ -175,12 +365,15 @@ mysql_find_files(THD *thd,List<char> *files, const char *db,const char *path, char *ext; MY_DIR *dirp; FILEINFO *file; +#ifndef NO_EMBEDDED_ACCESS_CHECKS uint col_access=thd->col_access; +#endif TABLE_LIST table_list; DBUG_ENTER("mysql_find_files"); if (wild && !wild[0]) wild=0; + bzero((char*) &table_list,sizeof(table_list)); if (!(dirp = my_dir(path,MYF(MY_WME | (dir ? MY_WANT_STAT : 0))))) @@ -208,13 +401,13 @@ mysql_find_files(THD *thd,List<char> *files, const char *db,const char *path, } #endif if (file->name[0] == '.' || !MY_S_ISDIR(file->mystat->st_mode) || - (wild && wild_compare(file->name,wild, 0))) + (wild && wild_compare(file->name,wild,0))) continue; } else { // Return only .frm files which aren't temp files. 
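The filtering this hunk adjusts keeps only directory entries whose extension equals reg_ext (compared case-insensitively, now against system_charset_info) and whose name does not carry the temporary-file prefix, then strips the extension before the name is pushed onto the result list. A minimal standalone sketch of that selection logic using std::filesystem; find_table_files, iequals and the example values for reg_ext and tmp_prefix are illustrative assumptions, not server code:

// Illustrative sketch of the ".frm, not a temp file" filter described above;
// it mirrors the idea of mysql_find_files(), not its implementation.
#include <cctype>
#include <cstddef>
#include <filesystem>
#include <string>
#include <vector>

static bool iequals(const std::string &a, const std::string &b)
{
  if (a.size() != b.size())
    return false;
  for (std::size_t i= 0; i < a.size(); i++)
    if (std::tolower((unsigned char) a[i]) != std::tolower((unsigned char) b[i]))
      return false;
  return true;
}

// Return base names of regular files in 'dir' whose extension matches
// 'reg_ext' case-insensitively and that do not start with 'tmp_prefix'.
std::vector<std::string> find_table_files(const std::filesystem::path &dir,
                                          const std::string &reg_ext,    // e.g. ".frm"
                                          const std::string &tmp_prefix) // e.g. "#sql"
{
  std::vector<std::string> names;
  for (const auto &entry : std::filesystem::directory_iterator(dir))
  {
    if (!entry.is_regular_file())
      continue;
    const std::string name= entry.path().filename().string();
    if (name.compare(0, tmp_prefix.size(), tmp_prefix) == 0)
      continue;                                      // skip temporary files
    if (!iequals(entry.path().extension().string(), reg_ext))
      continue;                                      // keep only .frm files
    names.push_back(entry.path().stem().string());   // strip the extension
  }
  return names;
}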
- if (my_strcasecmp(ext=fn_ext(file->name),reg_ext) || + if (my_strcasecmp(system_charset_info, ext=fn_ext(file->name),reg_ext) || is_prefix(file->name,tmp_file_prefix)) continue; *ext=0; @@ -222,22 +415,24 @@ mysql_find_files(THD *thd,List<char> *files, const char *db,const char *path, { if (lower_case_table_names) { - if (wild_case_compare(file->name,wild)) + if (wild_case_compare(files_charset_info, file->name, wild)) continue; } - else if (wild_compare(file->name,wild, 0)) + else if (wild_compare(file->name,wild,0)) continue; } } +#ifndef NO_EMBEDDED_ACCESS_CHECKS /* Don't show tables where we don't have any privileges */ if (db && !(col_access & TABLE_ACLS)) { table_list.db= (char*) db; table_list.real_name=file->name; table_list.grant.privilege=col_access; - if (check_grant(thd,TABLE_ACLS,&table_list,1,1)) + if (check_grant(thd, TABLE_ACLS, &table_list, 1, UINT_MAX, 1)) continue; } +#endif if (files->push_back(thd->strdup(file->name))) { my_dirend(dirp); @@ -246,11 +441,15 @@ mysql_find_files(THD *thd,List<char> *files, const char *db,const char *path, } DBUG_PRINT("info",("found: %d files", files->elements)); my_dirend(dirp); + + VOID(ha_find_files(thd,db,path,wild,dir,files)); + DBUG_RETURN(0); } + /*************************************************************************** -** Extended version of mysqld_show_tables + Extended version of mysqld_show_tables ***************************************************************************/ int mysqld_extend_show_tables(THD *thd,const char *db,const char *wild) @@ -261,16 +460,17 @@ int mysqld_extend_show_tables(THD *thd,const char *db,const char *wild) char path[FN_LEN]; char *file_name; TABLE *table; - String *packet= &thd->packet; - CONVERT *convert=thd->variables.convert_set; + Protocol *protocol= thd->protocol; + TIME time; + int res= 0; DBUG_ENTER("mysqld_extend_show_tables"); (void) sprintf(path,"%s/%s",mysql_data_home,db); (void) unpack_dirname(path,path); - field_list.push_back(item=new Item_empty_string("Name",NAME_LEN)); + field_list.push_back(item=new Item_empty_string("Engine",10)); item->maybe_null=1; - field_list.push_back(item=new Item_empty_string("Type",10)); + field_list.push_back(item=new Item_int("Version", (longlong) 0, 21)); item->maybe_null=1; field_list.push_back(item=new Item_empty_string("Row_format",10)); item->maybe_null=1; @@ -294,11 +494,15 @@ int mysqld_extend_show_tables(THD *thd,const char *db,const char *wild) item->maybe_null=1; field_list.push_back(item=new Item_datetime("Check_time")); item->maybe_null=1; + field_list.push_back(item=new Item_empty_string("Collation",32)); + item->maybe_null=1; + field_list.push_back(item=new Item_int("Checksum",(longlong) 1,21)); + item->maybe_null=1; field_list.push_back(item=new Item_empty_string("Create_options",255)); item->maybe_null=1; field_list.push_back(item=new Item_empty_string("Comment",80)); item->maybe_null=1; - if (send_fields(thd,field_list,1)) + if (protocol->send_fields(&field_list,1)) DBUG_RETURN(1); if (mysql_find_files(thd,&files,db,path,wild,0)) @@ -308,70 +512,78 @@ int mysqld_extend_show_tables(THD *thd,const char *db,const char *wild) { TABLE_LIST table_list; bzero((char*) &table_list,sizeof(table_list)); - packet->length(0); - net_store_data(packet,convert, file_name); + protocol->prepare_for_resend(); + protocol->store(file_name, system_charset_info); table_list.db=(char*) db; table_list.real_name= table_list.alias= file_name; if (lower_case_table_names) - casedn_str(file_name); + my_casedn_str(files_charset_info, file_name); if (!(table = 
open_ltable(thd, &table_list, TL_READ))) { for (uint i=2 ; i < field_list.elements ; i++) - net_store_null(packet); - net_store_data(packet,convert, thd->net.last_error); - thd->net.last_error[0]=0; + protocol->store_null(); + // Send error to Comment field + protocol->store(thd->net.last_error, system_charset_info); + thd->clear_error(); } else { - struct tm tm_tmp; + const char *str; handler *file=table->file; file->info(HA_STATUS_VARIABLE | HA_STATUS_TIME | HA_STATUS_NO_LOCK); - net_store_data(packet, convert, file->table_type()); - net_store_data(packet, convert, - (table->db_options_in_use & HA_OPTION_COMPRESS_RECORD) ? - "Compressed" : - (table->db_options_in_use & HA_OPTION_PACK_RECORD) ? - "Dynamic" : "Fixed"); - net_store_data(packet, (longlong) file->records); - net_store_data(packet, (uint32) file->mean_rec_length); - net_store_data(packet, (longlong) file->data_file_length); + protocol->store(file->table_type(), system_charset_info); + protocol->store((ulonglong) table->frm_version); + str= ((table->db_options_in_use & HA_OPTION_COMPRESS_RECORD) ? + "Compressed" : + (table->db_options_in_use & HA_OPTION_PACK_RECORD) ? + "Dynamic" : "Fixed"); + protocol->store(str, system_charset_info); + protocol->store((ulonglong) file->records); + protocol->store((ulonglong) file->mean_rec_length); + protocol->store((ulonglong) file->data_file_length); if (file->max_data_file_length) - net_store_data(packet, (longlong) file->max_data_file_length); + protocol->store((ulonglong) file->max_data_file_length); else - net_store_null(packet); - net_store_data(packet, (longlong) file->index_file_length); - net_store_data(packet, (longlong) file->delete_length); + protocol->store_null(); + protocol->store((ulonglong) file->index_file_length); + protocol->store((ulonglong) file->delete_length); if (table->found_next_number_field) { table->next_number_field=table->found_next_number_field; table->next_number_field->reset(); file->update_auto_increment(); - net_store_data(packet, table->next_number_field->val_int()); + protocol->store(table->next_number_field->val_int()); table->next_number_field=0; } else - net_store_null(packet); + protocol->store_null(); if (!file->create_time) - net_store_null(packet); + protocol->store_null(); else { - localtime_r(&file->create_time,&tm_tmp); - net_store_data(packet, &tm_tmp); + thd->variables.time_zone->gmt_sec_to_TIME(&time, file->create_time); + protocol->store(&time); } if (!file->update_time) - net_store_null(packet); + protocol->store_null(); else { - localtime_r(&file->update_time,&tm_tmp); - net_store_data(packet, &tm_tmp); + thd->variables.time_zone->gmt_sec_to_TIME(&time, file->update_time); + protocol->store(&time); } if (!file->check_time) - net_store_null(packet); + protocol->store_null(); else { - localtime_r(&file->check_time,&tm_tmp); - net_store_data(packet, &tm_tmp); + thd->variables.time_zone->gmt_sec_to_TIME(&time, file->check_time); + protocol->store(&time); } + str= (table->table_charset ? table->table_charset->name : "default"); + protocol->store(str, system_charset_info); + if (file->table_flags() & HA_HAS_CHECKSUM) + protocol->store((ulonglong)file->checksum()); + else + protocol->store_null(); // Checksum { char option_buff[350],*ptr; ptr=option_buff; @@ -408,27 +620,31 @@ int mysqld_extend_show_tables(THD *thd,const char *db,const char *wild) my_raid_type(file->raid_type), file->raid_chunks, file->raid_chunksize/RAID_BLOCK_SIZE); ptr=strmov(ptr,buff); } - net_store_data(packet, convert, option_buff+1, - (ptr == option_buff ? 
0 : (uint) (ptr-option_buff)-1)); + protocol->store(option_buff+1, + (ptr == option_buff ? 0 : (uint) (ptr-option_buff)-1) + , system_charset_info); } { char *comment=table->file->update_table_comment(table->comment); - net_store_data(packet, comment); + protocol->store(comment, system_charset_info); if (comment != table->comment) my_free(comment,MYF(0)); } close_thread_tables(thd,0); } - if (my_net_write(&thd->net,(char*) packet->ptr(), - packet->length())) - DBUG_RETURN(-1); + if (protocol->write()) + { + res= -1; + break; + } } - send_eof(&thd->net); - DBUG_RETURN(0); + thd->insert_id(0); + if (!res) + send_eof(thd); + DBUG_RETURN(res); } - /*************************************************************************** ** List all columns in a table_list->real_name ***************************************************************************/ @@ -440,105 +656,129 @@ mysqld_show_fields(THD *thd, TABLE_LIST *table_list,const char *wild, TABLE *table; handler *file; char tmp[MAX_FIELD_WIDTH]; + char tmp1[MAX_FIELD_WIDTH]; Item *item; - CONVERT *convert=thd->variables.convert_set; + Protocol *protocol= thd->protocol; DBUG_ENTER("mysqld_show_fields"); DBUG_PRINT("enter",("db: %s table: %s",table_list->db, table_list->real_name)); if (!(table = open_ltable(thd, table_list, TL_UNLOCK))) { - send_error(&thd->net); + send_error(thd); DBUG_RETURN(1); } file=table->file; file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK); +#ifndef NO_EMBEDDED_ACCESS_CHECKS (void) get_table_grant(thd, table_list); - +#endif List<Item> field_list; field_list.push_back(new Item_empty_string("Field",NAME_LEN)); - field_list.push_back(new Item_empty_string("Type",40)); + field_list.push_back(new Item_empty_string("Type", 40)); + if (verbose) + field_list.push_back(new Item_empty_string("Collation",40)); field_list.push_back(new Item_empty_string("Null",1)); field_list.push_back(new Item_empty_string("Key",3)); field_list.push_back(item=new Item_empty_string("Default",NAME_LEN)); item->maybe_null=1; field_list.push_back(new Item_empty_string("Extra",20)); if (verbose) - field_list.push_back(new Item_empty_string("Privileges",80)); - - // Send first number of fields and records { - char *pos; - pos=net_store_length(tmp, (uint) field_list.elements); - pos=net_store_length(pos,(ulonglong) file->records); - (void) my_net_write(&thd->net,tmp,(uint) (pos-tmp)); + field_list.push_back(new Item_empty_string("Privileges",80)); + field_list.push_back(new Item_empty_string("Comment",255)); } - - if (send_fields(thd,field_list,0)) + // Send first number of fields and records + if (protocol->send_records_num(&field_list, (ulonglong)file->records) || + protocol->send_fields(&field_list,0)) DBUG_RETURN(1); - restore_record(table,2); // Get empty record + restore_record(table,default_values); // Get empty record Field **ptr,*field; - String *packet= &thd->packet; for (ptr=table->field; (field= *ptr) ; ptr++) { - if (!wild || !wild[0] || !wild_case_compare(field->field_name,wild)) + if (!wild || !wild[0] || + !wild_case_compare(system_charset_info, field->field_name,wild)) { { byte *pos; uint flags=field->flags; - String type(tmp,sizeof(tmp)); + String type(tmp,sizeof(tmp), system_charset_info); +#ifndef NO_EMBEDDED_ACCESS_CHECKS uint col_access; - bool null_default_value=0; - - packet->length(0); - net_store_data(packet,convert,field->field_name); +#endif + protocol->prepare_for_resend(); + protocol->store(field->field_name, system_charset_info); field->sql_type(type); - net_store_data(packet,convert,type.ptr(),type.length()); - + 
protocol->store(type.ptr(), type.length(), system_charset_info); + if (verbose) + protocol->store(field->has_charset() ? field->charset()->name : "NULL", + system_charset_info); /* - Altough TIMESTAMP fields can't contain NULL as its value they + Even if TIMESTAMP field can't contain NULL as its value it will accept NULL if you will try to insert such value and will - convert it to current TIMESTAMP. So YES here means that NULL - is allowed for assignment but can't be returned. + convert NULL value to current TIMESTAMP. So YES here means + that NULL is allowed for assignment (but may be won't be + returned). */ pos=(byte*) ((flags & NOT_NULL_FLAG) && field->type() != FIELD_TYPE_TIMESTAMP ? "" : "YES"); - net_store_data(packet,convert,(const char*) pos); + protocol->store((const char*) pos, system_charset_info); pos=(byte*) ((field->flags & PRI_KEY_FLAG) ? "PRI" : (field->flags & UNIQUE_KEY_FLAG) ? "UNI" : (field->flags & MULTIPLE_KEY_FLAG) ? "MUL":""); - net_store_data(packet,convert,(char*) pos); + protocol->store((char*) pos, system_charset_info); - /* - We handle first TIMESTAMP column in special way because its - default value is ignored and current timestamp used instead. - */ - if (table->timestamp_field == field || - field->unireg_check == Field::NEXT_NUMBER) - null_default_value=1; - if (!null_default_value && !field->is_null()) + if (table->timestamp_field == field && + field->unireg_check != Field::TIMESTAMP_UN_FIELD) + { + /* + We have NOW() as default value but we use CURRENT_TIMESTAMP form + because it is more SQL standard comatible + */ + protocol->store("CURRENT_TIMESTAMP", system_charset_info); + } + else if (field->unireg_check != Field::NEXT_NUMBER && + !field->is_null()) { // Not null by default - type.set(tmp,sizeof(tmp)); - field->val_str(&type,&type); - net_store_data(packet,convert,type.ptr(),type.length()); + /* + Note: we have to convert the default value into + system_charset_info before sending. + This is necessary for "SET NAMES binary": + If the client character set is binary, we want to + send metadata in UTF8 rather than in the column's + character set. + This conversion also makes "SHOW COLUMNS" and + "SHOW CREATE TABLE" output consistent. Without + this conversion the default values were displayed + differently. 
+ */ + String def(tmp1,sizeof(tmp1), system_charset_info); + type.set(tmp, sizeof(tmp), field->charset()); + field->val_str(&type); + uint dummy_errors; + def.copy(type.ptr(), type.length(), type.charset(), + system_charset_info, &dummy_errors); + protocol->store(def.ptr(), def.length(), def.charset()); } - else if (field->maybe_null() || null_default_value) - net_store_null(packet); // Null as default + else if (field->unireg_check == Field::NEXT_NUMBER || + field->maybe_null()) + protocol->store_null(); // Null as default else - net_store_data(packet,convert,tmp,0); + protocol->store("",0, system_charset_info); // empty string char *end=tmp; if (field->unireg_check == Field::NEXT_NUMBER) end=strmov(tmp,"auto_increment"); - net_store_data(packet,convert,tmp,(uint) (end-tmp)); + protocol->store(tmp,(uint) (end-tmp), system_charset_info); if (verbose) { - /* Add grant options */ - col_access= get_column_grant(thd,table_list,field) & COL_ACLS; + /* Add grant options & comments */ end=tmp; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + col_access= get_column_grant(thd,table_list,field) & COL_ACLS; for (uint bitnr=0; col_access ; col_access>>=1,bitnr++) { if (col_access & 1) @@ -547,22 +787,31 @@ mysqld_show_fields(THD *thd, TABLE_LIST *table_list,const char *wild, end=strmov(end,grant_types.type_names[bitnr]); } } - net_store_data(packet,convert, tmp+1,end == tmp ? 0 : (uint) (end-tmp-1)); +#else + end=strmov(end,""); +#endif + protocol->store(tmp+1,end == tmp ? 0 : (uint) (end-tmp-1), + system_charset_info); + protocol->store(field->comment.str, field->comment.length, + system_charset_info); } - if (my_net_write(&thd->net,(char*) packet->ptr(),packet->length())) + if (protocol->write()) DBUG_RETURN(1); } } } - send_eof(&thd->net); + send_eof(thd); DBUG_RETURN(0); } + int mysqld_show_create(THD *thd, TABLE_LIST *table_list) { TABLE *table; - CONVERT *convert=thd->variables.convert_set; + Protocol *protocol= thd->protocol; + char buff[2048]; + String buffer(buff, sizeof(buff), system_charset_info); DBUG_ENTER("mysqld_show_create"); DBUG_PRINT("enter",("db: %s table: %s",table_list->db, table_list->real_name)); @@ -570,77 +819,142 @@ mysqld_show_create(THD *thd, TABLE_LIST *table_list) /* Only one table for now */ if (!(table = open_ltable(thd, table_list, TL_UNLOCK))) { - send_error(&thd->net); + send_error(thd); DBUG_RETURN(1); } - char buff[1024]; - String packet(buff,sizeof(buff)); - packet.length(0); - net_store_data(&packet,convert, table->table_name); - /* - A hack - we need to reserve some space for the length before - we know what it is - let's assume that the length of create table - statement will fit into 3 bytes ( 16 MB max :-) ) - */ - ulong store_len_offset = packet.length(); - packet.length(store_len_offset + 4); - if (store_create_info(thd, table, &packet)) + buffer.length(0); + if (store_create_info(thd, table, &buffer)) DBUG_RETURN(-1); - ulong create_len = packet.length() - store_len_offset - 4; - if (create_len > 0x00ffffff) // better readable in HEX ... 
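The code being removed around this point implemented the old way of returning SHOW CREATE TABLE: reserve four bytes in the packet, build the statement, then back-patch a length-coded prefix, a 253 marker byte followed by the length in three little-endian bytes, which is why the 0x00ffffff (16 MB) cap is checked just above; the back-patch itself is removed a few lines further down. A small self-contained sketch of that prefix encoding; encode_lcb3() is an illustrative helper, not the server's int3store/net_store_data API:

// Sketch of the 3-byte length-coded prefix used by the removed code path.
#include <cstddef>
#include <stdexcept>
#include <string>

std::string encode_lcb3(const std::string &payload)
{
  const std::size_t len= payload.size();
  if (len > 0x00ffffff)                        // only three length bytes available
    throw std::length_error("payload larger than 16 MB - 1");

  std::string out;
  out.reserve(len + 4);
  out.push_back(static_cast<char>(253));            // marker: length stored in 3 bytes
  out.push_back(static_cast<char>(len & 0xff));     // least significant byte first
  out.push_back(static_cast<char>((len >> 8) & 0xff));
  out.push_back(static_cast<char>((len >> 16) & 0xff));
  out+= payload;                                     // the CREATE TABLE text itself
  return out;
}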
+ + List<Item> field_list; + field_list.push_back(new Item_empty_string("Table",NAME_LEN)); + // 1024 is for not to confuse old clients + field_list.push_back(new Item_empty_string("Create Table", + max(buffer.length(),1024))); + + if (protocol->send_fields(&field_list, 1)) + DBUG_RETURN(1); + protocol->prepare_for_resend(); + protocol->store(table->table_name, system_charset_info); + protocol->store(buffer.ptr(), buffer.length(), buffer.charset()); + if (protocol->write()) + DBUG_RETURN(1); + send_eof(thd); + DBUG_RETURN(0); +} + +int mysqld_show_create_db(THD *thd, char *dbname, + HA_CREATE_INFO *create_info) +{ + int length; + char path[FN_REFLEN]; + char buff[2048]; + String buffer(buff, sizeof(buff), system_charset_info); +#ifndef NO_EMBEDDED_ACCESS_CHECKS + uint db_access; +#endif + bool found_libchar; + HA_CREATE_INFO create; + uint create_options = create_info ? create_info->options : 0; + Protocol *protocol=thd->protocol; + DBUG_ENTER("mysql_show_create_db"); + + if (check_db_name(dbname)) { - /* - Just in case somebody manages to create a table - with *that* much stuff in the definition - */ + net_printf(thd,ER_WRONG_DB_NAME, dbname); DBUG_RETURN(1); } - /* - Now we have to store the length in three bytes, even if it would fit - into fewer bytes, so we cannot use net_store_data() anymore, - and do it ourselves - */ - char* p = (char*)packet.ptr() + store_len_offset; - *p++ = (char) 253; // The client the length is stored using 3-bytes - int3store(p, create_len); +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (test_all_bits(thd->master_access,DB_ACLS)) + db_access=DB_ACLS; + else + db_access= (acl_get(thd->host,thd->ip, thd->priv_user,dbname,0) | + thd->master_access); + if (!(db_access & DB_ACLS) && (!grant_option || check_grant_db(thd,dbname))) + { + net_printf(thd,ER_DBACCESS_DENIED_ERROR, + thd->priv_user, thd->host_or_ip, dbname); + mysql_log.write(thd,COM_INIT_DB,ER(ER_DBACCESS_DENIED_ERROR), + thd->priv_user, thd->host_or_ip, dbname); + DBUG_RETURN(1); + } +#endif + + (void) sprintf(path,"%s/%s",mysql_data_home, dbname); + length=unpack_dirname(path,path); // Convert if not unix + found_libchar= 0; + if (length && path[length-1] == FN_LIBCHAR) + { + found_libchar= 1; + path[length-1]=0; // remove ending '\' + } + if (access(path,F_OK)) + { + net_printf(thd,ER_BAD_DB_ERROR,dbname); + DBUG_RETURN(1); + } + if (found_libchar) + path[length-1]= FN_LIBCHAR; + strmov(path+length, MY_DB_OPT_FILE); + load_db_opt(thd, path, &create); List<Item> field_list; - field_list.push_back(new Item_empty_string("Table",NAME_LEN)); - field_list.push_back(new Item_empty_string("Create Table", - max(packet.length(),1024))); // 1024 is for not to confuse old clients + field_list.push_back(new Item_empty_string("Database",NAME_LEN)); + field_list.push_back(new Item_empty_string("Create Database",1024)); - if (send_fields(thd,field_list,1)) + if (protocol->send_fields(&field_list,1)) DBUG_RETURN(1); - if (my_net_write(&thd->net, (char*)packet.ptr(), packet.length())) - DBUG_RETURN(1); + protocol->prepare_for_resend(); + protocol->store(dbname, strlen(dbname), system_charset_info); + buffer.length(0); + buffer.append("CREATE DATABASE ", 16); + if (create_options & HA_LEX_CREATE_IF_NOT_EXISTS) + buffer.append("/*!32312 IF NOT EXISTS*/ ", 25); + append_identifier(thd, &buffer, dbname, strlen(dbname)); - send_eof(&thd->net); + if (create.default_table_charset) + { + buffer.append(" /*!40100", 9); + buffer.append(" DEFAULT CHARACTER SET ", 23); + buffer.append(create.default_table_charset->csname); + if 
(!(create.default_table_charset->state & MY_CS_PRIMARY)) + { + buffer.append(" COLLATE ", 9); + buffer.append(create.default_table_charset->name); + } + buffer.append(" */", 3); + } + protocol->store(buffer.ptr(), buffer.length(), buffer.charset()); + + if (protocol->write()) + DBUG_RETURN(1); + send_eof(thd); DBUG_RETURN(0); } - int mysqld_show_logs(THD *thd) { + List<Item> field_list; + Protocol *protocol= thd->protocol; DBUG_ENTER("mysqld_show_logs"); - List<Item> field_list; field_list.push_back(new Item_empty_string("File",FN_REFLEN)); field_list.push_back(new Item_empty_string("Type",10)); field_list.push_back(new Item_empty_string("Status",10)); - if (send_fields(thd,field_list,1)) + if (protocol->send_fields(&field_list,1)) DBUG_RETURN(1); #ifdef HAVE_BERKELEY_DB - if (!berkeley_skip && berkeley_show_logs(thd)) + if ((have_berkeley_db == SHOW_OPTION_YES) && berkeley_show_logs(protocol)) DBUG_RETURN(-1); #endif - send_eof(&thd->net); + send_eof(thd); DBUG_RETURN(0); } @@ -649,30 +963,30 @@ int mysqld_show_keys(THD *thd, TABLE_LIST *table_list) { TABLE *table; - char buff[256]; - CONVERT *convert=thd->variables.convert_set; + Protocol *protocol= thd->protocol; DBUG_ENTER("mysqld_show_keys"); DBUG_PRINT("enter",("db: %s table: %s",table_list->db, table_list->real_name)); if (!(table = open_ltable(thd, table_list, TL_UNLOCK))) { - send_error(&thd->net); + send_error(thd); DBUG_RETURN(1); } List<Item> field_list; Item *item; field_list.push_back(new Item_empty_string("Table",NAME_LEN)); - field_list.push_back(new Item_int("Non_unique",0,1)); + field_list.push_back(new Item_return_int("Non_unique",1, MYSQL_TYPE_TINY)); field_list.push_back(new Item_empty_string("Key_name",NAME_LEN)); - field_list.push_back(new Item_int("Seq_in_index",0,2)); + field_list.push_back(new Item_return_int("Seq_in_index",2, MYSQL_TYPE_TINY)); field_list.push_back(new Item_empty_string("Column_name",NAME_LEN)); field_list.push_back(item=new Item_empty_string("Collation",1)); item->maybe_null=1; field_list.push_back(item=new Item_int("Cardinality",0,21)); item->maybe_null=1; - field_list.push_back(item=new Item_int("Sub_part",0,3)); + field_list.push_back(item=new Item_return_int("Sub_part",3, + MYSQL_TYPE_SHORT)); item->maybe_null=1; field_list.push_back(item=new Item_empty_string("Packed",10)); item->maybe_null=1; @@ -681,77 +995,70 @@ mysqld_show_keys(THD *thd, TABLE_LIST *table_list) field_list.push_back(new Item_empty_string("Comment",255)); item->maybe_null=1; - if (send_fields(thd,field_list,1)) + if (protocol->send_fields(&field_list,1)) DBUG_RETURN(1); - String *packet= &thd->packet; KEY *key_info=table->key_info; - table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK | HA_STATUS_TIME | - HA_STATUS_CONST); + table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK | HA_STATUS_TIME); for (uint i=0 ; i < table->keys ; i++,key_info++) { KEY_PART_INFO *key_part= key_info->key_part; - char *end; + const char *str; for (uint j=0 ; j < key_info->key_parts ; j++,key_part++) { - packet->length(0); - net_store_data(packet,convert,table->table_name); - net_store_data(packet,convert,((key_info->flags & HA_NOSAME) ? "0" :"1"), 1); - net_store_data(packet,convert,key_info->name); - end=int10_to_str((long) (j+1),(char*) buff,10); - net_store_data(packet,convert,buff,(uint) (end-buff)); - net_store_data(packet,convert, - key_part->field ? 
key_part->field->field_name : - "?unknown field?"); - if (table->file->index_flags(i) & HA_READ_ORDER) - net_store_data(packet,convert, - ((key_part->key_part_flag & HA_REVERSE_SORT) ? - "D" : "A"), 1); + protocol->prepare_for_resend(); + protocol->store(table->table_name, system_charset_info); + protocol->store_tiny((longlong) ((key_info->flags & HA_NOSAME) ? 0 :1)); + protocol->store(key_info->name, system_charset_info); + protocol->store_tiny((longlong) (j+1)); + str=(key_part->field ? key_part->field->field_name : + "?unknown field?"); + protocol->store(str, system_charset_info); + if (table->file->index_flags(i, j, 0) & HA_READ_ORDER) + protocol->store(((key_part->key_part_flag & HA_REVERSE_SORT) ? + "D" : "A"), 1, system_charset_info); else - net_store_null(packet); /* purecov: inspected */ + protocol->store_null(); /* purecov: inspected */ KEY *key=table->key_info+i; if (key->rec_per_key[j]) { ha_rows records=(table->file->records / key->rec_per_key[j]); - end=longlong10_to_str((longlong) records, buff, 10); - net_store_data(packet,convert,buff,(uint) (end-buff)); + protocol->store((ulonglong) records); } else - net_store_null(packet); + protocol->store_null(); /* Check if we have a key part that only uses part of the field */ if (!(key_info->flags & HA_FULLTEXT) && (!key_part->field || key_part->length != table->field[key_part->fieldnr-1]->key_length())) - { - end=int10_to_str((long) key_part->length, buff,10); /* purecov: inspected */ - net_store_data(packet,convert,buff,(uint) (end-buff)); /* purecov: inspected */ - } + protocol->store_short((longlong) key_part->length / + key_part->field->charset()->mbmaxlen); else - net_store_null(packet); - net_store_null(packet); // No pack_information yet + protocol->store_null(); + protocol->store_null(); // No pack_information yet /* Null flag */ uint flags= key_part->field ? key_part->field->flags : 0; char *pos=(char*) ((flags & NOT_NULL_FLAG) ? 
"" : "YES"); - net_store_data(packet,convert,(const char*) pos); - net_store_data(packet,convert,table->file->index_type(i)); + protocol->store((const char*) pos, system_charset_info); + protocol->store(table->file->index_type(i), system_charset_info); /* Comment */ - if (!(table->keys_in_use & ((key_map) 1 << i))) - net_store_data(packet,convert,"disabled",8); + if (!table->keys_in_use.is_set(i)) + protocol->store("disabled",8, system_charset_info); else - net_store_data(packet,convert,""); - if (my_net_write(&thd->net,(char*) packet->ptr(),packet->length())) + protocol->store("", 0, system_charset_info); + if (protocol->write()) DBUG_RETURN(1); /* purecov: inspected */ } } - send_eof(&thd->net); + send_eof(thd); DBUG_RETURN(0); } /**************************************************************************** -** Return only fields for API mysql_list_fields -** Use "show table wildcard" in mysql instead of this + Return only fields for API mysql_list_fields + Use "show table wildcard" in mysql instead of this ****************************************************************************/ void @@ -763,7 +1070,7 @@ mysqld_list_fields(THD *thd, TABLE_LIST *table_list, const char *wild) if (!(table = open_ltable(thd, table_list, TL_UNLOCK))) { - send_error(&thd->net); + send_error(thd); DBUG_VOID_RETURN; } List<Item> field_list; @@ -771,35 +1078,35 @@ mysqld_list_fields(THD *thd, TABLE_LIST *table_list, const char *wild) Field **ptr,*field; for (ptr=table->field ; (field= *ptr); ptr++) { - if (!wild || !wild[0] || !wild_case_compare(field->field_name,wild)) + if (!wild || !wild[0] || + !wild_case_compare(system_charset_info, field->field_name,wild)) field_list.push_back(new Item_field(field)); } - restore_record(table,2); // Get empty record - if (send_fields(thd,field_list,2)) + restore_record(table,default_values); // Get empty record + if (thd->protocol->send_fields(&field_list,2)) DBUG_VOID_RETURN; - VOID(net_flush(&thd->net)); + thd->protocol->flush(); DBUG_VOID_RETURN; } + int mysqld_dump_create_info(THD *thd, TABLE *table, int fd) { - CONVERT *convert=thd->variables.convert_set; + Protocol *protocol= thd->protocol; + String *packet= protocol->storage_packet(); DBUG_ENTER("mysqld_dump_create_info"); DBUG_PRINT("enter",("table: %s",table->real_name)); - String* packet = &thd->packet; - packet->length(0); - if (store_create_info(thd,table,packet)) + protocol->prepare_for_resend(); + if (store_create_info(thd, table, packet)) DBUG_RETURN(-1); - if (convert) - convert->convert((char*) packet->ptr(), packet->length()); if (fd < 0) { - if (my_net_write(&thd->net, (char*)packet->ptr(), packet->length())) + if (protocol->write()) DBUG_RETURN(-1); - VOID(net_flush(&thd->net)); + protocol->flush(); } else { @@ -810,19 +1117,108 @@ mysqld_dump_create_info(THD *thd, TABLE *table, int fd) DBUG_RETURN(0); } -static void -append_identifier(THD *thd, String *packet, const char *name) +/* + Go through all character combinations and ensure that sql_lex.cc can + parse it as an identifer. 
+ + SYNOPSIS + require_quotes() + name attribute name + name_length length of name + + RETURN + # Pointer to conflicting character + 0 No conflicting character +*/ + +static const char *require_quotes(const char *name, uint name_length) { - if (thd->options & OPTION_QUOTE_SHOW_CREATE) + uint length; + const char *end= name + name_length; + + for ( ; name < end ; name++) { - packet->append("`", 1); - packet->append(name); - packet->append("`", 1); + uchar chr= (uchar) *name; + length= my_mbcharlen(system_charset_info, chr); + if (length == 1 && !system_charset_info->ident_map[chr]) + return name; } - else + return 0; +} + + +void +append_identifier(THD *thd, String *packet, const char *name, uint length) +{ + const char *name_end; + char quote_char; + int q= get_quote_char_for_identifier(thd, name, length); + + if (q == EOF) + { + packet->append(name, length, system_charset_info); + return; + } + + /* + The identifier must be quoted as it includes a quote character or + it's a keyword + */ + + packet->reserve(length*2 + 2); + quote_char= (char) q; + packet->append(&quote_char, 1, system_charset_info); + + for (name_end= name+length ; name < name_end ; name+= length) { - packet->append(name); + uchar chr= (uchar) *name; + length= my_mbcharlen(system_charset_info, chr); + /* + my_mbcharlen can return 0 on a wrong multibyte + sequence. It is possible when upgrading from 4.0, + and identifier contains some accented characters. + The manual says it does not work. So we'll just + change length to 1 not to hang in the endless loop. + */ + if (!length) + length= 1; + if (length == 1 && chr == (uchar) quote_char) + packet->append(&quote_char, 1, system_charset_info); + packet->append(name, length, packet->charset()); } + packet->append(&quote_char, 1, system_charset_info); +} + + +/* + Get the quote character for displaying an identifier. + + SYNOPSIS + get_quote_char_for_identifier() + thd Thread handler + name name to quote + length length of name + + IMPLEMENTATION + If name is a keyword or includes a special character, then force + quoting. + Otherwise identifier is quoted only if the option OPTION_QUOTE_SHOW_CREATE +
+ + RETURN + EOF No quote character is needed + # Quote character +*/ + +int get_quote_char_for_identifier(THD *thd, const char *name, uint length) +{ + if (!is_keyword(name,length) && + !require_quotes(name, length) && + !(thd->options & OPTION_QUOTE_SHOW_CREATE)) + return EOF; + if (thd->variables.sql_mode & MODE_ANSI_QUOTES) + return '"'; + return '`'; } @@ -831,86 +1227,167 @@ append_identifier(THD *thd, String *packet, const char *name) static void append_directory(THD *thd, String *packet, const char *dir_type, const char *filename) { - uint length; - if (filename && !(thd->sql_mode & MODE_NO_DIR_IN_CREATE)) + if (filename && !(thd->variables.sql_mode & MODE_NO_DIR_IN_CREATE)) { - length= dirname_length(filename); + uint length= dirname_length(filename); packet->append(' '); packet->append(dir_type); packet->append(" DIRECTORY='", 12); +#ifdef __WIN__ + char *winfilename = thd->memdup(filename, length); + for (uint i=0; i < length; i++) + if (winfilename[i] == '\\') + winfilename[i] = '/'; + packet->append(winfilename, length); +#else packet->append(filename, length); +#endif packet->append('\''); } } +#define LIST_PROCESS_HOST_LEN 64 + static int store_create_info(THD *thd, TABLE *table, String *packet) { List<Item> field_list; - char tmp[MAX_FIELD_WIDTH], *for_str, buff[128], *end; - String type(tmp, sizeof(tmp)); + char tmp[MAX_FIELD_WIDTH], *for_str, buff[128], *end, *alias; + String type(tmp, sizeof(tmp), system_charset_info); Field **ptr,*field; uint primary_key; KEY *key_info; handler *file= table->file; HA_CREATE_INFO create_info; + my_bool foreign_db_mode= (thd->variables.sql_mode & (MODE_POSTGRESQL | + MODE_ORACLE | + MODE_MSSQL | + MODE_DB2 | + MODE_MAXDB | + MODE_ANSI)) != 0; + my_bool limited_mysql_mode= (thd->variables.sql_mode & + (MODE_NO_FIELD_OPTIONS | MODE_MYSQL323 | + MODE_MYSQL40)) != 0; + DBUG_ENTER("store_create_info"); DBUG_PRINT("enter",("table: %s",table->real_name)); - restore_record(table,2); // Get empty record + restore_record(table,default_values); // Get empty record + if (table->tmp_table) packet->append("CREATE TEMPORARY TABLE ", 23); else packet->append("CREATE TABLE ", 13); - append_identifier(thd,packet, - (lower_case_table_names == 2 ? table->table_name : - table->real_name)); + alias= (lower_case_table_names == 2 ? 
table->table_name : + table->real_name); + append_identifier(thd, packet, alias, strlen(alias)); packet->append(" (\n", 3); for (ptr=table->field ; (field= *ptr); ptr++) { bool has_default; + bool has_now_default; uint flags = field->flags; if (ptr != table->field) packet->append(",\n", 2); + packet->append(" ", 2); - append_identifier(thd,packet,field->field_name); + append_identifier(thd,packet,field->field_name, strlen(field->field_name)); packet->append(' '); // check for surprises from the previous call to Field::sql_type() if (type.ptr() != tmp) - type.set(tmp, sizeof(tmp)); + type.set(tmp, sizeof(tmp), system_charset_info); + else + type.set_charset(system_charset_info); field->sql_type(type); - packet->append(type.ptr(),type.length()); + packet->append(type.ptr(), type.length(), system_charset_info); + + if (field->has_charset() && + !(thd->variables.sql_mode & (MODE_MYSQL323 | MODE_MYSQL40))) + { + if (field->charset() != table->table_charset) + { + packet->append(" character set ", 15); + packet->append(field->charset()->csname); + } + /* + For string types dump collation name only if + collation is not primary for the given charset + */ + if (!(field->charset()->state & MY_CS_PRIMARY)) + { + packet->append(" collate ", 9); + packet->append(field->charset()->name); + } + } - has_default= (field->type() != FIELD_TYPE_BLOB && - table->timestamp_field != field && - field->unireg_check != Field::NEXT_NUMBER); if (flags & NOT_NULL_FLAG) packet->append(" NOT NULL", 9); + else if (field->type() == FIELD_TYPE_TIMESTAMP) + { + /* + TIMESTAMP field require explicit NULL flag, because unlike + all other fields they are treated as NOT NULL by default. + */ + packet->append(" NULL", 5); + } + + /* + Again we are using CURRENT_TIMESTAMP instead of NOW because it is + more standard + */ + has_now_default= table->timestamp_field == field && + field->unireg_check != Field::TIMESTAMP_UN_FIELD; + + has_default= (field->type() != FIELD_TYPE_BLOB && + field->unireg_check != Field::NEXT_NUMBER && + !((thd->variables.sql_mode & (MODE_MYSQL323 | MODE_MYSQL40)) && + has_now_default)); if (has_default) { packet->append(" default ", 9); - if (!field->is_null()) + if (has_now_default) + packet->append("CURRENT_TIMESTAMP",17); + else if (!field->is_null()) { // Not null by default - type.set(tmp,sizeof(tmp)); - field->val_str(&type,&type); - packet->append('\''); + type.set(tmp, sizeof(tmp), field->charset()); + field->val_str(&type); if (type.length()) - append_unescaped(packet, type.c_ptr()); - packet->append('\''); + { + String def_val; + uint dummy_errors; + /* convert to system_charset_info == utf8 */ + def_val.copy(type.ptr(), type.length(), field->charset(), + system_charset_info, &dummy_errors); + append_unescaped(packet, def_val.ptr(), def_val.length()); + } + else + packet->append("''",2); } else if (field->maybe_null()) packet->append("NULL", 4); // Null as default else - packet->append(tmp,0); + packet->append(tmp); } - if (field->unireg_check == Field::NEXT_NUMBER) - packet->append(" auto_increment", 15 ); + if (!(thd->variables.sql_mode & MODE_NO_FIELD_OPTIONS) && + table->timestamp_field == field && + field->unireg_check != Field::TIMESTAMP_DN_FIELD) + packet->append(" on update CURRENT_TIMESTAMP",28); + + if (field->unireg_check == Field::NEXT_NUMBER && + !(thd->variables.sql_mode & MODE_NO_FIELD_OPTIONS)) + packet->append(" auto_increment", 15 ); + + if (field->comment.length) + { + packet->append(" COMMENT ",9); + append_unescaped(packet, field->comment.str, field->comment.length); + } } 
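The loop that closes here emits one column definition per field: the quoted identifier, the type, optional character set and collate clauses, the NULL/NOT NULL flag, a default (rendered as CURRENT_TIMESTAMP for the auto-set timestamp case), the on update / auto_increment attributes, and finally an optional COMMENT. A condensed sketch of that clause ordering over a plain struct; ColumnDef and render_column() are illustrative stand-ins, while the real code works on Field objects and appends to a String packet:

// Minimal sketch of the clause ordering used by the column loop above.
#include <string>

struct ColumnDef
{
  std::string name;
  std::string type;            // e.g. "int(11)", "varchar(32)"
  std::string charset;         // empty if same as the table character set
  std::string collation;       // empty if it is the charset's primary collation
  bool not_null= false;
  bool has_default= false;
  std::string default_value;   // e.g. "CURRENT_TIMESTAMP" or "'abc'"
  bool auto_increment= false;
  std::string comment;
};

std::string render_column(const ColumnDef &c, char quote= '`')
{
  std::string def;
  def+= quote; def+= c.name; def+= quote;            // quoted identifier
  def+= ' '; def+= c.type;
  if (!c.charset.empty())
    def+= " character set " + c.charset;
  if (!c.collation.empty())
    def+= " collate " + c.collation;
  if (c.not_null)
    def+= " NOT NULL";
  if (c.has_default)
    def+= " default " + c.default_value;
  if (c.auto_increment)
    def+= " auto_increment";
  if (!c.comment.empty())
    def+= " COMMENT '" + c.comment + "'";
  return def;
}

Unlike append_identifier() earlier in this file, the sketch does not double embedded quote characters inside the column name or escape the comment literal; it only illustrates the order in which the clauses are assembled.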
key_info= table->key_info; @@ -925,7 +1402,7 @@ store_create_info(THD *thd, TABLE *table, String *packet) bool found_primary=0; packet->append(",\n ", 4); - if (i == primary_key && !strcmp(key_info->name,"PRIMARY")) + if (i == primary_key && !strcmp(key_info->name, primary_key_name)) { found_primary=1; packet->append("PRIMARY ", 8); @@ -934,11 +1411,29 @@ store_create_info(THD *thd, TABLE *table, String *packet) packet->append("UNIQUE ", 7); else if (key_info->flags & HA_FULLTEXT) packet->append("FULLTEXT ", 9); + else if (key_info->flags & HA_SPATIAL) + packet->append("SPATIAL ", 8); packet->append("KEY ", 4); if (!found_primary) - append_identifier(thd,packet,key_info->name); + append_identifier(thd, packet, key_info->name, strlen(key_info->name)); + if (!(thd->variables.sql_mode & MODE_NO_KEY_OPTIONS) && + !limited_mysql_mode && !foreign_db_mode) + { + if (key_info->algorithm == HA_KEY_ALG_BTREE) + packet->append(" USING BTREE", 12); + + if (key_info->algorithm == HA_KEY_ALG_HASH) + packet->append(" USING HASH", 11); + + // +BAR: send USING only in non-default case: non-spatial rtree + if ((key_info->algorithm == HA_KEY_ALG_RTREE) && + !(key_info->flags & HA_SPATIAL)) + packet->append(" USING RTREE", 12); + + // No need to send TYPE FULLTEXT, it is sent as FULLTEXT KEY + } packet->append(" (", 2); for (uint j=0 ; j < key_info->key_parts ; j++,key_part++) @@ -947,14 +1442,17 @@ store_create_info(THD *thd, TABLE *table, String *packet) packet->append(','); if (key_part->field) - append_identifier(thd,packet,key_part->field->field_name); + append_identifier(thd,packet,key_part->field->field_name, + strlen(key_part->field->field_name)); if (!key_part->field || (key_part->length != table->field[key_part->fieldnr-1]->key_length() && !(key_info->flags & HA_FULLTEXT))) { buff[0] = '('; - char* end=int10_to_str((long) key_part->length, buff + 1,10); + char* end=int10_to_str((long) key_part->length / + key_part->field->charset()->mbmaxlen, + buff + 1,10); *end++ = ')'; packet->append(buff,(uint) (end-buff)); } @@ -974,66 +1472,86 @@ store_create_info(THD *thd, TABLE *table, String *packet) } packet->append("\n)", 2); - packet->append(" TYPE=", 6); - packet->append(file->table_type()); - - if (table->min_rows) - { - packet->append(" MIN_ROWS="); - end= longlong10_to_str(table->min_rows, buff, 10); - packet->append(buff, (uint) (end- buff)); - } - if (table->max_rows) - { - packet->append(" MAX_ROWS="); - end= longlong10_to_str(table->max_rows, buff, 10); - packet->append(buff, (uint) (end - buff)); - } - if (table->avg_row_length) + if (!(thd->variables.sql_mode & MODE_NO_TABLE_OPTIONS) && !foreign_db_mode) { - packet->append(" AVG_ROW_LENGTH="); - end= longlong10_to_str(table->avg_row_length, buff,10); - packet->append(buff, (uint) (end - buff)); - } + if (thd->variables.sql_mode & (MODE_MYSQL323 | MODE_MYSQL40)) + packet->append(" TYPE=", 6); + else + packet->append(" ENGINE=", 8); + packet->append(file->table_type()); + + if (table->table_charset && + !(thd->variables.sql_mode & MODE_MYSQL323) && + !(thd->variables.sql_mode & MODE_MYSQL40)) + { + packet->append(" DEFAULT CHARSET=", 17); + packet->append(table->table_charset->csname); + if (!(table->table_charset->state & MY_CS_PRIMARY)) + { + packet->append(" COLLATE=", 9); + packet->append(table->table_charset->name); + } + } - if (table->db_create_options & HA_OPTION_PACK_KEYS) - packet->append(" PACK_KEYS=1", 12); - if (table->db_create_options & HA_OPTION_NO_PACK_KEYS) - packet->append(" PACK_KEYS=0", 12); - if (table->db_create_options & 
HA_OPTION_CHECKSUM) - packet->append(" CHECKSUM=1", 11); - if (table->db_create_options & HA_OPTION_DELAY_KEY_WRITE) - packet->append(" DELAY_KEY_WRITE=1",18); - if (table->row_type != ROW_TYPE_DEFAULT) - { - packet->append(" ROW_FORMAT=",12); - packet->append(ha_row_type[(uint) table->row_type]); - } - table->file->append_create_info(packet); - if (table->comment && table->comment[0]) - { - packet->append(" COMMENT='", 10); - append_unescaped(packet, table->comment); - packet->append('\''); - } - if (file->raid_type) - { - uint length; - length= my_snprintf(buff,sizeof(buff), - " RAID_TYPE=%s RAID_CHUNKS=%d RAID_CHUNKSIZE=%ld", - my_raid_type(file->raid_type), file->raid_chunks, - file->raid_chunksize/RAID_BLOCK_SIZE); - packet->append(buff, length); + if (table->min_rows) + { + packet->append(" MIN_ROWS=", 10); + end= longlong10_to_str(table->min_rows, buff, 10); + packet->append(buff, (uint) (end- buff)); + } + + if (table->max_rows) + { + packet->append(" MAX_ROWS=", 10); + end= longlong10_to_str(table->max_rows, buff, 10); + packet->append(buff, (uint) (end - buff)); + } + + if (table->avg_row_length) + { + packet->append(" AVG_ROW_LENGTH=", 16); + end= longlong10_to_str(table->avg_row_length, buff,10); + packet->append(buff, (uint) (end - buff)); + } + + if (table->db_create_options & HA_OPTION_PACK_KEYS) + packet->append(" PACK_KEYS=1", 12); + if (table->db_create_options & HA_OPTION_NO_PACK_KEYS) + packet->append(" PACK_KEYS=0", 12); + if (table->db_create_options & HA_OPTION_CHECKSUM) + packet->append(" CHECKSUM=1", 11); + if (table->db_create_options & HA_OPTION_DELAY_KEY_WRITE) + packet->append(" DELAY_KEY_WRITE=1",18); + if (table->row_type != ROW_TYPE_DEFAULT) + { + packet->append(" ROW_FORMAT=",12); + packet->append(ha_row_type[(uint) table->row_type]); + } + table->file->append_create_info(packet); + if (table->comment && table->comment[0]) + { + packet->append(" COMMENT=", 9); + append_unescaped(packet, table->comment, strlen(table->comment)); + } + if (file->raid_type) + { + uint length; + length= my_snprintf(buff,sizeof(buff), + " RAID_TYPE=%s RAID_CHUNKS=%d RAID_CHUNKSIZE=%ld", + my_raid_type(file->raid_type), file->raid_chunks, + file->raid_chunksize/RAID_BLOCK_SIZE); + packet->append(buff, length); + } + append_directory(thd, packet, "DATA", create_info.data_file_name); + append_directory(thd, packet, "INDEX", create_info.index_file_name); } - append_directory(thd, packet, "DATA", create_info.data_file_name); - append_directory(thd, packet, "INDEX", create_info.index_file_name); DBUG_RETURN(0); } /**************************************************************************** -** Return info about all processes -** returns for each thread: thread id, user, host, db, command, info + Return info about all processes + returns for each thread: thread id, user, host, db, command, info ****************************************************************************/ class thread_info :public ilink { @@ -1053,8 +1571,6 @@ public: template class I_List<thread_info>; #endif -#define LIST_PROCESS_HOST_LEN 64 - void mysqld_list_processes(THD *thd,const char *user, bool verbose) { Item *field; @@ -1062,21 +1578,21 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose) I_List<thread_info> thread_infos; ulong max_query_length= (verbose ? 
thd->variables.max_allowed_packet : PROCESS_LIST_WIDTH); - CONVERT *convert=thd->variables.convert_set; + Protocol *protocol= thd->protocol; DBUG_ENTER("mysqld_list_processes"); - field_list.push_back(new Item_int("Id",0,7)); + field_list.push_back(new Item_int("Id",0,11)); field_list.push_back(new Item_empty_string("User",16)); field_list.push_back(new Item_empty_string("Host",LIST_PROCESS_HOST_LEN)); field_list.push_back(field=new Item_empty_string("db",NAME_LEN)); field->maybe_null=1; field_list.push_back(new Item_empty_string("Command",16)); - field_list.push_back(new Item_empty_string("Time",7)); + field_list.push_back(new Item_return_int("Time",7, FIELD_TYPE_LONG)); field_list.push_back(field=new Item_empty_string("State",30)); field->maybe_null=1; field_list.push_back(field=new Item_empty_string("Info",max_query_length)); field->maybe_null=1; - if (send_fields(thd,field_list,1)) + if (protocol->send_fields(&field_list,1)) DBUG_VOID_RETURN; VOID(pthread_mutex_lock(&LOCK_thread_count)); // For unlink from list @@ -1087,7 +1603,7 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose) while ((tmp=it++)) { struct st_my_thread_var *mysys_var; - if ((tmp->net.vio || tmp->system_thread) && + if ((tmp->vio_ok() || tmp->system_thread) && (!user || (tmp->user && !strcmp(tmp->user,user)))) { thread_info *thd_info=new thread_info; @@ -1110,6 +1626,7 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose) if ((mysys_var= tmp->mysys_var)) pthread_mutex_lock(&mysys_var->mutex); thd_info->proc_info= (char*) (tmp->killed ? "Killed" : 0); +#ifndef EMBEDDED_LIBRARY thd_info->state_info= (char*) (tmp->locked ? "Locked" : tmp->net.reading_or_writing ? (tmp->net.reading_or_writing == 2 ? @@ -1120,6 +1637,9 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose) tmp->mysys_var && tmp->mysys_var->current_cond ? 
"Waiting on cond" : NullS); +#else + thd_info->state_info= (char*)"Writing to net"; +#endif if (mysys_var) pthread_mutex_unlock(&mysys_var->mutex); @@ -1150,308 +1670,455 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose) VOID(pthread_mutex_unlock(&LOCK_thread_count)); thread_info *thd_info; - String *packet= &thd->packet; + time_t now= time(0); while ((thd_info=thread_infos.get())) { - char buff[20],*end; - packet->length(0); - end=int10_to_str((long) thd_info->thread_id, buff,10); - net_store_data(packet,convert,buff,(uint) (end-buff)); - net_store_data(packet,convert,thd_info->user); - net_store_data(packet,convert,thd_info->host); - if (thd_info->db) - net_store_data(packet,convert,thd_info->db); - else - net_store_null(packet); + protocol->prepare_for_resend(); + protocol->store((ulonglong) thd_info->thread_id); + protocol->store(thd_info->user, system_charset_info); + protocol->store(thd_info->host, system_charset_info); + protocol->store(thd_info->db, system_charset_info); if (thd_info->proc_info) - net_store_data(packet,convert,thd_info->proc_info); + protocol->store(thd_info->proc_info, system_charset_info); else - net_store_data(packet,convert,command_name[thd_info->command]); + protocol->store(command_name[thd_info->command], system_charset_info); if (thd_info->start_time) - net_store_data(packet, - (uint32) (time((time_t*) 0) - thd_info->start_time)); + protocol->store((uint32) (now - thd_info->start_time)); else - net_store_null(packet); - if (thd_info->state_info) - net_store_data(packet,convert,thd_info->state_info); - else - net_store_null(packet); - if (thd_info->query) - net_store_data(packet,convert,thd_info->query); - else - net_store_null(packet); - if (my_net_write(&thd->net,(char*) packet->ptr(),packet->length())) + protocol->store_null(); + protocol->store(thd_info->state_info, system_charset_info); + protocol->store(thd_info->query, system_charset_info); + if (protocol->write()) break; /* purecov: inspected */ } - send_eof(&thd->net); + send_eof(thd); DBUG_VOID_RETURN; } - /***************************************************************************** -** Status functions + Status functions *****************************************************************************/ +static bool write_collation(Protocol *protocol, CHARSET_INFO *cs) +{ + protocol->prepare_for_resend(); + protocol->store(cs->name, system_charset_info); + protocol->store(cs->csname, system_charset_info); + protocol->store_short((longlong) cs->number); + protocol->store((cs->state & MY_CS_PRIMARY) ? "Yes" : "",system_charset_info); + protocol->store((cs->state & MY_CS_COMPILED)? 
"Yes" : "",system_charset_info); + protocol->store_short((longlong) cs->strxfrm_multiply); + return protocol->write(); +} + +int mysqld_show_collations(THD *thd, const char *wild) +{ + char buff[8192]; + String packet2(buff,sizeof(buff),thd->charset()); + List<Item> field_list; + CHARSET_INFO **cs; + Protocol *protocol= thd->protocol; + + DBUG_ENTER("mysqld_show_charsets"); + + field_list.push_back(new Item_empty_string("Collation",30)); + field_list.push_back(new Item_empty_string("Charset",30)); + field_list.push_back(new Item_return_int("Id",11, FIELD_TYPE_SHORT)); + field_list.push_back(new Item_empty_string("Default",30)); + field_list.push_back(new Item_empty_string("Compiled",30)); + field_list.push_back(new Item_return_int("Sortlen",3, FIELD_TYPE_SHORT)); + + if (protocol->send_fields(&field_list, 1)) + DBUG_RETURN(1); + + for ( cs= all_charsets ; cs < all_charsets+255 ; cs++ ) + { + CHARSET_INFO **cl; + if (!cs[0] || !(cs[0]->state & MY_CS_AVAILABLE) || + !(cs[0]->state & MY_CS_PRIMARY)) + continue; + for ( cl= all_charsets; cl < all_charsets+255 ;cl ++) + { + if (!cl[0] || !(cl[0]->state & MY_CS_AVAILABLE) || + !my_charset_same(cs[0],cl[0])) + continue; + if (!(wild && wild[0] && + wild_case_compare(system_charset_info,cl[0]->name,wild))) + { + if (write_collation(protocol, cl[0])) + goto err; + } + } + } + send_eof(thd); + DBUG_RETURN(0); +err: + DBUG_RETURN(1); +} + +static bool write_charset(Protocol *protocol, CHARSET_INFO *cs) +{ + protocol->prepare_for_resend(); + protocol->store(cs->csname, system_charset_info); + protocol->store(cs->comment ? cs->comment : "", system_charset_info); + protocol->store(cs->name, system_charset_info); + protocol->store_short((longlong) cs->mbmaxlen); + return protocol->write(); +} + +int mysqld_show_charsets(THD *thd, const char *wild) +{ + char buff[8192]; + String packet2(buff,sizeof(buff),thd->charset()); + List<Item> field_list; + CHARSET_INFO **cs; + Protocol *protocol= thd->protocol; + + DBUG_ENTER("mysqld_show_charsets"); + + field_list.push_back(new Item_empty_string("Charset",30)); + field_list.push_back(new Item_empty_string("Description",60)); + field_list.push_back(new Item_empty_string("Default collation",60)); + field_list.push_back(new Item_return_int("Maxlen",3, FIELD_TYPE_SHORT)); + + if (protocol->send_fields(&field_list, 1)) + DBUG_RETURN(1); + + for ( cs= all_charsets ; cs < all_charsets+255 ; cs++ ) + { + if (cs[0] && (cs[0]->state & MY_CS_PRIMARY) && + (cs[0]->state & MY_CS_AVAILABLE) && + !(wild && wild[0] && + wild_case_compare(system_charset_info,cs[0]->csname,wild))) + { + if (write_charset(protocol, cs[0])) + goto err; + } + } + send_eof(thd); + DBUG_RETURN(0); +err: + DBUG_RETURN(1); +} + + int mysqld_show(THD *thd, const char *wild, show_var_st *variables, enum enum_var_type value_type, pthread_mutex_t *mutex) { - char buff[8192]; - String packet2(buff,sizeof(buff)); + char buff[1024]; List<Item> field_list; - CONVERT *convert=thd->variables.convert_set; - + Protocol *protocol= thd->protocol; + LEX_STRING null_lex_str; DBUG_ENTER("mysqld_show"); + field_list.push_back(new Item_empty_string("Variable_name",30)); field_list.push_back(new Item_empty_string("Value",256)); - if (send_fields(thd,field_list,1)) + if (protocol->send_fields(&field_list,1)) DBUG_RETURN(1); /* purecov: inspected */ + null_lex_str.str= 0; // For sys_var->value_ptr() + null_lex_str.length= 0; pthread_mutex_lock(mutex); for (; variables->name; variables++) { - if (!(wild && wild[0] && wild_case_compare(variables->name,wild))) + if (!(wild && 
wild[0] && wild_case_compare(system_charset_info, + variables->name,wild))) { - packet2.length(0); - net_store_data(&packet2,convert,variables->name); + protocol->prepare_for_resend(); + protocol->store(variables->name, system_charset_info); SHOW_TYPE show_type=variables->type; char *value=variables->value; + const char *pos, *end; + long nr; + if (show_type == SHOW_SYS) { show_type= ((sys_var*) value)->type(); - value= (char*) ((sys_var*) value)->value_ptr(thd, value_type); + value= (char*) ((sys_var*) value)->value_ptr(thd, value_type, + &null_lex_str); } + pos= end= buff; switch (show_type) { case SHOW_LONG: case SHOW_LONG_CONST: - net_store_data(&packet2,(uint32) *(ulong*) value); + end= int10_to_str(*(long*) value, buff, 10); break; case SHOW_LONGLONG: - net_store_data(&packet2,(longlong) *(longlong*) value); - break; + end= longlong10_to_str(*(longlong*) value, buff, 10); + break; case SHOW_HA_ROWS: - net_store_data(&packet2,(longlong) *(ha_rows*) value); + end= longlong10_to_str((longlong) *(ha_rows*) value, buff, 10); break; case SHOW_BOOL: - net_store_data(&packet2,(ulong) *(bool*) value ? "ON" : "OFF"); + end= strmov(buff, *(bool*) value ? "ON" : "OFF"); break; case SHOW_MY_BOOL: - net_store_data(&packet2,(ulong) *(my_bool*) value ? "ON" : "OFF"); + end= strmov(buff, *(my_bool*) value ? "ON" : "OFF"); break; case SHOW_INT_CONST: case SHOW_INT: - net_store_data(&packet2,(uint32) *(int*) value); + end= int10_to_str((long) *(uint32*) value, buff, 10); break; case SHOW_HAVE: { SHOW_COMP_OPTION tmp= *(SHOW_COMP_OPTION*) value; - net_store_data(&packet2, (tmp == SHOW_OPTION_NO ? "NO" : - tmp == SHOW_OPTION_YES ? "YES" : - "DISABLED")); + pos= show_comp_option_name[(int) tmp]; + end= strend(pos); break; } case SHOW_CHAR: - net_store_data(&packet2,convert, value); + { + if (!(pos= value)) + pos= ""; + end= strend(pos); break; + } case SHOW_STARTTIME: - net_store_data(&packet2,(uint32) (thd->query_start() - start_time)); + nr= (long) (thd->query_start() - start_time); + end= int10_to_str(nr, buff, 10); break; case SHOW_QUESTION: - net_store_data(&packet2,(uint32) thd->query_id); + end= int10_to_str((long) thd->query_id, buff, 10); break; +#ifdef HAVE_REPLICATION case SHOW_RPL_STATUS: - net_store_data(&packet2, rpl_status_type[(int)rpl_status]); + end= strmov(buff, rpl_status_type[(int)rpl_status]); break; -#ifdef HAVE_REPLICATION case SHOW_SLAVE_RUNNING: { pthread_mutex_lock(&LOCK_active_mi); - net_store_data(&packet2, (active_mi->slave_running && - active_mi->rli.slave_running) - ? "ON" : "OFF"); + end= strmov(buff, (active_mi && active_mi->slave_running && + active_mi->rli.slave_running) ? "ON" : "OFF"); pthread_mutex_unlock(&LOCK_active_mi); break; } -#endif + case SHOW_SLAVE_RETRIED_TRANS: + { + /* + TODO: in 5.1 with multimaster, have one such counter per line in SHOW + SLAVE STATUS, and have the sum over all lines here. + */ + pthread_mutex_lock(&LOCK_active_mi); + if (active_mi) + { + pthread_mutex_lock(&active_mi->rli.data_lock); + end= int10_to_str(active_mi->rli.retried_trans, buff, 10); + pthread_mutex_unlock(&active_mi->rli.data_lock); + } + pthread_mutex_unlock(&LOCK_active_mi); + break; + } +#endif /* HAVE_REPLICATION */ case SHOW_OPENTABLES: - net_store_data(&packet2,(uint32) cached_tables()); + end= int10_to_str((long) cached_tables(), buff, 10); break; case SHOW_CHAR_PTR: { - value= *(char**) value; - net_store_data(&packet2,convert, value ? 
value : ""); - break; + if (!(pos= *(char**) value)) + pos= ""; + end= strend(pos); + break; } #ifdef HAVE_OPENSSL /* First group - functions relying on CTX */ case SHOW_SSL_CTX_SESS_ACCEPT: - net_store_data(&packet2,(uint32) - (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_accept(ssl_acceptor_fd->ssl_context))); + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_accept(ssl_acceptor_fd-> + ssl_context)), + buff, 10); break; case SHOW_SSL_CTX_SESS_ACCEPT_GOOD: - net_store_data(&packet2,(uint32) - (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_accept_good(ssl_acceptor_fd->ssl_context))); + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_accept_good(ssl_acceptor_fd-> + ssl_context)), + buff, 10); break; case SHOW_SSL_CTX_SESS_CONNECT_GOOD: - net_store_data(&packet2,(uint32) - (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_connect_good(ssl_acceptor_fd->ssl_context))); + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_connect_good(ssl_acceptor_fd-> + ssl_context)), + buff, 10); break; case SHOW_SSL_CTX_SESS_ACCEPT_RENEGOTIATE: - net_store_data(&packet2,(uint32) - (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_accept_renegotiate(ssl_acceptor_fd->ssl_context))); + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_accept_renegotiate(ssl_acceptor_fd->ssl_context)), + buff, 10); break; case SHOW_SSL_CTX_SESS_CONNECT_RENEGOTIATE: - net_store_data(&packet2,(uint32) - (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_connect_renegotiate(ssl_acceptor_fd->ssl_context))); + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_connect_renegotiate(ssl_acceptor_fd-> ssl_context)), + buff, 10); break; case SHOW_SSL_CTX_SESS_CB_HITS: - net_store_data(&packet2,(uint32) - (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_cb_hits(ssl_acceptor_fd->ssl_context))); + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_cb_hits(ssl_acceptor_fd-> + ssl_context)), + buff, 10); break; case SHOW_SSL_CTX_SESS_HITS: - net_store_data(&packet2,(uint32) - (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_hits(ssl_acceptor_fd->ssl_context))); + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_hits(ssl_acceptor_fd-> + ssl_context)), + buff, 10); break; case SHOW_SSL_CTX_SESS_CACHE_FULL: - net_store_data(&packet2,(uint32) - (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_cache_full(ssl_acceptor_fd->ssl_context))); + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_cache_full(ssl_acceptor_fd-> + ssl_context)), + buff, 10); break; case SHOW_SSL_CTX_SESS_MISSES: - net_store_data(&packet2,(uint32) - (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_misses(ssl_acceptor_fd->ssl_context))); + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_misses(ssl_acceptor_fd-> + ssl_context)), + buff, 10); break; case SHOW_SSL_CTX_SESS_TIMEOUTS: - net_store_data(&packet2,(uint32) - (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_timeouts(ssl_acceptor_fd->ssl_context))); + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_timeouts(ssl_acceptor_fd->ssl_context)), + buff,10); break; case SHOW_SSL_CTX_SESS_NUMBER: - net_store_data(&packet2,(uint32) - (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_number(ssl_acceptor_fd->ssl_context))); + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_number(ssl_acceptor_fd->ssl_context)), + buff,10); break; case SHOW_SSL_CTX_SESS_CONNECT: - net_store_data(&packet2,(uint32) - (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_connect(ssl_acceptor_fd->ssl_context))); + end= int10_to_str((long) (!ssl_acceptor_fd ? 
0 : + SSL_CTX_sess_connect(ssl_acceptor_fd->ssl_context)), + buff,10); break; case SHOW_SSL_CTX_SESS_GET_CACHE_SIZE: - net_store_data(&packet2,(uint32) - (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_get_cache_size(ssl_acceptor_fd->ssl_context))); + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_get_cache_size(ssl_acceptor_fd->ssl_context)), + buff,10); break; case SHOW_SSL_CTX_GET_VERIFY_MODE: - net_store_data(&packet2,(uint32) - (!ssl_acceptor_fd ? 0 : - SSL_CTX_get_verify_mode(ssl_acceptor_fd->ssl_context))); + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_get_verify_mode(ssl_acceptor_fd->ssl_context)), + buff,10); break; case SHOW_SSL_CTX_GET_VERIFY_DEPTH: - net_store_data(&packet2,(uint32) - (!ssl_acceptor_fd ? 0 : - SSL_CTX_get_verify_depth(ssl_acceptor_fd->ssl_context))); + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_get_verify_depth(ssl_acceptor_fd->ssl_context)), + buff,10); break; case SHOW_SSL_CTX_GET_SESSION_CACHE_MODE: if (!ssl_acceptor_fd) { - net_store_data(&packet2,"NONE" ); + pos= "NONE"; + end= pos+4; break; } switch (SSL_CTX_get_session_cache_mode(ssl_acceptor_fd->ssl_context)) { case SSL_SESS_CACHE_OFF: - net_store_data(&packet2,"OFF" ); + pos= "OFF"; break; case SSL_SESS_CACHE_CLIENT: - net_store_data(&packet2,"CLIENT" ); + pos= "CLIENT"; break; case SSL_SESS_CACHE_SERVER: - net_store_data(&packet2,"SERVER" ); + pos= "SERVER"; break; case SSL_SESS_CACHE_BOTH: - net_store_data(&packet2,"BOTH" ); + pos= "BOTH"; break; case SSL_SESS_CACHE_NO_AUTO_CLEAR: - net_store_data(&packet2,"NO_AUTO_CLEAR" ); + pos= "NO_AUTO_CLEAR"; break; case SSL_SESS_CACHE_NO_INTERNAL_LOOKUP: - net_store_data(&packet2,"NO_INTERNAL_LOOKUP" ); + pos= "NO_INTERNAL_LOOKUP"; break; default: - net_store_data(&packet2,"Unknown"); + pos= "Unknown"; break; } + end= strend(pos); break; /* First group - functions relying on SSL */ case SHOW_SSL_GET_VERSION: - net_store_data(&packet2, thd->net.vio->ssl_arg ? - SSL_get_version((SSL*) thd->net.vio->ssl_arg) : ""); + pos= (thd->net.vio->ssl_arg ? + SSL_get_version((SSL*) thd->net.vio->ssl_arg) : ""); + end= strend(pos); break; case SHOW_SSL_SESSION_REUSED: - net_store_data(&packet2,(uint32) (thd->net.vio->ssl_arg ? - SSL_session_reused((SSL*) thd->net.vio->ssl_arg) : 0)); + end= int10_to_str((long) (thd->net.vio->ssl_arg ? + SSL_session_reused((SSL*) thd->net.vio-> + ssl_arg) : + 0), + buff, 10); break; case SHOW_SSL_GET_DEFAULT_TIMEOUT: - net_store_data(&packet2,(uint32) (thd->net.vio->ssl_arg ? - SSL_get_default_timeout((SSL*) thd->net.vio->ssl_arg) : - 0)); + end= int10_to_str((long) (thd->net.vio->ssl_arg ? + SSL_get_default_timeout((SSL*) thd->net.vio-> + ssl_arg) : + 0), + buff, 10); break; case SHOW_SSL_GET_VERIFY_MODE: - net_store_data(&packet2,(uint32) (thd->net.vio->ssl_arg ? - SSL_get_verify_mode((SSL*) thd->net.vio->ssl_arg):0)); + end= int10_to_str((long) (thd->net.vio->ssl_arg ? + SSL_get_verify_mode((SSL*) thd->net.vio-> + ssl_arg): + 0), + buff, 10); break; case SHOW_SSL_GET_VERIFY_DEPTH: - net_store_data(&packet2,(uint32) (thd->net.vio->ssl_arg ? - SSL_get_verify_depth((SSL*) thd->net.vio->ssl_arg):0)); + end= int10_to_str((long) (thd->net.vio->ssl_arg ? + SSL_get_verify_depth((SSL*) thd->net.vio-> + ssl_arg): + 0), + buff, 10); break; case SHOW_SSL_GET_CIPHER: - net_store_data(&packet2, thd->net.vio->ssl_arg ? - SSL_get_cipher((SSL*) thd->net.vio->ssl_arg) : ""); + pos= (thd->net.vio->ssl_arg ? 
+ SSL_get_cipher((SSL*) thd->net.vio->ssl_arg) : "" ); + end= strend(pos); break; case SHOW_SSL_GET_CIPHER_LIST: if (thd->net.vio->ssl_arg) { - char buf[1024], *pos; - pos=buf; + char *to= buff; for (int i=0 ; i++ ;) { - const char *p=SSL_get_cipher_list((SSL*) thd->net.vio->ssl_arg,i); + const char *p= SSL_get_cipher_list((SSL*) thd->net.vio->ssl_arg,i); if (p == NULL) break; - pos=strmov(pos, p); - *pos++= ':'; + to= strmov(to, p); + *to++= ':'; } - if (pos != buf) - pos--; // Remove last ':' - *pos=0; - net_store_data(&packet2, buf); + if (to != buff) + to--; // Remove last ':' + end= to; } - else - net_store_data(&packet2, ""); break; #endif /* HAVE_OPENSSL */ + case SHOW_KEY_CACHE_LONG: + case SHOW_KEY_CACHE_CONST_LONG: + value= (value-(char*) &dflt_key_cache_var)+ (char*) dflt_key_cache; + end= int10_to_str(*(long*) value, buff, 10); + break; + case SHOW_KEY_CACHE_LONGLONG: + value= (value-(char*) &dflt_key_cache_var)+ (char*) dflt_key_cache; + end= longlong10_to_str(*(longlong*) value, buff, 10); + break; case SHOW_UNDEF: // Show never happen case SHOW_SYS: - net_store_data(&packet2, ""); // Safety + break; // Return empty string + default: break; } - if (my_net_write(&thd->net, (char*) packet2.ptr(),packet2.length())) + if (protocol->store(pos, (uint32) (end - pos), system_charset_info) || + protocol->write()) goto err; /* purecov: inspected */ } } pthread_mutex_unlock(mutex); - send_eof(&thd->net); + send_eof(thd); DBUG_RETURN(0); err: diff --git a/sql/sql_sort.h b/sql/sql_sort.h index 62c5f1cb164..9f95ffa4884 100644 --- a/sql/sql_sort.h +++ b/sql/sql_sort.h @@ -19,6 +19,30 @@ #define MERGEBUFF 7 #define MERGEBUFF2 15 +/* + The structure SORT_ADDON_FIELD describes a fixed layout + for field values appended to sorted values in records to be sorted + in the sort buffer. + Only fixed layout is supported now. + Null bit maps for the appended values is placed before the values + themselves. Offsets are from the last sorted field, that is from the + record referefence, which is still last component of sorted records. + It is preserved for backward compatiblility. + The structure is used tp store values of the additional fields + in the sort buffer. It is used also when these values are read + from a temporary file/buffer. As the reading procedures are beyond the + scope of the 'filesort' code the values have to be retrieved via + the callback function 'unpack_addon_fields'. +*/ + +typedef struct st_sort_addon_field { /* Sort addon packed field */ + Field *field; /* Original field */ + uint offset; /* Offset from the last sorted field */ + uint null_offset; /* Offset to to null bit from the last sorted field */ + uint length; /* Length in the sort buffer */ + uint8 null_bit; /* Null bit mask for the field */ +} SORT_ADDON_FIELD; + typedef struct st_buffpek { /* Struktur om sorteringsbuffrarna */ my_off_t file_pos; /* Where we are in the sort file */ uchar *base,*key; /* key pointers */ @@ -27,20 +51,21 @@ typedef struct st_buffpek { /* Struktur om sorteringsbuffrarna */ ulong max_keys; /* Max keys in buffert */ } BUFFPEK; - typedef struct st_sort_param { - uint sort_length; /* Length of sort columns */ - uint keys; /* Max keys / buffert */ + uint rec_length; /* Length of sorted records */ + uint sort_length; /* Length of sorted columns */ uint ref_length; /* Length of record ref. 
*/ + uint addon_length; /* Length of added packed fields */ + uint res_length; /* Length of records in final sorted file/buffer */ + uint keys; /* Max keys / buffer */ ha_rows max_rows,examined_rows; TABLE *sort_form; /* For quicker make_sortkey */ SORT_FIELD *local_sortorder; SORT_FIELD *end; + SORT_ADDON_FIELD *addon_field; /* Descriptors for companion fields */ uchar *unique_buff; bool not_killable; -#ifdef USE_STRCOLL char* tmp_buffer; -#endif } SORTPARAM; diff --git a/sql/sql_state.c b/sql/sql_state.c new file mode 100644 index 00000000000..355b847f239 --- /dev/null +++ b/sql/sql_state.c @@ -0,0 +1,53 @@ +/* Copyright (C) 2000-2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* Functions to map mysqld errno to sql_state */ + +#include <my_global.h> +#include <mysqld_error.h> + +struct st_map_errno_to_sqlstate +{ + uint mysql_errno; + const char *odbc_state; + const char *jdbc_state; +}; + +struct st_map_errno_to_sqlstate sqlstate_map[]= +{ +#include <sql_state.h> +}; + +const char *mysql_errno_to_sqlstate(uint mysql_errno) +{ + uint first=0, end= array_elements(sqlstate_map)-1; + struct st_map_errno_to_sqlstate *map; + + /* Do binary search in the sorted array */ + while (first != end) + { + uint mid= (first+end)/2; + map= sqlstate_map+mid; + if (map->mysql_errno < mysql_errno) + first= mid+1; + else + end= mid; + } + map= sqlstate_map+first; + if (map->mysql_errno == mysql_errno) + return map->odbc_state; + return "HY000"; /* General error */ +} diff --git a/sql/sql_string.cc b/sql/sql_string.cc index 658cd6d2411..939ffe8d9d2 100644 --- a/sql/sql_string.cc +++ b/sql/sql_string.cc @@ -16,7 +16,7 @@ /* This file is originally from the mysql distribution. 
Coded by monty */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_IMPLEMENTATION #pragma implementation // gcc: Class implementation #endif @@ -28,6 +28,11 @@ #include <floatingpoint.h> #endif +/* + The following extern declarations are ok as these are interface functions + required by the string function +*/ + extern gptr sql_alloc(unsigned size); extern void sql_element_free(void *ptr); @@ -91,36 +96,45 @@ bool String::realloc(uint32 alloc_length) return FALSE; } -bool String::set(longlong num) +bool String::set(longlong num, CHARSET_INFO *cs) { - if (alloc(21)) + uint l=20*cs->mbmaxlen+1; + + if (alloc(l)) return TRUE; - str_length=(uint32) (longlong10_to_str(num,Ptr,-10)-Ptr); + str_length=(uint32) (cs->cset->longlong10_to_str)(cs,Ptr,l,-10,num); + str_charset=cs; return FALSE; } -bool String::set(ulonglong num) +bool String::set(ulonglong num, CHARSET_INFO *cs) { - if (alloc(21)) + uint l=20*cs->mbmaxlen+1; + + if (alloc(l)) return TRUE; - str_length=(uint32) (longlong10_to_str(num,Ptr,10)-Ptr); + str_length=(uint32) (cs->cset->longlong10_to_str)(cs,Ptr,l,10,num); + str_charset=cs; return FALSE; } -bool String::set(double num,uint decimals) +bool String::set(double num,uint decimals, CHARSET_INFO *cs) { char buff[331]; + uint dummy_errors; + + str_charset=cs; if (decimals >= NOT_FIXED_DEC) { - sprintf(buff,"%.14g",num); // Enough for a DATETIME - return copy(buff, (uint32) strlen(buff)); + uint32 len= my_sprintf(buff,(buff, "%.14g",num));// Enough for a DATETIME + return copy(buff, len, &my_charset_latin1, cs, &dummy_errors); } #ifdef HAVE_FCONVERT int decpt,sign; char *pos,*to; VOID(fconvert(num,(int) decimals,&decpt,&sign,buff+1)); - if (!isdigit(buff[1])) + if (!my_isdigit(&my_charset_latin1, buff[1])) { // Nan or Inf pos=buff+1; if (sign) @@ -128,7 +142,8 @@ bool String::set(double num,uint decimals) buff[0]='-'; pos=buff; } - return copy(pos,(uint32) strlen(pos)); + uint dummy_errors; + return copy(pos,(uint32) strlen(pos), &my_charset_latin1, cs, &dummy_errors); } if (alloc((uint32) ((uint32) decpt+3+decimals))) return TRUE; @@ -178,7 +193,8 @@ end: #else sprintf(buff,"%.*f",(int) decimals,num); #endif - return copy(buff,(uint32) strlen(buff)); + return copy(buff,(uint32) strlen(buff), &my_charset_latin1, cs, + &dummy_errors); #endif } @@ -200,19 +216,178 @@ bool String::copy(const String &str) str_length=str.str_length; bmove(Ptr,str.Ptr,str_length); // May be overlapping Ptr[str_length]=0; + str_charset=str.str_charset; return FALSE; } -bool String::copy(const char *str,uint32 arg_length) +bool String::copy(const char *str,uint32 arg_length, CHARSET_INFO *cs) { if (alloc(arg_length)) return TRUE; if ((str_length=arg_length)) memcpy(Ptr,str,arg_length); Ptr[arg_length]=0; + str_charset=cs; + return FALSE; +} + + +/* + Checks that the source string can be just copied to the destination string + without conversion. + + SYNPOSIS + + needs_conversion() + arg_length Length of string to copy. + from_cs Character set to copy from + to_cs Character set to copy to + uint32 *offset Returns number of unaligned characters. + + RETURN + 0 No conversion needed + 1 Either character set conversion or adding leading zeros + (e.g. 
for UCS-2) must be done +*/ + +bool String::needs_conversion(uint32 arg_length, + CHARSET_INFO *from_cs, + CHARSET_INFO *to_cs, + uint32 *offset) +{ + *offset= 0; + if ((to_cs == &my_charset_bin) || + (to_cs == from_cs) || + my_charset_same(from_cs, to_cs) || + ((from_cs == &my_charset_bin) && + (!(*offset=(arg_length % to_cs->mbminlen))))) + return FALSE; + return TRUE; +} + + +/* + Copy a multi-byte character sets with adding leading zeros. + + SYNOPSIS + + copy_aligned() + str String to copy + arg_length Length of string. This should NOT be dividable with + cs->mbminlen. + offset arg_length % cs->mb_minlength + cs Character set for 'str' + + NOTES + For real multi-byte, ascii incompatible charactser sets, + like UCS-2, add leading zeros if we have an incomplete character. + Thus, + SELECT _ucs2 0xAA + will automatically be converted into + SELECT _ucs2 0x00AA + + RETURN + 0 ok + 1 error +*/ + +bool String::copy_aligned(const char *str,uint32 arg_length, uint32 offset, + CHARSET_INFO *cs) +{ + /* How many bytes are in incomplete character */ + offset= cs->mbmaxlen - offset; /* How many zeros we should prepend */ + DBUG_ASSERT(offset && offset != cs->mbmaxlen); + + uint32 aligned_length= arg_length + offset; + if (alloc(aligned_length)) + return TRUE; + + /* + Note, this is only safe for little-endian UCS-2. + If we add big-endian UCS-2 sometimes, this code + will be more complicated. But it's OK for now. + */ + bzero((char*) Ptr, offset); + memcpy(Ptr + offset, str, arg_length); + Ptr[aligned_length]=0; + /* str_length is always >= 0 as arg_length is != 0 */ + str_length= aligned_length; + str_charset= cs; + return FALSE; +} + + +bool String::set_or_copy_aligned(const char *str,uint32 arg_length, + CHARSET_INFO *cs) +{ + /* How many bytes are in incomplete character */ + uint32 offset= (arg_length % cs->mbminlen); + + if (!offset) /* All characters are complete, just copy */ + { + set(str, arg_length, cs); + return FALSE; + } + return copy_aligned(str, arg_length, offset, cs); +} + + /* Copy with charset convertion */ + +bool String::copy(const char *str, uint32 arg_length, + CHARSET_INFO *from_cs, CHARSET_INFO *to_cs, uint *errors) +{ + uint32 offset; + if (!needs_conversion(arg_length, from_cs, to_cs, &offset)) + { + *errors= 0; + return copy(str, arg_length, to_cs); + } + if ((from_cs == &my_charset_bin) && offset) + { + *errors= 0; + return copy_aligned(str, arg_length, offset, to_cs); + } + uint32 new_length= to_cs->mbmaxlen*arg_length; + if (alloc(new_length)) + return TRUE; + str_length=copy_and_convert((char*) Ptr, new_length, to_cs, + str, arg_length, from_cs, errors); + str_charset=to_cs; return FALSE; } + +/* + Set a string to the value of a latin1-string, keeping the original charset + + SYNOPSIS + copy_or_set() + str String of a simple charset (latin1) + arg_length Length of string + + IMPLEMENTATION + If string object is of a simple character set, set it to point to the + given string. + If not, make a copy and convert it to the new character set. 
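As a side note on the copy_aligned()/set_or_copy_aligned() rule documented above, the following standalone sketch shows the padding arithmetic for ASCII-incompatible charsets such as UCS-2. The helper name and the use of std::string are illustrative only and are not part of the patch.

    // Standalone sketch (not the server's String class): if the source
    // length is not a multiple of the target charset's minimum character
    // width, leading zero bytes are prepended so the result holds only
    // complete characters.
    #include <string>
    #include <cstddef>

    std::string align_to_fixed_width(const std::string &bytes, size_t mbminlen)
    {
      size_t offset = bytes.size() % mbminlen;   // bytes of the incomplete char
      if (offset == 0)
        return bytes;                            // already aligned, plain copy
      std::string out(mbminlen - offset, '\0');  // prepend the missing zeros
      out += bytes;
      return out;
    }

    // align_to_fixed_width("\xAA", 2) yields "\x00\xAA", matching the
    // SELECT _ucs2 0xAA -> _ucs2 0x00AA example in the comment above.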
+ + RETURN + 0 ok + 1 Could not allocate result buffer + +*/ + +bool String::set_ascii(const char *str, uint32 arg_length) +{ + if (str_charset->mbminlen == 1) + { + set(str, arg_length, str_charset); + return 0; + } + uint dummy_errors; + return copy(str, arg_length, &my_charset_latin1, str_charset, &dummy_errors); +} + + /* This is used by mysql.cc */ bool String::fill(uint32 max_length,char fill_char) @@ -231,7 +406,7 @@ bool String::fill(uint32 max_length,char fill_char) void String::strip_sp() { - while (str_length && isspace(Ptr[str_length-1])) + while (str_length && my_isspace(str_charset,Ptr[str_length-1])) str_length--; } @@ -247,11 +422,34 @@ bool String::append(const String &s) return FALSE; } + +/* + Append an ASCII string to the a string of the current character set +*/ + bool String::append(const char *s,uint32 arg_length) { - if (!arg_length) // Default argument - if (!(arg_length= (uint32) strlen(s))) - return FALSE; + if (!arg_length) + return FALSE; + + /* + For an ASCII incompatible string, e.g. UCS-2, we need to convert + */ + if (str_charset->mbminlen > 1) + { + uint32 add_length=arg_length * str_charset->mbmaxlen; + uint dummy_errors; + if (realloc(str_length+ add_length)) + return TRUE; + str_length+= copy_and_convert(Ptr+str_length, add_length, str_charset, + s, arg_length, &my_charset_latin1, + &dummy_errors); + return FALSE; + } + + /* + For an ASCII compatinble string we can just append. + */ if (realloc(str_length+arg_length)) return TRUE; memcpy(Ptr+str_length,s,arg_length); @@ -259,6 +457,46 @@ bool String::append(const char *s,uint32 arg_length) return FALSE; } + +/* + Append a 0-terminated ASCII string +*/ + +bool String::append(const char *s) +{ + return append(s, strlen(s)); +} + + +/* + Append a string in the given charset to the string + with character set recoding +*/ + +bool String::append(const char *s,uint32 arg_length, CHARSET_INFO *cs) +{ + uint32 dummy_offset; + + if (needs_conversion(arg_length, cs, str_charset, &dummy_offset)) + { + uint32 add_length= arg_length / cs->mbminlen * str_charset->mbmaxlen; + uint dummy_errors; + if (realloc(str_length + add_length)) + return TRUE; + str_length+= copy_and_convert(Ptr+str_length, add_length, str_charset, + s, arg_length, cs, &dummy_errors); + } + else + { + if (realloc(str_length + arg_length)) + return TRUE; + memcpy(Ptr + str_length, s, arg_length); + str_length+= arg_length; + } + return FALSE; +} + + #ifdef TO_BE_REMOVED bool String::append(FILE* file, uint32 arg_length, myf my_flags) { @@ -287,48 +525,33 @@ bool String::append(IO_CACHE* file, uint32 arg_length) return FALSE; } -uint32 String::numchars() +bool String::append_with_prefill(const char *s,uint32 arg_length, + uint32 full_length, char fill_char) { -#ifdef USE_MB - register uint32 n=0,mblen; - register const char *mbstr=Ptr; - register const char *end=mbstr+str_length; - if (use_mb(default_charset_info)) + int t_length= arg_length > full_length ? 
arg_length : full_length; + + if (realloc(str_length + t_length)) + return TRUE; + t_length= full_length - arg_length; + if (t_length > 0) { - while (mbstr < end) { - if ((mblen=my_ismbchar(default_charset_info, mbstr,end))) mbstr+=mblen; - else ++mbstr; - ++n; - } - return n; + bfill(Ptr+str_length, t_length, fill_char); + str_length=str_length + t_length; } - else -#endif - return str_length; + append(s, arg_length); + return FALSE; +} + +uint32 String::numchars() +{ + return str_charset->cset->numchars(str_charset, Ptr, Ptr+str_length); } int String::charpos(int i,uint32 offset) { -#ifdef USE_MB - register uint32 mblen; - register const char *mbstr=Ptr+offset; - register const char *end=Ptr+str_length; - if (use_mb(default_charset_info)) - { - if (i<=0) return i; - while (i && mbstr < end) { - if ((mblen=my_ismbchar(default_charset_info, mbstr,end))) mbstr+=mblen; - else ++mbstr; - --i; - } - if ( INT_MAX32-i <= (int) (mbstr-Ptr-offset)) - return INT_MAX32; - else - return (int) ((mbstr-Ptr-offset)+i); - } - else -#endif + if (i <= 0) return i; + return str_charset->cset->charpos(str_charset,Ptr+offset,Ptr+str_length,i); } int String::strstr(const String &s,uint32 offset) @@ -342,7 +565,7 @@ int String::strstr(const String &s,uint32 offset) register const char *search=s.ptr(); const char *end=Ptr+str_length-s.length()+1; const char *search_end=s.ptr()+s.length(); -skipp: +skip: while (str != end) { if (*str++ == *search) @@ -350,39 +573,7 @@ skipp: register char *i,*j; i=(char*) str; j=(char*) search+1; while (j != search_end) - if (*i++ != *j++) goto skipp; - return (int) (str-Ptr) -1; - } - } - } - return -1; -} - -/* - Search after a string without regarding to case - This needs to be replaced when we have character sets per string -*/ - -int String::strstr_case(const String &s,uint32 offset) -{ - if (s.length()+offset <= str_length) - { - if (!s.length()) - return ((int) offset); // Empty string is always found - - register const char *str = Ptr+offset; - register const char *search=s.ptr(); - const char *end=Ptr+str_length-s.length()+1; - const char *search_end=s.ptr()+s.length(); -skipp: - while (str != end) - { - if (my_sort_order[*str++] == my_sort_order[*search]) - { - register char *i,*j; - i=(char*) str; j=(char*) search+1; - while (j != search_end) - if (my_sort_order[*i++] != my_sort_order[*j++]) goto skipp; + if (*i++ != *j++) goto skip; return (int) (str-Ptr) -1; } } @@ -405,7 +596,7 @@ int String::strrstr(const String &s,uint32 offset) const char *end=Ptr+s.length()-2; const char *search_end=s.ptr()-1; -skipp: +skip: while (str != end) { if (*str-- == *search) @@ -413,7 +604,7 @@ skipp: register char *i,*j; i=(char*) str; j=(char*) search-1; while (j != search_end) - if (*i-- != *j--) goto skipp; + if (*i-- != *j--) goto skip; return (int) (i-Ptr) +1; } } @@ -429,14 +620,20 @@ skipp: bool String::replace(uint32 offset,uint32 arg_length,const String &to) { - long diff = (long) to.length()-(long) arg_length; + return replace(offset,arg_length,to.ptr(),to.length()); +} + +bool String::replace(uint32 offset,uint32 arg_length, + const char *to,uint32 length) +{ + long diff = (long) length-(long) arg_length; if (offset+arg_length <= str_length) { if (diff < 0) { - if (to.length()) - memcpy(Ptr+offset,to.ptr(),to.length()); - bmove(Ptr+offset+to.length(),Ptr+offset+arg_length, + if (length) + memcpy(Ptr+offset,to,length); + bmove(Ptr+offset+length,Ptr+offset+arg_length, str_length-offset-arg_length); } else @@ -448,8 +645,8 @@ bool String::replace(uint32 offset,uint32 
arg_length,const String &to) bmove_upp(Ptr+str_length+diff,Ptr+str_length, str_length-offset-arg_length); } - if (to.length()) - memcpy(Ptr+offset,to.ptr(),to.length()); + if (length) + memcpy(Ptr+offset,to,length); } str_length+=(uint32) diff; } @@ -457,75 +654,87 @@ bool String::replace(uint32 offset,uint32 arg_length,const String &to) } -int sortcmp(const String *x,const String *y) +// added by Holyfoot for "geometry" needs +int String::reserve(uint32 space_needed, uint32 grow_by) { - const char *s= x->ptr(); - const char *t= y->ptr(); - uint32 x_len=x->length(),y_len=y->length(),len=min(x_len,y_len); - -#ifdef USE_STRCOLL - if (use_strcoll(default_charset_info)) - { -#ifndef CMP_ENDSPACE - while (x_len && s[x_len-1] == ' ') - x_len--; - while (y_len && t[y_len-1] == ' ') - y_len--; -#endif - return my_strnncoll(default_charset_info, - (unsigned char *)s,x_len,(unsigned char *)t,y_len); - } - else + if (Alloced_length < str_length + space_needed) { -#endif /* USE_STRCOLL */ - x_len-=len; // For easy end space test - y_len-=len; - while (len--) - { - if (my_sort_order[(uchar) *s++] != my_sort_order[(uchar) *t++]) - return ((int) my_sort_order[(uchar) s[-1]] - - (int) my_sort_order[(uchar) t[-1]]); - } -#ifndef CMP_ENDSPACE - /* Don't compare end space in strings */ - { - if (y_len) - { - const char *end=t+y_len; - for (; t != end ; t++) - if (*t != ' ') - return -1; - } - else - { - const char *end=s+x_len; - for (; s != end ; s++) - if (*s != ' ') - return 1; - } - return 0; - } -#else - return (int) (x_len-y_len); -#endif /* CMP_ENDSPACE */ -#ifdef USE_STRCOLL + if (realloc(Alloced_length + max(space_needed, grow_by) - 1)) + return TRUE; } -#endif + return FALSE; } +void String::qs_append(const char *str, uint32 len) +{ + memcpy(Ptr + str_length, str, len + 1); + str_length += len; +} -int stringcmp(const String *x,const String *y) +void String::qs_append(double d) { - const char *s= x->ptr(); - const char *t= y->ptr(); - uint32 x_len=x->length(),y_len=y->length(),len=min(x_len,y_len); + char *buff = Ptr + str_length; + str_length+= my_sprintf(buff, (buff, "%.14g", d)); +} - while (len--) - { - if (*s++ != *t++) - return ((int) (uchar) s[-1] - (int) (uchar) t[-1]); - } - return (int) (x_len-y_len); +void String::qs_append(double *d) +{ + double ld; + float8get(ld, (char*) d); + qs_append(ld); +} + + +/* + Compare strings according to collation, without end space. + + SYNOPSIS + sortcmp() + s First string + t Second string + cs Collation + + NOTE: + Normally this is case sensitive comparison + + RETURN + < 0 s < t + 0 s == t + > 0 s > t +*/ + + +int sortcmp(const String *s,const String *t, CHARSET_INFO *cs) +{ + return cs->coll->strnncollsp(cs, + (unsigned char *) s->ptr(),s->length(), + (unsigned char *) t->ptr(),t->length()); +} + + +/* + Compare strings byte by byte. End spaces are also compared. + + SYNOPSIS + stringcmp() + s First string + t Second string + + NOTE: + Strings are compared as a stream of unsigned chars + + RETURN + < 0 s < t + 0 s == t + > 0 s > t +*/ + + +int stringcmp(const String *s,const String *t) +{ + uint32 s_len=s->length(),t_len=t->length(),len=min(s_len,t_len); + int cmp= memcmp(s->ptr(), t->ptr(), len); + return (cmp) ? 
cmp : (int) (s_len - t_len); } @@ -542,263 +751,139 @@ String *copy_if_not_alloced(String *to,String *from,uint32 from_length) return from; // Actually an error if ((to->str_length=min(from->str_length,from_length))) memcpy(to->Ptr,from->Ptr,to->str_length); + to->str_charset=from->str_charset; return to; } -/* Make it easier to handle different charactersets */ -#ifdef USE_MB -#define INC_PTR(A,B) A+=((use_mb_flag && \ - my_ismbchar(default_charset_info,A,B)) ? \ - my_ismbchar(default_charset_info,A,B) : 1) -#else -#define INC_PTR(A,B) A++ -#endif +/**************************************************************************** + Help functions +****************************************************************************/ /* -** Compare string against string with wildcard -** 0 if matched -** -1 if not matched with wildcard -** 1 if matched with wildcard + copy a string from one character set to another + + SYNOPSIS + copy_and_convert() + to Store result here + to_cs Character set of result string + from Copy from here + from_length Length of from string + from_cs From character set + + NOTES + 'to' must be big enough as form_length * to_cs->mbmaxlen + + RETURN + length of bytes copied to 'to' */ -#ifdef LIKE_CMP_TOUPPER -#define likeconv(A) (uchar) toupper(A) -#else -#define likeconv(A) (uchar) my_sort_order[(uchar) (A)] -#endif -int wild_case_compare(const char *str,const char *str_end, - const char *wildstr,const char *wildend, - char escape) +uint32 +copy_and_convert(char *to, uint32 to_length, CHARSET_INFO *to_cs, + const char *from, uint32 from_length, CHARSET_INFO *from_cs, + uint *errors) { - int result= -1; // Not found, using wildcards -#ifdef USE_MB - bool use_mb_flag=use_mb(default_charset_info); -#endif - while (wildstr != wildend) + int cnvres; + my_wc_t wc; + const uchar *from_end= (const uchar*) from+from_length; + char *to_start= to; + uchar *to_end= (uchar*) to+to_length; + int (*mb_wc)(struct charset_info_st *, my_wc_t *, const uchar *, + const uchar *) = from_cs->cset->mb_wc; + int (*wc_mb)(struct charset_info_st *, my_wc_t, uchar *s, uchar *e)= + to_cs->cset->wc_mb; + uint error_count= 0; + + while (1) { - while (*wildstr != wild_many && *wildstr != wild_one) + if ((cnvres= (*mb_wc)(from_cs, &wc, (uchar*) from, + from_end)) > 0) + from+= cnvres; + else if (cnvres == MY_CS_ILSEQ) { - if (*wildstr == escape && wildstr+1 != wildend) - wildstr++; -#ifdef USE_MB - int l; - if (use_mb_flag && - (l = my_ismbchar(default_charset_info, wildstr, wildend))) - { - if (str+l > str_end || memcmp(str, wildstr, l) != 0) - return 1; - str += l; - wildstr += l; - } - else -#endif - if (str == str_end || likeconv(*wildstr++) != likeconv(*str++)) - return(1); // No match - if (wildstr == wildend) - return (str != str_end); // Match if both are at end - result=1; // Found an anchor char + error_count++; + from++; + wc= '?'; } - if (*wildstr == wild_one) + else if (cnvres > MY_CS_TOOSMALL) { - do - { - if (str == str_end) // Skip one char if possible - return (result); - INC_PTR(str,str_end); - } while (++wildstr < wildend && *wildstr == wild_one); - if (wildstr == wildend) - break; + /* + A correct multibyte sequence detected + But it doesn't have Unicode mapping. 
+ */ + error_count++; + from+= (-cnvres); + wc= '?'; } - if (*wildstr == wild_many) - { // Found wild_many - wildstr++; - /* Remove any '%' and '_' from the wild search string */ - for (; wildstr != wildend ; wildstr++) - { - if (*wildstr == wild_many) - continue; - if (*wildstr == wild_one) - { - if (str == str_end) - return (-1); - INC_PTR(str,str_end); - continue; - } - break; // Not a wild character - } - if (wildstr == wildend) - return(0); // Ok if wild_many is last - if (str == str_end) - return -1; - - uchar cmp; - if ((cmp= *wildstr) == escape && wildstr+1 != wildend) - cmp= *++wildstr; -#ifdef USE_MB - const char* mb = wildstr; - int mblen; - LINT_INIT(mblen); - if (use_mb_flag) - mblen = my_ismbchar(default_charset_info, wildstr, wildend); -#endif - INC_PTR(wildstr,wildend); // This is compared trough cmp - cmp=likeconv(cmp); - do - { -#ifdef USE_MB - if (use_mb_flag) - { - for (;;) - { - if (str >= str_end) - return -1; - if (mblen) - { - if (str+mblen <= str_end && memcmp(str, mb, mblen) == 0) - { - str += mblen; - break; - } - } - else if (!my_ismbchar(default_charset_info, str, str_end) && - likeconv(*str) == cmp) - { - str++; - break; - } - INC_PTR(str, str_end); - } - } - else - { -#endif /* USE_MB */ - while (str != str_end && likeconv(*str) != cmp) - str++; - if (str++ == str_end) return (-1); -#ifdef USE_MB - } -#endif - { - int tmp=wild_case_compare(str,str_end,wildstr,wildend,escape); - if (tmp <= 0) - return (tmp); - } - } while (str != str_end && wildstr[0] != wild_many); - return(-1); + else + break; // Not enough characters + +outp: + if ((cnvres= (*wc_mb)(to_cs, wc, (uchar*) to, to_end)) > 0) + to+= cnvres; + else if (cnvres == MY_CS_ILUNI && wc != '?') + { + error_count++; + wc= '?'; + goto outp; } + else + break; } - return (str != str_end ? 
1 : 0); + *errors= error_count; + return (uint32) (to - to_start); } -int wild_case_compare(String &match,String &wild, char escape) -{ - DBUG_ENTER("wild_case_compare"); - DBUG_PRINT("enter",("match='%s', wild='%s', escape='%c'" - ,match.ptr(),wild.ptr(),escape)); - DBUG_RETURN(wild_case_compare(match.ptr(),match.ptr()+match.length(), - wild.ptr(), wild.ptr()+wild.length(),escape)); -} - -/* -** The following is used when using LIKE on binary strings -*/ - -int wild_compare(const char *str,const char *str_end, - const char *wildstr,const char *wildend,char escape) +void String::print(String *str) { - DBUG_ENTER("wild_compare"); - DBUG_PRINT("enter",("str='%s', str_end='%s', wildstr='%s', wildend='%s', escape='%c'" - ,str,str_end,wildstr,wildend,escape)); - int result= -1; // Not found, using wildcards - while (wildstr != wildend) + char *st= (char*)Ptr, *end= st+str_length; + for (; st < end; st++) { - while (*wildstr != wild_many && *wildstr != wild_one) + uchar c= *st; + switch (c) { - if (*wildstr == escape && wildstr+1 != wildend) - wildstr++; - if (str == str_end || *wildstr++ != *str++) - { - DBUG_RETURN(1); - } - if (wildstr == wildend) - { - DBUG_RETURN(str != str_end); // Match if both are at end - } - result=1; // Found an anchor char - } - if (*wildstr == wild_one) - { - do - { - if (str == str_end) // Skip one char if possible - DBUG_RETURN(result); - str++; - } while (*++wildstr == wild_one && wildstr != wildend); - if (wildstr == wildend) - break; - } - if (*wildstr == wild_many) - { // Found wild_many - wildstr++; - /* Remove any '%' and '_' from the wild search string */ - for (; wildstr != wildend ; wildstr++) - { - if (*wildstr == wild_many) - continue; - if (*wildstr == wild_one) - { - if (str == str_end) - { - DBUG_RETURN(-1); - } - str++; - continue; - } - break; // Not a wild character - } - if (wildstr == wildend) - { - DBUG_RETURN(0); // Ok if wild_many is last - } - if (str == str_end) - { - DBUG_RETURN(-1); - } - char cmp; - if ((cmp= *wildstr) == escape && wildstr+1 != wildend) - cmp= *++wildstr; - wildstr++; // This is compared trough cmp - do - { - while (str != str_end && *str != cmp) - str++; - if (str++ == str_end) - { - DBUG_RETURN(-1); - } - { - int tmp=wild_compare(str,str_end,wildstr,wildend,escape); - if (tmp <= 0) - { - DBUG_RETURN(tmp); - } - } - } while (str != str_end && wildstr[0] != wild_many); - DBUG_RETURN(-1); + case '\\': + str->append("\\\\", 2); + break; + case '\0': + str->append("\\0", 2); + break; + case '\'': + str->append("\\'", 2); + break; + case '\n': + str->append("\\n", 2); + break; + case '\r': + str->append("\\r", 2); + break; + case 26: //Ctrl-Z + str->append("\\z", 2); + break; + default: + str->append(c); } } - DBUG_RETURN(str != str_end ? 1 : 0); } -int wild_compare(String &match,String &wild, char escape) +/* + Exchange state of this object and argument. + + SYNOPSIS + String::swap() + + RETURN + Target string will contain state of this object and vice versa. 
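For readers skimming the new copy_and_convert() above, here is a minimal standalone sketch of its contract: decode one character at a time, re-encode it in the target set, substitute '?' for anything that cannot be represented, count the substitutions, and return the number of bytes written. The latin1-to-7-bit-ASCII pair is a toy stand-in, not the server's mb_wc/wc_mb handlers.

    #include <cstddef>

    size_t copy_latin1_to_ascii(char *to, size_t to_len,
                                const char *from, size_t from_len,
                                unsigned *errors)
    {
      size_t out = 0;
      *errors = 0;
      for (size_t i = 0; i < from_len && out < to_len; i++)
      {
        unsigned char c = (unsigned char) from[i];  // "decode" one character
        if (c > 0x7F)                               // no mapping in the target set
        {
          c = '?';
          (*errors)++;
        }
        to[out++] = (char) c;                       // "encode" into the target set
      }
      return out;   // bytes written, like copy_and_convert()'s return value
    }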
+*/ + +void String::swap(String &s) { - DBUG_ENTER("wild_compare"); - DBUG_PRINT("enter",("match='%s', wild='%s', escape='%c'" - ,match.ptr(),wild.ptr(),escape)); - DBUG_RETURN(wild_compare(match.ptr(),match.ptr()+match.length(), - wild.ptr(), wild.ptr()+wild.length(),escape)); + swap_variables(char *, Ptr, s.Ptr); + swap_variables(uint32, str_length, s.str_length); + swap_variables(uint32, Alloced_length, s.Alloced_length); + swap_variables(bool, alloced, s.alloced); + swap_variables(CHARSET_INFO*, str_charset, s.str_charset); } diff --git a/sql/sql_string.h b/sql/sql_string.h index ad7455ecbf1..31cdd6efb8a 100644 --- a/sql/sql_string.h +++ b/sql/sql_string.h @@ -16,7 +16,7 @@ /* This file is originally from the mysql distribution. Coded by monty */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_INTERFACE #pragma interface /* gcc class implementation */ #endif @@ -25,37 +25,60 @@ #endif class String; -int sortcmp(const String *a,const String *b); -int stringcmp(const String *a,const String *b); +int sortcmp(const String *a,const String *b, CHARSET_INFO *cs); String *copy_if_not_alloced(String *a,String *b,uint32 arg_length); -int wild_case_compare(String &match,String &wild,char escape); -int wild_compare(String &match,String &wild,char escape); +uint32 copy_and_convert(char *to, uint32 to_length, CHARSET_INFO *to_cs, + const char *from, uint32 from_length, + CHARSET_INFO *from_cs, uint *errors); class String { char *Ptr; uint32 str_length,Alloced_length; bool alloced; + CHARSET_INFO *str_charset; public: String() - { Ptr=0; str_length=Alloced_length=0; alloced=0; } + { + Ptr=0; str_length=Alloced_length=0; alloced=0; + str_charset= &my_charset_bin; + } String(uint32 length_arg) - { alloced=0; Alloced_length=0; (void) real_alloc(length_arg); } - String(const char *str) - { Ptr=(char*) str; str_length=(uint) strlen(str); Alloced_length=0; alloced=0;} - String(const char *str,uint32 len) - { Ptr=(char*) str; str_length=len; Alloced_length=0; alloced=0;} - String(char *str,uint32 len) - { Ptr=(char*) str; Alloced_length=str_length=len; alloced=0;} + { + alloced=0; Alloced_length=0; (void) real_alloc(length_arg); + str_charset= &my_charset_bin; + } + String(const char *str, CHARSET_INFO *cs) + { + Ptr=(char*) str; str_length=(uint) strlen(str); Alloced_length=0; alloced=0; + str_charset=cs; + } + String(const char *str,uint32 len, CHARSET_INFO *cs) + { + Ptr=(char*) str; str_length=len; Alloced_length=0; alloced=0; + str_charset=cs; + } + String(char *str,uint32 len, CHARSET_INFO *cs) + { + Ptr=(char*) str; Alloced_length=str_length=len; alloced=0; + str_charset=cs; + } String(const String &str) - { Ptr=str.Ptr ; str_length=str.str_length ; - Alloced_length=str.Alloced_length; alloced=0; } - - static void *operator new(size_t size) { return (void*) sql_alloc((uint) size); } - static void operator delete(void *ptr_arg,size_t size) /*lint -e715 */ - { sql_element_free(ptr_arg); } + { + Ptr=str.Ptr ; str_length=str.str_length ; + Alloced_length=str.Alloced_length; alloced=0; + str_charset=str.str_charset; + } + static void *operator new(size_t size, MEM_ROOT *mem_root) + { return (void*) alloc_root(mem_root, (uint) size); } + static void operator delete(void *ptr_arg,size_t size) + {} + static void operator delete(void *ptr_arg, MEM_ROOT *mem_root) + {} ~String() { free(); } + inline void set_charset(CHARSET_INFO *charset) { str_charset= charset; } + inline CHARSET_INFO *charset() const { return str_charset; } inline uint32 length() const { return str_length;} inline uint32 alloced_length() const { return 
Alloced_length;} inline char& operator [] (uint32 i) const { return Ptr[i]; } @@ -74,37 +97,50 @@ public: Ptr[str_length]=0; return Ptr; } + inline char *c_ptr_safe() + { + if (Ptr && str_length < Alloced_length) + Ptr[str_length]=0; + else + (void) realloc(str_length); + return Ptr; + } void set(String &str,uint32 offset,uint32 arg_length) { + DBUG_ASSERT(&str != this); free(); Ptr=(char*) str.ptr()+offset; str_length=arg_length; alloced=0; if (str.Alloced_length) Alloced_length=str.Alloced_length-offset; else Alloced_length=0; + str_charset=str.str_charset; } - inline void set(char *str,uint32 arg_length) + inline void set(char *str,uint32 arg_length, CHARSET_INFO *cs) { free(); Ptr=(char*) str; str_length=Alloced_length=arg_length ; alloced=0; + str_charset=cs; } - inline void set(const char *str,uint32 arg_length) + inline void set(const char *str,uint32 arg_length, CHARSET_INFO *cs) { free(); Ptr=(char*) str; str_length=arg_length; Alloced_length=0 ; alloced=0; + str_charset=cs; } - inline void set_quick(char *str,uint32 arg_length) + bool set_ascii(const char *str, uint32 arg_length); + inline void set_quick(char *str,uint32 arg_length, CHARSET_INFO *cs) { if (!alloced) { Ptr=(char*) str; str_length=Alloced_length=arg_length; } + str_charset=cs; } - bool set(longlong num); - /* bool set(long num); */ - bool set(ulonglong num); - bool set(double num,uint decimals=2); + bool set(longlong num, CHARSET_INFO *cs); + bool set(ulonglong num, CHARSET_INFO *cs); + bool set(double num,uint decimals, CHARSET_INFO *cs); inline void free() { if (alloced) @@ -141,11 +177,20 @@ public: } } } + inline void shrink_to_length() + { + Alloced_length= str_length; + } bool is_alloced() { return alloced; } inline String& operator = (const String &s) { if (&s != this) { + /* + It is forbidden to do assignments like + some_string = substring_of_that_string + */ + DBUG_ASSERT(!s.uses_buffer_owned_by(this)); free(); Ptr=s.Ptr ; str_length=s.str_length ; Alloced_length=s.Alloced_length; alloced=0; @@ -155,13 +200,25 @@ public: bool copy(); // Alloc string if not alloced bool copy(const String &s); // Allocate new string - bool copy(const char *s,uint32 arg_length); // Allocate new string + bool copy(const char *s,uint32 arg_length, CHARSET_INFO *cs); // Allocate new string + static bool needs_conversion(uint32 arg_length, + CHARSET_INFO *cs_from, CHARSET_INFO *cs_to, + uint32 *offset); + bool copy_aligned(const char *s, uint32 arg_length, uint32 offset, + CHARSET_INFO *cs); + bool set_or_copy_aligned(const char *s, uint32 arg_length, CHARSET_INFO *cs); + bool copy(const char*s,uint32 arg_length, CHARSET_INFO *csfrom, + CHARSET_INFO *csto, uint *errors); bool append(const String &s); - bool append(const char *s,uint32 arg_length=0); + bool append(const char *s); + bool append(const char *s,uint32 arg_length); + bool append(const char *s,uint32 arg_length, CHARSET_INFO *cs); bool append(IO_CACHE* file, uint32 arg_length); + bool append_with_prefill(const char *s, uint32 arg_length, + uint32 full_length, char fill_char); int strstr(const String &search,uint32 offset=0); // Returns offset to substring or -1 - int strstr_case(const String &s,uint32 offset=0); int strrstr(const String &search,uint32 offset=0); // Returns offset to substring or -1 + bool replace(uint32 offset,uint32 arg_length,const char *to,uint32 length); bool replace(uint32 offset,uint32 arg_length,const String &to); inline bool append(char chr) { @@ -179,13 +236,95 @@ public: } bool fill(uint32 max_length,char fill); void strip_sp(); - inline void 
caseup() { ::caseup(Ptr,str_length); } - inline void casedn() { ::casedn(Ptr,str_length); } - friend int sortcmp(const String *a,const String *b); + inline void caseup() { my_caseup(str_charset,Ptr,str_length); } + inline void casedn() { my_casedn(str_charset,Ptr,str_length); } + friend int sortcmp(const String *a,const String *b, CHARSET_INFO *cs); friend int stringcmp(const String *a,const String *b); friend String *copy_if_not_alloced(String *a,String *b,uint32 arg_length); - friend int wild_case_compare(String &match,String &wild,char escape); - friend int wild_compare(String &match,String &wild,char escape); uint32 numchars(); int charpos(int i,uint32 offset=0); + + int reserve(uint32 space_needed) + { + return realloc(str_length + space_needed); + } + int reserve(uint32 space_needed, uint32 grow_by); + + /* + The following append operations do NOT check alloced memory + q_*** methods writes values of parameters itself + qs_*** methods writes string representation of value + */ + void q_append(const char c) + { + Ptr[str_length++] = c; + } + void q_append(const uint32 n) + { + int4store(Ptr + str_length, n); + str_length += 4; + } + void q_append(double d) + { + float8store(Ptr + str_length, d); + str_length += 8; + } + void q_append(double *d) + { + float8store(Ptr + str_length, *d); + str_length += 8; + } + void q_append(const char *data, uint32 data_len) + { + memcpy(Ptr + str_length, data, data_len); + str_length += data_len; + } + + void write_at_position(int position, uint32 value) + { + int4store(Ptr + position,value); + } + + void qs_append(const char *str, uint32 len); + void qs_append(double d); + void qs_append(double *d); + inline void qs_append(const char c) + { + Ptr[str_length]= c; + str_length++; + } + + /* Inline (general) functions used by the protocol functions */ + + inline char *prep_append(uint32 arg_length, uint32 step_alloc) + { + uint32 new_length= arg_length + str_length; + if (new_length > Alloced_length) + { + if (realloc(new_length + step_alloc)) + return 0; + } + uint32 old_length= str_length; + str_length+= arg_length; + return Ptr+ old_length; /* Area to use */ + } + + inline bool append(const char *s, uint32 arg_length, uint32 step_alloc) + { + uint32 new_length= arg_length + str_length; + if (new_length > Alloced_length && realloc(new_length + step_alloc)) + return TRUE; + memcpy(Ptr+str_length, s, arg_length); + str_length+= arg_length; + return FALSE; + } + void print(String *print); + + /* Swap two string objects. Efficient way to exchange data without memcpy. 
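The q_append()/qs_append() members added above trade per-call bounds checks for an up-front reserve(). The sketch below illustrates that pattern with an invented GrowBuf type, not the server's String class, and uses native byte order where int4store() would force a little-endian layout.

    #include <cstring>
    #include <cstdint>
    #include <vector>

    struct GrowBuf
    {
      std::vector<char> buf;
      size_t len = 0;

      // Counterpart of String::reserve(): make room once, up front.
      void reserve(size_t space_needed) { buf.resize(len + space_needed); }

      // Counterparts of q_append(): the caller guarantees the space exists.
      void q_append(char c)                     { buf[len++] = c; }
      void q_append(const char *data, size_t n) { memcpy(&buf[len], data, n); len += n; }
      void q_append_u32(uint32_t v)             { memcpy(&buf[len], &v, 4); len += 4; }
    };

    // Usage: one size check for the whole row instead of one per field.
    // GrowBuf b; b.reserve(4 + 5); b.q_append_u32(42); b.q_append("hello", 5);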
*/ + void swap(String &s); + + inline bool uses_buffer_owned_by(const String *s) const + { + return (s->alloced && Ptr >= s->Ptr && Ptr < s->Ptr + s->str_length); + } }; diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 987d12ccb40..c9513ec71ab 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -1,4 +1,4 @@ -/* Copyright (C) 2000-2003 MySQL AB +/* Copyright (C) 2000-2004 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -24,25 +24,50 @@ #include <hash.h> #include <myisam.h> #include <my_dir.h> -#include <assert.h> #ifdef __WIN__ #include <io.h> #endif -#include "sql_acl.h" // for SUPER_ACL - -extern HASH open_cache; -static const char *primary_key_name="PRIMARY"; +const char *primary_key_name="PRIMARY"; static bool check_if_keyname_exists(const char *name,KEY *start, KEY *end); static char *make_unique_key_name(const char *field_name,KEY *start,KEY *end); static int copy_data_between_tables(TABLE *from,TABLE *to, List<create_field> &create, enum enum_duplicates handle_duplicates, - ORDER *order, + bool ignore, + uint order_num, ORDER *order, ha_rows *copied,ha_rows *deleted); + +/* + Build the path to a file for a table (or the base path that can + then have various extensions stuck on to it). + + SYNOPSIS + build_table_path() + buff Buffer to build the path into + bufflen sizeof(buff) + db Name of database + table Name of table + ext Filename extension + + RETURN + 0 Error + # Size of path + */ + +static uint build_table_path(char *buff, size_t bufflen, const char *db, + const char *table, const char *ext) +{ + strxnmov(buff, bufflen-1, mysql_data_home, "/", db, "/", table, ext, + NullS); + return unpack_filename(buff,buff); +} + + + /* delete (drop) tables. 
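The new build_table_path() helper above centralises the datadir/db/table+extension concatenation that the removed strxmov()/sprintf() call sites did by hand. A hedged standalone approximation, using snprintf() in place of strxnmov()/unpack_filename(), looks like this:

    #include <cstdio>
    #include <cstddef>

    // Returns the length of the path that was built, or 0 on error,
    // mirroring the contract documented for build_table_path().
    static size_t sketch_table_path(char *buff, size_t bufflen,
                                    const char *datadir, const char *db,
                                    const char *table, const char *ext)
    {
      int n = snprintf(buff, bufflen, "%s/%s/%s%s", datadir, db, table, ext);
      if (n < 0 || (size_t) n >= bufflen)
        return 0;                 // buffer too small: error, as in the original
      return (size_t) n;
    }

    // sketch_table_path(buf, sizeof(buf), "/var/lib/mysql", "test", "t1", ".frm")
    // yields "/var/lib/mysql/test/t1.frm".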
@@ -66,7 +91,8 @@ static int copy_data_between_tables(TABLE *from,TABLE *to, */ -int mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists) +int mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists, + my_bool drop_temporary) { int error= 0; DBUG_ENTER("mysql_rm_table"); @@ -77,7 +103,7 @@ int mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists) thd->mysys_var->current_cond= &COND_refresh; VOID(pthread_mutex_lock(&LOCK_open)); - if (global_read_lock) + if (!drop_temporary && global_read_lock) { if (thd->global_read_lock) { @@ -92,7 +118,7 @@ int mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists) } } - error=mysql_rm_table_part2(thd,tables,if_exists,0); + error=mysql_rm_table_part2(thd,tables, if_exists, drop_temporary, 0); err: pthread_mutex_unlock(&LOCK_open); @@ -104,7 +130,7 @@ int mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists) if (error) DBUG_RETURN(-1); - send_ok(&thd->net); + send_ok(thd); DBUG_RETURN(0); } @@ -130,14 +156,15 @@ int mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists) int mysql_rm_table_part2_with_lock(THD *thd, TABLE_LIST *tables, bool if_exists, - bool dont_log_query) + bool drop_temporary, bool dont_log_query) { int error; thd->mysys_var->current_mutex= &LOCK_open; thd->mysys_var->current_cond= &COND_refresh; VOID(pthread_mutex_lock(&LOCK_open)); - error=mysql_rm_table_part2(thd,tables, if_exists, dont_log_query); + error=mysql_rm_table_part2(thd,tables, if_exists, drop_temporary, + dont_log_query); pthread_mutex_unlock(&LOCK_open); @@ -150,6 +177,17 @@ int mysql_rm_table_part2_with_lock(THD *thd, /* + Execute the drop of a normal or temporary table + + SYNOPSIS + mysql_rm_table_part2() + thd Thread handler + tables Tables to drop + if_exists If set, don't give an error if table doesn't exists. + In this case we give an warning of level 'NOTE' + drop_temporary Only drop temporary tables + dont_log_query Don't log the query + TODO: When logging to the binary log, we should log tmp_tables and transactional tables as separate statements if we @@ -159,15 +197,19 @@ int mysql_rm_table_part2_with_lock(THD *thd, The current code only writes DROP statements that only uses temporary tables to the cache binary log. This should be ok on most cases, but not all. + + RETURN + 0 ok + 1 Error + -1 Thread was killed */ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, - bool dont_log_query) + bool drop_temporary, bool dont_log_query) { TABLE_LIST *table; char path[FN_REFLEN], *alias; String wrong_tables; - db_type table_type; int error; bool some_tables_deleted=0, tmp_table_deleted=0, foreign_key_error=0; DBUG_ENTER("mysql_rm_table_part2"); @@ -178,7 +220,6 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, for (table=tables ; table ; table=table->next) { char *db=table->db; - uint flags; mysql_ha_flush(thd, table, MYSQL_HA_CLOSE_FINAL, TRUE); if (!close_temporary_table(thd, db, table->real_name)) { @@ -186,34 +227,45 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, continue; // removed temporary table } - abort_locked_tables(thd,db,table->real_name); - flags= RTFC_WAIT_OTHER_THREAD_FLAG | RTFC_CHECK_KILLED_FLAG; - remove_table_from_cache(thd,db,table->real_name,flags); - drop_locked_tables(thd,db,table->real_name); - if (thd->killed) - DBUG_RETURN(-1); - alias= (lower_case_table_names == 2) ? 
table->alias : table->real_name; - /* remove form file and isam files */ - strxmov(path, mysql_data_home, "/", db, "/", alias, reg_ext, NullS); - (void) unpack_filename(path,path); error=0; - - if (access(path,F_OK)) + if (!drop_temporary) + { + abort_locked_tables(thd,db,table->real_name); + remove_table_from_cache(thd,db,table->real_name, + RTFC_WAIT_OTHER_THREAD_FLAG | + RTFC_CHECK_KILLED_FLAG); + drop_locked_tables(thd,db,table->real_name); + if (thd->killed) + DBUG_RETURN(-1); + alias= (lower_case_table_names == 2) ? table->alias : table->real_name; + /* remove form file and isam files */ + build_table_path(path, sizeof(path), db, alias, reg_ext); + } + if (drop_temporary || + (access(path,F_OK) && + ha_create_table_from_engine(thd, db, alias))) { - if (!if_exists) - error=1; + // Table was not found on disk and table can't be created from engine + if (if_exists) + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_BAD_TABLE_ERROR, ER(ER_BAD_TABLE_ERROR), + table->real_name); + else + error= 1; } else { char *end; - table_type= get_table_type(path); + db_type table_type= get_table_type(path); *(end=fn_ext(path))=0; // Remove extension for delete error=ha_delete_table(table_type, path); if (error == ENOENT && if_exists) error = 0; if (error == HA_ERR_ROW_IS_REFERENCED) - foreign_key_error=1; /* the table is referenced by a foreign key - constraint */ + { + /* the table is referenced by a foreign key constraint */ + foreign_key_error=1; + } if (!error || error == ENOENT) { /* Delete the table definition file */ @@ -226,22 +278,22 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, { if (wrong_tables.length()) wrong_tables.append(','); - wrong_tables.append(String(table->real_name)); + wrong_tables.append(String(table->real_name,system_charset_info)); } } - + thd->tmp_table_used= tmp_table_deleted; error= 0; if (wrong_tables.length()) { if (!foreign_key_error) my_printf_error(ER_BAD_TABLE_ERROR, ER(ER_BAD_TABLE_ERROR), MYF(0), - wrong_tables.c_ptr()); + wrong_tables.c_ptr()); else - my_error(ER_ROW_IS_REFERENCED,MYF(0)); + my_error(ER_ROW_IS_REFERENCED, MYF(0)); error= 1; } - if (some_tables_deleted || tmp_table_deleted) + if (some_tables_deleted || tmp_table_deleted || !error) { query_cache_invalidate3(thd, tables, 0); if (!dont_log_query) @@ -252,7 +304,8 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, if (!error) thd->clear_error(); Query_log_event qinfo(thd, thd->query, thd->query_length, - tmp_table_deleted && !some_tables_deleted); + tmp_table_deleted && !some_tables_deleted, + FALSE); mysql_bin_log.write(&qinfo); } } @@ -268,12 +321,10 @@ int quick_rm_table(enum db_type base,const char *db, { char path[FN_REFLEN]; int error=0; - (void) sprintf(path,"%s/%s/%s%s",mysql_data_home,db,table_name,reg_ext); - unpack_filename(path,path); + build_table_path(path, sizeof(path), db, table_name, reg_ext); if (my_delete(path,MYF(0))) error=1; /* purecov: inspected */ - sprintf(path,"%s/%s/%s",mysql_data_home,db,table_name); - unpack_filename(path,path); + *fn_ext(path)= 0; // Remove reg_ext return ha_delete_table(base,path) || error; } @@ -289,7 +340,6 @@ int quick_rm_table(enum db_type base,const char *db, PRIMARY keys are prioritized. */ - static int sort_keys(KEY *a, KEY *b) { if (a->flags & HA_NOSAME) @@ -314,7 +364,7 @@ static int sort_keys(KEY *a, KEY *b) return (a->flags & HA_FULLTEXT) ? 1 : -1; } /* - Prefer original key order. usable_key_parts contains here + Prefer original key order. 
usable_key_parts contains here the original key position. */ return ((a->usable_key_parts < b->usable_key_parts) ? -1 : @@ -322,76 +372,283 @@ static int sort_keys(KEY *a, KEY *b) 0); } +/* + Check TYPELIB (set or enum) for duplicates + + SYNOPSIS + check_duplicates_in_interval() + set_or_name "SET" or "ENUM" string for warning message + name name of the checked column + typelib list of values for the column + + DESCRIPTION + This function prints an warning for each value in list + which has some duplicates on its right + + RETURN VALUES + void +*/ + +void check_duplicates_in_interval(const char *set_or_name, + const char *name, TYPELIB *typelib, + CHARSET_INFO *cs) +{ + TYPELIB tmp= *typelib; + const char **cur_value= typelib->type_names; + unsigned int *cur_length= typelib->type_lengths; + + for ( ; tmp.count > 1; cur_value++, cur_length++) + { + tmp.type_names++; + tmp.type_lengths++; + tmp.count--; + if (find_type2(&tmp, (const char*)*cur_value, *cur_length, cs)) + { + push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_DUPLICATED_VALUE_IN_TYPE, + ER(ER_DUPLICATED_VALUE_IN_TYPE), + name,*cur_value,set_or_name); + } + } +} + /* - Create a table + Check TYPELIB (set or enum) max and total lengths SYNOPSIS - mysql_create_table() + calculate_interval_lengths() + cs charset+collation pair of the interval + typelib list of values for the column + max_length length of the longest item + tot_length sum of the item lengths + + DESCRIPTION + After this function call: + - ENUM uses max_length + - SET uses tot_length. + + RETURN VALUES + void +*/ +void calculate_interval_lengths(CHARSET_INFO *cs, TYPELIB *interval, + uint32 *max_length, uint32 *tot_length) +{ + const char **pos; + uint *len; + *max_length= *tot_length= 0; + for (pos= interval->type_names, len= interval->type_lengths; + *pos ; pos++, len++) + { + uint length= cs->cset->numchars(cs, *pos, *pos + *len); + *tot_length+= length; + set_if_bigger(*max_length, (uint32)length); + } +} + + +/* + Preparation for table creation + + SYNOPSIS + mysql_prepare_table() thd Thread object - db Database - table_name Table name create_info Create information (like MAX_ROWS) fields List of fields to create keys List of keys to create - tmp_table Set to 1 if this is an internal temporary table - (From ALTER TABLE) - - DESCRIPTION - If one creates a temporary table, this is automaticly opened - no_log is needed for the case of CREATE ... SELECT, - as the logging will be done later in sql_insert.cc - select_field_count is also used for CREATE ... SELECT, - and must be zero for standard create of table. + DESCRIPTION + Prepares the table and key structures for table creation. 
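To make the duplicate check in check_duplicates_in_interval() above concrete: a note-level warning is pushed for every ENUM/SET member that re-appears later in the list, comparing with the column's collation. Below is a minimal stand-alone sketch of that idea; the case-insensitive helper is only a stand-in for the real collation-aware find_type2(), and all names are illustrative.

#include <cctype>
#include <cstdio>
#include <vector>

// Stand-in for a collation-aware comparison (the server uses find_type2()).
static bool eq_nocase(const char *a, const char *b)
{
  for (; *a && *b; a++, b++)
    if (tolower((unsigned char) *a) != tolower((unsigned char) *b))
      return false;
  return *a == *b;
}

int main()
{
  // Hypothetical member list for ENUM('red','green','RED')
  std::vector<const char*> members= { "red", "green", "RED" };

  // Warn for each value that has a duplicate somewhere to its right.
  for (size_t i= 0; i + 1 < members.size(); i++)
    for (size_t j= i + 1; j < members.size(); j++)
      if (eq_nocase(members[i], members[j]))
        printf("Note: duplicated value '%s' in ENUM\n", members[i]);
  return 0;
}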
RETURN VALUES 0 ok -1 error */ -int mysql_create_table(THD *thd,const char *db, const char *table_name, - HA_CREATE_INFO *create_info, - List<create_field> &fields, - List<Key> &keys,bool tmp_table) +int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, + List<create_field> &fields, + List<Key> &keys, bool tmp_table, uint &db_options, + handler *file, KEY *&key_info_buffer, + uint *key_count, int select_field_count) { - char path[FN_REFLEN]; - const char *key_name, *alias; + const char *key_name; create_field *sql_field,*dup_field; - int error= -1; - uint db_options,field,null_fields,blob_columns; - ulong record_offset; - KEY *key_info,*key_info_buffer; + uint field,null_fields,blob_columns; + ulong record_offset= 0; + KEY *key_info; KEY_PART_INFO *key_part_info; - int auto_increment=0; - handler *file; - DBUG_ENTER("mysql_create_table"); + int timestamps= 0, timestamps_with_niladic= 0; + int field_no,dup_no; + int select_field_pos,auto_increment=0; + DBUG_ENTER("mysql_prepare_table"); - /* Check for duplicate fields and check type of table to create */ - if (!fields.elements) - { - my_error(ER_TABLE_MUST_HAVE_COLUMNS,MYF(0)); - DBUG_RETURN(-1); - } List_iterator<create_field> it(fields),it2(fields); + select_field_pos=fields.elements - select_field_count; null_fields=blob_columns=0; - db_options=create_info->table_options; - if (create_info->row_type == ROW_TYPE_DYNAMIC) - db_options|=HA_OPTION_PACK_RECORD; - alias= table_case_name(create_info, table_name); - file=get_new_handler((TABLE*) 0, create_info->db_type); - if ((create_info->options & HA_LEX_CREATE_TMP_TABLE) && - (file->table_flags() & HA_NO_TEMP_TABLES)) + for (field_no=0; (sql_field=it++) ; field_no++) { - my_error(ER_ILLEGAL_HA,MYF(0),table_name); - DBUG_RETURN(-1); - } + /* + Initialize length from its original value (number of characters), + which was set in the parser. This is necessary if we're + executing a prepared statement for the second time. + */ + sql_field->length= sql_field->char_length; + if (!sql_field->charset) + sql_field->charset= create_info->default_table_charset; + /* + table_charset is set in ALTER TABLE if we want change character set + for all varchar/char columns. + But the table charset must not affect the BLOB fields, so don't + allow to change my_charset_bin to somethig else. + */ + if (create_info->table_charset && sql_field->charset != &my_charset_bin) + sql_field->charset= create_info->table_charset; - /* Don't pack keys in old tables if the user has requested this */ + CHARSET_INFO *savecs= sql_field->charset; + if ((sql_field->flags & BINCMP_FLAG) && + !(sql_field->charset= get_charset_by_csname(sql_field->charset->csname, + MY_CS_BINSORT,MYF(0)))) + { + char tmp[64]; + strmake(strmake(tmp, savecs->csname, sizeof(tmp)-4), "_bin", 4); + my_error(ER_UNKNOWN_COLLATION, MYF(0), tmp); + DBUG_RETURN(-1); + } - while ((sql_field=it++)) - { + if (sql_field->sql_type == FIELD_TYPE_SET || + sql_field->sql_type == FIELD_TYPE_ENUM) + { + uint32 dummy; + CHARSET_INFO *cs= sql_field->charset; + TYPELIB *interval= sql_field->interval; + + /* + Create typelib from interval_list, and if necessary + convert strings from client character set to the + column character set. + */ + if (!interval) + { + /* + Create the typelib in prepared statement memory if we're + executing one. 
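The point of allocating the typelib on the statement's memory root is lifetime: a prepared statement can be executed many times, so anything built while preparing it must outlive a single execution. The stand-alone sketch below illustrates the arena idea only; the Arena type and its methods are hypothetical simplifications, not the server's MEM_ROOT/Item_arena API.

#include <cstdio>
#include <cstring>
#include <vector>

// Hypothetical per-statement arena: blocks are freed only when the
// statement itself is destroyed, never between executions.
struct Arena
{
  std::vector<char*> blocks;
  char *dup(const char *s)
  {
    char *p= new char[strlen(s) + 1];
    strcpy(p, s);
    blocks.push_back(p);
    return p;
  }
  ~Arena() { for (char *p : blocks) delete[] p; }
};

int main()
{
  Arena stmt_arena;                                    // lives as long as the statement
  const char *interval_value= stmt_arena.dup("green"); // survives re-execution
  for (int execution= 0; execution < 3; execution++)
    printf("execution %d sees '%s'\n", execution, interval_value);
  return 0;
}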
+ */ + MEM_ROOT *stmt_root= thd->current_arena->mem_root; + + interval= sql_field->interval= typelib(stmt_root, + sql_field->interval_list); + List_iterator<String> it(sql_field->interval_list); + String conv, *tmp; + char comma_buf[2]; + int comma_length= cs->cset->wc_mb(cs, ',', (uchar*) comma_buf, + (uchar*) comma_buf + + sizeof(comma_buf)); + DBUG_ASSERT(comma_length > 0); + for (uint i= 0; (tmp= it++); i++) + { + if (String::needs_conversion(tmp->length(), tmp->charset(), + cs, &dummy)) + { + uint cnv_errs; + conv.copy(tmp->ptr(), tmp->length(), tmp->charset(), cs, &cnv_errs); + char *buf= (char*) alloc_root(stmt_root, conv.length()+1); + memcpy(buf, conv.ptr(), conv.length()); + buf[conv.length()]= '\0'; + interval->type_names[i]= buf; + interval->type_lengths[i]= conv.length(); + } + + // Strip trailing spaces. + uint lengthsp= cs->cset->lengthsp(cs, interval->type_names[i], + interval->type_lengths[i]); + interval->type_lengths[i]= lengthsp; + ((uchar *)interval->type_names[i])[lengthsp]= '\0'; + if (sql_field->sql_type == FIELD_TYPE_SET) + { + if (cs->coll->instr(cs, interval->type_names[i], + interval->type_lengths[i], + comma_buf, comma_length, NULL, 0)) + { + my_printf_error(ER_UNKNOWN_ERROR, + "Illegal %s '%-.64s' value found during parsing", + MYF(0), "set", tmp->ptr()); + DBUG_RETURN(-1); + } + } + } + sql_field->interval_list.empty(); // Don't need interval_list anymore + } + + /* + Convert the default value from client character + set into the column character set if necessary. + */ + if (sql_field->def && cs != sql_field->def->collation.collation) + { + Item_arena backup_arena; + bool need_to_change_arena= + !thd->current_arena->is_conventional_execution(); + if (need_to_change_arena) + { + /* Asser that we don't do that at every PS execute */ + DBUG_ASSERT(thd->current_arena->is_first_stmt_execute()); + thd->set_n_backup_item_arena(thd->current_arena, &backup_arena); + } + + sql_field->def= sql_field->def->safe_charset_converter(cs); + + if (need_to_change_arena) + thd->restore_backup_item_arena(thd->current_arena, &backup_arena); + + if (! 
sql_field->def) + { + /* Could not convert */ + my_error(ER_INVALID_DEFAULT, MYF(0), sql_field->field_name); + DBUG_RETURN(-1); + } + } + + if (sql_field->sql_type == FIELD_TYPE_SET) + { + if (sql_field->def) + { + char *not_used; + uint not_used2; + bool not_found= 0; + String str, *def= sql_field->def->val_str(&str); + def->length(cs->cset->lengthsp(cs, def->ptr(), def->length())); + (void) find_set(interval, def->ptr(), def->length(), + cs, ¬_used, ¬_used2, ¬_found); + if (not_found) + { + my_error(ER_INVALID_DEFAULT, MYF(0), sql_field->field_name); + DBUG_RETURN(-1); + } + } + calculate_interval_lengths(cs, interval, &dummy, &sql_field->length); + sql_field->length+= (interval->count - 1); + } + else /* FIELD_TYPE_ENUM */ + { + if (sql_field->def) + { + String str, *def= sql_field->def->val_str(&str); + def->length(cs->cset->lengthsp(cs, def->ptr(), def->length())); + if (!find_type2(interval, def->ptr(), def->length(), cs)) + { + my_error(ER_INVALID_DEFAULT, MYF(0), sql_field->field_name); + DBUG_RETURN(-1); + } + } + calculate_interval_lengths(cs, interval, &sql_field->length, &dummy); + } + set_if_smaller(sql_field->length, MAX_FIELD_WIDTH-1); + } + + sql_field->create_length_to_internal_length(); + + /* Don't pack keys in old tables if the user has requested this */ if ((sql_field->flags & BLOB_FLAG) || sql_field->sql_type == FIELD_TYPE_VAR_STRING && create_info->row_type != ROW_TYPE_FIXED) @@ -400,17 +657,55 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name, } if (!(sql_field->flags & NOT_NULL_FLAG)) null_fields++; + if (check_column_name(sql_field->field_name)) { my_error(ER_WRONG_COLUMN_NAME, MYF(0), sql_field->field_name); DBUG_RETURN(-1); } - while ((dup_field=it2++) != sql_field) + + /* Check if we have used the same field name before */ + for (dup_no=0; (dup_field=it2++) != sql_field; dup_no++) { - if (my_strcasecmp(sql_field->field_name, dup_field->field_name) == 0) + if (my_strcasecmp(system_charset_info, + sql_field->field_name, + dup_field->field_name) == 0) { - my_error(ER_DUP_FIELDNAME,MYF(0),sql_field->field_name); - DBUG_RETURN(-1); + /* + If this was a CREATE ... SELECT statement, accept a field + redefinition if we are changing a field in the SELECT part + */ + if (field_no < select_field_pos || dup_no >= select_field_pos) + { + my_error(ER_DUP_FIELDNAME,MYF(0),sql_field->field_name); + DBUG_RETURN(-1); + } + else + { + /* Field redefined */ + sql_field->def= dup_field->def; + sql_field->sql_type= dup_field->sql_type; + sql_field->charset= (dup_field->charset ? + dup_field->charset : + create_info->default_table_charset); + sql_field->length= dup_field->char_length; + sql_field->pack_length= dup_field->pack_length; + sql_field->create_length_to_internal_length(); + sql_field->decimals= dup_field->decimals; + sql_field->unireg_check= dup_field->unireg_check; + /* + We're making one field from two, the result field will have + dup_field->flags as flags. If we've incremented null_fields + because of sql_field->flags, decrement it back. 
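The calculate_interval_lengths() calls above decide the column width: an ENUM is sized by its longest member, a SET by the sum of all members plus one separator per additional member (the interval->count - 1 term). A small worked example, using byte lengths instead of the charset's numchars() for simplicity:

#include <algorithm>
#include <cstdio>
#include <cstring>

int main()
{
  // Hypothetical member list for ENUM('a','bb','ccc') / SET('a','bb','ccc')
  const char *values[]= { "a", "bb", "ccc" };
  const unsigned count= 3;

  unsigned max_length= 0, tot_length= 0;
  for (unsigned i= 0; i < count; i++)
  {
    unsigned len= (unsigned) strlen(values[i]);      // the server counts characters here
    tot_length+= len;
    max_length= std::max(max_length, len);
  }

  printf("ENUM length: %u\n", max_length);               // 3: longest member
  printf("SET  length: %u\n", tot_length + (count - 1)); // 6 + 2 commas = 8
  return 0;
}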
+ */ + if (!(sql_field->flags & NOT_NULL_FLAG)) + null_fields--; + sql_field->flags= dup_field->flags; + sql_field->interval= dup_field->interval; + it2.remove(); // Remove first (create) definition + select_field_pos--; + break; + } } } it2.rewind(); @@ -422,6 +717,8 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name, it.rewind(); while ((sql_field=it++)) { + DBUG_ASSERT(sql_field->charset); + switch (sql_field->sql_type) { case FIELD_TYPE_BLOB: case FIELD_TYPE_MEDIUM_BLOB: @@ -430,27 +727,59 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name, sql_field->pack_flag=FIELDFLAG_BLOB | pack_length_to_packflag(sql_field->pack_length - portable_sizeof_char_ptr); - if (sql_field->flags & BINARY_FLAG) + if (sql_field->charset->state & MY_CS_BINSORT) + sql_field->pack_flag|=FIELDFLAG_BINARY; + sql_field->length=8; // Unireg field length + sql_field->unireg_check=Field::BLOB_FIELD; + blob_columns++; + break; + case FIELD_TYPE_GEOMETRY: +#ifdef HAVE_SPATIAL + if (!(file->table_flags() & HA_CAN_GEOMETRY)) + { + my_printf_error(ER_CHECK_NOT_IMPLEMENTED, ER(ER_CHECK_NOT_IMPLEMENTED), + MYF(0), "GEOMETRY"); + DBUG_RETURN(-1); + } + sql_field->pack_flag=FIELDFLAG_GEOM | + pack_length_to_packflag(sql_field->pack_length - + portable_sizeof_char_ptr); + if (sql_field->charset->state & MY_CS_BINSORT) sql_field->pack_flag|=FIELDFLAG_BINARY; sql_field->length=8; // Unireg field length sql_field->unireg_check=Field::BLOB_FIELD; blob_columns++; break; +#else + my_printf_error(ER_FEATURE_DISABLED,ER(ER_FEATURE_DISABLED), MYF(0), + sym_group_geom.name, sym_group_geom.needed_define); + DBUG_RETURN(-1); +#endif /*HAVE_SPATIAL*/ case FIELD_TYPE_VAR_STRING: case FIELD_TYPE_STRING: sql_field->pack_flag=0; - if (sql_field->flags & BINARY_FLAG) + if (sql_field->charset->state & MY_CS_BINSORT) sql_field->pack_flag|=FIELDFLAG_BINARY; break; case FIELD_TYPE_ENUM: sql_field->pack_flag=pack_length_to_packflag(sql_field->pack_length) | FIELDFLAG_INTERVAL; + if (sql_field->charset->state & MY_CS_BINSORT) + sql_field->pack_flag|=FIELDFLAG_BINARY; sql_field->unireg_check=Field::INTERVAL_FIELD; + check_duplicates_in_interval("ENUM",sql_field->field_name, + sql_field->interval, + sql_field->charset); break; case FIELD_TYPE_SET: sql_field->pack_flag=pack_length_to_packflag(sql_field->pack_length) | FIELDFLAG_BITFIELD; + if (sql_field->charset->state & MY_CS_BINSORT) + sql_field->pack_flag|=FIELDFLAG_BINARY; sql_field->unireg_check=Field::BIT_FIELD; + check_duplicates_in_interval("SET",sql_field->field_name, + sql_field->interval, + sql_field->charset); break; case FIELD_TYPE_DATE: // Rest of string types case FIELD_TYPE_NEWDATE: @@ -460,8 +789,22 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name, sql_field->pack_flag=f_settype((uint) sql_field->sql_type); break; case FIELD_TYPE_TIMESTAMP: - sql_field->unireg_check=Field::TIMESTAMP_FIELD; - /* fall through */ + /* We should replace old TIMESTAMP fields with their newer analogs */ + if (sql_field->unireg_check == Field::TIMESTAMP_OLD_FIELD) + { + if (!timestamps) + { + sql_field->unireg_check= Field::TIMESTAMP_DNUN_FIELD; + timestamps_with_niladic++; + } + else + sql_field->unireg_check= Field::NONE; + } + else if (sql_field->unireg_check != Field::NONE) + timestamps_with_niladic++; + + timestamps++; + /* fall-through */ default: sql_field->pack_flag=(FIELDFLAG_NUMBER | (sql_field->flags & UNSIGNED_FLAG ? 
0 : @@ -479,6 +822,11 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name, auto_increment++; record_offset+= sql_field->pack_length; } + if (timestamps_with_niladic > 1) + { + my_error(ER_TOO_MUCH_AUTO_TIMESTAMP_COLS,MYF(0)); + DBUG_RETURN(-1); + } if (auto_increment > 1) { my_error(ER_WRONG_AUTO_KEY,MYF(0)); @@ -499,73 +847,204 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name, /* Create keys */ - List_iterator<Key> key_iterator(keys); - uint key_parts=0,key_count=keys.elements; - List<Key> keys_in_order; // Add new keys here + List_iterator<Key> key_iterator(keys), key_iterator2(keys); + uint key_parts=0, fk_key_count=0; bool primary_key=0,unique_key=0; - Key *key; + Key *key, *key2; uint tmp, key_number; - tmp=min(file->max_keys(), MAX_KEY); - if (key_count > tmp) - { - my_error(ER_TOO_MANY_KEYS,MYF(0),tmp); - DBUG_RETURN(-1); - } + /* special marker for keys to be ignored */ + static char ignore_key[1]; /* Calculate number of key segements */ + *key_count= 0; while ((key=key_iterator++)) { - tmp=max(file->max_key_parts(),MAX_REF_PARTS); + if (key->type == Key::FOREIGN_KEY) + { + fk_key_count++; + foreign_key *fk_key= (foreign_key*) key; + if (fk_key->ref_columns.elements && + fk_key->ref_columns.elements != fk_key->columns.elements) + { + my_error(ER_WRONG_FK_DEF, MYF(0), fk_key->name ? fk_key->name : + "foreign key without name", + ER(ER_KEY_REF_DO_NOT_MATCH_TABLE_REF)); + DBUG_RETURN(-1); + } + continue; + } + (*key_count)++; + tmp=file->max_key_parts(); if (key->columns.elements > tmp) { my_error(ER_TOO_MANY_KEY_PARTS,MYF(0),tmp); DBUG_RETURN(-1); } - if (key->name() && strlen(key->name()) > NAME_LEN) + if (key->name && strlen(key->name) > NAME_LEN) { - my_error(ER_TOO_LONG_IDENT, MYF(0), key->name()); + my_error(ER_TOO_LONG_IDENT, MYF(0), key->name); DBUG_RETURN(-1); } - key_parts+=key->columns.elements; + key_iterator2.rewind (); + if (key->type != Key::FOREIGN_KEY) + { + while ((key2 = key_iterator2++) != key) + { + /* + foreign_key_prefix(key, key2) returns 0 if key or key2, or both, is + 'generated', and a generated key is a prefix of the other key. + Then we do not need the generated shorter key. + */ + if ((key2->type != Key::FOREIGN_KEY && + key2->name != ignore_key && + !foreign_key_prefix(key, key2))) + { + /* TODO: issue warning message */ + /* mark that the generated key should be ignored */ + if (!key2->generated || + (key->generated && key->columns.elements < + key2->columns.elements)) + key->name= ignore_key; + else + { + key2->name= ignore_key; + key_parts-= key2->columns.elements; + (*key_count)--; + } + break; + } + } + } + if (key->name != ignore_key) + key_parts+=key->columns.elements; + else + (*key_count)--; + if (key->name && !tmp_table && + !my_strcasecmp(system_charset_info,key->name,primary_key_name)) + { + my_error(ER_WRONG_NAME_FOR_INDEX, MYF(0), key->name); + DBUG_RETURN(-1); + } + } + tmp=file->max_keys(); + if (*key_count > tmp) + { + my_error(ER_TOO_MANY_KEYS,MYF(0),tmp); + DBUG_RETURN(-1); } - key_info_buffer=key_info=(KEY*) sql_calloc(sizeof(KEY)*key_count); + key_info_buffer=key_info=(KEY*) sql_calloc(sizeof(KEY)* *key_count); key_part_info=(KEY_PART_INFO*) sql_calloc(sizeof(KEY_PART_INFO)*key_parts); if (!key_info_buffer || ! 
key_part_info) DBUG_RETURN(-1); // Out of memory key_iterator.rewind(); key_number=0; - for (; (key=key_iterator++) ; key_info++, key_number++) + for (; (key=key_iterator++) ; key_number++) { uint key_length=0; key_part_spec *column; - key_info->flags= (key->type == Key::MULTIPLE) ? 0 : - (key->type == Key::FULLTEXT) ? HA_FULLTEXT : HA_NOSAME; + if (key->name == ignore_key) + { + /* ignore redundant keys */ + do + key=key_iterator++; + while (key && key->name == ignore_key); + if (!key) + break; + } + + switch(key->type){ + case Key::MULTIPLE: + key_info->flags= 0; + break; + case Key::FULLTEXT: + key_info->flags= HA_FULLTEXT; + break; + case Key::SPATIAL: +#ifdef HAVE_SPATIAL + key_info->flags= HA_SPATIAL; + break; +#else + my_printf_error(ER_FEATURE_DISABLED,ER(ER_FEATURE_DISABLED),MYF(0), + sym_group_geom.name, sym_group_geom.needed_define); + DBUG_RETURN(-1); +#endif + case Key::FOREIGN_KEY: + key_number--; // Skip this key + continue; + default: + key_info->flags = HA_NOSAME; + break; + } + if (key->generated) + key_info->flags|= HA_GENERATED_KEY; + key_info->key_parts=(uint8) key->columns.elements; key_info->key_part=key_part_info; key_info->usable_key_parts= key_number; key_info->algorithm=key->algorithm; - /* TODO: Add proper checks if handler supports key_type and algorithm */ if (key->type == Key::FULLTEXT) { if (!(file->table_flags() & HA_CAN_FULLTEXT)) { - my_error(ER_TABLE_CANT_HANDLE_FULLTEXT, MYF(0)); - DBUG_RETURN(-1); + my_error(ER_TABLE_CANT_HANDLE_FT, MYF(0)); + DBUG_RETURN(-1); + } + } + /* + Make SPATIAL to be RTREE by default + SPATIAL only on BLOB or at least BINARY, this + actually should be replaced by special GEOM type + in near future when new frm file is ready + checking for proper key parts number: + */ + + /* TODO: Add proper checks if handler supports key_type and algorithm */ + if (key_info->flags & HA_SPATIAL) + { + if (key_info->key_parts != 1) + { + my_printf_error(ER_WRONG_ARGUMENTS, + ER(ER_WRONG_ARGUMENTS),MYF(0),"SPATIAL INDEX"); + DBUG_RETURN(-1); + } + } + else if (key_info->algorithm == HA_KEY_ALG_RTREE) + { +#ifdef HAVE_RTREE_KEYS + if ((key_info->key_parts & 1) == 1) + { + my_printf_error(ER_WRONG_ARGUMENTS, + ER(ER_WRONG_ARGUMENTS),MYF(0),"RTREE INDEX"); + DBUG_RETURN(-1); } + /* TODO: To be deleted */ + my_printf_error(ER_NOT_SUPPORTED_YET, ER(ER_NOT_SUPPORTED_YET), + MYF(0), "RTREE INDEX"); + DBUG_RETURN(-1); +#else + my_printf_error(ER_FEATURE_DISABLED,ER(ER_FEATURE_DISABLED),MYF(0), + sym_group_rtree.name, sym_group_rtree.needed_define); + DBUG_RETURN(-1); +#endif } - List_iterator<key_part_spec> cols(key->columns); + List_iterator<key_part_spec> cols(key->columns), cols2(key->columns); + CHARSET_INFO *ft_key_charset=0; // for FULLTEXT for (uint column_nr=0 ; (column=cols++) ; column_nr++) { + key_part_spec *dup_column; + it.rewind(); field=0; while ((sql_field=it++) && - my_strcasecmp(column->field_name,sql_field->field_name)) + my_strcasecmp(system_charset_info, + column->field_name, + sql_field->field_name)) field++; if (!sql_field) { @@ -574,54 +1053,104 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name, column->field_name); DBUG_RETURN(-1); } - /* - for fulltext keys keyseg length is 1 for blobs (it's ignored in - ft code anyway, and 0 (set to column width later) for char's. - it has to be correct col width for char's, as char data are not - prefixed with length (unlike blobs, where ft code takes data length - from a data prefix, ignoring column->length). 
- */ - if (key->type == Key::FULLTEXT) - column->length=test(f_is_blob(sql_field->pack_flag)); - if (f_is_blob(sql_field->pack_flag)) + while ((dup_column= cols2++) != column) { - if (!(file->table_flags() & HA_BLOB_KEY)) + if (!my_strcasecmp(system_charset_info, + column->field_name, dup_column->field_name)) { - my_printf_error(ER_BLOB_USED_AS_KEY,ER(ER_BLOB_USED_AS_KEY),MYF(0), + my_printf_error(ER_DUP_FIELDNAME, + ER(ER_DUP_FIELDNAME),MYF(0), column->field_name); DBUG_RETURN(-1); } - if (!column->length) + } + cols2.rewind(); + if (key->type == Key::FULLTEXT) + { + if ((sql_field->sql_type != FIELD_TYPE_STRING && + sql_field->sql_type != FIELD_TYPE_VAR_STRING && + !f_is_blob(sql_field->pack_flag)) || + sql_field->charset == &my_charset_bin || + sql_field->charset->mbminlen > 1 || // ucs2 doesn't work yet + (ft_key_charset && sql_field->charset != ft_key_charset)) { - my_printf_error(ER_BLOB_KEY_WITHOUT_LENGTH, - ER(ER_BLOB_KEY_WITHOUT_LENGTH),MYF(0), - column->field_name); - DBUG_RETURN(-1); + my_printf_error(ER_BAD_FT_COLUMN,ER(ER_BAD_FT_COLUMN),MYF(0), + column->field_name); + DBUG_RETURN(-1); } + ft_key_charset=sql_field->charset; + /* + for fulltext keys keyseg length is 1 for blobs (it's ignored in ft + code anyway, and 0 (set to column width later) for char's. it has + to be correct col width for char's, as char data are not prefixed + with length (unlike blobs, where ft code takes data length from a + data prefix, ignoring column->length). + */ + column->length=test(f_is_blob(sql_field->pack_flag)); } - if (!(sql_field->flags & NOT_NULL_FLAG)) + else { - if (key->type == Key::PRIMARY) + column->length*= sql_field->charset->mbmaxlen; + + if (f_is_blob(sql_field->pack_flag)) { - /* Implicitly set primary key fields to NOT NULL for ISO conf. */ - sql_field->flags|= NOT_NULL_FLAG; - sql_field->pack_flag&= ~FIELDFLAG_MAYBE_NULL; - null_fields--; + if (!(file->table_flags() & HA_CAN_INDEX_BLOBS)) + { + my_printf_error(ER_BLOB_USED_AS_KEY,ER(ER_BLOB_USED_AS_KEY),MYF(0), + column->field_name); + DBUG_RETURN(-1); + } + if (!column->length) + { + my_printf_error(ER_BLOB_KEY_WITHOUT_LENGTH, + ER(ER_BLOB_KEY_WITHOUT_LENGTH),MYF(0), + column->field_name); + DBUG_RETURN(-1); + } } - else - key_info->flags|= HA_NULL_PART_KEY; - if (!(file->table_flags() & HA_NULL_KEY)) +#ifdef HAVE_SPATIAL + if (key->type == Key::SPATIAL) { - my_printf_error(ER_NULL_COLUMN_IN_INDEX,ER(ER_NULL_COLUMN_IN_INDEX), - MYF(0),column->field_name); - DBUG_RETURN(-1); + if (!column->length ) + { + /* + BAR: 4 is: (Xmin,Xmax,Ymin,Ymax), this is for 2D case + Lately we'll extend this code to support more dimensions + */ + column->length=4*sizeof(double); + } + } +#endif + if (!(sql_field->flags & NOT_NULL_FLAG)) + { + if (key->type == Key::PRIMARY) + { + /* Implicitly set primary key fields to NOT NULL for ISO conf. 
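Index prefix lengths are given in characters while handlers limit key parts in bytes, hence the multiplication by the charset's mbmaxlen seen above; the code that follows then either truncates the prefix with a warning (plain indexes) or rejects it (unique keys), since truncating a unique key would change its semantics. A stand-alone sketch of that decision, with made-up limits and key type:

#include <cstdio>

int main()
{
  const unsigned mbmaxlen= 3;               // e.g. a 3-byte-per-character charset
  const unsigned prefix_chars= 500;         // INDEX(col(500))
  const unsigned max_key_part_length= 1000; // hypothetical handler limit
  const bool unique_key= false;             // a plain (MULTIPLE) index

  unsigned length= prefix_chars * mbmaxlen; // 1500 bytes requested
  if (length > max_key_part_length)
  {
    if (!unique_key)
    {
      // Non-unique key: silently truncate and push a 'key too long' warning.
      length= max_key_part_length;
      printf("warning: key truncated to %u bytes\n", length);
    }
    else
    {
      // Unique/primary key: truncation is not acceptable, so fail.
      printf("error: ER_TOO_LONG_KEY (max %u bytes)\n", max_key_part_length);
      return 1;
    }
  }
  printf("key part length used: %u bytes\n", length);
  return 0;
}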
*/ + sql_field->flags|= NOT_NULL_FLAG; + sql_field->pack_flag&= ~FIELDFLAG_MAYBE_NULL; + null_fields--; + } + else + key_info->flags|= HA_NULL_PART_KEY; + if (!(file->table_flags() & HA_NULL_IN_KEY)) + { + my_printf_error(ER_NULL_COLUMN_IN_INDEX,ER(ER_NULL_COLUMN_IN_INDEX), + MYF(0),column->field_name); + DBUG_RETURN(-1); + } + if (key->type == Key::SPATIAL) + { + my_error(ER_SPATIAL_CANT_HAVE_NULL, MYF(0)); + DBUG_RETURN(-1); + } + } + if (MTYP_TYPENR(sql_field->unireg_check) == Field::NEXT_NUMBER) + { + if (column_nr == 0 || (file->table_flags() & HA_AUTO_PART_KEY)) + auto_increment--; // Field is used } } - if (MTYP_TYPENR(sql_field->unireg_check) == Field::NEXT_NUMBER) - { - if (column_nr == 0 || (file->table_flags() & HA_AUTO_PART_KEY)) - auto_increment--; // Field is used - } + key_part_info->fieldnr= field; key_part_info->offset= (uint16) sql_field->offset; key_part_info->key_type=sql_field->pack_flag; @@ -633,20 +1162,34 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name, if ((length=column->length) > file->max_key_length() || length > file->max_key_part_length()) { - my_error(ER_WRONG_SUB_KEY,MYF(0)); - DBUG_RETURN(-1); + length=min(file->max_key_length(), file->max_key_part_length()); + if (key->type == Key::MULTIPLE) + { + /* not a critical problem */ + char warn_buff[MYSQL_ERRMSG_SIZE]; + my_snprintf(warn_buff, sizeof(warn_buff), ER(ER_TOO_LONG_KEY), + length); + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_TOO_LONG_KEY, warn_buff); + } + else + { + my_error(ER_TOO_LONG_KEY,MYF(0),length); + DBUG_RETURN(-1); + } } } - else if (column->length > length || - ((f_is_packed(sql_field->pack_flag) || - ((file->table_flags() & HA_NO_PREFIX_CHAR_KEYS) && - (key_info->flags & HA_NOSAME))) && - column->length != length)) + else if (!f_is_geom(sql_field->pack_flag) && + (column->length > length || + ((f_is_packed(sql_field->pack_flag) || + ((file->table_flags() & HA_NO_PREFIX_CHAR_KEYS) && + (key_info->flags & HA_NOSAME))) && + column->length != length))) { my_error(ER_WRONG_SUB_KEY,MYF(0)); DBUG_RETURN(-1); } - if (!(file->table_flags() & HA_NO_PREFIX_CHAR_KEYS)) + else if (!(file->table_flags() & HA_NO_PREFIX_CHAR_KEYS)) length=column->length; } else if (length == 0) @@ -655,7 +1198,25 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name, column->field_name); DBUG_RETURN(-1); } - key_part_info->length=(uint8) length; + if (length > file->max_key_part_length()) + { + length=file->max_key_part_length(); + if (key->type == Key::MULTIPLE) + { + /* not a critical problem */ + char warn_buff[MYSQL_ERRMSG_SIZE]; + my_snprintf(warn_buff, sizeof(warn_buff), ER(ER_TOO_LONG_KEY), + length); + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_TOO_LONG_KEY, warn_buff); + } + else + { + my_error(ER_TOO_LONG_KEY,MYF(0),length); + DBUG_RETURN(-1); + } + } + key_part_info->length=(uint16) length; /* Use packed keys for long strings on the first column */ if (!(db_options & HA_OPTION_NO_PACK_KEYS) && (length >= KEY_DEFAULT_PACK_LENGTH && @@ -684,7 +1245,7 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name, key_name=primary_key_name; primary_key=1; } - else if (!(key_name = key->name())) + else if (!(key_name = key->name)) key_name=make_unique_key_name(sql_field->field_name, key_info_buffer,key_info); if (check_if_keyname_exists(key_name,key_info_buffer,key_info)) @@ -695,15 +1256,21 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name, key_info->name=(char*) key_name; } } + if (!key_info->name || 
check_column_name(key_info->name)) + { + my_error(ER_WRONG_NAME_FOR_INDEX, MYF(0), key_info->name); + DBUG_RETURN(-1); + } if (!(key_info->flags & HA_NULL_PART_KEY)) unique_key=1; key_info->key_length=(uint16) key_length; - uint max_key_length= min(file->max_key_length(), MAX_KEY_LENGTH); + uint max_key_length= file->max_key_length(); if (key_length > max_key_length && key->type != Key::FULLTEXT) { my_error(ER_TOO_LONG_KEY,MYF(0),max_key_length); DBUG_RETURN(-1); } + key_info++; } if (!unique_key && !primary_key && (file->table_flags() & HA_REQUIRE_PRIMARY_KEY)) @@ -717,18 +1284,128 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name, DBUG_RETURN(-1); } /* Sort keys in optimized order */ - qsort((gptr) key_info_buffer, key_count, sizeof(KEY), (qsort_cmp) sort_keys); + qsort((gptr) key_info_buffer, *key_count, sizeof(KEY), + (qsort_cmp) sort_keys); + create_info->null_bits= null_fields; + + DBUG_RETURN(0); +} + + +/* + Create a table + + SYNOPSIS + mysql_create_table() + thd Thread object + db Database + table_name Table name + create_info Create information (like MAX_ROWS) + fields List of fields to create + keys List of keys to create + tmp_table Set to 1 if this is an internal temporary table + (From ALTER TABLE) + + DESCRIPTION + If one creates a temporary table, this is automaticly opened + + no_log is needed for the case of CREATE ... SELECT, + as the logging will be done later in sql_insert.cc + select_field_count is also used for CREATE ... SELECT, + and must be zero for standard create of table. + + RETURN VALUES + 0 ok + -1 error +*/ + +int mysql_create_table(THD *thd,const char *db, const char *table_name, + HA_CREATE_INFO *create_info, + List<create_field> &fields, + List<Key> &keys,bool tmp_table, + uint select_field_count) +{ + char path[FN_REFLEN]; + const char *alias; + int error= -1; + uint db_options, key_count; + KEY *key_info_buffer; + handler *file; + enum db_type new_db_type; + DBUG_ENTER("mysql_create_table"); + + /* Check for duplicate fields and check type of table to create */ + if (!fields.elements) + { + my_error(ER_TABLE_MUST_HAVE_COLUMNS,MYF(0)); + DBUG_RETURN(-1); + } + if ((new_db_type= ha_checktype(create_info->db_type)) != + create_info->db_type) + { + create_info->db_type= new_db_type; + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_USING_OTHER_HANDLER, + ER(ER_WARN_USING_OTHER_HANDLER), + ha_get_storage_engine(new_db_type), + table_name); + } + db_options=create_info->table_options; + if (create_info->row_type == ROW_TYPE_DYNAMIC) + db_options|=HA_OPTION_PACK_RECORD; + alias= table_case_name(create_info, table_name); + file=get_new_handler((TABLE*) 0, create_info->db_type); + +#ifdef NOT_USED + /* + if there is a technical reason for a handler not to have support + for temp. tables this code can be re-enabled. + Otherwise, if a handler author has a wish to prohibit usage of + temporary tables for his handler he should implement a check in + ::create() method + */ + if ((create_info->options & HA_LEX_CREATE_TMP_TABLE) && + (file->table_flags() & HA_NO_TEMP_TABLES)) + { + my_error(ER_ILLEGAL_HA,MYF(0),table_name); + DBUG_RETURN(-1); + } +#endif + + /* + If the table character set was not given explicitely, + let's fetch the database default character set and + apply it to the table. 
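The lookup above gives the table a character set even when none was specified: the explicit table option wins, otherwise the database default stored in db.opt is used. The sketch below shows that precedence as a plain function; the final fall-back to a server-wide default is an assumption of this sketch, not something shown in the hunk.

#include <cstdio>
#include <string>

// Hypothetical resolver mirroring the precedence described above.
static std::string effective_charset(const std::string &table_charset,
                                     const std::string &db_default,
                                     const std::string &server_default)
{
  if (!table_charset.empty())
    return table_charset;          // CREATE TABLE ... DEFAULT CHARSET=...
  if (!db_default.empty())
    return db_default;             // read from the database's db.opt file
  return server_default;           // assumed last resort in this sketch
}

int main()
{
  printf("%s\n", effective_charset("", "utf8", "latin1").c_str());     // utf8
  printf("%s\n", effective_charset("ucs2", "utf8", "latin1").c_str()); // ucs2
  printf("%s\n", effective_charset("", "", "latin1").c_str());         // latin1
  return 0;
}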
+ */ + if (!create_info->default_table_charset) + { + HA_CREATE_INFO db_info; + char path[FN_REFLEN]; + /* Abuse build_table_path() to build the path to the db.opt file */ + build_table_path(path, sizeof(path), db, MY_DB_OPT_FILE, ""); + load_db_opt(thd, path, &db_info); + create_info->default_table_charset= db_info.default_table_charset; + } + + if (mysql_prepare_table(thd, create_info, fields, + keys, tmp_table, db_options, file, + key_info_buffer, &key_count, + select_field_count)) + DBUG_RETURN(-1); /* Check if table exists */ if (create_info->options & HA_LEX_CREATE_TMP_TABLE) { - sprintf(path,"%s%s%lx_%lx_%x%s",mysql_tmpdir,tmp_file_prefix, - current_pid, thd->thread_id, thd->tmp_table++,reg_ext); + my_snprintf(path, sizeof(path), "%s%s%lx_%lx_%x%s", + mysql_tmpdir, tmp_file_prefix, current_pid, thd->thread_id, + thd->tmp_table++, reg_ext); + if (lower_case_table_names) + my_casedn_str(files_charset_info, path); create_info->table_options|=HA_CREATE_DELAY_KEY_WRITE; } else - (void) sprintf(path,"%s/%s/%s%s",mysql_data_home,db,alias,reg_ext); - unpack_filename(path,path); + build_table_path(path, sizeof(path), db, alias, reg_ext); + /* Check if table already exists */ if ((create_info->options & HA_LEX_CREATE_TMP_TABLE) && find_temporary_table(thd,db,table_name)) @@ -736,41 +1413,63 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name, if (create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS) { create_info->table_existed= 1; // Mark that table existed + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_TABLE_EXISTS_ERROR, ER(ER_TABLE_EXISTS_ERROR), + alias); DBUG_RETURN(0); } - my_error(ER_TABLE_EXISTS_ERROR,MYF(0),table_name); + my_error(ER_TABLE_EXISTS_ERROR, MYF(0), alias); DBUG_RETURN(-1); } + if (wait_if_global_read_lock(thd, 0, 1)) + DBUG_RETURN(error); VOID(pthread_mutex_lock(&LOCK_open)); if (!tmp_table && !(create_info->options & HA_LEX_CREATE_TMP_TABLE)) { if (!access(path,F_OK)) { - VOID(pthread_mutex_unlock(&LOCK_open)); if (create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS) - { - create_info->table_existed= 1; // Mark that table existed - DBUG_RETURN(0); - } - my_error(ER_TABLE_EXISTS_ERROR,MYF(0), alias); - DBUG_RETURN(-1); + goto warn; + my_error(ER_TABLE_EXISTS_ERROR,MYF(0),table_name); + goto end; + } + } + + /* + Check that table with given name does not already + exist in any storage engine. In such a case it should + be discovered and the error ER_TABLE_EXISTS_ERROR be returned + unless user specified CREATE TABLE IF EXISTS + The LOCK_open mutex has been locked to make sure no + one else is attempting to discover the table. Since + it's not on disk as a frm file, no one could be using it! 
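The temporary-table path built earlier in this hunk has to be unique across server processes, connections and statements, which is why it combines the process id, the thread id and a per-connection counter. A stand-alone sketch using the same format string; the directory, prefix and extension values are placeholders.

#include <cstdio>

int main()
{
  const char *tmpdir= "/tmp/";        // placeholder for mysql_tmpdir
  const char *prefix= "#sql";         // placeholder for tmp_file_prefix
  const char *reg_ext= ".frm";
  unsigned long pid= 4242;            // current_pid
  unsigned long thread_id= 17;        // one per connection
  unsigned tmp_table_counter= 3;      // incremented per temporary table

  char path[512];
  // Same shape as the server's format string: %s%s%lx_%lx_%x%s
  snprintf(path, sizeof(path), "%s%s%lx_%lx_%x%s",
           tmpdir, prefix, pid, thread_id, tmp_table_counter, reg_ext);
  printf("%s\n", path);               // e.g. /tmp/#sql1092_11_3.frm
  return 0;
}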
+ */ + if (!(create_info->options & HA_LEX_CREATE_TMP_TABLE)) + { + bool create_if_not_exists = + create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS; + if (ha_table_exists_in_engine(thd, db, table_name)) + { + DBUG_PRINT("info", ("Table with same name already existed in handler")); + + if (create_if_not_exists) + goto warn; + my_error(ER_TABLE_EXISTS_ERROR,MYF(0),table_name); + goto end; } } thd->proc_info="creating table"; create_info->table_existed= 0; // Mark that table is created - if (thd->sql_mode & MODE_NO_DIR_IN_CREATE) + if (thd->variables.sql_mode & MODE_NO_DIR_IN_CREATE) create_info->data_file_name= create_info->index_file_name= 0; create_info->table_options=db_options; - create_info->null_bits= null_fields; - if (rea_create_table(path, create_info, fields, key_count, + if (rea_create_table(thd, path, db, table_name, + create_info, fields, key_count, key_info_buffer)) - { - /* my_error(ER_CANT_CREATE_TABLE,MYF(0),table_name,my_errno); */ goto end; - } if (create_info->options & HA_LEX_CREATE_TMP_TABLE) { /* Open table and put in temporary table list */ @@ -779,6 +1478,7 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name, (void) rm_temporary_table(create_info->db_type, path); goto end; } + thd->tmp_table_used= 1; } if (!tmp_table) { @@ -789,13 +1489,24 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name, thd->clear_error(); Query_log_event qinfo(thd, thd->query, thd->query_length, test(create_info->options & - HA_LEX_CREATE_TMP_TABLE)); + HA_LEX_CREATE_TMP_TABLE), + FALSE); mysql_bin_log.write(&qinfo); } } error=0; + goto end; + +warn: + error= 0; + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_TABLE_EXISTS_ERROR, ER(ER_TABLE_EXISTS_ERROR), + alias); + create_info->table_existed= 1; // Mark that table existed + end: VOID(pthread_mutex_unlock(&LOCK_open)); + start_waiting_global_read_lock(thd); thd->proc_info="After create"; DBUG_RETURN(error); } @@ -808,7 +1519,7 @@ static bool check_if_keyname_exists(const char *name, KEY *start, KEY *end) { for (KEY *key=start ; key != end ; key++) - if (!my_strcasecmp(name,key->name)) + if (!my_strcasecmp(system_charset_info,name,key->name)) return 1; return 0; } @@ -819,17 +1530,26 @@ make_unique_key_name(const char *field_name,KEY *start,KEY *end) { char buff[MAX_FIELD_NAME],*buff_end; - if (!check_if_keyname_exists(field_name,start,end)) + if (!check_if_keyname_exists(field_name,start,end) && + my_strcasecmp(system_charset_info,field_name,primary_key_name)) return (char*) field_name; // Use fieldname - buff_end=strmake(buff,field_name,MAX_FIELD_NAME-4); - for (uint i=2 ; ; i++) + buff_end=strmake(buff,field_name, sizeof(buff)-4); + + /* + Only 3 chars + '\0' left, so need to limit to 2 digit + This is ok as we can't have more than 100 keys anyway + */ + for (uint i=2 ; i< 100; i++) { - sprintf(buff_end,"_%d",i); + *buff_end= '_'; + int10_to_str(i, buff_end+1, 10); if (!check_if_keyname_exists(buff,start,end)) return sql_strdup(buff); } + return (char*) "not_specified"; // Should never happen } + /**************************************************************************** ** Create table from a list of fields and items ****************************************************************************/ @@ -844,6 +1564,7 @@ TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info, TABLE tmp_table; // Used during 'create_field()' TABLE *table= 0; tmp_table.table_name=0; + uint select_field_count= items->elements; DBUG_ENTER("create_table_from_items"); /* Add selected items to 
field list */ @@ -864,7 +1585,7 @@ TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info, field=item->tmp_table_field(&tmp_table); else field=create_tmp_field(thd, &tmp_table, item, item->type(), - (Item ***) 0, &tmp_field,0,0); + (Item ***) 0, &tmp_field, 0, 0, 0, 0); if (!field || !(cr_field=new create_field(field,(item->type() == Item::FIELD_ITEM ? ((Item_field *)item)->field : @@ -884,7 +1605,8 @@ TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info, open_table(). */ tmp_disable_binlog(thd); - if (!mysql_create_table(thd,db,name,create_info,*extra_fields,*keys,0)) + if (!mysql_create_table(thd,db,name,create_info,*extra_fields, + *keys,0,select_field_count)) { if (!(table=open_table(thd,db,name,name,(bool*) 0))) quick_rm_table(create_info->db_type,db,table_case_name(create_info,name)); @@ -917,35 +1639,41 @@ mysql_rename_table(enum db_type base, const char *new_db, const char *new_name) { - char from[FN_REFLEN], to[FN_REFLEN]; - char tmp_from[NAME_LEN+1], tmp_to[NAME_LEN+1]; + char from[FN_REFLEN], to[FN_REFLEN], lc_from[FN_REFLEN], lc_to[FN_REFLEN]; + char *from_base= from, *to_base= to; + char tmp_name[NAME_LEN+1]; handler *file=get_new_handler((TABLE*) 0, base); int error=0; DBUG_ENTER("mysql_rename_table"); + build_table_path(from, sizeof(from), old_db, old_name, ""); + build_table_path(to, sizeof(to), new_db, new_name, ""); + + /* + If lower_case_table_names == 2 (case-preserving but case-insensitive + file system) and the storage is not HA_FILE_BASED, we need to provide + a lowercase file name, but we leave the .frm in mixed case. + */ if (lower_case_table_names == 2 && !(file->table_flags() & HA_FILE_BASED)) { - /* Table handler expects to get all file names as lower case */ - strmov(tmp_from, old_name); - casedn_str(tmp_from); - old_name= tmp_from; + strmov(tmp_name, old_name); + my_casedn_str(files_charset_info, tmp_name); + build_table_path(lc_from, sizeof(lc_from), old_db, tmp_name, ""); + from_base= lc_from; - strmov(tmp_to, new_name); - casedn_str(tmp_to); - new_name= tmp_to; + strmov(tmp_name, new_name); + my_casedn_str(files_charset_info, tmp_name); + build_table_path(lc_to, sizeof(lc_to), new_db, tmp_name, ""); + to_base= lc_to; } - (void) sprintf(from,"%s/%s/%s",mysql_data_home,old_db,old_name); - (void) sprintf(to,"%s/%s/%s",mysql_data_home,new_db,new_name); - fn_format(from,from,"","",4); - fn_format(to,to, "","",4); - if (!(error=file->rename_table((const char*) from,(const char *) to))) + if (!(error=file->rename_table(from_base, to_base))) { if (rename_file_ext(from,to,reg_ext)) { error=my_errno; /* Restore old file name */ - file->rename_table((const char*) to,(const char *) from); + file->rename_table(to_base, from_base); } } delete file; @@ -963,7 +1691,7 @@ mysql_rename_table(enum db_type base, thd Thread handler table Table to remove from cache function HA_EXTRA_PREPARE_FOR_DELETE if table is to be deleted - HA_EXTRA_FORCE_REOPEN if table is not be used + HA_EXTRA_FORCE_REOPEN if table is not be used NOTES When returning, the table will be unusable for other threads until the table is closed. @@ -1006,11 +1734,11 @@ static void wait_while_table_is_used(THD *thd,TABLE *table, Lock on LOCK_open Win32 clients must also have a WRITE LOCK on the table ! 
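make_unique_key_name() earlier in this hunk picks index names by probing: use the first column's name if it is free (and not 'PRIMARY'), otherwise try name_2, name_3, ... with at most two digits, which is enough because a table cannot have 100 keys. A stand-alone version of the same probe, with a simplified case-insensitive comparison:

#include <cctype>
#include <cstdio>
#include <string>
#include <vector>

// Simplified case-insensitive equality; the server compares in system_charset_info.
static bool eq_nocase(const std::string &a, const std::string &b)
{
  if (a.size() != b.size())
    return false;
  for (size_t i= 0; i < a.size(); i++)
    if (tolower((unsigned char) a[i]) != tolower((unsigned char) b[i]))
      return false;
  return true;
}

static bool name_exists(const std::vector<std::string> &keys, const std::string &n)
{
  for (const std::string &k : keys)
    if (eq_nocase(k, n))
      return true;
  return false;
}

static std::string unique_key_name(const std::string &field,
                                   const std::vector<std::string> &keys)
{
  if (!name_exists(keys, field) && !eq_nocase(field, "PRIMARY"))
    return field;                               // field name is free: use it
  for (int i= 2; i < 100; i++)                  // two digits suffice
  {
    std::string candidate= field + "_" + std::to_string(i);
    if (!name_exists(keys, candidate))
      return candidate;
  }
  return "not_specified";                       // should never happen
}

int main()
{
  std::vector<std::string> keys= { "PRIMARY", "a", "a_2" };
  printf("%s\n", unique_key_name("a", keys).c_str());   // a_3
  printf("%s\n", unique_key_name("b", keys).c_str());   // b
  return 0;
}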
*/ - + static bool close_cached_table(THD *thd, TABLE *table) { DBUG_ENTER("close_cached_table"); - + wait_while_table_is_used(thd, table, HA_EXTRA_PREPARE_FOR_DELETE); /* Close lock if this is not got with LOCK TABLES */ if (thd->lock) @@ -1026,20 +1754,18 @@ static bool close_cached_table(THD *thd, TABLE *table) DBUG_RETURN(0); } -static int send_check_errmsg(THD* thd, TABLE_LIST* table, +static int send_check_errmsg(THD *thd, TABLE_LIST* table, const char* operator_name, const char* errmsg) { - - String* packet = &thd->packet; - packet->length(0); - net_store_data(packet, table->alias); - net_store_data(packet, (char*)operator_name); - net_store_data(packet, "error"); - net_store_data(packet, errmsg); + Protocol *protocol= thd->protocol; + protocol->prepare_for_resend(); + protocol->store(table->alias, system_charset_info); + protocol->store((char*) operator_name, system_charset_info); + protocol->store("error", 5, system_charset_info); + protocol->store(errmsg, system_charset_info); thd->net.last_error[0]=0; - if (my_net_write(&thd->net, (char*) thd->packet.ptr(), - packet->length())) + if (protocol->write()) return -1; return 1; } @@ -1058,7 +1784,7 @@ static int prepare_for_restore(THD* thd, TABLE_LIST* table, } else { - char* backup_dir = thd->lex.backup_dir; + char* backup_dir= thd->lex->backup_dir; char src_path[FN_REFLEN], dst_path[FN_REFLEN]; char* table_name = table->real_name; char* db = thd->db ? thd->db : table->db; @@ -1067,7 +1793,8 @@ static int prepare_for_restore(THD* thd, TABLE_LIST* table, reg_ext)) DBUG_RETURN(-1); // protect buffer overflow - sprintf(dst_path, "%s/%s/%s", mysql_real_data_home, db, table_name); + my_snprintf(dst_path, sizeof(dst_path), "%s%s/%s", + mysql_real_data_home, db, table_name); if (lock_and_wait_for_table_name(thd,table)) DBUG_RETURN(-1); @@ -1119,8 +1846,8 @@ static int prepare_for_repair(THD* thd, TABLE_LIST *table_list, if (!(table= table_list->table)) /* if open_ltable failed */ { char name[FN_REFLEN]; - strxmov(name, mysql_data_home, "/", table_list->db, "/", - table_list->real_name, NullS); + build_table_path(name, sizeof(name), table_list->db, + table_list->real_name, ""); if (openfrm(name, "", 0, 0, 0, &tmp_table)) DBUG_RETURN(0); // Can't open frm file table= &tmp_table; @@ -1151,7 +1878,8 @@ static int prepare_for_repair(THD* thd, TABLE_LIST *table_list, if (!my_stat(from, &stat_info, MYF(0))) goto end; // Can't use USE_FRM flag - sprintf(tmp,"%s-%lx_%lx", from, current_pid, thd->thread_id); + my_snprintf(tmp, sizeof(tmp), "%s-%lx_%lx", + from, current_pid, thd->thread_id); /* If we could open the table, close it */ if (table_list->table) @@ -1211,6 +1939,12 @@ end: } +/* + RETURN VALUES + 0 Message sent to net (admin operation went ok) + -1 Message should be sent by caller + (admin operation or network communication failed) +*/ static int mysql_admin_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt, const char *operator_name, @@ -1224,8 +1958,8 @@ static int mysql_admin_table(THD* thd, TABLE_LIST* tables, { TABLE_LIST *table; List<Item> field_list; - Item* item; - String* packet = &thd->packet; + Item *item; + Protocol *protocol= thd->protocol; DBUG_ENTER("mysql_admin_table"); field_list.push_back(item = new Item_empty_string("Table", NAME_LEN*2)); @@ -1236,21 +1970,24 @@ static int mysql_admin_table(THD* thd, TABLE_LIST* tables, item->maybe_null = 1; field_list.push_back(item = new Item_empty_string("Msg_text", 255)); item->maybe_null = 1; - if (send_fields(thd, field_list, 1)) + if (protocol->send_fields(&field_list, 
1)) DBUG_RETURN(-1); mysql_ha_flush(thd, tables, MYSQL_HA_CLOSE_FINAL, FALSE); for (table = tables; table; table = table->next) { char table_name[NAME_LEN*2+2]; - char* db = (table->db) ? table->db : thd->db; + char* db = table->db; bool fatal_error=0; - strxmov(table_name,db ? db : "",".",table->real_name,NullS); + strxmov(table_name, db, ".", table->real_name, NullS); thd->open_options|= extra_open_options; table->table = open_ltable(thd, table, lock_type); +#ifdef EMBEDDED_LIBRARY + thd->net.last_errno= 0; // these errors shouldn't get client +#endif thd->open_options&= ~extra_open_options; - packet->length(0); + if (prepare_func) { switch ((*prepare_func)(thd, table, check_opt)) { @@ -1267,30 +2004,31 @@ static int mysql_admin_table(THD* thd, TABLE_LIST* tables, if (!table->table) { const char *err_msg; - net_store_data(packet, table_name); - net_store_data(packet, operator_name); - net_store_data(packet, "error"); + protocol->prepare_for_resend(); + protocol->store(table_name, system_charset_info); + protocol->store(operator_name, system_charset_info); + protocol->store("error",5, system_charset_info); if (!(err_msg=thd->net.last_error)) err_msg=ER(ER_CHECK_NO_SUCH_TABLE); - net_store_data(packet, err_msg); + protocol->store(err_msg, system_charset_info); thd->net.last_error[0]=0; - if (my_net_write(&thd->net, (char*) thd->packet.ptr(), - packet->length())) + if (protocol->write()) goto err; continue; } + table->table->pos_in_table_list= table; if ((table->table->db_stat & HA_READ_ONLY) && open_for_modify) { char buff[FN_REFLEN + MYSQL_ERRMSG_SIZE]; - net_store_data(packet, table_name); - net_store_data(packet, operator_name); - net_store_data(packet, "error"); - sprintf(buff, ER(ER_OPEN_AS_READONLY), table_name); - net_store_data(packet, buff); + protocol->prepare_for_resend(); + protocol->store(table_name, system_charset_info); + protocol->store(operator_name, system_charset_info); + protocol->store("error", 5, system_charset_info); + my_snprintf(buff, sizeof(buff), ER(ER_OPEN_AS_READONLY), table_name); + protocol->store(buff, system_charset_info); close_thread_tables(thd); table->table=0; // For query cache - if (my_net_write(&thd->net, (char*) thd->packet.ptr(), - packet->length())) + if (protocol->write()) goto err; continue; } @@ -1298,71 +2036,127 @@ static int mysql_admin_table(THD* thd, TABLE_LIST* tables, /* Close all instances of the table to allow repair to rename files */ if (lock_type == TL_WRITE && table->table->version) { - uint flags; pthread_mutex_lock(&LOCK_open); const char *old_message=thd->enter_cond(&COND_refresh, &LOCK_open, "Waiting to get writelock"); mysql_lock_abort(thd,table->table); - flags= RTFC_WAIT_OTHER_THREAD_FLAG | RTFC_CHECK_KILLED_FLAG; remove_table_from_cache(thd, table->table->table_cache_key, - table->table->real_name, flags); + table->table->real_name, + RTFC_WAIT_OTHER_THREAD_FLAG | + RTFC_CHECK_KILLED_FLAG); thd->exit_cond(old_message); if (thd->killed) goto err; - open_for_modify=0; + /* Flush entries in the query cache involving this table. 
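Each admin statement (CHECK, REPAIR, OPTIMIZE, ...) reports one row per table using the four columns set up above: Table, Op, Msg_type, Msg_text. The sketch below mimics that prepare_for_resend/store/write row-building pattern with a stand-alone stub in place of the server's Protocol class.

#include <cstdio>
#include <string>
#include <vector>

// Stand-in for the server's Protocol: collects one row, then "writes" it.
struct RowProtocol
{
  std::vector<std::string> row;
  void prepare_for_resend() { row.clear(); }
  void store(const std::string &s) { row.push_back(s); }
  bool write()
  {
    for (size_t i= 0; i < row.size(); i++)
      printf("%s%s", row[i].c_str(), i + 1 < row.size() ? " | " : "\n");
    return false;                     // non-zero would mean a write error
  }
};

int main()
{
  RowProtocol protocol;
  // One result row per table: Table, Op, Msg_type, Msg_text
  protocol.prepare_for_resend();
  protocol.store("test.t1");
  protocol.store("check");
  protocol.store("status");
  protocol.store("OK");
  protocol.write();
  return 0;
}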
*/ + query_cache_invalidate3(thd, table->table, 0); + open_for_modify= 0; } int result_code = (table->table->file->*operator_func)(thd, check_opt); - packet->length(0); - net_store_data(packet, table_name); - net_store_data(packet, operator_name); +#ifdef EMBEDDED_LIBRARY + thd->net.last_errno= 0; // these errors shouldn't get client +#endif + protocol->prepare_for_resend(); + protocol->store(table_name, system_charset_info); + protocol->store(operator_name, system_charset_info); +send_result_message: + + DBUG_PRINT("info", ("result_code: %d", result_code)); switch (result_code) { case HA_ADMIN_NOT_IMPLEMENTED: { - char buf[ERRMSGSIZE+20]; - my_snprintf(buf, ERRMSGSIZE, - ER(ER_CHECK_NOT_IMPLEMENTED), operator_name); - net_store_data(packet, "error"); - net_store_data(packet, buf); + char buf[ERRMSGSIZE+20]; + uint length=my_snprintf(buf, ERRMSGSIZE, + ER(ER_CHECK_NOT_IMPLEMENTED), operator_name); + protocol->store("note", 4, system_charset_info); + protocol->store(buf, length, system_charset_info); } break; case HA_ADMIN_OK: - net_store_data(packet, "status"); - net_store_data(packet, "OK"); + protocol->store("status", 6, system_charset_info); + protocol->store("OK",2, system_charset_info); break; case HA_ADMIN_FAILED: - net_store_data(packet, "status"); - net_store_data(packet, "Operation failed"); + protocol->store("status", 6, system_charset_info); + protocol->store("Operation failed",16, system_charset_info); break; case HA_ADMIN_REJECT: - net_store_data(packet,"status"); - net_store_data(packet,"Operation need committed state"); - open_for_modify= false; + protocol->store("status", 6, system_charset_info); + protocol->store("Operation need committed state",30, system_charset_info); + open_for_modify= FALSE; break; case HA_ADMIN_ALREADY_DONE: - net_store_data(packet, "status"); - net_store_data(packet, "Table is already up to date"); + protocol->store("status", 6, system_charset_info); + protocol->store("Table is already up to date", 27, system_charset_info); break; case HA_ADMIN_CORRUPT: - net_store_data(packet, "error"); - net_store_data(packet, "Corrupt"); + protocol->store("error", 5, system_charset_info); + protocol->store("Corrupt", 7, system_charset_info); fatal_error=1; break; case HA_ADMIN_INVALID: - net_store_data(packet, "error"); - net_store_data(packet, "Invalid argument"); + protocol->store("error", 5, system_charset_info); + protocol->store("Invalid argument",16, system_charset_info); break; + case HA_ADMIN_TRY_ALTER: + { + /* + This is currently used only by InnoDB. ha_innobase::optimize() answers + "try with alter", so here we close the table, do an ALTER TABLE, + reopen the table and do ha_innobase::analyze() on it. + */ + close_thread_tables(thd); + TABLE_LIST *save_next= table->next; + table->next= 0; + tmp_disable_binlog(thd); // binlogging is done by caller if wanted + result_code= mysql_recreate_table(thd, table, 0); + reenable_binlog(thd); + close_thread_tables(thd); + if (!result_code) // recreation went ok + { + if ((table->table= open_ltable(thd, table, lock_type)) && + ((result_code= table->table->file->analyze(thd, check_opt)) > 0)) + result_code= 0; // analyze went ok + } + if (result_code) // either mysql_recreate_table or analyze failed + { + const char *err_msg; + if ((err_msg= thd->net.last_error)) + { + if (!thd->vio_ok()) + { + sql_print_error(err_msg); + } + else + { + /* Hijack the row already in-progress. 
*/ + protocol->store("error", 5, system_charset_info); + protocol->store(err_msg, system_charset_info); + (void)protocol->write(); + /* Start off another row for HA_ADMIN_FAILED */ + protocol->prepare_for_resend(); + protocol->store(table_name, system_charset_info); + protocol->store(operator_name, system_charset_info); + } + } + } + result_code= result_code ? HA_ADMIN_FAILED : HA_ADMIN_OK; + table->next= save_next; + goto send_result_message; + } + default: // Probably HA_ADMIN_INTERNAL_ERROR - net_store_data(packet, "error"); - net_store_data(packet, "Unknown - internal error during operation"); + protocol->store("error", 5, system_charset_info); + protocol->store("Unknown - internal error during operation", 41 + , system_charset_info); fatal_error=1; break; } @@ -1379,12 +2173,11 @@ static int mysql_admin_table(THD* thd, TABLE_LIST* tables, } close_thread_tables(thd); table->table=0; // For query cache - if (my_net_write(&thd->net, (char*) packet->ptr(), - packet->length())) + if (protocol->write()) goto err; } - send_eof(&thd->net); + send_eof(thd); DBUG_RETURN(0); err: close_thread_tables(thd); // Shouldn't be needed @@ -1408,7 +2201,7 @@ int mysql_restore_table(THD* thd, TABLE_LIST* table_list) DBUG_ENTER("mysql_restore_table"); DBUG_RETURN(mysql_admin_table(thd, table_list, 0, "restore", TL_WRITE, 1, 0, - &prepare_for_restore, + &prepare_for_restore, &handler::restore)); } @@ -1418,7 +2211,7 @@ int mysql_repair_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt) DBUG_ENTER("mysql_repair_table"); DBUG_RETURN(mysql_admin_table(thd, tables, check_opt, "repair", TL_WRITE, 1, HA_OPEN_FOR_REPAIR, - &prepare_for_repair, + &prepare_for_repair, &handler::repair)); } @@ -1432,6 +2225,270 @@ int mysql_optimize_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt) } +/* + Assigned specified indexes for a table into key cache + + SYNOPSIS + mysql_assign_to_keycache() + thd Thread object + tables Table list (one table only) + + RETURN VALUES + 0 ok + -1 error +*/ + +int mysql_assign_to_keycache(THD* thd, TABLE_LIST* tables, + LEX_STRING *key_cache_name) +{ + HA_CHECK_OPT check_opt; + KEY_CACHE *key_cache; + DBUG_ENTER("mysql_assign_to_keycache"); + + check_opt.init(); + pthread_mutex_lock(&LOCK_global_system_variables); + if (!(key_cache= get_key_cache(key_cache_name))) + { + pthread_mutex_unlock(&LOCK_global_system_variables); + my_error(ER_UNKNOWN_KEY_CACHE, MYF(0), key_cache_name->str); + DBUG_RETURN(-1); + } + pthread_mutex_unlock(&LOCK_global_system_variables); + check_opt.key_cache= key_cache; + DBUG_RETURN(mysql_admin_table(thd, tables, &check_opt, + "assign_to_keycache", TL_READ_NO_INSERT, 0, + 0, 0, &handler::assign_to_keycache)); +} + + +/* + Reassign all tables assigned to a key cache to another key cache + + SYNOPSIS + reassign_keycache_tables() + thd Thread object + src_cache Reference to the key cache to clean up + dest_cache New key cache + + NOTES + This is called when one sets a key cache size to zero, in which + case we have to move the tables associated to this key cache to + the "default" one. + + One has to ensure that one never calls this function while + some other thread is changing the key cache. This is assured by + the caller setting src_cache->in_init before calling this function. + + We don't delete the old key cache as there may still be pointers pointing + to it for a while after this function returns. 
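The HA_ADMIN_TRY_ALTER branch above amounts to a retry protocol: when the engine cannot OPTIMIZE in place it asks for a rebuild, so the table is closed, recreated through ALTER TABLE and then analyzed, and only a failure of both steps is reported as an error. A condensed control-flow sketch with stubbed steps (the stub names are illustrative, not server functions):

#include <cstdio>

enum admin_result { ADMIN_OK, ADMIN_FAILED, ADMIN_TRY_ALTER };

// Stubs standing in for the real handler/ALTER calls.
static admin_result engine_optimize()  { return ADMIN_TRY_ALTER; }
static bool recreate_table()           { return false; }  // false = success
static bool analyze_table()            { return false; }  // false = success

int main()
{
  admin_result result= engine_optimize();
  if (result == ADMIN_TRY_ALTER)
  {
    // Rebuild via ALTER TABLE, then collect statistics on the new table.
    bool failed= recreate_table() || analyze_table();
    result= failed ? ADMIN_FAILED : ADMIN_OK;
  }
  printf("%s\n", result == ADMIN_OK ? "status: OK" : "status: Operation failed");
  return 0;
}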
+ + RETURN VALUES + 0 ok +*/ + +int reassign_keycache_tables(THD *thd, KEY_CACHE *src_cache, + KEY_CACHE *dst_cache) +{ + DBUG_ENTER("reassign_keycache_tables"); + + DBUG_ASSERT(src_cache != dst_cache); + DBUG_ASSERT(src_cache->in_init); + src_cache->param_buff_size= 0; // Free key cache + ha_resize_key_cache(src_cache); + ha_change_key_cache(src_cache, dst_cache); + DBUG_RETURN(0); +} + + +/* + Preload specified indexes for a table into key cache + + SYNOPSIS + mysql_preload_keys() + thd Thread object + tables Table list (one table only) + + RETURN VALUES + 0 ok + -1 error +*/ + +int mysql_preload_keys(THD* thd, TABLE_LIST* tables) +{ + DBUG_ENTER("mysql_preload_keys"); + DBUG_RETURN(mysql_admin_table(thd, tables, 0, + "preload_keys", TL_READ, 0, 0, 0, + &handler::preload_keys)); +} + + +/* + Create a table identical to the specified table + + SYNOPSIS + mysql_create_like_table() + thd Thread object + table Table list (one table only) + create_info Create info + table_ident Src table_ident + + RETURN VALUES + 0 ok + -1 error +*/ + +int mysql_create_like_table(THD* thd, TABLE_LIST* table, + HA_CREATE_INFO *create_info, + Table_ident *table_ident) +{ + TABLE **tmp_table; + char src_path[FN_REFLEN], dst_path[FN_REFLEN]; + char *db= table->db; + char *table_name= table->real_name; + char *src_db; + char *src_table= table_ident->table.str; + int err, res= -1; + TABLE_LIST src_tables_list; + DBUG_ENTER("mysql_create_like_table"); + src_db= table_ident->db.str ? table_ident->db.str : thd->db; + + /* + Validate the source table + */ + if (table_ident->table.length > NAME_LEN || + (table_ident->table.length && + check_table_name(src_table,table_ident->table.length))) + { + my_error(ER_WRONG_TABLE_NAME, MYF(0), src_table); + DBUG_RETURN(-1); + } + if (!src_db || check_db_name(src_db)) + { + my_error(ER_WRONG_DB_NAME, MYF(0), src_db ? src_db : "NULL"); + DBUG_RETURN(-1); + } + + src_tables_list.db= src_db; + src_tables_list.real_name= src_table; + src_tables_list.next= 0; + + if (lock_and_wait_for_table_name(thd, &src_tables_list)) + goto err; + + if ((tmp_table= find_temporary_table(thd, src_db, src_table))) + strxmov(src_path, (*tmp_table)->path, reg_ext, NullS); + else + { + strxmov(src_path, mysql_data_home, "/", src_db, "/", src_table, + reg_ext, NullS); + /* Resolve symlinks (for windows) */ + fn_format(src_path, src_path, "", "", MYF(MY_UNPACK_FILENAME)); + if (lower_case_table_names) + my_casedn_str(files_charset_info, src_path); + if (access(src_path, F_OK)) + { + my_error(ER_BAD_TABLE_ERROR, MYF(0), src_table); + goto err; + } + } + + /* + Validate the destination table + + skip the destination table name checking as this is already + validated. 
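mysql_create_like_table() above is largely file-level work: locate the source table's .frm (either the temporary-table path or <datadir>/<db>/<table>.frm), and the code that follows copies it to the destination name without overwriting before asking the engine to create the data files from that definition. A minimal path-building sketch; the datadir and table names are placeholders and no copy is attempted.

#include <cstdio>

int main()
{
  const char *datadir= "/var/lib/mysql"; // placeholder for mysql_data_home
  const char *src_db= "test",  *src_table= "t1";
  const char *dst_db= "test",  *dst_table= "t2";

  char src_path[512], dst_path[512];
  // <datadir>/<db>/<table>.frm for both source and destination
  snprintf(src_path, sizeof(src_path), "%s/%s/%s%s", datadir, src_db, src_table, ".frm");
  snprintf(dst_path, sizeof(dst_path), "%s/%s/%s%s", datadir, dst_db, dst_table, ".frm");

  printf("copy %s -> %s (fail if destination exists)\n", src_path, dst_path);
  printf("then: strip .frm and let the engine create %s/%s/%s\n",
         datadir, dst_db, dst_table);
  return 0;
}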
+ */ + if (create_info->options & HA_LEX_CREATE_TMP_TABLE) + { + if (find_temporary_table(thd, db, table_name)) + goto table_exists; + my_snprintf(dst_path, sizeof(dst_path), "%s%s%lx_%lx_%x%s", + mysql_tmpdir, tmp_file_prefix, current_pid, + thd->thread_id, thd->tmp_table++, reg_ext); + if (lower_case_table_names) + my_casedn_str(files_charset_info, dst_path); + create_info->table_options|= HA_CREATE_DELAY_KEY_WRITE; + } + else + { + strxmov(dst_path, mysql_data_home, "/", db, "/", table_name, + reg_ext, NullS); + fn_format(dst_path, dst_path, "", "", MYF(MY_UNPACK_FILENAME)); + if (!access(dst_path, F_OK)) + goto table_exists; + } + + /* + Create a new table by copying from source table + */ + if (my_copy(src_path, dst_path, MYF(MY_DONT_OVERWRITE_FILE))) + { + if (my_errno == ENOENT) + my_error(ER_BAD_DB_ERROR,MYF(0),db); + else + my_error(ER_CANT_CREATE_FILE,MYF(0),dst_path,my_errno); + goto err; + } + + /* + As mysql_truncate don't work on a new table at this stage of + creation, instead create the table directly (for both normal + and temporary tables). + */ + *fn_ext(dst_path)= 0; + err= ha_create_table(dst_path, create_info, 1); + + if (create_info->options & HA_LEX_CREATE_TMP_TABLE) + { + if (err || !open_temporary_table(thd, dst_path, db, table_name, 1)) + { + (void) rm_temporary_table(create_info->db_type, + dst_path); /* purecov: inspected */ + goto err; /* purecov: inspected */ + } + } + else if (err) + { + (void) quick_rm_table(create_info->db_type, db, + table_name); /* purecov: inspected */ + goto err; /* purecov: inspected */ + } + + // Must be written before unlock + mysql_update_log.write(thd,thd->query, thd->query_length); + if (mysql_bin_log.is_open()) + { + thd->clear_error(); + Query_log_event qinfo(thd, thd->query, thd->query_length, + test(create_info->options & + HA_LEX_CREATE_TMP_TABLE), + FALSE); + mysql_bin_log.write(&qinfo); + } + res= 0; + goto err; + +table_exists: + if (create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS) + { + char warn_buff[MYSQL_ERRMSG_SIZE]; + my_snprintf(warn_buff, sizeof(warn_buff), + ER(ER_TABLE_EXISTS_ERROR), table_name); + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_TABLE_EXISTS_ERROR,warn_buff); + res= 0; + } + else + my_error(ER_TABLE_EXISTS_ERROR, MYF(0), table_name); + +err: + pthread_mutex_lock(&LOCK_open); + unlock_table_name(thd, &src_tables_list); + pthread_mutex_unlock(&LOCK_open); + DBUG_RETURN(res); +} + + int mysql_analyze_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt) { #ifdef OS2 @@ -1463,27 +2520,300 @@ int mysql_check_table(THD* thd, TABLE_LIST* tables,HA_CHECK_OPT* check_opt) } +/* table_list should contain just one table */ +static int +mysql_discard_or_import_tablespace(THD *thd, + TABLE_LIST *table_list, + enum tablespace_op_type tablespace_op) +{ + TABLE *table; + my_bool discard; + int error; + DBUG_ENTER("mysql_discard_or_import_tablespace"); + + /* + Note that DISCARD/IMPORT TABLESPACE always is the only operation in an + ALTER TABLE + */ + + thd->proc_info="discard_or_import_tablespace"; + + discard= test(tablespace_op == DISCARD_TABLESPACE); + + /* + We set this flag so that ha_innobase::open and ::external_lock() do + not complain when we lock the table + */ + thd->tablespace_op= TRUE; + if (!(table=open_ltable(thd,table_list,TL_WRITE))) + { + thd->tablespace_op=FALSE; + DBUG_RETURN(-1); + } + + error=table->file->discard_or_import_tablespace(discard); + + thd->proc_info="end"; + + if (error) + goto err; + + /* + The 0 in the call below means 'not in a transaction', which means + 
immediate invalidation; that is probably what we wish here + */ + query_cache_invalidate3(thd, table_list, 0); + + /* The ALTER TABLE is always in its own transaction */ + error = ha_commit_stmt(thd); + if (ha_commit(thd)) + error=1; + if (error) + goto err; + mysql_update_log.write(thd, thd->query,thd->query_length); + if (mysql_bin_log.is_open()) + { + Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); + mysql_bin_log.write(&qinfo); + } +err: + close_thread_tables(thd); + thd->tablespace_op=FALSE; + if (error == 0) + { + send_ok(thd); + DBUG_RETURN(0); + } + + if (error == HA_ERR_ROW_IS_REFERENCED) + my_error(ER_ROW_IS_REFERENCED, MYF(0)); + + DBUG_RETURN(-1); +} + + +#ifdef NOT_USED +/* + CREATE INDEX and DROP INDEX are implemented by calling ALTER TABLE with + the proper arguments. This isn't very fast but it should work for most + cases. + One should normally create all indexes with CREATE TABLE or ALTER TABLE. +*/ + +int mysql_create_indexes(THD *thd, TABLE_LIST *table_list, List<Key> &keys) +{ + List<create_field> fields; + List<Alter_drop> drop; + List<Alter_column> alter; + HA_CREATE_INFO create_info; + int rc; + uint idx; + uint db_options; + uint key_count; + TABLE *table; + Field **f_ptr; + KEY *key_info_buffer; + char path[FN_REFLEN+1]; + DBUG_ENTER("mysql_create_index"); + + /* + Try to use online generation of index. + This requires that all indexes can be created online. + Otherwise, the old alter table procedure is executed. + + Open the table to have access to the correct table handler. + */ + if (!(table=open_ltable(thd,table_list,TL_WRITE_ALLOW_READ))) + DBUG_RETURN(-1); + + /* + The add_index method takes an array of KEY structs for the new indexes. + Preparing a new table structure generates this array. + It needs a list with all fields of the table, which does not need to + be correct in every respect. The field names are important. + */ + for (f_ptr= table->field; *f_ptr; f_ptr++) + { + create_field *c_fld= new create_field(*f_ptr, *f_ptr); + c_fld->unireg_check= Field::NONE; /*avoid multiple auto_increments*/ + fields.push_back(c_fld); + } + bzero((char*) &create_info,sizeof(create_info)); + create_info.db_type=DB_TYPE_DEFAULT; + create_info.default_table_charset= thd->variables.collation_database; + db_options= 0; + if (mysql_prepare_table(thd, &create_info, fields, + keys, /*tmp_table*/ 0, db_options, table->file, + key_info_buffer, key_count, + /*select_field_count*/ 0)) + DBUG_RETURN(-1); + + /* + Check if all keys can be generated with the add_index method. + If anyone cannot, then take the old way. + */ + for (idx=0; idx< key_count; idx++) + { + DBUG_PRINT("info", ("creating index %s", key_info_buffer[idx].name)); + if (!(table->file->index_ddl_flags(key_info_buffer+idx)& + (HA_DDL_ONLINE| HA_DDL_WITH_LOCK))) + break ; + } + if ((idx < key_count)|| !key_count) + { + /* Re-initialize the create_info, which was changed by prepare table. */ + bzero((char*) &create_info,sizeof(create_info)); + create_info.db_type=DB_TYPE_DEFAULT; + create_info.default_table_charset= thd->variables.collation_database; + /* Cleanup the fields list. We do not want to create existing fields. 
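[Editorial sketch] The (currently #ifdef NOT_USED) mysql_create_indexes() above decides, key by key, whether the handler can build an index in place; if any key lacks the online-DDL capability it falls back to the copying ALTER TABLE path. A minimal standalone sketch of that capability check, with invented flag values and key list rather than the real handler flags:

#include <cstdio>
#include <vector>

// Invented capability bits, mirroring the HA_DDL_ONLINE / HA_DDL_WITH_LOCK idea.
enum { DDL_ONLINE = 1 << 0, DDL_WITH_LOCK = 1 << 1 };

struct KeyDef { const char *name; unsigned ddl_flags; };

// Returns true when every key can be added without rebuilding the table.
static bool all_keys_online(const std::vector<KeyDef> &keys)
{
  for (const KeyDef &k : keys)
    if (!(k.ddl_flags & (DDL_ONLINE | DDL_WITH_LOCK)))
      return false;                       // at least one key forces the slow path
  return !keys.empty();
}

int main()
{
  std::vector<KeyDef> keys = { {"idx_a", DDL_ONLINE}, {"idx_b", 0} };
  if (all_keys_online(keys))
    std::puts("add indexes in place");
  else
    std::puts("fall back to the copying ALTER TABLE");
  return 0;
}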
*/ + fields.delete_elements(); + if (real_alter_table(thd, table_list->db, table_list->real_name, + &create_info, table_list, table, + fields, keys, drop, alter, 0, (ORDER*)0, + ALTER_ADD_INDEX, DUP_ERROR)) + /* Don't need to free((gptr) key_info_buffer);*/ + DBUG_RETURN(-1); + } + else + { + if (table->file->add_index(table, key_info_buffer, key_count)|| + build_table_path(path, sizeof(path), table_list->db, + (lower_case_table_names == 2) ? + table_list->alias : table_list->real_name, + reg_ext) == 0 || + mysql_create_frm(thd, path, &create_info, + fields, key_count, key_info_buffer, table->file)) + /* don't need to free((gptr) key_info_buffer);*/ + DBUG_RETURN(-1); + } + /* don't need to free((gptr) key_info_buffer);*/ + DBUG_RETURN(0); +} + + +int mysql_drop_indexes(THD *thd, TABLE_LIST *table_list, + List<Alter_drop> &drop) +{ + List<create_field> fields; + List<Key> keys; + List<Alter_column> alter; + HA_CREATE_INFO create_info; + uint idx; + uint db_options; + uint key_count; + uint *key_numbers; + TABLE *table; + Field **f_ptr; + KEY *key_info; + KEY *key_info_buffer; + char path[FN_REFLEN]; + DBUG_ENTER("mysql_drop_index"); + + /* + Try to use online generation of index. + This requires that all indexes can be created online. + Otherwise, the old alter table procedure is executed. + + Open the table to have access to the correct table handler. + */ + if (!(table=open_ltable(thd,table_list,TL_WRITE_ALLOW_READ))) + DBUG_RETURN(-1); + + /* + The drop_index method takes an array of key numbers. + It cannot get more entries than keys in the table. + */ + key_numbers= (uint*) thd->alloc(sizeof(uint*)*table->keys); + key_count= 0; + + /* + Get the number of each key and check if it can be created online. + */ + List_iterator<Alter_drop> drop_it(drop); + Alter_drop *drop_key; + while ((drop_key= drop_it++)) + { + /* Find the key in the table. */ + key_info=table->key_info; + for (idx=0; idx< table->keys; idx++, key_info++) + { + if (!my_strcasecmp(system_charset_info, key_info->name, drop_key->name)) + break; + } + if (idx>= table->keys) + { + my_error(ER_CANT_DROP_FIELD_OR_KEY, MYF(0), drop_key->name); + /*don't need to free((gptr) key_numbers);*/ + DBUG_RETURN(-1); + } + /* + Check if the key can be generated with the add_index method. + If anyone cannot, then take the old way. + */ + DBUG_PRINT("info", ("dropping index %s", table->key_info[idx].name)); + if (!(table->file->index_ddl_flags(table->key_info+idx)& + (HA_DDL_ONLINE| HA_DDL_WITH_LOCK))) + break ; + key_numbers[key_count++]= idx; + } + + bzero((char*) &create_info,sizeof(create_info)); + create_info.db_type=DB_TYPE_DEFAULT; + create_info.default_table_charset= thd->variables.collation_database; + + if ((drop_key)|| (drop.elements<= 0)) + { + if (real_alter_table(thd, table_list->db, table_list->real_name, + &create_info, table_list, table, + fields, keys, drop, alter, 0, (ORDER*)0, + ALTER_DROP_INDEX, DUP_ERROR)) + /*don't need to free((gptr) key_numbers);*/ + DBUG_RETURN(-1); + } + else + { + db_options= 0; + if (table->file->drop_index(table, key_numbers, key_count)|| + mysql_prepare_table(thd, &create_info, fields, + keys, /*tmp_table*/ 0, db_options, table->file, + key_info_buffer, key_count, + /*select_field_count*/ 0)|| + build_table_path(path, sizeof(path), table_list->db, + (lower_case_table_names == 2) ? 
+ table_list->alias : table_list->real_name, + reg_ext) == 0 || + mysql_create_frm(thd, path, &create_info, + fields, key_count, key_info_buffer, table->file)) + /*don't need to free((gptr) key_numbers);*/ + DBUG_RETURN(-1); + } + + /*don't need to free((gptr) key_numbers);*/ + DBUG_RETURN(0); +} +#endif /* NOT_USED */ + + +/* + Alter table +*/ + int mysql_alter_table(THD *thd,char *new_db, char *new_name, HA_CREATE_INFO *create_info, TABLE_LIST *table_list, - List<create_field> &fields, - List<Key> &keys,List<Alter_drop> &drop_list, - List<Alter_column> &alter_list, - ORDER *order, - bool drop_primary, - enum enum_duplicates handle_duplicates, - enum enum_enable_or_disable keys_onoff, - bool simple_alter) + List<create_field> &fields, List<Key> &keys, + uint order_num, ORDER *order, + enum enum_duplicates handle_duplicates, bool ignore, + ALTER_INFO *alter_info, bool do_send_ok) { TABLE *table,*new_table; int error; char tmp_name[80],old_name[32],new_name_buff[FN_REFLEN]; char new_alias_buff[FN_REFLEN], *table_name, *db, *new_alias, *alias; char index_file[FN_REFLEN], data_file[FN_REFLEN]; - bool use_timestamp=0; ha_rows copied,deleted; ulonglong next_insert_id; - uint save_time_stamp,db_create_options, used_fields; + uint db_create_options, used_fields; enum db_type old_db_type,new_db_type; DBUG_ENTER("mysql_alter_table"); @@ -1492,15 +2822,16 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, alias= (lower_case_table_names == 2) ? table_list->alias : table_name; db=table_list->db; - if (!new_db || - lower_case_table_names && !my_strcasecmp(new_db, db) || - !lower_case_table_names && !strcmp(new_db, db)) - { + if (!new_db || !my_strcasecmp(table_alias_charset, new_db, db)) new_db= db; - } used_fields=create_info->used_fields; mysql_ha_flush(thd, table_list, MYSQL_HA_CLOSE_FINAL, FALSE); + + /* DISCARD/IMPORT TABLESPACE is always alone in an ALTER TABLE */ + if (alter_info->tablespace_op != NO_TABLESPACE_OP) + DBUG_RETURN(mysql_discard_or_import_tablespace(thd,table_list, + alter_info->tablespace_op)); if (!(table=open_ltable(thd,table_list,TL_WRITE_ALLOW_READ))) DBUG_RETURN(-1); @@ -1513,20 +2844,17 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, { if (lower_case_table_names != 2) { - casedn_str(new_name_buff); + my_casedn_str(files_charset_info, new_name_buff); new_alias= new_name; // Create lower case table name } - casedn_str(new_name); + my_casedn_str(files_charset_info, new_name); } if (new_db == db && - (lower_case_table_names && - !my_strcasecmp(new_name_buff,table_name) || - !lower_case_table_names && - !strcmp(new_name_buff,table_name))) + !my_strcasecmp(table_alias_charset, new_name_buff, table_name)) { /* - Source and destination table names are equal: make later check - easier. + Source and destination table names are equal: make later check + easier. 
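[Editorial sketch] In mysql_alter_table() the hand-rolled lower_case_table_names branches are replaced by a single my_strcasecmp() against table_alias_charset when deciding whether source and destination names refer to the same table. A standalone sketch of the same idea, assuming a simple ASCII case fold instead of the server's charset-aware comparison:

#include <cctype>
#include <cstdio>
#include <string>

// Case-insensitive identifier comparison (ASCII only, for illustration).
static bool same_table_name(const std::string &a, const std::string &b)
{
  if (a.size() != b.size())
    return false;
  for (size_t i = 0; i < a.size(); i++)
    if (std::tolower((unsigned char) a[i]) != std::tolower((unsigned char) b[i]))
      return false;
  return true;
}

int main()
{
  // "T1" and "t1" name the same table when identifiers are case-insensitive.
  std::printf("%d\n", same_table_name("T1", "t1"));   // prints 1
  return 0;
}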
*/ new_alias= new_name= table_name; } @@ -1542,7 +2870,9 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, } else { - if (!access(fn_format(new_name_buff,new_name_buff,new_db,reg_ext,0), + char dir_buff[FN_REFLEN]; + strxnmov(dir_buff, FN_REFLEN, mysql_real_data_home, new_db, NullS); + if (!access(fn_format(new_name_buff,new_name_buff,dir_buff,reg_ext,0), F_OK)) { /* Table will be closed in do_command() */ @@ -1561,14 +2891,21 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, old_db_type=table->db_type; if (create_info->db_type == DB_TYPE_DEFAULT) create_info->db_type=old_db_type; - new_db_type=create_info->db_type= ha_checktype(create_info->db_type); + if ((new_db_type= ha_checktype(create_info->db_type)) != + create_info->db_type) + { + create_info->db_type= new_db_type; + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_USING_OTHER_HANDLER, + ER(ER_WARN_USING_OTHER_HANDLER), + ha_get_storage_engine(new_db_type), + new_name); + } if (create_info->row_type == ROW_TYPE_NOT_USED) create_info->row_type=table->row_type; - /* In some simple cases we need not to recreate the table */ - thd->proc_info="setup"; - if (simple_alter && !table->tmp_table) + if (alter_info->is_simple && !table->tmp_table) { error=0; if (new_name != table_name || new_db != db) @@ -1579,49 +2916,63 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, error=0; if (!access(new_name_buff,F_OK)) { - my_error(ER_TABLE_EXISTS_ERROR,MYF(0),new_name); - error= -1; + my_error(ER_TABLE_EXISTS_ERROR,MYF(0),new_name); + error= -1; } else { - *fn_ext(new_name)=0; - close_cached_table(thd, table); - if (mysql_rename_table(old_db_type,db,table_name,new_db,new_alias)) + *fn_ext(new_name)=0; + close_cached_table(thd, table); + if (mysql_rename_table(old_db_type,db,table_name,new_db,new_alias)) error= -1; } VOID(pthread_mutex_unlock(&LOCK_open)); } + if (!error) { - switch (keys_onoff) { + switch (alter_info->keys_onoff) { case LEAVE_AS_IS: break; case ENABLE: VOID(pthread_mutex_lock(&LOCK_open)); wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN); VOID(pthread_mutex_unlock(&LOCK_open)); - error= (table->file->activate_all_index(thd) ? 
-1 : 0); + error= table->file->enable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE); /* COND_refresh will be signaled in close_thread_tables() */ break; case DISABLE: VOID(pthread_mutex_lock(&LOCK_open)); wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN); VOID(pthread_mutex_unlock(&LOCK_open)); - table->file->deactivate_non_unique_index(HA_POS_ERROR); + error=table->file->disable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE); /* COND_refresh will be signaled in close_thread_tables() */ break; } } + if (error == HA_ERR_WRONG_COMMAND) + { + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_ILLEGAL_HA, ER(ER_ILLEGAL_HA), + table->table_name); + error=0; + } if (!error) { mysql_update_log.write(thd, thd->query, thd->query_length); if (mysql_bin_log.is_open()) { - thd->clear_error(); - Query_log_event qinfo(thd, thd->query, thd->query_length, 0); + thd->clear_error(); + Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); mysql_bin_log.write(&qinfo); } - send_ok(&thd->net); + if (do_send_ok) + send_ok(thd); + } + else if (error > 0) + { + table->file->print_error(error, MYF(0)); + error= -1; } table_list->table=0; // For query cache query_cache_invalidate3(thd, table_list, 0); @@ -1629,28 +2980,39 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, } /* Full alter table */ - restore_record(table,2); // Empty record for DEFAULT - List_iterator<Alter_drop> drop_it(drop_list); + + /* let new create options override the old ones */ + if (!(used_fields & HA_CREATE_USED_MIN_ROWS)) + create_info->min_rows=table->min_rows; + if (!(used_fields & HA_CREATE_USED_MAX_ROWS)) + create_info->max_rows=table->max_rows; + if (!(used_fields & HA_CREATE_USED_AVG_ROW_LENGTH)) + create_info->avg_row_length=table->avg_row_length; + if (!(used_fields & HA_CREATE_USED_DEFAULT_CHARSET)) + create_info->default_table_charset= table->table_charset; + + restore_record(table,default_values); // Empty record for DEFAULT + List_iterator<Alter_drop> drop_it(alter_info->drop_list); List_iterator<create_field> def_it(fields); - List_iterator<Alter_column> alter_it(alter_list); + List_iterator<Alter_column> alter_it(alter_info->alter_list); List<create_field> create_list; // Add new fields here List<Key> key_list; // Add new keys here + create_field *def; /* - ** First collect all fields from table which isn't in drop_list + First collect all fields from table which isn't in drop_list */ - create_field *def; Field **f_ptr,*field; for (f_ptr=table->field ; (field= *f_ptr) ; f_ptr++) { - /* Check if field should be droped */ + /* Check if field should be dropped */ Alter_drop *drop; drop_it.rewind(); while ((drop=drop_it++)) { if (drop->type == Alter_drop::COLUMN && - !my_strcasecmp(field->field_name, drop->name)) + !my_strcasecmp(system_charset_info,field->field_name, drop->name)) { /* Reset auto_increment value if it was dropped */ if (MTYP_TYPENR(field->unireg_check) == Field::NEXT_NUMBER && @@ -1671,14 +3033,13 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, def_it.rewind(); while ((def=def_it++)) { - if (def->change && !my_strcasecmp(field->field_name, def->change)) + if (def->change && + !my_strcasecmp(system_charset_info,field->field_name, def->change)) break; } if (def) { // Field is changed def->field=field; - if (def->sql_type == FIELD_TYPE_TIMESTAMP) - use_timestamp=1; if (!def->after) { create_list.push_back(def); @@ -1688,23 +3049,20 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, else { // Use old field value create_list.push_back(def=new 
create_field(field,field)); - if (def->sql_type == FIELD_TYPE_TIMESTAMP) - use_timestamp=1; - alter_it.rewind(); // Change default if ALTER Alter_column *alter; while ((alter=alter_it++)) { - if (!my_strcasecmp(field->field_name, alter->name)) + if (!my_strcasecmp(system_charset_info,field->field_name, alter->name)) break; } if (alter) { - if (def->sql_type == FIELD_TYPE_BLOB) - { - my_error(ER_BLOB_CANT_HAVE_DEFAULT,MYF(0),def->change); - DBUG_RETURN(-1); - } + if (def->sql_type == FIELD_TYPE_BLOB) + { + my_error(ER_BLOB_CANT_HAVE_DEFAULT,MYF(0),def->change); + DBUG_RETURN(-1); + } def->def=alter->def; // Use new default alter_it.remove(); } @@ -1729,7 +3087,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, find_it.rewind(); while ((find=find_it++)) // Add new columns { - if (!my_strcasecmp(def->after, find->field_name)) + if (!my_strcasecmp(system_charset_info,def->after, find->field_name)) break; } if (!find) @@ -1740,9 +3098,10 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, find_it.after(def); // Put element after this } } - if (alter_list.elements) + if (alter_info->alter_list.elements) { - my_error(ER_BAD_FIELD_ERROR,MYF(0),alter_list.head()->name,table_name); + my_error(ER_BAD_FIELD_ERROR,MYF(0),alter_info->alter_list.head()->name, + table_name); DBUG_RETURN(-1); } if (!create_list.elements) @@ -1763,19 +3122,13 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, KEY *key_info=table->key_info; for (uint i=0 ; i < table->keys ; i++,key_info++) { - if (drop_primary && (key_info->flags & HA_NOSAME)) - { - drop_primary=0; - continue; - } - - char *key_name=key_info->name; + char *key_name= key_info->name; Alter_drop *drop; drop_it.rewind(); while ((drop=drop_it++)) { if (drop->type == Alter_drop::KEY && - !my_strcasecmp(key_name, drop->name)) + !my_strcasecmp(system_charset_info,key_name, drop->name)) break; } if (drop) @@ -1785,11 +3138,6 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, } KEY_PART_INFO *key_part= key_info->key_part; - enum Key::Keytype key_type= key_info->flags & HA_NOSAME ? - (!my_strcasecmp(key_name, "PRIMARY") ? - Key::PRIMARY : Key::UNIQUE) : - (key_info->flags & HA_FULLTEXT ? - Key::FULLTEXT : Key::MULTIPLE); key_parts.empty(); for (uint j=0 ; j < key_info->key_parts ; j++,key_part++) { @@ -1802,11 +3150,13 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, { if (cfield->change) { - if (!my_strcasecmp(key_part_name, cfield->change)) + if (!my_strcasecmp(system_charset_info, key_part_name, + cfield->change)) break; } - else if (!my_strcasecmp(key_part_name, cfield->field_name)) - break; + else if (!my_strcasecmp(system_charset_info, + key_part_name, cfield->field_name)) + break; } if (!cfield) continue; // Field is removed @@ -1815,49 +3165,69 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, { // Check if sub key if (cfield->field->type() != FIELD_TYPE_BLOB && (cfield->field->pack_length() == key_part_length || - cfield->length != cfield->pack_length || - cfield->pack_length <= key_part_length)) + cfield->length <= key_part_length / + key_part->field->charset()->mbmaxlen)) key_part_length=0; // Use whole field } + key_part_length /= key_part->field->charset()->mbmaxlen; key_parts.push_back(new key_part_spec(cfield->field_name, key_part_length)); } if (key_parts.elements) - key_list.push_back(new Key(key_type,key_name,key_parts)); + key_list.push_back(new Key(key_info->flags & HA_SPATIAL ? Key::SPATIAL : + (key_info->flags & HA_NOSAME ? 
+ (!my_strcasecmp(system_charset_info, + key_name, primary_key_name) ? + Key::PRIMARY : Key::UNIQUE) : + (key_info->flags & HA_FULLTEXT ? + Key::FULLTEXT : Key::MULTIPLE)), + key_name, + key_info->algorithm, + test(key_info->flags & HA_GENERATED_KEY), + key_parts)); } - key_it.rewind(); { Key *key; while ((key=key_it++)) // Add new keys - key_list.push_back(key); + { + if (key->type != Key::FOREIGN_KEY) + key_list.push_back(key); + if (key->name && + !my_strcasecmp(system_charset_info,key->name,primary_key_name)) + { + my_error(ER_WRONG_NAME_FOR_INDEX, MYF(0), key->name); + DBUG_RETURN(-1); + } + } } - if (drop_list.elements) + if (alter_info->drop_list.elements) { - my_error(ER_CANT_DROP_FIELD_OR_KEY,MYF(0),drop_list.head()->name); + my_error(ER_CANT_DROP_FIELD_OR_KEY,MYF(0), + alter_info->drop_list.head()->name); goto err; } - if (alter_list.elements) + if (alter_info->alter_list.elements) { - my_error(ER_CANT_DROP_FIELD_OR_KEY,MYF(0),alter_list.head()->name); + my_error(ER_CANT_DROP_FIELD_OR_KEY,MYF(0), + alter_info->alter_list.head()->name); goto err; } db_create_options=table->db_create_options & ~(HA_OPTION_PACK_RECORD); - (void) sprintf(tmp_name,"%s-%lx_%lx", tmp_file_prefix, current_pid, - thd->thread_id); + my_snprintf(tmp_name, sizeof(tmp_name), "%s-%lx_%lx", tmp_file_prefix, + current_pid, thd->thread_id); + /* Safety fix for innodb */ + if (lower_case_table_names) + my_casedn_str(files_charset_info, tmp_name); + if (new_db_type != old_db_type && !table->file->can_switch_engines()) { + my_error(ER_ROW_IS_REFERENCED, MYF(0)); + goto err; + } create_info->db_type=new_db_type; if (!create_info->comment) create_info->comment=table->comment; - /* let new create options override the old ones */ - if (!(used_fields & HA_CREATE_USED_MIN_ROWS)) - create_info->min_rows=table->min_rows; - if (!(used_fields & HA_CREATE_USED_MAX_ROWS)) - create_info->max_rows=table->max_rows; - if (!(used_fields & HA_CREATE_USED_AVG_ROW_LENGTH)) - create_info->avg_row_length=table->avg_row_length; - table->file->update_create_info(create_info); if ((create_info->table_options & (HA_OPTION_PACK_KEYS | HA_OPTION_NO_PACK_KEYS)) || @@ -1925,7 +3295,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, /* We don't log the statement, it will be logged later. */ tmp_disable_binlog(thd); error= mysql_create_table(thd, new_db, tmp_name, - create_info,create_list,key_list,1); + create_info,create_list,key_list,1,0); reenable_binlog(thd); if (error) DBUG_RETURN(error); @@ -1935,8 +3305,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, else { char path[FN_REFLEN]; - (void) sprintf(path,"%s/%s/%s",mysql_data_home,new_db,tmp_name); - fn_format(path,path,"","",4); + build_table_path(path, sizeof(path), new_db, tmp_name, ""); new_table=open_temporary_table(thd, path, new_db, tmp_name,0); } if (!new_table) @@ -1945,22 +3314,21 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, goto err; } - save_time_stamp=new_table->time_stamp; - if (use_timestamp) - new_table->time_stamp=0; + + /* We don't want update TIMESTAMP fields during ALTER TABLE. 
*/ + new_table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; new_table->next_number_field=new_table->found_next_number_field; - thd->count_cuted_fields=1; // calc cuted fields + thd->count_cuted_fields= CHECK_FIELD_WARN; // calc cuted fields thd->cuted_fields=0L; thd->proc_info="copy to tmp table"; next_insert_id=thd->next_insert_id; // Remember for loggin copied=deleted=0; if (!new_table->is_view) error=copy_data_between_tables(table,new_table,create_list, - handle_duplicates, - order, &copied, &deleted); + handle_duplicates, ignore, + order_num, order, &copied, &deleted); thd->last_insert_id=next_insert_id; // Needed for correct log - thd->count_cuted_fields=0; // Don`t calc cuted fields - new_table->time_stamp=save_time_stamp; + thd->count_cuted_fields= CHECK_FIELD_IGNORE; if (table->tmp_table) { @@ -1982,7 +3350,8 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, } /* Remove link to old table and rename the new one */ close_temporary_table(thd,table->table_cache_key,table_name); - if (rename_temporary_table(thd, new_table, new_db, new_alias)) + /* Should pass the 'new_name' as we store table name in the cache */ + if (rename_temporary_table(thd, new_table, new_db, new_name)) { // Fatal error close_temporary_table(thd,new_db,tmp_name); my_free((gptr) new_table,MYF(0)); @@ -1992,7 +3361,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, if (mysql_bin_log.is_open()) { thd->clear_error(); - Query_log_event qinfo(thd, thd->query, thd->query_length, 0); + Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); mysql_bin_log.write(&qinfo); } goto end_temporary; @@ -2015,8 +3384,10 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, */ thd->proc_info="rename result table"; - sprintf(old_name,"%s2-%lx-%lx", tmp_file_prefix, current_pid, - thd->thread_id); + my_snprintf(old_name, sizeof(old_name), "%s2-%lx-%lx", tmp_file_prefix, + current_pid, thd->thread_id); + if (lower_case_table_names) + my_casedn_str(files_charset_info, old_name); if (new_name != table_name || new_db != db) { if (!access(new_name_buff,F_OK)) @@ -2097,9 +3468,10 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, if (table) { VOID(table->file->extra(HA_EXTRA_FORCE_REOPEN)); // Use new file + /* Mark in-use copies old */ remove_table_from_cache(thd,db,table_name,RTFC_NO_FLAG); - // Mark in-use copies old - mysql_lock_abort(thd,table); // end threads waiting on lock + /* end threads waiting on lock */ + mysql_lock_abort(thd,table); } VOID(quick_rm_table(old_db_type,db,old_name)); if (close_data_tables(thd,db,table_name) || @@ -2111,8 +3483,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, goto err; } } - - /* The ALTER TABLE is always in it's own transaction */ + /* The ALTER TABLE is always in its own transaction */ error = ha_commit_stmt(thd); if (ha_commit(thd)) error=1; @@ -2127,7 +3498,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, if (mysql_bin_log.is_open()) { thd->clear_error(); - Query_log_event qinfo(thd, thd->query, thd->query_length, 0); + Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); mysql_bin_log.write(&qinfo); } VOID(pthread_cond_broadcast(&COND_refresh)); @@ -2141,8 +3512,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, shutdown. 
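[Editorial sketch] The commit step above renames the original table to a temporary name built from tmp_file_prefix, moves the freshly built copy into place, and only then removes the old files; if the second rename fails, the first one is undone so the original table stays usable. A standalone sketch of that swap using plain rename(2), with hypothetical file names:

#include <cstdio>
#include <cstring>
#include <cerrno>

// Swap new_path into the place of live_path, keeping the old file under
// backup_path until the swap has succeeded. Returns 0 on success, -1 on failure.
static int swap_in_new_table(const char *live_path,
                             const char *new_path,
                             const char *backup_path)
{
  if (std::rename(live_path, backup_path))        // step 1: move old out of the way
    return -1;
  if (std::rename(new_path, live_path))           // step 2: move new copy into place
  {
    std::rename(backup_path, live_path);          // undo step 1 as well as we can
    return -1;
  }
  std::remove(backup_path);                       // step 3: drop the old copy
  return 0;
}

int main()
{
  if (swap_in_new_table("t1.frm", "#sql-1234.frm", "#sql2-1234.frm"))
    std::fprintf(stderr, "swap failed: %s\n", std::strerror(errno));
  return 0;
}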
*/ char path[FN_REFLEN]; - (void) sprintf(path,"%s/%s/%s",mysql_data_home,new_db,table_name); - fn_format(path,path,"","",4); + build_table_path(path, sizeof(path), new_db, table_name, ""); table=open_temporary_table(thd, path, new_db, tmp_name,0); if (table) { @@ -2150,8 +3520,8 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, my_free((char*) table, MYF(0)); } else - sql_print_error("Warning: Could not open BDB table %s.%s after rename\n", - new_db,table_name); + sql_print_warning("Could not open BDB table %s.%s after rename\n", + new_db,table_name); (void) berkeley_flush_logs(); } #endif @@ -2159,9 +3529,11 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, query_cache_invalidate3(thd, table_list, 0); end_temporary: - sprintf(tmp_name,ER(ER_INSERT_INFO),(ulong) (copied+deleted), - (ulong) deleted, thd->cuted_fields); - send_ok(&thd->net,copied+deleted,0L,tmp_name); + my_snprintf(tmp_name, sizeof(tmp_name), ER(ER_INSERT_INFO), + (ulong) (copied + deleted), (ulong) deleted, + (ulong) thd->cuted_fields); + if (do_send_ok) + send_ok(thd,copied+deleted,0L,tmp_name); thd->some_tables_deleted=0; DBUG_RETURN(0); @@ -2172,11 +3544,12 @@ end_temporary: static int copy_data_between_tables(TABLE *from,TABLE *to, - List<create_field> &create, + List<create_field> &create, enum enum_duplicates handle_duplicates, - ORDER *order, + bool ignore, + uint order_num, ORDER *order, ha_rows *copied, - ha_rows *deleted) + ha_rows *deleted) { int error; Copy_field *copy,*copy_end; @@ -2185,21 +3558,33 @@ copy_data_between_tables(TABLE *from,TABLE *to, uint length; SORT_FIELD *sortorder; READ_RECORD info; - Field *next_field; TABLE_LIST tables; List<Item> fields; List<Item> all_fields; ha_rows examined_rows; + bool auto_increment_field_copied= 0; + ulong save_sql_mode; DBUG_ENTER("copy_data_between_tables"); + /* + Turn off recovery logging since rollback of an alter table is to + delete the new table so there is no need to log the changes to it. + + This needs to be done before external_lock + */ + error= ha_enable_transaction(thd, FALSE); + if (error) + DBUG_RETURN(-1); + if (!(copy= new Copy_field[to->fields])) DBUG_RETURN(-1); /* purecov: inspected */ if (to->file->external_lock(thd, F_WRLCK)) DBUG_RETURN(-1); - to->file->extra(HA_EXTRA_WRITE_CACHE); from->file->info(HA_STATUS_VARIABLE); - to->file->deactivate_non_unique_index(from->file->records); + to->file->start_bulk_insert(from->file->records); + + save_sql_mode= thd->variables.sql_mode; List_iterator<create_field> it(create); create_field *def; @@ -2208,45 +3593,56 @@ copy_data_between_tables(TABLE *from,TABLE *to, { def=it++; if (def->field) + { + if (*ptr == to->next_number_field) + { + auto_increment_field_copied= TRUE; + /* + If we are going to copy contents of one auto_increment column to + another auto_increment column it is sensible to preserve zeroes. + This condition also covers case when we are don't actually alter + auto_increment column. 
+ */ + if (def->field == from->found_next_number_field) + thd->variables.sql_mode|= MODE_NO_AUTO_VALUE_ON_ZERO; + } (copy_end++)->set(*ptr,def->field,0); + } + } found_count=delete_count=0; if (order) { - from->io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE), - MYF(MY_FAE | MY_ZEROFILL)); + from->sort.io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE), + MYF(MY_FAE | MY_ZEROFILL)); bzero((char*) &tables,sizeof(tables)); tables.table = from; tables.alias = tables.real_name= from->real_name; tables.db = from->table_cache_key; error=1; - if (setup_order(thd, &tables, fields, all_fields, order) || - !(sortorder=make_unireg_sortorder(order, &length)) || - (from->found_records = filesort(from, sortorder, length, - (SQL_SELECT *) 0, 0L, HA_POS_ERROR, - &examined_rows)) - == HA_POS_ERROR) + if (thd->lex->select_lex.setup_ref_array(thd, order_num) || + setup_order(thd, thd->lex->select_lex.ref_pointer_array, + &tables, fields, all_fields, order) || + !(sortorder=make_unireg_sortorder(order, &length)) || + (from->sort.found_records = filesort(thd, from, sortorder, length, + (SQL_SELECT *) 0, HA_POS_ERROR, + &examined_rows)) + == HA_POS_ERROR) goto err; }; - /* Turn off recovery logging since rollback of an - alter table is to delete the new table so there - is no need to log the changes to it. */ - error = ha_recovery_logging(thd,FALSE); - if (error) - { - error = 1; - goto err; - } - + /* Handler must be told explicitly to retrieve all columns, because + this function does not set field->query_id in the columns to the + current query id */ + from->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1,1); - if (handle_duplicates == DUP_IGNORE || + if (ignore || handle_duplicates == DUP_REPLACE) to->file->extra(HA_EXTRA_IGNORE_DUP_KEY); - next_field=to->next_number_field; + thd->row_count= 0; while (!(error=info.read_record(&info))) { if (thd->killed) @@ -2255,13 +3651,21 @@ copy_data_between_tables(TABLE *from,TABLE *to, error= 1; break; } - if (next_field) - next_field->reset(); + thd->row_count++; + if (to->next_number_field) + { + if (auto_increment_field_copied) + to->auto_increment_field_not_null= TRUE; + else + to->next_number_field->reset(); + } for (Copy_field *copy_ptr=copy ; copy_ptr != copy_end ; copy_ptr++) + { copy_ptr->do_copy(copy_ptr); + } if ((error=to->file->write_row((byte*) to->record[0]))) { - if ((handle_duplicates != DUP_IGNORE && + if ((!ignore && handle_duplicates != DUP_REPLACE) || (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE)) @@ -2277,17 +3681,16 @@ copy_data_between_tables(TABLE *from,TABLE *to, end_read_record(&info); free_io_cache(from); delete [] copy; // This is never 0 - uint tmp_error; - if ((tmp_error=to->file->extra(HA_EXTRA_NO_CACHE))) + + if (to->file->end_bulk_insert() && !error) { - to->file->print_error(tmp_error,MYF(0)); + to->file->print_error(my_errno,MYF(0)); error=1; } to->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); - if (to->file->activate_all_index(thd)) - error=1; - tmp_error = ha_recovery_logging(thd,TRUE); + ha_enable_transaction(thd,TRUE); + /* Ensure that the new table is saved properly to disk so that we can do a rename @@ -2298,6 +3701,7 @@ copy_data_between_tables(TABLE *from,TABLE *to, error=1; err: + thd->variables.sql_mode= save_sql_mode; free_io_cache(from); *copied= found_count; *deleted=delete_count; @@ -2305,3 +3709,148 @@ copy_data_between_tables(TABLE *from,TABLE *to, error=1; DBUG_RETURN(error > 0 ? -1 : 0); } + + +/* + Recreates tables by calling mysql_alter_table(). 
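[Editorial sketch] copy_data_between_tables() above streams every row of the old table into the new one and, when ALTER ... IGNORE is in effect, skips rows that collide on a unique key instead of aborting the whole copy, counting them as deleted. A standalone sketch of that loop, with a std::set standing in for the unique index:

#include <cstdio>
#include <set>
#include <vector>

struct Row { int key; int payload; };

// Copy rows into a destination that has a unique key on 'key'. With
// ignore_duplicates, colliding rows are counted and dropped; otherwise the
// copy stops at the first collision, mirroring the error path above.
static int copy_rows(const std::vector<Row> &src, std::vector<Row> *dst,
                     bool ignore_duplicates, long *copied, long *deleted)
{
  std::set<int> unique_key;
  *copied = *deleted = 0;
  for (const Row &row : src)
  {
    if (!unique_key.insert(row.key).second)       // duplicate key value
    {
      if (!ignore_duplicates)
        return -1;                                // abort the whole copy
      ++*deleted;                                 // row silently dropped
      continue;
    }
    dst->push_back(row);
    ++*copied;
  }
  return 0;
}

int main()
{
  std::vector<Row> src = { {1, 10}, {2, 20}, {1, 30} }, dst;
  long copied, deleted;
  copy_rows(src, &dst, /*ignore_duplicates=*/true, &copied, &deleted);
  std::printf("%ld copied, %ld duplicates dropped\n", copied, deleted);  // 2, 1
  return 0;
}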
+ + SYNOPSIS + mysql_recreate_table() + thd Thread handler + tables Tables to recreate + do_send_ok If we should send_ok() or leave it to caller + + RETURN + Like mysql_alter_table(). +*/ +int mysql_recreate_table(THD *thd, TABLE_LIST *table_list, + bool do_send_ok) +{ + DBUG_ENTER("mysql_recreate_table"); + LEX *lex= thd->lex; + HA_CREATE_INFO create_info; + lex->create_list.empty(); + lex->key_list.empty(); + lex->col_list.empty(); + lex->alter_info.reset(); + lex->alter_info.is_simple= 0; // Force full recreate + bzero((char*) &create_info,sizeof(create_info)); + create_info.db_type=DB_TYPE_DEFAULT; + create_info.row_type=ROW_TYPE_DEFAULT; + create_info.default_table_charset=default_charset_info; + DBUG_RETURN(mysql_alter_table(thd, NullS, NullS, &create_info, + table_list, lex->create_list, + lex->key_list, 0, (ORDER *) 0, + DUP_ERROR, 0, &lex->alter_info, do_send_ok)); +} + + +int mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt) +{ + TABLE_LIST *table; + List<Item> field_list; + Item *item; + Protocol *protocol= thd->protocol; + DBUG_ENTER("mysql_checksum_table"); + + field_list.push_back(item = new Item_empty_string("Table", NAME_LEN*2)); + item->maybe_null= 1; + field_list.push_back(item=new Item_int("Checksum",(longlong) 1,21)); + item->maybe_null= 1; + if (protocol->send_fields(&field_list, 1)) + DBUG_RETURN(-1); + + for (table= tables; table; table= table->next) + { + char table_name[NAME_LEN*2+2]; + TABLE *t; + + strxmov(table_name, table->db ,".", table->real_name, NullS); + + t= table->table= open_ltable(thd, table, TL_READ_NO_INSERT); + thd->clear_error(); // these errors shouldn't get client + + protocol->prepare_for_resend(); + protocol->store(table_name, system_charset_info); + + if (!t) + { + /* Table didn't exist */ + protocol->store_null(); + thd->net.last_error[0]=0; + } + else + { + t->pos_in_table_list= table; + + if (t->file->table_flags() & HA_HAS_CHECKSUM && + !(check_opt->flags & T_EXTEND)) + protocol->store((ulonglong)t->file->checksum()); + else if (!(t->file->table_flags() & HA_HAS_CHECKSUM) && + (check_opt->flags & T_QUICK)) + protocol->store_null(); + else + { + /* calculating table's checksum */ + ha_checksum crc= 0; + + /* InnoDB must be told explicitly to retrieve all columns, because + this function does not set field->query_id in the columns to the + current query id */ + t->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); + + if (t->file->ha_rnd_init(1)) + protocol->store_null(); + else + { + for (;;) + { + ha_checksum row_crc= 0; + int error= t->file->rnd_next(t->record[0]); + if (unlikely(error)) + { + if (error == HA_ERR_RECORD_DELETED) + continue; + break; + } + if (t->record[0] != (byte*) t->field[0]->ptr) + row_crc= my_checksum(row_crc, t->record[0], + ((byte*) t->field[0]->ptr) - t->record[0]); + + for (uint i= 0; i < t->fields; i++ ) + { + Field *f= t->field[i]; + if (f->type() == FIELD_TYPE_BLOB) + { + String tmp; + f->val_str(&tmp); + row_crc= my_checksum(row_crc, (byte*) tmp.ptr(), tmp.length()); + } + else + row_crc= my_checksum(row_crc, (byte*) f->ptr, + f->pack_length()); + } + + crc+= row_crc; + } + protocol->store((ulonglong)crc); + t->file->ha_rnd_end(); + } + } + thd->clear_error(); + close_thread_tables(thd); + table->table=0; // For query cache + } + if (protocol->write()) + goto err; + } + + send_eof(thd); + DBUG_RETURN(0); + + err: + close_thread_tables(thd); // Shouldn't be needed + if (table) + table->table=0; + DBUG_RETURN(-1); +} diff --git a/sql/sql_test.cc b/sql/sql_test.cc index 297bb7a5dd3..d6afc888be2 
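[Editorial sketch] mysql_checksum_table() above walks every row, hashes fixed-length fields directly from the record buffer and BLOB columns by their current value, and sums the per-row CRCs into the table checksum. A standalone sketch of that accumulation; toy_checksum() is a trivial stand-in for my_checksum(), not the real CRC:

#include <cstdio>
#include <cstdint>
#include <string>
#include <vector>

// Toy stand-in for my_checksum(): just an accumulator, not the real algorithm.
static std::uint32_t toy_checksum(std::uint32_t crc, const void *data, size_t len)
{
  const unsigned char *p = static_cast<const unsigned char*>(data);
  while (len--)
    crc = crc * 31 + *p++;
  return crc;
}

struct Row { std::int32_t id; std::string blob; };

// Per-row CRC over every column, summed (not chained) into the table checksum.
static std::uint32_t table_checksum(const std::vector<Row> &rows)
{
  std::uint32_t crc = 0;
  for (const Row &row : rows)
  {
    std::uint32_t row_crc = 0;
    row_crc = toy_checksum(row_crc, &row.id, sizeof(row.id));          // fixed field
    row_crc = toy_checksum(row_crc, row.blob.data(), row.blob.size()); // BLOB by value
    crc += row_crc;
  }
  return crc;
}

int main()
{
  std::vector<Row> rows = { {1, "abc"}, {2, "de"} };
  std::printf("checksum: %u\n", table_checksum(rows));
  return 0;
}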
100644 --- a/sql/sql_test.cc +++ b/sql/sql_test.cc @@ -28,8 +28,22 @@ #include <sys/malloc.h> #endif -/* Intern key cache variables */ -extern "C" pthread_mutex_t THR_LOCK_keycache; +static const char *lock_descriptions[] = +{ + "No lock", + "Low priority read lock", + "Shared Read lock", + "High priority read lock", + "Read lock without concurrent inserts", + "Write lock that allows other writers", + "Write lock, but allow reading", + "Concurrent insert lock", + "Lock Used by delayed insert", + "Low priority write lock", + "High priority write lock", + "Highest priority write lock" +}; + #ifndef DBUG_OFF @@ -39,7 +53,7 @@ print_where(COND *cond,const char *info) if (cond) { char buff[256]; - String str(buff,(uint32) sizeof(buff)); + String str(buff,(uint32) sizeof(buff), system_charset_info); str.length(0); cond->print(&str); str.append('\0'); @@ -50,11 +64,8 @@ print_where(COND *cond,const char *info) DBUG_UNLOCK_FILE; } } - /* This is for debugging purposes */ -extern HASH open_cache; -extern TABLE *unused_tables; void print_cached_tables(void) { @@ -62,16 +73,16 @@ void print_cached_tables(void) TABLE *start_link,*lnk; VOID(pthread_mutex_lock(&LOCK_open)); - puts("DB Table Version Thread L.thread Open"); + puts("DB Table Version Thread L.thread Open Lock"); for (idx=unused=0 ; idx < open_cache.records ; idx++) { TABLE *entry=(TABLE*) hash_element(&open_cache,idx); - printf("%-14.14s %-32s%6ld%8ld%10ld%6d\n", + printf("%-14.14s %-32s%6ld%8ld%10ld%6d %s\n", entry->table_cache_key,entry->real_name,entry->version, entry->in_use ? entry->in_use->thread_id : 0L, entry->in_use ? entry->in_use->dbug_thread_id : 0L, - entry->db_stat ? 1 : 0); + entry->db_stat ? 1 : 0, entry->in_use ? lock_descriptions[(int)entry->reginfo.lock_type] : "Not in use"); if (!entry->in_use) unused++; } @@ -102,10 +113,11 @@ void print_cached_tables(void) } -void TEST_filesort(SORT_FIELD *sortorder,uint s_length, ha_rows special) +void TEST_filesort(SORT_FIELD *sortorder,uint s_length) { char buff[256],buff2[256]; - String str(buff,sizeof(buff)),out(buff2,sizeof(buff2)); + String str(buff,sizeof(buff),system_charset_info); + String out(buff2,sizeof(buff2),system_charset_info); const char *sep; DBUG_ENTER("TEST_filesort"); @@ -135,8 +147,6 @@ void TEST_filesort(SORT_FIELD *sortorder,uint s_length, ha_rows special) out.append('\0'); // Purify doesn't like c_ptr() DBUG_LOCK_FILE; VOID(fputs("\nInfo about FILESORT\n",DBUG_FILE)); - if (special) - fprintf(DBUG_FILE,"Records to sort: %lu\n",(ulong) special); fprintf(DBUG_FILE,"Sortorder: %s\n",out.ptr()); DBUG_UNLOCK_FILE; DBUG_VOID_RETURN; @@ -155,19 +165,21 @@ TEST_join(JOIN *join) { JOIN_TAB *tab=join->join_tab+i; TABLE *form=tab->table; - fprintf(DBUG_FILE,"%-16.16s type: %-7s q_keys: %4d refs: %d key: %d len: %d\n", + char key_map_buff[128]; + fprintf(DBUG_FILE,"%-16.16s type: %-7s q_keys: %s refs: %d key: %d len: %d\n", form->table_name, join_type_str[tab->type], - tab->keys, + tab->keys.print(key_map_buff), tab->ref.key_parts, tab->ref.key, tab->ref.key_length); if (tab->select) { + char buf[MAX_KEY/8+1]; if (tab->use_quick == 2) fprintf(DBUG_FILE, - " quick select checked for each record (keys: %d)\n", - (int) tab->select->quick_keys); + " quick select checked for each record (keys: %s)\n", + tab->select->quick_keys.print(buf)); else if (tab->select->quick) fprintf(DBUG_FILE," quick select used on key %s, length: %d\n", form->key_info[tab->select->quick->index].name, @@ -192,11 +204,149 @@ TEST_join(JOIN *join) #endif +typedef struct st_debug_lock +{ + ulong 
thread_id; + char table_name[FN_REFLEN]; + bool waiting; + const char *lock_text; + enum thr_lock_type type; +} TABLE_LOCK_INFO; + +static int dl_compare(TABLE_LOCK_INFO *a,TABLE_LOCK_INFO *b) +{ + if (a->thread_id > b->thread_id) + return 1; + if (a->thread_id < b->thread_id) + return -1; + if (a->waiting == b->waiting) + return 0; + else if (a->waiting) + return -1; + return 1; +} + + +static void push_locks_into_array(DYNAMIC_ARRAY *ar, THR_LOCK_DATA *data, + bool wait, const char *text) +{ + if (data) + { + TABLE *table=(TABLE *)data->debug_print_param; + if (table && table->tmp_table == NO_TMP_TABLE) + { + TABLE_LOCK_INFO table_lock_info; + table_lock_info.thread_id=table->in_use->thread_id; + memcpy(table_lock_info.table_name, table->table_cache_key, + table->key_length); + table_lock_info.table_name[strlen(table_lock_info.table_name)]='.'; + table_lock_info.waiting=wait; + table_lock_info.lock_text=text; + // lock_type is also obtainable from THR_LOCK_DATA + table_lock_info.type=table->reginfo.lock_type; + VOID(push_dynamic(ar,(gptr) &table_lock_info)); + } + } +} + + +/* + Regarding MERGE tables: + + For now, the best option is to use the common TABLE *pointer for all + cases; The drawback is that for MERGE tables we will see many locks + for the merge tables even if some of them are for individual tables. + + The way to solve this is to add to 'THR_LOCK' structure a pointer to + the filename and use this when printing the data. + (We can for now ignore this and just print the same name for all merge + table parts; Please add the above as a comment to the display_lock + function so that we can easily add this if we ever need this. +*/ + +static void display_table_locks(void) +{ + LIST *list; + DYNAMIC_ARRAY saved_table_locks; + + VOID(my_init_dynamic_array(&saved_table_locks,sizeof(TABLE_LOCK_INFO),open_cache.records + 20,50)); + VOID(pthread_mutex_lock(&THR_LOCK_lock)); + for (list= thr_lock_thread_list; list; list= list_rest(list)) + { + THR_LOCK *lock=(THR_LOCK*) list->data; + + VOID(pthread_mutex_lock(&lock->mutex)); + push_locks_into_array(&saved_table_locks, lock->write.data, FALSE, + "Locked - write"); + push_locks_into_array(&saved_table_locks, lock->write_wait.data, TRUE, + "Waiting - write"); + push_locks_into_array(&saved_table_locks, lock->read.data, FALSE, + "Locked - read"); + push_locks_into_array(&saved_table_locks, lock->read_wait.data, TRUE, + "Waiting - read"); + VOID(pthread_mutex_unlock(&lock->mutex)); + } + VOID(pthread_mutex_unlock(&THR_LOCK_lock)); + if (!saved_table_locks.elements) goto end; + + qsort((gptr) dynamic_element(&saved_table_locks,0,TABLE_LOCK_INFO *),saved_table_locks.elements,sizeof(TABLE_LOCK_INFO),(qsort_cmp) dl_compare); + freeze_size(&saved_table_locks); + + puts("\nThread database.table_name Locked/Waiting Lock_type\n"); + + unsigned int i; + for (i=0 ; i < saved_table_locks.elements ; i++) + { + TABLE_LOCK_INFO *dl_ptr=dynamic_element(&saved_table_locks,i,TABLE_LOCK_INFO*); + printf("%-8ld%-28.28s%-22s%s\n", + dl_ptr->thread_id,dl_ptr->table_name,dl_ptr->lock_text,lock_descriptions[(int)dl_ptr->type]); + } + puts("\n\n"); +end: + delete_dynamic(&saved_table_locks); +} + + +static int print_key_cache_status(const char *name, KEY_CACHE *key_cache) +{ + char llbuff1[22]; + char llbuff2[22]; + char llbuff3[22]; + char llbuff4[22]; + + if (!key_cache->key_cache_inited) + { + printf("%s: Not in use\n", name); + } + else + { + printf("%s\n\ +Buffer_size: %10lu\n\ +Block_size: %10lu\n\ +Division_limit: %10lu\n\ +Age_limit: %10lu\n\ +blocks used: 
%10lu\n\ +not flushed: %10lu\n\ +w_requests: %10s\n\ +writes: %10s\n\ +r_requests: %10s\n\ +reads: %10s\n\n", + name, + (ulong) key_cache->param_buff_size, key_cache->param_block_size, + key_cache->param_division_limit, key_cache->param_age_threshold, + key_cache->blocks_used,key_cache->global_blocks_changed, + llstr(key_cache->global_cache_w_requests,llbuff1), + llstr(key_cache->global_cache_write,llbuff2), + llstr(key_cache->global_cache_r_requests,llbuff3), + llstr(key_cache->global_cache_read,llbuff4)); + } + return 0; +} + + void mysql_print_status(THD *thd) { char current_dir[FN_REFLEN]; - char llbuff[22]; - printf("\nStatus information:\n\n"); my_getwd(current_dir, sizeof(current_dir),MYF(0)); printf("Current dir: %s\n", current_dir); @@ -213,17 +363,8 @@ void mysql_print_status(THD *thd) /* Print key cache status */ if (thd) thd->proc_info="key cache"; - pthread_mutex_lock(&THR_LOCK_keycache); - printf("key_cache status:\n\ -blocks used:%10lu\n\ -not flushed:%10lu\n", - _my_blocks_used, _my_blocks_changed); - printf("w_requests: %10s\n", llstr(_my_cache_w_requests, llbuff)); - printf("writes: %10s\n", llstr(_my_cache_write, llbuff)); - printf("r_requests: %10s\n", llstr(_my_cache_r_requests, llbuff)); - printf("reads: %10s\n", llstr(_my_cache_read, llbuff)); - pthread_mutex_unlock(&THR_LOCK_keycache); - + puts("\nKey caches:"); + process_key_caches(print_key_cache_status); if (thd) thd->proc_info="status"; pthread_mutex_lock(&LOCK_status); @@ -260,6 +401,7 @@ Next alarm time: %lu\n", alarm_info.max_used_alarms, alarm_info.next_alarm_time); #endif + display_table_locks(); fflush(stdout); if (thd) thd->proc_info="malloc"; diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc index 84156ff4022..0b84d1b5fb3 100644 --- a/sql/sql_udf.cc +++ b/sql/sql_udf.cc @@ -28,11 +28,12 @@ ** dynamic functions, so this shouldn't be a real problem. 
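[Editorial sketch] mysql_print_status() above now reports every named key cache by handing print_key_cache_status() to process_key_caches() instead of dumping the single global cache. A generic standalone sketch of that registry-plus-callback pattern; CacheStats and the sample caches are invented stand-ins for KEY_CACHE:

#include <cstdio>
#include <map>
#include <string>

// Invented stand-in for the server's per-cache statistics.
struct CacheStats { unsigned long buffer_size, blocks_used; };

static std::map<std::string, CacheStats> key_caches = {
  { "default", { 8UL * 1024 * 1024, 120 } },
  { "hot",     { 64UL * 1024 * 1024, 900 } },
};

// Call 'fn' once per named cache, the way process_key_caches() hands each
// KEY_CACHE to the status-printing callback.
static int process_key_caches(int (*fn)(const char *name, const CacheStats *cache))
{
  for (const auto &entry : key_caches)
    fn(entry.first.c_str(), &entry.second);
  return 0;
}

static int print_cache_status(const char *name, const CacheStats *cache)
{
  std::printf("%s\nBuffer_size: %10lu\nblocks used: %10lu\n\n",
              name, cache->buffer_size, cache->blocks_used);
  return 0;
}

int main()
{
  process_key_caches(print_cache_status);
  return 0;
}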
*/ -#ifdef __GNUC__ -#pragma implementation // gcc: implement sql_udf.h +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation // gcc: Class implementation #endif #include "mysql_priv.h" +#include <my_pthread.h> #ifdef HAVE_DLOPEN extern "C" @@ -70,10 +71,10 @@ extern "C" static bool initialized = 0; static MEM_ROOT mem; static HASH udf_hash; -static pthread_mutex_t THR_LOCK_udf; +static rw_lock_t THR_LOCK_udf; -static udf_func *add_udf(char *name, Item_result ret, +static udf_func *add_udf(LEX_STRING *name, Item_result ret, char *dl, Item_udftype typ); static void del_udf(udf_func *udf); static void *find_udf_dl(const char *dl); @@ -82,15 +83,15 @@ static char *init_syms(udf_func *tmp, char *nm) { char *end; - if (!((tmp->func= dlsym(tmp->dlhandle, tmp->name)))) - return tmp->name; + if (!((tmp->func= dlsym(tmp->dlhandle, tmp->name.str)))) + return tmp->name.str; - end=strmov(nm,tmp->name); + end=strmov(nm,tmp->name.str); if (tmp->type == UDFTYPE_AGGREGATE) { - (void)strmov(end, "_reset"); - if (!((tmp->func_reset= dlsym(tmp->dlhandle, nm)))) + (void)strmov(end, "_clear"); + if (!((tmp->func_clear= dlsym(tmp->dlhandle, nm)))) return nm; (void)strmov(end, "_add"); if (!((tmp->func_add= dlsym(tmp->dlhandle, nm)))) @@ -122,8 +123,8 @@ extern "C" byte* get_hash_key(const byte *buff,uint *length, my_bool not_used __attribute__((unused))) { udf_func *udf=(udf_func*) buff; - *length=(uint) udf->name_length; - return (byte*) udf->name; + *length=(uint) udf->name.length; + return (byte*) udf->name.str; } /* @@ -143,12 +144,12 @@ void udf_init() if (initialized) DBUG_VOID_RETURN; - pthread_mutex_init(&THR_LOCK_udf,MY_MUTEX_INIT_SLOW); - + my_rwlock_init(&THR_LOCK_udf,NULL); + init_sql_alloc(&mem, UDF_ALLOC_BLOCK_SIZE, 0); THD *new_thd = new THD; if (!new_thd || - hash_init(&udf_hash,32,0,0,get_hash_key, NULL, HASH_CASE_INSENSITIVE)) + hash_init(&udf_hash,system_charset_info,32,0,0,get_hash_key, NULL, 0)) { sql_print_error("Can't allocate memory for udf structures"); hash_free(&udf_hash); @@ -166,7 +167,7 @@ void udf_init() tables.lock_type = TL_READ; tables.db=new_thd->db; - if (open_and_lock_tables(new_thd, &tables)) + if (simple_open_n_lock_tables(new_thd, &tables)) { DBUG_PRINT("error",("Can't open udf table")); sql_print_error("Can't open the mysql.func table. 
Please run the mysql_install_db script to create it."); @@ -178,8 +179,10 @@ void udf_init() while (!(error = read_record_info.read_record(&read_record_info))) { DBUG_PRINT("info",("init udf record")); - char *name=get_field(&mem, table, 0); - char *dl_name= get_field(&mem, table, 2); + LEX_STRING name; + name.str=get_field(&mem, table->field[0]); + name.length = strlen(name.str); + char *dl_name= get_field(&mem, table->field[2]); bool new_dl=0; Item_udftype udftype=UDFTYPE_FUNCTION; if (table->fields >= 4) // New func table @@ -192,17 +195,18 @@ void udf_init() */ if (strchr(dl_name, '/') || IF_WIN(strchr(dl_name, '\\'),0) || - strlen(name) > NAME_LEN) + strlen(name.str) > NAME_LEN) { sql_print_error("Invalid row in mysql.func table for function '%.64s'", - name); + name.str); continue; } - if (!(tmp = add_udf(name,(Item_result) table->field[1]->val_int(), - dl_name, udftype))) + + if (!(tmp= add_udf(&name,(Item_result) table->field[1]->val_int(), + dl_name, udftype))) { - sql_print_error("Can't alloc memory for udf function: '%.64s'", name); + sql_print_error("Can't alloc memory for udf function: '%.64s'", name.str); continue; } @@ -212,8 +216,7 @@ void udf_init() if (!(dl = dlopen(tmp->dl, RTLD_NOW))) { /* Print warning to log */ - sql_print_error(ER(ER_CANT_OPEN_LIBRARY), - tmp->dl,errno,dlerror()); + sql_print_error(ER(ER_CANT_OPEN_LIBRARY), tmp->dl,errno,dlerror()); /* Keep the udf in the hash so that we can remove it later */ continue; } @@ -269,7 +272,7 @@ void udf_free() if (initialized) { initialized= 0; - pthread_mutex_destroy(&THR_LOCK_udf); + rwlock_destroy(&THR_LOCK_udf); } DBUG_VOID_RETURN; } @@ -290,10 +293,10 @@ static void del_udf(udf_func *udf) The functions will be automaticly removed when the least threads doesn't use it anymore */ - char *name= udf->name; - uint name_length=udf->name_length; - udf->name=(char*) "*"; - udf->name_length=1; + char *name= udf->name.str; + uint name_length=udf->name.length; + udf->name.str=(char*) "*"; + udf->name.length=1; hash_update(&udf_hash,(byte*) udf,(byte*) name,name_length); } DBUG_VOID_RETURN; @@ -303,7 +306,7 @@ static void del_udf(udf_func *udf) void free_udf(udf_func *udf) { DBUG_ENTER("free_udf"); - pthread_mutex_lock(&THR_LOCK_udf); + rw_wrlock(&THR_LOCK_udf); if (!--udf->usage_count) { /* @@ -315,7 +318,7 @@ void free_udf(udf_func *udf) if (!find_udf_dl(udf->dl)) dlclose(udf->dlhandle); } - pthread_mutex_unlock(&THR_LOCK_udf); + rw_unlock(&THR_LOCK_udf); DBUG_VOID_RETURN; } @@ -328,7 +331,11 @@ udf_func *find_udf(const char *name,uint length,bool mark_used) DBUG_ENTER("find_udf"); /* TODO: This should be changed to reader locks someday! */ - pthread_mutex_lock(&THR_LOCK_udf); + if (mark_used) + rw_wrlock(&THR_LOCK_udf); /* Called during fix_fields */ + else + rw_rdlock(&THR_LOCK_udf); /* Called during parsing */ + if ((udf=(udf_func*) hash_search(&udf_hash,(byte*) name, length ? 
length : (uint) strlen(name)))) { @@ -337,7 +344,7 @@ udf_func *find_udf(const char *name,uint length,bool mark_used) else if (mark_used) udf->usage_count++; } - pthread_mutex_unlock(&THR_LOCK_udf); + rw_unlock(&THR_LOCK_udf); DBUG_RETURN(udf); } @@ -362,7 +369,7 @@ static void *find_udf_dl(const char *dl) /* Assume that name && dl is already allocated */ -static udf_func *add_udf(char *name, Item_result ret, char *dl, +static udf_func *add_udf(LEX_STRING *name, Item_result ret, char *dl, Item_udftype type) { if (!name || !dl || !(uint) type || (uint) type > (uint) UDFTYPE_AGGREGATE) @@ -371,13 +378,12 @@ static udf_func *add_udf(char *name, Item_result ret, char *dl, if (!tmp) return 0; bzero((char*) tmp,sizeof(*tmp)); - tmp->name = name; - tmp->name_length=(uint) strlen(tmp->name); + tmp->name = *name; //dup !! tmp->dl = dl; tmp->returns = ret; tmp->type = type; tmp->usage_count=1; - if (hash_insert(&udf_hash,(byte*) tmp)) + if (my_hash_insert(&udf_hash,(byte*) tmp)) return 0; using_udf_functions=1; return tmp; @@ -396,7 +402,7 @@ int mysql_create_function(THD *thd,udf_func *udf) if (!initialized) { - send_error(&thd->net, ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES)); + send_error(thd, ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES)); DBUG_RETURN(1); } @@ -407,19 +413,19 @@ int mysql_create_function(THD *thd,udf_func *udf) */ if (strchr(udf->dl, '/') || IF_WIN(strchr(udf->dl, '\\'),0)) { - send_error(&thd->net, ER_UDF_NO_PATHS,ER(ER_UDF_NO_PATHS)); + send_error(thd, ER_UDF_NO_PATHS,ER(ER_UDF_NO_PATHS)); DBUG_RETURN(1); } - if (udf->name_length > NAME_LEN) + if (udf->name.length > NAME_LEN) { - net_printf(&thd->net, ER_TOO_LONG_IDENT,udf->name); + net_printf(thd, ER_TOO_LONG_IDENT,udf->name); DBUG_RETURN(1); } - pthread_mutex_lock(&THR_LOCK_udf); - if ((hash_search(&udf_hash,(byte*) udf->name, udf->name_length))) + rw_wrlock(&THR_LOCK_udf); + if ((hash_search(&udf_hash,(byte*) udf->name.str, udf->name.length))) { - net_printf(&thd->net, ER_UDF_EXISTS, udf->name); + net_printf(thd, ER_UDF_EXISTS, udf->name); goto err; } if (!(dl = find_udf_dl(udf->dl))) @@ -428,7 +434,7 @@ int mysql_create_function(THD *thd,udf_func *udf) { DBUG_PRINT("error",("dlopen of %s failed, error: %d (%s)", udf->dl,errno,dlerror())); - net_printf(&thd->net, ER_CANT_OPEN_LIBRARY, udf->dl, errno, dlerror()); + net_printf(thd, ER_CANT_OPEN_LIBRARY, udf->dl, errno, dlerror()); goto err; } new_dl=1; @@ -438,23 +444,22 @@ int mysql_create_function(THD *thd,udf_func *udf) char buf[NAME_LEN+16], *missing; if ((missing= init_syms(udf, buf))) { - net_printf(&thd->net, ER_CANT_FIND_DL_ENTRY, missing); + net_printf(thd, ER_CANT_FIND_DL_ENTRY, missing); goto err; } } - - udf->name=strdup_root(&mem,udf->name); + udf->name.str=strdup_root(&mem,udf->name.str); udf->dl=strdup_root(&mem,udf->dl); - if (!(u_d=add_udf(udf->name,udf->returns,udf->dl,udf->type))) + if (!(u_d=add_udf(&udf->name,udf->returns,udf->dl,udf->type))) { - send_error(&thd->net,0); // End of memory + send_error(thd,0); // End of memory goto err; } u_d->dlhandle = dl; u_d->func=udf->func; u_d->func_init=udf->func_init; u_d->func_deinit=udf->func_deinit; - u_d->func_reset=udf->func_reset; + u_d->func_clear=udf->func_clear; u_d->func_add=udf->func_add; /* create entry in mysql.func table */ @@ -466,10 +471,10 @@ int mysql_create_function(THD *thd,udf_func *udf) if (!(table = open_ltable(thd,&tables,TL_WRITE))) goto err; - restore_record(table,2); // Get default values for fields - table->field[0]->store(u_d->name, u_d->name_length); + 
restore_record(table,default_values); // Default values for fields + table->field[0]->store(u_d->name.str, u_d->name.length, system_charset_info); table->field[1]->store((longlong) u_d->returns); - table->field[2]->store(u_d->dl,(uint) strlen(u_d->dl)); + table->field[2]->store(u_d->dl,(uint) strlen(u_d->dl), system_charset_info); if (table->fields >= 4) // If not old func format table->field[3]->store((longlong) u_d->type); error = table->file->write_row(table->record[0]); @@ -477,22 +482,22 @@ int mysql_create_function(THD *thd,udf_func *udf) close_thread_tables(thd); if (error) { - net_printf(&thd->net, ER_ERROR_ON_WRITE, "mysql.func",error); + net_printf(thd, ER_ERROR_ON_WRITE, "mysql.func",error); del_udf(u_d); goto err; } - pthread_mutex_unlock(&THR_LOCK_udf); + rw_unlock(&THR_LOCK_udf); DBUG_RETURN(0); err: if (new_dl) dlclose(dl); - pthread_mutex_unlock(&THR_LOCK_udf); + rw_unlock(&THR_LOCK_udf); DBUG_RETURN(1); } -int mysql_drop_function(THD *thd,const char *udf_name) +int mysql_drop_function(THD *thd,const LEX_STRING *udf_name) { TABLE *table; TABLE_LIST tables; @@ -500,14 +505,14 @@ int mysql_drop_function(THD *thd,const char *udf_name) DBUG_ENTER("mysql_drop_function"); if (!initialized) { - send_error(&thd->net, ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES)); + send_error(thd, ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES)); DBUG_RETURN(1); } - pthread_mutex_lock(&THR_LOCK_udf); - if (!(udf=(udf_func*) hash_search(&udf_hash,(byte*) udf_name, - (uint) strlen(udf_name)))) + rw_wrlock(&THR_LOCK_udf); + if (!(udf=(udf_func*) hash_search(&udf_hash,(byte*) udf_name->str, + (uint) udf_name->length))) { - net_printf(&thd->net, ER_FUNCTION_NOT_DEFINED, udf_name); + net_printf(thd, ER_FUNCTION_NOT_DEFINED, udf_name->str); goto err; } del_udf(udf); @@ -523,8 +528,11 @@ int mysql_drop_function(THD *thd,const char *udf_name) tables.real_name= tables.alias= (char*) "func"; if (!(table = open_ltable(thd,&tables,TL_WRITE))) goto err; - if (!table->file->index_read_idx(table->record[0],0,(byte*) udf_name, - (uint) strlen(udf_name), + table->field[0]->store(udf_name->str, udf_name->length, system_charset_info); + table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); + if (!table->file->index_read_idx(table->record[0], 0, + (byte*) table->field[0]->ptr, + table->key_info[0].key_length, HA_READ_KEY_EXACT)) { int error; @@ -533,10 +541,10 @@ int mysql_drop_function(THD *thd,const char *udf_name) } close_thread_tables(thd); - pthread_mutex_unlock(&THR_LOCK_udf); + rw_unlock(&THR_LOCK_udf); DBUG_RETURN(0); err: - pthread_mutex_unlock(&THR_LOCK_udf); + rw_unlock(&THR_LOCK_udf); DBUG_RETURN(1); } diff --git a/sql/sql_udf.h b/sql/sql_udf.h index 1ee9c44ce48..ca00901ea67 100644 --- a/sql/sql_udf.h +++ b/sql/sql_udf.h @@ -17,7 +17,7 @@ /* This file defines structures needed by udf functions */ -#ifdef __GNUC__ +#ifdef USE_PRAGMA_INTERFACE #pragma interface #endif @@ -25,8 +25,7 @@ enum Item_udftype {UDFTYPE_FUNCTION=1,UDFTYPE_AGGREGATE}; typedef struct st_udf_func { - char *name; - int name_length; + LEX_STRING name; Item_result returns; Item_udftype type; char *dl; @@ -34,7 +33,7 @@ typedef struct st_udf_func void *func; void *func_init; void *func_deinit; - void *func_reset; + void *func_clear; void *func_add; ulong usage_count; } udf_func; @@ -50,23 +49,25 @@ class udf_handler :public Sql_alloc UDF_ARGS f_args; UDF_INIT initid; char *num_buffer; - uchar error; + uchar error, is_null; bool initialized; Item **args; public: table_map used_tables_cache; bool const_item_cache; + bool not_original; 
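[Editorial sketch] The UDF changes above replace the plain mutex guarding the UDF hash with a reader-writer lock: find_udf() takes only a read lock when checking existence during parsing and a write lock when it must bump usage_count, while CREATE/DROP FUNCTION always write-lock. A standalone POSIX sketch of that locking discipline around a shared registry; the registry and entry names here are invented:

#include <pthread.h>
#include <cstdio>
#include <map>
#include <string>

struct UdfEntry { std::string dl; unsigned long usage_count; };

static std::map<std::string, UdfEntry> udf_registry =
  { { "metaphon", { "udf_example.so", 0 } } };
static pthread_rwlock_t udf_lock = PTHREAD_RWLOCK_INITIALIZER;

// Lookup during parsing: a shared lock is enough, many threads may read at once.
static bool udf_exists(const std::string &name)
{
  pthread_rwlock_rdlock(&udf_lock);
  bool found = udf_registry.count(name) != 0;
  pthread_rwlock_unlock(&udf_lock);
  return found;
}

// Lookup that marks the function as used: it mutates usage_count, so take the
// exclusive lock, as the server does when mark_used is set.
static bool udf_find_and_use(const std::string &name)
{
  pthread_rwlock_wrlock(&udf_lock);
  auto it = udf_registry.find(name);
  bool found = (it != udf_registry.end());
  if (found)
    it->second.usage_count++;
  pthread_rwlock_unlock(&udf_lock);
  return found;
}

int main()
{
  std::printf("%d %d\n", udf_exists("metaphon"), udf_find_and_use("metaphon"));
  return 0;
}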
udf_handler(udf_func *udf_arg) :u_d(udf_arg), buffers(0), error(0), - initialized(0) + is_null(0), initialized(0), not_original(0) {} ~udf_handler(); - const char *name() const { return u_d ? u_d->name : "?"; } + const char *name() const { return u_d ? u_d->name.str : "?"; } Item_result result_type () const { return u_d ? u_d->returns : STRING_RESULT;} bool get_arguments(); bool fix_fields(THD *thd,struct st_table_list *tlist,Item_result_field *item, uint arg_count,Item **args); + void cleanup(); double val(my_bool *null_value) { if (get_arguments()) @@ -74,7 +75,6 @@ class udf_handler :public Sql_alloc *null_value=1; return 0.0; } - uchar is_null=0; double (*func)(UDF_INIT *, UDF_ARGS *, uchar *, uchar *)= (double (*)(UDF_INIT *, UDF_ARGS *, uchar *, uchar *)) u_d->func; double tmp=func(&initid, &f_args, &is_null, &error); @@ -93,7 +93,6 @@ class udf_handler :public Sql_alloc *null_value=1; return LL(0); } - uchar is_null=0; longlong (*func)(UDF_INIT *, UDF_ARGS *, uchar *, uchar *)= (longlong (*)(UDF_INIT *, UDF_ARGS *, uchar *, uchar *)) u_d->func; longlong tmp=func(&initid, &f_args, &is_null, &error); @@ -105,22 +104,15 @@ class udf_handler :public Sql_alloc *null_value=0; return tmp; } - void reset(my_bool *null_value) + void clear() { - uchar is_null=0; - if (get_arguments()) - { - *null_value=1; - return; - } - void (*func)(UDF_INIT *, UDF_ARGS *, uchar *, uchar *)= - (void (*)(UDF_INIT *, UDF_ARGS *, uchar *, uchar *)) u_d->func_reset; - func(&initid, &f_args, &is_null, &error); - *null_value= (my_bool) (is_null || error); + is_null= 0; + void (*func)(UDF_INIT *, uchar *, uchar *)= + (void (*)(UDF_INIT *, uchar *, uchar *)) u_d->func_clear; + func(&initid, &is_null, &error); } void add(my_bool *null_value) { - uchar is_null=0; if (get_arguments()) { *null_value=1; @@ -140,5 +132,5 @@ void udf_init(void),udf_free(void); udf_func *find_udf(const char *name, uint len=0,bool mark_used=0); void free_udf(udf_func *udf); int mysql_create_function(THD *thd,udf_func *udf); -int mysql_drop_function(THD *thd,const char *name); +int mysql_drop_function(THD *thd,const LEX_STRING *name); #endif diff --git a/sql/sql_union.cc b/sql/sql_union.cc index f9c21079851..0948602bbb4 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -1,4 +1,4 @@ -/* Copyright (C) 2000 MySQL AB +/* Copyright (C) 2000-2003 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -24,322 +24,652 @@ #include "mysql_priv.h" #include "sql_select.h" - -int mysql_union(THD *thd, LEX *lex,select_result *result) +int mysql_union(THD *thd, LEX *lex, select_result *result, + SELECT_LEX_UNIT *unit) { - SELECT_LEX *sl, *last_sl, *lex_sl; - ORDER *order; - List<Item> item_list; - TABLE *table; - int res; - ulonglong add_rows= 0; - ulong found_rows_for_union= lex->select_lex.options & OPTION_FOUND_ROWS; - ulong describe= lex->select_lex.options & SELECT_DESCRIBE; - TABLE_LIST result_table_list; - TABLE_LIST *first_table=(TABLE_LIST *)lex->select_lex.table_list.first; - TMP_TABLE_PARAM tmp_table_param; - select_union *union_result; - ha_rows examined_rows= 0; DBUG_ENTER("mysql_union"); + int res= 0; + if (!(res= unit->prepare(thd, result, SELECT_NO_UNLOCK, ""))) + res= unit->exec(); + res|= unit->cleanup(); + DBUG_RETURN(res); +} + + +/*************************************************************************** +** store records in temporary table for UNION +***************************************************************************/ + 
+select_union::select_union(TABLE *table_par) + :table(table_par) +{ + bzero((char*) &info,sizeof(info)); + /* + We can always use IGNORE because the temporary table will only + contain a unique key if we are using not using UNION ALL + */ + info.ignore= 1; +} + +select_union::~select_union() +{ +} + + +int select_union::prepare(List<Item> &list, SELECT_LEX_UNIT *u) +{ + unit= u; + return 0; +} + - /* Fix tables 'to-be-unioned-from' list to point at opened tables */ - last_sl= &lex->select_lex; - for (sl= last_sl; - sl && sl->linkage != NOT_A_SELECT; - last_sl=sl, sl=sl->next) +bool select_union::send_data(List<Item> &values) +{ + if (unit->offset_limit_cnt) + { // using limit offset,count + unit->offset_limit_cnt--; + return 0; + } + fill_record(table->field, values, 1); + if (thd->net.report_error || write_record(table,&info)) { - for (TABLE_LIST *cursor= (TABLE_LIST *)sl->table_list.first; - cursor; - cursor=cursor->next) + if (thd->net.last_errno == ER_RECORD_FILE_FULL) { - cursor->table= (my_reinterpret_cast(TABLE_LIST*) (cursor->table))->table; + thd->clear_error(); // do not report user about table overflow + if (create_myisam_from_heap(thd, table, &tmp_table_param, + info.last_errno, 1)) + return 1; } + else + return 1; } + return 0; +} - /* last_sel now points at the last select where the ORDER BY is stored */ - if (sl) - { - /* - The found SL is an extra SELECT_LEX argument that contains - the ORDER BY and LIMIT parameter for the whole UNION - */ - lex_sl= sl; - order= (ORDER *) lex_sl->order_list.first; - // This is done to eliminate unnecessary slowing down of the first query - if (!order || !describe) - last_sl->next=0; // Remove this extra element - } - else if (!last_sl->braces) - { - lex_sl= last_sl; // ORDER BY is here - order= (ORDER *) lex_sl->order_list.first; - } - else - { - lex_sl=0; - order=0; - } - - if (describe) + +bool select_union::send_eof() +{ + return 0; +} + + +bool select_union::flush() +{ + int error; + if ((error=table->file->extra(HA_EXTRA_NO_CACHE))) { - Item *item; - item_list.push_back(new Item_empty_string("table",NAME_LEN)); - item_list.push_back(new Item_empty_string("type",10)); - item_list.push_back(item=new Item_empty_string("possible_keys", - NAME_LEN*MAX_KEY)); - item->maybe_null=1; - item_list.push_back(item=new Item_empty_string("key",NAME_LEN)); - item->maybe_null=1; - item_list.push_back(item=new Item_int("key_len",0,3)); - item->maybe_null=1; - item_list.push_back(item=new Item_empty_string("ref", - NAME_LEN*MAX_REF_PARTS)); - item->maybe_null=1; - item_list.push_back(new Item_real("rows",0.0,0,10)); - item_list.push_back(new Item_empty_string("Extra",255)); + table->file->print_error(error,MYF(0)); + ::send_error(thd); + return 1; } - else + return 0; +} + + +/* + initialization procedures before fake_select_lex preparation() + + SYNOPSIS + st_select_lex_unit::init_prepare_fake_select_lex() + thd - thread handler + + RETURN + options of SELECT +*/ + +ulong +st_select_lex_unit::init_prepare_fake_select_lex(THD *thd) +{ + ulong options_tmp= thd->options | fake_select_lex->options; + thd->lex->current_select= fake_select_lex; + offset_limit_cnt= global_parameters->offset_limit; + select_limit_cnt= global_parameters->select_limit + + global_parameters->offset_limit; + + if (select_limit_cnt < global_parameters->select_limit) + select_limit_cnt= HA_POS_ERROR; // no limit + if (select_limit_cnt == HA_POS_ERROR) + options_tmp&= ~OPTION_FOUND_ROWS; + else if (found_rows_for_union && !thd->lex->describe) + options_tmp|= OPTION_FOUND_ROWS; + 
fake_select_lex->table_list.link_in_list((byte *)&result_table_list, + (byte **) + &result_table_list.next); + return options_tmp; +} + + +int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, + ulong additional_options, + const char *tmp_table_alias) +{ + SELECT_LEX *lex_select_save= thd_arg->lex->current_select; + SELECT_LEX *sl, *first_select; + select_result *tmp_result; + bool is_union; + TABLE *empty_table= 0; + DBUG_ENTER("st_select_lex_unit::prepare"); + + describe= test(additional_options & SELECT_DESCRIBE); + + /* + result object should be reassigned even if preparing already done for + max/min subquery (ALL/ANY optimization) + */ + result= sel_result; + + if (prepared) { - Item *item; - ORDER *orr; - List_iterator<Item> it(lex->select_lex.item_list); - TABLE_LIST *first_table= (TABLE_LIST*) lex->select_lex.table_list.first; - - /* Create a list of items that will be in the result set */ - while ((item= it++)) - if (item_list.push_back(item)) - DBUG_RETURN(-1); - if (setup_tables(first_table) || - setup_fields(thd,first_table,item_list,0,0,1)) - DBUG_RETURN(-1); - for (orr=order;orr;orr=orr->next) + if (describe) { - item=*orr->item; - if (((item->type() == Item::FIELD_ITEM) && ((class Item_field*)item)->table_name)) + /* fast reinit for EXPLAIN */ + for (sl= first_select_in_union(); sl; sl= sl->next_select()) { - my_error(ER_BAD_FIELD_ERROR,MYF(0),item->full_name(),"ORDER BY"); - DBUG_RETURN(-1); + sl->join->result= result; + select_limit_cnt= HA_POS_ERROR; + offset_limit_cnt= 0; + if (!sl->join->procedure && + result->prepare(sl->join->fields_list, this)) + { + DBUG_RETURN(1); + } + sl->join->select_options|= SELECT_DESCRIBE; + sl->join->reinit(); } } + DBUG_RETURN(0); } + prepared= 1; + res= 0; + + thd_arg->lex->current_select= sl= first_select= first_select_in_union(); + found_rows_for_union= first_select->options & OPTION_FOUND_ROWS; + is_union= test(first_select->next_select() || fake_select_lex); - bzero((char*) &tmp_table_param,sizeof(tmp_table_param)); - tmp_table_param.field_count=item_list.elements; - if (!(table=create_tmp_table(thd, &tmp_table_param, item_list, - (ORDER*) 0, !describe & !lex->union_option, - 1, 0, - (lex->select_lex.options | thd->options | - TMP_TABLE_ALL_COLUMNS)))) - DBUG_RETURN(-1); - table->file->extra(HA_EXTRA_WRITE_CACHE); - table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); - bzero((char*) &result_table_list,sizeof(result_table_list)); - result_table_list.db= (char*) ""; - result_table_list.real_name=result_table_list.alias= (char*) "union"; - result_table_list.table=table; - - if (!(union_result=new select_union(table))) + /* Global option */ + + if (is_union) { - res= -1; - goto exit; + if (!(tmp_result= union_result= new select_union(0))) + goto err; + union_result->tmp_table_param.init(); + if (describe) + tmp_result= sel_result; } - union_result->not_describe= !describe; - union_result->tmp_table_param=&tmp_table_param; - for (sl= &lex->select_lex; sl; sl=sl->next) + else + tmp_result= sel_result; + + for (;sl; sl= sl->next_select()) { - ha_rows records_at_start; - lex->select=sl; -#if MYSQL_VERSION_ID < 40100 - if (describe && sl->linkage == NOT_A_SELECT) - break; // Skip extra item in case of 'explain' -#endif - /* Don't use offset for the last union if there is no braces */ - if (sl != lex_sl) + bool can_skip_order_by; + sl->options|= SELECT_NO_UNLOCK; + JOIN *join= new JOIN(thd_arg, sl->item_list, + sl->options | thd_arg->options | additional_options, + tmp_result); + if (!join) + goto err; + + 
thd_arg->lex->current_select= sl; + offset_limit_cnt= sl->offset_limit; + select_limit_cnt= sl->select_limit+sl->offset_limit; + if (select_limit_cnt < sl->select_limit) + select_limit_cnt= HA_POS_ERROR; // no limit + + can_skip_order_by= is_union && + (!sl->braces || select_limit_cnt == HA_POS_ERROR); + + res= join->prepare(&sl->ref_pointer_array, + (TABLE_LIST*) sl->table_list.first, sl->with_wild, + sl->where, + (can_skip_order_by ? 0 : sl->order_list.elements) + + sl->group_list.elements, + can_skip_order_by ? + (ORDER*) 0 : (ORDER *)sl->order_list.first, + (ORDER*) sl->group_list.first, + sl->having, + (is_union ? (ORDER*) 0 : + (ORDER*) thd_arg->lex->proc_list.first), + sl, this); + /* There are no * in the statement anymore (for PS) */ + sl->with_wild= 0; + last_procedure= join->procedure; + if (res || thd_arg->is_fatal_error) + goto err; + if (sl == first_select) { - thd->offset_limit= sl->offset_limit; - thd->select_limit=sl->select_limit+sl->offset_limit; + /* + We need to create an empty table object. It is used + to create tmp_table fields in Item_type_holder. + The main reason of this is that we can't create + field object without table. + */ + DBUG_ASSERT(!empty_table); + empty_table= (TABLE*) thd->calloc(sizeof(TABLE)); + types.empty(); + List_iterator_fast<Item> it(sl->item_list); + Item *item_tmp; + while ((item_tmp= it++)) + { + /* Error's in 'new' will be detected after loop */ + types.push_back(new Item_type_holder(thd_arg, item_tmp)); + } + + if (thd_arg->is_fatal_error) + goto err; // out of memory } else { - thd->offset_limit= 0; - /* - We can't use LIMIT at this stage if we are using ORDER BY for the - whole query - */ - thd->select_limit= HA_POS_ERROR; - if (! sl->order_list.first) - thd->select_limit= sl->select_limit+sl->offset_limit; + if (types.elements != sl->item_list.elements) + { + my_message(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT, + ER(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT),MYF(0)); + goto err; + } + List_iterator_fast<Item> it(sl->item_list); + List_iterator_fast<Item> tp(types); + Item *type, *item_tmp; + while ((type= tp++, item_tmp= it++)) + { + if (((Item_type_holder*)type)->join_types(thd_arg, item_tmp)) + DBUG_RETURN(-1); + } } - if (thd->select_limit < sl->select_limit) - thd->select_limit= HA_POS_ERROR; // no limit + } + + if (is_union) + { + /* + Check that it was possible to aggregate + all collations together for UNION. + */ + List_iterator_fast<Item> tp(types); + Item_arena *arena= thd->current_arena; + Item *type; + ulong create_options; + while ((type= tp++)) + { + if (type->result_type() == STRING_RESULT && + type->collation.derivation == DERIVATION_NONE) + { + my_error(ER_CANT_AGGREGATE_NCOLLATIONS, MYF(0), "UNION"); + goto err; + } + } + + create_options= (first_select_in_union()->options | thd_arg->options | + TMP_TABLE_ALL_COLUMNS); /* - When using braces, SQL_CALC_FOUND_ROWS affects the whole query. - We don't calculate found_rows() per union part + Force the temporary table to be a MyISAM table if we're going to use + fullext functions (MATCH ... AGAINST .. IN BOOLEAN MODE) when reading + from it (this should be removed in 5.2 when fulltext search is moved + out of MyISAM). 
*/ - if (thd->select_limit == HA_POS_ERROR || sl->braces) - sl->options&= ~OPTION_FOUND_ROWS; - else + if (global_parameters->ftfunc_list->elements) + create_options= create_options | TMP_TABLE_FORCE_MYISAM; + + union_result->tmp_table_param.field_count= types.elements; + if (!(table= create_tmp_table(thd_arg, + &union_result->tmp_table_param, types, + (ORDER*) 0, (bool) union_distinct, 1, + create_options, HA_POS_ERROR, + (char *) tmp_table_alias))) + goto err; + table->file->extra(HA_EXTRA_WRITE_CACHE); + table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); + bzero((char*) &result_table_list, sizeof(result_table_list)); + result_table_list.db= (char*) ""; + result_table_list.real_name= result_table_list.alias= (char*) "union"; + result_table_list.table= table; + union_result->set_table(table); + + thd_arg->lex->current_select= lex_select_save; + if (!item_list.elements) { /* - We are doing an union without braces. In this case - SQL_CALC_FOUND_ROWS should be done on all sub parts + We're in statement prepare or in execution + of a conventional statement. */ - sl->options|= found_rows_for_union; - } + Item_arena *tmp_arena,backup; + tmp_arena= thd->change_arena_if_needed(&backup); + + Field **field; + for (field= table->field; *field; field++) + { + Item_field *item= new Item_field(*field); + if (!item || item_list.push_back(item)) + { + if (tmp_arena) + thd->restore_backup_item_arena(tmp_arena, &backup); + DBUG_RETURN(-1); + } + } + if (tmp_arena) + thd->restore_backup_item_arena(tmp_arena, &backup); + if (arena->is_stmt_prepare()) + { + /* prepare fake select to initialize it correctly */ + (void) init_prepare_fake_select_lex(thd); + if (!(fake_select_lex->join= new JOIN(thd, item_list, thd->options, + result))) + { + fake_select_lex->table_list.empty(); + DBUG_RETURN(-1); + } + fake_select_lex->item_list= item_list; - records_at_start= table->file->records; - res=mysql_select(thd, (describe && sl->linkage==NOT_A_SELECT) ? - first_table : (TABLE_LIST*) sl->table_list.first, - sl->item_list, - sl->where, - (sl->braces) ? (ORDER *)sl->order_list.first : - (ORDER *) 0, - (ORDER*) sl->group_list.first, - sl->having, - (ORDER*) NULL, - sl->options | thd->options | SELECT_NO_UNLOCK | - describe, - union_result); - if (res) - goto exit; - examined_rows+= thd->examined_row_count; - /* Needed for the following test and for records_at_start in next loop */ - table->file->info(HA_STATUS_VARIABLE); - if (found_rows_for_union & sl->options) + thd_arg->lex->current_select= fake_select_lex; + res= fake_select_lex->join-> + prepare(&fake_select_lex->ref_pointer_array, + (TABLE_LIST*) fake_select_lex->table_list.first, + 0, 0, + fake_select_lex->order_list.elements, + (ORDER*) fake_select_lex->order_list.first, + (ORDER*) NULL, NULL, + (ORDER*) NULL, + fake_select_lex, this); + fake_select_lex->table_list.empty(); + } + } + else if (arena->is_stmt_execute()) { /* - This is a union without braces. Remember the number of rows that could - also have been part of the result set. - We get this from the difference of between total number of possible - rows and actual rows added to the temporary table. + We're in execution of a prepared statement: reset field items + to point at fields from the created temporary table. 
*/ - add_rows+= (ulonglong) (thd->limit_found_rows - (table->file->records - - records_at_start)); + List_iterator_fast<Item> it(item_list); + for (Field **field= table->field; *field; field++) + { + Item_field *item_field= (Item_field*) it++; + DBUG_ASSERT(item_field); + item_field->reset_field(*field); + } } } - if (union_result->flush()) + + thd_arg->lex->current_select= lex_select_save; + + DBUG_RETURN(res || thd_arg->is_fatal_error ? 1 : 0); + +err: + thd_arg->lex->current_select= lex_select_save; + DBUG_RETURN(-1); +} + + +int st_select_lex_unit::exec() +{ + SELECT_LEX *lex_select_save= thd->lex->current_select; + SELECT_LEX *select_cursor=first_select_in_union(); + ulonglong add_rows=0; + ha_rows examined_rows= 0; + DBUG_ENTER("st_select_lex_unit::exec"); + + if (executed && !uncacheable && !describe) + DBUG_RETURN(0); + executed= 1; + + if (uncacheable || !item || !item->assigned() || describe) { - res= 1; // Error is already sent - goto exit; + if (item) + item->reset_value_registration(); + if (optimized && item) + { + if (item->assigned()) + { + item->assigned(0); // We will reinit & rexecute unit + item->reset(); + table->file->delete_all_rows(); + } + /* re-enabling indexes for next subselect iteration */ + if (union_distinct && table->file->enable_indexes(HA_KEY_SWITCH_ALL)) + DBUG_ASSERT(0); + } + for (SELECT_LEX *sl= select_cursor; sl; sl= sl->next_select()) + { + ha_rows records_at_start= 0; + thd->lex->current_select= sl; + + if (optimized) + res= sl->join->reinit(); + else + { + if (sl != global_parameters && !describe) + { + offset_limit_cnt= sl->offset_limit; + select_limit_cnt= sl->select_limit+sl->offset_limit; + } + else + { + offset_limit_cnt= 0; + /* + We can't use LIMIT at this stage if we are using ORDER BY for the + whole query + */ + if (sl->order_list.first || describe) + select_limit_cnt= HA_POS_ERROR; + else + select_limit_cnt= sl->select_limit+sl->offset_limit; + } + if (select_limit_cnt < sl->select_limit) + select_limit_cnt= HA_POS_ERROR; // no limit + + /* + When using braces, SQL_CALC_FOUND_ROWS affects the whole query: + we don't calculate found_rows() per union part. + Otherwise, SQL_CALC_FOUND_ROWS should be done on all sub parts. + */ + sl->join->select_options= + (select_limit_cnt == HA_POS_ERROR || sl->braces) ? + sl->options & ~OPTION_FOUND_ROWS : sl->options | found_rows_for_union; + res= sl->join->optimize(); + } + if (!res) + { + records_at_start= table->file->records; + sl->join->exec(); + if (sl == union_distinct) + { + if (table->file->disable_indexes(HA_KEY_SWITCH_ALL)) + DBUG_RETURN(1); + table->no_keyread=1; + } + res= sl->join->error; + offset_limit_cnt= sl->offset_limit; + if (!res) + { + examined_rows+= thd->examined_row_count; + if (union_result->flush()) + { + thd->lex->current_select= lex_select_save; + DBUG_RETURN(1); + } + } + } + if (res) + { + thd->lex->current_select= lex_select_save; + DBUG_RETURN(res); + } + /* Needed for the following test and for records_at_start in next loop */ + table->file->info(HA_STATUS_VARIABLE); + if (found_rows_for_union && !sl->braces && + select_limit_cnt != HA_POS_ERROR) + { + /* + This is a union without braces. Remember the number of rows that + could also have been part of the result set. + We get this from the difference of between total number of possible + rows and actual rows added to the temporary table. 
+ */ + add_rows+= (ulonglong) (thd->limit_found_rows - (ulonglong) + ((table->file->records - records_at_start))); + } + } } - delete union_result; + optimized= 1; /* Send result to 'result' */ - lex->select = &lex->select_lex; - res =-1; + res= -1; { - /* Create a list of fields in the temporary table */ - List_iterator<Item> it(item_list); - Field **field; - thd->lex.select_lex.ftfunc_list.empty(); + List<Item_func_match> empty_list; + empty_list.empty(); - for (field=table->field ; *field ; field++) - { - (void) it++; - (void) it.replace(new Item_field(*field)); - } - if (!thd->fatal_error) // Check if EOM + if (!thd->is_fatal_error) // Check if EOM { - if (lex_sl) + ulong options_tmp= init_prepare_fake_select_lex(thd); + JOIN *join= fake_select_lex->join; + if (!join) { - thd->offset_limit=lex_sl->offset_limit; - thd->select_limit=lex_sl->select_limit+lex_sl->offset_limit; - if (thd->select_limit < lex_sl->select_limit) - thd->select_limit= HA_POS_ERROR; // no limit - if (thd->select_limit == HA_POS_ERROR) - thd->options&= ~OPTION_FOUND_ROWS; + /* + allocate JOIN for fake select only once (prevent + mysql_select automatic allocation) + */ + if (!(fake_select_lex->join= new JOIN(thd, item_list, thd->options, + result))) + { + fake_select_lex->table_list.empty(); + DBUG_RETURN(-1); + } + + /* + Fake st_select_lex should have item list for correctref_array + allocation. + */ + fake_select_lex->item_list= item_list; } - else + else { - thd->offset_limit= 0; - thd->select_limit= thd->variables.select_limit; - if (found_rows_for_union && !describe) - thd->options|= OPTION_FOUND_ROWS; + JOIN_TAB *tab,*end; + for (tab=join->join_tab,end=tab+join->tables ; tab != end ; tab++) + { + delete tab->select; + delete tab->quick; + } + join->init(thd, item_list, thd->options, result); } - if (describe) - thd->select_limit= HA_POS_ERROR; // no limit - - res= mysql_select(thd,&result_table_list, - item_list, NULL, (describe) ? 
0 : order, - (ORDER*) NULL, NULL, (ORDER*) NULL, - thd->options, result); + res= mysql_select(thd, &fake_select_lex->ref_pointer_array, + &result_table_list, + 0, item_list, NULL, + global_parameters->order_list.elements, + (ORDER*)global_parameters->order_list.first, + (ORDER*) NULL, NULL, (ORDER*) NULL, + options_tmp | SELECT_NO_UNLOCK, + result, this, fake_select_lex); + + fake_select_lex->table_list.empty(); if (!res) { - thd->limit_found_rows= (ulonglong)table->file->records + add_rows; - thd->examined_row_count+= examined_rows; + thd->limit_found_rows = (ulonglong)table->file->records + add_rows; + thd->examined_row_count+= examined_rows; } + /* + Mark for slow query log if any of the union parts didn't use + indexes efficiently + */ } } - -exit: - free_tmp_table(thd,table); + thd->lex->current_select= lex_select_save; DBUG_RETURN(res); } -/*************************************************************************** -** store records in temporary table for UNION -***************************************************************************/ - -select_union::select_union(TABLE *table_par) - :table(table_par), not_describe(0) -{ - bzero((char*) &info,sizeof(info)); - /* - We can always use DUP_IGNORE because the temporary table will only - contain a unique key if we are using not using UNION ALL - */ - info.handle_duplicates=DUP_IGNORE; -} - -select_union::~select_union() +int st_select_lex_unit::cleanup() { -} + int error= 0; + DBUG_ENTER("st_select_lex_unit::cleanup"); - -int select_union::prepare(List<Item> &list) -{ - if (not_describe && list.elements != table->fields) + if (cleaned) { - my_message(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT, - ER(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT),MYF(0)); - return -1; + DBUG_RETURN(0); } - return 0; -} + cleaned= 1; -bool select_union::send_data(List<Item> &values) -{ - if (thd->offset_limit) - { // using limit offset,count - thd->offset_limit--; - return 0; + if (union_result) + { + delete union_result; + union_result=0; // Safety + if (table) + free_tmp_table(thd, table); + table= 0; // Safety } - - fill_record(table->field, values, 1); - if ((write_record(table,&info))) + JOIN *join; + SELECT_LEX *sl= first_select_in_union(); + for (; sl; sl= sl->next_select()) { - if (create_myisam_from_heap(thd, table, tmp_table_param, info.last_errno, - 1)) - return 1; + if ((join= sl->join)) + { + error|= sl->join->cleanup(); + delete join; + } + else + { + // it can be DO/SET with subqueries + for (SELECT_LEX_UNIT *lex_unit= sl->first_inner_unit(); + lex_unit != 0; + lex_unit= lex_unit->next_unit()) + { + error|= lex_unit->cleanup(); + } + } } - return 0; + if (fake_select_lex && (join= fake_select_lex->join)) + { + join->tables_list= 0; + join->tables= 0; + error|= join->cleanup(); + delete join; + } + DBUG_RETURN(error); } -bool select_union::send_eof() + +void st_select_lex_unit::reinit_exec_mechanism() { - return 0; + prepared= optimized= executed= 0; +#ifndef DBUG_OFF + if (first_select()->next_select()) + { + List_iterator_fast<Item> it(item_list); + Item *field; + while ((field= it++)) + { + /* + we can't cleanup here, because it broke link to temporary table field, + but have to drop fixed flag to allow next fix_field of this field + during re-executing + */ + field->fixed= 0; + } + } +#endif } -bool select_union::flush() + +/* + change select_result object of unit + + SYNOPSIS + st_select_lex_unit::change_result() + result new select_result object + old_result old select_result object + + RETURN + 0 - OK + -1 - error +*/ + +int 
st_select_lex_unit::change_result(select_subselect *result, + select_subselect *old_result) { - int error; - if ((error=table->file->extra(HA_EXTRA_NO_CACHE))) + int res= 0; + for (SELECT_LEX *sl= first_select_in_union(); sl; sl= sl->next_select()) { - table->file->print_error(error,MYF(0)); - ::send_error(&thd->net); - return 1; + if (sl->join && sl->join->result == old_result) + if ((res= sl->join->change_result(result))) + return (res); } - return 0; + if (fake_select_lex && fake_select_lex->join) + res= fake_select_lex->join->change_result(result); + return (res); } diff --git a/sql/sql_update.cc b/sql/sql_update.cc index 888b475ffce..48a8cf93917 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -21,7 +21,6 @@ */ #include "mysql_priv.h" -#include "sql_acl.h" #include "sql_select.h" static bool safe_update_on_fly(JOIN_TAB *join_tab, List<Item> *fields); @@ -31,7 +30,7 @@ static bool safe_update_on_fly(JOIN_TAB *join_tab, List<Item> *fields); static bool compare_record(TABLE *table, ulong query_id) { if (!table->blob_fields) - return cmp_record(table,1); + return cmp_record(table,record[1]); /* Compare null bits */ if (memcmp(table->null_flags, table->null_flags+table->rec_buff_length, @@ -53,49 +52,50 @@ int mysql_update(THD *thd, List<Item> &fields, List<Item> &values, COND *conds, - ORDER *order, + uint order_num, ORDER *order, ha_rows limit, - enum enum_duplicates handle_duplicates) + enum enum_duplicates handle_duplicates, + bool ignore) { - bool using_limit=limit != HA_POS_ERROR; + bool using_limit=limit != HA_POS_ERROR; bool safe_update= thd->options & OPTION_SAFE_UPDATES; bool used_key_is_modified, transactional_table, log_delayed; int error=0; - uint used_index, want_privilege; + uint used_index= MAX_KEY; + bool need_sort= TRUE; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + uint want_privilege; +#endif ulong query_id=thd->query_id, timestamp_query_id; ha_rows updated, found; key_map old_used_keys; TABLE *table; - SQL_SELECT *select; + SQL_SELECT *select= 0; READ_RECORD info; - TABLE_LIST tables; - List<Item> all_fields; + TABLE_LIST *update_table_list= ((TABLE_LIST*) + thd->lex->select_lex.table_list.first); DBUG_ENTER("mysql_update"); - LINT_INIT(used_index); LINT_INIT(timestamp_query_id); - if (!(table = open_ltable(thd,table_list,table_list->lock_type))) - DBUG_RETURN(-1); /* purecov: inspected */ - table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK); + if ((open_and_lock_tables(thd, table_list))) + DBUG_RETURN(-1); thd->proc_info="init"; + table= table_list->table; + table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK); /* Calculate "table->used_keys" based on the WHERE */ table->used_keys=table->keys_in_use; - table->quick_keys=0; - want_privilege=table->grant.want_privilege; - table->grant.want_privilege=(SELECT_ACL & ~table->grant.privilege); - - bzero((char*) &tables,sizeof(tables)); // For ORDER BY - tables.table= table; - tables.alias= table_list->alias; + table->quick_keys.clear_all(); - if (setup_tables(table_list) || setup_conds(thd,table_list,&conds) || - setup_order(thd, &tables, all_fields, all_fields, order) || - setup_ftfuncs(thd)) - DBUG_RETURN(-1); /* purecov: inspected */ - old_used_keys=table->used_keys; // Keys used in WHERE +#ifndef NO_EMBEDDED_ACCESS_CHECKS + want_privilege= table->grant.want_privilege; +#endif + if ((error= mysql_prepare_update(thd, table_list, update_table_list, + &conds, order_num, order))) + DBUG_RETURN(error); + old_used_keys= table->used_keys; // Keys used in WHERE /* Change the query_id for the timestamp column so that 
we can check if this is modified directly @@ -104,79 +104,105 @@ int mysql_update(THD *thd, { timestamp_query_id=table->timestamp_field->query_id; table->timestamp_field->query_id=thd->query_id-1; - table->time_stamp= table->timestamp_field->offset() +1; } /* Check the fields we are going to modify */ +#ifndef NO_EMBEDDED_ACCESS_CHECKS table->grant.want_privilege=want_privilege; - if (setup_fields(thd,table_list,fields,1,0,0)) +#endif + if (setup_fields(thd, 0, update_table_list, fields, 1, 0, 0)) DBUG_RETURN(-1); /* purecov: inspected */ if (table->timestamp_field) { // Don't set timestamp column if this is modified if (table->timestamp_field->query_id == thd->query_id) - table->time_stamp=0; + table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; else table->timestamp_field->query_id=timestamp_query_id; } +#ifndef NO_EMBEDDED_ACCESS_CHECKS /* Check values */ table->grant.want_privilege=(SELECT_ACL & ~table->grant.privilege); - if (setup_fields(thd,table_list,values,0,0,0)) +#endif + if (setup_fields(thd, 0, update_table_list, values, 1, 0, 0)) { + free_underlaid_joins(thd, &thd->lex->select_lex); DBUG_RETURN(-1); /* purecov: inspected */ } + if (conds) + { + Item::cond_result cond_value; + conds= remove_eq_conds(thd, conds, &cond_value); + if (cond_value == Item::COND_FALSE) + limit= 0; // Impossible WHERE + } // Don't count on usage of 'only index' when calculating which key to use - table->used_keys=0; - select=make_select(table,0,0,conds,&error); - if (error || - (select && select->check_quick(thd, safe_update, limit)) || !limit) + table->used_keys.clear_all(); + if (limit) + select=make_select(table,0,0,conds,&error); + if (error || !limit || + (select && select->check_quick(thd, safe_update, limit))) { delete select; + free_underlaid_joins(thd, &thd->lex->select_lex); if (error) { DBUG_RETURN(-1); // Error in where } - send_ok(&thd->net); // No matching records + send_ok(thd); // No matching records DBUG_RETURN(0); } + if (!select && limit != HA_POS_ERROR) + { + if ((used_index= get_index_for_order(table, order, limit)) != MAX_KEY) + need_sort= FALSE; + } /* If running in safe sql mode, don't allow updates without keys */ - if (!table->quick_keys) + if (table->quick_keys.is_clear_all()) { - thd->lex.select_lex.options|=QUERY_NO_INDEX_USED; + thd->server_status|=SERVER_QUERY_NO_INDEX_USED; if (safe_update && !using_limit) { - delete select; - send_error(&thd->net,ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE); - DBUG_RETURN(1); + my_message(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE, + ER(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE), MYF(0)); + goto err; } } - init_ftfuncs(thd,1); + init_ftfuncs(thd, &thd->lex->select_lex, 1); + /* Check if we are modifying a key that we are used to search with */ if (select && select->quick) + { + used_index=select->quick->index; used_key_is_modified= (!select->quick->unique_key_range() && - check_if_key_used(table, - (used_index=select->quick->index), - fields)); - else if ((used_index=table->file->key_used_on_scan) < MAX_KEY) - used_key_is_modified=check_if_key_used(table, used_index, fields); + check_if_key_used(table, used_index, fields)); + } else - used_key_is_modified=0; + { + used_key_is_modified= 0; + if (used_index == MAX_KEY) // no index for sort order + used_index= table->file->key_used_on_scan; + if (used_index != MAX_KEY) + used_key_is_modified= check_if_key_used(table, used_index, fields); + } + if (used_key_is_modified || order) { /* We can't update table directly; We must first search after all matching rows before updating the table! 
*/ - table->file->extra(HA_EXTRA_DONT_USE_CURSOR_TO_UPDATE); - if (old_used_keys & ((key_map) 1 << used_index)) + table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); + if (used_index < MAX_KEY && old_used_keys.is_set(used_index)) { table->key_read=1; table->file->extra(HA_EXTRA_KEYREAD); } - if (order) + /* note: can actually avoid sorting below.. */ + if (order && (need_sort || used_key_is_modified)) { /* Doing an ORDER BY; Let filesort find and sort the rows we are going @@ -186,13 +212,13 @@ int mysql_update(THD *thd, SORT_FIELD *sortorder; ha_rows examined_rows; - table->io_cache = (IO_CACHE *) my_malloc(sizeof(IO_CACHE), - MYF(MY_FAE | MY_ZEROFILL)); + table->sort.io_cache = (IO_CACHE *) my_malloc(sizeof(IO_CACHE), + MYF(MY_FAE | MY_ZEROFILL)); if (!(sortorder=make_unireg_sortorder(order, &length)) || - (table->found_records = filesort(table, sortorder, length, - select, 0L, - limit, &examined_rows)) == - HA_POS_ERROR) + (table->sort.found_records = filesort(thd, table, sortorder, length, + select, limit, + &examined_rows)) + == HA_POS_ERROR) { free_io_cache(table); goto err; @@ -217,12 +243,27 @@ int mysql_update(THD *thd, DISK_BUFFER_SIZE, MYF(MY_WME))) goto err; - init_read_record(&info,thd,table,select,0,1); + /* + When we get here, we have one of the following options: + A. used_index == MAX_KEY + This means we should use full table scan, and start it with + init_read_record call + B. used_index != MAX_KEY + B.1 quick select is used, start the scan with init_read_record + B.2 quick select is not used, this is full index scan (with LIMIT) + Full index scan must be started with init_read_record_idx + */ + if (used_index == MAX_KEY || (select && select->quick)) + init_read_record(&info,thd,table,select,0,1); + else + init_read_record_idx(&info, thd, table, 1, used_index); + thd->proc_info="Searching rows for update"; uint tmp_limit= limit; + while (!(error=info.read_record(&info)) && !thd->killed) { - if (!(select && select->skipp_record())) + if (!(select && select->skip_record())) { table->file->position(table->record[0]); if (my_b_write(&tempfile,table->file->ref, @@ -242,6 +283,7 @@ int mysql_update(THD *thd, error= 1; // Aborted limit= tmp_limit; end_read_record(&info); + /* Change select to use tempfile */ if (select) { @@ -269,22 +311,22 @@ int mysql_update(THD *thd, } } - if (handle_duplicates == DUP_IGNORE) + if (ignore) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); init_read_record(&info,thd,table,select,0,1); updated= found= 0; - thd->count_cuted_fields=1; /* calc cuted fields */ + thd->count_cuted_fields= CHECK_FIELD_WARN; /* calc cuted fields */ thd->cuted_fields=0L; thd->proc_info="Updating"; query_id=thd->query_id; while (!(error=info.read_record(&info)) && !thd->killed) { - if (!(select && select->skipp_record())) + if (!(select && select->skip_record())) { - store_record(table,1); - if (fill_record(fields, values, 0)) + store_record(table,record[1]); + if (fill_record(fields,values, 0) || thd->net.report_error) break; /* purecov: inspected */ found++; if (compare_record(table, query_id)) @@ -294,9 +336,9 @@ int mysql_update(THD *thd, { updated++; } - else if (handle_duplicates != DUP_IGNORE || - error != HA_ERR_FOUND_DUPP_KEY) + else if (!ignore || error != HA_ERR_FOUND_DUPP_KEY) { + thd->fatal_error(); // Force error message table->file->print_error(error,MYF(0)); error= 1; break; @@ -310,16 +352,26 @@ int mysql_update(THD *thd, } else table->file->unlock_row(); + thd->row_count++; } if (thd->killed && !error) error= 1; // Aborted end_read_record(&info); 
free_io_cache(table); // If ORDER BY + delete select; thd->proc_info="end"; VOID(table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY)); + + /* + Invalidate the table in the query cache if something changed. + This must be before binlog writing and ha_autocommit_... + */ + if (updated) + query_cache_invalidate3(thd, table_list, 1); + transactional_table= table->file->has_transactions(); log_delayed= (transactional_table || table->tmp_table); - if (updated && (error <= 0 || !transactional_table)) + if ((updated || (error < 0)) && (error <= 0 || !transactional_table)) { mysql_update_log.write(thd,thd->query,thd->query_length); if (mysql_bin_log.is_open()) @@ -327,52 +379,45 @@ int mysql_update(THD *thd, if (error <= 0) thd->clear_error(); Query_log_event qinfo(thd, thd->query, thd->query_length, - log_delayed); + log_delayed, FALSE); if (mysql_bin_log.write(&qinfo) && transactional_table) error=1; // Rollback update } if (!log_delayed) thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; } + free_underlaid_joins(thd, &thd->lex->select_lex); if (transactional_table) { if (ha_autocommit_or_rollback(thd, error >= 0)) error=1; } - /* - Store table for future invalidation or invalidate it in - the query cache if something changed - */ - if (updated) - { - query_cache_invalidate3(thd, table_list, 1); - } if (thd->lock) { mysql_unlock_tables(thd, thd->lock); thd->lock=0; } - delete select; if (error >= 0) - send_error(&thd->net,thd->killed ? ER_SERVER_SHUTDOWN : 0); /* purecov: inspected */ + send_error(thd,thd->killed ? ER_SERVER_SHUTDOWN : 0); /* purecov: inspected */ else { char buff[80]; - sprintf(buff,ER(ER_UPDATE_INFO), (long) found, (long) updated, - (long) thd->cuted_fields); - send_ok(&thd->net, + sprintf(buff, ER(ER_UPDATE_INFO), (ulong) found, (ulong) updated, + (ulong) thd->cuted_fields); + send_ok(thd, (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated, thd->insert_id_used ? 
thd->insert_id() : 0L,buff); DBUG_PRINT("info",("%d records updated",updated)); } - thd->count_cuted_fields=0; /* calc cuted fields */ + thd->count_cuted_fields= CHECK_FIELD_IGNORE; /* calc cuted fields */ free_io_cache(table); DBUG_RETURN(0); err: delete select; + free_underlaid_joins(thd, &thd->lex->select_lex); if (table->key_read) { table->key_read=0; @@ -381,6 +426,60 @@ err: DBUG_RETURN(-1); } +/* + Prepare items in UPDATE statement + + SYNOPSIS + mysql_prepare_update() + thd - thread handler + table_list - global table list + update_table_list - local table list of UPDATE SELECT_LEX + conds - conditions + order_num - number of ORDER BY list entries + order - ORDER BY clause list + + RETURN VALUE + 0 - OK + 1 - error (message is sent to user) + -1 - error (message is not sent to user) +*/ +int mysql_prepare_update(THD *thd, TABLE_LIST *table_list, + TABLE_LIST *update_table_list, + Item **conds, uint order_num, ORDER *order) +{ + TABLE *table= table_list->table; + TABLE_LIST tables; + List<Item> all_fields; + DBUG_ENTER("mysql_prepare_update"); + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + table->grant.want_privilege= (SELECT_ACL & ~table->grant.privilege); +#endif + + bzero((char*) &tables,sizeof(tables)); // For ORDER BY + tables.table= table; + tables.alias= table_list->alias; + thd->allow_sum_func= 0; + + if (setup_tables(update_table_list) || + setup_conds(thd, update_table_list, conds) || + thd->lex->select_lex.setup_ref_array(thd, order_num) || + setup_order(thd, thd->lex->select_lex.ref_pointer_array, + update_table_list, all_fields, all_fields, order) || + setup_ftfuncs(&thd->lex->select_lex)) + DBUG_RETURN(-1); + + /* Check that we are not using table that we are updating in a sub select */ + if (find_real_table_in_list(table_list->next, + table_list->db, table_list->real_name)) + { + my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->real_name); + DBUG_RETURN(-1); + } + + DBUG_RETURN(0); +} + /*************************************************************************** Update multiple tables from join @@ -411,89 +510,149 @@ static table_map get_table_map(List<Item> *items) int mysql_multi_update_lock(THD *thd, TABLE_LIST *table_list, - List<Item> *fields) + List<Item> *fields, + SELECT_LEX *select_lex) { int res; TABLE_LIST *tl; + TABLE_LIST *update_list= (TABLE_LIST*) thd->lex->select_lex.table_list.first; const bool using_lock_tables= thd->locked_tables != 0; + bool initialized_dervied= 0; DBUG_ENTER("mysql_multi_update_lock"); + /* + The following loop is here to to ensure that we only lock tables + that we are going to update with a write lock + */ for (;;) { - table_map update_map; - int tnr; - - if ((res= open_tables(thd, table_list))) + table_map update_tables, derived_tables=0; + uint tnr, table_count; + + if ((res=open_tables(thd, table_list, &table_count))) DBUG_RETURN(res); /* Only need to call lock_tables if we are not using LOCK TABLES */ - if (!using_lock_tables && ((res= lock_tables(thd, table_list)))) + if (!using_lock_tables && + ((res= lock_tables(thd, table_list, table_count)))) DBUG_RETURN(res); + if (!initialized_dervied) + { + initialized_dervied= 1; + relink_tables_for_derived(thd); + if ((res= mysql_handle_derived(thd->lex))) + DBUG_RETURN(res); + } + /* Ensure that we have update privilege for all tables and columns in the SET part - While we are here, initialize the table->map field. 
+ While we are here, initialize the table->map field to check which + tables are updated and updatability of derived tables */ - for (tl= table_list,tnr=0 ; tl ; tl=tl->next) + for (tl= update_list, tnr=0 ; tl ; tl=tl->next) { TABLE *table= tl->table; - table->grant.want_privilege= (UPDATE_ACL & ~table->grant.privilege); + /* + Update of derived tables is checked later + We don't check privileges here, becasue then we would get error + "UPDATE command denided .. for column N" instead of + "Target table ... is not updatable" + */ + if (!tl->derived) + table->grant.want_privilege= (UPDATE_ACL & ~table->grant.privilege); table->map= (table_map) 1 << (tnr++); } - if (setup_fields(thd, table_list, *fields, 1, 0, 0)) + if (setup_fields(thd, 0, update_list, *fields, 1, 0, 0)) DBUG_RETURN(-1); - update_map= get_table_map(fields); + update_tables= get_table_map(fields); /* Unlock the tables in preparation for relocking */ if (!using_lock_tables) - { - mysql_unlock_tables(thd, thd->lock); + { + mysql_unlock_tables(thd, thd->lock); thd->lock= 0; } /* - Set the table locking strategy according to the update map + Count tables and setup timestamp handling + Set also the table locking strategy according to the update map */ - for (tl= table_list ; tl ; tl=tl->next) + for (tl= update_list; tl; tl= tl->next) { TABLE_LIST *save= tl->next; TABLE *table= tl->table; uint wants; - tl->next= 0; - if (update_map & table->map) + /* if table will be updated then check that it is unique */ + if (table->map & update_tables) { + /* + Multi-update can't be constructed over-union => we always have + single SELECT on top and have to check underlaying SELECTs of it + */ + if (select_lex->check_updateable_in_subqueries(tl->db, + tl->real_name)) + { + my_error(ER_UPDATE_TABLE_USED, MYF(0), + tl->real_name); + DBUG_RETURN(-1); + } DBUG_PRINT("info",("setting table `%s` for update", tl->alias)); - tl->lock_type= thd->lex.lock_option; - tl->updating= 1; + tl->lock_type= thd->lex->multi_lock_option; + tl->updating= 1; // loacal or only list + if (tl->table_list) + tl->table_list->updating= 1; // global list (if we have 2 lists) wants= UPDATE_ACL; } else { - DBUG_PRINT("info",("setting table `%s` for read-only", tl->alias)); + DBUG_PRINT("info",("setting table `%s` for read-only", tl->alias)); // If we are using the binary log, we need TL_READ_NO_INSERT to get // correct order of statements. Otherwise, we use a TL_READ lock to // improve performance. tl->lock_type= using_update_log ? 
TL_READ_NO_INSERT : TL_READ; - tl->updating= 0; + tl->updating= 0; // loacal or only list + if (tl->table_list) + tl->table_list->updating= 0; // global list (if we have 2 lists) wants= SELECT_ACL; } - if (!using_lock_tables) - tl->table->reginfo.lock_type= tl->lock_type; - if (check_access(thd, wants, tl->db, &tl->grant.privilege, 0, 0) || - (grant_option && check_grant(thd, wants, tl, 0, 0))) + if (tl->derived) + derived_tables|= table->map; + else { - tl->next= save; - DBUG_RETURN(1); + tl->next= 0; + if (!using_lock_tables) + tl->table->reginfo.lock_type= tl->lock_type; + if (check_access(thd, wants, tl->db, &tl->grant.privilege, 0, 0) || + (grant_option && check_grant(thd, wants, tl, 0, 0, 0))) + { + tl->next= save; + DBUG_RETURN(1); + } + tl->next= save; + } + } + + if (thd->lex->derived_tables && (update_tables & derived_tables)) + { + // find derived table which cause error + for (tl= update_list; tl; tl= tl->next) + { + if (tl->derived && (update_tables & tl->table->map)) + { + my_printf_error(ER_NON_UPDATABLE_TABLE, ER(ER_NON_UPDATABLE_TABLE), + MYF(0), tl->alias, "UPDATE"); + DBUG_RETURN(-1); + } } - tl->next= save; } /* Relock the tables with the correct modes */ - res= lock_tables(thd,table_list); + res= lock_tables(thd, table_list, table_count); if (using_lock_tables) break; // Don't have to do setup_field() @@ -501,25 +660,23 @@ int mysql_multi_update_lock(THD *thd, We must setup fields again as the file may have been reopened during lock_tables */ - { List_iterator_fast<Item> field_it(*fields); Item_field *item; while ((item= (Item_field *) field_it++)) -#if MYSQL_VERSION < 40100 - item->field= item->result_field= 0; -#else + { + item->field->query_id= 0; item->cleanup(); -#endif + } } - if (setup_fields(thd, table_list, *fields, 1, 0, 0)) + if (setup_fields(thd, 0, update_list, *fields, 1, 0, 0)) DBUG_RETURN(-1); /* If lock succeded and the table map didn't change since the above lock we can continue. 
*/ - if (!res && update_map == get_table_map(fields)) + if (!res && update_tables == get_table_map(fields)) break; /* @@ -542,46 +699,40 @@ int mysql_multi_update(THD *thd, List<Item> *values, COND *conds, ulong options, - enum enum_duplicates handle_duplicates) + enum enum_duplicates handle_duplicates, bool ignore, + SELECT_LEX_UNIT *unit, SELECT_LEX *select_lex) { int res; TABLE_LIST *tl; + TABLE_LIST *update_list= (TABLE_LIST*) thd->lex->select_lex.table_list.first; + List<Item> total_list; multi_update *result; DBUG_ENTER("mysql_multi_update"); - thd->select_limit= HA_POS_ERROR; - - if ((res= mysql_multi_update_lock(thd, table_list, fields))) - DBUG_RETURN(res); - - /* - Count tables and setup timestamp handling - */ - for (tl= table_list ; tl ; tl=tl->next) + /* Setup timestamp handling */ + for (tl= update_list; tl; tl= tl->next) { TABLE *table= tl->table; + /* Only set timestamp column if this is not modified */ + if (table->timestamp_field && + table->timestamp_field->query_id == thd->query_id) + table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; /* We only need SELECT privilege for columns in the values list */ table->grant.want_privilege= (SELECT_ACL & ~table->grant.privilege); - if (table->timestamp_field) - { - table->time_stamp=0; - // Only set timestamp column if this is not modified - if (table->timestamp_field->query_id != thd->query_id) - table->time_stamp= table->timestamp_field->offset() +1; - } } - if (!(result=new multi_update(thd, table_list, fields, values, - handle_duplicates))) + if (!(result=new multi_update(thd, update_list, fields, values, + handle_duplicates, ignore))) DBUG_RETURN(-1); - List<Item> total_list; - res= mysql_select(thd,table_list,total_list, - conds, (ORDER *) NULL, (ORDER *)NULL, (Item *) NULL, + res= mysql_select(thd, &select_lex->ref_pointer_array, + select_lex->get_table_list(), select_lex->with_wild, + total_list, + conds, 0, (ORDER *) NULL, (ORDER *)NULL, (Item *) NULL, (ORDER *)NULL, options | SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK, - result); + result, unit, select_lex); delete result; DBUG_RETURN(res); } @@ -589,11 +740,11 @@ int mysql_multi_update(THD *thd, multi_update::multi_update(THD *thd_arg, TABLE_LIST *table_list, List<Item> *field_list, List<Item> *value_list, - enum enum_duplicates handle_duplicates_arg) + enum enum_duplicates handle_duplicates_arg, bool ignore_arg) :all_tables(table_list), update_tables(0), thd(thd_arg), tmp_tables(0), updated(0), found(0), fields(field_list), values(value_list), table_count(0), copy_field(0), handle_duplicates(handle_duplicates_arg), - do_update(1), trans_safe(0), transactional_tables(1) + do_update(1), trans_safe(0), transactional_tables(1), ignore(ignore_arg) {} @@ -601,7 +752,8 @@ multi_update::multi_update(THD *thd_arg, TABLE_LIST *table_list, Connect fields with tables and create list of tables that are updated */ -int multi_update::prepare(List<Item> ¬_used_values) +int multi_update::prepare(List<Item> ¬_used_values, + SELECT_LEX_UNIT *lex_unit) { TABLE_LIST *table_ref; SQL_LIST update; @@ -612,7 +764,7 @@ int multi_update::prepare(List<Item> ¬_used_values) uint i, max_fields; DBUG_ENTER("multi_update::prepare"); - thd->count_cuted_fields=1; + thd->count_cuted_fields= CHECK_FIELD_WARN; thd->cuted_fields=0L; thd->proc_info="updating main table"; @@ -620,8 +772,7 @@ int multi_update::prepare(List<Item> ¬_used_values) if (!tables_to_update) { - my_error(ER_NOT_SUPPORTED_YET, MYF(0), - "You didn't specify any tables to UPDATE"); + my_error(ER_NO_TABLES_USED, MYF(0)); DBUG_RETURN(1); } @@ 
-630,7 +781,7 @@ int multi_update::prepare(List<Item> ¬_used_values) reference tables */ - if (setup_fields(thd, all_tables, *values, 1,0,0)) + if (setup_fields(thd, 0, all_tables, *values, 1, 0, 0)) DBUG_RETURN(1); /* @@ -652,7 +803,7 @@ int multi_update::prepare(List<Item> ¬_used_values) update.link_in_list((byte*) tl, (byte**) &tl->next); tl->shared= table_count++; table->no_keyread=1; - table->used_keys=0; + table->used_keys.clear_all(); table->pos_in_table_list= tl; } } @@ -668,14 +819,14 @@ int multi_update::prepare(List<Item> ¬_used_values) table_count); values_for_table= (List_item **) thd->alloc(sizeof(List_item *) * table_count); - if (thd->fatal_error) + if (thd->is_fatal_error) DBUG_RETURN(1); for (i=0 ; i < table_count ; i++) { fields_for_table[i]= new List_item; values_for_table[i]= new List_item; } - if (thd->fatal_error) + if (thd->is_fatal_error) DBUG_RETURN(1); /* Split fields into fields_for_table[] and values_by_table[] */ @@ -687,7 +838,7 @@ int multi_update::prepare(List<Item> ¬_used_values) fields_for_table[offset]->push_back(item); values_for_table[offset]->push_back(value); } - if (thd->fatal_error) + if (thd->is_fatal_error) DBUG_RETURN(1); /* Allocate copy fields */ @@ -715,7 +866,7 @@ int multi_update::prepare(List<Item> ¬_used_values) mysql_lock_have_duplicate(thd, table, update_tables)) table->no_cache= 1; // Disable row cache } - DBUG_RETURN(thd->fatal_error != 0); + DBUG_RETURN(thd->is_fatal_error != 0); } @@ -746,7 +897,7 @@ multi_update::initialize_tables(JOIN *join) { TABLE *table=table_ref->table; uint cnt= table_ref->shared; - Item_field *If; + Item_field *ifield; List<Item> temp_fields= *fields_for_table[cnt]; ORDER group; @@ -769,11 +920,11 @@ multi_update::initialize_tables(JOIN *join) /* ok to be on stack as this is not referenced outside of this func */ Field_string offset(table->file->ref_length, 0, "offset", - table, 1); - if (!(If=new Item_field(((Field *) &offset)))) + table, &my_charset_bin); + if (!(ifield= new Item_field(((Field *) &offset)))) DBUG_RETURN(1); - If->maybe_null=0; - if (temp_fields.push_front(If)) + ifield->maybe_null= 0; + if (temp_fields.push_front(ifield)) DBUG_RETURN(1); /* Make an unique key over the first field to avoid duplicated updates */ @@ -788,8 +939,10 @@ multi_update::initialize_tables(JOIN *join) if (!(tmp_tables[cnt]=create_tmp_table(thd, tmp_param, temp_fields, - (ORDER*) &group, 0, 0, 0, - TMP_TABLE_ALL_COLUMNS))) + (ORDER*) &group, 0, 0, + TMP_TABLE_ALL_COLUMNS, + HA_POS_ERROR, + (char *) ""))) DBUG_RETURN(1); tmp_tables[cnt]->file->extra(HA_EXTRA_WRITE_CACHE); } @@ -850,7 +1003,6 @@ static bool safe_update_on_fly(JOIN_TAB *join_tab, List<Item> *fields) } - multi_update::~multi_update() { TABLE_LIST *table; @@ -870,7 +1022,7 @@ multi_update::~multi_update() } if (copy_field) delete [] copy_field; - thd->count_cuted_fields=0; // Restore this setting + thd->count_cuted_fields= CHECK_FIELD_IGNORE; // Restore this setting if (!trans_safe) thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; } @@ -904,8 +1056,8 @@ bool multi_update::send_data(List<Item> ¬_used_values) if (table == table_to_update) { table->status|= STATUS_UPDATED; - store_record(table,1); - if (fill_record(*fields_for_table[offset], *values_for_table[offset],0 )) + store_record(table,record[1]); + if (fill_record(*fields_for_table[offset], *values_for_table[offset], 0)) DBUG_RETURN(1); found++; if (compare_record(table, thd->query_id)) @@ -924,9 +1076,9 @@ bool multi_update::send_data(List<Item> ¬_used_values) table->record[0]))) { updated--; - if 
(handle_duplicates != DUP_IGNORE || - error != HA_ERR_FOUND_DUPP_KEY) + if (!ignore || error != HA_ERR_FOUND_DUPP_KEY) { + thd->fatal_error(); // Force error message table->file->print_error(error,MYF(0)); DBUG_RETURN(1); } @@ -938,22 +1090,23 @@ bool multi_update::send_data(List<Item> ¬_used_values) int error; TABLE *tmp_table= tmp_tables[offset]; fill_record(tmp_table->field+1, *values_for_table[offset], 1); - found++; /* Store pointer to row */ memcpy((char*) tmp_table->field[0]->ptr, (char*) table->file->ref, table->file->ref_length); /* Write row, ignoring duplicated updates to a row */ - if ((error= tmp_table->file->write_row(tmp_table->record[0])) && - (error != HA_ERR_FOUND_DUPP_KEY && - error != HA_ERR_FOUND_DUPP_UNIQUE)) + if (error= tmp_table->file->write_row(tmp_table->record[0])) { - if (create_myisam_from_heap(thd, tmp_table, tmp_table_param + offset, - error, 1)) + if (error != HA_ERR_FOUND_DUPP_KEY && + error != HA_ERR_FOUND_DUPP_UNIQUE && + create_myisam_from_heap(thd, tmp_table, + tmp_table_param + offset, error, 1)) { do_update=0; DBUG_RETURN(1); // Not a table_is_full error } } + else + found++; } } DBUG_RETURN(0); @@ -963,7 +1116,7 @@ bool multi_update::send_data(List<Item> ¬_used_values) void multi_update::send_error(uint errcode,const char *err) { /* First send error what ever it is ... */ - ::send_error(&thd->net,errcode,err); + ::send_error(thd,errcode,err); /* If nothing updated return */ if (!updated) @@ -992,25 +1145,23 @@ int multi_update::do_updates(bool from_send_error) TABLE_LIST *cur_table; int local_error; ha_rows org_updated; - TABLE *table; + TABLE *table, *tmp_table; DBUG_ENTER("do_updates"); - - do_update= 0; // Don't retry this function + do_update= 0; // Don't retry this function if (!found) DBUG_RETURN(0); for (cur_table= update_tables; cur_table ; cur_table= cur_table->next) { byte *ref_pos; - TABLE *tmp_table; - + table = cur_table->table; if (table == table_to_update) continue; // Already updated org_updated= updated; tmp_table= tmp_tables[cur_table->shared]; tmp_table->file->extra(HA_EXTRA_CACHE); // Change to read cache - (void) table->file->rnd_init(0); + (void) table->file->ha_rnd_init(0); table->file->extra(HA_EXTRA_NO_CACHE); /* @@ -1026,7 +1177,7 @@ int multi_update::do_updates(bool from_send_error) } copy_field_end=copy_field_ptr; - if ((local_error = tmp_table->file->rnd_init(1))) + if ((local_error = tmp_table->file->ha_rnd_init(1))) goto err; ref_pos= (byte*) tmp_table->field[0]->ptr; @@ -1045,7 +1196,7 @@ int multi_update::do_updates(bool from_send_error) if ((local_error= table->file->rnd_pos(table->record[0], ref_pos))) goto err; table->status|= STATUS_UPDATED; - store_record(table,1); + store_record(table,record[1]); /* Copy data from temporary table to current table */ for (copy_field_ptr=copy_field; @@ -1058,8 +1209,7 @@ int multi_update::do_updates(bool from_send_error) if ((local_error=table->file->update_row(table->record[1], table->record[0]))) { - if (local_error != HA_ERR_FOUND_DUPP_KEY || - handle_duplicates != DUP_IGNORE) + if (!ignore || local_error != HA_ERR_FOUND_DUPP_KEY) goto err; } updated++; @@ -1077,13 +1227,20 @@ int multi_update::do_updates(bool from_send_error) else trans_safe= 0; // Can't do safe rollback } - (void) table->file->rnd_end(); + (void) table->file->ha_rnd_end(); + (void) tmp_table->file->ha_rnd_end(); } DBUG_RETURN(0); err: if (!from_send_error) + { + thd->fatal_error(); table->file->print_error(local_error,MYF(0)); + } + + (void) table->file->ha_rnd_end(); + (void) tmp_table->file->ha_rnd_end(); 
if (updated != org_updated) { @@ -1109,10 +1266,20 @@ bool multi_update::send_eof() int local_error = (table_count) ? do_updates(0) : 0; thd->proc_info= "end"; + /* We must invalidate the query cache before binlog writing and + ha_autocommit_... */ + + if (updated) + { + query_cache_invalidate3(thd, update_tables, 1); + } + /* Write the SQL statement to the binlog if we updated rows and we succeeded or if we updated some non - transacational tables + transacational tables. + Note that if we updated nothing we don't write to the binlog (TODO: + fix this). */ if (updated && (local_error <= 0 || !trans_safe)) @@ -1123,7 +1290,7 @@ bool multi_update::send_eof() if (local_error <= 0) thd->clear_error(); Query_log_event qinfo(thd, thd->query, thd->query_length, - log_delayed); + log_delayed, FALSE); if (mysql_bin_log.write(&qinfo) && trans_safe) local_error= 1; // Rollback update } @@ -1142,18 +1309,14 @@ bool multi_update::send_eof() /* Safety: If we haven't got an error before (should not happen) */ my_message(ER_UNKNOWN_ERROR, "An error occured in multi-table update", MYF(0)); - ::send_error(&thd->net); + ::send_error(thd); return 1; } - sprintf(buff,ER(ER_UPDATE_INFO), (long) found, (long) updated, - (long) thd->cuted_fields); - if (updated) - { - query_cache_invalidate3(thd, update_tables, 1); - } - ::send_ok(&thd->net, + sprintf(buff, ER(ER_UPDATE_INFO), (ulong) found, (ulong) updated, + (ulong) thd->cuted_fields); + ::send_ok(thd, (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated, thd->insert_id_used ? thd->insert_id() : 0L,buff); return 0; diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 68d08b353d9..162b4183c84 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -17,26 +17,38 @@ /* sql_yacc.yy */ %{ +/* thd is passed as an arg to yyparse(), and subsequently to yylex(). +** The type will be void*, so it must be cast to (THD*) when used. +** Use the YYTHD macro for this. +*/ +#define YYPARSE_PARAM yythd +#define YYLEX_PARAM yythd +#define YYTHD ((THD *)yythd) + #define MYSQL_YACC #define YYINITDEPTH 100 #define YYMAXDEPTH 3200 /* Because of 64K stack */ -#define Lex current_lex -#define Select Lex->select +#define Lex (YYTHD->lex) +#define Select Lex->current_select #include "mysql_priv.h" #include "slave.h" -#include "sql_acl.h" #include "lex_symbol.h" +#include "item_create.h" #include <myisam.h> #include <myisammrg.h> -extern void yyerror(const char*); -int yylex(void *yylval); +int yylex(void *yylval, void *yythd); #define yyoverflow(A,B,C,D,E,F) {ulong val= *(F); if(my_yyoverflow((B), (D), &val)) { yyerror((char*) (A)); return 2; } else { *(F)= (YYSIZE_T)val; }} -inline Item *or_or_concat(Item* A, Item* B) +#define WARN_DEPRECATED(A,B) \ + push_warning_printf(((THD *)yythd), MYSQL_ERROR::WARN_LEVEL_WARN, \ + ER_WARN_DEPRECATED_SYNTAX, \ + ER(ER_WARN_DEPRECATED_SYNTAX), (A), (B)); + +inline Item *or_or_concat(THD *thd, Item* A, Item* B) { - return (current_thd->sql_mode & MODE_PIPES_AS_CONCAT ? + return (thd->variables.sql_mode & MODE_PIPES_AS_CONCAT ? 
(Item*) new Item_func_concat(A,B) : (Item*) new Item_cond_or(A,B)); } @@ -51,6 +63,7 @@ inline Item *or_or_concat(Item* A, Item* B) Table_ident *table; char *simple_string; Item *item; + Item_num *item_num; List<Item> *item_list; List<String> *string_list; String *string; @@ -58,16 +71,21 @@ inline Item *or_or_concat(Item* A, Item* B) TABLE_LIST *table_list; udf_func *udf; LEX_USER *lex_user; - sys_var *variable; + struct sys_var_with_base variable; Key::Keytype key_type; + enum ha_key_alg key_alg; enum db_type db_type; enum row_type row_type; enum ha_rkey_function ha_rkey_mode; enum enum_tx_isolation tx_isolation; - enum Item_cast cast_type; + enum Cast_target cast_type; enum Item_udftype udf_type; + CHARSET_INFO *charset; thr_lock_type lock_type; interval_type interval; + timestamp_type date_time_type; + st_select_lex *select_lex; + chooser_compare_func_creator boolfunc2creator; } %{ @@ -84,14 +102,17 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token NEXT_SYM %token PREV_SYM +%token DIV_SYM %token EQ %token EQUAL_SYM +%token SOUNDS_SYM %token GE %token GT_SYM %token LE %token LT %token NE %token IS +%token MOD_SYM %token SHIFT_LEFT %token SHIFT_RIGHT %token SET_VAR @@ -101,6 +122,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token AFTER_SYM %token ALTER %token ANALYZE_SYM +%token ANY_SYM %token AVG_SYM %token BEGIN_SYM %token BINLOG_SYM @@ -108,18 +130,22 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token CLIENT_SYM %token COMMENT_SYM %token COMMIT_SYM +%token CONSISTENT_SYM %token COUNT_SYM %token CREATE %token CROSS %token CUBE_SYM %token DELETE_SYM +%token DUAL_SYM %token DO_SYM %token DROP %token EVENTS_SYM %token EXECUTE_SYM +%token EXPANSION_SYM %token FLUSH_SYM +%token HELP_SYM %token INSERT -%token IO_THREAD +%token RELAY_THREAD %token KILL_SYM %token LOAD %token LOCKS_SYM @@ -139,20 +165,24 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token SELECT_SYM %token SHOW %token SLAVE +%token SNAPSHOT_SYM %token SQL_THREAD %token START_SYM %token STD_SYM +%token VARIANCE_SYM %token STOP_SYM %token SUM_SYM +%token ADDDATE_SYM %token SUPER_SYM %token TRUNCATE_SYM %token UNLOCK_SYM +%token UNTIL_SYM %token UPDATE_SYM %token ACTION %token AGGREGATE_SYM %token ALL -%token AND +%token AND_SYM %token AS %token ASC %token AUTO_INC @@ -164,7 +194,9 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token BOOL_SYM %token BOOLEAN_SYM %token BOTH +%token BTREE_SYM %token BY +%token BYTE_SYM %token CACHE_SYM %token CASCADE %token CAST_SYM @@ -172,11 +204,14 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token CHECKSUM_SYM %token CHECK_SYM %token COMMITTED_SYM +%token COLLATE_SYM +%token COLLATION_SYM %token COLUMNS %token COLUMN_SYM %token CONCURRENT %token CONSTRAINT %token CONVERT_SYM +%token CURRENT_USER %token DATABASES %token DATA_SYM %token DEFAULT @@ -186,7 +221,9 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token DESCRIBE %token DES_KEY_FILE %token DISABLE_SYM +%token DISCARD %token DISTINCT +%token DUPLICATE_SYM %token DYNAMIC_SYM %token ENABLE_SYM %token ENCLOSED @@ -195,6 +232,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token ESCAPE_SYM %token EXISTS %token EXTENDED_SYM +%token FALSE_SYM %token FILE_SYM %token FIRST_SYM %token FIXED_SYM @@ -210,13 +248,15 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token GREATEST_SYM %token GROUP %token HAVING -%token HEAP_SYM +%token HASH_SYM %token 
HEX_NUM %token HIGH_PRIORITY %token HOSTS_SYM %token IDENT +%token IDENT_QUOTED %token IGNORE_SYM -%token INDEX +%token IMPORT +%token INDEX_SYM %token INDEXES %token INFILE %token INNER_SYM @@ -224,12 +264,12 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token INTO %token IN_SYM %token ISOLATION -%token ISAM_SYM %token JOIN_SYM %token KEYS %token KEY_SYM %token LEADING %token LEAST_SYM +%token LEAVES %token LEVEL_SYM %token LEX_HOSTNAME %token LIKE @@ -248,6 +288,12 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token MASTER_PORT_SYM %token MASTER_CONNECT_RETRY_SYM %token MASTER_SERVER_ID_SYM +%token MASTER_SSL_SYM +%token MASTER_SSL_CA_SYM +%token MASTER_SSL_CAPATH_SYM +%token MASTER_SSL_CERT_SYM +%token MASTER_SSL_CIPHER_SYM +%token MASTER_SSL_KEY_SYM %token RELAY_LOG_FILE_SYM %token RELAY_LOG_POS_SYM %token MATCH @@ -256,24 +302,26 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token MAX_QUERIES_PER_HOUR %token MAX_UPDATES_PER_HOUR %token MEDIUM_SYM -%token MERGE_SYM -%token MEMORY_SYM %token MIN_ROWS -%token MYISAM_SYM +%token NAMES_SYM %token NATIONAL_SYM %token NATURAL +%token NDBCLUSTER_SYM %token NEW_SYM %token NCHAR_SYM +%token NCHAR_STRING +%token NVARCHAR_SYM %token NOT %token NO_SYM %token NULL_SYM %token NUM %token OFFSET_SYM %token ON +%token ONE_SHOT_SYM %token OPEN_SYM %token OPTION %token OPTIONALLY -%token OR +%token OR_SYM %token OR_OR_CONCAT %token ORDER_SYM %token OUTER @@ -306,24 +354,33 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token ROWS_SYM %token ROW_FORMAT_SYM %token ROW_SYM +%token RTREE_SYM %token SET +%token SEPARATOR_SYM +%token SERIAL_SYM %token SERIALIZABLE_SYM %token SESSION_SYM +%token SIMPLE_SYM %token SHUTDOWN -%token SSL_SYM +%token SPATIAL_SYM +%token SSL_SYM %token STARTING %token STATUS_SYM +%token STORAGE_SYM %token STRAIGHT_JOIN %token SUBJECT_SYM %token TABLES %token TABLE_SYM +%token TABLESPACE %token TEMPORARY %token TERMINATED %token TEXT_STRING %token TO_SYM %token TRAILING %token TRANSACTION_SYM +%token TRUE_SYM %token TYPE_SYM +%token TYPES_SYM %token FUNC_ARG0 %token FUNC_ARG1 %token FUNC_ARG2 @@ -332,21 +389,29 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token UDF_SONAME_SYM %token UDF_SYM %token UNCOMMITTED_SYM +%token UNDERSCORE_CHARSET +%token UNICODE_SYM %token UNION_SYM %token UNIQUE_SYM %token USAGE %token USE_FRM %token USE_SYM %token USING +%token VALUE_SYM %token VALUES %token VARIABLES %token WHERE %token WITH %token WRITE_SYM +%token NO_WRITE_TO_BINLOG %token X509_SYM -%token XOR +%token XOR %token COMPRESSED_SYM +%token ERRORS +%token WARNINGS + +%token ASCII_SYM %token BIGINT %token BLOB_SYM %token CHAR_SYM @@ -359,6 +424,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token ENUM %token FAST_SYM %token FLOAT_SYM +%token GEOMETRY_SYM %token INT_SYM %token LIMIT %token LONGBLOB @@ -368,6 +434,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token MEDIUMTEXT %token NUMERIC_SYM %token PRECISION +%token PREPARE_SYM +%token DEALLOCATE_SYM %token QUICK %token REAL %token SIGNED_SYM @@ -386,20 +454,24 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token VARYING %token ZEROFILL +%token ADDDATE_SYM %token AGAINST %token ATAN %token BETWEEN_SYM %token BIT_AND %token BIT_OR +%token BIT_XOR %token CASE_SYM %token CONCAT %token CONCAT_WS +%token CONVERT_TZ_SYM %token CURDATE %token CURTIME %token DATABASE %token DATE_ADD_INTERVAL %token DATE_SUB_INTERVAL %token 
DAY_HOUR_SYM +%token DAY_MICROSECOND_SYM %token DAY_MINUTE_SYM %token DAY_SECOND_SYM %token DAY_SYM @@ -409,6 +481,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token ELSE %token ELT_FUNC %token ENCODE_SYM +%token ENGINE_SYM +%token ENGINES_SYM %token ENCRYPT %token EXPORT_SET %token EXTRACT_SYM @@ -416,7 +490,14 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token FORMAT_SYM %token FOR_SYM %token FROM_UNIXTIME +%token GEOMCOLLFROMTEXT +%token GEOMFROMTEXT +%token GEOMFROMWKB +%token GEOMETRYCOLLECTION +%token GROUP_CONCAT_SYM %token GROUP_UNIQUE_USERS +%token GET_FORMAT +%token HOUR_MICROSECOND_SYM %token HOUR_MINUTE_SYM %token HOUR_SECOND_SYM %token HOUR_SYM @@ -426,16 +507,32 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token INTERVAL_SYM %token LAST_INSERT_ID %token LEFT +%token LINEFROMTEXT +%token LINESTRING %token LOCATE %token MAKE_SET_SYM %token MASTER_POS_WAIT +%token MICROSECOND_SYM +%token MINUTE_MICROSECOND_SYM %token MINUTE_SECOND_SYM %token MINUTE_SYM %token MODE_SYM %token MODIFY_SYM %token MONTH_SYM +%token MLINEFROMTEXT +%token MPOINTFROMTEXT +%token MPOLYFROMTEXT +%token MULTILINESTRING +%token MULTIPOINT +%token MULTIPOLYGON %token NOW_SYM +%token OLD_PASSWORD %token PASSWORD +%token PARAM_MARKER +%token POINTFROMTEXT +%token POINT_SYM +%token POLYFROMTEXT +%token POLYGON %token POSITION_SYM %token PROCEDURE %token RAND @@ -443,7 +540,9 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token RIGHT %token ROUND %token SECOND_SYM +%token SECOND_MICROSECOND_SYM %token SHARE_SYM +%token SUBDATE_SYM %token SUBSTRING %token SUBSTRING_INDEX %token TRIM @@ -456,6 +555,9 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token UNIQUE_USERS %token UNIX_TIMESTAMP %token USER +%token UTC_DATE_SYM +%token UTC_TIME_SYM +%token UTC_TIMESTAMP_SYM %token WEEK_SYM %token WHEN_SYM %token WORK_SYM @@ -477,43 +579,48 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token SUBJECT_SYM %token CIPHER_SYM +%token BEFORE_SYM %left SET_VAR -%left OR_OR_CONCAT OR XOR -%left AND +%left OR_OR_CONCAT OR_SYM XOR +%left AND_SYM %left BETWEEN_SYM CASE_SYM WHEN_SYM THEN_SYM ELSE %left EQ EQUAL_SYM GE GT_SYM LE LT NE IS LIKE REGEXP IN_SYM %left '|' %left '&' %left SHIFT_LEFT SHIFT_RIGHT %left '-' '+' -%left '*' '/' '%' -%left NEG '~' +%left '*' '/' '%' DIV_SYM MOD_SYM %left '^' +%left NEG '~' %right NOT -%right BINARY +%right BINARY COLLATE_SYM %type <lex_str> - IDENT TEXT_STRING REAL_NUM FLOAT_NUM NUM LONG_NUM HEX_NUM LEX_HOSTNAME - ULONGLONG_NUM field_ident select_alias ident ident_or_text + IDENT IDENT_QUOTED TEXT_STRING REAL_NUM FLOAT_NUM NUM LONG_NUM HEX_NUM + LEX_HOSTNAME ULONGLONG_NUM field_ident select_alias ident ident_or_text + UNDERSCORE_CHARSET IDENT_sys TEXT_STRING_sys TEXT_STRING_literal + NCHAR_STRING opt_component key_cache_name %type <lex_str_ptr> opt_table_alias %type <table> - table_ident table_ident_ref + table_ident table_ident_nodb references %type <simple_string> - remember_name remember_end opt_len opt_ident opt_db text_or_password - opt_escape + remember_name remember_end opt_ident opt_db text_or_password + opt_constraint constraint ident_or_empty %type <string> - text_string + text_string opt_gconcat_separator %type <num> type int_type real_type order_dir opt_field_spec lock_option udf_type if_exists opt_local opt_table_options table_options - table_option opt_if_not_exists opt_var_type opt_var_ident_type - opt_temporary + table_option opt_if_not_exists 
opt_no_write_to_binlog opt_var_type + opt_var_ident_type delete_option opt_temporary all_or_any opt_distinct + opt_ignore_leaves fulltext_options spatial_type union_option + start_transaction_opts %type <ulong_num> ULONG_NUM raid_types merge_insert_types @@ -527,14 +634,24 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %type <item> literal text_literal insert_ident order_ident simple_ident select_item2 expr opt_expr opt_else sum_expr in_sum_expr - table_wild opt_pad no_in_expr expr_expr simple_expr no_and_expr - using_list expr_or_default set_expr_or_default + table_wild no_in_expr expr_expr simple_expr no_and_expr + using_list expr_or_default set_expr_or_default interval_expr + param_marker singlerow_subselect singlerow_subselect_init + exists_subselect exists_subselect_init geometry_function + signed_literal now_or_signed_literal opt_escape + +%type <item_num> + NUM_literal %type <item_list> - expr_list udf_expr_list when_list ident_list ident_list_arg + expr_list udf_expr_list udf_sum_expr_list when_list ident_list + ident_list_arg %type <key_type> - key_type opt_unique_or_fulltext + key_type opt_unique_or_fulltext constraint_key_type + +%type <key_alg> + key_alg opt_btree_or_rtree %type <string_list> key_usage_list @@ -543,15 +660,16 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); key_part %type <table_list> - join_table_list join_table + join_table_list join_table %type <udf> UDF_CHAR_FUNC UDF_FLOAT_FUNC UDF_INT_FUNC UDA_CHAR_SUM UDA_FLOAT_SUM UDA_INT_SUM +%type <date_time_type> date_time_type; %type <interval> interval -%type <db_type> table_types +%type <db_type> storage_engines %type <row_type> row_types @@ -567,25 +685,39 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %type <lex_user> user grant_user +%type <charset> + opt_collate + charset_name + charset_name_or_default + old_or_new_charset_name + old_or_new_charset_name_or_default + collation_name + collation_name_or_default + %type <variable> internal_variable_name +%type <select_lex> in_subselect in_subselect_init + +%type <boolfunc2creator> comp_op + %type <NONE> query verb_clause create change select do drop insert replace insert2 insert_values update delete truncate rename - show describe load alter optimize flush + show describe load alter optimize keycache preload flush reset purge begin commit rollback savepoint - slave master_def master_defs - repair restore backup analyze check start + slave master_def master_defs master_file_def slave_until_opts + repair restore backup analyze check start checksum field_list field_list_item field_spec kill column_def key_def + keycache_list assign_to_keycache preload_list preload_keys select_item_list select_item values_list no_braces - limit_clause delete_limit_clause fields opt_values values + opt_limit_clause delete_limit_clause fields opt_values values procedure_list procedure_list2 procedure_item when_list2 expr_list2 handler opt_precision opt_ignore opt_column opt_restrict grant revoke set lock unlock string_list field_options field_option - field_opt_list opt_binary table_lock_list table_lock varchar - references opt_on_delete opt_on_delete_list opt_on_delete_item use - opt_delete_options opt_delete_option + field_opt_list opt_binary table_lock_list table_lock + ref_list opt_on_delete opt_on_delete_list opt_on_delete_item use + opt_delete_options opt_delete_option varchar nchar nvarchar opt_outer table_list table_name opt_option opt_place opt_attribute opt_attribute_list attribute column_list column_list_id 
opt_column_list grant_privileges opt_table user_list grant_option @@ -595,29 +727,34 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); opt_mi_check_type opt_to mi_check_types normal_join table_to_table_list table_to_table opt_table_list opt_as handler_rkey_function handler_read_or_scan - single_multi table_wild_list table_wild_one opt_wild opt_union union_list - precision union_option opt_and + single_multi table_wild_list table_wild_one opt_wild + union_clause union_list + precision subselect_start opt_and charset + subselect_end select_var_list select_var_list_init help opt_len + opt_extended_describe + prepare prepare_src execute deallocate END_OF_INPUT %type <NONE> '-' '+' '*' '/' '%' '(' ')' - ',' '!' '{' '}' '&' '|' AND OR OR_OR_CONCAT BETWEEN_SYM CASE_SYM THEN_SYM WHEN_SYM + ',' '!' '{' '}' '&' '|' AND_SYM OR_SYM OR_OR_CONCAT BETWEEN_SYM CASE_SYM + THEN_SYM WHEN_SYM DIV_SYM MOD_SYM %% query: END_OF_INPUT { - THD *thd=current_thd; + THD *thd= YYTHD; if (!thd->bootstrap && - (!(thd->lex.select_lex.options & OPTION_FOUND_COMMENT))) + (!(thd->lex->select_lex.options & OPTION_FOUND_COMMENT))) { - send_error(¤t_thd->net,ER_EMPTY_QUERY); + send_error(thd,ER_EMPTY_QUERY); YYABORT; - } + } else { - thd->lex.sql_command = SQLCOM_EMPTY_QUERY; + thd->lex->sql_command= SQLCOM_EMPTY_QUERY; } } | verb_clause END_OF_INPUT {}; @@ -629,22 +766,30 @@ verb_clause: | begin | change | check + | checksum | commit | create + | deallocate | delete | describe | do | drop + | execute + | flush | grant + | handler + | help | insert - | flush + | kill | load | lock - | kill | optimize - | purge + | keycache + | preload + | prepare + | purge | rename - | repair + | repair | replace | reset | restore @@ -653,14 +798,110 @@ verb_clause: | savepoint | select | set + | show | slave | start - | show | truncate - | handler | unlock | update - | use; + | use + ; + +deallocate: + deallocate_or_drop PREPARE_SYM ident + { + THD *thd=YYTHD; + LEX *lex= thd->lex; + if (thd->command == COM_PREPARE) + { + yyerror(ER(ER_SYNTAX_ERROR)); + YYABORT; + } + lex->sql_command= SQLCOM_DEALLOCATE_PREPARE; + lex->prepared_stmt_name= $3; + }; + +deallocate_or_drop: + DEALLOCATE_SYM | + DROP + ; + + +prepare: + PREPARE_SYM ident FROM prepare_src + { + THD *thd=YYTHD; + LEX *lex= thd->lex; + if (thd->command == COM_PREPARE) + { + yyerror(ER(ER_SYNTAX_ERROR)); + YYABORT; + } + lex->sql_command= SQLCOM_PREPARE; + lex->prepared_stmt_name= $2; + }; + +prepare_src: + TEXT_STRING_sys + { + THD *thd=YYTHD; + LEX *lex= thd->lex; + lex->prepared_stmt_code= $1; + lex->prepared_stmt_code_is_varref= FALSE; + } + | '@' ident_or_text + { + THD *thd=YYTHD; + LEX *lex= thd->lex; + lex->prepared_stmt_code= $2; + lex->prepared_stmt_code_is_varref= TRUE; + }; + +execute: + EXECUTE_SYM ident + { + THD *thd=YYTHD; + LEX *lex= thd->lex; + if (thd->command == COM_PREPARE) + { + yyerror(ER(ER_SYNTAX_ERROR)); + YYABORT; + } + lex->sql_command= SQLCOM_EXECUTE; + lex->prepared_stmt_name= $2; + } + execute_using + {} + ; + +execute_using: + /* nothing */ + | USING execute_var_list + ; + +execute_var_list: + execute_var_list ',' execute_var_ident + | execute_var_ident + ; + +execute_var_ident: '@' ident_or_text + { + LEX *lex=Lex; + LEX_STRING *lexstr= (LEX_STRING*)sql_memdup(&$2, sizeof(LEX_STRING)); + if (!lexstr || lex->prepared_stmt_params.push_back(lexstr)) + YYABORT; + } + ; + +/* help */ + +help: + HELP_SYM ident_or_text + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_HELP; + lex->help_arg= $2.str; + }; /* change master */ @@ -680,77 +921,105 
@@ master_defs: | master_defs ',' master_def; master_def: - MASTER_HOST_SYM EQ TEXT_STRING + MASTER_HOST_SYM EQ TEXT_STRING_sys { Lex->mi.host = $3.str; } | - MASTER_USER_SYM EQ TEXT_STRING + MASTER_USER_SYM EQ TEXT_STRING_sys { Lex->mi.user = $3.str; } | - MASTER_PASSWORD_SYM EQ TEXT_STRING + MASTER_PASSWORD_SYM EQ TEXT_STRING_sys { Lex->mi.password = $3.str; } | - MASTER_LOG_FILE_SYM EQ TEXT_STRING - { - Lex->mi.log_file_name = $3.str; - } - | MASTER_PORT_SYM EQ ULONG_NUM { Lex->mi.port = $3; } | - MASTER_LOG_POS_SYM EQ ulonglong_num - { - Lex->mi.pos = $3; - /* - If the user specified a value < BIN_LOG_HEADER_SIZE, adjust it - instead of causing subsequent errors. - We need to do it in this file, because only there we know that - MASTER_LOG_POS has been explicitely specified. On the contrary - in change_master() (sql_repl.cc) we cannot distinguish between 0 - (MASTER_LOG_POS explicitely specified as 0) and 0 (unspecified), - whereas we want to distinguish (specified 0 means "read the binlog - from 0" (4 in fact), unspecified means "don't change the position - (keep the preceding value)"). - */ - Lex->mi.pos = max(BIN_LOG_HEADER_SIZE, Lex->mi.pos); - } - | MASTER_CONNECT_RETRY_SYM EQ ULONG_NUM { Lex->mi.connect_retry = $3; } + | MASTER_SSL_SYM EQ ULONG_NUM + { + Lex->mi.ssl= $3 ? + LEX_MASTER_INFO::SSL_ENABLE : LEX_MASTER_INFO::SSL_DISABLE; + } + | MASTER_SSL_CA_SYM EQ TEXT_STRING_sys + { + Lex->mi.ssl_ca= $3.str; + } + | MASTER_SSL_CAPATH_SYM EQ TEXT_STRING_sys + { + Lex->mi.ssl_capath= $3.str; + } + | MASTER_SSL_CERT_SYM EQ TEXT_STRING_sys + { + Lex->mi.ssl_cert= $3.str; + } + | MASTER_SSL_CIPHER_SYM EQ TEXT_STRING_sys + { + Lex->mi.ssl_cipher= $3.str; + } + | MASTER_SSL_KEY_SYM EQ TEXT_STRING_sys + { + Lex->mi.ssl_key= $3.str; + } | - RELAY_LOG_FILE_SYM EQ TEXT_STRING + master_file_def + ; + +master_file_def: + MASTER_LOG_FILE_SYM EQ TEXT_STRING_sys { - Lex->mi.relay_log_name = $3.str; + Lex->mi.log_file_name = $3.str; } - | - RELAY_LOG_POS_SYM EQ ULONG_NUM - { - Lex->mi.relay_log_pos = $3; - /* Adjust if < BIN_LOG_HEADER_SIZE (same comment as Lex->mi.pos) */ - Lex->mi.relay_log_pos = max(BIN_LOG_HEADER_SIZE, Lex->mi.relay_log_pos); - }; - + | MASTER_LOG_POS_SYM EQ ulonglong_num + { + Lex->mi.pos = $3; + /* + If the user specified a value < BIN_LOG_HEADER_SIZE, adjust it + instead of causing subsequent errors. + We need to do it in this file, because only there we know that + MASTER_LOG_POS has been explicitely specified. On the contrary + in change_master() (sql_repl.cc) we cannot distinguish between 0 + (MASTER_LOG_POS explicitely specified as 0) and 0 (unspecified), + whereas we want to distinguish (specified 0 means "read the binlog + from 0" (4 in fact), unspecified means "don't change the position + (keep the preceding value)"). + */ + Lex->mi.pos = max(BIN_LOG_HEADER_SIZE, Lex->mi.pos); + } + | RELAY_LOG_FILE_SYM EQ TEXT_STRING_sys + { + Lex->mi.relay_log_name = $3.str; + } + | RELAY_LOG_POS_SYM EQ ULONG_NUM + { + Lex->mi.relay_log_pos = $3; + /* Adjust if < BIN_LOG_HEADER_SIZE (same comment as Lex->mi.pos) */ + Lex->mi.relay_log_pos = max(BIN_LOG_HEADER_SIZE, Lex->mi.relay_log_pos); + } + ; /* create a table */ create: CREATE opt_table_options TABLE_SYM opt_if_not_exists table_ident { + THD *thd= YYTHD; LEX *lex=Lex; lex->sql_command= SQLCOM_CREATE_TABLE; - if (!add_table_to_list($5, - ($2 & HA_LEX_CREATE_TMP_TABLE ? - &tmp_table_alias : (LEX_STRING*) 0), - TL_OPTION_UPDATING)) + if (!lex->select_lex.add_table_to_list(thd, $5, NULL, + TL_OPTION_UPDATING, + (using_update_log ? 
+ TL_READ_NO_INSERT: + TL_READ))) YYABORT; lex->create_list.empty(); lex->key_list.empty(); @@ -759,63 +1028,85 @@ create: bzero((char*) &lex->create_info,sizeof(lex->create_info)); lex->create_info.options=$2 | $4; lex->create_info.db_type= (enum db_type) lex->thd->variables.table_type; + lex->create_info.default_table_charset= NULL; + lex->name=0; } create2 - {Lex->select= &Lex->select_lex;} - | CREATE opt_unique_or_fulltext INDEX ident ON table_ident + { Lex->current_select= &Lex->select_lex; } + | CREATE opt_unique_or_fulltext INDEX_SYM ident key_alg ON table_ident { LEX *lex=Lex; lex->sql_command= SQLCOM_CREATE_INDEX; - if (!add_table_to_list($6, NULL, TL_OPTION_UPDATING)) + if (!lex->current_select->add_table_to_list(lex->thd, $7, NULL, + TL_OPTION_UPDATING)) YYABORT; lex->create_list.empty(); lex->key_list.empty(); lex->col_list.empty(); lex->change=NullS; } - '(' key_list ')' + '(' key_list ')' { LEX *lex=Lex; - lex->key_list.push_back(new Key($2,$4.str,lex->col_list)); + + lex->key_list.push_back(new Key($2,$4.str, $5, 0, lex->col_list)); lex->col_list.empty(); } | CREATE DATABASE opt_if_not_exists ident { + Lex->create_info.default_table_charset= NULL; + Lex->create_info.used_fields= 0; + } + opt_create_database_options + { LEX *lex=Lex; lex->sql_command=SQLCOM_CREATE_DB; lex->name=$4.str; lex->create_info.options=$3; } - | CREATE udf_func_type UDF_SYM ident + | CREATE udf_func_type UDF_SYM IDENT_sys { LEX *lex=Lex; lex->sql_command = SQLCOM_CREATE_FUNCTION; - lex->udf.name=$4.str; - lex->udf.name_length=$4.length; + lex->udf.name = $4; lex->udf.type= $2; } - UDF_RETURNS_SYM udf_type UDF_SONAME_SYM TEXT_STRING + UDF_RETURNS_SYM udf_type UDF_SONAME_SYM TEXT_STRING_sys { LEX *lex=Lex; lex->udf.returns=(Item_result) $7; lex->udf.dl=$9.str; - }; + } + ; create2: - '(' create2a {} - | opt_create_table_options create3 {}; + '(' create2a {} + | opt_create_table_options create3 {} + | LIKE table_ident + { + LEX *lex=Lex; + if (!(lex->name= (char *)$2)) + YYABORT; + } + | '(' LIKE table_ident ')' + { + LEX *lex=Lex; + if (!(lex->name= (char *)$3)) + YYABORT; + } + ; create2a: field_list ')' opt_create_table_options create3 {} - | create_select ')' { Select->braces= 1;} union_opt {} + | create_select ')' { Select->set_braces(1);} union_opt {} ; create3: /* empty */ {} | opt_duplicate opt_as create_select - { Select->braces= 0;} opt_union {} + { Select->set_braces(0);} union_clause {} | opt_duplicate opt_as '(' create_select ')' - { Select->braces= 1;} union_opt {} + { Select->set_braces(1);} union_opt {} ; create_select: @@ -827,17 +1118,34 @@ create_select: lex->sql_command= SQLCOM_INSERT_SELECT; else if (lex->sql_command == SQLCOM_REPLACE) lex->sql_command= SQLCOM_REPLACE_SELECT; - lex->select->table_list.save_and_clear(&lex->save_list); + lex->current_select->table_list.save_and_clear(&lex->save_list); mysql_init_select(lex); + lex->current_select->parsing_place= SELECT_LIST; } - select_options select_item_list opt_select_from - { Lex->select->table_list.push_front(&Lex->save_list); } - ; + select_options select_item_list + { + Select->parsing_place= NO_MATTER; + } + opt_select_from + { Lex->current_select->table_list.push_front(&Lex->save_list); } + ; opt_as: /* empty */ {} | AS {}; +opt_create_database_options: + /* empty */ {} + | create_database_options {}; + +create_database_options: + create_database_option {} + | create_database_options create_database_option {}; + +create_database_option: + default_collation {} + | default_charset {}; + opt_table_options: /* empty */ { $$= 0; } 
| table_options { $$= $1;}; @@ -857,53 +1165,94 @@ opt_create_table_options: /* empty */ | create_table_options; +create_table_options_space_separated: + create_table_option + | create_table_option create_table_options_space_separated; + create_table_options: create_table_option - | create_table_option create_table_options; + | create_table_option create_table_options + | create_table_option ',' create_table_options; create_table_option: - TYPE_SYM EQ table_types { Lex->create_info.db_type= $3; } - | MAX_ROWS EQ ulonglong_num { Lex->create_info.max_rows= $3; Lex->create_info.used_fields|= HA_CREATE_USED_MAX_ROWS;} - | MIN_ROWS EQ ulonglong_num { Lex->create_info.min_rows= $3; Lex->create_info.used_fields|= HA_CREATE_USED_MIN_ROWS;} - | AVG_ROW_LENGTH EQ ULONG_NUM { Lex->create_info.avg_row_length=$3; Lex->create_info.used_fields|= HA_CREATE_USED_AVG_ROW_LENGTH;} - | PASSWORD EQ TEXT_STRING { Lex->create_info.password=$3.str; } - | COMMENT_SYM EQ TEXT_STRING { Lex->create_info.comment=$3.str; } - | AUTO_INC EQ ulonglong_num { Lex->create_info.auto_increment_value=$3; Lex->create_info.used_fields|= HA_CREATE_USED_AUTO;} - | PACK_KEYS_SYM EQ ULONG_NUM { Lex->create_info.table_options|= $3 ? HA_OPTION_PACK_KEYS : HA_OPTION_NO_PACK_KEYS; Lex->create_info.used_fields|= HA_CREATE_USED_PACK_KEYS;} - | PACK_KEYS_SYM EQ DEFAULT { Lex->create_info.table_options&= ~(HA_OPTION_PACK_KEYS | HA_OPTION_NO_PACK_KEYS); Lex->create_info.used_fields|= HA_CREATE_USED_PACK_KEYS;} - | CHECKSUM_SYM EQ ULONG_NUM { Lex->create_info.table_options|= $3 ? HA_OPTION_CHECKSUM : HA_OPTION_NO_CHECKSUM; } - | DELAY_KEY_WRITE_SYM EQ ULONG_NUM { Lex->create_info.table_options|= $3 ? HA_OPTION_DELAY_KEY_WRITE : HA_OPTION_NO_DELAY_KEY_WRITE; } - | ROW_FORMAT_SYM EQ row_types { Lex->create_info.row_type= $3; } - | RAID_TYPE EQ raid_types { Lex->create_info.raid_type= $3; Lex->create_info.used_fields|= HA_CREATE_USED_RAID;} - | RAID_CHUNKS EQ ULONG_NUM { Lex->create_info.raid_chunks= $3; Lex->create_info.used_fields|= HA_CREATE_USED_RAID;} - | RAID_CHUNKSIZE EQ ULONG_NUM { Lex->create_info.raid_chunksize= $3*RAID_BLOCK_SIZE; Lex->create_info.used_fields|= HA_CREATE_USED_RAID;} - | UNION_SYM EQ '(' table_list ')' + ENGINE_SYM opt_equal storage_engines { Lex->create_info.db_type= $3; } + | TYPE_SYM opt_equal storage_engines { Lex->create_info.db_type= $3; WARN_DEPRECATED("TYPE=storage_engine","ENGINE=storage_engine"); } + | MAX_ROWS opt_equal ulonglong_num { Lex->create_info.max_rows= $3; Lex->create_info.used_fields|= HA_CREATE_USED_MAX_ROWS;} + | MIN_ROWS opt_equal ulonglong_num { Lex->create_info.min_rows= $3; Lex->create_info.used_fields|= HA_CREATE_USED_MIN_ROWS;} + | AVG_ROW_LENGTH opt_equal ULONG_NUM { Lex->create_info.avg_row_length=$3; Lex->create_info.used_fields|= HA_CREATE_USED_AVG_ROW_LENGTH;} + | PASSWORD opt_equal TEXT_STRING_sys { Lex->create_info.password=$3.str; } + | COMMENT_SYM opt_equal TEXT_STRING_sys { Lex->create_info.comment=$3.str; } + | AUTO_INC opt_equal ulonglong_num { Lex->create_info.auto_increment_value=$3; Lex->create_info.used_fields|= HA_CREATE_USED_AUTO;} + | PACK_KEYS_SYM opt_equal ULONG_NUM { Lex->create_info.table_options|= $3 ? HA_OPTION_PACK_KEYS : HA_OPTION_NO_PACK_KEYS; Lex->create_info.used_fields|= HA_CREATE_USED_PACK_KEYS;} + | PACK_KEYS_SYM opt_equal DEFAULT { Lex->create_info.table_options&= ~(HA_OPTION_PACK_KEYS | HA_OPTION_NO_PACK_KEYS); Lex->create_info.used_fields|= HA_CREATE_USED_PACK_KEYS;} + | CHECKSUM_SYM opt_equal ULONG_NUM { Lex->create_info.table_options|= $3 ? 
HA_OPTION_CHECKSUM : HA_OPTION_NO_CHECKSUM; } + | DELAY_KEY_WRITE_SYM opt_equal ULONG_NUM { Lex->create_info.table_options|= $3 ? HA_OPTION_DELAY_KEY_WRITE : HA_OPTION_NO_DELAY_KEY_WRITE; } + | ROW_FORMAT_SYM opt_equal row_types { Lex->create_info.row_type= $3; } + | RAID_TYPE opt_equal raid_types { Lex->create_info.raid_type= $3; Lex->create_info.used_fields|= HA_CREATE_USED_RAID;} + | RAID_CHUNKS opt_equal ULONG_NUM { Lex->create_info.raid_chunks= $3; Lex->create_info.used_fields|= HA_CREATE_USED_RAID;} + | RAID_CHUNKSIZE opt_equal ULONG_NUM { Lex->create_info.raid_chunksize= $3*RAID_BLOCK_SIZE; Lex->create_info.used_fields|= HA_CREATE_USED_RAID;} + | UNION_SYM opt_equal '(' table_list ')' { /* Move the union list to the merge_list */ LEX *lex=Lex; - TABLE_LIST *table_list= (TABLE_LIST*) lex->select->table_list.first; - lex->create_info.merge_list= lex->select->table_list; + TABLE_LIST *table_list= lex->select_lex.get_table_list(); + lex->create_info.merge_list= lex->select_lex.table_list; lex->create_info.merge_list.elements--; lex->create_info.merge_list.first= (byte*) (table_list->next); - lex->select->table_list.elements=1; - lex->select->table_list.next= (byte**) &(table_list->next); + lex->select_lex.table_list.elements=1; + lex->select_lex.table_list.next= (byte**) &(table_list->next); table_list->next=0; lex->create_info.used_fields|= HA_CREATE_USED_UNION; } - | CHARSET opt_equal ident {} - | CHAR_SYM SET opt_equal ident {} - | INSERT_METHOD EQ merge_insert_types { Lex->create_info.merge_insert_method= $3; Lex->create_info.used_fields|= HA_CREATE_USED_INSERT_METHOD;} - | DATA_SYM DIRECTORY_SYM EQ TEXT_STRING { Lex->create_info.data_file_name= $4.str; } - | INDEX DIRECTORY_SYM EQ TEXT_STRING { Lex->create_info.index_file_name= $4.str; }; - -table_types: - ISAM_SYM { $$= DB_TYPE_ISAM; } - | MYISAM_SYM { $$= DB_TYPE_MYISAM; } - | MERGE_SYM { $$= DB_TYPE_MRG_MYISAM; } - | HEAP_SYM { $$= DB_TYPE_HEAP; } - | MEMORY_SYM { $$= DB_TYPE_HEAP; } - | BERKELEY_DB_SYM { $$= DB_TYPE_BERKELEY_DB; } - | INNOBASE_SYM { $$= DB_TYPE_INNODB; }; + | default_charset + | default_collation + | INSERT_METHOD opt_equal merge_insert_types { Lex->create_info.merge_insert_method= $3; Lex->create_info.used_fields|= HA_CREATE_USED_INSERT_METHOD;} + | DATA_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys + { Lex->create_info.data_file_name= $4.str; } + | INDEX_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys { Lex->create_info.index_file_name= $4.str; }; + +default_charset: + opt_default charset opt_equal charset_name_or_default + { + HA_CREATE_INFO *cinfo= &Lex->create_info; + if ((cinfo->used_fields & HA_CREATE_USED_DEFAULT_CHARSET) && + cinfo->default_table_charset && $4 && + !my_charset_same(cinfo->default_table_charset,$4)) + { + net_printf(YYTHD, ER_CONFLICTING_DECLARATIONS, + "CHARACTER SET ", cinfo->default_table_charset->csname, + "CHARACTER SET ", $4->csname); + YYABORT; + } + Lex->create_info.default_table_charset= $4; + Lex->create_info.used_fields|= HA_CREATE_USED_DEFAULT_CHARSET; + }; + +default_collation: + opt_default COLLATE_SYM opt_equal collation_name_or_default + { + HA_CREATE_INFO *cinfo= &Lex->create_info; + if ((cinfo->used_fields & HA_CREATE_USED_DEFAULT_CHARSET) && + cinfo->default_table_charset && $4 && + !my_charset_same(cinfo->default_table_charset,$4)) + { + net_printf(YYTHD,ER_COLLATION_CHARSET_MISMATCH, + $4->name, cinfo->default_table_charset->csname); + YYABORT; + } + Lex->create_info.default_table_charset= $4; + Lex->create_info.used_fields|= HA_CREATE_USED_DEFAULT_CHARSET; + }; + 
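
Editorial aside, not part of the patch: the default_charset and default_collation actions just added both reduce to one consistency rule — an explicitly named COLLATE (or a second CHARACTER SET) must agree with the default character set already recorded in HA_CREATE_INFO, otherwise the statement is rejected with a charset/collation mismatch error. A minimal standalone sketch of that rule follows; CharsetInfo, same_charset, TableCreateInfo and set_default_collation are simplified stand-ins for MySQL's CHARSET_INFO, my_charset_same() and HA_CREATE_INFO, invented here only so the check can be compiled and run in isolation.

#include <cstdio>
#include <cstring>

struct CharsetInfo {
  const char *csname;   /* character set name, e.g. "latin1"            */
  const char *name;     /* collation name,     e.g. "latin1_swedish_ci" */
};

/* Two collations are compatible when they belong to the same character set. */
static bool same_charset(const CharsetInfo *a, const CharsetInfo *b)
{
  return a == b || std::strcmp(a->csname, b->csname) == 0;
}

struct TableCreateInfo {
  const CharsetInfo *default_table_charset;  /* NULL means "not chosen yet"  */
  bool charset_was_set;                      /* HA_CREATE_USED_DEFAULT_CHARSET */
};

/* Mirrors the shape of the default_charset / default_collation actions:
   reject a clause that contradicts an earlier one, otherwise record it. */
static bool set_default_collation(TableCreateInfo *info, const CharsetInfo *cl)
{
  if (info->charset_was_set && info->default_table_charset && cl &&
      !same_charset(info->default_table_charset, cl))
  {
    std::fprintf(stderr,
                 "COLLATION '%s' is not valid for CHARACTER SET '%s'\n",
                 cl->name, info->default_table_charset->csname);
    return false;                       /* the grammar would YYABORT here */
  }
  info->default_table_charset = cl;     /* DEFAULT (NULL) clears the choice */
  info->charset_was_set = true;
  return true;
}

int main()
{
  const CharsetInfo latin1  = { "latin1", "latin1_swedish_ci" };
  const CharsetInfo utf8_ci = { "utf8",   "utf8_general_ci"   };

  TableCreateInfo info = { &latin1, true };   /* ... DEFAULT CHARACTER SET latin1 */
  /* COLLATE utf8_general_ci conflicts with latin1 -> rejected (prints ok=0) */
  std::printf("ok=%d\n", set_default_collation(&info, &utf8_ci));
  /* COLLATE latin1_swedish_ci matches the charset  -> accepted (prints ok=1) */
  std::printf("ok=%d\n", set_default_collation(&info, &latin1));
  return 0;
}
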
+storage_engines: + ident_or_text + { + $$ = ha_resolve_by_name($1.str,$1.length); + if ($$ == DB_TYPE_UNKNOWN) { + net_printf(YYTHD, ER_UNKNOWN_STORAGE_ENGINE, $1.str); + YYABORT; + } + }; row_types: DEFAULT { $$= ROW_TYPE_DEFAULT; } @@ -922,11 +1271,11 @@ merge_insert_types: | LAST_SYM { $$= MERGE_INSERT_TO_LAST; }; opt_select_from: - /* empty */ + opt_limit_clause {} | select_from select_lock_type; udf_func_type: - /* empty */ { $$ = UDFTYPE_FUNCTION; } + /* empty */ { $$ = UDFTYPE_FUNCTION; } | AGGREGATE_SYM { $$ = UDFTYPE_AGGREGATE; }; udf_type: @@ -940,7 +1289,7 @@ field_list: field_list_item: - column_def + column_def | key_def ; @@ -950,18 +1299,36 @@ column_def: { Lex->col_list.empty(); /* Alloced by sql_alloc */ } - ; + ; key_def: - key_type opt_ident '(' key_list ')' + key_type opt_ident key_alg '(' key_list ')' { LEX *lex=Lex; - lex->key_list.push_back(new Key($1,$2,lex->col_list)); + lex->key_list.push_back(new Key($1,$2, $3, 0, lex->col_list)); + lex->col_list.empty(); /* Alloced by sql_alloc */ + } + | opt_constraint constraint_key_type opt_ident key_alg '(' key_list ')' + { + LEX *lex=Lex; + const char *key_name= $3 ? $3:$1; + lex->key_list.push_back(new Key($2, key_name, $4, 0, + lex->col_list)); lex->col_list.empty(); /* Alloced by sql_alloc */ } | opt_constraint FOREIGN KEY_SYM opt_ident '(' key_list ')' references { - Lex->col_list.empty(); /* Alloced by sql_alloc */ + LEX *lex=Lex; + lex->key_list.push_back(new foreign_key($4 ? $4:$1, lex->col_list, + $8, + lex->ref_list, + lex->fk_delete_opt, + lex->fk_update_opt, + lex->fk_match_option)); + lex->key_list.push_back(new Key(Key::MULTIPLE, $4 ? $4 : $1, + HA_KEY_ALG_UNDEF, 1, + lex->col_list)); + lex->col_list.empty(); /* Alloced by sql_alloc */ } | constraint opt_check_constraint { @@ -983,99 +1350,165 @@ check_constraint: ; opt_constraint: - /* empty */ - | constraint; - + /* empty */ { $$=(char*) 0; } + | constraint { $$= $1; } + ; + constraint: - CONSTRAINT opt_ident; + CONSTRAINT opt_ident { $$=$2; } + ; field_spec: field_ident { LEX *lex=Lex; - lex->length=lex->dec=0; lex->type=0; lex->interval=0; - lex->default_value=0; + lex->length=lex->dec=0; lex->type=0; + lex->default_value= lex->on_update_value= 0; + lex->comment=0; + lex->charset=NULL; } type opt_attribute { LEX *lex=Lex; - if (add_field_to_list($1.str, + if (add_field_to_list(lex->thd, $1.str, (enum enum_field_types) $3, lex->length,lex->dec,lex->type, - lex->default_value,lex->change, - lex->interval)) + lex->default_value, lex->on_update_value, + lex->comment, + lex->change,&lex->interval_list,lex->charset, + lex->uint_geom_type)) YYABORT; }; type: - int_type opt_len field_options { Lex->length=$2; $$=$1; } + int_type opt_len field_options { $$=$1; } | real_type opt_precision field_options { $$=$1; } | FLOAT_SYM float_options field_options { $$=FIELD_TYPE_FLOAT; } | BIT_SYM opt_len { Lex->length=(char*) "1"; $$=FIELD_TYPE_TINY; } | BOOL_SYM { Lex->length=(char*) "1"; $$=FIELD_TYPE_TINY; } - | char '(' NUM ')' opt_binary { Lex->length=$3.str; + | BOOLEAN_SYM { Lex->length=(char*) "1"; + $$=FIELD_TYPE_TINY; } + | char '(' NUM ')' opt_binary { Lex->length=$3.str; $$=FIELD_TYPE_STRING; } | char opt_binary { Lex->length=(char*) "1"; $$=FIELD_TYPE_STRING; } - | BINARY '(' NUM ')' { Lex->length=$3.str; - Lex->type|=BINARY_FLAG; + | nchar '(' NUM ')' { Lex->length=$3.str; + $$=FIELD_TYPE_STRING; + Lex->charset=national_charset_info; } + | nchar { Lex->length=(char*) "1"; + $$=FIELD_TYPE_STRING; + Lex->charset=national_charset_info; } + | BINARY '(' NUM ')' { 
Lex->length=$3.str; + Lex->charset=&my_charset_bin; + $$=FIELD_TYPE_STRING; } + | BINARY { Lex->length= (char*) "1"; + Lex->charset=&my_charset_bin; $$=FIELD_TYPE_STRING; } | varchar '(' NUM ')' opt_binary { Lex->length=$3.str; $$=FIELD_TYPE_VAR_STRING; } + | nvarchar '(' NUM ')' { Lex->length=$3.str; + $$=FIELD_TYPE_VAR_STRING; + Lex->charset=national_charset_info; } | VARBINARY '(' NUM ')' { Lex->length=$3.str; - Lex->type|=BINARY_FLAG; + Lex->charset=&my_charset_bin; $$=FIELD_TYPE_VAR_STRING; } - | YEAR_SYM opt_len field_options { $$=FIELD_TYPE_YEAR; Lex->length=$2; } + | YEAR_SYM opt_len field_options { $$=FIELD_TYPE_YEAR; } | DATE_SYM { $$=FIELD_TYPE_DATE; } | TIME_SYM { $$=FIELD_TYPE_TIME; } - | TIMESTAMP { $$=FIELD_TYPE_TIMESTAMP; } - | TIMESTAMP '(' NUM ')' { Lex->length=$3.str; - $$=FIELD_TYPE_TIMESTAMP; } + | TIMESTAMP opt_len + { + if (YYTHD->variables.sql_mode & MODE_MAXDB) + $$=FIELD_TYPE_DATETIME; + else + { + /* + Unlike other types TIMESTAMP fields are NOT NULL by default. + */ + Lex->type|= NOT_NULL_FLAG; + $$=FIELD_TYPE_TIMESTAMP; + } + } | DATETIME { $$=FIELD_TYPE_DATETIME; } - | TINYBLOB { Lex->type|=BINARY_FLAG; + | TINYBLOB { Lex->charset=&my_charset_bin; $$=FIELD_TYPE_TINY_BLOB; } - | BLOB_SYM { Lex->type|=BINARY_FLAG; + | BLOB_SYM opt_len { Lex->charset=&my_charset_bin; $$=FIELD_TYPE_BLOB; } - | MEDIUMBLOB { Lex->type|=BINARY_FLAG; + | spatial_type { +#ifdef HAVE_SPATIAL + Lex->charset=&my_charset_bin; + Lex->uint_geom_type= (uint)$1; + $$=FIELD_TYPE_GEOMETRY; +#else + net_printf(Lex->thd, ER_FEATURE_DISABLED, + sym_group_geom.name, + sym_group_geom.needed_define); + YYABORT; +#endif + } + | MEDIUMBLOB { Lex->charset=&my_charset_bin; $$=FIELD_TYPE_MEDIUM_BLOB; } - | LONGBLOB { Lex->type|=BINARY_FLAG; + | LONGBLOB { Lex->charset=&my_charset_bin; $$=FIELD_TYPE_LONG_BLOB; } - | LONG_SYM VARBINARY { Lex->type|=BINARY_FLAG; + | LONG_SYM VARBINARY { Lex->charset=&my_charset_bin; $$=FIELD_TYPE_MEDIUM_BLOB; } - | LONG_SYM varchar { $$=FIELD_TYPE_MEDIUM_BLOB; } - | TINYTEXT { $$=FIELD_TYPE_TINY_BLOB; } - | TEXT_SYM { $$=FIELD_TYPE_BLOB; } - | MEDIUMTEXT { $$=FIELD_TYPE_MEDIUM_BLOB; } - | LONGTEXT { $$=FIELD_TYPE_LONG_BLOB; } + | LONG_SYM varchar opt_binary { $$=FIELD_TYPE_MEDIUM_BLOB; } + | TINYTEXT opt_binary { $$=FIELD_TYPE_TINY_BLOB; } + | TEXT_SYM opt_len opt_binary { $$=FIELD_TYPE_BLOB; } + | MEDIUMTEXT opt_binary { $$=FIELD_TYPE_MEDIUM_BLOB; } + | LONGTEXT opt_binary { $$=FIELD_TYPE_LONG_BLOB; } | DECIMAL_SYM float_options field_options { $$=FIELD_TYPE_DECIMAL;} | NUMERIC_SYM float_options field_options { $$=FIELD_TYPE_DECIMAL;} - | ENUM {Lex->interval_list.empty();} '(' string_list ')' + | FIXED_SYM float_options field_options + { $$=FIELD_TYPE_DECIMAL;} + | ENUM {Lex->interval_list.empty();} '(' string_list ')' opt_binary + { $$=FIELD_TYPE_ENUM; } + | SET { Lex->interval_list.empty();} '(' string_list ')' opt_binary + { $$=FIELD_TYPE_SET; } + | LONG_SYM opt_binary { $$=FIELD_TYPE_MEDIUM_BLOB; } + | SERIAL_SYM { - LEX *lex=Lex; - lex->interval=typelib(lex->interval_list); - $$=FIELD_TYPE_ENUM; + $$=FIELD_TYPE_LONGLONG; + Lex->type|= (AUTO_INCREMENT_FLAG | NOT_NULL_FLAG | UNSIGNED_FLAG | + UNIQUE_FLAG); } - | SET { Lex->interval_list.empty();} '(' string_list ')' - { - LEX *lex=Lex; - lex->interval=typelib(lex->interval_list); - $$=FIELD_TYPE_SET; - }; + ; + +spatial_type: + GEOMETRY_SYM { $$= Field::GEOM_GEOMETRY; } + | GEOMETRYCOLLECTION { $$= Field::GEOM_GEOMETRYCOLLECTION; } + | POINT_SYM { $$= Field::GEOM_POINT; } + | MULTIPOINT { $$= Field::GEOM_MULTIPOINT; } + | 
LINESTRING { $$= Field::GEOM_LINESTRING; } + | MULTILINESTRING { $$= Field::GEOM_MULTILINESTRING; } + | POLYGON { $$= Field::GEOM_POLYGON; } + | MULTIPOLYGON { $$= Field::GEOM_MULTIPOLYGON; } + ; char: CHAR_SYM {} - | NCHAR_SYM {} - | NATIONAL_SYM CHAR_SYM {}; + ; + +nchar: + NCHAR_SYM {} + | NATIONAL_SYM CHAR_SYM {} + ; varchar: char VARYING {} | VARCHAR {} - | NATIONAL_SYM VARCHAR {} - | NCHAR_SYM VARCHAR {}; + ; + +nvarchar: + NATIONAL_SYM VARCHAR {} + | NVARCHAR_SYM {} + | NCHAR_SYM VARCHAR {} + | NATIONAL_SYM CHAR_SYM VARYING {} + | NCHAR_SYM VARYING {} + ; int_type: INT_SYM { $$=FIELD_TYPE_LONG; } @@ -1085,7 +1518,7 @@ int_type: | BIGINT { $$=FIELD_TYPE_LONGLONG; }; real_type: - REAL { $$= current_thd->sql_mode & MODE_REAL_AS_FLOAT ? + REAL { $$= YYTHD->variables.sql_mode & MODE_REAL_AS_FLOAT ? FIELD_TYPE_FLOAT : FIELD_TYPE_DOUBLE; } | DOUBLE_SYM { $$=FIELD_TYPE_DOUBLE; } | DOUBLE_SYM PRECISION { $$=FIELD_TYPE_DOUBLE; }; @@ -1117,8 +1550,8 @@ field_option: | ZEROFILL { Lex->type|= UNSIGNED_FLAG | ZEROFILL_FLAG; }; opt_len: - /* empty */ { $$=(char*) 0; } /* use default length */ - | '(' NUM ')' { $$=$2.str; }; + /* empty */ { Lex->length=(char*) 0; } /* use default length */ + | '(' NUM ')' { Lex->length= $2.str; }; opt_precision: /* empty */ {} @@ -1135,25 +1568,157 @@ opt_attribute_list: attribute: NULL_SYM { Lex->type&= ~ NOT_NULL_FLAG; } | NOT NULL_SYM { Lex->type|= NOT_NULL_FLAG; } - | DEFAULT literal { Lex->default_value=$2; } + | DEFAULT now_or_signed_literal { Lex->default_value=$2; } + | ON UPDATE_SYM NOW_SYM optional_braces + { Lex->on_update_value= new Item_func_now_local(); } | AUTO_INC { Lex->type|= AUTO_INCREMENT_FLAG | NOT_NULL_FLAG; } - | PRIMARY_SYM KEY_SYM { Lex->type|= PRI_KEY_FLAG | NOT_NULL_FLAG; } - | UNIQUE_SYM { Lex->type|= UNIQUE_FLAG; } - | UNIQUE_SYM KEY_SYM { Lex->type|= UNIQUE_KEY_FLAG; } - | COMMENT_SYM text_literal {}; + | SERIAL_SYM DEFAULT VALUE_SYM + { + LEX *lex=Lex; + lex->type|= AUTO_INCREMENT_FLAG | NOT_NULL_FLAG | UNIQUE_FLAG; + lex->alter_info.flags|= ALTER_ADD_INDEX; + } + | opt_primary KEY_SYM + { + LEX *lex=Lex; + lex->type|= PRI_KEY_FLAG | NOT_NULL_FLAG; + lex->alter_info.flags|= ALTER_ADD_INDEX; + } + | UNIQUE_SYM + { + LEX *lex=Lex; + lex->type|= UNIQUE_FLAG; + lex->alter_info.flags|= ALTER_ADD_INDEX; + } + | UNIQUE_SYM KEY_SYM + { + LEX *lex=Lex; + lex->type|= UNIQUE_KEY_FLAG; + lex->alter_info.flags|= ALTER_ADD_INDEX; + } + | COMMENT_SYM TEXT_STRING_sys { Lex->comment= &$2; } + | BINARY { Lex->type|= BINCMP_FLAG; } + | COLLATE_SYM collation_name + { + if (Lex->charset && !my_charset_same(Lex->charset,$2)) + { + net_printf(YYTHD,ER_COLLATION_CHARSET_MISMATCH, + $2->name,Lex->charset->csname); + YYABORT; + } + else + { + Lex->charset=$2; + } + } + ; -opt_binary: +now_or_signed_literal: + NOW_SYM optional_braces { $$= new Item_func_now_local(); } + | signed_literal { $$=$1; } + ; + +charset: + CHAR_SYM SET {} + | CHARSET {} + ; + +charset_name: + ident_or_text + { + if (!($$=get_charset_by_csname($1.str,MY_CS_PRIMARY,MYF(0)))) + { + net_printf(YYTHD,ER_UNKNOWN_CHARACTER_SET,$1.str); + YYABORT; + } + } + | BINARY { $$= &my_charset_bin; } + ; + +charset_name_or_default: + charset_name { $$=$1; } + | DEFAULT { $$=NULL; } ; + + +old_or_new_charset_name: + ident_or_text + { + if (!($$=get_charset_by_csname($1.str,MY_CS_PRIMARY,MYF(0))) && + !($$=get_old_charset_by_name($1.str))) + { + net_printf(YYTHD,ER_UNKNOWN_CHARACTER_SET,$1.str); + YYABORT; + } + } + | BINARY { $$= &my_charset_bin; } + ; + +old_or_new_charset_name_or_default: + 
old_or_new_charset_name { $$=$1; } + | DEFAULT { $$=NULL; } ; + +collation_name: + ident_or_text + { + if (!($$=get_charset_by_name($1.str,MYF(0)))) + { + net_printf(YYTHD,ER_UNKNOWN_COLLATION,$1.str); + YYABORT; + } + }; + +opt_collate: + /* empty */ { $$=NULL; } + | COLLATE_SYM collation_name_or_default { $$=$2; } + ; + +collation_name_or_default: + collation_name { $$=$1; } + | DEFAULT { $$=NULL; } ; + +opt_default: /* empty */ {} - | BINARY { Lex->type|=BINARY_FLAG; } - | CHAR_SYM SET opt_equal ident {} + | DEFAULT {}; + +opt_binary: + /* empty */ { Lex->charset=NULL; } + | ASCII_SYM { Lex->charset=&my_charset_latin1; } + | BYTE_SYM { Lex->charset=&my_charset_bin; } + | UNICODE_SYM + { + if (!(Lex->charset=get_charset_by_csname("ucs2",MY_CS_PRIMARY,MYF(0)))) + { + net_printf(YYTHD,ER_UNKNOWN_CHARACTER_SET,"ucs2"); + YYABORT; + } + } + | charset charset_name { Lex->charset=$2; } ; + +opt_primary: + /* empty */ + | PRIMARY_SYM ; references: - REFERENCES table_ident opt_on_delete {} - | REFERENCES table_ident '(' key_list ')' opt_on_delete - { - Lex->col_list.empty(); /* Alloced by sql_alloc */ - }; + REFERENCES table_ident + { + LEX *lex=Lex; + lex->fk_delete_opt= lex->fk_update_opt= lex->fk_match_option= 0; + lex->ref_list.empty(); + } + opt_ref_list + { + $$=$2; + }; + +opt_ref_list: + /* empty */ opt_on_delete {} + | '(' ref_list ')' opt_on_delete {}; + +ref_list: + ref_list ',' ident { Lex->ref_list.push_back(new key_part_spec($3.str)); } + | ident { Lex->ref_list.push_back(new key_part_spec($1.str)); }; + opt_on_delete: /* empty */ {} @@ -1163,41 +1728,80 @@ opt_on_delete_list: opt_on_delete_list opt_on_delete_item {} | opt_on_delete_item {}; - opt_on_delete_item: - ON DELETE_SYM delete_option {} - | ON UPDATE_SYM delete_option {} - | MATCH FULL {} - | MATCH PARTIAL {}; + ON DELETE_SYM delete_option { Lex->fk_delete_opt= $3; } + | ON UPDATE_SYM delete_option { Lex->fk_update_opt= $3; } + | MATCH FULL { Lex->fk_match_option= foreign_key::FK_MATCH_FULL; } + | MATCH PARTIAL { Lex->fk_match_option= foreign_key::FK_MATCH_PARTIAL; } + | MATCH SIMPLE_SYM { Lex->fk_match_option= foreign_key::FK_MATCH_SIMPLE; }; delete_option: - RESTRICT {} - | CASCADE {} - | SET NULL_SYM {} - | NO_SYM ACTION {} - | SET DEFAULT {}; + RESTRICT { $$= (int) foreign_key::FK_OPTION_RESTRICT; } + | CASCADE { $$= (int) foreign_key::FK_OPTION_CASCADE; } + | SET NULL_SYM { $$= (int) foreign_key::FK_OPTION_SET_NULL; } + | NO_SYM ACTION { $$= (int) foreign_key::FK_OPTION_NO_ACTION; } + | SET DEFAULT { $$= (int) foreign_key::FK_OPTION_DEFAULT; }; key_type: - opt_constraint PRIMARY_SYM KEY_SYM { $$= Key::PRIMARY; } - | key_or_index { $$= Key::MULTIPLE; } - | FULLTEXT_SYM { $$= Key::FULLTEXT; } - | FULLTEXT_SYM key_or_index { $$= Key::FULLTEXT; } - | opt_constraint UNIQUE_SYM { $$= Key::UNIQUE; } - | opt_constraint UNIQUE_SYM key_or_index { $$= Key::UNIQUE; }; + key_or_index { $$= Key::MULTIPLE; } + | FULLTEXT_SYM opt_key_or_index { $$= Key::FULLTEXT; } + | SPATIAL_SYM opt_key_or_index + { +#ifdef HAVE_SPATIAL + $$= Key::SPATIAL; +#else + net_printf(Lex->thd, ER_FEATURE_DISABLED, + sym_group_geom.name, sym_group_geom.needed_define); + YYABORT; +#endif + }; + +constraint_key_type: + PRIMARY_SYM KEY_SYM { $$= Key::PRIMARY; } + | UNIQUE_SYM opt_key_or_index { $$= Key::UNIQUE; }; key_or_index: KEY_SYM {} - | INDEX {}; + | INDEX_SYM {}; + +opt_key_or_index: + /* empty */ {} + | key_or_index + ; keys_or_index: KEYS {} - | INDEX {} + | INDEX_SYM {} | INDEXES {}; opt_unique_or_fulltext: /* empty */ { $$= Key::MULTIPLE; } | 
UNIQUE_SYM { $$= Key::UNIQUE; } - | FULLTEXT_SYM { $$= Key::FULLTEXT; }; + | FULLTEXT_SYM { $$= Key::FULLTEXT;} + | SPATIAL_SYM + { +#ifdef HAVE_SPATIAL + $$= Key::SPATIAL; +#else + net_printf(Lex->thd, ER_FEATURE_DISABLED, + sym_group_geom.name, sym_group_geom.needed_define); + YYABORT; +#endif + } + ; + +key_alg: + /* empty */ { $$= HA_KEY_ALG_UNDEF; } + | USING opt_btree_or_rtree { $$= $2; } + | TYPE_SYM opt_btree_or_rtree { $$= $2; }; + +opt_btree_or_rtree: + BTREE_SYM { $$= HA_KEY_ALG_BTREE; } + | RTREE_SYM + { + $$= HA_KEY_ALG_RTREE; + } + | HASH_SYM { $$= HA_KEY_ALG_HASH; }; key_list: key_list ',' key_part order_dir { Lex->col_list.push_back($3); } @@ -1205,12 +1809,28 @@ key_list: key_part: ident { $$=new key_part_spec($1.str); } - | ident '(' NUM ')' { $$=new key_part_spec($1.str,(uint) atoi($3.str)); }; + | ident '(' NUM ')' + { + int key_part_len= atoi($3.str); +#if MYSQL_VERSION_ID < 50000 + if (!key_part_len) + { + my_printf_error(ER_UNKNOWN_ERROR, + "Key part '%s' length cannot be 0", + MYF(0), $1.str); + } +#endif + $$=new key_part_spec($1.str,(uint) key_part_len); + }; opt_ident: /* empty */ { $$=(char*) 0; } /* Defaultlength */ | field_ident { $$=$1.str; }; +opt_component: + /* empty */ { $$.str= 0; $$.length= 0; } + | '.' ident { $$=$2; }; + string_list: text_string { Lex->interval_list.push_back($1); } | string_list ',' text_string { Lex->interval_list.push_back($3); }; @@ -1222,121 +1842,198 @@ string_list: alter: ALTER opt_ignore TABLE_SYM table_ident { - LEX *lex=Lex; - lex->sql_command = SQLCOM_ALTER_TABLE; - lex->name=0; - if (!add_table_to_list($4, NULL, TL_OPTION_UPDATING)) + THD *thd= YYTHD; + LEX *lex= thd->lex; + lex->sql_command= SQLCOM_ALTER_TABLE; + lex->name= 0; + lex->duplicates= DUP_ERROR; + if (!lex->select_lex.add_table_to_list(thd, $4, NULL, + TL_OPTION_UPDATING)) YYABORT; - lex->drop_primary=0; lex->create_list.empty(); lex->key_list.empty(); lex->col_list.empty(); - lex->drop_list.empty(); - lex->alter_list.empty(); - lex->select->order_list.elements=0; - lex->select->order_list.first=0; - lex->select->order_list.next= (byte**) &lex->select->order_list.first; - lex->select->db=lex->name=0; - bzero((char*) &lex->create_info,sizeof(lex->create_info)); + lex->select_lex.init_order(); + lex->select_lex.db=lex->name=0; + bzero((char*) &lex->create_info,sizeof(lex->create_info)); lex->create_info.db_type= DB_TYPE_DEFAULT; + lex->create_info.default_table_charset= NULL; lex->create_info.row_type= ROW_TYPE_NOT_USED; - lex->alter_keys_onoff=LEAVE_AS_IS; - lex->simple_alter=1; + lex->alter_info.reset(); + lex->alter_info.is_simple= 1; + lex->alter_info.flags= 0; } alter_list {} - ; - + | ALTER DATABASE ident_or_empty + { + Lex->create_info.default_table_charset= NULL; + Lex->create_info.used_fields= 0; + } + opt_create_database_options + { + LEX *lex=Lex; + lex->sql_command=SQLCOM_ALTER_DB; + lex->name= $3; + }; + + +ident_or_empty: + /* empty */ { $$= 0; } + | ident { $$= $1.str; }; + + alter_list: + | DISCARD TABLESPACE { Lex->alter_info.tablespace_op= DISCARD_TABLESPACE; } + | IMPORT TABLESPACE { Lex->alter_info.tablespace_op= IMPORT_TABLESPACE; } | alter_list_item | alter_list ',' alter_list_item; add_column: - ADD opt_column { Lex->change=0; }; + ADD opt_column + { + LEX *lex=Lex; + lex->change=0; + lex->alter_info.flags|= ALTER_ADD_COLUMN; + }; alter_list_item: - add_column column_def opt_place { Lex->simple_alter=0; } - | ADD key_def { Lex->simple_alter=0; } - | add_column '(' field_list ')' { Lex->simple_alter=0; } + add_column column_def opt_place { 
Lex->alter_info.is_simple= 0; } + | ADD key_def + { + LEX *lex=Lex; + lex->alter_info.is_simple= 0; + lex->alter_info.flags|= ALTER_ADD_INDEX; + } + | add_column '(' field_list ')' { Lex->alter_info.is_simple= 0; } | CHANGE opt_column field_ident { LEX *lex=Lex; - lex->change= $3.str; lex->simple_alter=0; + lex->change= $3.str; + lex->alter_info.is_simple= 0; + lex->alter_info.flags|= ALTER_CHANGE_COLUMN; } field_spec opt_place - | MODIFY_SYM opt_column field_ident - { - LEX *lex=Lex; - lex->length=lex->dec=0; lex->type=0; lex->interval=0; - lex->default_value=0; - lex->simple_alter=0; - } - type opt_attribute - { - LEX *lex=Lex; - if (add_field_to_list($3.str, - (enum enum_field_types) $5, - lex->length,lex->dec,lex->type, - lex->default_value, $3.str, - lex->interval)) - YYABORT; - } - opt_place + | MODIFY_SYM opt_column field_ident + { + LEX *lex=Lex; + lex->length=lex->dec=0; lex->type=0; + lex->default_value= lex->on_update_value= 0; + lex->comment=0; + lex->charset= NULL; + lex->alter_info.is_simple= 0; + lex->alter_info.flags|= ALTER_CHANGE_COLUMN; + } + type opt_attribute + { + LEX *lex=Lex; + if (add_field_to_list(lex->thd,$3.str, + (enum enum_field_types) $5, + lex->length,lex->dec,lex->type, + lex->default_value, lex->on_update_value, + lex->comment, + $3.str, &lex->interval_list, lex->charset, + lex->uint_geom_type)) + YYABORT; + } + opt_place | DROP opt_column field_ident opt_restrict { LEX *lex=Lex; - lex->drop_list.push_back(new Alter_drop(Alter_drop::COLUMN, - $3.str)); lex->simple_alter=0; + lex->alter_info.drop_list.push_back(new Alter_drop(Alter_drop::COLUMN, + $3.str)); + lex->alter_info.is_simple= 0; + lex->alter_info.flags|= ALTER_DROP_COLUMN; } + | DROP FOREIGN KEY_SYM opt_ident { Lex->alter_info.is_simple= 0; } | DROP PRIMARY_SYM KEY_SYM { LEX *lex=Lex; - lex->drop_primary=1; lex->simple_alter=0; + lex->alter_info.drop_list.push_back(new Alter_drop(Alter_drop::KEY, + primary_key_name)); + lex->alter_info.is_simple= 0; + lex->alter_info.flags|= ALTER_DROP_INDEX; } - | DROP FOREIGN KEY_SYM opt_ident { Lex->simple_alter=0; } | DROP key_or_index field_ident { LEX *lex=Lex; - lex->drop_list.push_back(new Alter_drop(Alter_drop::KEY, - $3.str)); - lex->simple_alter=0; + lex->alter_info.drop_list.push_back(new Alter_drop(Alter_drop::KEY, + $3.str)); + lex->alter_info.is_simple= 0; + lex->alter_info.flags|= ALTER_DROP_INDEX; } - | DISABLE_SYM KEYS { Lex->alter_keys_onoff=DISABLE; } - | ENABLE_SYM KEYS { Lex->alter_keys_onoff=ENABLE; } - | ALTER opt_column field_ident SET DEFAULT literal + | DISABLE_SYM KEYS { Lex->alter_info.keys_onoff= DISABLE; } + | ENABLE_SYM KEYS { Lex->alter_info.keys_onoff= ENABLE; } + | ALTER opt_column field_ident SET DEFAULT signed_literal { LEX *lex=Lex; - lex->alter_list.push_back(new Alter_column($3.str,$6)); - lex->simple_alter=0; + lex->alter_info.alter_list.push_back(new Alter_column($3.str,$6)); + lex->alter_info.is_simple= 0; + lex->alter_info.flags|= ALTER_CHANGE_COLUMN; } | ALTER opt_column field_ident DROP DEFAULT { LEX *lex=Lex; - lex->alter_list.push_back(new Alter_column($3.str,(Item*) 0)); - lex->simple_alter=0; + lex->alter_info.alter_list.push_back(new Alter_column($3.str, + (Item*) 0)); + lex->alter_info.is_simple= 0; + lex->alter_info.flags|= ALTER_CHANGE_COLUMN; } | RENAME opt_to table_ident { LEX *lex=Lex; - lex->select->db=$3->db.str; + lex->select_lex.db=$3->db.str; lex->name= $3->table.str; if (check_table_name($3->table.str,$3->table.length) || $3->db.str && check_db_name($3->db.str)) { - 
net_printf(&lex->thd->net,ER_WRONG_TABLE_NAME,$3->table.str); + net_printf(lex->thd,ER_WRONG_TABLE_NAME,$3->table.str); YYABORT; } + lex->alter_info.flags|= ALTER_RENAME; } - | create_table_options { Lex->simple_alter=0; } - | order_clause { Lex->simple_alter=0; }; + | CONVERT_SYM TO_SYM charset charset_name_or_default opt_collate + { + if (!$4) + { + THD *thd= YYTHD; + $4= thd->variables.collation_database; + } + $5= $5 ? $5 : $4; + if (!my_charset_same($4,$5)) + { + net_printf(YYTHD,ER_COLLATION_CHARSET_MISMATCH, + $5->name,$4->csname); + YYABORT; + } + LEX *lex= Lex; + lex->create_info.table_charset= + lex->create_info.default_table_charset= $5; + lex->create_info.used_fields|= (HA_CREATE_USED_CHARSET | + HA_CREATE_USED_DEFAULT_CHARSET); + lex->alter_info.is_simple= 0; + } + | create_table_options_space_separated + { + LEX *lex=Lex; + lex->alter_info.is_simple= 0; + lex->alter_info.flags|= ALTER_OPTIONS; + } + | order_clause + { + LEX *lex=Lex; + lex->alter_info.is_simple= 0; + lex->alter_info.flags|= ALTER_ORDER; + }; opt_column: /* empty */ {} | COLUMN_SYM {}; opt_ignore: - /* empty */ { Lex->duplicates=DUP_ERROR; } - | IGNORE_SYM { Lex->duplicates=DUP_IGNORE; }; + /* empty */ { Lex->ignore= 0;} + | IGNORE_SYM { Lex->ignore= 1;} + ; opt_restrict: /* empty */ {} @@ -1355,58 +2052,108 @@ opt_to: | AS {}; /* - * The first two deprecate the last two--delete the last two for 4.1 release - */ + SLAVE START and SLAVE STOP are deprecated. We keep them for compatibility. +*/ + slave: - START_SYM SLAVE slave_thread_opts + START_SYM SLAVE slave_thread_opts + { + LEX *lex=Lex; + lex->sql_command = SQLCOM_SLAVE_START; + lex->type = 0; + /* We'll use mi structure for UNTIL options */ + bzero((char*) &lex->mi, sizeof(lex->mi)); + /* If you change this code don't forget to update SLAVE START too */ + } + slave_until + {} + | STOP_SYM SLAVE slave_thread_opts + { + LEX *lex=Lex; + lex->sql_command = SQLCOM_SLAVE_STOP; + lex->type = 0; + /* If you change this code don't forget to update SLAVE STOP too */ + } + | SLAVE START_SYM slave_thread_opts { LEX *lex=Lex; lex->sql_command = SQLCOM_SLAVE_START; lex->type = 0; - } - | - STOP_SYM SLAVE slave_thread_opts + /* We'll use mi structure for UNTIL options */ + bzero((char*) &lex->mi, sizeof(lex->mi)); + } + slave_until + {} + | SLAVE STOP_SYM slave_thread_opts { LEX *lex=Lex; lex->sql_command = SQLCOM_SLAVE_STOP; lex->type = 0; } - | - SLAVE START_SYM slave_thread_opts - { - LEX *lex=Lex; - lex->sql_command = SQLCOM_SLAVE_START; - lex->type = 0; - } - | - SLAVE STOP_SYM slave_thread_opts - { - LEX *lex=Lex; - lex->sql_command = SQLCOM_SLAVE_STOP; - lex->type = 0; - }; + ; + start: - START_SYM TRANSACTION_SYM { Lex->sql_command = SQLCOM_BEGIN;} - {} + START_SYM TRANSACTION_SYM start_transaction_opts + { + Lex->sql_command = SQLCOM_BEGIN; + Lex->start_transaction_opt= $3; + } ; +start_transaction_opts: + /*empty*/ { $$ = 0; } + | WITH CONSISTENT_SYM SNAPSHOT_SYM + { + $$= MYSQL_START_TRANS_OPT_WITH_CONS_SNAPSHOT; + } + ; + slave_thread_opts: + { Lex->slave_thd_opt= 0; } + slave_thread_opt_list + {} + ; + +slave_thread_opt_list: slave_thread_opt - | slave_thread_opts ',' slave_thread_opt; + | slave_thread_opt_list ',' slave_thread_opt + ; slave_thread_opt: - /*empty*/ {} + /*empty*/ {} | SQL_THREAD { Lex->slave_thd_opt|=SLAVE_SQL; } - | IO_THREAD { Lex->slave_thd_opt|=SLAVE_IO; } + | RELAY_THREAD { Lex->slave_thd_opt|=SLAVE_IO; } ; - + +slave_until: + /*empty*/ {} + | UNTIL_SYM slave_until_opts + { + LEX *lex=Lex; + if ((lex->mi.log_file_name || lex->mi.pos) 
&& + (lex->mi.relay_log_name || lex->mi.relay_log_pos) || + !((lex->mi.log_file_name && lex->mi.pos) || + (lex->mi.relay_log_name && lex->mi.relay_log_pos))) + { + send_error(lex->thd, ER_BAD_SLAVE_UNTIL_COND); + YYABORT; + } + + } + ; + +slave_until_opts: + master_file_def + | slave_until_opts ',' master_file_def ; + + restore: RESTORE_SYM table_or_tables { Lex->sql_command = SQLCOM_RESTORE_TABLE; } - table_list FROM TEXT_STRING + table_list FROM TEXT_STRING_sys { Lex->backup_dir = $6.str; }; @@ -1416,16 +2163,33 @@ backup: { Lex->sql_command = SQLCOM_BACKUP_TABLE; } - table_list TO_SYM TEXT_STRING + table_list TO_SYM TEXT_STRING_sys { Lex->backup_dir = $6.str; }; +checksum: + CHECKSUM_SYM table_or_tables + { + LEX *lex=Lex; + lex->sql_command = SQLCOM_CHECKSUM; + } + table_list opt_checksum_type + {} + ; + +opt_checksum_type: + /* nothing */ { Lex->check_opt.flags= 0; } + | QUICK { Lex->check_opt.flags= T_QUICK; } + | EXTENDED_SYM { Lex->check_opt.flags= T_EXTEND; } + ; + repair: - REPAIR table_or_tables + REPAIR opt_no_write_to_binlog table_or_tables { LEX *lex=Lex; lex->sql_command = SQLCOM_REPAIR; + lex->no_write_to_binlog= $2; lex->check_opt.init(); } table_list opt_mi_repair_type @@ -1446,10 +2210,11 @@ mi_repair_type: | USE_FRM { Lex->check_opt.sql_flags|= TT_USEFRM; }; analyze: - ANALYZE_SYM table_or_tables + ANALYZE_SYM opt_no_write_to_binlog table_or_tables { LEX *lex=Lex; lex->sql_command = SQLCOM_ANALYZE; + lex->no_write_to_binlog= $2; lex->check_opt.init(); } table_list opt_mi_check_type @@ -1483,16 +2248,23 @@ mi_check_type: | CHANGED { Lex->check_opt.flags|= T_CHECK_ONLY_CHANGED; }; optimize: - OPTIMIZE table_or_tables + OPTIMIZE opt_no_write_to_binlog table_or_tables { LEX *lex=Lex; lex->sql_command = SQLCOM_OPTIMIZE; + lex->no_write_to_binlog= $2; lex->check_opt.init(); } table_list opt_mi_check_type {} ; +opt_no_write_to_binlog: + /* empty */ { $$= 0; } + | NO_WRITE_TO_BINLOG { $$= 1; } + | LOCAL_SYM { $$= 1; } + ; + rename: RENAME table_or_tables { @@ -1509,10 +2281,97 @@ table_to_table_list: table_to_table: table_ident TO_SYM table_ident { - if (!add_table_to_list($1, NULL, TL_OPTION_UPDATING, TL_IGNORE) || - !add_table_to_list($3, NULL, TL_OPTION_UPDATING, TL_IGNORE)) - YYABORT; - }; + LEX *lex=Lex; + SELECT_LEX *sl= lex->current_select; + if (!sl->add_table_to_list(lex->thd, $1,NULL,TL_OPTION_UPDATING, + TL_IGNORE) || + !sl->add_table_to_list(lex->thd, $3,NULL,TL_OPTION_UPDATING, + TL_IGNORE)) + YYABORT; + }; + +keycache: + CACHE_SYM INDEX_SYM keycache_list IN_SYM key_cache_name + { + LEX *lex=Lex; + lex->sql_command= SQLCOM_ASSIGN_TO_KEYCACHE; + lex->name_and_length= $5; + } + ; + +keycache_list: + assign_to_keycache + | keycache_list ',' assign_to_keycache; + +assign_to_keycache: + table_ident cache_keys_spec + { + LEX *lex=Lex; + SELECT_LEX *sel= &lex->select_lex; + if (!sel->add_table_to_list(lex->thd, $1, NULL, 0, + TL_READ, + sel->get_use_index(), + (List<String> *)0)) + YYABORT; + } + ; + +key_cache_name: + ident { $$= $1; } + | DEFAULT { $$ = default_key_cache_base; } + ; + +preload: + LOAD INDEX_SYM INTO CACHE_SYM + { + LEX *lex=Lex; + lex->sql_command=SQLCOM_PRELOAD_KEYS; + } + preload_list + {} + ; + +preload_list: + preload_keys + | preload_list ',' preload_keys; + +preload_keys: + table_ident cache_keys_spec opt_ignore_leaves + { + LEX *lex=Lex; + SELECT_LEX *sel= &lex->select_lex; + if (!sel->add_table_to_list(lex->thd, $1, NULL, $3, + TL_READ, + sel->get_use_index(), + (List<String> *)0)) + YYABORT; + } + ; + +cache_keys_spec: + { 
Select->interval_list.empty(); } + cache_key_list_or_empty + { + LEX *lex=Lex; + SELECT_LEX *sel= &lex->select_lex; + sel->use_index= sel->interval_list; + } + ; + +cache_key_list_or_empty: + /* empty */ { Lex->select_lex.use_index_ptr= 0; } + | opt_key_or_index '(' key_usage_list2 ')' + { + SELECT_LEX *sel= &Lex->select_lex; + sel->use_index_ptr= &sel->use_index; + } + ; + +opt_ignore_leaves: + /* empty */ + { $$= 0; } + | IGNORE_SYM LEAVES { $$= TL_OPTION_IGNORE_LEAVES; } + ; /* Select : retrieve data from table @@ -1520,35 +2379,105 @@ table_to_table: select: - select_init { Lex->sql_command=SQLCOM_SELECT; }; + select_init + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_SELECT; + lex->select_lex.resolve_mode= SELECT_LEX::SELECT_MODE; + } + ; +/* Need select_init2 for subselects. */ select_init: - SELECT_SYM select_part2 { Select->braces= 0; } opt_union + SELECT_SYM select_init2 | - '(' SELECT_SYM select_part2 ')' { Select->braces= 1;} union_opt; + '(' select_paren ')' union_opt; +select_paren: + SELECT_SYM select_part2 + { + LEX *lex= Lex; + SELECT_LEX * sel= lex->current_select; + if (sel->set_braces(1)) + { + yyerror(ER(ER_SYNTAX_ERROR)); + YYABORT; + } + if (sel->linkage == UNION_TYPE && + !sel->master_unit()->first_select()->braces) + { + yyerror(ER(ER_SYNTAX_ERROR)); + YYABORT; + } + /* select in braces, can't contain global parameters */ + if (sel->master_unit()->fake_select_lex) + sel->master_unit()->global_parameters= + sel->master_unit()->fake_select_lex; + } + | '(' select_paren ')'; + +select_init2: + select_part2 + { + LEX *lex= Lex; + SELECT_LEX * sel= lex->current_select; + if (lex->current_select->set_braces(0)) + { + yyerror(ER(ER_SYNTAX_ERROR)); + YYABORT; + } + if (sel->linkage == UNION_TYPE && + sel->master_unit()->first_select()->braces) + { + yyerror(ER(ER_SYNTAX_ERROR)); + YYABORT; + } + } + union_clause + ; select_part2: { - LEX *lex=Lex; - mysql_init_select(lex); + LEX *lex= Lex; + SELECT_LEX *sel= lex->current_select; + if (sel->linkage != UNION_TYPE) + mysql_init_select(lex); + lex->current_select->parsing_place= SELECT_LIST; + } + select_options select_item_list + { + Select->parsing_place= NO_MATTER; } - select_options select_item_list select_into select_lock_type; + select_into select_lock_type; select_into: - limit_clause {} - | into + opt_order_clause opt_limit_clause {} + | into | select_from | into select_from | select_from into; select_from: - FROM join_table_list where_clause group_clause having_clause opt_order_clause limit_clause procedure_clause; - + FROM join_table_list where_clause group_clause having_clause + opt_order_clause opt_limit_clause procedure_clause + | FROM DUAL_SYM where_clause opt_limit_clause + /* oracle compatibility: oracle always requires FROM clause, + and DUAL is system table without fields. + Is "SELECT 1 FROM DUAL" any better than "SELECT 1" ? 
+ Hmmm :) */ + ; select_options: /* empty*/ - | select_option_list; + | select_option_list + { + if (test_all_bits(Select->options, SELECT_ALL | SELECT_DISTINCT)) + { + net_printf(Lex->thd, ER_WRONG_USAGE, "ALL", "DISTINCT"); + YYABORT; + } + } + ; select_option_list: select_option_list select_option @@ -1562,7 +2491,7 @@ select_option: YYABORT; Lex->lock_option= TL_READ_HIGH_PRIORITY; } - | DISTINCT { Select->options|= SELECT_DISTINCT; } + | DISTINCT { Select->options|= SELECT_DISTINCT; } | SQL_SMALL_RESULT { Select->options|= SELECT_SMALL_RESULT; } | SQL_BIG_RESULT { Select->options|= SELECT_BIG_RESULT; } | SQL_BUFFER_RESULT @@ -1577,12 +2506,12 @@ select_option: YYABORT; Select->options|= OPTION_FOUND_ROWS; } - | SQL_NO_CACHE_SYM { current_thd->safe_to_cache_query=0; } + | SQL_NO_CACHE_SYM { Lex->safe_to_cache_query=0; } | SQL_CACHE_SYM { Lex->select_lex.options|= OPTION_TO_QUERY_CACHE; } - | ALL {} + | ALL { Select->options|= SELECT_ALL; } ; select_lock_type: @@ -1590,18 +2519,15 @@ select_lock_type: | FOR_SYM UPDATE_SYM { LEX *lex=Lex; - if (check_simple_select()) - YYABORT; - lex->lock_option= TL_WRITE; - lex->thd->safe_to_cache_query=0; + lex->current_select->set_lock_for_tables(TL_WRITE); + lex->safe_to_cache_query=0; } | LOCK_SYM IN_SYM SHARE_SYM MODE_SYM { LEX *lex=Lex; - if (check_simple_select()) - YYABORT; - lex->lock_option= TL_READ_WITH_SHARED_LOCKS; - lex->thd->safe_to_cache_query=0; + lex->current_select-> + set_lock_for_tables(TL_READ_WITH_SHARED_LOCKS); + lex->safe_to_cache_query=0; } ; @@ -1610,20 +2536,22 @@ select_item_list: | select_item | '*' { - if (add_item_to_list(new Item_field(NULL,NULL,"*"))) + THD *thd= YYTHD; + if (add_item_to_list(thd, new Item_field(NULL, NULL, "*"))) YYABORT; + (thd->lex->current_select->with_wild)++; }; select_item: remember_name select_item2 remember_end select_alias { - if (add_item_to_list($2)) + if (add_item_to_list(YYTHD, $2)) YYABORT; if ($4.str) - $2->set_name($4.str); + $2->set_name($4.str,$4.length,system_charset_info); else if (!$2->name) - $2->set_name($1,(uint) ($3 - $1)); + $2->set_name($1,(uint) ($3 - $1), YYTHD->charset()); }; remember_name: @@ -1637,230 +2565,419 @@ select_item2: | expr { $$=$1; }; select_alias: - { $$.str=0;} - | AS ident { $$=$2; } - | AS TEXT_STRING { $$=$2; } - | ident { $$=$1; } - | TEXT_STRING { $$=$1; }; + /* empty */ { $$.str=0;} + | AS ident { $$=$2; } + | AS TEXT_STRING_sys { $$=$2; } + | ident { $$=$1; } + | TEXT_STRING_sys { $$=$1; } + ; optional_braces: /* empty */ {} | '(' ')' {}; /* all possible expressions */ -expr: expr_expr {$$ = $1; } - | simple_expr {$$ = $1; }; +expr: + expr_expr { $$= $1; } + | simple_expr { $$= $1; } + ; + +comp_op: EQ { $$ = &comp_eq_creator; } + | GE { $$ = &comp_ge_creator; } + | GT_SYM { $$ = &comp_gt_creator; } + | LE { $$ = &comp_le_creator; } + | LT { $$ = &comp_lt_creator; } + | NE { $$ = &comp_ne_creator; } + ; + +all_or_any: ALL { $$ = 1; } + | ANY_SYM { $$ = 0; } + ; /* expressions that begin with 'expr' */ expr_expr: - expr IN_SYM '(' expr_list ')' - { $$= new Item_func_in($1,*$4); } + expr IN_SYM '(' expr_list ')' + { $4->push_front($1); $$= new Item_func_in(*$4); } | expr NOT IN_SYM '(' expr_list ')' - { $$= new Item_func_not(new Item_func_in($1,*$5)); } - | expr BETWEEN_SYM no_and_expr AND expr + { + $5->push_front($1); + Item_func_in *item= new Item_func_in(*$5); + item->negate(); + $$= item; + } + | expr IN_SYM in_subselect + { $$= new Item_in_subselect($1, $3); } + | expr NOT IN_SYM in_subselect + { + $$= new Item_func_not(new Item_in_subselect($1, 
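/*
  Examples of expressions the comp_op/all_or_any, IN-subquery, SOUNDS LIKE
  and DIV alternatives are meant to parse (tables t1, t2 with columns a, b
  are assumed names):

    SELECT 1 FROM DUAL;
    SELECT a FROM t1 WHERE a IN (SELECT b FROM t2);
    SELECT a FROM t1 WHERE a > ALL (SELECT b FROM t2);
    SELECT a FROM t1 WHERE a SOUNDS LIKE 'smith';
    SELECT 7 DIV 2, 7 MOD 2;
*/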
$4)); + } + | expr BETWEEN_SYM no_and_expr AND_SYM expr { $$= new Item_func_between($1,$3,$5); } - | expr NOT BETWEEN_SYM no_and_expr AND expr - { $$= new Item_func_not(new Item_func_between($1,$4,$6)); } - | expr OR_OR_CONCAT expr { $$= or_or_concat($1,$3); } - | expr OR expr { $$= new Item_cond_or($1,$3); } + | expr NOT BETWEEN_SYM no_and_expr AND_SYM expr + { + Item_func_between *item= new Item_func_between($1,$4,$6); + item->negate(); + $$= item; + } + | expr OR_OR_CONCAT expr { $$= or_or_concat(YYTHD, $1,$3); } + | expr OR_SYM expr { $$= new Item_cond_or($1,$3); } | expr XOR expr { $$= new Item_cond_xor($1,$3); } - | expr AND expr { $$= new Item_cond_and($1,$3); } - | expr LIKE simple_expr opt_escape { $$= new Item_func_like($1,$3,$4); } - | expr NOT LIKE simple_expr opt_escape { $$= new Item_func_not(new Item_func_like($1,$4,$5));} + | expr AND_SYM expr { $$= new Item_cond_and($1,$3); } + | expr SOUNDS_SYM LIKE expr + { + $$= new Item_func_eq(new Item_func_soundex($1), + new Item_func_soundex($4)); + } + | expr LIKE simple_expr opt_escape + { $$= new Item_func_like($1,$3,$4); } + | expr NOT LIKE simple_expr opt_escape + { $$= new Item_func_not(new Item_func_like($1,$4,$5));} | expr REGEXP expr { $$= new Item_func_regex($1,$3); } - | expr NOT REGEXP expr { $$= new Item_func_not(new Item_func_regex($1,$4)); } + | expr NOT REGEXP expr + { $$= new Item_func_not(new Item_func_regex($1,$4)); } | expr IS NULL_SYM { $$= new Item_func_isnull($1); } | expr IS NOT NULL_SYM { $$= new Item_func_isnotnull($1); } - | expr EQ expr { $$= new Item_func_eq($1,$3); } | expr EQUAL_SYM expr { $$= new Item_func_equal($1,$3); } - | expr GE expr { $$= new Item_func_ge($1,$3); } - | expr GT_SYM expr { $$= new Item_func_gt($1,$3); } - | expr LE expr { $$= new Item_func_le($1,$3); } - | expr LT expr { $$= new Item_func_lt($1,$3); } - | expr NE expr { $$= new Item_func_ne($1,$3); } + | expr comp_op expr %prec EQ { $$= (*$2)(0)->create($1,$3); } + | expr comp_op all_or_any in_subselect %prec EQ + { + $$= all_any_subquery_creator($1, $2, $3, $4); + } | expr SHIFT_LEFT expr { $$= new Item_func_shift_left($1,$3); } | expr SHIFT_RIGHT expr { $$= new Item_func_shift_right($1,$3); } | expr '+' expr { $$= new Item_func_plus($1,$3); } | expr '-' expr { $$= new Item_func_minus($1,$3); } | expr '*' expr { $$= new Item_func_mul($1,$3); } | expr '/' expr { $$= new Item_func_div($1,$3); } + | expr DIV_SYM expr { $$= new Item_func_int_div($1,$3); } + | expr MOD_SYM expr { $$= new Item_func_mod($1,$3); } | expr '|' expr { $$= new Item_func_bit_or($1,$3); } - | expr '^' expr { $$= new Item_func_bit_xor($1,$3); } + | expr '^' expr { $$= new Item_func_bit_xor($1,$3); } | expr '&' expr { $$= new Item_func_bit_and($1,$3); } | expr '%' expr { $$= new Item_func_mod($1,$3); } - | expr '+' INTERVAL_SYM expr interval - { $$= new Item_date_add_interval($1,$4,$5,0); } - | expr '-' INTERVAL_SYM expr interval - { $$= new Item_date_add_interval($1,$4,$5,1); }; + | expr '+' interval_expr interval + { $$= new Item_date_add_interval($1,$3,$4,0); } + | expr '-' interval_expr interval + { $$= new Item_date_add_interval($1,$3,$4,1); } + ; /* expressions that begin with 'expr' that do NOT follow IN_SYM */ no_in_expr: - no_in_expr BETWEEN_SYM no_and_expr AND expr + no_in_expr BETWEEN_SYM no_and_expr AND_SYM expr { $$= new Item_func_between($1,$3,$5); } - | no_in_expr NOT BETWEEN_SYM no_and_expr AND expr - { $$= new Item_func_not(new Item_func_between($1,$4,$6)); } - | no_in_expr OR_OR_CONCAT expr { $$= or_or_concat($1,$3); } - | no_in_expr OR expr { 
$$= new Item_cond_or($1,$3); } + | no_in_expr NOT BETWEEN_SYM no_and_expr AND_SYM expr + { + Item_func_between *item= new Item_func_between($1,$4,$6); + item->negate(); + $$= item; + } + | no_in_expr OR_OR_CONCAT expr { $$= or_or_concat(YYTHD, $1,$3); } + | no_in_expr OR_SYM expr { $$= new Item_cond_or($1,$3); } | no_in_expr XOR expr { $$= new Item_cond_xor($1,$3); } - | no_in_expr AND expr { $$= new Item_cond_and($1,$3); } - | no_in_expr LIKE simple_expr opt_escape { $$= new Item_func_like($1,$3,$4); } - | no_in_expr NOT LIKE simple_expr opt_escape { $$= new Item_func_not(new Item_func_like($1,$4,$5)); } + | no_in_expr AND_SYM expr { $$= new Item_cond_and($1,$3); } + | no_in_expr SOUNDS_SYM LIKE expr + { + $$= new Item_func_eq(new Item_func_soundex($1), + new Item_func_soundex($4)); + } + | no_in_expr LIKE simple_expr opt_escape + { $$= new Item_func_like($1,$3,$4); } + | no_in_expr NOT LIKE simple_expr opt_escape + { $$= new Item_func_not(new Item_func_like($1,$4,$5)); } | no_in_expr REGEXP expr { $$= new Item_func_regex($1,$3); } - | no_in_expr NOT REGEXP expr { $$= new Item_func_not(new Item_func_regex($1,$4)); } + | no_in_expr NOT REGEXP expr + { $$= new Item_func_not(new Item_func_regex($1,$4)); } | no_in_expr IS NULL_SYM { $$= new Item_func_isnull($1); } | no_in_expr IS NOT NULL_SYM { $$= new Item_func_isnotnull($1); } - | no_in_expr EQ expr { $$= new Item_func_eq($1,$3); } | no_in_expr EQUAL_SYM expr { $$= new Item_func_equal($1,$3); } - | no_in_expr GE expr { $$= new Item_func_ge($1,$3); } - | no_in_expr GT_SYM expr { $$= new Item_func_gt($1,$3); } - | no_in_expr LE expr { $$= new Item_func_le($1,$3); } - | no_in_expr LT expr { $$= new Item_func_lt($1,$3); } - | no_in_expr NE expr { $$= new Item_func_ne($1,$3); } + | no_in_expr comp_op expr %prec EQ { $$= (*$2)(0)->create($1,$3); } + | no_in_expr comp_op all_or_any in_subselect %prec EQ + { + all_any_subquery_creator($1, $2, $3, $4); + } | no_in_expr SHIFT_LEFT expr { $$= new Item_func_shift_left($1,$3); } | no_in_expr SHIFT_RIGHT expr { $$= new Item_func_shift_right($1,$3); } | no_in_expr '+' expr { $$= new Item_func_plus($1,$3); } | no_in_expr '-' expr { $$= new Item_func_minus($1,$3); } | no_in_expr '*' expr { $$= new Item_func_mul($1,$3); } | no_in_expr '/' expr { $$= new Item_func_div($1,$3); } + | no_in_expr DIV_SYM expr { $$= new Item_func_int_div($1,$3); } | no_in_expr '|' expr { $$= new Item_func_bit_or($1,$3); } | no_in_expr '^' expr { $$= new Item_func_bit_xor($1,$3); } | no_in_expr '&' expr { $$= new Item_func_bit_and($1,$3); } | no_in_expr '%' expr { $$= new Item_func_mod($1,$3); } - | no_in_expr '+' INTERVAL_SYM expr interval - { $$= new Item_date_add_interval($1,$4,$5,0); } - | no_in_expr '-' INTERVAL_SYM expr interval - { $$= new Item_date_add_interval($1,$4,$5,1); } + | no_in_expr MOD_SYM expr { $$= new Item_func_mod($1,$3); } + | no_in_expr '+' interval_expr interval + { $$= new Item_date_add_interval($1,$3,$4,0); } + | no_in_expr '-' interval_expr interval + { $$= new Item_date_add_interval($1,$3,$4,1); } | simple_expr; /* expressions that begin with 'expr' that does NOT follow AND */ no_and_expr: - no_and_expr IN_SYM '(' expr_list ')' - { $$= new Item_func_in($1,*$4); } + no_and_expr IN_SYM '(' expr_list ')' + { $4->push_front($1); $$= new Item_func_in(*$4); } | no_and_expr NOT IN_SYM '(' expr_list ')' - { $$= new Item_func_not(new Item_func_in($1,*$5)); } - | no_and_expr BETWEEN_SYM no_and_expr AND expr + { + $5->push_front($1); + Item_func_in *item= new Item_func_in(*$5); + item->negate(); + $$= item; + } + | 
no_and_expr IN_SYM in_subselect + { $$= new Item_in_subselect($1, $3); } + | no_and_expr NOT IN_SYM in_subselect + { + $$= new Item_func_not(new Item_in_subselect($1, $4)); + } + | no_and_expr BETWEEN_SYM no_and_expr AND_SYM expr { $$= new Item_func_between($1,$3,$5); } - | no_and_expr NOT BETWEEN_SYM no_and_expr AND expr - { $$= new Item_func_not(new Item_func_between($1,$4,$6)); } - | no_and_expr OR_OR_CONCAT expr { $$= or_or_concat($1,$3); } - | no_and_expr OR expr { $$= new Item_cond_or($1,$3); } + | no_and_expr NOT BETWEEN_SYM no_and_expr AND_SYM expr + { + Item_func_between *item= new Item_func_between($1,$4,$6); + item->negate(); + $$= item; + } + | no_and_expr OR_OR_CONCAT expr { $$= or_or_concat(YYTHD, $1,$3); } + | no_and_expr OR_SYM expr { $$= new Item_cond_or($1,$3); } | no_and_expr XOR expr { $$= new Item_cond_xor($1,$3); } - | no_and_expr LIKE simple_expr opt_escape { $$= new Item_func_like($1,$3,$4); } - | no_and_expr NOT LIKE simple_expr opt_escape { $$= new Item_func_not(new Item_func_like($1,$4,$5)); } + | no_and_expr SOUNDS_SYM LIKE expr + { + $$= new Item_func_eq(new Item_func_soundex($1), + new Item_func_soundex($4)); + } + | no_and_expr LIKE simple_expr opt_escape + { $$= new Item_func_like($1,$3,$4); } + | no_and_expr NOT LIKE simple_expr opt_escape + { $$= new Item_func_not(new Item_func_like($1,$4,$5)); } | no_and_expr REGEXP expr { $$= new Item_func_regex($1,$3); } - | no_and_expr NOT REGEXP expr { $$= new Item_func_not(new Item_func_regex($1,$4)); } + | no_and_expr NOT REGEXP expr + { $$= new Item_func_not(new Item_func_regex($1,$4)); } | no_and_expr IS NULL_SYM { $$= new Item_func_isnull($1); } | no_and_expr IS NOT NULL_SYM { $$= new Item_func_isnotnull($1); } - | no_and_expr EQ expr { $$= new Item_func_eq($1,$3); } | no_and_expr EQUAL_SYM expr { $$= new Item_func_equal($1,$3); } - | no_and_expr GE expr { $$= new Item_func_ge($1,$3); } - | no_and_expr GT_SYM expr { $$= new Item_func_gt($1,$3); } - | no_and_expr LE expr { $$= new Item_func_le($1,$3); } - | no_and_expr LT expr { $$= new Item_func_lt($1,$3); } - | no_and_expr NE expr { $$= new Item_func_ne($1,$3); } + | no_and_expr comp_op expr %prec EQ { $$= (*$2)(0)->create($1,$3); } + | no_and_expr comp_op all_or_any in_subselect %prec EQ + { + all_any_subquery_creator($1, $2, $3, $4); + } | no_and_expr SHIFT_LEFT expr { $$= new Item_func_shift_left($1,$3); } | no_and_expr SHIFT_RIGHT expr { $$= new Item_func_shift_right($1,$3); } | no_and_expr '+' expr { $$= new Item_func_plus($1,$3); } | no_and_expr '-' expr { $$= new Item_func_minus($1,$3); } | no_and_expr '*' expr { $$= new Item_func_mul($1,$3); } | no_and_expr '/' expr { $$= new Item_func_div($1,$3); } + | no_and_expr DIV_SYM expr { $$= new Item_func_int_div($1,$3); } | no_and_expr '|' expr { $$= new Item_func_bit_or($1,$3); } | no_and_expr '^' expr { $$= new Item_func_bit_xor($1,$3); } | no_and_expr '&' expr { $$= new Item_func_bit_and($1,$3); } | no_and_expr '%' expr { $$= new Item_func_mod($1,$3); } - | no_and_expr '+' INTERVAL_SYM expr interval - { $$= new Item_date_add_interval($1,$4,$5,0); } - | no_and_expr '-' INTERVAL_SYM expr interval - { $$= new Item_date_add_interval($1,$4,$5,1); } + | no_and_expr MOD_SYM expr { $$= new Item_func_mod($1,$3); } + | no_and_expr '+' interval_expr interval + { $$= new Item_date_add_interval($1,$3,$4,0); } + | no_and_expr '-' interval_expr interval + { $$= new Item_date_add_interval($1,$3,$4,1); } | simple_expr; +interval_expr: + INTERVAL_SYM expr { $$=$2; } + ; + simple_expr: simple_ident + | simple_expr COLLATE_SYM 
ident_or_text %prec NEG + { + $$= new Item_func_set_collation($1, + new Item_string($3.str, + $3.length, + YYTHD->charset())); + } | literal + | param_marker | '@' ident_or_text SET_VAR expr { $$= new Item_func_set_user_var($2,$4); - current_thd->safe_to_cache_query=0; + Lex->uncacheable(UNCACHEABLE_RAND); } - | '@' ident_or_text + | '@' ident_or_text { $$= new Item_func_get_user_var($2); - current_thd->safe_to_cache_query=0; + Lex->uncacheable(UNCACHEABLE_RAND); } - | '@' '@' opt_var_ident_type ident_or_text + | '@' '@' opt_var_ident_type ident_or_text opt_component { - if (!($$= get_system_var((enum_var_type) $3, $4))) + + if ($4.str && $5.str && check_reserved_words(&$4)) + { + yyerror(ER(ER_SYNTAX_ERROR)); + YYABORT; + } + if (!($$= get_system_var(YYTHD, (enum_var_type) $3, $4, $5))) YYABORT; } | sum_expr - | '-' expr %prec NEG { $$= new Item_func_neg($2); } + | '+' expr %prec NEG { $$= $2; } + | '-' expr %prec NEG { $$= new Item_func_neg($2); } | '~' expr %prec NEG { $$= new Item_func_bit_neg($2); } - | NOT expr %prec NEG { $$= new Item_func_not($2); } - | '!' expr %prec NEG { $$= new Item_func_not($2); } + | NOT expr %prec NEG + { + $$= negate_expression(YYTHD, $2); + } + | '!' expr %prec NEG + { + $$= negate_expression(YYTHD, $2); + } | '(' expr ')' { $$= $2; } + | '(' expr ',' expr_list ')' + { + $4->push_front($2); + $$= new Item_row(*$4); + } + | ROW_SYM '(' expr ',' expr_list ')' + { + $5->push_front($3); + $$= new Item_row(*$5); + } + | EXISTS exists_subselect { $$= $2; } + | singlerow_subselect { $$= $1; } | '{' ident expr '}' { $$= $3; } - | MATCH ident_list_arg AGAINST '(' expr ')' - { Select->ftfunc_list.push_back((Item_func_match *) - ($$=new Item_func_match_nl(*$2,$5))); } - | MATCH ident_list_arg AGAINST '(' expr IN_SYM BOOLEAN_SYM MODE_SYM ')' - { Select->ftfunc_list.push_back((Item_func_match *) - ($$=new Item_func_match_bool(*$2,$5))); } - | BINARY expr %prec NEG { $$= new Item_func_binary($2); } - | CAST_SYM '(' expr AS cast_type ')' { $$= create_func_cast($3, $5); } + | MATCH ident_list_arg AGAINST '(' expr fulltext_options ')' + { $2->push_front($5); + Select->add_ftfunc_to_list((Item_func_match*) + ($$=new Item_func_match(*$2,$6))); } + | ASCII_SYM '(' expr ')' { $$= new Item_func_ascii($3); } + | BINARY expr %prec NEG + { + $$= create_func_cast($2, ITEM_CAST_CHAR, -1, &my_charset_bin); + } + | CAST_SYM '(' expr AS cast_type ')' + { + $$= create_func_cast($3, $5, + Lex->length ? atoi(Lex->length) : -1, + Lex->charset); + } | CASE_SYM opt_expr WHEN_SYM when_list opt_else END { $$= new Item_func_case(* $4, $2, $5 ); } - | CONVERT_SYM '(' expr ',' cast_type ')' { $$= create_func_cast($3, $5); } + | CONVERT_SYM '(' expr ',' cast_type ')' + { + $$= create_func_cast($3, $5, + Lex->length ? 
atoi(Lex->length) : -1, + Lex->charset); + } + | CONVERT_SYM '(' expr USING charset_name ')' + { $$= new Item_func_conv_charset($3,$5); } + | DEFAULT '(' simple_ident ')' + { $$= new Item_default_value($3); } + | VALUES '(' simple_ident ')' + { $$= new Item_insert_value($3); } | FUNC_ARG0 '(' ')' - { $$= ((Item*(*)(void))($1.symbol->create_func))();} + { + if (!$1.symbol->create_func) + { + net_printf(Lex->thd, ER_FEATURE_DISABLED, + $1.symbol->group->name, + $1.symbol->group->needed_define); + YYABORT; + } + $$= ((Item*(*)(void))($1.symbol->create_func))(); + } | FUNC_ARG1 '(' expr ')' - { $$= ((Item*(*)(Item*))($1.symbol->create_func))($3);} + { + if (!$1.symbol->create_func) + { + net_printf(Lex->thd, ER_FEATURE_DISABLED, + $1.symbol->group->name, + $1.symbol->group->needed_define); + YYABORT; + } + $$= ((Item*(*)(Item*))($1.symbol->create_func))($3); + } | FUNC_ARG2 '(' expr ',' expr ')' - { $$= ((Item*(*)(Item*,Item*))($1.symbol->create_func))($3,$5);} + { + if (!$1.symbol->create_func) + { + net_printf(Lex->thd, ER_FEATURE_DISABLED, + $1.symbol->group->name, + $1.symbol->group->needed_define); + YYABORT; + } + $$= ((Item*(*)(Item*,Item*))($1.symbol->create_func))($3,$5); + } | FUNC_ARG3 '(' expr ',' expr ',' expr ')' - { $$= ((Item*(*)(Item*,Item*,Item*))($1.symbol->create_func))($3,$5,$7);} + { + if (!$1.symbol->create_func) + { + net_printf(Lex->thd, ER_FEATURE_DISABLED, + $1.symbol->group->name, + $1.symbol->group->needed_define); + YYABORT; + } + $$= ((Item*(*)(Item*,Item*,Item*))($1.symbol->create_func))($3,$5,$7); + } + | ADDDATE_SYM '(' expr ',' expr ')' + { $$= new Item_date_add_interval($3, $5, INTERVAL_DAY, 0);} + | ADDDATE_SYM '(' expr ',' INTERVAL_SYM expr interval ')' + { $$= new Item_date_add_interval($3, $6, $7, 0); } | ATAN '(' expr ')' { $$= new Item_func_atan($3); } | ATAN '(' expr ',' expr ')' { $$= new Item_func_atan($3,$5); } | CHAR_SYM '(' expr_list ')' { $$= new Item_func_char(*$3); } + | CHAR_SYM '(' expr_list USING charset_name ')' + { $$= new Item_func_char(*$3, $5); } + | CHARSET '(' expr ')' + { $$= new Item_func_charset($3); } | COALESCE '(' expr_list ')' { $$= new Item_func_coalesce(* $3); } + | COLLATION_SYM '(' expr ')' + { $$= new Item_func_collation($3); } | CONCAT '(' expr_list ')' { $$= new Item_func_concat(* $3); } | CONCAT_WS '(' expr ',' expr_list ')' - { $$= new Item_func_concat_ws($3, *$5); } + { $5->push_front($3); $$= new Item_func_concat_ws(*$5); } + | CONVERT_TZ_SYM '(' expr ',' expr ',' expr ')' + { + Lex->time_zone_tables_used= &fake_time_zone_tables_list; + $$= new Item_func_convert_tz($3, $5, $7); + } | CURDATE optional_braces - { $$= new Item_func_curdate(); current_thd->safe_to_cache_query=0; } + { $$= new Item_func_curdate_local(); Lex->safe_to_cache_query=0; } | CURTIME optional_braces - { $$= new Item_func_curtime(); current_thd->safe_to_cache_query=0; } + { $$= new Item_func_curtime_local(); Lex->safe_to_cache_query=0; } | CURTIME '(' expr ')' - { - $$= new Item_func_curtime($3); - current_thd->safe_to_cache_query=0; + { + $$= new Item_func_curtime_local($3); + Lex->safe_to_cache_query=0; } - | DATE_ADD_INTERVAL '(' expr ',' INTERVAL_SYM expr interval ')' - { $$= new Item_date_add_interval($3,$6,$7,0); } - | DATE_SUB_INTERVAL '(' expr ',' INTERVAL_SYM expr interval ')' - { $$= new Item_date_add_interval($3,$6,$7,1); } + | CURRENT_USER optional_braces + { $$= create_func_current_user(); } + | DATE_ADD_INTERVAL '(' expr ',' interval_expr interval ')' + { $$= new Item_date_add_interval($3,$5,$6,0); } + | DATE_SUB_INTERVAL '(' expr 
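/*
  The CAST/CONVERT and date-arithmetic alternatives nearby are intended to
  handle statements such as these (CONVERT_TZ with named zones additionally
  assumes the mysql time zone tables have been loaded):

    SELECT CAST('2004-01-01' AS DATE), CAST(1 AS CHAR(5));
    SELECT CONVERT('abc' USING utf8);
    SELECT CONVERT_TZ('2004-01-01 12:00:00','UTC','MET');
    SELECT ADDDATE('2004-01-01', INTERVAL 1 MONTH);
    SELECT CURRENT_USER();
*/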
',' interval_expr interval ')' + { $$= new Item_date_add_interval($3,$5,$6,1); } | DATABASE '(' ')' - { + { $$= new Item_func_database(); - current_thd->safe_to_cache_query=0; + Lex->safe_to_cache_query=0; } + | DATE_SYM '(' expr ')' + { $$= new Item_date_typecast($3); } + | DAY_SYM '(' expr ')' + { $$= new Item_func_dayofmonth($3); } | ELT_FUNC '(' expr ',' expr_list ')' - { $$= new Item_func_elt($3, *$5); } + { $5->push_front($3); $$= new Item_func_elt(*$5); } | MAKE_SET_SYM '(' expr ',' expr_list ')' { $$= new Item_func_make_set($3, *$5); } | ENCRYPT '(' expr ')' { $$= new Item_func_encrypt($3); - current_thd->safe_to_cache_query=0; + Lex->uncacheable(UNCACHEABLE_RAND); } | ENCRYPT '(' expr ',' expr ')' { $$= new Item_func_encrypt($3,$5); } - | DECODE_SYM '(' expr ',' TEXT_STRING ')' + | DECODE_SYM '(' expr ',' TEXT_STRING_literal ')' { $$= new Item_func_decode($3,$5.str); } - | ENCODE_SYM '(' expr ',' TEXT_STRING ')' + | ENCODE_SYM '(' expr ',' TEXT_STRING_literal ')' { $$= new Item_func_encode($3,$5.str); } | DES_DECRYPT_SYM '(' expr ')' { $$= new Item_func_des_decrypt($3); } @@ -1885,28 +3002,46 @@ simple_expr: $$= new Item_func_date_format (new Item_func_from_unixtime($3),$5,0); } | FIELD_FUNC '(' expr ',' expr_list ')' - { $$= new Item_func_field($3, *$5); } + { $5->push_front($3); $$= new Item_func_field(*$5); } + | geometry_function + { +#ifdef HAVE_SPATIAL + $$= $1; +#else + net_printf(Lex->thd, ER_FEATURE_DISABLED, + sym_group_geom.name, sym_group_geom.needed_define); + YYABORT; +#endif + } + | GET_FORMAT '(' date_time_type ',' expr ')' + { $$= new Item_func_get_format($3, $5); } | HOUR_SYM '(' expr ')' { $$= new Item_func_hour($3); } | IF '(' expr ',' expr ',' expr ')' { $$= new Item_func_if($3,$5,$7); } | INSERT '(' expr ',' expr ',' expr ',' expr ')' { $$= new Item_func_insert($3,$5,$7,$9); } - | INTERVAL_SYM expr interval '+' expr + | interval_expr interval '+' expr /* we cannot put interval before - */ - { $$= new Item_date_add_interval($5,$2,$3,0); } - | INTERVAL_SYM '(' expr ',' expr_list ')' - { $$= new Item_func_interval($3,* $5); } + { $$= new Item_date_add_interval($4,$1,$2,0); } + | interval_expr + { + if ($1->type() != Item::ROW_ITEM) + { + yyerror(ER(ER_SYNTAX_ERROR)); + YYABORT; + } + $$= new Item_func_interval((Item_row *)$1); + } | LAST_INSERT_ID '(' ')' { - $$= new Item_int((char*) "last_insert_id()", - current_thd->insert_id(),21); - current_thd->safe_to_cache_query=0; + $$= new Item_func_last_insert_id(); + Lex->safe_to_cache_query= 0; } | LAST_INSERT_ID '(' expr ')' { - $$= new Item_func_set_last_insert_id($3); - current_thd->safe_to_cache_query=0; + $$= new Item_func_last_insert_id($3); + Lex->safe_to_cache_query= 0; } | LEFT '(' expr ',' expr ')' { $$= new Item_func_left($3,$5); } @@ -1914,42 +3049,50 @@ simple_expr: { $$= new Item_func_locate($5,$3); } | LOCATE '(' expr ',' expr ',' expr ')' { $$= new Item_func_locate($5,$3,$7); } - | GREATEST_SYM '(' expr ',' expr_list ')' + | GREATEST_SYM '(' expr ',' expr_list ')' { $5->push_front($3); $$= new Item_func_max(*$5); } | LEAST_SYM '(' expr ',' expr_list ')' { $5->push_front($3); $$= new Item_func_min(*$5); } | LOG_SYM '(' expr ')' - { $$= new Item_func_log($3); } + { $$= new Item_func_log($3); } | LOG_SYM '(' expr ',' expr ')' - { $$= new Item_func_log($3, $5); } + { $$= new Item_func_log($3, $5); } | MASTER_POS_WAIT '(' expr ',' expr ')' - { + { $$= new Item_master_pos_wait($3, $5); - current_thd->safe_to_cache_query=0; - } + Lex->safe_to_cache_query=0; + } | MASTER_POS_WAIT '(' expr ',' expr ',' expr 
')' - { + { $$= new Item_master_pos_wait($3, $5, $7); - current_thd->safe_to_cache_query=0; + Lex->safe_to_cache_query=0; } + | MICROSECOND_SYM '(' expr ')' + { $$= new Item_func_microsecond($3); } | MINUTE_SYM '(' expr ')' { $$= new Item_func_minute($3); } + | MOD_SYM '(' expr ',' expr ')' + { $$ = new Item_func_mod( $3, $5); } | MONTH_SYM '(' expr ')' { $$= new Item_func_month($3); } | NOW_SYM optional_braces - { $$= new Item_func_now(); current_thd->safe_to_cache_query=0;} + { $$= new Item_func_now_local(); Lex->safe_to_cache_query=0;} | NOW_SYM '(' expr ')' - { $$= new Item_func_now($3); current_thd->safe_to_cache_query=0;} + { $$= new Item_func_now_local($3); Lex->safe_to_cache_query=0;} | PASSWORD '(' expr ')' { - $$= new Item_func_password($3); - } + $$= YYTHD->variables.old_passwords ? + (Item *) new Item_func_old_password($3) : + (Item *) new Item_func_password($3); + } + | OLD_PASSWORD '(' expr ')' + { $$= new Item_func_old_password($3); } | POSITION_SYM '(' no_in_expr IN_SYM expr ')' { $$ = new Item_func_locate($5,$3); } | RAND '(' expr ')' - { $$= new Item_func_rand($3); current_thd->safe_to_cache_query=0;} + { $$= new Item_func_rand($3); Lex->uncacheable(UNCACHEABLE_RAND);} | RAND '(' ')' - { $$= new Item_func_rand(); current_thd->safe_to_cache_query=0;} + { $$= new Item_func_rand(); Lex->uncacheable(UNCACHEABLE_RAND);} | REPLACE '(' expr ',' expr ',' expr ')' { $$= new Item_func_replace($3,$5,$7); } | RIGHT '(' expr ',' expr ')' @@ -1957,6 +3100,10 @@ simple_expr: | ROUND '(' expr ')' { $$= new Item_func_round($3, new Item_int((char*)"0",0,1),0); } | ROUND '(' expr ',' expr ')' { $$= new Item_func_round($3,$5,0); } + | SUBDATE_SYM '(' expr ',' expr ')' + { $$= new Item_date_add_interval($3, $5, INTERVAL_DAY, 1);} + | SUBDATE_SYM '(' expr ',' INTERVAL_SYM expr interval ')' + { $$= new Item_date_add_interval($3, $6, $7, 1); } | SECOND_SYM '(' expr ')' { $$= new Item_func_second($3); } | SUBSTRING '(' expr ',' expr ',' expr ')' @@ -1969,33 +3116,45 @@ simple_expr: { $$= new Item_func_substr($3,$5); } | SUBSTRING_INDEX '(' expr ',' expr ',' expr ')' { $$= new Item_func_substr_index($3,$5,$7); } + | TIME_SYM '(' expr ')' + { $$= new Item_time_typecast($3); } + | TIMESTAMP '(' expr ')' + { $$= new Item_datetime_typecast($3); } + | TIMESTAMP '(' expr ',' expr ')' + { $$= new Item_func_add_time($3, $5, 1, 0); } | TRIM '(' expr ')' - { $$= new Item_func_trim($3,new Item_string(" ",1)); } - | TRIM '(' LEADING opt_pad FROM expr ')' + { $$= new Item_func_trim($3); } + | TRIM '(' LEADING expr FROM expr ')' { $$= new Item_func_ltrim($6,$4); } - | TRIM '(' TRAILING opt_pad FROM expr ')' + | TRIM '(' TRAILING expr FROM expr ')' { $$= new Item_func_rtrim($6,$4); } - | TRIM '(' BOTH opt_pad FROM expr ')' + | TRIM '(' BOTH expr FROM expr ')' { $$= new Item_func_trim($6,$4); } + | TRIM '(' LEADING FROM expr ')' + { $$= new Item_func_ltrim($5); } + | TRIM '(' TRAILING FROM expr ')' + { $$= new Item_func_rtrim($5); } + | TRIM '(' BOTH FROM expr ')' + { $$= new Item_func_trim($5); } | TRIM '(' expr FROM expr ')' { $$= new Item_func_trim($5,$3); } | TRUNCATE_SYM '(' expr ',' expr ')' { $$= new Item_func_round($3,$5,1); } - | UDA_CHAR_SUM '(' udf_expr_list ')' + | UDA_CHAR_SUM '(' udf_sum_expr_list ')' { if ($3 != NULL) $$ = new Item_sum_udf_str($1, *$3); else $$ = new Item_sum_udf_str($1); } - | UDA_FLOAT_SUM '(' udf_expr_list ')' + | UDA_FLOAT_SUM '(' udf_sum_expr_list ')' { if ($3 != NULL) $$ = new Item_sum_udf_float($1, *$3); else $$ = new Item_sum_udf_float($1); } - | UDA_INT_SUM '(' 
udf_expr_list ')' + | UDA_INT_SUM '(' udf_sum_expr_list ')' { if ($3 != NULL) $$ = new Item_sum_udf_int($1, *$3); @@ -2024,24 +3183,29 @@ simple_expr: $$ = new Item_func_udf_int($1); } | UNIQUE_USERS '(' text_literal ',' NUM ',' NUM ',' expr_list ')' - { + { $$= new Item_func_unique_users($3,atoi($5.str),atoi($7.str), * $9); } | UNIX_TIMESTAMP '(' ')' { $$= new Item_func_unix_timestamp(); - current_thd->safe_to_cache_query=0; + Lex->safe_to_cache_query=0; } | UNIX_TIMESTAMP '(' expr ')' { $$= new Item_func_unix_timestamp($3); } | USER '(' ')' - { $$= new Item_func_user(); current_thd->safe_to_cache_query=0; } + { $$= new Item_func_user(); Lex->safe_to_cache_query=0; } + | UTC_DATE_SYM optional_braces + { $$= new Item_func_curdate_utc(); Lex->safe_to_cache_query=0;} + | UTC_TIME_SYM optional_braces + { $$= new Item_func_curtime_utc(); Lex->safe_to_cache_query=0;} + | UTC_TIMESTAMP_SYM optional_braces + { $$= new Item_func_now_utc(); Lex->safe_to_cache_query=0;} | WEEK_SYM '(' expr ')' - { - LEX *lex=Lex; - $$= new Item_func_week($3,new Item_int((char*) "0", - lex->thd->variables.default_week_format,1)); - } + { + $$= new Item_func_week($3,new Item_int((char*) "0", + YYTHD->variables.default_week_format,1)); + } | WEEK_SYM '(' expr ',' expr ')' { $$= new Item_func_week($3,$5); } | YEAR_SYM '(' expr ')' @@ -2051,17 +3215,98 @@ simple_expr: | YEARWEEK '(' expr ',' expr ')' { $$= new Item_func_yearweek($3, $5); } | BENCHMARK_SYM '(' ULONG_NUM ',' expr ')' - { + { $$=new Item_func_benchmark($3,$5); - current_thd->safe_to_cache_query=0; + Lex->uncacheable(UNCACHEABLE_SIDEEFFECT); } | EXTRACT_SYM '(' interval FROM expr ')' { $$=new Item_extract( $3, $5); }; +geometry_function: + GEOMFROMTEXT '(' expr ')' + { $$= GEOM_NEW(Item_func_geometry_from_text($3)); } + | GEOMFROMTEXT '(' expr ',' expr ')' + { $$= GEOM_NEW(Item_func_geometry_from_text($3, $5)); } + | GEOMFROMWKB '(' expr ')' + { $$= GEOM_NEW(Item_func_geometry_from_wkb($3)); } + | GEOMFROMWKB '(' expr ',' expr ')' + { $$= GEOM_NEW(Item_func_geometry_from_wkb($3, $5)); } + | GEOMETRYCOLLECTION '(' expr_list ')' + { $$= GEOM_NEW(Item_func_spatial_collection(* $3, + Geometry::wkb_geometrycollection, + Geometry::wkb_point)); } + | LINESTRING '(' expr_list ')' + { $$= GEOM_NEW(Item_func_spatial_collection(* $3, + Geometry::wkb_linestring, Geometry::wkb_point)); } + | MULTILINESTRING '(' expr_list ')' + { $$= GEOM_NEW( Item_func_spatial_collection(* $3, + Geometry::wkb_multilinestring, Geometry::wkb_linestring)); } + | MLINEFROMTEXT '(' expr ')' + { $$= GEOM_NEW(Item_func_geometry_from_text($3)); } + | MLINEFROMTEXT '(' expr ',' expr ')' + { $$= GEOM_NEW(Item_func_geometry_from_text($3, $5)); } + | MPOINTFROMTEXT '(' expr ')' + { $$= GEOM_NEW(Item_func_geometry_from_text($3)); } + | MPOINTFROMTEXT '(' expr ',' expr ')' + { $$= GEOM_NEW(Item_func_geometry_from_text($3, $5)); } + | MPOLYFROMTEXT '(' expr ')' + { $$= GEOM_NEW(Item_func_geometry_from_text($3)); } + | MPOLYFROMTEXT '(' expr ',' expr ')' + { $$= GEOM_NEW(Item_func_geometry_from_text($3, $5)); } + | MULTIPOINT '(' expr_list ')' + { $$= GEOM_NEW(Item_func_spatial_collection(* $3, + Geometry::wkb_multipoint, Geometry::wkb_point)); } + | MULTIPOLYGON '(' expr_list ')' + { $$= GEOM_NEW(Item_func_spatial_collection(* $3, + Geometry::wkb_multipolygon, Geometry::wkb_polygon)); } + | POINT_SYM '(' expr ',' expr ')' + { $$= GEOM_NEW(Item_func_point($3,$5)); } + | POINTFROMTEXT '(' expr ')' + { $$= GEOM_NEW(Item_func_geometry_from_text($3)); } + | POINTFROMTEXT '(' expr ',' expr ')' + { $$= 
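/*
  The geometry_function alternatives correspond to OpenGIS-style
  constructors and only apply when the server is built with spatial support
  (HAVE_SPATIAL); a rough sketch of accepted calls:

    SELECT GeomFromText('POINT(1 1)');
    SELECT Point(1, 1);
    SELECT LineString(Point(0, 0), Point(1, 1));
*/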
GEOM_NEW(Item_func_geometry_from_text($3, $5)); } + | POLYFROMTEXT '(' expr ')' + { $$= GEOM_NEW(Item_func_geometry_from_text($3)); } + | POLYFROMTEXT '(' expr ',' expr ')' + { $$= GEOM_NEW(Item_func_geometry_from_text($3, $5)); } + | POLYGON '(' expr_list ')' + { $$= GEOM_NEW(Item_func_spatial_collection(* $3, + Geometry::wkb_polygon, Geometry::wkb_linestring)); } + | GEOMCOLLFROMTEXT '(' expr ')' + { $$= GEOM_NEW(Item_func_geometry_from_text($3)); } + | GEOMCOLLFROMTEXT '(' expr ',' expr ')' + { $$= GEOM_NEW(Item_func_geometry_from_text($3, $5)); } + | LINEFROMTEXT '(' expr ')' + { $$= GEOM_NEW(Item_func_geometry_from_text($3)); } + | LINEFROMTEXT '(' expr ',' expr ')' + { $$= GEOM_NEW(Item_func_geometry_from_text($3, $5)); } + ; + +fulltext_options: + /* nothing */ { $$= FT_NL; } + | WITH QUERY_SYM EXPANSION_SYM { $$= FT_NL | FT_EXPAND; } + | IN_SYM BOOLEAN_SYM MODE_SYM { $$= FT_BOOL; } + ; + udf_expr_list: /* empty */ { $$= NULL; } | expr_list { $$= $1;}; +udf_sum_expr_list: + { + LEX *lex= Lex; + if (lex->current_select->inc_in_sum_expr()) + { + yyerror(ER(ER_SYNTAX_ERROR)); + YYABORT; + } + } + udf_expr_list + { + Select->in_sum_expr--; + $$= $2; + }; + sum_expr: AVG_SYM '(' in_sum_expr ')' { $$=new Item_sum_avg($3); } @@ -2069,6 +3314,8 @@ sum_expr: { $$=new Item_sum_and($3); } | BIT_OR '(' in_sum_expr ')' { $$=new Item_sum_or($3); } + | BIT_XOR '(' in_sum_expr ')' + { $$=new Item_sum_xor($3); } | COUNT_SYM '(' opt_all '*' ')' { $$=new Item_sum_count(new Item_int((int32) 0L,1)); } | COUNT_SYM '(' in_sum_expr ')' @@ -2087,28 +3334,72 @@ sum_expr: { $$=new Item_sum_max($3); } | STD_SYM '(' in_sum_expr ')' { $$=new Item_sum_std($3); } + | VARIANCE_SYM '(' in_sum_expr ')' + { $$=new Item_sum_variance($3); } | SUM_SYM '(' in_sum_expr ')' - { $$=new Item_sum_sum($3); }; + { $$=new Item_sum_sum($3); } + | GROUP_CONCAT_SYM '(' opt_distinct + { Select->in_sum_expr++; } + expr_list opt_gorder_clause + opt_gconcat_separator + ')' + { + Select->in_sum_expr--; + $$=new Item_func_group_concat($3,$5,Select->gorder_list,$7); + $5->empty(); + }; + +opt_distinct: + /* empty */ { $$ = 0; } + |DISTINCT { $$ = 1; }; + +opt_gconcat_separator: + /* empty */ { $$ = new (YYTHD->mem_root) String(",",1,default_charset_info); } + |SEPARATOR_SYM text_string { $$ = $2; }; + + +opt_gorder_clause: + /* empty */ + { + Select->gorder_list = NULL; + } + | order_clause + { + SELECT_LEX *select= Select; + select->gorder_list= + (SQL_LIST*) sql_memdup((char*) &select->order_list, + sizeof(st_sql_list)); + select->order_list.empty(); + }; + in_sum_expr: opt_all - { Select->in_sum_expr++; } + { + LEX *lex= Lex; + if (lex->current_select->inc_in_sum_expr()) + { + yyerror(ER(ER_SYNTAX_ERROR)); + YYABORT; + } + } expr { Select->in_sum_expr--; - $$=$3; + $$= $3; }; cast_type: - BINARY { $$=ITEM_CAST_BINARY; } - | CHAR_SYM { $$=ITEM_CAST_CHAR; } - | SIGNED_SYM { $$=ITEM_CAST_SIGNED_INT; } - | SIGNED_SYM INT_SYM { $$=ITEM_CAST_SIGNED_INT; } - | UNSIGNED { $$=ITEM_CAST_UNSIGNED_INT; } - | UNSIGNED INT_SYM { $$=ITEM_CAST_UNSIGNED_INT; } - | DATE_SYM { $$=ITEM_CAST_DATE; } - | TIME_SYM { $$=ITEM_CAST_TIME; } - | DATETIME { $$=ITEM_CAST_DATETIME; } + BINARY opt_len { $$=ITEM_CAST_CHAR; Lex->charset= &my_charset_bin; } + | CHAR_SYM opt_len opt_binary { $$=ITEM_CAST_CHAR; } + | NCHAR_SYM opt_len { $$=ITEM_CAST_CHAR; Lex->charset= national_charset_info; } + | SIGNED_SYM { $$=ITEM_CAST_SIGNED_INT; Lex->charset= NULL; Lex->length= (char*)0; } + | SIGNED_SYM INT_SYM { $$=ITEM_CAST_SIGNED_INT; Lex->charset= NULL; Lex->length= (char*)0; } 
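/*
  Statements exercising fulltext_options and the GROUP_CONCAT, BIT_XOR and
  VARIANCE aggregates; articles(title, body) and t1(a, b) are assumed
  example tables:

    SELECT * FROM articles
      WHERE MATCH(title, body) AGAINST ('+MySQL -YourSQL' IN BOOLEAN MODE);
    SELECT a, GROUP_CONCAT(DISTINCT b ORDER BY b SEPARATOR ';')
      FROM t1 GROUP BY a;
    SELECT BIT_XOR(a), VARIANCE(a) FROM t1;
*/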
+ | UNSIGNED { $$=ITEM_CAST_UNSIGNED_INT; Lex->charset= NULL; Lex->length= (char*)0; } + | UNSIGNED INT_SYM { $$=ITEM_CAST_UNSIGNED_INT; Lex->charset= NULL; Lex->length= (char*)0; } + | DATE_SYM { $$=ITEM_CAST_DATE; Lex->charset= NULL; Lex->length= (char*)0; } + | TIME_SYM { $$=ITEM_CAST_TIME; Lex->charset= NULL; Lex->length= (char*)0; } + | DATETIME { $$=ITEM_CAST_DATETIME; Lex->charset= NULL; Lex->length= (char*)0; } ; expr_list: @@ -2135,7 +3426,7 @@ ident_list2: opt_expr: /* empty */ { $$= NULL; } - | expr { $$= $1; }; + | expr { $$= $1; }; opt_else: /* empty */ { $$= NULL; } @@ -2149,7 +3440,7 @@ when_list: when_list2: expr THEN_SYM expr { - SELECT_LEX *sel=Select; + SELECT_LEX *sel=Select; sel->when_list.head()->push_back($1); sel->when_list.head()->push_back($3); } @@ -2160,13 +3451,8 @@ when_list2: sel->when_list.head()->push_back($5); }; -opt_pad: - /* empty */ { $$=new Item_string(" ",1); } - | expr { $$=$1; }; - join_table_list: - '(' join_table_list ')' { $$=$2; } - | join_table { $$=$1; } + join_table { $$=$1; } | join_table_list ',' join_table_list { $$=$3; } | join_table_list normal_join join_table_list { $$=$3; } | join_table_list STRAIGHT_JOIN join_table_list @@ -2174,9 +3460,9 @@ join_table_list: | join_table_list normal_join join_table_list ON expr { add_join_on($3,$5); $$=$3; } | join_table_list normal_join join_table_list - USING + USING { - SELECT_LEX *sel=Select; + SELECT_LEX *sel= Select; sel->db1=$1->db; sel->table1=$1->alias; sel->db2=$3->db; sel->table2=$3->alias; } @@ -2187,7 +3473,7 @@ join_table_list: { add_join_on($5,$7); $5->outer_join|=JOIN_TYPE_LEFT; $$=$5; } | join_table_list LEFT opt_outer JOIN_SYM join_table_list { - SELECT_LEX *sel=Select; + SELECT_LEX *sel= Select; sel->db1=$1->db; sel->table1=$1->alias; sel->db2=$5->db; sel->table2=$5->alias; } @@ -2203,7 +3489,7 @@ join_table_list: { add_join_on($1,$7); $1->outer_join|=JOIN_TYPE_RIGHT; $$=$5; } | join_table_list RIGHT opt_outer JOIN_SYM join_table_list { - SELECT_LEX *sel=Select; + SELECT_LEX *sel= Select; sel->db1=$1->db; sel->table1=$1->alias; sel->db2=$5->db; sel->table2=$5->alias; } @@ -2226,20 +3512,77 @@ normal_join: join_table: { - SELECT_LEX *sel=Select; + SELECT_LEX *sel= Select; sel->use_index_ptr=sel->ignore_index_ptr=0; sel->table_join_options= 0; } table_ident opt_table_alias opt_key_definition { - SELECT_LEX *sel=Select; - if (!($$=add_table_to_list($2, $3, sel->table_join_options, - TL_UNLOCK, sel->use_index_ptr, - sel->ignore_index_ptr))) + LEX *lex= Lex; + SELECT_LEX *sel= lex->current_select; + if (!($$= sel->add_table_to_list(lex->thd, $2, $3, + sel->get_table_join_options(), + lex->lock_option, + sel->get_use_index(), + sel->get_ignore_index()))) YYABORT; } | '{' ident join_table LEFT OUTER JOIN_SYM join_table ON expr '}' - { add_join_on($7,$9); $7->outer_join|=JOIN_TYPE_LEFT; $$=$7; }; + { add_join_on($7,$9); $7->outer_join|=JOIN_TYPE_LEFT; $$=$7; } + | '(' select_derived union_opt ')' opt_table_alias + { + LEX *lex=Lex; + SELECT_LEX_UNIT *unit= lex->current_select->master_unit(); + lex->current_select= unit->outer_select(); + if (!($$= lex->current_select-> + add_table_to_list(lex->thd, new Table_ident(unit), $5, 0, + TL_READ,(List<String> *)0, + (List<String> *)0))) + + YYABORT; + } + | '(' join_table_list ')' { $$=$2; }; + +select_derived: + SELECT_SYM select_derived2 + | '(' select_derived ')' + { + LEX *lex= Lex; + SELECT_LEX * sel= lex->current_select; + if (sel->set_braces(1)) + { + yyerror(ER(ER_SYNTAX_ERROR)); + YYABORT; + } + /* select in braces, can't contain global 
parameters */ + if (sel->master_unit()->fake_select_lex) + sel->master_unit()->global_parameters= + sel->master_unit()->fake_select_lex; + }; + +select_derived2: + { + LEX *lex= Lex; + lex->derived_tables= 1; + if (lex->sql_command == (int)SQLCOM_HA_READ || + lex->sql_command == (int)SQLCOM_KILL) + { + yyerror(ER(ER_SYNTAX_ERROR)); + YYABORT; + } + if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE || + mysql_new_select(lex, 1)) + YYABORT; + mysql_init_select(lex); + lex->current_select->linkage= DERIVED_TABLE_TYPE; + lex->current_select->parsing_place= SELECT_LIST; + } + select_options select_item_list + { + Select->parsing_place= NO_MATTER; + } + opt_select_from + ; opt_outer: /* empty */ {} @@ -2249,65 +3592,93 @@ opt_key_definition: /* empty */ {} | USE_SYM key_usage_list { - SELECT_LEX *sel=Select; + SELECT_LEX *sel= Select; sel->use_index= *$2; sel->use_index_ptr= &sel->use_index; } | FORCE_SYM key_usage_list { - SELECT_LEX *sel=Select; + SELECT_LEX *sel= Select; sel->use_index= *$2; sel->use_index_ptr= &sel->use_index; sel->table_join_options|= TL_OPTION_FORCE_INDEX; } | IGNORE_SYM key_usage_list { - SELECT_LEX *sel=Select; + SELECT_LEX *sel= Select; sel->ignore_index= *$2; sel->ignore_index_ptr= &sel->ignore_index; - } - ; + }; key_usage_list: - key_or_index { Select->interval_list.empty(); } '(' key_usage_list2 ')' - { $$= &Select->interval_list; }; + key_or_index { Select->interval_list.empty(); } + '(' key_list_or_empty ')' + { $$= &Select->interval_list; } + ; + +key_list_or_empty: + /* empty */ {} + | key_usage_list2 {} + ; key_usage_list2: key_usage_list2 ',' ident - { Select->interval_list.push_back(new String((const char*) $3.str,$3.length)); } + { Select-> + interval_list.push_back(new (YYTHD->mem_root) String((const char*) $3.str, $3.length, + system_charset_info)); } | ident - { Select->interval_list.push_back(new String((const char*) $1.str,$1.length)); } + { Select-> + interval_list.push_back(new (YYTHD->mem_root) String((const char*) $1.str, $1.length, + system_charset_info)); } | PRIMARY_SYM - { Select->interval_list.push_back(new String("PRIMARY",7)); }; + { Select-> + interval_list.push_back(new (YYTHD->mem_root) String("PRIMARY", 7, + system_charset_info)); }; using_list: ident { - SELECT_LEX *sel=Select; - if (!($$= new Item_func_eq(new Item_field(sel->db1,sel->table1, $1.str), new Item_field(sel->db2,sel->table2,$1.str)))) + SELECT_LEX *sel= Select; + if (!($$= new Item_func_eq(new Item_field(sel->db1, sel->table1, + $1.str), + new Item_field(sel->db2, sel->table2, + $1.str)))) YYABORT; } | using_list ',' ident { - SELECT_LEX *sel=Select; + SELECT_LEX *sel= Select; if (!($$= new Item_cond_and(new Item_func_eq(new Item_field(sel->db1,sel->table1,$3.str), new Item_field(sel->db2,sel->table2,$3.str)), $1))) YYABORT; }; interval: DAY_HOUR_SYM { $$=INTERVAL_DAY_HOUR; } + | DAY_MICROSECOND_SYM { $$=INTERVAL_DAY_MICROSECOND; } | DAY_MINUTE_SYM { $$=INTERVAL_DAY_MINUTE; } | DAY_SECOND_SYM { $$=INTERVAL_DAY_SECOND; } | DAY_SYM { $$=INTERVAL_DAY; } + | HOUR_MICROSECOND_SYM { $$=INTERVAL_HOUR_MICROSECOND; } | HOUR_MINUTE_SYM { $$=INTERVAL_HOUR_MINUTE; } | HOUR_SECOND_SYM { $$=INTERVAL_HOUR_SECOND; } | HOUR_SYM { $$=INTERVAL_HOUR; } + | MICROSECOND_SYM { $$=INTERVAL_MICROSECOND; } + | MINUTE_MICROSECOND_SYM { $$=INTERVAL_MINUTE_MICROSECOND; } | MINUTE_SECOND_SYM { $$=INTERVAL_MINUTE_SECOND; } | MINUTE_SYM { $$=INTERVAL_MINUTE; } | MONTH_SYM { $$=INTERVAL_MONTH; } + | SECOND_MICROSECOND_SYM { $$=INTERVAL_SECOND_MICROSECOND; } | SECOND_SYM { $$=INTERVAL_SECOND; } | 
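/*
  The select_derived rules, the empty USE INDEX () list and the new
  microsecond interval units accept, for example (t1 with column a is an
  assumed table):

    SELECT dt.cnt FROM (SELECT COUNT(*) AS cnt FROM t1) AS dt;
    SELECT * FROM t1 USE INDEX () WHERE a = 1;
    SELECT '2004-01-01 10:00:00' + INTERVAL '1.000001' SECOND_MICROSECOND;
*/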
YEAR_MONTH_SYM { $$=INTERVAL_YEAR_MONTH; } - | YEAR_SYM { $$=INTERVAL_YEAR; }; + | YEAR_SYM { $$=INTERVAL_YEAR; } + ; + +date_time_type: + DATE_SYM {$$=MYSQL_TIMESTAMP_DATE;} + | TIME_SYM {$$=MYSQL_TIMESTAMP_TIME;} + | DATETIME {$$=MYSQL_TIMESTAMP_DATETIME;} + | TIMESTAMP {$$=MYSQL_TIMESTAMP_DATETIME;} + ; table_alias: /* empty */ @@ -2326,29 +3697,43 @@ opt_all: where_clause: /* empty */ { Select->where= 0; } - | WHERE expr + | WHERE + { + Select->parsing_place= IN_WHERE; + } + expr { - Select->where= $2; - if ($2) - $2->top_level_item(); + SELECT_LEX *select= Select; + select->where= $3; + select->parsing_place= NO_MATTER; + if ($3) + $3->top_level_item(); } - ; + ; having_clause: /* empty */ - | HAVING { Select->create_refs=1; } expr - { - SELECT_LEX *sel=Select; - sel->having= $3; - sel->create_refs=0; - if ($3) - $3->top_level_item(); - } + | HAVING + { + Select->parsing_place= IN_HAVING; + } + expr + { + SELECT_LEX *sel= Select; + sel->having= $3; + sel->parsing_place= NO_MATTER; + if ($3) + $3->top_level_item(); + } ; opt_escape: - ESCAPE_SYM TEXT_STRING { $$= $2.str; } - | /* empty */ { $$= (char*) "\\"; }; + ESCAPE_SYM simple_expr { $$= $2; } + | /* empty */ + { + $$= new Item_string("\\", 1, &my_charset_latin1); + } + ; /* @@ -2361,23 +3746,35 @@ group_clause: group_list: group_list ',' order_ident order_dir - { if (add_group_to_list($3,(bool) $4)) YYABORT; } + { if (add_group_to_list(YYTHD, $3,(bool) $4)) YYABORT; } | order_ident order_dir - { if (add_group_to_list($1,(bool) $2)) YYABORT; }; + { if (add_group_to_list(YYTHD, $1,(bool) $2)) YYABORT; }; olap_opt: /* empty */ {} | WITH CUBE_SYM { LEX *lex=Lex; - net_printf(&lex->thd->net, ER_NOT_SUPPORTED_YET, "CUBE"); - YYABORT; /* To be deleted in 4.1 */ + if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE) + { + net_printf(lex->thd, ER_WRONG_USAGE, "WITH CUBE", + "global union parameters"); + YYABORT; + } + lex->current_select->olap= CUBE_TYPE; + net_printf(lex->thd, ER_NOT_SUPPORTED_YET, "CUBE"); + YYABORT; /* To be deleted in 5.1 */ } | WITH ROLLUP_SYM { - LEX *lex=Lex; - net_printf(&lex->thd->net, ER_NOT_SUPPORTED_YET, "ROLLUP"); - YYABORT; /* To be deleted in 4.1 */ + LEX *lex= Lex; + if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE) + { + net_printf(lex->thd, ER_WRONG_USAGE, "WITH ROLLUP", + "global union parameters"); + YYABORT; + } + lex->current_select->olap= ROLLUP_TYPE; } ; @@ -2390,24 +3787,43 @@ opt_order_clause: | order_clause; order_clause: - ORDER_SYM BY - { + ORDER_SYM BY + { LEX *lex=Lex; - if (lex->select->olap != UNSPECIFIED_OLAP_TYPE) + SELECT_LEX *sel= lex->current_select; + SELECT_LEX_UNIT *unit= sel-> master_unit(); + if (sel->linkage != GLOBAL_OPTIONS_TYPE && + sel->olap != UNSPECIFIED_OLAP_TYPE) { - net_printf(&lex->thd->net, ER_WRONG_USAGE, + net_printf(lex->thd, ER_WRONG_USAGE, "CUBE/ROLLUP", "ORDER BY"); YYABORT; } - lex->select->sort_default=1; + if (lex->sql_command != SQLCOM_ALTER_TABLE && !unit->fake_select_lex) + { + /* + A query of the of the form (SELECT ...) ORDER BY order_list is + executed in the same way as the query + SELECT ... ORDER BY order_list + unless the SELECT construct contains ORDER BY or LIMIT clauses. + Otherwise we create a fake SELECT_LEX if it has not been created + yet. 
+ */ + SELECT_LEX *first_sl= unit->first_select(); + if (!first_sl->next_select() && + (first_sl->order_list.elements || + first_sl->select_limit != HA_POS_ERROR) && + unit->add_fake_select_lex(lex->thd)) + YYABORT; + } } order_list; order_list: order_list ',' order_ident order_dir - { if (add_order_to_list($3,(bool) $4)) YYABORT; } + { if (add_order_to_list(YYTHD, $3,(bool) $4)) YYABORT; } | order_ident order_dir - { if (add_order_to_list($1,(bool) $2)) YYABORT; }; + { if (add_order_to_list(YYTHD, $1,(bool) $2)) YYABORT; }; order_dir: /* empty */ { $$ = 1; } @@ -2415,21 +3831,25 @@ order_dir: | DESC { $$ =0; }; +opt_limit_clause_init: + /* empty */ + { + LEX *lex= Lex; + SELECT_LEX *sel= lex->current_select; + sel->offset_limit= 0L; + sel->select_limit= HA_POS_ERROR; + } + | limit_clause {} + ; + +opt_limit_clause: + /* empty */ {} + | limit_clause {} + ; + limit_clause: - /* empty */ {} - | LIMIT - { - LEX *lex=Lex; - if (lex->select->olap != UNSPECIFIED_OLAP_TYPE) - { - net_printf(&lex->thd->net, ER_WRONG_USAGE, "CUBE/ROLLUP", - "LIMIT"); - YYABORT; - } - } - limit_options - {} - ; + LIMIT limit_options {} + ; limit_options: ULONG_NUM @@ -2437,55 +3857,72 @@ limit_options: SELECT_LEX *sel= Select; sel->select_limit= $1; sel->offset_limit= 0L; + sel->explicit_limit= 1; } | ULONG_NUM ',' ULONG_NUM { SELECT_LEX *sel= Select; sel->select_limit= $3; sel->offset_limit= $1; + sel->explicit_limit= 1; } | ULONG_NUM OFFSET_SYM ULONG_NUM { SELECT_LEX *sel= Select; sel->select_limit= $1; sel->offset_limit= $3; + sel->explicit_limit= 1; } ; + delete_limit_clause: /* empty */ { LEX *lex=Lex; - lex->select->select_limit= HA_POS_ERROR; + lex->current_select->select_limit= HA_POS_ERROR; } | LIMIT ulonglong_num - { Select->select_limit= (ha_rows) $2; }; + { + SELECT_LEX *sel= Select; + sel->select_limit= (ha_rows) $2; + sel->explicit_limit= 1; + }; ULONG_NUM: - NUM { $$= strtoul($1.str,NULL,10); } - | LONG_NUM { $$= (ulong) strtoll($1.str,NULL,10); } - | ULONGLONG_NUM { $$= (ulong) strtoull($1.str,NULL,10); } - | REAL_NUM { $$= strtoul($1.str,NULL,10); } - | FLOAT_NUM { $$= strtoul($1.str,NULL,10); }; + NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); } + | LONG_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); } + | ULONGLONG_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); } + | REAL_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); } + | FLOAT_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); } + ; ulonglong_num: - NUM { $$= (ulonglong) strtoul($1.str,NULL,10); } - | ULONGLONG_NUM { $$= strtoull($1.str,NULL,10); } - | LONG_NUM { $$= (ulonglong) strtoll($1.str,NULL,10); } - | REAL_NUM { $$= strtoull($1.str,NULL,10); } - | FLOAT_NUM { $$= strtoull($1.str,NULL,10); }; + NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); } + | ULONGLONG_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); } + | LONG_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); } + | REAL_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); } + | FLOAT_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); } + ; procedure_clause: /* empty */ | PROCEDURE ident /* Procedure name */ { LEX *lex=Lex; + if (&lex->select_lex != lex->current_select) + { + net_printf(lex->thd, ER_WRONG_USAGE, + "PROCEDURE", + "subquery"); + YYABORT; + } lex->proc_list.elements=0; lex->proc_list.first=0; lex->proc_list.next= (byte**) 
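/*
  As the comment above describes, a trailing ORDER BY/LIMIT may be applied
  to a parenthesized select, and WITH ROLLUP is accepted in GROUP BY; a
  sketch with t1(a, b) assumed:

    (SELECT a FROM t1 LIMIT 3) ORDER BY a;
    SELECT a FROM t1 ORDER BY a LIMIT 5 OFFSET 10;
    SELECT a, SUM(b) FROM t1 GROUP BY a WITH ROLLUP;
*/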
&lex->proc_list.first; if (add_proc_to_list(lex->thd, new Item_field(NULL,NULL,$2.str))) YYABORT; - current_thd->safe_to_cache_query=0; + Lex->uncacheable(UNCACHEABLE_SIDEEFFECT); } '(' procedure_list ')'; @@ -2505,61 +3942,99 @@ procedure_item: if (add_proc_to_list(lex->thd, $2)) YYABORT; if (!$2->name) - $2->set_name($1,(uint) ((char*) lex->tok_end - $1)); - }; + $2->set_name($1,(uint) ((char*) lex->tok_end - $1), YYTHD->charset()); + } + ; + + +select_var_list_init: + { + LEX *lex=Lex; + if (!lex->describe && (!(lex->result= new select_dumpvar()))) + YYABORT; + } + select_var_list + {} + ; + +select_var_list: + select_var_list ',' select_var_ident + | select_var_ident {} + ; + +select_var_ident: '@' ident_or_text + { + LEX *lex=Lex; + if (lex->result && ((select_dumpvar *)lex->result)->var_list.push_back((LEX_STRING*) sql_memdup(&$2,sizeof(LEX_STRING)))) + YYABORT; + } + ; into: - INTO OUTFILE TEXT_STRING + INTO OUTFILE TEXT_STRING_sys { - THD *thd= current_thd; - thd->safe_to_cache_query= 0; - if (!(thd->lex.exchange= new sql_exchange($3.str,0))) - YYABORT; + LEX *lex= Lex; + lex->uncacheable(UNCACHEABLE_SIDEEFFECT); + if (!(lex->exchange= new sql_exchange($3.str, 0)) || + !(lex->result= new select_export(lex->exchange))) + YYABORT; } opt_field_term opt_line_term - | INTO DUMPFILE TEXT_STRING + | INTO DUMPFILE TEXT_STRING_sys { - THD *thd= current_thd; - thd->safe_to_cache_query= 0; - if (!(thd->lex.exchange= new sql_exchange($3.str,1))) - YYABORT; - }; + LEX *lex=Lex; + if (!lex->describe) + { + lex->uncacheable(UNCACHEABLE_SIDEEFFECT); + if (!(lex->exchange= new sql_exchange($3.str,1))) + YYABORT; + if (!(lex->result= new select_dump(lex->exchange))) + YYABORT; + } + } + | INTO select_var_list_init + { + Lex->uncacheable(UNCACHEABLE_SIDEEFFECT); + } + ; /* DO statement */ -do: DO_SYM +do: DO_SYM { LEX *lex=Lex; lex->sql_command = SQLCOM_DO; - if (!(lex->insert_list = new List_item)) - YYABORT; + mysql_init_select(lex); + } + expr_list + { + Lex->insert_list= $3; } - values - {} ; /* - Drop : delete tables or index + Drop : delete tables or index or user */ drop: - DROP opt_temporary TABLE_SYM if_exists table_list opt_restrict + DROP opt_temporary table_or_tables if_exists table_list opt_restrict { LEX *lex=Lex; lex->sql_command = SQLCOM_DROP_TABLE; lex->drop_temporary= $2; lex->drop_if_exists= $4; } - | DROP INDEX ident ON table_ident {} + | DROP INDEX_SYM ident ON table_ident {} { LEX *lex=Lex; lex->sql_command= SQLCOM_DROP_INDEX; - lex->drop_list.empty(); - lex->drop_list.push_back(new Alter_drop(Alter_drop::KEY, - $3.str)); - if (!add_table_to_list($5, NULL, TL_OPTION_UPDATING)) + lex->alter_info.drop_list.empty(); + lex->alter_info.drop_list.push_back(new Alter_drop(Alter_drop::KEY, + $3.str)); + if (!lex->current_select->add_table_to_list(lex->thd, $5, NULL, + TL_OPTION_UPDATING)) YYABORT; } | DROP DATABASE if_exists ident @@ -2569,12 +4044,21 @@ drop: lex->drop_if_exists=$3; lex->name=$4.str; } - | DROP UDF_SYM ident + | DROP UDF_SYM IDENT_sys { LEX *lex=Lex; lex->sql_command = SQLCOM_DROP_FUNCTION; - lex->udf.name=$3.str; - }; + lex->udf.name = $3; + } + | DROP USER + { + LEX *lex=Lex; + lex->sql_command = SQLCOM_DROP_USER; + lex->users_list.empty(); + } + user_list + {} + ; table_list: @@ -2583,7 +4067,11 @@ table_list: table_name: table_ident - { if (!add_table_to_list($1,NULL,TL_OPTION_UPDATING)) YYABORT; }; + { + if (!Select->add_table_to_list(YYTHD, $1, NULL, TL_OPTION_UPDATING)) + YYABORT; + } + ; if_exists: /* empty */ { $$= 0; } @@ -2599,13 +4087,22 @@ opt_temporary: 
*/ insert: - INSERT { Lex->sql_command = SQLCOM_INSERT; } insert_lock_option + INSERT + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_INSERT; + lex->duplicates= DUP_ERROR; + mysql_init_select(lex); + /* for subselects */ + lex->lock_option= (using_update_log) ? TL_READ_NO_INSERT : TL_READ; + lex->select_lex.resolve_mode= SELECT_LEX::INSERT_MODE; + } insert_lock_option opt_ignore insert2 { - set_lock_for_tables($3); - Lex->select= &Lex->select_lex; + Select->set_lock_for_tables($3); + Lex->current_select= &Lex->select_lex; } - insert_field_spec + insert_field_spec opt_insert_update {} ; @@ -2615,11 +4112,13 @@ replace: LEX *lex=Lex; lex->sql_command = SQLCOM_REPLACE; lex->duplicates= DUP_REPLACE; + mysql_init_select(lex); + lex->select_lex.resolve_mode= SELECT_LEX::INSERT_MODE; } replace_lock_option insert2 { - set_lock_for_tables($3); - Lex->select= &Lex->select_lex; + Select->set_lock_for_tables($3); + Lex->current_select= &Lex->select_lex; } insert_field_spec {} @@ -2673,8 +4172,9 @@ fields: insert_values: VALUES values_list {} - | create_select { Select->braces= 0;} opt_union {} - | '(' create_select ')' { Select->braces= 1;} union_opt {} + | VALUE_SYM values_list {} + | create_select { Select->set_braces(0);} union_clause {} + | '(' create_select ')' { Select->set_braces(1);} union_opt {} ; values_list: @@ -2735,63 +4235,88 @@ values: ; expr_or_default: - expr { $$= $1;} - | DEFAULT {$$= new Item_default(); } + expr { $$= $1;} + | DEFAULT {$$= new Item_default_value(); } ; +opt_insert_update: + /* empty */ + | ON DUPLICATE_SYM { Lex->duplicates= DUP_UPDATE; } + KEY_SYM UPDATE_SYM insert_update_list + ; + /* Update rows in a table */ update: - UPDATE_SYM - { - LEX *lex=Lex; - lex->sql_command = SQLCOM_UPDATE; - lex->select->order_list.elements=0; - lex->select->order_list.first=0; - lex->select->order_list.next= (byte**) &lex->select->order_list.first; + UPDATE_SYM + { + LEX *lex= Lex; + mysql_init_select(lex); + lex->sql_command= SQLCOM_UPDATE; + lex->lock_option= TL_UNLOCK; /* Will be set later */ + lex->duplicates= DUP_ERROR; } opt_low_priority opt_ignore join_table_list - SET update_list + SET update_list { - if (Lex->select->table_list.elements > 1) + LEX *lex= Lex; + if (lex->select_lex.table_list.elements > 1) { - LEX *lex=Lex; - lex->sql_command= SQLCOM_MULTI_UPDATE; - lex->lock_option= $3; + lex->sql_command= SQLCOM_UPDATE_MULTI; + lex->multi_lock_option= $3; + } + else if (lex->select_lex.get_table_list()->derived) + { + /* it is single table update and it is update of derived table */ + net_printf(lex->thd, ER_NON_UPDATABLE_TABLE, + lex->select_lex.get_table_list()->alias, "UPDATE"); + YYABORT; } else - set_lock_for_tables($3); + Select->set_lock_for_tables($3); } where_clause opt_order_clause delete_limit_clause {} ; update_list: - update_list ',' simple_ident equal expr + update_list ',' update_elem + | update_elem; + +update_elem: + simple_ident equal expr_or_default { - if (add_item_to_list($3) || add_value_to_list($5)) + if (add_item_to_list(YYTHD, $1) || add_value_to_list(YYTHD, $3)) YYABORT; - } - | simple_ident equal expr - { - if (add_item_to_list($1) || add_value_to_list($3)) - YYABORT; - }; + }; + +insert_update_list: + insert_update_list ',' insert_update_elem + | insert_update_elem; + +insert_update_elem: + simple_ident equal expr_or_default + { + LEX *lex= Lex; + if (lex->update_list.push_back($1) || + lex->value_list.push_back($3)) + YYABORT; + }; opt_low_priority: - /* empty */ { $$= current_thd->update_lock_default; } + /* empty */ { $$= 
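/*
  Statements the ON DUPLICATE KEY UPDATE, multi-table UPDATE, SELECT INTO
  variable and DROP USER rules are intended to handle; t1, t2 and their
  columns are assumed names:

    INSERT INTO t1 (a, b) VALUES (1, 2) ON DUPLICATE KEY UPDATE b = b + 1;
    UPDATE t1, t2 SET t1.a = t2.a WHERE t1.id = t2.id;
    SELECT a, b INTO @x, @y FROM t1 LIMIT 1;
    DROP USER 'someone'@'localhost';
*/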
YYTHD->update_lock_default; } | LOW_PRIORITY { $$= TL_WRITE_LOW_PRIORITY; }; /* Delete rows from a table */ delete: DELETE_SYM - { - LEX *lex=Lex; - lex->sql_command= SQLCOM_DELETE; lex->select->options=0; + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_DELETE; + mysql_init_select(lex); lex->lock_option= lex->thd->update_lock_default; - lex->select->order_list.elements=0; - lex->select->order_list.first=0; - lex->select->order_list.next= (byte**) &lex->select->order_list.first; + lex->ignore= 0; + lex->select_lex.init_order(); } opt_delete_options single_multi {} ; @@ -2799,18 +4324,16 @@ delete: single_multi: FROM table_ident { - if (!add_table_to_list($2, NULL, TL_OPTION_UPDATING, - Lex->lock_option)) + if (!Select->add_table_to_list(YYTHD, $2, NULL, TL_OPTION_UPDATING, + Lex->lock_option)) YYABORT; } where_clause opt_order_clause delete_limit_clause {} - | table_wild_list - { mysql_init_multi_delete(Lex); } - FROM join_table_list where_clause - | FROM table_wild_list - { mysql_init_multi_delete(Lex); } - USING join_table_list where_clause + | table_wild_list {mysql_init_multi_delete(Lex);} + FROM join_table_list {fix_multi_delete_lex(Lex);} where_clause + | FROM table_wild_list { mysql_init_multi_delete(Lex);} + USING join_table_list {fix_multi_delete_lex(Lex);} where_clause {} ; @@ -2819,23 +4342,24 @@ table_wild_list: | table_wild_list ',' table_wild_one {}; table_wild_one: - ident opt_wild - { - if (!add_table_to_list(new Table_ident($1), NULL, - TL_OPTION_UPDATING, Lex->lock_option)) - YYABORT; - } - | ident '.' ident opt_wild - { - if (!add_table_to_list(new Table_ident($1,$3,0), NULL, - TL_OPTION_UPDATING, - Lex->lock_option)) + ident opt_wild opt_table_alias + { + if (!Select->add_table_to_list(YYTHD, new Table_ident($1), $3, + TL_OPTION_UPDATING, Lex->lock_option)) + YYABORT; + } + | ident '.' ident opt_wild opt_table_alias + { + if (!Select->add_table_to_list(YYTHD, + new Table_ident(YYTHD, $1, $3, 0), + $5, TL_OPTION_UPDATING, + Lex->lock_option)) YYABORT; - } + } ; opt_wild: - /* empty */ {} + /* empty */ {} | '.' 
'*' {}; @@ -2845,27 +4369,32 @@ opt_delete_options: opt_delete_option: QUICK { Select->options|= OPTION_QUICK; } - | LOW_PRIORITY { Lex->lock_option= TL_WRITE_LOW_PRIORITY; }; + | LOW_PRIORITY { Lex->lock_option= TL_WRITE_LOW_PRIORITY; } + | IGNORE_SYM { Lex->ignore= 1; }; truncate: TRUNCATE_SYM opt_table_sym table_name { - LEX* lex = Lex; + LEX* lex= Lex; lex->sql_command= SQLCOM_TRUNCATE; - lex->select->options=0; - lex->select->order_list.elements=0; - lex->select->order_list.first=0; - lex->select->order_list.next= (byte**) &lex->select->order_list.first; + lex->select_lex.options= 0; + lex->select_lex.init_order(); } ; opt_table_sym: /* empty */ | TABLE_SYM; - + /* Show things */ -show: SHOW { Lex->wild=0;} show_param +show: SHOW + { + LEX *lex=Lex; + lex->wild=0; + bzero((char*) &lex->create_info,sizeof(lex->create_info)); + } + show_param {} ; @@ -2874,35 +4403,37 @@ show_param: { Lex->sql_command= SQLCOM_SHOW_DATABASES; } | TABLES opt_db wild { - LEX *lex=Lex; + LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_TABLES; - lex->select->db= $2; lex->select->options=0; + lex->select_lex.db= $2; } | TABLE_SYM STATUS_SYM opt_db wild { - LEX *lex=Lex; + LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_TABLES; - lex->select->options|= SELECT_DESCRIBE; - lex->select->db= $3; + lex->describe= DESCRIBE_EXTENDED; + lex->select_lex.db= $3; } | OPEN_SYM TABLES opt_db wild { - LEX *lex=Lex; + LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_OPEN_TABLES; - lex->select->db= $3; - lex->select->options=0; + lex->select_lex.db= $3; } + | ENGINE_SYM storage_engines + { Lex->create_info.db_type= $2; } + show_engine_param | opt_full COLUMNS from_or_in table_ident opt_db wild { Lex->sql_command= SQLCOM_SHOW_FIELDS; if ($5) $4->change_db($5); - if (!add_table_to_list($4, NULL, 0)) + if (!Select->add_table_to_list(YYTHD, $4, NULL, 0)) YYABORT; } - | NEW_SYM MASTER_SYM FOR_SYM SLAVE WITH MASTER_LOG_FILE_SYM EQ - TEXT_STRING AND MASTER_LOG_POS_SYM EQ ulonglong_num - AND MASTER_SERVER_ID_SYM EQ + | NEW_SYM MASTER_SYM FOR_SYM SLAVE WITH MASTER_LOG_FILE_SYM EQ + TEXT_STRING_sys AND_SYM MASTER_LOG_POS_SYM EQ ulonglong_num + AND_SYM MASTER_SERVER_ID_SYM EQ ULONG_NUM { Lex->sql_command = SQLCOM_SHOW_NEW_MASTER; @@ -2910,7 +4441,7 @@ show_param: Lex->mi.pos = $12; Lex->mi.server_id = $16; } - | MASTER_SYM LOGS_SYM + | master_or_binary LOGS_SYM { Lex->sql_command = SQLCOM_SHOW_BINLOGS; } @@ -2920,33 +4451,89 @@ show_param: } | BINLOG_SYM EVENTS_SYM binlog_in binlog_from { - LEX *lex=Lex; - lex->sql_command = SQLCOM_SHOW_BINLOG_EVENTS; - lex->select->select_limit= lex->thd->variables.select_limit; - lex->select->offset_limit= 0L; - } limit_clause + LEX *lex= Lex; + lex->sql_command= SQLCOM_SHOW_BINLOG_EVENTS; + } opt_limit_clause_init | keys_or_index from_or_in table_ident opt_db { Lex->sql_command= SQLCOM_SHOW_KEYS; if ($4) $3->change_db($4); - if (!add_table_to_list($3, NULL, 0)) + if (!Select->add_table_to_list(YYTHD, $3, NULL, 0)) YYABORT; } + | COLUMN_SYM TYPES_SYM + { + LEX *lex=Lex; + lex->sql_command= SQLCOM_SHOW_COLUMN_TYPES; + } + | TABLE_SYM TYPES_SYM + { + LEX *lex=Lex; + lex->sql_command= SQLCOM_SHOW_STORAGE_ENGINES; + WARN_DEPRECATED("SHOW TABLE TYPES", "SHOW [STORAGE] ENGINES"); + } + | opt_storage ENGINES_SYM + { + LEX *lex=Lex; + lex->sql_command= SQLCOM_SHOW_STORAGE_ENGINES; + } + | PRIVILEGES + { + LEX *lex=Lex; + lex->sql_command= SQLCOM_SHOW_PRIVILEGES; + } + | COUNT_SYM '(' '*' ')' WARNINGS + { (void) create_select_for_variable("warning_count"); } + | COUNT_SYM '(' '*' ')' ERRORS + { (void) 
create_select_for_variable("error_count"); } + | WARNINGS opt_limit_clause_init + { Lex->sql_command = SQLCOM_SHOW_WARNS;} + | ERRORS opt_limit_clause_init + { Lex->sql_command = SQLCOM_SHOW_ERRORS;} | STATUS_SYM wild { Lex->sql_command= SQLCOM_SHOW_STATUS; } | INNOBASE_SYM STATUS_SYM - { Lex->sql_command = SQLCOM_SHOW_INNODB_STATUS;} + { Lex->sql_command = SQLCOM_SHOW_INNODB_STATUS; WARN_DEPRECATED("SHOW INNODB STATUS", "SHOW ENGINE INNODB STATUS"); } | opt_full PROCESSLIST_SYM { Lex->sql_command= SQLCOM_SHOW_PROCESSLIST;} | opt_var_type VARIABLES wild - { - THD *thd= current_thd; - thd->lex.sql_command= SQLCOM_SHOW_VARIABLES; - thd->lex.option_type= (enum_var_type) $1; + { + THD *thd= YYTHD; + thd->lex->sql_command= SQLCOM_SHOW_VARIABLES; + thd->lex->option_type= (enum_var_type) $1; } + | charset wild + { Lex->sql_command= SQLCOM_SHOW_CHARSETS; } + | COLLATION_SYM wild + { Lex->sql_command= SQLCOM_SHOW_COLLATIONS; } + | BERKELEY_DB_SYM LOGS_SYM + { Lex->sql_command= SQLCOM_SHOW_LOGS; WARN_DEPRECATED("SHOW BDB LOGS", "SHOW ENGINE BDB LOGS"); } | LOGS_SYM - { Lex->sql_command= SQLCOM_SHOW_LOGS; } + { Lex->sql_command= SQLCOM_SHOW_LOGS; WARN_DEPRECATED("SHOW LOGS", "SHOW ENGINE BDB LOGS"); } + | GRANTS + { + LEX *lex=Lex; + lex->sql_command= SQLCOM_SHOW_GRANTS; + THD *thd= lex->thd; + LEX_USER *curr_user; + if (!(curr_user= (LEX_USER*) thd->alloc(sizeof(st_lex_user)))) + YYABORT; + curr_user->user.str= thd->priv_user; + curr_user->user.length= strlen(thd->priv_user); + if (*thd->priv_host != 0) + { + curr_user->host.str= thd->priv_host; + curr_user->host.length= strlen(thd->priv_host); + } + else + { + curr_user->host.str= (char *) "%"; + curr_user->host.length= 1; + } + curr_user->password.str=NullS; + lex->grant_user= curr_user; + } | GRANTS FOR_SYM user { LEX *lex=Lex; @@ -2954,10 +4541,16 @@ show_param: lex->grant_user=$3; lex->grant_user->password.str=NullS; } + | CREATE DATABASE opt_if_not_exists ident + { + Lex->sql_command=SQLCOM_SHOW_CREATE_DB; + Lex->create_info.options=$3; + Lex->name=$4.str; + } | CREATE TABLE_SYM table_ident { Lex->sql_command = SQLCOM_SHOW_CREATE; - if(!add_table_to_list($3, NULL, 0)) + if (!Select->add_table_to_list(YYTHD, $3, NULL,0)) YYABORT; } | MASTER_SYM STATUS_SYM @@ -2969,13 +4562,50 @@ show_param: Lex->sql_command = SQLCOM_SHOW_SLAVE_STAT; }; +show_engine_param: + STATUS_SYM + { + switch (Lex->create_info.db_type) { + case DB_TYPE_NDBCLUSTER: + Lex->sql_command = SQLCOM_SHOW_NDBCLUSTER_STATUS; + break; + case DB_TYPE_INNODB: + Lex->sql_command = SQLCOM_SHOW_INNODB_STATUS; + break; + default: + net_printf(YYTHD, ER_NOT_SUPPORTED_YET, "STATUS"); + YYABORT; + } + } + | LOGS_SYM + { + switch (Lex->create_info.db_type) { + case DB_TYPE_BERKELEY_DB: + Lex->sql_command = SQLCOM_SHOW_LOGS; + break; + default: + net_printf(YYTHD, ER_NOT_SUPPORTED_YET, "LOGS"); + YYABORT; + } + }; + +master_or_binary: + MASTER_SYM + | BINARY; + +opt_storage: + /* empty */ + | STORAGE_SYM; + opt_db: /* empty */ { $$= 0; } | from_or_in ident { $$= $2.str; }; wild: /* empty */ - | LIKE text_string { Lex->wild= $2; }; + | LIKE TEXT_STRING_sys + { Lex->wild= new (YYTHD->mem_root) String($2.str, $2.length, + system_charset_info); }; opt_full: /* empty */ { Lex->verbose=0; } @@ -2987,7 +4617,7 @@ from_or_in: binlog_in: /* empty */ { Lex->mi.log_file_name = 0; } - | IN_SYM TEXT_STRING { Lex->mi.log_file_name = $2.str; }; + | IN_SYM TEXT_STRING_sys { Lex->mi.log_file_name = $2.str; }; binlog_from: /* empty */ { Lex->mi.pos = 4; /* skip magic number */ } @@ -3002,32 +4632,43 @@ describe: 
lex->wild=0; lex->verbose=0; lex->sql_command=SQLCOM_SHOW_FIELDS; - if (!add_table_to_list($2, NULL, 0)) + if (!Select->add_table_to_list(lex->thd, $2, NULL,0)) YYABORT; } opt_describe_column {} - | describe_command select - { Lex->select_lex.options|= SELECT_DESCRIBE; }; - + | describe_command opt_extended_describe + { Lex->describe|= DESCRIBE_NORMAL; } + select + { + LEX *lex=Lex; + lex->select_lex.options|= SELECT_DESCRIBE; + } + ; describe_command: DESC | DESCRIBE; +opt_extended_describe: + /* empty */ {} + | EXTENDED_SYM { Lex->describe|= DESCRIBE_EXTENDED; } + ; + opt_describe_column: /* empty */ {} | text_string { Lex->wild= $1; } | ident - { Lex->wild= new String((const char*) $1.str,$1.length); }; + { Lex->wild= new (YYTHD->mem_root) String((const char*) $1.str,$1.length,system_charset_info); }; /* flush things */ flush: - FLUSH_SYM + FLUSH_SYM opt_no_write_to_binlog { LEX *lex=Lex; lex->sql_command= SQLCOM_FLUSH; lex->type=0; + lex->no_write_to_binlog= $2; } flush_options {} @@ -3076,27 +4717,54 @@ purge: PURGE { LEX *lex=Lex; - lex->sql_command = SQLCOM_PURGE; lex->type=0; + } purge_options + {} + ; + +purge_options: + master_or_binary LOGS_SYM purge_option + ; + +purge_option: + TO_SYM TEXT_STRING_sys + { + Lex->sql_command = SQLCOM_PURGE; + Lex->to_log = $2.str; + } + | BEFORE_SYM expr + { + if (!$2) + /* Can only be an out of memory situation, no need for a message */ + YYABORT; + if ($2->fix_fields(Lex->thd, 0, &$2) || $2->check_cols(1)) + { + net_printf(Lex->thd, ER_WRONG_ARGUMENTS, "PURGE LOGS BEFORE"); + YYABORT; + } + Item *tmp= new Item_func_unix_timestamp($2); + /* + it is OK only emulate fix_fieds, because we need only + value of constant + */ + tmp->quick_fix_field(); + Lex->sql_command = SQLCOM_PURGE_BEFORE; + Lex->purge_time= (ulong) tmp->val_int(); } - MASTER_SYM LOGS_SYM TO_SYM TEXT_STRING - { - Lex->to_log = $6.str; - } ; + ; /* kill threads */ kill: - KILL_SYM expr + KILL_SYM { Lex->sql_command= SQLCOM_KILL; } expr { LEX *lex=Lex; - if ($2->fix_fields(lex->thd,0)) - { - send_error(&lex->thd->net, ER_SET_CONSTANTS_ONLY); + if ($3->fix_fields(lex->thd, 0, &$3) || $3->check_cols(1)) + { + send_error(lex->thd, ER_SET_CONSTANTS_ONLY); YYABORT; } - lex->sql_command=SQLCOM_KILL; - lex->thread_id= (ulong) $2->val_int(); + lex->thread_id= (ulong) $3->val_int(); }; /* change database */ @@ -3104,17 +4772,20 @@ kill: use: USE_SYM ident { LEX *lex=Lex; - lex->sql_command=SQLCOM_CHANGE_DB; lex->select->db= $2.str; + lex->sql_command=SQLCOM_CHANGE_DB; + lex->select_lex.db= $2.str; }; /* import, export of files */ -load: LOAD DATA_SYM load_data_lock opt_local INFILE TEXT_STRING +load: LOAD DATA_SYM load_data_lock opt_local INFILE TEXT_STRING_sys { LEX *lex=Lex; lex->sql_command= SQLCOM_LOAD; lex->lock_option= $3; lex->local_file= $4; + lex->duplicates= DUP_ERROR; + lex->ignore= 0; if (!(lex->exchange= new sql_exchange($6.str,0))) YYABORT; lex->field_list.empty(); @@ -3122,14 +4793,14 @@ load: LOAD DATA_SYM load_data_lock opt_local INFILE TEXT_STRING opt_duplicate INTO TABLE_SYM table_ident opt_field_term opt_line_term opt_ignore_lines opt_field_spec { - if (!add_table_to_list($11, NULL, TL_OPTION_UPDATING)) + if (!Select->add_table_to_list(YYTHD, $11, NULL, TL_OPTION_UPDATING)) YYABORT; } | LOAD TABLE_SYM table_ident FROM MASTER_SYM { Lex->sql_command = SQLCOM_LOAD_MASTER_TABLE; - if (!add_table_to_list($3, NULL, TL_OPTION_UPDATING)) + if (!Select->add_table_to_list(YYTHD, $3, NULL, TL_OPTION_UPDATING)) YYABORT; } @@ -3144,7 +4815,7 @@ opt_local: | LOCAL_SYM { $$=1;}; 
load_data_lock: - /* empty */ { $$= current_thd->update_lock_default; } + /* empty */ { $$= YYTHD->update_lock_default; } | CONCURRENT { $$= TL_WRITE_CONCURRENT_INSERT ; } | LOW_PRIORITY { $$= TL_WRITE_LOW_PRIORITY; }; @@ -3152,7 +4823,7 @@ load_data_lock: opt_duplicate: /* empty */ { Lex->duplicates=DUP_ERROR; } | REPLACE { Lex->duplicates=DUP_REPLACE; } - | IGNORE_SYM { Lex->duplicates=DUP_IGNORE; }; + | IGNORE_SYM { Lex->ignore= 1; }; opt_field_term: /* empty */ @@ -3163,15 +4834,28 @@ field_term_list: | field_term; field_term: - TERMINATED BY text_string { Lex->exchange->field_term= $3;} + TERMINATED BY text_string + { + DBUG_ASSERT(Lex->exchange); + Lex->exchange->field_term= $3; + } | OPTIONALLY ENCLOSED BY text_string { - LEX *lex=Lex; - lex->exchange->enclosed= $4; - lex->exchange->opt_enclosed=1; + LEX *lex= Lex; + DBUG_ASSERT(lex->exchange); + lex->exchange->enclosed= $4; + lex->exchange->opt_enclosed= 1; } - | ENCLOSED BY text_string { Lex->exchange->enclosed= $3;} - | ESCAPED BY text_string { Lex->exchange->escaped= $3;}; + | ENCLOSED BY text_string + { + DBUG_ASSERT(Lex->exchange); + Lex->exchange->enclosed= $3; + } + | ESCAPED BY text_string + { + DBUG_ASSERT(Lex->exchange); + Lex->exchange->escaped= $3; + }; opt_line_term: /* empty */ @@ -3182,43 +4866,117 @@ line_term_list: | line_term; line_term: - TERMINATED BY text_string { Lex->exchange->line_term= $3;} - | STARTING BY text_string { Lex->exchange->line_start= $3;}; + TERMINATED BY text_string + { + DBUG_ASSERT(Lex->exchange); + Lex->exchange->line_term= $3; + } + | STARTING BY text_string + { + DBUG_ASSERT(Lex->exchange); + Lex->exchange->line_start= $3; + }; opt_ignore_lines: /* empty */ - | IGNORE_SYM NUM LINES - { Lex->exchange->skip_lines=atol($2.str); }; + | IGNORE_SYM NUM LINES + { + DBUG_ASSERT(Lex->exchange); + Lex->exchange->skip_lines= atol($2.str); + }; /* Common definitions */ text_literal: - TEXT_STRING { $$ = new Item_string($1.str,$1.length); } - | text_literal TEXT_STRING - { ((Item_string*) $1)->append($2.str,$2.length); }; + TEXT_STRING_literal + { + THD *thd= YYTHD; + $$ = new Item_string($1.str,$1.length,thd->variables.collation_connection); + } + | NCHAR_STRING + { $$= new Item_string($1.str,$1.length,national_charset_info); } + | UNDERSCORE_CHARSET TEXT_STRING + { $$ = new Item_string($2.str,$2.length,Lex->charset); } + | text_literal TEXT_STRING_literal + { ((Item_string*) $1)->append($2.str,$2.length); } + ; text_string: - TEXT_STRING { $$= new String($1.str,$1.length); } + TEXT_STRING_literal + { $$= new (YYTHD->mem_root) String($1.str,$1.length,YYTHD->variables.collation_connection); } | HEX_NUM { Item *tmp = new Item_varbinary($1.str,$1.length); - $$= tmp ? tmp->val_str((String*) 0) : (String*) 0; - }; + /* + it is OK only emulate fix_fieds, because we need only + value of constant + */ + $$= tmp ? 
+ tmp->quick_fix_field(), tmp->val_str((String*) 0) : + (String*) 0; + } + ; + +param_marker: + PARAM_MARKER + { + THD *thd=YYTHD; + LEX *lex= thd->lex; + Item_param *item= new Item_param((uint) (lex->tok_start - + (uchar *) thd->query)); + if (!($$= item) || lex->param_list.push_back(item)) + { + send_error(thd, ER_OUT_OF_RESOURCES); + YYABORT; + } + } + ; + +signed_literal: + literal { $$ = $1; } + | '+' NUM_literal { $$ = $2; } + | '-' NUM_literal + { + $2->max_length++; + $$= $2->neg(); + } + ; + literal: text_literal { $$ = $1; } - | NUM { $$ = new Item_int($1.str, (longlong) strtol($1.str, NULL, 10),$1.length); } - | LONG_NUM { $$ = new Item_int($1.str, (longlong) strtoll($1.str,NULL,10), $1.length); } - | ULONGLONG_NUM { $$ = new Item_uint($1.str, $1.length); } - | REAL_NUM { $$ = new Item_real($1.str, $1.length); } - | FLOAT_NUM { $$ = new Item_float($1.str, $1.length); } + | NUM_literal { $$ = $1; } | NULL_SYM { $$ = new Item_null(); - Lex->next_state=STATE_OPERATOR_OR_IDENT;} + Lex->next_state=MY_LEX_OPERATOR_OR_IDENT;} + | FALSE_SYM { $$= new Item_int((char*) "FALSE",0,1); } + | TRUE_SYM { $$= new Item_int((char*) "TRUE",1,1); } | HEX_NUM { $$ = new Item_varbinary($1.str,$1.length);} + | UNDERSCORE_CHARSET HEX_NUM + { + Item *tmp= new Item_varbinary($2.str,$2.length); + /* + it is OK only emulate fix_fieds, because we need only + value of constant + */ + String *str= tmp ? + tmp->quick_fix_field(), tmp->val_str((String*) 0) : + (String*) 0; + $$= new Item_string(str ? str->ptr() : "", + str ? str->length() : 0, + Lex->charset); + } | DATE_SYM text_literal { $$ = $2; } | TIME_SYM text_literal { $$ = $2; } | TIMESTAMP text_literal { $$ = $2; }; +NUM_literal: + NUM { int error; $$ = new Item_int($1.str, (longlong) my_strtoll10($1.str, NULL, &error), $1.length); } + | LONG_NUM { int error; $$ = new Item_int($1.str, (longlong) my_strtoll10($1.str, NULL, &error), $1.length); } + | ULONGLONG_NUM { $$ = new Item_uint($1.str, $1.length); } + | REAL_NUM { $$ = new Item_real($1.str, $1.length); } + | FLOAT_NUM { $$ = new Item_float($1.str, $1.length); } + ; + /********************************************************************** ** Createing different items. **********************************************************************/ @@ -3228,10 +4986,19 @@ insert_ident: | table_wild { $$=$1; }; table_wild: - ident '.' '*' { $$ = new Item_field(NullS,$1.str,"*"); } + ident '.' '*' + { + $$ = new Item_field(NullS,$1.str,"*"); + Lex->current_select->with_wild++; + } | ident '.' ident '.' '*' - { $$ = new Item_field((current_thd->client_capabilities & - CLIENT_NO_SCHEMA ? NullS : $1.str),$3.str,"*"); }; + { + $$ = new Item_field((YYTHD->client_capabilities & + CLIENT_NO_SCHEMA ? NullS : $1.str), + $3.str,"*"); + Lex->current_select->with_wild++; + } + ; order_ident: expr { $$=$1; }; @@ -3240,79 +5007,216 @@ simple_ident: ident { SELECT_LEX *sel=Select; - $$ = !sel->create_refs || sel->in_sum_expr > 0 ? (Item*) new Item_field(NullS,NullS,$1.str) : (Item*) new Item_ref(NullS,NullS,$1.str); + $$= (sel->parsing_place != IN_HAVING || + sel->get_in_sum_expr() > 0) ? + (Item*) new Item_field(NullS,NullS,$1.str) : + (Item*) new Item_ref(NullS, NullS, $1.str); } | ident '.' ident { - SELECT_LEX *sel=Select; - $$ = !sel->create_refs || sel->in_sum_expr > 0 ? 
(Item*) new Item_field(NullS,$1.str,$3.str) : (Item*) new Item_ref(NullS,$1.str,$3.str); + THD *thd= YYTHD; + LEX *lex= thd->lex; + SELECT_LEX *sel= lex->current_select; + if (sel->no_table_names_allowed) + { + my_printf_error(ER_TABLENAME_NOT_ALLOWED_HERE, + ER(ER_TABLENAME_NOT_ALLOWED_HERE), + MYF(0), $1.str, thd->where); + } + $$= (sel->parsing_place != IN_HAVING || + sel->get_in_sum_expr() > 0) ? + (Item*) new Item_field(NullS,$1.str,$3.str) : + (Item*) new Item_ref(NullS, $1.str, $3.str); } | '.' ident '.' ident { - SELECT_LEX *sel=Select; - $$ = !sel->create_refs || sel->in_sum_expr > 0 ? (Item*) new Item_field(NullS,$2.str,$4.str) : (Item*) new Item_ref(NullS,$2.str,$4.str); + THD *thd= YYTHD; + LEX *lex= thd->lex; + SELECT_LEX *sel= lex->current_select; + if (sel->no_table_names_allowed) + { + my_printf_error(ER_TABLENAME_NOT_ALLOWED_HERE, + ER(ER_TABLENAME_NOT_ALLOWED_HERE), + MYF(0), $2.str, thd->where); + } + $$= (sel->parsing_place != IN_HAVING || + sel->get_in_sum_expr() > 0) ? + (Item*) new Item_field(NullS,$2.str,$4.str) : + (Item*) new Item_ref(NullS, $2.str, $4.str); } | ident '.' ident '.' ident { - SELECT_LEX *sel=Select; - $$ = !sel->create_refs || sel->in_sum_expr > 0 ? (Item*) new Item_field((current_thd->client_capabilities & CLIENT_NO_SCHEMA ? NullS :$1.str),$3.str,$5.str) : (Item*) new Item_ref((current_thd->client_capabilities & CLIENT_NO_SCHEMA ? NullS :$1.str),$3.str,$5.str); + THD *thd= YYTHD; + LEX *lex= thd->lex; + SELECT_LEX *sel= lex->current_select; + if (sel->no_table_names_allowed) + { + my_printf_error(ER_TABLENAME_NOT_ALLOWED_HERE, + ER(ER_TABLENAME_NOT_ALLOWED_HERE), + MYF(0), $3.str, thd->where); + } + $$= (sel->parsing_place != IN_HAVING || + sel->get_in_sum_expr() > 0) ? + (Item*) new Item_field((YYTHD->client_capabilities & + CLIENT_NO_SCHEMA ? NullS : $1.str), + $3.str, $5.str) : + (Item*) new Item_ref((YYTHD->client_capabilities & + CLIENT_NO_SCHEMA ? NullS : $1.str), + $3.str, $5.str); }; field_ident: ident { $$=$1;} - | ident '.' ident { $$=$3;} /* Skipp schema name in create*/ + | ident '.' ident '.' ident + { + TABLE_LIST *table= (TABLE_LIST*) Select->table_list.first; + if (my_strcasecmp(table_alias_charset, $1.str, table->db)) + { + net_printf(YYTHD, ER_WRONG_DB_NAME, $1.str); + YYABORT; + } + if (my_strcasecmp(table_alias_charset, $3.str, table->real_name)) + { + net_printf(YYTHD, ER_WRONG_TABLE_NAME, $3.str); + YYABORT; + } + $$=$5; + } + | ident '.' ident + { + TABLE_LIST *table= (TABLE_LIST*) Select->table_list.first; + if (my_strcasecmp(table_alias_charset, $1.str, table->alias)) + { + net_printf(YYTHD, ER_WRONG_TABLE_NAME, $1.str); + YYABORT; + } + $$=$3; + } | '.' ident { $$=$2;} /* For Delphi */; table_ident: ident { $$=new Table_ident($1); } - | ident '.' ident { $$=new Table_ident($1,$3,0);} + | ident '.' ident { $$=new Table_ident(YYTHD, $1,$3,0);} | '.' ident { $$=new Table_ident($2);} /* For Delphi */ ; -table_ident_ref: - ident { LEX_STRING db={(char*) "",0}; $$=new Table_ident(db,$1,0); } - | ident '.' 
ident { $$=new Table_ident($1,$3,0);} +table_ident_nodb: + ident { LEX_STRING db={(char*) any_db,3}; $$=new Table_ident(YYTHD, db,$1,0); } ; +IDENT_sys: + IDENT { $$= $1; } + | IDENT_QUOTED + { + THD *thd= YYTHD; + if (thd->charset_is_system_charset) + { + CHARSET_INFO *cs= system_charset_info; + int dummy_error; + uint wlen= cs->cset->well_formed_len(cs, $1.str, + $1.str+$1.length, + $1.length, &dummy_error); + if (wlen < $1.length) + { + net_printf(YYTHD, ER_INVALID_CHARACTER_STRING, cs->csname, + $1.str + wlen); + YYABORT; + } + $$= $1; + } + else + thd->convert_string(&$$, system_charset_info, + $1.str, $1.length, thd->charset()); + } + ; + +TEXT_STRING_sys: + TEXT_STRING + { + THD *thd= YYTHD; + if (thd->charset_is_system_charset) + $$= $1; + else + thd->convert_string(&$$, system_charset_info, + $1.str, $1.length, thd->charset()); + } + ; + +TEXT_STRING_literal: + TEXT_STRING + { + THD *thd= YYTHD; + if (thd->charset_is_collation_connection) + $$= $1; + else + thd->convert_string(&$$, thd->variables.collation_connection, + $1.str, $1.length, thd->charset()); + } + ; + + ident: - IDENT { $$=$1; } + IDENT_sys { $$=$1; } | keyword { - LEX *lex= Lex; - $$.str= lex->thd->strmake($1.str,$1.length); - $$.length=$1.length; - if (lex->next_state != STATE_END) - lex->next_state=STATE_OPERATOR_OR_IDENT; + THD *thd= YYTHD; + $$.str= thd->strmake($1.str, $1.length); + $$.length= $1.length; } ; ident_or_text: - ident { $$=$1;} - | TEXT_STRING { $$=$1;} - | LEX_HOSTNAME { $$=$1;}; + ident { $$=$1;} + | TEXT_STRING_sys { $$=$1;} + | LEX_HOSTNAME { $$=$1;}; user: ident_or_text { - if (!($$=(LEX_USER*) sql_alloc(sizeof(st_lex_user)))) + THD *thd= YYTHD; + if (!($$=(LEX_USER*) thd->alloc(sizeof(st_lex_user)))) YYABORT; - $$->user = $1; $$->host.str=NullS; - } + $$->user = $1; + $$->host.str= (char *) "%"; + $$->host.length= 1; + } | ident_or_text '@' ident_or_text { - if (!($$=(LEX_USER*) sql_alloc(sizeof(st_lex_user)))) + THD *thd= YYTHD; + if (!($$=(LEX_USER*) thd->alloc(sizeof(st_lex_user)))) YYABORT; $$->user = $1; $$->host=$3; - }; + } + | CURRENT_USER optional_braces + { + THD *thd= YYTHD; + if (!($$=(LEX_USER*) thd->alloc(sizeof(st_lex_user)))) + YYABORT; + $$->user.str= thd->priv_user; + $$->user.length= strlen(thd->priv_user); + if (*thd->priv_host != 0) + { + $$->host.str= thd->priv_host; + $$->host.length= strlen(thd->priv_host); + } + else + { + $$->host.str= (char *) "%"; + $$->host.length= 1; + } + }; /* Keyword that we allow for identifiers */ keyword: ACTION {} + | ADDDATE_SYM {} | AFTER_SYM {} | AGAINST {} | AGGREGATE_SYM {} + | ANY_SYM {} + | ASCII_SYM {} | AUTO_INC {} | AVG_ROW_LENGTH {} | AVG_SYM {} @@ -3323,6 +5227,8 @@ keyword: | BIT_SYM {} | BOOL_SYM {} | BOOLEAN_SYM {} + | BYTE_SYM {} + | BTREE_SYM {} | CACHE_SYM {} | CHANGED {} | CHARSET {} @@ -3330,52 +5236,67 @@ keyword: | CIPHER_SYM {} | CLIENT_SYM {} | CLOSE_SYM {} + | COLLATION_SYM {} | COMMENT_SYM {} | COMMITTED_SYM {} | COMMIT_SYM {} | COMPRESSED_SYM {} | CONCURRENT {} + | CONSISTENT_SYM {} | CUBE_SYM {} | DATA_SYM {} | DATETIME {} | DATE_SYM {} | DAY_SYM {} + | DEALLOCATE_SYM {} | DELAY_KEY_WRITE_SYM {} | DES_KEY_FILE {} | DIRECTORY_SYM {} + | DISCARD {} | DO_SYM {} | DUMPFILE {} + | DUPLICATE_SYM {} | DYNAMIC_SYM {} | END {} | ENUM {} + | ENGINE_SYM {} + | ENGINES_SYM {} + | ERRORS {} | ESCAPE_SYM {} | EVENTS_SYM {} | EXECUTE_SYM {} + | EXPANSION_SYM {} | EXTENDED_SYM {} | FAST_SYM {} - | DISABLE_SYM {} - | ENABLE_SYM {} + | DISABLE_SYM {} + | ENABLE_SYM {} | FULL {} | FILE_SYM {} | FIRST_SYM {} | FIXED_SYM {} | 
FLUSH_SYM {} - | GRANTS {} + | GEOMETRY_SYM {} + | GEOMETRYCOLLECTION {} + | GET_FORMAT {} + | GRANTS {} | GLOBAL_SYM {} - | HEAP_SYM {} | HANDLER_SYM {} + | HASH_SYM {} + | HELP_SYM {} | HOSTS_SYM {} | HOUR_SYM {} | IDENTIFIED_SYM {} + | IMPORT {} | INDEXES {} | ISOLATION {} - | ISAM_SYM {} | ISSUER_SYM {} | INNOBASE_SYM {} | INSERT_METHOD {} - | IO_THREAD {} + | RELAY_THREAD {} | LAST_SYM {} + | LEAVES {} | LEVEL_SYM {} + | LINESTRING {} | LOCAL_SYM {} | LOCKS_SYM {} | LOGS_SYM {} @@ -3389,40 +5310,56 @@ keyword: | MASTER_PASSWORD_SYM {} | MASTER_SERVER_ID_SYM {} | MASTER_CONNECT_RETRY_SYM {} - | MAX_CONNECTIONS_PER_HOUR {} - | MAX_QUERIES_PER_HOUR {} - | MAX_UPDATES_PER_HOUR {} + | MASTER_SSL_SYM {} + | MASTER_SSL_CA_SYM {} + | MASTER_SSL_CAPATH_SYM {} + | MASTER_SSL_CERT_SYM {} + | MASTER_SSL_CIPHER_SYM {} + | MASTER_SSL_KEY_SYM {} + | MAX_CONNECTIONS_PER_HOUR {} + | MAX_QUERIES_PER_HOUR {} + | MAX_UPDATES_PER_HOUR {} | MEDIUM_SYM {} - | MERGE_SYM {} - | MEMORY_SYM {} + | MICROSECOND_SYM {} | MINUTE_SYM {} | MIN_ROWS {} | MODIFY_SYM {} | MODE_SYM {} | MONTH_SYM {} - | MYISAM_SYM {} + | MULTILINESTRING {} + | MULTIPOINT {} + | MULTIPOLYGON {} + | NAMES_SYM {} | NATIONAL_SYM {} | NCHAR_SYM {} + | NDBCLUSTER_SYM {} | NEXT_SYM {} | NEW_SYM {} | NO_SYM {} | NONE_SYM {} + | NVARCHAR_SYM {} | OFFSET_SYM {} + | OLD_PASSWORD {} + | ONE_SHOT_SYM {} | OPEN_SYM {} | PACK_KEYS_SYM {} + | PARTIAL {} | PASSWORD {} + | POINT_SYM {} + | POLYGON {} + | PREPARE_SYM {} | PREV_SYM {} | PROCESS {} | PROCESSLIST_SYM {} | QUERY_SYM {} | QUICK {} - | RAID_0_SYM {} + | RAID_0_SYM {} | RAID_CHUNKS {} | RAID_CHUNKSIZE {} - | RAID_STRIPED_SYM {} + | RAID_STRIPED_SYM {} | RAID_TYPE {} - | RELAY_LOG_FILE_SYM {} - | RELAY_LOG_POS_SYM {} + | RELAY_LOG_FILE_SYM {} + | RELAY_LOG_POS_SYM {} | RELOAD {} | REPAIR {} | REPEATABLE_SYM {} @@ -3435,24 +5372,32 @@ keyword: | ROWS_SYM {} | ROW_FORMAT_SYM {} | ROW_SYM {} + | RTREE_SYM {} | SAVEPOINT_SYM {} | SECOND_SYM {} + | SERIAL_SYM {} | SERIALIZABLE_SYM {} | SESSION_SYM {} | SIGNED_SYM {} + | SIMPLE_SYM {} | SHARE_SYM {} | SHUTDOWN {} - | SLAVE {} + | SLAVE {} + | SNAPSHOT_SYM {} + | SOUNDS_SYM {} | SQL_CACHE_SYM {} | SQL_BUFFER_RESULT {} | SQL_NO_CACHE_SYM {} - | SQL_THREAD {} + | SQL_THREAD {} | START_SYM {} | STATUS_SYM {} | STOP_SYM {} + | STORAGE_SYM {} | STRING_SYM {} + | SUBDATE_SYM {} | SUBJECT_SYM {} | SUPER_SYM {} + | TABLESPACE {} | TEMPORARY {} | TEXT_SYM {} | TRANSACTION_SYM {} @@ -3460,13 +5405,21 @@ keyword: | TIMESTAMP {} | TIME_SYM {} | TYPE_SYM {} + | TYPES_SYM {} + | UDF_RETURNS_SYM {} | UDF_SYM {} | UNCOMMITTED_SYM {} + | UNICODE_SYM {} + | UNTIL_SYM {} + | USER {} | USE_FRM {} | VARIABLES {} + | VALUE_SYM {} + | WARNINGS {} | WORK_SYM {} | X509_SYM {} - | YEAR_SYM {}; + | YEAR_SYM {} + ; /* Option functions */ @@ -3475,8 +5428,10 @@ set: { LEX *lex=Lex; lex->sql_command= SQLCOM_SET_OPTION; - lex->option_type=OPT_DEFAULT; + mysql_init_select(lex); + lex->option_type=OPT_SESSION; lex->var_list.empty(); + lex->one_shot_set= 0; } option_value_list {} @@ -3487,68 +5442,100 @@ opt_option: | OPTION {}; option_value_list: - option_type option_value - | option_value_list ',' option_type option_value; + option_value_ext + | option_value_list ',' option_value_ext; -option_type: - /* empty */ {} +option_value_ext: + option_type_ext sys_option_value {} + | option_type option_value {} + ; + +option_type_ext: + option_type {} | GLOBAL_SYM { Lex->option_type= OPT_GLOBAL; } | LOCAL_SYM { Lex->option_type= OPT_SESSION; } | SESSION_SYM { Lex->option_type= OPT_SESSION; } ; 
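A minimal standalone sketch (plain C++, not server code) of the behaviour the option_value_ext / option_type_ext rules in this hunk encode: each assignment in a SET list is pushed onto lex->var_list together with the scope stored in Lex->option_type at that moment, and an element without its own GLOBAL/LOCAL/SESSION keyword simply reuses whatever scope was last stored (the SET action itself initialises it to OPT_SESSION). VarScope and SetVarEntry below are illustrative stand-ins for enum_var_type and set_var, and the variable names are made up.

  #include <iostream>
  #include <string>
  #include <vector>

  enum class VarScope { Session, Global };     // stand-in for OPT_SESSION / OPT_GLOBAL

  struct SetVarEntry {                         // stand-in for a set_var on lex->var_list
    VarScope    scope;
    std::string name;
    long long   value;
  };

  int main()
  {
    // Models parsing: SET GLOBAL a= 1, b= 2, SESSION c= 3
    std::vector<SetVarEntry> var_list;         // plays the role of lex->var_list
    VarScope current= VarScope::Session;       // SET action: option_type= OPT_SESSION

    current= VarScope::Global;                 // GLOBAL_SYM seen by option_type_ext
    var_list.push_back({current, "a", 1});
    var_list.push_back({current, "b", 2});     // empty option_type: scope left unchanged
    current= VarScope::Session;                // SESSION_SYM
    var_list.push_back({current, "c", 3});

    for (const SetVarEntry &v : var_list)
      std::cout << (v.scope == VarScope::Global ? "GLOBAL " : "SESSION ")
                << v.name << "= " << v.value << '\n';
    return 0;
  }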
+option_type: + /* empty */ {} + | ONE_SHOT_SYM { Lex->option_type= OPT_SESSION; Lex->one_shot_set= 1; } + ; + opt_var_type: /* empty */ { $$=OPT_SESSION; } + | GLOBAL_SYM { $$=OPT_GLOBAL; } | LOCAL_SYM { $$=OPT_SESSION; } | SESSION_SYM { $$=OPT_SESSION; } - | GLOBAL_SYM { $$=OPT_GLOBAL; } ; opt_var_ident_type: /* empty */ { $$=OPT_DEFAULT; } + | GLOBAL_SYM '.' { $$=OPT_GLOBAL; } | LOCAL_SYM '.' { $$=OPT_SESSION; } | SESSION_SYM '.' { $$=OPT_SESSION; } - | GLOBAL_SYM '.' { $$=OPT_GLOBAL; } ; +sys_option_value: + internal_variable_name equal set_expr_or_default + { + LEX *lex=Lex; + lex->var_list.push_back(new set_var(lex->option_type, $1.var, + &$1.base_name, $3)); + } + | TRANSACTION_SYM ISOLATION LEVEL_SYM isolation_types + { + LEX *lex=Lex; + LEX_STRING tmp; + tmp.str=0; + tmp.length=0; + lex->var_list.push_back(new set_var(lex->option_type, + find_sys_var("tx_isolation"), + &tmp, + new Item_int((int32) $4))); + } + ; + option_value: '@' ident_or_text equal expr { Lex->var_list.push_back(new set_var_user(new Item_func_set_user_var($2,$4))); } - | internal_variable_name equal set_expr_or_default - { - LEX *lex=Lex; - lex->var_list.push_back(new set_var(lex->option_type, $1, $3)); - } | '@' '@' opt_var_ident_type internal_variable_name equal set_expr_or_default + { + LEX *lex=Lex; + lex->var_list.push_back(new set_var((enum_var_type) $3, $4.var, + &$4.base_name, $6)); + } + | charset old_or_new_charset_name_or_default + { + THD *thd= YYTHD; + LEX *lex= Lex; + $2= $2 ? $2: global_system_variables.character_set_client; + lex->var_list.push_back(new set_var_collation_client($2,thd->variables.collation_database,$2)); + } + | NAMES_SYM charset_name_or_default opt_collate + { + THD *thd= YYTHD; + LEX *lex= Lex; + $2= $2 ? $2 : global_system_variables.character_set_client; + $3= $3 ? $3 : $2; + if (!my_charset_same($2,$3)) { - LEX *lex=Lex; - lex->var_list.push_back(new set_var((enum_var_type) $3, $4, $6)); - } - | TRANSACTION_SYM ISOLATION LEVEL_SYM isolation_types - { - LEX *lex=Lex; - lex->var_list.push_back(new set_var(lex->option_type, - find_sys_var("tx_isolation"), - new Item_int((int32) $4))); + net_printf(thd,ER_COLLATION_CHARSET_MISMATCH,$3->name,$2->csname); + YYABORT; } - | CHAR_SYM SET opt_equal set_expr_or_default - { - LEX *lex=Lex; - lex->var_list.push_back(new set_var(lex->option_type, - find_sys_var("convert_character_set"), - $4)); + lex->var_list.push_back(new set_var_collation_client($3,$3,$3)); } | PASSWORD equal text_or_password { - THD *thd=current_thd; + THD *thd=YYTHD; LEX_USER *user; - if (!(user=(LEX_USER*) sql_alloc(sizeof(LEX_USER)))) + if (!(user=(LEX_USER*) thd->alloc(sizeof(LEX_USER)))) YYABORT; user->host.str=0; user->user.str=thd->priv_user; - thd->lex.var_list.push_back(new set_var_password(user, $3)); + thd->lex->var_list.push_back(new set_var_password(user, $3)); } | PASSWORD FOR_SYM user equal text_or_password { @@ -3562,9 +5549,43 @@ internal_variable_name: sys_var *tmp=find_sys_var($1.str, $1.length); if (!tmp) YYABORT; - $$=tmp; + $$.var= tmp; + $$.base_name.str=0; + $$.base_name.length=0; + /* + If this is time_zone variable we should open time zone + describing tables + */ + if (tmp == &sys_time_zone) + Lex->time_zone_tables_used= &fake_time_zone_tables_list; } - ; + | ident '.' 
ident + { + if (check_reserved_words(&$1)) + { + yyerror(ER(ER_SYNTAX_ERROR)); + YYABORT; + } + sys_var *tmp=find_sys_var($3.str, $3.length); + if (!tmp) + YYABORT; + if (!tmp->is_struct()) + net_printf(YYTHD, ER_VARIABLE_IS_NOT_STRUCT, $3.str); + $$.var= tmp; + $$.base_name= $1; + } + | DEFAULT '.' ident + { + sys_var *tmp=find_sys_var($3.str, $3.length); + if (!tmp) + YYABORT; + if (!tmp->is_struct()) + net_printf(YYTHD, ER_VARIABLE_IS_NOT_STRUCT, $3.str); + $$.var= tmp; + $$.base_name.str= (char*) "default"; + $$.base_name.length= 7; + } + ; isolation_types: READ_SYM UNCOMMITTED_SYM { $$= ISO_READ_UNCOMMITTED; } @@ -3577,22 +5598,25 @@ text_or_password: TEXT_STRING { $$=$1.str;} | PASSWORD '(' TEXT_STRING ')' { - if (!$3.length) - $$=$3.str; - else - { - char *buff=(char*) sql_alloc(HASH_PASSWORD_LENGTH+1); - make_scrambled_password(buff,$3.str); - $$=buff; - } - }; + $$= $3.length ? YYTHD->variables.old_passwords ? + Item_func_old_password::alloc(YYTHD, $3.str) : + Item_func_password::alloc(YYTHD, $3.str) : + $3.str; + } + | OLD_PASSWORD '(' TEXT_STRING ')' + { + $$= $3.length ? Item_func_old_password::alloc(YYTHD, $3.str) : + $3.str; + } + ; set_expr_or_default: expr { $$=$1; } | DEFAULT { $$=0; } - | ON { $$=new Item_string("ON",2); } - | ALL { $$=new Item_string("ALL",3); } + | ON { $$=new Item_string("ON", 2, system_charset_info); } + | ALL { $$=new Item_string("ALL", 3, system_charset_info); } + | BINARY { $$=new Item_string("binary", 6, system_charset_info); } ; @@ -3617,16 +5641,22 @@ table_lock_list: table_lock: table_ident opt_table_alias lock_option - { if (!add_table_to_list($1,$2,0,(thr_lock_type) $3)) YYABORT; }; + { + if (!Select->add_table_to_list(YYTHD, $1, $2, 0, (thr_lock_type) $3)) + YYABORT; + } + ; lock_option: READ_SYM { $$=TL_READ_NO_INSERT; } - | WRITE_SYM { $$=current_thd->update_lock_default; } + | WRITE_SYM { $$=YYTHD->update_lock_default; } | LOW_PRIORITY WRITE_SYM { $$=TL_WRITE_LOW_PRIORITY; } - | READ_SYM LOCAL_SYM { $$= TL_READ; }; + | READ_SYM LOCAL_SYM { $$= TL_READ; } + ; unlock: - UNLOCK_SYM table_or_tables { Lex->sql_command=SQLCOM_UNLOCK_TABLES; }; + UNLOCK_SYM table_or_tables { Lex->sql_command=SQLCOM_UNLOCK_TABLES; } + ; /* @@ -3636,35 +5666,40 @@ unlock: handler: HANDLER_SYM table_ident OPEN_SYM opt_table_alias { - Lex->sql_command = SQLCOM_HA_OPEN; - if (!add_table_to_list($2,$4,0)) + LEX *lex= Lex; + lex->sql_command = SQLCOM_HA_OPEN; + if (!lex->current_select->add_table_to_list(lex->thd, $2, $4, 0)) YYABORT; } - | HANDLER_SYM table_ident_ref CLOSE_SYM + | HANDLER_SYM table_ident_nodb CLOSE_SYM { - Lex->sql_command = SQLCOM_HA_CLOSE; - if (!add_table_to_list($2,0,0)) + LEX *lex= Lex; + lex->sql_command = SQLCOM_HA_CLOSE; + if (!lex->current_select->add_table_to_list(lex->thd, $2, 0, 0)) YYABORT; } - | HANDLER_SYM table_ident_ref READ_SYM + | HANDLER_SYM table_ident_nodb READ_SYM { LEX *lex=Lex; lex->sql_command = SQLCOM_HA_READ; lex->ha_rkey_mode= HA_READ_KEY_EXACT; /* Avoid purify warnings */ - lex->select->select_limit= 1; - lex->select->offset_limit= 0L; - if (!add_table_to_list($2,0,0)) + lex->current_select->select_limit= 1; + lex->current_select->offset_limit= 0L; + if (!lex->current_select->add_table_to_list(lex->thd, $2, 0, 0)) YYABORT; } - handler_read_or_scan where_clause limit_clause { }; + handler_read_or_scan where_clause opt_limit_clause {} + ; handler_read_or_scan: handler_scan_function { Lex->backup_dir= 0; } - | ident handler_rkey_function { Lex->backup_dir= $1.str; }; + | ident handler_rkey_function { Lex->backup_dir= $1.str; 
} + ; handler_scan_function: FIRST_SYM { Lex->ha_read_mode = RFIRST; } - | NEXT_SYM { Lex->ha_read_mode = RNEXT; }; + | NEXT_SYM { Lex->ha_read_mode = RNEXT; } + ; handler_rkey_function: FIRST_SYM { Lex->ha_read_mode = RFIRST; } @@ -3678,14 +5713,16 @@ handler_rkey_function: lex->ha_rkey_mode=$1; if (!(lex->insert_list = new List_item)) YYABORT; - } '(' values ')' { }; + } '(' values ')' { } + ; handler_rkey_mode: EQ { $$=HA_READ_KEY_EXACT; } | GE { $$=HA_READ_KEY_OR_NEXT; } | LE { $$=HA_READ_KEY_OR_PREV; } | GT_SYM { $$=HA_READ_AFTER_KEY; } - | LT { $$=HA_READ_BEFORE_KEY; }; + | LT { $$=HA_READ_BEFORE_KEY; } + ; /* GRANT / REVOKE */ @@ -3697,13 +5734,23 @@ revoke: lex->users_list.empty(); lex->columns.empty(); lex->grant= lex->grant_tot_col=0; - lex->select->db=0; + lex->select_lex.db=0; lex->ssl_type= SSL_TYPE_NOT_SPECIFIED; lex->ssl_cipher= lex->x509_subject= lex->x509_issuer= 0; bzero((char*) &lex->mqh, sizeof(lex->mqh)); } + revoke_command + {} + ; + +revoke_command: grant_privileges ON opt_table FROM user_list {} + | + ALL opt_privileges ',' GRANT OPTION FROM user_list + { + Lex->sql_command = SQLCOM_REVOKE_ALL; + } ; grant: @@ -3714,7 +5761,7 @@ grant: lex->columns.empty(); lex->sql_command = SQLCOM_GRANT; lex->grant= lex->grant_tot_col= 0; - lex->select->db= 0; + lex->select_lex.db= 0; lex->ssl_type= SSL_TYPE_NOT_SPECIFIED; lex->ssl_cipher= lex->x509_subject= lex->x509_issuer= 0; bzero((char *)&(lex->mqh),sizeof(lex->mqh)); @@ -3726,21 +5773,26 @@ grant: grant_privileges: grant_privilege_list {} - | ALL PRIVILEGES { Lex->grant = GLOBAL_ACLS;} - | ALL { Lex->grant = GLOBAL_ACLS;}; + | ALL opt_privileges { Lex->grant = GLOBAL_ACLS;} + ; + +opt_privileges: + /* empty */ + | PRIVILEGES + ; grant_privilege_list: grant_privilege | grant_privilege_list ',' grant_privilege; grant_privilege: - SELECT_SYM { Lex->which_columns = SELECT_ACL;} opt_column_list {} + SELECT_SYM { Lex->which_columns = SELECT_ACL;} opt_column_list {} | INSERT { Lex->which_columns = INSERT_ACL;} opt_column_list {} | UPDATE_SYM { Lex->which_columns = UPDATE_ACL; } opt_column_list {} | REFERENCES { Lex->which_columns = REFERENCES_ACL;} opt_column_list {} | DELETE_SYM { Lex->grant |= DELETE_ACL;} | USAGE {} - | INDEX { Lex->grant |= INDEX_ACL;} + | INDEX_SYM { Lex->grant |= INDEX_ACL;} | ALTER { Lex->grant |= ALTER_ACL;} | CREATE { Lex->grant |= CREATE_ACL;} | DROP { Lex->grant |= DROP_ACL;} @@ -3761,7 +5813,7 @@ grant_privilege: opt_and: /* empty */ {} - | AND {} + | AND_SYM {} ; require_list: @@ -3775,7 +5827,7 @@ require_list_element: LEX *lex=Lex; if (lex->x509_subject) { - net_printf(&lex->thd->net,ER_DUP_ARGUMENT, "SUBJECT"); + net_printf(lex->thd,ER_DUP_ARGUMENT, "SUBJECT"); YYABORT; } lex->x509_subject=$2.str; @@ -3785,7 +5837,7 @@ require_list_element: LEX *lex=Lex; if (lex->x509_issuer) { - net_printf(&lex->thd->net,ER_DUP_ARGUMENT, "ISSUER"); + net_printf(lex->thd,ER_DUP_ARGUMENT, "ISSUER"); YYABORT; } lex->x509_issuer=$2.str; @@ -3795,58 +5847,59 @@ require_list_element: LEX *lex=Lex; if (lex->ssl_cipher) { - net_printf(&lex->thd->net,ER_DUP_ARGUMENT, "CIPHER"); + net_printf(lex->thd,ER_DUP_ARGUMENT, "CIPHER"); YYABORT; } lex->ssl_cipher=$2.str; } ; - + opt_table: '*' { - LEX *lex=Lex; - lex->select->db=lex->thd->db; + LEX *lex= Lex; + lex->current_select->db= lex->thd->db; if (lex->grant == GLOBAL_ACLS) lex->grant = DB_ACLS & ~GRANT_ACL; else if (lex->columns.elements) { - send_error(&lex->thd->net,ER_ILLEGAL_GRANT_FOR_TABLE); + send_error(lex->thd,ER_ILLEGAL_GRANT_FOR_TABLE); YYABORT; } } | ident '.' 
'*' { - LEX *lex=Lex; - lex->select->db = $1.str; + LEX *lex= Lex; + lex->current_select->db = $1.str; if (lex->grant == GLOBAL_ACLS) lex->grant = DB_ACLS & ~GRANT_ACL; else if (lex->columns.elements) { - send_error(&lex->thd->net,ER_ILLEGAL_GRANT_FOR_TABLE); + send_error(lex->thd,ER_ILLEGAL_GRANT_FOR_TABLE); YYABORT; } } | '*' '.' '*' { - LEX *lex=Lex; - lex->select->db = NULL; + LEX *lex= Lex; + lex->current_select->db = NULL; if (lex->grant == GLOBAL_ACLS) lex->grant= GLOBAL_ACLS & ~GRANT_ACL; else if (lex->columns.elements) { - send_error(&lex->thd->net,ER_ILLEGAL_GRANT_FOR_TABLE); + send_error(lex->thd,ER_ILLEGAL_GRANT_FOR_TABLE); YYABORT; } } | table_ident { LEX *lex=Lex; - if (!add_table_to_list($1,NULL,0)) + if (!lex->current_select->add_table_to_list(lex->thd, $1,NULL,0)) YYABORT; if (lex->grant == GLOBAL_ACLS) lex->grant = TABLE_ACLS & ~GRANT_ACL; - }; + } + ; user_list: @@ -3865,19 +5918,31 @@ grant_user: $$=$1; $1->password=$4; if ($4.length) { - char *buff=(char*) sql_alloc(HASH_PASSWORD_LENGTH+1); - if (buff) - { - make_scrambled_password(buff,$4.str); - $1->password.str=buff; - $1->password.length=HASH_PASSWORD_LENGTH; - } + if (YYTHD->variables.old_passwords) + { + char *buff= + (char *) YYTHD->alloc(SCRAMBLED_PASSWORD_CHAR_LENGTH_323+1); + if (buff) + make_scrambled_password_323(buff, $4.str); + $1->password.str= buff; + $1->password.length= SCRAMBLED_PASSWORD_CHAR_LENGTH_323; + } + else + { + char *buff= + (char *) YYTHD->alloc(SCRAMBLED_PASSWORD_CHAR_LENGTH+1); + if (buff) + make_scrambled_password(buff, $4.str); + $1->password.str= buff; + $1->password.length= SCRAMBLED_PASSWORD_CHAR_LENGTH; + } } } | user IDENTIFIED_SYM BY PASSWORD TEXT_STRING { $$=$1; $1->password=$5 ; } | user - { $$=$1; $1->password.str=NullS; }; + { $$=$1; $1->password.str=NullS; } + ; opt_column_list: @@ -3895,13 +5960,14 @@ column_list: column_list_id: ident { - String *new_str = new String((const char*) $1.str,$1.length); + String *new_str = new (YYTHD->mem_root) String((const char*) $1.str,$1.length,system_charset_info); List_iterator <LEX_COLUMN> iter(Lex->columns); class LEX_COLUMN *point; LEX *lex=Lex; while ((point=iter++)) { - if (!my_strcasecmp(point->column.ptr(),new_str->ptr())) + if (!my_strcasecmp(system_charset_info, + point->column.ptr(), new_str->ptr())) break; } lex->grant_tot_col|= lex->which_columns; @@ -3909,11 +5975,12 @@ column_list_id: point->rights |= lex->which_columns; else lex->columns.push_back(new LEX_COLUMN (*new_str,lex->which_columns)); - }; + } + ; require_clause: /* empty */ - | REQUIRE_SYM require_list + | REQUIRE_SYM require_list { Lex->ssl_type=SSL_TYPE_SPECIFIED; } @@ -3929,7 +5996,7 @@ require_clause: /* empty */ { Lex->ssl_type=SSL_TYPE_NONE; } - ; + ; grant_options: /* empty */ {} @@ -3937,7 +6004,8 @@ grant_options: grant_option_list: grant_option_list grant_option {} - | grant_option {}; + | grant_option {} + ; grant_option: GRANT OPTION { Lex->grant |= GRANT_ACL;} @@ -3955,21 +6023,23 @@ grant_option: { Lex->mqh.connections=$2; Lex->mqh.bits |= 4; - }; + } + ; begin: - BEGIN_SYM { Lex->sql_command = SQLCOM_BEGIN;} opt_work {} + BEGIN_SYM { Lex->sql_command = SQLCOM_BEGIN; Lex->start_transaction_opt= 0;} opt_work {} ; opt_work: /* empty */ {} - | WORK_SYM {;}; + | WORK_SYM {;} + ; commit: COMMIT_SYM { Lex->sql_command = SQLCOM_COMMIT;}; rollback: - ROLLBACK_SYM + ROLLBACK_SYM { Lex->sql_command = SQLCOM_ROLLBACK; } @@ -3986,13 +6056,14 @@ savepoint: }; /* -** UNIONS : glue selects together + UNIONS : glue selects together */ -opt_union: +union_clause: /* 
empty */ {} - | union_list; + | union_list + ; union_list: UNION_SYM union_option @@ -4001,52 +6072,126 @@ union_list: if (lex->exchange) { /* Only the last SELECT can have INTO...... */ - net_printf(&lex->thd->net, ER_WRONG_USAGE,"UNION","INTO"); + net_printf(lex->thd, ER_WRONG_USAGE, "UNION", "INTO"); YYABORT; } - if (lex->select->linkage == NOT_A_SELECT) + if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE) { - send_error(&lex->thd->net, ER_SYNTAX_ERROR); + yyerror(ER(ER_SYNTAX_ERROR)); YYABORT; } - if (mysql_new_select(lex)) + if (mysql_new_select(lex, 0)) YYABORT; - lex->select->linkage=UNION_TYPE; - } + mysql_init_select(lex); + lex->current_select->linkage=UNION_TYPE; + if ($2) /* UNION DISTINCT - remember position */ + lex->current_select->master_unit()->union_distinct= + lex->current_select; + } select_init {} ; union_opt: union_list {} - | optional_order_or_limit {}; + | optional_order_or_limit {} + ; optional_order_or_limit: - /* empty - intentional reduce/reduce conflict here !!! - { code } below should not be executed - when neither ORDER BY nor LIMIT are used */ {} + /* Empty */ {} | { - LEX *lex=Lex; - if (!lex->select->braces) + THD *thd= YYTHD; + LEX *lex= thd->lex; + DBUG_ASSERT(lex->current_select->linkage != GLOBAL_OPTIONS_TYPE); + SELECT_LEX *sel= lex->current_select; + SELECT_LEX_UNIT *unit= sel->master_unit(); + SELECT_LEX *fake= unit->fake_select_lex; + if (fake) { - send_error(&lex->thd->net, ER_SYNTAX_ERROR); - YYABORT; + unit->global_parameters= fake; + fake->no_table_names_allowed= 1; + lex->current_select= fake; } - if (lex->select != &lex->select_lex) - { - if (mysql_new_select(lex)) - YYABORT; - mysql_init_select(lex); - lex->select->linkage=NOT_A_SELECT; - lex->select->select_limit=lex->thd->variables.select_limit; - } + thd->where= "global ORDER clause"; } - opt_order_clause limit_clause + order_or_limit + { + THD *thd= YYTHD; + thd->lex->current_select->no_table_names_allowed= 0; + thd->where= ""; + } ; -union_option: - /* empty */ {} - | DISTINCT {} - | ALL { Lex->union_option=1; } +order_or_limit: + order_clause opt_limit_clause_init + | limit_clause ; + +union_option: + /* empty */ { $$=1; } + | DISTINCT { $$=1; } + | ALL { $$=0; } + ; + +singlerow_subselect: + subselect_start singlerow_subselect_init + subselect_end + { + $$= $2; + }; + +singlerow_subselect_init: + select_init2 + { + $$= new Item_singlerow_subselect(Lex->current_select-> + master_unit()->first_select()); + }; + +exists_subselect: + subselect_start exists_subselect_init + subselect_end + { + $$= $2; + }; + +exists_subselect_init: + select_init2 + { + $$= new Item_exists_subselect(Lex->current_select->master_unit()-> + first_select()); + }; + +in_subselect: + subselect_start in_subselect_init + subselect_end + { + $$= $2; + }; + +in_subselect_init: + select_init2 + { + $$= Lex->current_select->master_unit()->first_select(); + }; + +subselect_start: + '(' SELECT_SYM + { + LEX *lex=Lex; + if (lex->sql_command == (int)SQLCOM_HA_READ || + lex->sql_command == (int)SQLCOM_KILL) + { + yyerror(ER(ER_SYNTAX_ERROR)); + YYABORT; + } + if (mysql_new_select(Lex, 1)) + YYABORT; + }; + +subselect_end: + ')' + { + LEX *lex=Lex; + lex->current_select = lex->current_select->return_after_parsing(); + }; + diff --git a/sql/stacktrace.c b/sql/stacktrace.c index fa9ab093f26..838f547dc02 100644 --- a/sql/stacktrace.c +++ b/sql/stacktrace.c @@ -38,12 +38,12 @@ void safe_print_str(const char* name, const char* val, int max_len) } fprintf(stderr, "= "); - for(; max_len && PTR_SANE(val) && *val; --max_len) + 
for (; max_len && PTR_SANE(val) && *val; --max_len) fputc(*val++, stderr); fputc('\n', stderr); } -#ifdef HAVE_LINUXTHREADS +#ifdef TARGET_OS_LINUX #define SIGRETURN_FRAME_COUNT 2 #if defined(__alpha__) && defined(__GNUC__) @@ -59,7 +59,7 @@ void safe_print_str(const char* name, const char* val, int max_len) inline uchar** find_prev_fp(uint32* pc, uchar** fp) { int i; - for(i = 0; i < MAX_INSTR_IN_FUNC; ++i,--pc) + for (i = 0; i < MAX_INSTR_IN_FUNC; ++i,--pc) { uchar* p = (uchar*)pc; if (p[2] == 222 && p[3] == 35) @@ -73,7 +73,7 @@ inline uchar** find_prev_fp(uint32* pc, uchar** fp) inline uint32* find_prev_pc(uint32* pc, uchar** fp) { int i; - for(i = 0; i < MAX_INSTR_IN_FUNC; ++i,--pc) + for (i = 0; i < MAX_INSTR_IN_FUNC; ++i,--pc) { char* p = (char*)pc; if (p[1] == 0 && p[2] == 94 && p[3] == -73) @@ -201,7 +201,7 @@ end: stack trace is much more helpful in diagnosing the problem, so please do \n\ resolve it\n"); } -#endif /* HAVE_LINUXTHREADS */ +#endif /* TARGET_OS_LINUX */ #endif /* HAVE_STACKTRACE */ /* Produce a core for the thread */ diff --git a/sql/stacktrace.h b/sql/stacktrace.h index 980e1ea07eb..d5d1e05ef0e 100644 --- a/sql/stacktrace.h +++ b/sql/stacktrace.h @@ -18,7 +18,7 @@ extern "C" { #endif -#ifdef HAVE_LINUXTHREADS +#ifdef TARGET_OS_LINUX #if defined(HAVE_STACKTRACE) || (defined (__i386__) || (defined(__alpha__) && defined(__GNUC__))) #undef HAVE_STACKTRACE #define HAVE_STACKTRACE @@ -30,7 +30,7 @@ extern char* heap_start; void print_stacktrace(gptr stack_bottom, ulong thread_stack); void safe_print_str(const char* name, const char* val, int max_len); #endif /* (defined (__i386__) || (defined(__alpha__) && defined(__GNUC__))) */ -#endif /* HAVE_LINUXTHREADS */ +#endif /* TARGET_OS_LINUX */ /* Define empty prototypes for functions that are not implemented */ #ifndef HAVE_STACKTRACE diff --git a/sql/strfunc.cc b/sql/strfunc.cc new file mode 100644 index 00000000000..81aca092cec --- /dev/null +++ b/sql/strfunc.cc @@ -0,0 +1,237 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* Some useful string utility functions used by the MySQL server */ + +#include "mysql_priv.h" + +/* + Return bitmap for strings used in a set + + SYNOPSIS + find_set() + lib Strings in set + str Strings of set-strings separated by ',' + err_pos If error, set to point to start of wrong set string + err_len If error, set to the length of wrong set string + set_warning Set to 1 if some string in set couldn't be used + + NOTE + We delete all end space from str before comparison + + RETURN + bitmap of all sets found in x. + set_warning is set to 1 if there was any sets that couldn't be set +*/ + +static const char field_separator=','; + +ulonglong find_set(TYPELIB *lib, const char *str, uint length, CHARSET_INFO *cs, + char **err_pos, uint *err_len, bool *set_warning) +{ + CHARSET_INFO *strip= cs ? 
cs : &my_charset_latin1; + const char *end= str + strip->cset->lengthsp(strip, str, length); + ulonglong found= 0; + *err_pos= 0; // No error yet + if (str != end) + { + const char *start= str; + for (;;) + { + const char *pos= start; + uint var_len; + int mblen= 1; + + if (cs && cs->mbminlen > 1) + { + for ( ; pos < end; pos+= mblen) + { + my_wc_t wc; + if ((mblen= cs->cset->mb_wc(cs, &wc, (const uchar *) pos, + (const uchar *) end)) < 1) + mblen= 1; // Not to hang on a wrong multibyte sequence + if (wc == (my_wc_t) field_separator) + break; + } + } + else + for (; pos != end && *pos != field_separator; pos++) ; + var_len= (uint) (pos - start); + uint find= cs ? find_type2(lib, start, var_len, cs) : + find_type(lib, start, var_len, (bool) 0); + if (!find) + { + *err_pos= (char*) start; + *err_len= var_len; + *set_warning= 1; + } + else + found|= ((longlong) 1 << (find - 1)); + if (pos >= end) + break; + start= pos + mblen; + } + } + return found; +} + + +/* + Function to find a string in a TYPELIB + (Same format as mysys/typelib.c) + + SYNOPSIS + find_type() + lib TYPELIB (struct of pointer to values + count) + find String to find + length Length of string to find + part_match Allow part matching of value + + RETURN + 0 error + > 0 position in TYPELIB->type_names +1 +*/ + +uint find_type(TYPELIB *lib, const char *find, uint length, bool part_match) +{ + uint found_count=0, found_pos=0; + const char *end= find+length; + const char *i; + const char *j; + for (uint pos=0 ; (j=lib->type_names[pos++]) ; ) + { + for (i=find ; i != end && + my_toupper(system_charset_info,*i) == + my_toupper(system_charset_info,*j) ; i++, j++) ; + if (i == end) + { + if (! *j) + return(pos); + found_count++; + found_pos= pos; + } + } + return(found_count == 1 && part_match ? found_pos : 0); +} + + +/* + Find a string in a list of strings according to collation + + SYNOPSIS + find_type2() + lib TYPELIB (struct of pointer to values + count) + x String to find + length String length + cs Character set + collation to use for comparison + + NOTES + + RETURN + 0 No matching value + >0 Offset+1 in typelib for matched string +*/ + +uint find_type2(TYPELIB *typelib, const char *x, uint length, CHARSET_INFO *cs) +{ + int find,pos; + const char *j; + DBUG_ENTER("find_type2"); + DBUG_PRINT("enter",("x: '%s' lib: 0x%lx",x,typelib)); + + if (!typelib->count) + { + DBUG_PRINT("exit",("no count")); + DBUG_RETURN(0); + } + + for (find=0, pos=0 ; (j=typelib->type_names[pos]) ; pos++) + { + if (!my_strnncoll(cs, (const uchar*) x, length, + (const uchar*) j, typelib->type_lengths[pos])) + DBUG_RETURN(pos+1); + } + DBUG_PRINT("exit",("Couldn't find type")); + DBUG_RETURN(0); +} /* find_type */ + + +/* + Un-hex all elements in a typelib + + SYNOPSIS + unhex_type2() + interval TYPELIB (struct of pointer to values + lengths + count) + + NOTES + + RETURN + N/A +*/ + +void unhex_type2(TYPELIB *interval) +{ + for (uint pos= 0; pos < interval->count; pos++) + { + char *from, *to; + for (from= to= (char*) interval->type_names[pos]; *from; ) + { + /* + Note, hexchar_to_int(*from++) doesn't work + one some compilers, e.g. IRIX. Looks like a compiler + bug in inline functions in combination with arguments + that have a side effect. So, let's use from[0] and from[1] + and increment 'from' by two later. 
+ */ + + *to++= (char) (hexchar_to_int(from[0]) << 4) + + hexchar_to_int(from[1]); + from+= 2; + } + interval->type_lengths[pos] /= 2; + } +} + + +/* + Check if the first word in a string is one of the ones in TYPELIB + + SYNOPSIS + check_word() + lib TYPELIB + val String to check + end End of input + end_of_word Store value of last used byte here if we found word + + RETURN + 0 No matching value + > 1 lib->type_names[#-1] matched + end_of_word will point to separator character/end in 'val' +*/ + +uint check_word(TYPELIB *lib, const char *val, const char *end, + const char **end_of_word) +{ + int res; + const char *ptr; + + /* Fiend end of word */ + for (ptr= val ; ptr < end && my_isalpha(&my_charset_latin1, *ptr) ; ptr++) + ; + if ((res=find_type(lib, val, (uint) (ptr - val), 1)) > 0) + *end_of_word= ptr; + return res; +} diff --git a/sql/structs.h b/sql/structs.h index 156bf7745af..081ada88bf7 100644 --- a/sql/structs.h +++ b/sql/structs.h @@ -20,9 +20,28 @@ struct st_table; class Field; -typedef struct st_date_format { /* How to print date */ - uint pos[6]; /* Positions to YY.MM.DD HH:MM:SS */ -} DATE_FORMAT; +typedef struct st_lex_string +{ + char *str; + uint length; +} LEX_STRING; + +typedef struct st_lex_string_with_init :public st_lex_string +{ + st_lex_string_with_init(const char *str_arg, uint length_arg) + { + str= (char*) str_arg; + length= length_arg; + } +} LEX_STRING_WITH_INIT; + + +typedef struct st_date_time_format { + uchar positions[8]; + char time_separator; /* Separator between hour and minute */ + uint flag; /* For future */ + LEX_STRING format; +} DATE_TIME_FORMAT; typedef struct st_keyfile_info { /* used with ha_info() */ @@ -70,7 +89,12 @@ typedef struct st_key { enum ha_key_alg algorithm; KEY_PART_INFO *key_part; char *name; /* Name of key */ - ulong *rec_per_key; /* Key part distribution */ + /* + Array of AVG(#records with the same field value) for 1st ... Nth key part. + 0 means 'not known'. + For temporary heap tables this member is NULL. + */ + ulong *rec_per_key; union { int bdb_return_if_eq; } handler; @@ -104,19 +128,22 @@ typedef struct st_read_record { /* Parameter to read_record */ uint index; byte *ref_pos; /* pointer to form->refpos */ byte *record; + byte *rec_buf; /* to read field values after filesort */ byte *cache,*cache_pos,*cache_end,*read_positions; IO_CACHE *io_cache; bool print_error, ignore_not_found_rows; } READ_RECORD; -enum timestamp_type { TIMESTAMP_NONE, TIMESTAMP_DATE, TIMESTAMP_FULL, - TIMESTAMP_TIME }; -typedef struct st_time { - uint year,month,day,hour,minute,second,second_part; - bool neg; - timestamp_type time_type; -} TIME; +/* + Originally MySQL used TIME structure inside server only, but since + 4.1 it's exported to user in the new client API. Define aliases for + new names to keep existing code simple. 
+*/ + +typedef struct st_mysql_time TIME; +typedef enum enum_mysql_timestamp_type timestamp_type; + typedef struct { ulong year,month,day,hour; @@ -125,6 +152,14 @@ typedef struct { } INTERVAL; +typedef struct st_known_date_time_format { + const char *format_name; + const char *date_format; + const char *datetime_format; + const char *time_format; +} KNOWN_DATE_TIME_FORMAT; + + enum SHOW_TYPE { SHOW_UNDEF, @@ -145,22 +180,23 @@ enum SHOW_TYPE SHOW_SSL_CTX_SESS_TIMEOUTS, SHOW_SSL_CTX_SESS_CACHE_FULL, SHOW_SSL_GET_CIPHER_LIST, #endif /* HAVE_OPENSSL */ - SHOW_RPL_STATUS, SHOW_SLAVE_RUNNING + SHOW_RPL_STATUS, SHOW_SLAVE_RUNNING, SHOW_SLAVE_RETRIED_TRANS, + SHOW_KEY_CACHE_LONG, SHOW_KEY_CACHE_CONST_LONG, SHOW_KEY_CACHE_LONGLONG }; enum SHOW_COMP_OPTION { SHOW_OPTION_YES, SHOW_OPTION_NO, SHOW_OPTION_DISABLED}; + +extern const char *show_comp_option_name[]; + typedef int *(*update_var)(THD *, struct show_var_st *); + typedef struct show_var_st { const char *name; char *value; SHOW_TYPE type; } SHOW_VAR; -typedef struct lex_string { - char *str; - uint length; -} LEX_STRING; typedef struct st_lex_user { LEX_STRING user, host, password; @@ -182,7 +218,7 @@ typedef struct user_conn { #define REG_NEW_RECORD 2 /* Write a new record if not found */ #define REG_UPDATE 4 /* Uppdate record */ #define REG_DELETE 8 /* Delete found record */ -#define REG_PROG 16 /* User is updateing database */ +#define REG_PROG 16 /* User is updating database */ #define REG_CLEAR_AFTER_WRITE 32 #define REG_MAY_BE_UPDATED 64 #define REG_AUTO_UPDATE 64 /* Used in D-forms for scroll-tables */ diff --git a/sql/table.cc b/sql/table.cc index 8ce6362e63c..8ac64ac198d 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -24,32 +24,55 @@ /* Functions defined in this file */ -static void frm_error(int error,TABLE *form,const char *name,int errortype); +static void frm_error(int error,TABLE *form,const char *name, + int errortype, int errarg); static void fix_type_pointers(const char ***array, TYPELIB *point_to_type, uint types, char **names); static uint find_field(TABLE *form,uint start,uint length); -static byte* get_field_name(Field *buff,uint *length, +static byte* get_field_name(Field **buff,uint *length, my_bool not_used __attribute__((unused))) { - *length= (uint) strlen(buff->field_name); - return (byte*) buff->field_name; + *length= (uint) strlen((*buff)->field_name); + return (byte*) (*buff)->field_name; } - /* Open a .frm file */ +/* + Open a .frm file + + SYNOPSIS + openfrm() + + name path to table-file "db/name" + alias alias for table + db_stat open flags (for example HA_OPEN_KEYFILE|HA_OPEN_RNDFILE..) + can be 0 (example in ha_example_table) + prgflag READ_ALL etc.. + ha_open_flags HA_OPEN_ABORT_IF_LOCKED etc.. 
+ outparam result table + + RETURN VALUES + 0 ok + 1 Error (see frm_error) + 2 Error (see frm_error) + 3 Wrong data in .frm file + 4 Error (see frm_error) + 5 Error (see frm_error: charset unavailable) + 6 Unknown .frm version +*/ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, uint ha_open_flags, TABLE *outparam) { reg1 uint i; reg2 uchar *strpos; - int j,error; + int j,error, errarg= 0; uint rec_buff_length,n_length,int_length,records,key_parts,keys, interval_count,interval_parts,read_length,db_create_options; uint key_info_length, com_length; ulong pos; - char index_file[FN_REFLEN], *names, *keynames; + char index_file[FN_REFLEN], *names, *keynames, *comment_pos; uchar head[288],*disk_buff,new_field_pack_flag; my_string record; const char **int_array; @@ -61,6 +84,7 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, uchar *null_pos; uint null_bit, new_frm_ver, field_pack_length; SQL_CRYPT *crypted=0; + MEM_ROOT **root_ptr, *old_root; DBUG_ENTER("openfrm"); DBUG_PRINT("enter",("name: '%s' form: %lx",name,outparam)); @@ -71,17 +95,18 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, error=1; init_sql_alloc(&outparam->mem_root, TABLE_ALLOC_BLOCK_SIZE, 0); - MEM_ROOT *old_root=my_pthread_getspecific_ptr(MEM_ROOT*,THR_MALLOC); - my_pthread_setspecific_ptr(THR_MALLOC,&outparam->mem_root); + root_ptr= my_pthread_getspecific_ptr(MEM_ROOT**, THR_MALLOC); + old_root= *root_ptr; + *root_ptr= &outparam->mem_root; outparam->real_name=strdup_root(&outparam->mem_root, - name+dirname_length(name)); - *fn_ext(outparam->real_name)='\0'; // Remove extension + name+dirname_length(name)); outparam->table_name=my_strdup(alias,MYF(MY_WME)); if (!outparam->real_name || !outparam->table_name) goto err_end; + *fn_ext(outparam->real_name)='\0'; // Remove extension - if ((file=my_open(fn_format(index_file,name,"",reg_ext,4), + if ((file=my_open(fn_format(index_file,name,"",reg_ext,MY_UNPACK_FILENAME), O_RDONLY | O_SHARE, MYF(0))) < 0) @@ -94,9 +119,13 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, *fn_ext(outparam->path)='\0'; // Remove extension if (my_read(file,(byte*) head,64,MYF(MY_NABP))) goto err_not_open; - if (head[0] != (uchar) 254 || head[1] != 1 || - (head[2] != FRM_VER && head[2] != FRM_VER+1 && head[2] != FRM_VER+3)) + if (head[0] != (uchar) 254 || head[1] != 1) + goto err_not_open; /* purecov: inspected */ + if (head[2] != FRM_VER && head[2] != FRM_VER+1 && head[2] != FRM_VER+3) + { + error= 6; goto err_not_open; /* purecov: inspected */ + } new_field_pack_flag=head[27]; new_frm_ver= (head[2] - FRM_VER); field_pack_length= new_frm_ver < 2 ? 
11 : 17; @@ -106,6 +135,7 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, goto err_not_open; /* purecov: inspected */ *fn_ext(index_file)='\0'; // Remove .frm extension + outparam->frm_version= head[2]; outparam->db_type=ha_checktype((enum db_type) (uint) *(head+3)); outparam->db_create_options=db_create_options=uint2korr(head+30); outparam->db_options_in_use=outparam->db_create_options; @@ -117,8 +147,22 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, outparam->raid_type= head[41]; outparam->raid_chunks= head[42]; outparam->raid_chunksize= uint4korr(head+43); + outparam->table_charset=get_charset((uint) head[38],MYF(0)); null_field_first=1; } + if (!outparam->table_charset) + { + /* unknown charset in head[38] or pre-3.23 frm */ + if (use_mb(default_charset_info)) + { + /* Warn that we may be changing the size of character columns */ + sql_print_warning("'%s' had no or invalid character set, " + "and default character set is multi-byte, " + "so character column sizes may have changed", + name); + } + outparam->table_charset=default_charset_info; + } outparam->db_record_offset=1; if (db_create_options & HA_OPTION_LONG_BLOB_PTR) outparam->blob_ptr_size=portable_sizeof_char_ptr; @@ -133,10 +177,23 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, VOID(my_seek(file,(ulong) uint2korr(head+6),MY_SEEK_SET,MYF(0))); if (read_string(file,(gptr*) &disk_buff,key_info_length)) goto err_not_open; /* purecov: inspected */ - outparam->keys=keys= disk_buff[0]; - outparam->keys_for_keyread= outparam->keys_in_use= set_bits(key_map, keys); + if (disk_buff[0] & 0x80) + { + outparam->keys= keys= (disk_buff[1] << 7) | (disk_buff[0] & 0x7f); + outparam->key_parts= key_parts= uint2korr(disk_buff+2); + } + else + { + outparam->keys= keys= disk_buff[0]; + outparam->key_parts= key_parts= disk_buff[1]; + } + outparam->keys_for_keyread.init(0); + outparam->keys_in_use.init(keys); + outparam->read_only_keys.init(keys); + outparam->quick_keys.init(); + outparam->used_keys.init(); + outparam->keys_in_use_for_query.init(); - outparam->key_parts=key_parts=disk_buff[1]; n_length=keys*sizeof(KEY)+key_parts*sizeof(KEY_PART_INFO); if (!(keyinfo = (KEY*) alloc_root(&outparam->mem_root, n_length+uint2korr(disk_buff+4)))) @@ -208,10 +265,9 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, #ifdef HAVE_CRYPTED_FRM else if (*(head+26) == 2) { - extern SQL_CRYPT *get_crypt_for_frm(void); - my_pthread_setspecific_ptr(THR_MALLOC,old_root); + *root_ptr= old_root crypted=get_crypt_for_frm(); - my_pthread_setspecific_ptr(THR_MALLOC,&outparam->mem_root); + *root_ptr= &outparam->mem_root; outparam->crypted=1; } #endif @@ -236,9 +292,12 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, record[outparam->reclength]=0; // For purify and ->c_ptr() outparam->rec_buff_length=rec_buff_length; if (my_pread(file,(byte*) record,(uint) outparam->reclength, - (ulong) (uint2korr(head+6)+uint2korr(head+14)), + (ulong) (uint2korr(head+6)+ + ((uint2korr(head+14) == 0xffff ? 
+ uint4korr(head+47) : uint2korr(head+14)))), MYF(MY_NABP))) goto err_not_open; /* purecov: inspected */ + /* HACK: table->record[2] is used instead of table->default_values here */ for (i=0 ; i < records ; i++, record+=rec_buff_length) { outparam->record[i]=(byte*) record; @@ -248,19 +307,22 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, if (records == 2) { /* fix for select */ - outparam->record[2]=outparam->record[1]; + outparam->default_values=outparam->record[1]; if (db_stat & HA_READ_ONLY) outparam->record[1]=outparam->record[0]; /* purecov: inspected */ } + outparam->insert_values=0; /* for INSERT ... UPDATE */ VOID(my_seek(file,pos,MY_SEEK_SET,MYF(0))); if (my_read(file,(byte*) head,288,MYF(MY_NABP))) goto err_not_open; +#ifdef HAVE_CRYPTED_FRM if (crypted) { crypted->decode((char*) head+256,288-256); if (sint2korr(head+284) != 0) // Should be 0 goto err_not_open; // Wrong password } +#endif outparam->fields= uint2korr(head+258); pos=uint2korr(head+260); /* Length of all screens */ @@ -273,7 +335,7 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, outparam->comment=strdup_root(&outparam->mem_root, (char*) head+47); - DBUG_PRINT("info",("i_count: %d i_parts: %d index: %d n_length: %d int_length: %d", interval_count,interval_parts, outparam->keys,n_length,int_length)); + DBUG_PRINT("info",("i_count: %d i_parts: %d index: %d n_length: %d int_length: %d com_length: %d", interval_count,interval_parts, outparam->keys,n_length,int_length, com_length)); if (!(field_ptr = (Field **) alloc_root(&outparam->mem_root, @@ -289,12 +351,14 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, pos+ (uint) (n_length+int_length+com_length)); if (read_string(file,(gptr*) &disk_buff,read_length)) goto err_not_open; /* purecov: inspected */ +#ifdef HAVE_CRYPTED_FRM if (crypted) { crypted->decode((char*) disk_buff,read_length); delete crypted; crypted=0; } +#endif strpos= disk_buff+pos; outparam->intervals= (TYPELIB*) (field_ptr+outparam->fields+1); @@ -304,10 +368,30 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, outparam->intervals=0; // For better debugging memcpy((char*) names, strpos+(outparam->fields*field_pack_length), (uint) (n_length+int_length)); + comment_pos=names+(n_length+int_length); + memcpy(comment_pos, disk_buff+read_length-com_length, com_length); fix_type_pointers(&int_array,&outparam->fieldnames,1,&names); fix_type_pointers(&int_array,outparam->intervals,interval_count, &names); + + { + /* Set ENUM and SET lengths */ + TYPELIB *interval; + for (interval= outparam->intervals; + interval < outparam->intervals + interval_count; + interval++) + { + uint count= (uint) (interval->count + 1) * sizeof(uint); + if (!(interval->type_lengths= (uint *) alloc_root(&outparam->mem_root, + count))) + goto err_not_open; + for (count= 0; count < interval->count; count++) + interval->type_lengths[count]= strlen(interval->type_names[count]); + interval->type_lengths[count]= 0; + } + } + if (keynames) fix_type_pointers(&int_array,&outparam->keynames,1,&keynames); VOID(my_close(file,MYF(MY_WME))); @@ -331,14 +415,17 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, use_hash= outparam->fields >= MAX_FIELDS_BEFORE_HASH; if (use_hash) use_hash= !hash_init(&outparam->name_hash, + system_charset_info, outparam->fields,0,0, - (hash_get_key) get_field_name,0, - HASH_CASE_INSENSITIVE); + (hash_get_key) get_field_name,0,0); for (i=0 ; i < outparam->fields; i++, 
strpos+=field_pack_length, field_ptr++) { uint pack_flag, interval_nr, unireg_type, recpos, field_length; enum_field_types field_type; + CHARSET_INFO *charset=NULL; + Field::geometry_type geom_type= Field::GEOM_GEOMETRY; + LEX_STRING comment; if (new_frm_ver == 3) { @@ -348,36 +435,104 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, pack_flag= uint2korr(strpos+8); unireg_type= (uint) strpos[10]; interval_nr= (uint) strpos[12]; - field_type= (enum_field_types) (uint) strpos[13]; + + uint comment_length=uint2korr(strpos+15); + field_type=(enum_field_types) (uint) strpos[13]; + + // charset and geometry_type share the same byte in frm + if (field_type == FIELD_TYPE_GEOMETRY) + { +#ifdef HAVE_SPATIAL + geom_type= (Field::geometry_type) strpos[14]; + charset= &my_charset_bin; +#else + error= 4; // unsupported field type + goto err_not_open; +#endif + } + else + { + if (!strpos[14]) + charset= &my_charset_bin; + else if (!(charset=get_charset((uint) strpos[14], MYF(0)))) + { + error= 5; // Unknown or unavailable charset + errarg= (int) strpos[14]; + goto err_not_open; + } + } + if (!comment_length) + { + comment.str= (char*) ""; + comment.length=0; + } + else + { + comment.str= (char*) comment_pos; + comment.length= comment_length; + comment_pos+= comment_length; + } } else { - /* old frm file */ field_length= (uint) strpos[3]; recpos= uint2korr(strpos+4), pack_flag= uint2korr(strpos+6); unireg_type= (uint) strpos[8]; interval_nr= (uint) strpos[10]; + + /* old frm file */ field_type= (enum_field_types) f_packtype(pack_flag); + if (f_is_binary(pack_flag)) + { + /* + Try to choose the best 4.1 type: + - for 4.0 "CHAR(N) BINARY" or "VARCHAR(N) BINARY" + try to find a binary collation for character set. + - for other types (e.g. BLOB) just use my_charset_bin. + */ + if (!f_is_blob(pack_flag)) + { + // 3.23 or 4.0 string + if (!(charset= get_charset_by_csname(outparam->table_charset->csname, + MY_CS_BINSORT, MYF(0)))) + charset= &my_charset_bin; + } + else + charset= &my_charset_bin; + } + else + charset= outparam->table_charset; + bzero((char*) &comment, sizeof(comment)); } + if (interval_nr && charset->mbminlen > 1) + { + /* Unescape UCS2 intervals from HEX notation */ + TYPELIB *interval= outparam->intervals + interval_nr - 1; + unhex_type2(interval); + } + *field_ptr=reg_field= make_field(record+recpos, (uint32) field_length, null_pos,null_bit, pack_flag, field_type, + charset, + geom_type, (Field::utype) MTYP_TYPENR(unireg_type), (interval_nr ? 
outparam->intervals+interval_nr-1 : (TYPELIB*) 0), outparam->fieldnames.type_names[i], outparam); - if (!*field_ptr) // Field in 4.1 + if (!reg_field) // Not supported field type { error= 4; goto err_not_open; /* purecov: inspected */ } + reg_field->comment=comment; if (!(reg_field->flags & NOT_NULL_FLAG)) { if ((null_bit<<=1) == 256) @@ -391,15 +546,15 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, if (outparam->timestamp_field == reg_field) outparam->timestamp_field_offset=i; if (use_hash) - (void) hash_insert(&outparam->name_hash,(byte*) *field_ptr); // Will never fail + (void) my_hash_insert(&outparam->name_hash,(byte*) field_ptr); // Will never fail } *field_ptr=0; // End marker /* Fix key->name and key_part->field */ if (key_parts) { - uint primary_key=(uint) (find_type((char*) "PRIMARY",&outparam->keynames, - 3)-1); + uint primary_key=(uint) (find_type((char*) primary_key_name, + &outparam->keynames, 3) - 1); uint ha_option=outparam->file->table_flags(); keyinfo=outparam->key_info; key_part=keyinfo->key_part; @@ -407,20 +562,11 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, for (uint key=0 ; key < outparam->keys ; key++,keyinfo++) { uint usable_parts=0; - ulong index_flags; keyinfo->name=(char*) outparam->keynames.type_names[key]; /* Fix fulltext keys for old .frm files */ if (outparam->key_info[key].flags & HA_FULLTEXT) outparam->key_info[key].algorithm= HA_KEY_ALG_FULLTEXT; - /* This has to be done after the above fulltext correction */ - index_flags=outparam->file->index_flags(key); - if (!(index_flags & HA_KEY_READ_ONLY)) - { - outparam->read_only_keys|= ((key_map) 1 << key); - outparam->keys_for_keyread&= ~((key_map) 1 << key); - } - if (primary_key >= MAX_KEY && (keyinfo->flags & HA_NOSAME)) { /* @@ -486,20 +632,18 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, field->key_length() == keyinfo->key_length ? 
UNIQUE_KEY_FLAG : MULTIPLE_KEY_FLAG); if (i == 0) - field->key_start|= ((key_map) 1 << key); - if ((index_flags & HA_KEY_READ_ONLY) && - field->key_length() == key_part->length && - field->type() != FIELD_TYPE_BLOB) + field->key_start.set_bit(key); + if (field->key_length() == key_part->length && + !(field->flags & BLOB_FLAG)) { - if (field->key_type() != HA_KEYTYPE_TEXT || - ((!(ha_option & HA_KEY_READ_WRONG_STR) || - field->flags & BINARY_FLAG) && - !(keyinfo->flags & HA_FULLTEXT))) - field->part_of_key|= ((key_map) 1 << key); - if ((field->key_type() != HA_KEYTYPE_TEXT || - !(keyinfo->flags & HA_FULLTEXT)) && - !(index_flags & HA_WRONG_ASCII_ORDER)) - field->part_of_sortkey|= ((key_map) 1 << key); + if (outparam->file->index_flags(key, i, 0) & HA_KEYREAD_ONLY) + { + outparam->read_only_keys.clear_bit(key); + outparam->keys_for_keyread.set_bit(key); + field->part_of_key.set_bit(key); + } + if (outparam->file->index_flags(key, i, 1) & HA_READ_ORDER) + field->part_of_sortkey.set_bit(key); } if (!(key_part->key_part_flag & HA_REVERSE_SORT) && usable_parts == i) @@ -518,7 +662,7 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, if (field->key_length() != key_part->length) { key_part->key_part_flag|= HA_PART_KEY_SEG; - if (field->type() != FIELD_TYPE_BLOB) + if (!(field->flags & BLOB_FLAG)) { // Create a new field field=key_part->field=field->new_field(&outparam->mem_root, outparam); @@ -544,11 +688,16 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, set_if_bigger(outparam->max_key_length,keyinfo->key_length+ keyinfo->key_parts); outparam->total_key_length+= keyinfo->key_length; - if (keyinfo->flags & HA_NOSAME) + /* + MERGE tables do not have unique indexes. But every key could be + an unique index on the underlying MyISAM table. 
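As an illustrative aside (not part of the patch itself): the hunk above replaces the old integer bit arithmetic on key_map with the bitmap-style interface introduced elsewhere in this change set (init(), set_bit(), clear_bit(), is_set()). A minimal sketch of the two idioms, assuming table, key and primary_key are in scope as in openfrm():

    /* before this patch: key_map is an integral mask */
    table->keys_in_use|= ((key_map) 1 << key);
    bool pk_usable= (table->keys_in_use & ((key_map) 1 << primary_key)) != 0;

    /* after this patch: key_map is a bitmap object */
    table->keys_in_use.set_bit(key);
    bool pk_usable2= table->keys_in_use.is_set(primary_key);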
(Bug #10400) + */ + if ((keyinfo->flags & HA_NOSAME) || + (ha_option & HA_ANY_INDEX_MAY_BE_UNIQUE)) set_if_bigger(outparam->max_unique_length,keyinfo->key_length); } - if (primary_key < MAX_KEY && - (outparam->keys_in_use & ((key_map) 1 << primary_key))) + if (primary_key < MAX_KEY && + (outparam->keys_in_use.is_set(primary_key))) { outparam->primary_key=primary_key; /* @@ -634,12 +783,20 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, outparam->crashed=((err == HA_ERR_CRASHED_ON_USAGE) && outparam->file->auto_repair() && !(ha_open_flags & HA_OPEN_FOR_REPAIR)); + + if (err==HA_ERR_NO_SUCH_TABLE) + { + /* The table did not exists in storage engine, use same error message + as if the .frm file didn't exist */ + error= 1; + my_errno= ENOENT; + } goto err_not_open; /* purecov: inspected */ } } outparam->db_low_byte_first=outparam->file->low_byte_first(); - my_pthread_setspecific_ptr(THR_MALLOC,old_root); + *root_ptr= old_root; opened_tables++; #ifndef DBUG_OFF if (use_hash) @@ -654,8 +811,8 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, err_end: /* Here when no file */ delete crypted; - my_pthread_setspecific_ptr(THR_MALLOC,old_root); - frm_error(error,outparam,name,ME_ERROR+ME_WAITTANG); + *root_ptr= old_root; + frm_error(error, outparam, name, ME_ERROR + ME_WAITTANG, errarg); delete outparam->file; outparam->file=0; // For easyer errorchecking outparam->db_stat=0; @@ -840,7 +997,8 @@ ulong make_new_entry(File file, uchar *fileinfo, TYPELIB *formnames, /* error message when opening a form file */ -static void frm_error(int error, TABLE *form, const char *name, myf errortype) +static void frm_error(int error, TABLE *form, const char *name, + myf errortype, int errarg) { int err_no; char buff[FN_REFLEN]; @@ -863,13 +1021,34 @@ static void frm_error(int error, TABLE *form, const char *name, myf errortype) break; case 2: { - datext=form->file ? *form->file->bas_ext() : ""; + datext= form->file ? *form->file->bas_ext() : ""; + datext= datext==NullS ? "" : datext; err_no= (my_errno == ENOENT) ? ER_FILE_NOT_FOUND : (my_errno == EAGAIN) ? 
ER_FILE_USED : ER_CANT_OPEN_FILE; my_error(err_no,errortype, fn_format(buff,form->real_name,form_dev,datext,2),my_errno); break; } + case 5: + { + const char *csname= get_charset_name((uint) errarg); + char tmp[10]; + if (!csname || csname[0] =='?') + { + my_snprintf(tmp, sizeof(tmp), "#%d", errarg); + csname= tmp; + } + my_printf_error(ER_UNKNOWN_COLLATION, + "Unknown collation '%s' in table '%-.64s' definition", + MYF(0), csname, form->real_name); + break; + } + case 6: + my_printf_error(ER_NOT_FORM_FILE, + "Table '%-.64s' was created with a different version " + "of MySQL and cannot be read", + MYF(0), name); + break; default: /* Better wrong error than none */ case 4: my_error(ER_NOT_FORM_FILE,errortype, @@ -920,21 +1099,26 @@ fix_type_pointers(const char ***array, TYPELIB *point_to_type, uint types, } /* fix_type_pointers */ -TYPELIB *typelib(List<String> &strings) +TYPELIB *typelib(MEM_ROOT *mem_root, List<String> &strings) { - TYPELIB *result=(TYPELIB*) sql_alloc(sizeof(TYPELIB)); + TYPELIB *result= (TYPELIB*) alloc_root(mem_root, sizeof(TYPELIB)); if (!result) return 0; result->count=strings.elements; result->name=""; - if (!(result->type_names=(const char **) sql_alloc(sizeof(char *)* - (result->count+1)))) + uint nbytes= (sizeof(char*) + sizeof(uint)) * (result->count + 1); + if (!(result->type_names= (const char**) alloc_root(mem_root, nbytes))) return 0; + result->type_lengths= (uint*) (result->type_names + result->count + 1); List_iterator<String> it(strings); String *tmp; for (uint i=0; (tmp=it++) ; i++) - result->type_names[i]=tmp->ptr(); - result->type_names[result->count]=0; // End marker + { + result->type_names[i]= tmp->ptr(); + result->type_lengths[i]= tmp->length(); + } + result->type_names[result->count]= 0; // End marker + result->type_lengths[result->count]= 0; return result; } @@ -991,13 +1175,26 @@ ulong next_io_size(register ulong pos) } /* next_io_size */ -void append_unescaped(String *res,const char *pos) +/* + Store an SQL quoted string. + + SYNOPSIS + append_unescaped() + res result String + pos string to be quoted + length it's length + + NOTE + This function works correctly with utf8 or single-byte charset strings. + May fail with some multibyte charsets though. 
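A small usage sketch of the new three-argument form (illustrative only; the escaping switch in the function body is unchanged context elided from this hunk):

    char buff[64];
    String res(buff, sizeof(buff), system_charset_info);
    res.length(0);
    append_unescaped(&res, "ab'cd", 5);
    /* res now holds the single-quoted, escaped literal, e.g. 'ab\'cd' */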
+*/ + +void append_unescaped(String *res, const char *pos, uint length) { -#ifdef USE_MB - const char *end= pos + strlen(pos); -#endif + const char *end= pos+length; + res->append('\''); - for (; *pos ; ) + for (; pos != end ; pos++) { #if defined(USE_MB) && MYSQL_VERSION_ID < 40100 uint mblen; @@ -1035,13 +1232,14 @@ void append_unescaped(String *res,const char *pos) res->append(*pos); break; } - pos++; } + res->append('\''); } /* Create a .frm file */ -File create_frm(register my_string name, uint reclength, uchar *fileinfo, +File create_frm(register my_string name, const char *db, const char *table, + uint reclength, uchar *fileinfo, HA_CREATE_INFO *create_info, uint keys) { register File file; @@ -1054,7 +1252,7 @@ File create_frm(register my_string name, uint reclength, uchar *fileinfo, create_flags|= O_EXCL | O_NOFOLLOW; #if SIZEOF_OFF_T > 4 - /* Fix this in MySQL 4.0; The current limit is 4G rows (QQ) */ + /* Fix this when we have new .frm files; Current limit is 4G rows (QQ) */ if (create_info->max_rows > ~(ulong) 0) create_info->max_rows= ~(ulong) 0; if (create_info->min_rows > ~(ulong) 0) @@ -1066,16 +1264,17 @@ File create_frm(register my_string name, uint reclength, uchar *fileinfo, */ set_if_smaller(create_info->raid_chunks, 255); - if ((file= my_create(name, CREATE_MODE, create_flags, MYF(MY_WME))) >= 0) + if ((file= my_create(name, CREATE_MODE, create_flags, MYF(0))) >= 0) { bzero((char*) fileinfo,64); - fileinfo[0]=(uchar) 254; fileinfo[1]= 1; fileinfo[2]= FRM_VER+1; // Header + fileinfo[0]=(uchar) 254; fileinfo[1]= 1; fileinfo[2]= FRM_VER+3; // Header fileinfo[3]= (uchar) ha_checktype(create_info->db_type); fileinfo[4]=1; int2store(fileinfo+6,IO_SIZE); /* Next block starts here */ key_length=keys*(7+NAME_LEN+MAX_REF_PARTS*9)+16; length=(ulong) next_io_size((ulong) (IO_SIZE+key_length+reclength)); int4store(fileinfo+10,length); + if (key_length > 0xffff) key_length=0xffff; int2store(fileinfo+14,key_length); int2store(fileinfo+16,reclength); int4store(fileinfo+18,create_info->max_rows); @@ -1085,6 +1284,8 @@ File create_frm(register my_string name, uint reclength, uchar *fileinfo, int2store(fileinfo+30,create_info->table_options); fileinfo[32]=0; // No filename anymore int4store(fileinfo+34,create_info->avg_row_length); + fileinfo[38]= (create_info->default_table_charset ? 
+ create_info->default_table_charset->number : 0); fileinfo[40]= (uchar) create_info->row_type; fileinfo[41]= (uchar) create_info->raid_type; fileinfo[42]= (uchar) create_info->raid_chunks; @@ -1100,6 +1301,13 @@ File create_frm(register my_string name, uint reclength, uchar *fileinfo, } } } + else + { + if (my_errno == ENOENT) + my_error(ER_BAD_DB_ERROR,MYF(0),db); + else + my_error(ER_CANT_CREATE_TABLE,MYF(0),table,my_errno); + } return (file); } /* create_frm */ @@ -1115,6 +1323,8 @@ void update_create_info_from_table(HA_CREATE_INFO *create_info, TABLE *table) create_info->raid_type=table->raid_type; create_info->raid_chunks=table->raid_chunks; create_info->raid_chunksize=table->raid_chunksize; + create_info->default_table_charset=table->table_charset; + create_info->table_charset= 0; DBUG_VOID_RETURN; } @@ -1129,17 +1339,55 @@ rename_file_ext(const char * from,const char * to,const char * ext) /* - Alloc a value as a string and return it - If field is empty, return NULL + Allocate string field in MEM_ROOT and return it as String + + SYNOPSIS + get_field() + mem MEM_ROOT for allocating + field Field for retrieving of string + res result String + + RETURN VALUES + 1 string is empty + 0 all ok */ -char *get_field(MEM_ROOT *mem, TABLE *table, uint fieldnr) +bool get_field(MEM_ROOT *mem, Field *field, String *res) { - Field *field=table->field[fieldnr]; char buff[MAX_FIELD_WIDTH], *to; - String str(buff,sizeof(buff)); - field->val_str(&str,&str); - uint length=str.length(); + String str(buff,sizeof(buff),&my_charset_bin); + uint length; + + field->val_str(&str); + if (!(length= str.length())) + return 1; + to= strmake_root(mem, str.ptr(), length); + res->set(to, length, ((Field_str*)field)->charset()); + return 0; +} + + +/* + Allocate string field in MEM_ROOT and return it as NULL-terminated string + + SYNOPSIS + get_field() + mem MEM_ROOT for allocating + field Field for retrieving of string + + RETURN VALUES + NullS string is empty + # pointer to NULL-terminated string value of field +*/ + +char *get_field(MEM_ROOT *mem, Field *field) +{ + char buff[MAX_FIELD_WIDTH], *to; + String str(buff,sizeof(buff),&my_charset_bin); + uint length; + + field->val_str(&str); + length= str.length(); if (!length || !(to= (char*) alloc_root(mem,length+1))) return NullS; memcpy(to,str.ptr(),(uint) length); @@ -1166,18 +1414,20 @@ char *get_field(MEM_ROOT *mem, TABLE *table, uint fieldnr) bool check_db_name(char *name) { char *start=name; - bool last_char_is_space= FALSE; + /* Used to catch empty names and names with end space */ + bool last_char_is_space= TRUE; - if (lower_case_table_names) - casedn_str(name); + if (lower_case_table_names && name != any_db) + my_casedn_str(files_charset_info, name); while (*name) { #if defined(USE_MB) && defined(USE_MB_IDENT) - last_char_is_space= my_isspace(default_charset_info, *name); - if (use_mb(default_charset_info)) + last_char_is_space= my_isspace(system_charset_info, *name); + if (use_mb(system_charset_info)) { - int len=my_ismbchar(default_charset_info, name, name+MBMAXLEN); + int len=my_ismbchar(system_charset_info, name, + name+system_charset_info->mbmaxlen); if (len) { name += len; @@ -1219,10 +1469,10 @@ bool check_table_name(const char *name, uint length) while (name != end) { #if defined(USE_MB) && defined(USE_MB_IDENT) - last_char_is_space= my_isspace(default_charset_info, *name); - if (use_mb(default_charset_info)) + last_char_is_space= my_isspace(system_charset_info, *name); + if (use_mb(system_charset_info)) { - int len=my_ismbchar(default_charset_info, 
name, end); + int len=my_ismbchar(system_charset_info, name, end); if (len) { name += len; @@ -1245,15 +1495,16 @@ bool check_table_name(const char *name, uint length) bool check_column_name(const char *name) { const char *start= name; - bool last_char_is_space= FALSE; - + bool last_char_is_space= TRUE; + while (*name) { #if defined(USE_MB) && defined(USE_MB_IDENT) - last_char_is_space= my_isspace(default_charset_info, *name); - if (use_mb(default_charset_info)) + last_char_is_space= my_isspace(system_charset_info, *name); + if (use_mb(system_charset_info)) { - int len=my_ismbchar(default_charset_info, name, name+MBMAXLEN); + int len=my_ismbchar(system_charset_info, name, + name+system_charset_info->mbmaxlen); if (len) { name += len; @@ -1268,7 +1519,7 @@ bool check_column_name(const char *name) name++; } /* Error if empty or too long column name */ - return last_char_is_space || (name == start || (uint) (name - start) > NAME_LEN); + return last_char_is_space || (uint) (name - start) > NAME_LEN; } /* diff --git a/sql/table.h b/sql/table.h index 610c76e35c2..8cb15d3a69c 100644 --- a/sql/table.h +++ b/sql/table.h @@ -19,12 +19,14 @@ class Item; /* Needed by ORDER */ class GRANT_TABLE; +class st_select_lex_unit; /* Order clause list element */ typedef struct st_order { struct st_order *next; Item **item; /* Point at item in select fields */ + Item *item_ptr; /* Storage for initial item */ bool asc; /* true if ascending */ bool free_me; /* true if item isn't shared */ bool in_field_list; /* true if in select field list */ @@ -43,6 +45,36 @@ typedef struct st_grant_info enum tmp_table_type {NO_TMP_TABLE=0, TMP_TABLE=1, TRANSACTIONAL_TMP_TABLE=2}; +typedef struct st_filesort_info +{ + IO_CACHE *io_cache; /* If sorted through filebyte */ + byte *addon_buf; /* Pointer to a buffer if sorted with fields */ + uint addon_length; /* Length of the buffer */ + struct st_sort_addon_field *addon_field; /* Pointer to the fields info */ + void (*unpack)(struct st_sort_addon_field *, byte *); /* To unpack back */ + byte *record_pointers; /* If sorted in memory */ + ha_rows found_records; /* How many records in sort */ +} FILESORT_INFO; + + +/* + Values in this enum are used to indicate how a tables TIMESTAMP field + should be treated. It can be set to the current timestamp on insert or + update or both. + WARNING: The values are used for bit operations. If you change the + enum, you must keep the bitwise relation of the values. For example: + (int) TIMESTAMP_AUTO_SET_ON_BOTH must be equal to + (int) TIMESTAMP_AUTO_SET_ON_INSERT | (int) TIMESTAMP_AUTO_SET_ON_UPDATE. + We use an enum here so that the debugger can display the value names. +*/ +enum timestamp_auto_set_type +{ + TIMESTAMP_NO_AUTO_SET= 0, TIMESTAMP_AUTO_SET_ON_INSERT= 1, + TIMESTAMP_AUTO_SET_ON_UPDATE= 2, TIMESTAMP_AUTO_SET_ON_BOTH= 3 +}; +#define clear_timestamp_auto_bits(_target_, _bits_) \ + (_target_)= (enum timestamp_auto_set_type)((int)(_target_) & ~(int)(_bits_)) + /* Table cache entry struct */ class Field_timestamp; @@ -52,8 +84,11 @@ struct st_table { handler *file; Field **field; /* Pointer to fields */ Field_blob **blob_field; /* Pointer to blob fields */ - HASH name_hash; /* hash of field names */ - byte *record[3]; /* Pointer to records */ + /* hash of field names (contains pointers to elements of field array) */ + HASH name_hash; + byte *record[2]; /* Pointer to records */ + byte *default_values; /* Default values for INSERT */ + byte *insert_values; /* used by INSERT ... 
UPDATE */ uint fields; /* field count */ uint reclength; /* Recordlength */ uint rec_buff_length; @@ -63,7 +98,9 @@ struct st_table { uint null_fields; /* number of null fields */ uint blob_fields; /* number of blob fields */ key_map keys_in_use, keys_for_keyread, read_only_keys; - key_map quick_keys, used_keys, keys_in_use_for_query; + key_map quick_keys; + key_map used_keys; /* keys that cover all used table fields */ + key_map keys_in_use_for_query; KEY *key_info; /* data of keys in database */ TYPELIB keynames; /* Pointers to keynames */ ha_rows max_rows; /* create information */ @@ -81,8 +118,23 @@ struct st_table { uint raid_type,raid_chunks; uint status; /* Used by postfix.. */ uint system; /* Set if system record */ - ulong time_stamp; /* Set to offset+1 of record */ + + /* + If this table has TIMESTAMP field with auto-set property (pointed by + timestamp_field member) then this variable indicates during which + operations (insert only/on update/in both cases) we should set this + field to current timestamp. If there are no such field in this table + or we should not automatically set its value during execution of current + statement then the variable contains TIMESTAMP_NO_AUTO_SET (i.e. 0). + + Value of this variable is set for each statement in open_table() and + if needed cleared later in statement processing code (see mysql_update() + as example). + */ + timestamp_auto_set_type timestamp_field_type; + /* Index of auto-updated TIMESTAMP field in field array */ uint timestamp_field_offset; + uint next_number_index; uint blob_ptr_size; /* 4 or 8 */ uint next_number_key_offset; @@ -102,7 +154,7 @@ struct st_table { my_bool maybe_null; /* true if (outer_join != 0) */ my_bool force_index; my_bool distinct,const_table,no_rows; - my_bool key_read, bulk_insert; + my_bool key_read; my_bool crypted; my_bool db_low_byte_first; /* Portable row format */ my_bool locked_by_flush; @@ -111,6 +163,8 @@ struct st_table { my_bool crashed; my_bool is_view; my_bool no_keyread, no_cache; + my_bool clear_query_id; /* To reset query_id for tables and cols */ + my_bool auto_increment_field_not_null; Field *next_number_field, /* Set if next_number is activated */ *found_next_number_field, /* Set on open */ *rowid_field; @@ -124,10 +178,12 @@ struct st_table { my_bool timestamp_mode; #endif my_string comment; /* Comment about table */ + CHARSET_INFO *table_charset; /* Default charset of string fields */ REGINFO reginfo; /* field connections */ MEM_ROOT mem_root; GRANT_INFO grant; + /* A pair "database_name\0table_name\0", widely used as simply a db name */ char *table_cache_key; char *table_name,*real_name,*path; uint key_length; /* Length of key */ @@ -135,21 +191,21 @@ struct st_table { table_map map; /* ID bit of table (1,2,4,8,16...) 
*/ ulong version,flush_version; uchar *null_flags; - IO_CACHE *io_cache; /* If sorted trough file*/ - byte *record_pointers; /* If sorted in memory */ - ha_rows found_records; /* How many records in sort */ + FILESORT_INFO sort; ORDER *group; ha_rows quick_rows[MAX_KEY]; uint quick_key_parts[MAX_KEY]; key_part_map const_key_parts[MAX_KEY]; ulong query_id; + uchar frm_version; union /* Temporary variables */ { uint temp_pool_slot; /* Used by intern temp tables */ struct st_table_list *pos_in_table_list; }; - + /* number of select if it is derived table */ + uint derived_select_number; THD *in_use; /* Which thread uses this */ struct st_table *next,*prev; }; @@ -162,22 +218,28 @@ typedef struct st_table_list { struct st_table_list *next; char *db, *alias, *real_name; + char *option; /* Used by cache index */ Item *on_expr; /* Used with outer join */ struct st_table_list *natural_join; /* natural join on this table*/ /* ... join ... USE INDEX ... IGNORE INDEX */ - List<String> *use_index,*ignore_index; - TABLE *table; - GRANT_INFO grant; + List<String> *use_index, *ignore_index; + TABLE *table; /* opened table */ + st_table_list *table_list; /* pointer to node of list of all tables */ + class st_select_lex_unit *derived; /* SELECT_LEX_UNIT of derived table */ + GRANT_INFO grant; thr_lock_type lock_type; uint outer_join; /* Which join type */ - uint shared; /* Used in union or in multi-upd */ + uint shared; /* Used in multi-upd */ uint32 db_length, real_name_length; bool straight; /* optimize with prev table */ bool updating; /* for replicate-do/ignore table */ bool force_index; /* Prefer index over table scan */ + bool ignore_leaves; /* Preload only non-leaf nodes */ + bool cacheable_table; /* stop PS caching */ + /* used in multi-upd privelege check */ + bool table_in_update_from_clause; } TABLE_LIST; - typedef struct st_changed_table_list { struct st_changed_table_list *next; @@ -185,10 +247,11 @@ typedef struct st_changed_table_list uint32 key_length; } CHANGED_TABLE_LIST; - typedef struct st_open_table_list { struct st_open_table_list *next; char *db,*table; uint32 in_use,locked; } OPEN_TABLE_LIST; + + diff --git a/sql/thr_malloc.cc b/sql/thr_malloc.cc index 8b9baa6f045..3a9ca397bba 100644 --- a/sql/thr_malloc.cc +++ b/sql/thr_malloc.cc @@ -24,7 +24,7 @@ extern "C" { { THD *thd=current_thd; if (thd) // QQ; To be removed - thd->fatal_error=1; /* purecov: inspected */ + thd->fatal_error(); /* purecov: inspected */ sql_print_error(ER(ER_OUT_OF_RESOURCES)); } } @@ -38,7 +38,7 @@ void init_sql_alloc(MEM_ROOT *mem_root, uint block_size, uint pre_alloc) gptr sql_alloc(uint Size) { - MEM_ROOT *root=my_pthread_getspecific_ptr(MEM_ROOT*,THR_MALLOC); + MEM_ROOT *root= *my_pthread_getspecific_ptr(MEM_ROOT**,THR_MALLOC); char *ptr= (char*) alloc_root(root,Size); return ptr; } @@ -85,3 +85,36 @@ gptr sql_memdup(const void *ptr,uint len) void sql_element_free(void *ptr __attribute__((unused))) {} /* purecov: deadcode */ + + + +char *sql_strmake_with_convert(const char *str, uint32 arg_length, + CHARSET_INFO *from_cs, + uint32 max_res_length, + CHARSET_INFO *to_cs, uint32 *result_length) +{ + char *pos; + uint32 new_length= to_cs->mbmaxlen*arg_length; + max_res_length--; // Reserve place for end null + + set_if_smaller(new_length, max_res_length); + if (!(pos= sql_alloc(new_length+1))) + return pos; // Error + + if ((from_cs == &my_charset_bin) || (to_cs == &my_charset_bin)) + { + // Safety if to_cs->mbmaxlen > 0 + new_length= min(arg_length, max_res_length); + memcpy(pos, str, new_length); + } + else + 
{ + uint dummy_errors; + new_length= copy_and_convert((char*) pos, new_length, to_cs, str, + arg_length, from_cs, &dummy_errors); + } + pos[new_length]= 0; + *result_length= new_length; + return pos; +} + diff --git a/sql/time.cc b/sql/time.cc index d8b4b80e351..e76b169b336 100644 --- a/sql/time.cc +++ b/sql/time.cc @@ -20,164 +20,10 @@ #include "mysql_priv.h" #include <m_ctype.h> -static ulong const days_at_timestart=719528; /* daynr at 1970.01.01 */ -uchar *days_in_month= (uchar*) "\037\034\037\036\037\036\037\037\036\037\036\037"; - - - /* Init some variabels needed when using my_local_time */ - /* Currently only my_time_zone is inited */ - -static long my_time_zone=0; - -void init_time(void) -{ - time_t seconds; - struct tm *l_time,tm_tmp;; - TIME my_time; - - seconds= (time_t) time((time_t*) 0); - localtime_r(&seconds,&tm_tmp); - l_time= &tm_tmp; - my_time_zone= 3600; /* Comp. for -3600 in my_gmt_sec */ - my_time.year= (uint) l_time->tm_year+1900; - my_time.month= (uint) l_time->tm_mon+1; - my_time.day= (uint) l_time->tm_mday; - my_time.hour= (uint) l_time->tm_hour; - my_time.minute= (uint) l_time->tm_min; - my_time.second= (uint) l_time->tm_sec; - my_gmt_sec(&my_time, &my_time_zone); /* Init my_time_zone */ -} - -/* - Convert current time to sec. since 1970.01.01 - This code handles also day light saving time. - The idea is to cache the time zone (including daylight saving time) - for the next call to make things faster. - -*/ - -long my_gmt_sec(TIME *t, long *my_timezone) -{ - uint loop; - time_t tmp; - struct tm *l_time,tm_tmp; - long diff, current_timezone; - - if (t->year > TIMESTAMP_MAX_YEAR || t->year < TIMESTAMP_MIN_YEAR) - return 0; - - if (t->hour >= 24) - { /* Fix for time-loop */ - t->day+=t->hour/24; - t->hour%=24; - } - - /* - Calculate the gmt time based on current time and timezone - The -1 on the end is to ensure that if have a date that exists twice - (like 2002-10-27 02:00:0 MET), we will find the initial date. - - By doing -3600 we will have to call localtime_r() several times, but - I couldn't come up with a better way to get a repeatable result :( - - We can't use mktime() as it's buggy on many platforms and not thread safe. - - Note: this code assumes that our time_t estimation is not too far away - from real value (we assume that localtime_r(tmp) will return something - within 24 hrs from t) which is probably true for all current time zones. - */ - tmp=(time_t) (((calc_daynr((uint) t->year,(uint) t->month,(uint) t->day) - - (long) days_at_timestart)*86400L + (long) t->hour*3600L + - (long) (t->minute*60 + t->second)) + (time_t) my_time_zone - - 3600); - current_timezone= my_time_zone; - - localtime_r(&tmp,&tm_tmp); - l_time=&tm_tmp; - for (loop=0; - loop < 2 && - (t->hour != (uint) l_time->tm_hour || - t->minute != (uint) l_time->tm_min || - t->second != (uint) l_time->tm_sec); - loop++) - { /* One check should be enough ? */ - /* Get difference in days */ - int days= t->day - l_time->tm_mday; - if (days < -1) - days= 1; // Month has wrapped - else if (days > 1) - days= -1; - diff=(3600L*(long) (days*24+((int) t->hour - (int) l_time->tm_hour)) + - (long) (60*((int) t->minute - (int) l_time->tm_min)) + - (long) ((int) t->second - (int) l_time->tm_sec)); - current_timezone+= diff+3600; // Compensate for -3600 above - tmp+= (time_t) diff; - localtime_r(&tmp,&tm_tmp); - l_time=&tm_tmp; - } - /* - Fix that if we are in the non existing daylight saving time hour - we move the start of the next real hour. 
- - This code doesn't handle such exotical thing as time-gaps whose length - is more than one hour or non-integer (latter can theoretically happen - if one of seconds will be removed due leap correction, or because of - general time correction like it happened for Africa/Monrovia time zone - in year 1972). - */ - if (loop == 2 && t->hour != (uint) l_time->tm_hour) - { - int days= t->day - l_time->tm_mday; - if (days < -1) - days=1; // Month has wrapped - else if (days > 1) - days= -1; - diff=(3600L*(long) (days*24+((int) t->hour - (int) l_time->tm_hour))+ - (long) (60*((int) t->minute - (int) l_time->tm_min)) + - (long) ((int) t->second - (int) l_time->tm_sec)); - if (diff == 3600) - tmp+=3600 - t->minute*60 - t->second; // Move to next hour - else if (diff == -3600) - tmp-=t->minute*60 + t->second; // Move to previous hour - } - *my_timezone= current_timezone; - - if (tmp < TIMESTAMP_MIN_VALUE || tmp > TIMESTAMP_MAX_VALUE) - tmp= 0; - - return (long) tmp; -} /* my_gmt_sec */ - /* Some functions to calculate dates */ - /* Calculate nr of day since year 0 in new date-system (from 1615) */ - -long calc_daynr(uint year,uint month,uint day) -{ - long delsum; - int temp; - DBUG_ENTER("calc_daynr"); - - if (year == 0 && month == 0 && day == 0) - DBUG_RETURN(0); /* Skip errors */ - if (year < 200) - { - if ((year=year+1900) < 1900+YY_PART_YEAR) - year+=100; - } - delsum= (long) (365L * year+ 31*(month-1) +day); - if (month <= 2) - year--; - else - delsum-= (long) (month*4+23)/10; - temp=(int) ((year/100+1)*3)/4; - DBUG_PRINT("exit",("year: %d month: %d day: %d -> daynr: %ld", - year+(month <= 2),month,day,delsum+year/4-temp)); - DBUG_RETURN(delsum+(int) year/4-temp); -} /* calc_daynr */ - - +#ifndef TESTTIME /* Calc weekday from daynr */ /* Returns 0 for monday, 1 for tuesday .... */ @@ -328,6 +174,7 @@ ulong convert_period_to_month(ulong period) return a*12+b-1; } + ulong convert_month_to_period(ulong month) { ulong year; @@ -341,282 +188,691 @@ ulong convert_month_to_period(ulong month) } -/***************************************************************************** -** convert a timestamp string to a TIME value. -** At least the following formats are recogniced (based on number of digits) -** YYMMDD, YYYYMMDD, YYMMDDHHMMSS, YYYYMMDDHHMMSS -** YY-MM-DD, YYYY-MM-DD, YY-MM-DD HH.MM.SS -** Returns the type of string -*****************************************************************************/ +/* + Convert a timestamp string to a TIME value and produce a warning + if string was truncated during conversion. + NOTE + See description of str_to_datetime() for more information. +*/ timestamp_type -str_to_TIME(const char *str, uint length, TIME *l_time,bool fuzzy_date) +str_to_datetime_with_warn(const char *str, uint length, TIME *l_time, + uint flags) +{ + int was_cut; + timestamp_type ts_type= str_to_datetime(str, length, l_time, flags, &was_cut); + if (was_cut) + make_truncated_value_warning(current_thd, str, length, ts_type); + return ts_type; +} + + +/* + Convert a datetime from broken-down TIME representation to corresponding + TIMESTAMP value. + + SYNOPSIS + TIME_to_timestamp() + thd - current thread + t - datetime in broken-down representation, + in_dst_time_gap - pointer to bool which is set to true if t represents + value which doesn't exists (falls into the spring + time-gap) or to false otherwise. + + RETURN + Number seconds in UTC since start of Unix Epoch corresponding to t. + 0 - t contains datetime value which is out of TIMESTAMP range. 
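A minimal calling sketch (illustrative, not part of the patch; assumes a THD pointer thd is available, e.g. current_thd):

    TIME t;
    bool in_dst_gap;
    bzero((char*) &t, sizeof(t));
    t.year= 2004; t.month= 1; t.day= 1;        /* 2004-01-01 00:00:00 */
    my_time_t ts= TIME_to_timestamp(thd, &t, &in_dst_gap);
    /* ts == 0 means the value is outside the supported TIMESTAMP range;
       in_dst_gap is set if the local time falls into a DST spring gap. */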
+ +*/ +my_time_t TIME_to_timestamp(THD *thd, const TIME *t, bool *in_dst_time_gap) { - uint field_length,year_length,digits,i,number_of_fields,date[7]; - uint not_zero_date; - const char *pos; - const char *end=str+length; - DBUG_ENTER("str_to_TIME"); - DBUG_PRINT("enter",("str: %.*s",length,str)); - - for (; str != end && !isdigit(*str) ; str++) ; // Skip garbage - if (str == end) - DBUG_RETURN(TIMESTAMP_NONE); + my_time_t timestamp; + + *in_dst_time_gap= 0; + + if (t->year < TIMESTAMP_MAX_YEAR && t->year > TIMESTAMP_MIN_YEAR || + t->year == TIMESTAMP_MAX_YEAR && t->month == 1 && t->day == 1 || + t->year == TIMESTAMP_MIN_YEAR && t->month == 12 && t->day == 31) + { + thd->time_zone_used= 1; + timestamp= thd->variables.time_zone->TIME_to_gmt_sec(t, in_dst_time_gap); + if (timestamp >= TIMESTAMP_MIN_VALUE && timestamp <= TIMESTAMP_MAX_VALUE) + return timestamp; + } + + /* If we are here we have range error. */ + return(0); +} + + +/* + Convert a time string to a TIME struct and produce a warning + if string was cut during conversion. + + NOTE + See str_to_time() for more info. +*/ +bool +str_to_time_with_warn(const char *str, uint length, TIME *l_time) +{ + int was_cut; + bool ret_val= str_to_time(str, length, l_time, &was_cut); + if (was_cut) + make_truncated_value_warning(current_thd, str, length, MYSQL_TIMESTAMP_TIME); + return ret_val; +} + + +/* + Convert datetime value specified as number to broken-down TIME + representation and form value of DATETIME type as side-effect. + + SYNOPSIS + number_to_TIME() + nr - datetime value as number + time_res - pointer for structure for broken-down representation + fuzzy_date - indicates whenever we allow fuzzy dates + was_cut - set ot 1 if there was some kind of error during + conversion or to 0 if everything was OK. + + DESCRIPTION + Convert a datetime value of formats YYMMDD, YYYYMMDD, YYMMDDHHMSS, + YYYYMMDDHHMMSS to broken-down TIME representation. Return value in + YYYYMMDDHHMMSS format as side-effect. + + This function also checks if datetime value fits in DATETIME range. + + RETURN VALUE + Datetime value in YYYYMMDDHHMMSS format. + If input value is not valid datetime value then 0 is returned. 
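Two worked examples of the year windowing described above (illustrative; assumes YY_PART_YEAR has its conventional value of 70):

    int was_cut;
    TIME t;
    /* 691231 is read as YYMMDD in the 20xx window: returns 20691231000000 */
    longlong a= number_to_TIME(LL(691231), &t, 0, &was_cut);
    /* 700101 is read as YYMMDD in the 19xx window: returns 19700101000000 */
    longlong b= number_to_TIME(LL(700101), &t, 0, &was_cut);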
+*/ + +longlong number_to_TIME(longlong nr, TIME *time_res, bool fuzzy_date, + int *was_cut) +{ + long part1,part2; + + *was_cut= 0; + + if (nr == LL(0) || nr >= LL(10000101000000)) + goto ok; + if (nr < 101) + goto err; + if (nr <= (YY_PART_YEAR-1)*10000L+1231L) + { + nr= (nr+20000000L)*1000000L; // YYMMDD, year: 2000-2069 + goto ok; + } + if (nr < (YY_PART_YEAR)*10000L+101L) + goto err; + if (nr <= 991231L) + { + nr= (nr+19000000L)*1000000L; // YYMMDD, year: 1970-1999 + goto ok; + } + if (nr < 10000101L) + goto err; + if (nr <= 99991231L) + { + nr= nr*1000000L; + goto ok; + } + if (nr < 101000000L) + goto err; + if (nr <= (YY_PART_YEAR-1)*LL(10000000000)+LL(1231235959)) + { + nr= nr+LL(20000000000000); // YYMMDDHHMMSS, 2000-2069 + goto ok; + } + if (nr < YY_PART_YEAR*LL(10000000000)+ LL(101000000)) + goto err; + if (nr <= LL(991231235959)) + nr= nr+LL(19000000000000); // YYMMDDHHMMSS, 1970-1999 + + ok: + part1=(long) (nr/LL(1000000)); + part2=(long) (nr - (longlong) part1*LL(1000000)); + time_res->year= (int) (part1/10000L); part1%=10000L; + time_res->month= (int) part1 / 100; + time_res->day= (int) part1 % 100; + time_res->hour= (int) (part2/10000L); part2%=10000L; + time_res->minute=(int) part2 / 100; + time_res->second=(int) part2 % 100; + + if (time_res->year <= 9999 && time_res->month <= 12 && + time_res->day <= 31 && time_res->hour <= 23 && + time_res->minute <= 59 && time_res->second <= 59 && + (fuzzy_date || (time_res->month != 0 && time_res->day != 0) || nr==0)) + return nr; + + err: + + *was_cut= 1; + return LL(0); +} + + +/* + Convert a system time structure to TIME +*/ + +void localtime_to_TIME(TIME *to, struct tm *from) +{ + to->neg=0; + to->second_part=0; + to->year= (int) ((from->tm_year+1900) % 10000); + to->month= (int) from->tm_mon+1; + to->day= (int) from->tm_mday; + to->hour= (int) from->tm_hour; + to->minute= (int) from->tm_min; + to->second= (int) from->tm_sec; +} + +void calc_time_from_sec(TIME *to, long seconds, long microseconds) +{ + long t_seconds; + to->hour= seconds/3600L; + t_seconds= seconds%3600L; + to->minute= t_seconds/60L; + to->second= t_seconds%60L; + to->second_part= microseconds; +} + + +/* + Parse a format string specification + + SYNOPSIS + parse_date_time_format() + format_type Format of string (time, date or datetime) + format_str String to parse + format_length Length of string + date_time_format Format to fill in + + NOTES + Fills in date_time_format->positions for all date time parts. + + positions marks the position for a datetime element in the format string. + The position array elements are in the following order: + YYYY-DD-MM HH-MM-DD.FFFFFF AM + 0 1 2 3 4 5 6 7 + + If positions[0]= 5, it means that year will be the forth element to + read from the parsed date string. + + RETURN + 0 ok + 1 error +*/ + +bool parse_date_time_format(timestamp_type format_type, + const char *format, uint format_length, + DATE_TIME_FORMAT *date_time_format) +{ + uint offset= 0, separators= 0; + const char *ptr= format, *format_str; + const char *end= ptr+format_length; + uchar *dt_pos= date_time_format->positions; + /* need_p is set if we are using AM/PM format */ + bool need_p= 0, allow_separator= 0; + ulong part_map= 0, separator_map= 0; + const char *parts[16]; + + date_time_format->time_separator= 0; + date_time_format->flag= 0; // For future + /* - ** calculate first number of digits. - ** If length= 8 or >= 14 then year is of format YYYY. 
- (YYYY-MM-DD, YYYYMMDD, YYYYYMMDDHHMMSS) + Fill position with 'dummy' arguments to found out if a format tag is + used twice (This limit's the format to 255 characters, but this is ok) */ - for (pos=str; pos != end && isdigit(*pos) ; pos++) ; - digits= (uint) (pos-str); - year_length= (digits == 4 || digits == 8 || digits >= 14) ? 4 : 2; - field_length=year_length-1; - not_zero_date= 0; - for (i=0 ; i < 6 && str != end && isdigit(*str) ; i++) + dt_pos[0]= dt_pos[1]= dt_pos[2]= dt_pos[3]= + dt_pos[4]= dt_pos[5]= dt_pos[6]= dt_pos[7]= 255; + + for (; ptr != end; ptr++) { - uint tmp_value=(uint) (uchar) (*str++ - '0'); - while (str != end && isdigit(str[0]) && field_length--) + if (*ptr == '%' && ptr+1 != end) { - tmp_value=tmp_value*10 + (uint) (uchar) (*str - '0'); - str++; + uint position; + LINT_INIT(position); + switch (*++ptr) { + case 'y': // Year + case 'Y': + position= 0; + break; + case 'c': // Month + case 'm': + position= 1; + break; + case 'd': + case 'e': + position= 2; + break; + case 'h': + case 'I': + case 'l': + need_p= 1; // Need AM/PM + /* Fall through */ + case 'k': + case 'H': + position= 3; + break; + case 'i': + position= 4; + break; + case 's': + case 'S': + position= 5; + break; + case 'f': + position= 6; + if (dt_pos[5] != offset-1 || ptr[-2] != '.') + return 1; // Wrong usage of %f + break; + case 'p': // AM/PM + if (offset == 0) // Can't be first + return 0; + position= 7; + break; + default: + return 1; // Unknown controll char + } + if (dt_pos[position] != 255) // Don't allow same tag twice + return 1; + parts[position]= ptr-1; + + /* + If switching from time to date, ensure that all time parts + are used + */ + if (part_map && position <= 2 && !(part_map & (1 | 2 | 4))) + offset=5; + part_map|= (ulong) 1 << position; + dt_pos[position]= offset++; + allow_separator= 1; } - date[i]=tmp_value; - not_zero_date|= tmp_value; - if (i == 2 && str != end && *str == 'T') - str++; // ISO8601: CCYYMMDDThhmmss - else if ( i != 5 ) // Skip inter-field delimiters + else { - while (str != end && (ispunct(*str) || isspace(*str))) - { - // Only allow space between days and hours - if (isspace(*str) && i != 2) - DBUG_RETURN(TIMESTAMP_NONE); - str++; - } + /* + Don't allow any characters in format as this could easily confuse + the date reader + */ + if (!allow_separator) + return 1; // No separator here + allow_separator= 0; // Don't allow two separators + separators++; + /* Store in separator_map which parts are punct characters */ + if (my_ispunct(&my_charset_latin1, *ptr)) + separator_map|= (ulong) 1 << (offset-1); + else if (!my_isspace(&my_charset_latin1, *ptr)) + return 1; } - field_length=1; // Rest fields can only be 2 } - /* Handle second fractions */ - if (i == 6 && (uint) (end-str) >= 2 && *str == '.' && isdigit(str[1])) + + /* If no %f, specify it after seconds. Move %p up, if necessary */ + if ((part_map & 32) && !(part_map & 64)) { - str++; - uint tmp_value=(uint) (uchar) (*str - '0'); - field_length=3; - while (str++ != end && isdigit(str[0]) && field_length--) - tmp_value=tmp_value*10 + (uint) (uchar) (*str - '0'); - date[6]=tmp_value; - not_zero_date|= tmp_value; + dt_pos[6]= dt_pos[5] +1; + parts[6]= parts[5]; // For later test in (need_p) + if (dt_pos[6] == dt_pos[7]) // Move %p one step up if used + dt_pos[7]++; } - else - date[6]=0; - - if (year_length == 2 && not_zero_date) - date[0]+= (date[0] < YY_PART_YEAR ? 
2000 : 1900); - number_of_fields=i; - while (i < 6) - date[i++]=0; - if (number_of_fields < 3 || date[1] > 12 || - date[2] > 31 || date[3] > 23 || date[4] > 59 || date[5] > 59 || - (!fuzzy_date && (date[1] == 0 || date[2] == 0))) + + /* + Check that we have not used a non legal format specifier and that all + format specifiers have been used + + The last test is to ensure that %p is used if and only if + it's needed. + */ + if ((format_type == MYSQL_TIMESTAMP_DATETIME && + !test_all_bits(part_map, (1 | 2 | 4 | 8 | 16 | 32))) || + (format_type == MYSQL_TIMESTAMP_DATE && part_map != (1 | 2 | 4)) || + (format_type == MYSQL_TIMESTAMP_TIME && + !test_all_bits(part_map, 8 | 16 | 32)) || + !allow_separator || // %option should be last + (need_p && dt_pos[6] +1 != dt_pos[7]) || + (need_p ^ (dt_pos[7] != 255))) + return 1; + + if (dt_pos[6] != 255) // If fractional seconds { - /* Only give warning for a zero date if there is some garbage after */ - if (!not_zero_date) // If zero date + /* remove fractional seconds from later tests */ + uint pos= dt_pos[6] -1; + /* Remove separator before %f from sep map */ + separator_map= ((separator_map & ((ulong) (1 << pos)-1)) | + ((separator_map & ~((ulong) (1 << pos)-1)) >> 1)); + if (part_map & 64) { - for (; str != end ; str++) - { - if (!isspace(*str)) - { - not_zero_date= 1; // Give warning - break; - } - } + separators--; // There is always a separator + need_p= 1; // force use of separators } - if (not_zero_date) - current_thd->cuted_fields++; - DBUG_RETURN(TIMESTAMP_NONE); } - if (str != end && current_thd->count_cuted_fields) + + /* + Remove possible separator before %p from sep_map + (This can either be at position 3, 4, 6 or 7) h.m.d.%f %p + */ + if (dt_pos[7] != 255) { - for (; str != end ; str++) + if (need_p && parts[7] != parts[6]+2) + separators--; + } + /* + Calculate if %p is in first or last part of the datetime field + + At this point we have either %H-%i-%s %p 'year parts' or + 'year parts' &H-%i-%s %p" as %f was removed above + */ + offset= dt_pos[6] <= 3 ? 3 : 6; + /* Remove separator before %p from sep map */ + separator_map= ((separator_map & ((ulong) (1 << offset)-1)) | + ((separator_map & ~((ulong) (1 << offset)-1)) >> 1)); + + format_str= 0; + switch (format_type) { + case MYSQL_TIMESTAMP_DATE: + format_str= known_date_time_formats[INTERNAL_FORMAT].date_format; + /* fall through */ + case MYSQL_TIMESTAMP_TIME: + if (!format_str) + format_str=known_date_time_formats[INTERNAL_FORMAT].time_format; + + /* + If there is no separators, allow the internal format as we can read + this. If separators are used, they must be between each part + */ + if (format_length == 6 && !need_p && + !my_strnncoll(&my_charset_bin, + (const uchar *) format, 6, + (const uchar *) format_str, 6)) + return 0; + if (separator_map == (1 | 2)) { - if (!isspace(*str)) + if (format_type == MYSQL_TIMESTAMP_TIME) { - current_thd->cuted_fields++; - break; + if (*(format+2) != *(format+5)) + break; // Error + /* Store the character used for time formats */ + date_time_format->time_separator= *(format+2); } + return 0; } + break; + case MYSQL_TIMESTAMP_DATETIME: + /* + If there is no separators, allow the internal format as we can read + this. If separators are used, they must be between each part. 
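As a worked example of the positions array and separator map described in the comment at the top of this function (illustrative, not part of the patch): for the EUR date format "%d.%m.%Y" the parts are read in the order day, month, year, so positions[2]= 0, positions[1]= 1 and positions[0]= 2 (offset 2, i.e. the year is the third part read); the two '.' separators set separator_map to (1 | 2), which is exactly what the MYSQL_TIMESTAMP_DATE/TIME branch above accepts.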
+ Between DATE and TIME we also allow space as separator + */ + if ((format_length == 12 && !need_p && + !my_strnncoll(&my_charset_bin, + (const uchar *) format, 12, + (const uchar*) known_date_time_formats[INTERNAL_FORMAT].datetime_format, + 12)) || + (separators == 5 && separator_map == (1 | 2 | 8 | 16))) + return 0; + break; + default: + DBUG_ASSERT(1); + break; } - l_time->year= date[0]; - l_time->month= date[1]; - l_time->day= date[2]; - l_time->hour= date[3]; - l_time->minute=date[4]; - l_time->second=date[5]; - l_time->second_part=date[6]; - DBUG_RETURN(l_time->time_type= - (number_of_fields <= 3 ? TIMESTAMP_DATE : TIMESTAMP_FULL)); + return 1; // Error } -time_t str_to_timestamp(const char *str,uint length) -{ - TIME l_time; - long not_used; - time_t timestamp= 0; +/* + Create a DATE_TIME_FORMAT object from a format string specification - if (str_to_TIME(str,length,&l_time,0) != TIMESTAMP_NONE && - !(timestamp= my_gmt_sec(&l_time, ¬_used))) - current_thd->cuted_fields++; - - return timestamp; -} + SYNOPSIS + date_time_format_make() + format_type Format to parse (time, date or datetime) + format_str String to parse + format_length Length of string + NOTES + The returned object should be freed with my_free() -longlong str_to_datetime(const char *str,uint length,bool fuzzy_date) + RETURN + NULL ponter: Error + new object +*/ + +DATE_TIME_FORMAT +*date_time_format_make(timestamp_type format_type, + const char *format_str, uint format_length) { - TIME l_time; - if (str_to_TIME(str,length,&l_time,fuzzy_date) == TIMESTAMP_NONE) - return(0); - return (longlong) (l_time.year*LL(10000000000) + - l_time.month*LL(100000000)+ - l_time.day*LL(1000000)+ - l_time.hour*LL(10000)+ - (longlong) (l_time.minute*100+l_time.second)); + DATE_TIME_FORMAT tmp; + + if (format_length && format_length < 255 && + !parse_date_time_format(format_type, format_str, + format_length, &tmp)) + { + tmp.format.str= (char*) format_str; + tmp.format.length= format_length; + return date_time_format_copy((THD *)0, &tmp); + } + return 0; } -/***************************************************************************** -** convert a time string to a (ulong) value. 
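A usage sketch for date_time_format_make() (illustrative; as its comment notes, the returned object must be released with my_free()):

    DATE_TIME_FORMAT *fmt= date_time_format_make(MYSQL_TIMESTAMP_DATE,
                                                 "%d.%m.%Y", 8);
    if (fmt)
    {
      /* fmt->format.str is a null-terminated private copy of the string */
      my_free((gptr) fmt, MYF(0));
    }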
-** Can use all full timestamp formats and -** [-] DAYS [H]H:MM:SS, [H]H:MM:SS, [M]M:SS, [H]HMMSS, [M]MSS or [S]S -** There may be an optional [.second_part] after seconds -*****************************************************************************/ +/* + Create a copy of a DATE_TIME_FORMAT object + + SYNOPSIS + date_and_time_format_copy() + thd Set if variable should be allocated in thread mem + format format to copy + + NOTES + The returned object should be freed with my_free() + + RETURN + NULL ponter: Error + new object +*/ -bool str_to_time(const char *str,uint length,TIME *l_time) +DATE_TIME_FORMAT *date_time_format_copy(THD *thd, DATE_TIME_FORMAT *format) { - long date[5],value; - const char *end=str+length; - bool found_days,found_hours; - uint state; - - l_time->neg=0; - for (; str != end && !isdigit(*str) && *str != '-' ; str++) - length--; - if (str != end && *str == '-') + DATE_TIME_FORMAT *new_format; + ulong length= sizeof(*format) + format->format.length + 1; + + if (thd) + new_format= (DATE_TIME_FORMAT *) thd->alloc(length); + else + new_format= (DATE_TIME_FORMAT *) my_malloc(length, MYF(MY_WME)); + if (new_format) { - l_time->neg=1; - str++; - length--; + /* Put format string after current pos */ + new_format->format.str= (char*) (new_format+1); + memcpy((char*) new_format->positions, (char*) format->positions, + sizeof(format->positions)); + new_format->time_separator= format->time_separator; + /* We make the string null terminated for easy printf in SHOW VARIABLES */ + memcpy((char*) new_format->format.str, format->format.str, + format->format.length); + new_format->format.str[format->format.length]= 0; + new_format->format.length= format->format.length; } - if (str == end) - return 1; + return new_format; +} - /* Check first if this is a full TIMESTAMP */ - if (length >= 12) - { // Probably full timestamp - if (str_to_TIME(str,length,l_time,1) == TIMESTAMP_FULL) - return 0; // Was an ok timestamp - } - /* Not a timestamp. Try to get this as a DAYS_TO_SECOND string */ - for (value=0; str != end && isdigit(*str) ; str++) - value=value*10L + (long) (*str - '0'); +KNOWN_DATE_TIME_FORMAT known_date_time_formats[6]= +{ + {"USA", "%m.%d.%Y", "%Y-%m-%d %H.%i.%s", "%h:%i:%s %p" }, + {"JIS", "%Y-%m-%d", "%Y-%m-%d %H:%i:%s", "%H:%i:%s" }, + {"ISO", "%Y-%m-%d", "%Y-%m-%d %H:%i:%s", "%H:%i:%s" }, + {"EUR", "%d.%m.%Y", "%Y-%m-%d %H.%i.%s", "%H.%i.%s" }, + {"INTERNAL", "%Y%m%d", "%Y%m%d%H%i%s", "%H%i%s" }, + { 0, 0, 0, 0 } +}; - if (*str == ' ') - { - while (++str != end && str[0] == ' ') ; - str--; - } - LINT_INIT(state); - found_days=found_hours=0; - if ((uint) (end-str) > 1 && (*str == ' ' && isdigit(str[1]))) - { // days ! - date[0]=value; - state=1; // Assume next is hours - found_days=1; - str++; // Skip space; - } - else if ((end-str) > 1 && *str == ':' && isdigit(str[1])) - { - date[0]=0; // Assume we found hours - date[1]=value; - state=2; - found_hours=1; - str++; // skip ':' - } - else - { - /* String given as one number; assume HHMMSS format */ - date[0]= 0; - date[1]= value/10000; - date[2]= value/100 % 100; - date[3]= value % 100; - state=4; - goto fractional; +/* + Return format string according format name. 
+ If name is unknown, result is NULL +*/ + +const char *get_date_time_format_str(KNOWN_DATE_TIME_FORMAT *format, + timestamp_type type) +{ + switch (type) { + case MYSQL_TIMESTAMP_DATE: + return format->date_format; + case MYSQL_TIMESTAMP_DATETIME: + return format->datetime_format; + case MYSQL_TIMESTAMP_TIME: + return format->time_format; + default: + DBUG_ASSERT(0); // Impossible + return 0; } +} - /* Read hours, minutes and seconds */ - for (;;) - { - for (value=0; str != end && isdigit(*str) ; str++) - value=value*10L + (long) (*str - '0'); - date[state++]=value; - if (state == 4 || (end-str) < 2 || *str != ':' || !isdigit(str[1])) +/**************************************************************************** + Functions to create default time/date/datetime strings + + NOTE: + For the moment the DATE_TIME_FORMAT argument is ignored becasue + MySQL doesn't support comparing of date/time/datetime strings that + are not in arbutary order as dates are compared as strings in some + context) + This functions don't check that given TIME structure members are + in valid range. If they are not, return value won't reflect any + valid date either. Additionally, make_time doesn't take into + account time->day member: it's assumed that days have been converted + to hours already. +****************************************************************************/ + +void make_time(const DATE_TIME_FORMAT *format __attribute__((unused)), + const TIME *l_time, String *str) +{ + uint length= (uint) my_time_to_str(l_time, (char*) str->ptr()); + str->length(length); + str->set_charset(&my_charset_bin); +} + + +void make_date(const DATE_TIME_FORMAT *format __attribute__((unused)), + const TIME *l_time, String *str) +{ + uint length= (uint) my_date_to_str(l_time, (char*) str->ptr()); + str->length(length); + str->set_charset(&my_charset_bin); +} + + +void make_datetime(const DATE_TIME_FORMAT *format __attribute__((unused)), + const TIME *l_time, String *str) +{ + uint length= (uint) my_datetime_to_str(l_time, (char*) str->ptr()); + str->length(length); + str->set_charset(&my_charset_bin); +} + + +void make_truncated_value_warning(THD *thd, const char *str_val, + uint str_length, timestamp_type time_type) +{ + char warn_buff[MYSQL_ERRMSG_SIZE]; + const char *type_str; + + char buff[128]; + String str(buff,(uint32) sizeof(buff), system_charset_info); + str.length(0); + str.append(str_val, str_length); + str.append('\0'); + + switch (time_type) { + case MYSQL_TIMESTAMP_DATE: + type_str= "date"; + break; + case MYSQL_TIMESTAMP_TIME: + type_str= "time"; + break; + case MYSQL_TIMESTAMP_DATETIME: // FALLTHROUGH + default: + type_str= "datetime"; break; - str++; // Skip ':' } + sprintf(warn_buff, ER(ER_TRUNCATED_WRONG_VALUE), + type_str, str.ptr()); + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_TRUNCATED_WRONG_VALUE, warn_buff); +} - if (state != 4) - { // Not HH:MM:SS - /* Fix the date to assume that seconds was given */ - if (!found_hours && !found_days) - { - bmove_upp((char*) (date+4), (char*) (date+state), - sizeof(long)*(state-1)); - bzero((char*) date, sizeof(long)*(4-state)); - } - else - bzero((char*) (date+state), sizeof(long)*(4-state)); - } - fractional: - /* Get fractional second part */ - if ((end-str) >= 2 && *str == '.' 
&& isdigit(str[1])) - { - uint field_length=3; - str++; value=(uint) (uchar) (*str - '0'); - while (++str != end && isdigit(str[0]) && field_length--) - value=value*10 + (uint) (uchar) (*str - '0'); - date[4]=value; - } - else - date[4]=0; +/* Convert time value to integer in YYYYMMDDHHMMSS format */ - /* Some simple checks */ - if (date[2] >= 60 || date[3] >= 60) - { - current_thd->cuted_fields++; - return 1; - } - l_time->month=0; - l_time->day=date[0]; - l_time->hour=date[1]; - l_time->minute=date[2]; - l_time->second=date[3]; - l_time->second_part=date[4]; - - /* Check if there is garbage at end of the TIME specification */ - if (str != end && current_thd->count_cuted_fields) - { - do - { - if (!isspace(*str)) - { - current_thd->cuted_fields++; - break; - } - } while (++str != end); +ulonglong TIME_to_ulonglong_datetime(const TIME *time) +{ + return ((ulonglong) (time->year * 10000UL + + time->month * 100UL + + time->day) * ULL(1000000) + + (ulonglong) (time->hour * 10000UL + + time->minute * 100UL + + time->second)); +} + + +/* Convert TIME value to integer in YYYYMMDD format */ + +ulonglong TIME_to_ulonglong_date(const TIME *time) +{ + return (ulonglong) (time->year * 10000UL + time->month * 100UL + time->day); +} + + +/* + Convert TIME value to integer in HHMMSS format. + This function doesn't take into account time->day member: + it's assumed that days have been converted to hours already. +*/ + +ulonglong TIME_to_ulonglong_time(const TIME *time) +{ + return (ulonglong) (time->hour * 10000UL + + time->minute * 100UL + + time->second); +} + + +/* + Convert struct TIME (date and time split into year/month/day/hour/... + to a number in format YYYYMMDDHHMMSS (DATETIME), + YYYYMMDD (DATE) or HHMMSS (TIME). + + SYNOPSIS + TIME_to_ulonglong() + + DESCRIPTION + The function is used when we need to convert value of time item + to a number if it's used in numeric context, i. e.: + SELECT NOW()+1, CURDATE()+0, CURTIMIE()+0; + SELECT ?+1; + + NOTE + This function doesn't check that given TIME structure members are + in valid range. If they are not, return value won't reflect any + valid date either. +*/ + +ulonglong TIME_to_ulonglong(const TIME *time) +{ + switch (time->time_type) { + case MYSQL_TIMESTAMP_DATETIME: + return TIME_to_ulonglong_datetime(time); + case MYSQL_TIMESTAMP_DATE: + return TIME_to_ulonglong_date(time); + case MYSQL_TIMESTAMP_TIME: + return TIME_to_ulonglong_time(time); + case MYSQL_TIMESTAMP_NONE: + case MYSQL_TIMESTAMP_ERROR: + return ULL(0); + default: + DBUG_ASSERT(0); } return 0; } + +#endif diff --git a/sql/tzfile.h b/sql/tzfile.h new file mode 100644 index 00000000000..623cddc1f12 --- /dev/null +++ b/sql/tzfile.h @@ -0,0 +1,137 @@ +/* Copyright (C) 2004 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* + This file is based on public domain code from ftp://elsie.ncih.nist.gov/ + Initial source code is in the public domain, so clarified as of + 1996-06-05 by Arthur David Olson (arthur_david_olson@nih.gov). +*/ + +/* + Information about time zone files. +*/ + +#ifndef TZDIR +#define TZDIR "/usr/share/zoneinfo" /* Time zone object file directory */ +#endif /* !defined TZDIR */ + +/* + Each file begins with. . . +*/ + +#define TZ_MAGIC "TZif" + +struct tzhead { + char tzh_magic[4]; /* TZ_MAGIC */ + char tzh_reserved[16]; /* reserved for future use */ + char tzh_ttisgmtcnt[4]; /* coded number of trans. time flags */ + char tzh_ttisstdcnt[4]; /* coded number of trans. time flags */ + char tzh_leapcnt[4]; /* coded number of leap seconds */ + char tzh_timecnt[4]; /* coded number of transition times */ + char tzh_typecnt[4]; /* coded number of local time types */ + char tzh_charcnt[4]; /* coded number of abbr. chars */ +}; + +/* + . . .followed by. . . + + tzh_timecnt (char [4])s coded transition times a la time(2) + tzh_timecnt (unsigned char)s types of local time starting at above + tzh_typecnt repetitions of + one (char [4]) coded UTC offset in seconds + one (unsigned char) used to set tm_isdst + one (unsigned char) that's an abbreviation list index + tzh_charcnt (char)s '\0'-terminated zone abbreviations + tzh_leapcnt repetitions of + one (char [4]) coded leap second transition times + one (char [4]) total correction after above + tzh_ttisstdcnt (char)s indexed by type; if TRUE, transition + time is standard time, if FALSE, + transition time is wall clock time + if absent, transition times are + assumed to be wall clock time + tzh_ttisgmtcnt (char)s indexed by type; if TRUE, transition + time is UTC, if FALSE, + transition time is local time + if absent, transition times are + assumed to be local time +*/ + +/* + In the current implementation, we refuse to deal with files that + exceed any of the limits below. +*/ + +#ifndef TZ_MAX_TIMES +/* + The TZ_MAX_TIMES value below is enough to handle a bit more than a + year's worth of solar time (corrected daily to the nearest second) or + 138 years of Pacific Presidential Election time + (where there are three time zone transitions every fourth year). +*/ +#define TZ_MAX_TIMES 370 +#endif /* !defined TZ_MAX_TIMES */ + +#ifndef TZ_MAX_TYPES +#ifdef SOLAR +#define TZ_MAX_TYPES 256 /* Limited by what (unsigned char)'s can hold */ +#else +/* + Must be at least 14 for Europe/Riga as of Jan 12 1995, + as noted by Earl Chew <earl@hpato.aus.hp.com>. 
+*/ +#define TZ_MAX_TYPES 20 /* Maximum number of local time types */ +#endif /* defined SOLAR */ +#endif /* !defined TZ_MAX_TYPES */ + +#ifndef TZ_MAX_CHARS +#define TZ_MAX_CHARS 50 /* Maximum number of abbreviation characters */ + /* (limited by what unsigned chars can hold) */ +#endif /* !defined TZ_MAX_CHARS */ + +#ifndef TZ_MAX_LEAPS +#define TZ_MAX_LEAPS 50 /* Maximum number of leap second corrections */ +#endif /* !defined TZ_MAX_LEAPS */ + +#ifndef TZ_MAX_REV_RANGES +#ifdef SOLAR +/* Solar (Asia/RiyadhXX) zones need significantly bigger TZ_MAX_REV_RANGES */ +#define TZ_MAX_REV_RANGES (TZ_MAX_TIMES*2+TZ_MAX_LEAPS*2+2) +#else +#define TZ_MAX_REV_RANGES (TZ_MAX_TIMES+TZ_MAX_LEAPS+2) +#endif +#endif + +#define SECS_PER_MIN 60 +#define MINS_PER_HOUR 60 +#define HOURS_PER_DAY 24 +#define DAYS_PER_WEEK 7 +#define DAYS_PER_NYEAR 365 +#define DAYS_PER_LYEAR 366 +#define SECS_PER_HOUR (SECS_PER_MIN * MINS_PER_HOUR) +#define SECS_PER_DAY ((long) SECS_PER_HOUR * HOURS_PER_DAY) +#define MONS_PER_YEAR 12 + +#define TM_YEAR_BASE 1900 + +#define EPOCH_YEAR 1970 + +/* + Accurate only for the past couple of centuries, + that will probably do. +*/ + +#define isleap(y) (((y) % 4) == 0 && (((y) % 100) != 0 || ((y) % 400) == 0)) diff --git a/sql/tztime.cc b/sql/tztime.cc new file mode 100644 index 00000000000..b0a32748998 --- /dev/null +++ b/sql/tztime.cc @@ -0,0 +1,2631 @@ +/* Copyright (C) 2004 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* + Most of the following code and structures were derived from + public domain code from ftp://elsie.nci.nih.gov/pub + (We will refer to this code as to elsie-code further.) +*/ + +/* + We should not include mysql_priv.h in mysql_tzinfo_to_sql utility since + it creates unsolved link dependencies on some platforms. +*/ + +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation // gcc: Class implementation +#endif + +#include <my_global.h> +#if !defined(TZINFO2SQL) && !defined(TESTTIME) +#include "mysql_priv.h" +#else +#include <my_time.h> +#include "tztime.h" +#include <my_sys.h> +#endif + +#include "tzfile.h" +#include <m_string.h> +#include <my_dir.h> + +/* + Now we don't use abbreviations in server but we will do this in future. +*/ +#if defined(TZINFO2SQL) || defined(TESTTIME) +#define ABBR_ARE_USED +#else +#if !defined(DBUG_OFF) +/* Let use abbreviations for debug purposes */ +#undef ABBR_ARE_USED +#define ABBR_ARE_USED +#endif /* !defined(DBUG_OFF) */ +#endif /* defined(TZINFO2SQL) || defined(TESTTIME) */ + +/* Structure describing local time type (e.g. Moscow summer time (MSD)) */ +typedef struct ttinfo +{ + long tt_gmtoff; // Offset from UTC in seconds + uint tt_isdst; // Is daylight saving time or not. Used to set tm_isdst +#ifdef ABBR_ARE_USED + uint tt_abbrind; // Index of start of abbreviation for this time type. 
+#endif + /* + We don't use tt_ttisstd and tt_ttisgmt members of original elsie-code + struct since we don't support POSIX-style TZ descriptions in variables. + */ +} TRAN_TYPE_INFO; + +/* Structure describing leap-second corrections. */ +typedef struct lsinfo +{ + my_time_t ls_trans; // Transition time + long ls_corr; // Correction to apply +} LS_INFO; + +/* + Structure with information describing ranges of my_time_t shifted to local + time (my_time_t + offset). Used for local TIME -> my_time_t conversion. + See comments for TIME_to_gmt_sec() for more info. +*/ +typedef struct revtinfo +{ + long rt_offset; // Offset of local time from UTC in seconds + uint rt_type; // Type of period 0 - Normal period. 1 - Spring time-gap +} REVT_INFO; + +#ifdef TZNAME_MAX +#define MY_TZNAME_MAX TZNAME_MAX +#endif +#ifndef TZNAME_MAX +#define MY_TZNAME_MAX 255 +#endif + +/* + Structure which fully describes time zone which is + described in our db or in zoneinfo files. +*/ +typedef struct st_time_zone_info +{ + uint leapcnt; // Number of leap-second corrections + uint timecnt; // Number of transitions between time types + uint typecnt; // Number of local time types + uint charcnt; // Number of characters used for abbreviations + uint revcnt; // Number of transition descr. for TIME->my_time_t conversion + /* The following are dynamical arrays are allocated in MEM_ROOT */ + my_time_t *ats; // Times of transitions between time types + unsigned char *types; // Local time types for transitions + TRAN_TYPE_INFO *ttis; // Local time types descriptions +#ifdef ABBR_ARE_USED + /* Storage for local time types abbreviations. They are stored as ASCIIZ */ + char *chars; +#endif + /* + Leap seconds corrections descriptions, this array is shared by + all time zones who use leap seconds. + */ + LS_INFO *lsis; + /* + Starting points and descriptions of shifted my_time_t (my_time_t + offset) + ranges on which shifted my_time_t -> my_time_t mapping is linear or undefined. + Used for tm -> my_time_t conversion. + */ + my_time_t *revts; + REVT_INFO *revtis; + /* + Time type which is used for times smaller than first transition or if + there are no transitions at all. + */ + TRAN_TYPE_INFO *fallback_tti; + +} TIME_ZONE_INFO; + + +static my_bool prepare_tz_info(TIME_ZONE_INFO *sp, MEM_ROOT *storage); + + +#if defined(TZINFO2SQL) || defined(TESTTIME) + +/* + Load time zone description from zoneinfo (TZinfo) file. 
+ + SYNOPSIS + tz_load() + name - path to zoneinfo file + sp - TIME_ZONE_INFO structure to fill + + RETURN VALUES + 0 - Ok + 1 - Error +*/ +static my_bool +tz_load(const char *name, TIME_ZONE_INFO *sp, MEM_ROOT *storage) +{ + char *p; + int read_from_file; + uint i; + FILE *file; + + if (!(file= my_fopen(name, O_RDONLY|O_BINARY, MYF(MY_WME)))) + return 1; + { + union + { + struct tzhead tzhead; + char buf[sizeof(struct tzhead) + sizeof(my_time_t) * TZ_MAX_TIMES + + TZ_MAX_TIMES + sizeof(TRAN_TYPE_INFO) * TZ_MAX_TYPES + +#ifdef ABBR_ARE_USED + max(TZ_MAX_CHARS + 1, (2 * (MY_TZNAME_MAX + 1))) + +#endif + sizeof(LS_INFO) * TZ_MAX_LEAPS]; + } u; + uint ttisstdcnt; + uint ttisgmtcnt; + char *tzinfo_buf; + + read_from_file= my_fread(file, u.buf, sizeof(u.buf), MYF(MY_WME)); + + if (my_fclose(file, MYF(MY_WME)) != 0) + return 1; + + if (read_from_file < (int)sizeof(struct tzhead)) + return 1; + + ttisstdcnt= int4net(u.tzhead.tzh_ttisgmtcnt); + ttisgmtcnt= int4net(u.tzhead.tzh_ttisstdcnt); + sp->leapcnt= int4net(u.tzhead.tzh_leapcnt); + sp->timecnt= int4net(u.tzhead.tzh_timecnt); + sp->typecnt= int4net(u.tzhead.tzh_typecnt); + sp->charcnt= int4net(u.tzhead.tzh_charcnt); + p= u.tzhead.tzh_charcnt + sizeof u.tzhead.tzh_charcnt; + if (sp->leapcnt > TZ_MAX_LEAPS || + sp->typecnt == 0 || sp->typecnt > TZ_MAX_TYPES || + sp->timecnt > TZ_MAX_TIMES || + sp->charcnt > TZ_MAX_CHARS || + (ttisstdcnt != sp->typecnt && ttisstdcnt != 0) || + (ttisgmtcnt != sp->typecnt && ttisgmtcnt != 0)) + return 1; + if ((uint)(read_from_file - (p - u.buf)) < + sp->timecnt * 4 + /* ats */ + sp->timecnt + /* types */ + sp->typecnt * (4 + 2) + /* ttinfos */ + sp->charcnt + /* chars */ + sp->leapcnt * (4 + 4) + /* lsinfos */ + ttisstdcnt + /* ttisstds */ + ttisgmtcnt) /* ttisgmts */ + return 1; + + if (!(tzinfo_buf= (char *)alloc_root(storage, + ALIGN_SIZE(sp->timecnt * + sizeof(my_time_t)) + + ALIGN_SIZE(sp->timecnt) + + ALIGN_SIZE(sp->typecnt * + sizeof(TRAN_TYPE_INFO)) + +#ifdef ABBR_ARE_USED + ALIGN_SIZE(sp->charcnt) + +#endif + sp->leapcnt * sizeof(LS_INFO)))) + return 1; + + sp->ats= (my_time_t *)tzinfo_buf; + tzinfo_buf+= ALIGN_SIZE(sp->timecnt * sizeof(my_time_t)); + sp->types= (unsigned char *)tzinfo_buf; + tzinfo_buf+= ALIGN_SIZE(sp->timecnt); + sp->ttis= (TRAN_TYPE_INFO *)tzinfo_buf; + tzinfo_buf+= ALIGN_SIZE(sp->typecnt * sizeof(TRAN_TYPE_INFO)); +#ifdef ABBR_ARE_USED + sp->chars= tzinfo_buf; + tzinfo_buf+= ALIGN_SIZE(sp->charcnt); +#endif + sp->lsis= (LS_INFO *)tzinfo_buf; + + for (i= 0; i < sp->timecnt; i++, p+= 4) + sp->ats[i]= int4net(p); + + for (i= 0; i < sp->timecnt; i++) + { + sp->types[i]= (unsigned char) *p++; + if (sp->types[i] >= sp->typecnt) + return 1; + } + for (i= 0; i < sp->typecnt; i++) + { + TRAN_TYPE_INFO * ttisp; + + ttisp= &sp->ttis[i]; + ttisp->tt_gmtoff= int4net(p); + p+= 4; + ttisp->tt_isdst= (unsigned char) *p++; + if (ttisp->tt_isdst != 0 && ttisp->tt_isdst != 1) + return 1; + ttisp->tt_abbrind= (unsigned char) *p++; + if (ttisp->tt_abbrind > sp->charcnt) + return 1; + } + for (i= 0; i < sp->charcnt; i++) + sp->chars[i]= *p++; + sp->chars[i]= '\0'; /* ensure '\0' at end */ + for (i= 0; i < sp->leapcnt; i++) + { + LS_INFO *lsisp; + + lsisp= &sp->lsis[i]; + lsisp->ls_trans= int4net(p); + p+= 4; + lsisp->ls_corr= int4net(p); + p+= 4; + } + /* + Since we don't support POSIX style TZ definitions in variables we + don't read further like glibc or elsie code. 
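
    (Aside on the parsing above: the int4net() calls decode the big-endian
    32-bit counters stored in the tzhead header and the transition data that
    follows it.  A minimal sketch of such a decoder -- the name decode_be32
    is made up here, and it ignores the sign handling that the possibly
    negative transition times would need -- looks like:

      static ulong decode_be32(const uchar *p)
      {
        return ((ulong) p[0] << 24) | ((ulong) p[1] << 16) |
               ((ulong) p[2] << 8)  |  (ulong) p[3];
      }

    so a tzh_timecnt field holding the bytes 0x00 0x00 0x00 0x05 means five
    transition times follow in the data section.)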
+ */ + } + + return prepare_tz_info(sp, storage); +} +#endif /* defined(TZINFO2SQL) || defined(TESTTIME) */ + + +/* + Finish preparation of time zone description for use in TIME_to_gmt_sec() + and gmt_sec_to_TIME() functions. + + SYNOPSIS + prepare_tz_info() + sp - pointer to time zone description + storage - pointer to MEM_ROOT where arrays for map allocated + + DESCRIPTION + First task of this function is to find fallback time type which will + be used if there are no transitions or we have moment in time before + any transitions. + Second task is to build "shifted my_time_t" -> my_time_t map used in + TIME -> my_time_t conversion. + Note: See description of TIME_to_gmt_sec() function first. + In order to perform TIME -> my_time_t conversion we need to build table + which defines "shifted by tz offset and leap seconds my_time_t" -> + my_time_t function wich is almost the same (except ranges of ambiguity) + as reverse function to piecewise linear function used for my_time_t -> + "shifted my_time_t" conversion and which is also specified as table in + zoneinfo file or in our db (It is specified as start of time type ranges + and time type offsets). So basic idea is very simple - let us iterate + through my_time_t space from one point of discontinuity of my_time_t -> + "shifted my_time_t" function to another and build our approximation of + reverse function. (Actually we iterate through ranges on which + my_time_t -> "shifted my_time_t" is linear function). + + RETURN VALUES + 0 Ok + 1 Error +*/ +static my_bool +prepare_tz_info(TIME_ZONE_INFO *sp, MEM_ROOT *storage) +{ + my_time_t cur_t= MY_TIME_T_MIN; + my_time_t cur_l, end_t, end_l; + my_time_t cur_max_seen_l= MY_TIME_T_MIN; + long cur_offset, cur_corr, cur_off_and_corr; + uint next_trans_idx, next_leap_idx; + uint i; + /* + Temporary arrays where we will store tables. Needed because + we don't know table sizes ahead. (Well we can estimate their + upper bound but this will take extra space.) + */ + my_time_t revts[TZ_MAX_REV_RANGES]; + REVT_INFO revtis[TZ_MAX_REV_RANGES]; + + LINT_INIT(end_l); + + /* + Let us setup fallback time type which will be used if we have not any + transitions or if we have moment of time before first transition. + We will find first non-DST local time type and use it (or use first + local time type if all of them are DST types). + */ + for (i= 0; i < sp->typecnt && sp->ttis[i].tt_isdst; i++) + /* no-op */ ; + if (i == sp->typecnt) + i= 0; + sp->fallback_tti= &(sp->ttis[i]); + + + /* + Let us build shifted my_time_t -> my_time_t map. + */ + sp->revcnt= 0; + + /* Let us find initial offset */ + if (sp->timecnt == 0 || cur_t < sp->ats[0]) + { + /* + If we have not any transitions or t is before first transition we are using + already found fallback time type which index is already in i. + */ + next_trans_idx= 0; + } + else + { + /* cur_t == sp->ats[0] so we found transition */ + i= sp->types[0]; + next_trans_idx= 1; + } + + cur_offset= sp->ttis[i].tt_gmtoff; + + + /* let us find leap correction... unprobable, but... */ + for (next_leap_idx= 0; next_leap_idx < sp->leapcnt && + cur_t >= sp->lsis[next_leap_idx].ls_trans; + ++next_leap_idx) + continue; + + if (next_leap_idx > 0) + cur_corr= sp->lsis[next_leap_idx - 1].ls_corr; + else + cur_corr= 0; + + /* Iterate trough t space */ + while (sp->revcnt < TZ_MAX_REV_RANGES - 1) + { + cur_off_and_corr= cur_offset - cur_corr; + + /* + We assuming that cur_t could be only overflowed downwards, + we also assume that end_t won't be overflowed in this case. 
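
    (To make the table this loop builds concrete: take a hypothetical zone
    that switches from UTC+1 to UTC+2 at the instant T.  Wall-clock values
    T+3600 .. T+7199 never occur, so the loop emits revts[k]= T+3600 with
    rt_type= 1 and the pre-switch rt_offset, followed by revts[k+1]= T+7200
    with rt_type= 0 and the new offset.  TIME_to_gmt_sec() below then maps
    any wall-clock value inside the gap to revts[k] - rt_offset = T, i.e.
    the beginning of the gap, and sets the in_dst_time_gap flag.)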
+ */ + if (cur_off_and_corr < 0 && + cur_t < MY_TIME_T_MIN - cur_off_and_corr) + cur_t= MY_TIME_T_MIN - cur_off_and_corr; + + cur_l= cur_t + cur_off_and_corr; + + /* + Let us choose end_t as point before next time type change or leap + second correction. + */ + end_t= min((next_trans_idx < sp->timecnt) ? sp->ats[next_trans_idx] - 1: + MY_TIME_T_MAX, + (next_leap_idx < sp->leapcnt) ? + sp->lsis[next_leap_idx].ls_trans - 1: MY_TIME_T_MAX); + /* + again assuming that end_t can be overlowed only in positive side + we also assume that end_t won't be overflowed in this case. + */ + if (cur_off_and_corr > 0 && + end_t > MY_TIME_T_MAX - cur_off_and_corr) + end_t= MY_TIME_T_MAX - cur_off_and_corr; + + end_l= end_t + cur_off_and_corr; + + + if (end_l > cur_max_seen_l) + { + /* We want special handling in the case of first range */ + if (cur_max_seen_l == MY_TIME_T_MIN) + { + revts[sp->revcnt]= cur_l; + revtis[sp->revcnt].rt_offset= cur_off_and_corr; + revtis[sp->revcnt].rt_type= 0; + sp->revcnt++; + cur_max_seen_l= end_l; + } + else + { + if (cur_l > cur_max_seen_l + 1) + { + /* We have a spring time-gap and we are not at the first range */ + revts[sp->revcnt]= cur_max_seen_l + 1; + revtis[sp->revcnt].rt_offset= revtis[sp->revcnt-1].rt_offset; + revtis[sp->revcnt].rt_type= 1; + sp->revcnt++; + if (sp->revcnt == TZ_MAX_TIMES + TZ_MAX_LEAPS + 1) + break; /* That was too much */ + cur_max_seen_l= cur_l - 1; + } + + /* Assume here end_l > cur_max_seen_l (because end_l>=cur_l) */ + + revts[sp->revcnt]= cur_max_seen_l + 1; + revtis[sp->revcnt].rt_offset= cur_off_and_corr; + revtis[sp->revcnt].rt_type= 0; + sp->revcnt++; + cur_max_seen_l= end_l; + } + } + + if (end_t == MY_TIME_T_MAX || + (cur_off_and_corr > 0) && + (end_t >= MY_TIME_T_MAX - cur_off_and_corr)) + /* end of t space */ + break; + + cur_t= end_t + 1; + + /* + Let us find new offset and correction. Because of our choice of end_t + cur_t can only be point where new time type starts or/and leap + correction is performed. 
+ */ + if (sp->timecnt != 0 && cur_t >= sp->ats[0]) /* else reuse old offset */ + if (next_trans_idx < sp->timecnt && + cur_t == sp->ats[next_trans_idx]) + { + /* We are at offset point */ + cur_offset= sp->ttis[sp->types[next_trans_idx]].tt_gmtoff; + ++next_trans_idx; + } + + if (next_leap_idx < sp->leapcnt && + cur_t == sp->lsis[next_leap_idx].ls_trans) + { + /* we are at leap point */ + cur_corr= sp->lsis[next_leap_idx].ls_corr; + ++next_leap_idx; + } + } + + /* check if we have had enough space */ + if (sp->revcnt == TZ_MAX_REV_RANGES - 1) + return 1; + + /* set maximum end_l as finisher */ + revts[sp->revcnt]= end_l; + + /* Allocate arrays of proper size in sp and copy result there */ + if (!(sp->revts= (my_time_t *)alloc_root(storage, + sizeof(my_time_t) * (sp->revcnt + 1))) || + !(sp->revtis= (REVT_INFO *)alloc_root(storage, + sizeof(REVT_INFO) * sp->revcnt))) + return 1; + + memcpy(sp->revts, revts, sizeof(my_time_t) * (sp->revcnt + 1)); + memcpy(sp->revtis, revtis, sizeof(REVT_INFO) * sp->revcnt); + + return 0; +} + + +#if !defined(TZINFO2SQL) + +static const uint mon_lengths[2][MONS_PER_YEAR]= +{ + { 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }, + { 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 } +}; + +static const uint mon_starts[2][MONS_PER_YEAR]= +{ + { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 }, + { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335 } +}; + +static const uint year_lengths[2]= +{ + DAYS_PER_NYEAR, DAYS_PER_LYEAR +}; + +#define LEAPS_THRU_END_OF(y) ((y) / 4 - (y) / 100 + (y) / 400) + + +/* + Converts time from my_time_t representation (seconds in UTC since Epoch) + to broken down representation using given local time zone offset. + + SYNOPSIS + sec_to_TIME() + tmp - pointer to structure for broken down representation + t - my_time_t value to be converted + offset - local time zone offset + + DESCRIPTION + Convert my_time_t with offset to TIME struct. Differs from timesub + (from elsie code) because doesn't contain any leap correction and + TM_GMTOFF and is_dst setting and contains some MySQL specific + initialization. Funny but with removing of these we almost have + glibc's offtime function. +*/ +static void +sec_to_TIME(TIME * tmp, my_time_t t, long offset) +{ + long days; + long rem; + int y; + int yleap; + const uint *ip; + + days= t / SECS_PER_DAY; + rem= t % SECS_PER_DAY; + + /* + We do this as separate step after dividing t, because this + allows us handle times near my_time_t bounds without overflows. + */ + rem+= offset; + while (rem < 0) + { + rem+= SECS_PER_DAY; + days--; + } + while (rem >= SECS_PER_DAY) + { + rem -= SECS_PER_DAY; + days++; + } + tmp->hour= (uint)(rem / SECS_PER_HOUR); + rem= rem % SECS_PER_HOUR; + tmp->minute= (uint)(rem / SECS_PER_MIN); + /* + A positive leap second requires a special + representation. This uses "... ??:59:60" et seq. 
+ */ + tmp->second= (uint)(rem % SECS_PER_MIN); + + y= EPOCH_YEAR; + while (days < 0 || days >= (long)year_lengths[yleap= isleap(y)]) + { + int newy; + + newy= y + days / DAYS_PER_NYEAR; + if (days < 0) + newy--; + days-= (newy - y) * DAYS_PER_NYEAR + + LEAPS_THRU_END_OF(newy - 1) - + LEAPS_THRU_END_OF(y - 1); + y= newy; + } + tmp->year= y; + + ip= mon_lengths[yleap]; + for (tmp->month= 0; days >= (long) ip[tmp->month]; tmp->month++) + days= days - (long) ip[tmp->month]; + tmp->month++; + tmp->day= (uint)(days + 1); + + /* filling MySQL specific TIME members */ + tmp->neg= 0; tmp->second_part= 0; + tmp->time_type= MYSQL_TIMESTAMP_DATETIME; +} + + +/* + Find time range wich contains given my_time_t value + + SYNOPSIS + find_time_range() + t - my_time_t value for which we looking for range + range_boundaries - sorted array of range starts. + higher_bound - number of ranges + + DESCRIPTION + Performs binary search for range which contains given my_time_t value. + It has sense if number of ranges is greater than zero and my_time_t value + is greater or equal than beginning of first range. It also assumes that + t belongs to some range specified or end of last is MY_TIME_T_MAX. + + With this localtime_r on real data may takes less time than with linear + search (I've seen 30% speed up). + + RETURN VALUE + Index of range to which t belongs +*/ +static uint +find_time_range(my_time_t t, const my_time_t *range_boundaries, + uint higher_bound) +{ + uint i, lower_bound= 0; + + /* + Function will work without this assertion but result would be meaningless. + */ + DBUG_ASSERT(higher_bound > 0 && t >= range_boundaries[0]); + + /* + Do binary search for minimal interval which contain t. We preserve: + range_boundaries[lower_bound] <= t < range_boundaries[higher_bound] + invariant and decrease this higher_bound - lower_bound gap twice + times on each step. + */ + + while (higher_bound - lower_bound > 1) + { + i= (lower_bound + higher_bound) >> 1; + if (range_boundaries[i] <= t) + lower_bound= i; + else + higher_bound= i; + } + return lower_bound; +} + +/* + Find local time transition for given my_time_t. + + SYNOPSIS + find_transition_type() + t - my_time_t value to be converted + sp - pointer to struct with time zone description + + RETURN VALUE + Pointer to structure in time zone description describing + local time type for given my_time_t. +*/ +static +const TRAN_TYPE_INFO * +find_transition_type(my_time_t t, const TIME_ZONE_INFO *sp) +{ + if (unlikely(sp->timecnt == 0 || t < sp->ats[0])) + { + /* + If we have not any transitions or t is before first transition let + us use fallback time type. + */ + return sp->fallback_tti; + } + + /* + Do binary search for minimal interval between transitions which + contain t. With this localtime_r on real data may takes less + time than with linear search (I've seen 30% speed up). + */ + return &(sp->ttis[sp->types[find_time_range(t, sp->ats, sp->timecnt)]]); +} + + +/* + Converts time in my_time_t representation (seconds in UTC since Epoch) to + broken down TIME representation in local time zone. + + SYNOPSIS + gmt_sec_to_TIME() + tmp - pointer to structure for broken down represenatation + sec_in_utc - my_time_t value to be converted + sp - pointer to struct with time zone description + + TODO + We can improve this function by creating joined array of transitions and + leap corrections. 
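
    (A small check of find_time_range() above, with made-up boundaries:
    for range_boundaries[]= {0, 100, 200} and higher_bound= 3,
    find_time_range(150, ...) returns 1 and find_time_range(250, ...)
    returns 2, i.e. the index of the range the value falls into, with the
    last range open-ended.)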
This will require adding extra field to TRAN_TYPE_INFO + for storing number of "extra" seconds to minute occured due to correction + (60th and 61st second, look how we calculate them as "hit" in this + function). + Under realistic assumptions about frequency of transitions the same array + can be used fot TIME -> my_time_t conversion. For this we need to + implement tweaked binary search which will take into account that some + TIME has two matching my_time_t ranges and some of them have none. +*/ +static void +gmt_sec_to_TIME(TIME *tmp, my_time_t sec_in_utc, const TIME_ZONE_INFO *sp) +{ + const TRAN_TYPE_INFO *ttisp; + const LS_INFO *lp; + long corr= 0; + int hit= 0; + int i; + + /* + Find proper transition (and its local time type) for our sec_in_utc value. + Funny but again by separating this step in function we receive code + which very close to glibc's code. No wonder since they obviously use + the same base and all steps are sensible. + */ + ttisp= find_transition_type(sec_in_utc, sp); + + /* + Let us find leap correction for our sec_in_utc value and number of extra + secs to add to this minute. + This loop is rarely used because most users will use time zones without + leap seconds, and even in case when we have such time zone there won't + be many iterations (we have about 22 corrections at this moment (2004)). + */ + for ( i= sp->leapcnt; i-- > 0; ) + { + lp= &sp->lsis[i]; + if (sec_in_utc >= lp->ls_trans) + { + if (sec_in_utc == lp->ls_trans) + { + hit= ((i == 0 && lp->ls_corr > 0) || + lp->ls_corr > sp->lsis[i - 1].ls_corr); + if (hit) + { + while (i > 0 && + sp->lsis[i].ls_trans == sp->lsis[i - 1].ls_trans + 1 && + sp->lsis[i].ls_corr == sp->lsis[i - 1].ls_corr + 1) + { + hit++; + i--; + } + } + } + corr= lp->ls_corr; + break; + } + } + + sec_to_TIME(tmp, sec_in_utc, ttisp->tt_gmtoff - corr); + + tmp->second+= hit; +} + + +/* + Converts local time in broken down representation to local + time zone analog of my_time_t represenation. + + SYNOPSIS + sec_since_epoch() + year, mon, mday, hour, min, sec - broken down representation. + + DESCRIPTION + Converts time in broken down representation to my_time_t representation + ignoring time zone. Note that we cannot convert back some valid _local_ + times near ends of my_time_t range because of my_time_t overflow. But we + ignore this fact now since MySQL will never pass such argument. + + RETURN VALUE + Seconds since epoch time representation. +*/ +static my_time_t +sec_since_epoch(int year, int mon, int mday, int hour, int min ,int sec) +{ +#ifndef WE_WANT_TO_HANDLE_UNORMALIZED_DATES + /* + It turns out that only whenever month is normalized or unnormalized + plays role. + */ + DBUG_ASSERT(mon > 0 && mon < 13); + long days= year * DAYS_PER_NYEAR - EPOCH_YEAR * DAYS_PER_NYEAR + + LEAPS_THRU_END_OF(year - 1) - + LEAPS_THRU_END_OF(EPOCH_YEAR - 1); + days+= mon_starts[isleap(year)][mon - 1]; +#else + long norm_month= (mon - 1) % MONS_PER_YEAR; + long a_year= year + (mon - 1)/MONS_PER_YEAR - (int)(norm_month < 0); + long days= a_year * DAYS_PER_NYEAR - EPOCH_YEAR * DAYS_PER_NYEAR + + LEAPS_THRU_END_OF(a_year - 1) - + LEAPS_THRU_END_OF(EPOCH_YEAR - 1); + days+= mon_starts[isleap(a_year)] + [norm_month + (norm_month < 0 ? MONS_PER_YEAR : 0)]; +#endif + days+= mday - 1; + + return ((days * HOURS_PER_DAY + hour) * MINS_PER_HOUR + min) * + SECS_PER_MIN + sec; +} + + +/* + Converts local time in broken down TIME representation to my_time_t + representation. 
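
    (A quick check of the sec_since_epoch() arithmetic above, using the
    constants from tzfile.h: for 1970-01-01 00:00:00 the day count is 0 and
    the result is 0; for 2004-01-01 00:00:00 the day count is
    34*365 + (LEAPS_THRU_END_OF(2003) - LEAPS_THRU_END_OF(1969)) =
    12410 + 8 = 12418 days, i.e. 12418 * 86400 = 1072915200 seconds.)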
+ + SYNOPSIS + TIME_to_gmt_sec() + t - pointer to structure for broken down represenatation + sp - pointer to struct with time zone description + in_dst_time_gap - pointer to bool which is set to true if datetime + value passed doesn't really exist (i.e. falls into + spring time-gap) and is not touched otherwise. + + DESCRIPTION + This is mktime analog for MySQL. It is essentially different + from mktime (or hypotetical my_mktime) because: + - It has no idea about tm_isdst member so if it + has two answers it will give the smaller one + - If we are in spring time gap then it will return + beginning of the gap + - It can give wrong results near the ends of my_time_t due to + overflows, but we are safe since in MySQL we will never + call this function for such dates (its restriction for year + between 1970 and 2038 gives us several days of reserve). + - By default it doesn't support un-normalized input. But if + sec_since_epoch() function supports un-normalized dates + then this function should handle un-normalized input right, + altough it won't normalize structure TIME. + + Traditional approach to problem of conversion from broken down + representation to time_t is iterative. Both elsie's and glibc + implementation try to guess what time_t value should correspond to + this broken-down value. They perform localtime_r function on their + guessed value and then calculate the difference and try to improve + their guess. Elsie's code guesses time_t value in bit by bit manner, + Glibc's code tries to add difference between broken-down value + corresponding to guess and target broken-down value to current guess. + It also uses caching of last found correction... So Glibc's approach + is essentially faster but introduces some undetermenism (in case if + is_dst member of broken-down representation (tm struct) is not known + and we have two possible answers). + + We use completely different approach. It is better since it is both + faster than iterative implementations and fully determenistic. If you + look at my_time_t to TIME conversion then you'll find that it consist + of two steps: + The first is calculating shifted my_time_t value and the second - TIME + calculation from shifted my_time_t value (well it is a bit simplified + picture). The part in which we are interested in is my_time_t -> shifted + my_time_t conversion. It is piecewise linear function which is defined + by combination of transition times as break points and times offset + as changing function parameter. The possible inverse function for this + converison would be ambiguos but with MySQL's restrictions we can use + some function which is the same as inverse function on unambigiuos + ranges and coincides with one of branches of inverse function in + other ranges. Thus we just need to build table which will determine + this shifted my_time_t -> my_time_t conversion similar to existing + (my_time_t -> shifted my_time_t table). We do this in + prepare_tz_info function. + + TODO + If we can even more improve this function. For doing this we will need to + build joined map of transitions and leap corrections for gmt_sec_to_TIME() + function (similar to revts/revtis). Under realistic assumptions about + frequency of transitions we can use the same array for TIME_to_gmt_sec(). + We need to implement special version of binary search for this. Such step + will be beneficial to CPU cache since we will decrease data-set used for + conversion twice. + + RETURN VALUE + Seconds in UTC since Epoch. + 0 in case of error. 
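
    A minimal sketch of the intended calling pattern (the caller-side names
    broken_down and tz_info are illustrative, not taken from this patch):

      bool in_gap= 0;
      my_time_t sec= TIME_to_gmt_sec(&broken_down, tz_info, &in_gap);
      if (!sec)
        ;                  // outside the representable my_time_t range
      else if (in_gap)
        ;                  // fell into a spring time-gap; sec is the gap start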
+*/ +static my_time_t +TIME_to_gmt_sec(const TIME *t, const TIME_ZONE_INFO *sp, bool *in_dst_time_gap) +{ + my_time_t local_t; + uint saved_seconds; + uint i; + + DBUG_ENTER("TIME_to_gmt_sec"); + + /* We need this for correct leap seconds handling */ + if (t->second < SECS_PER_MIN) + saved_seconds= 0; + else + saved_seconds= t->second; + + /* + NOTE If we want to convert full my_time_t range without MySQL + restrictions we should catch overflow here somehow. + */ + + local_t= sec_since_epoch(t->year, t->month, t->day, + t->hour, t->minute, + saved_seconds ? 0 : t->second); + + /* We have at least one range */ + DBUG_ASSERT(sp->revcnt >= 1); + + if (local_t < sp->revts[0] || local_t > sp->revts[sp->revcnt]) + { + /* + This means that source time can't be represented as my_time_t due to + limited my_time_t range. + */ + DBUG_RETURN(0); + } + + /* binary search for our range */ + i= find_time_range(local_t, sp->revts, sp->revcnt); + + if (sp->revtis[i].rt_type) + { + /* + Oops! We are in spring time gap. + May be we should return error here? + Now we are returning my_time_t value corresponding to the + beginning of the gap. + */ + *in_dst_time_gap= 1; + DBUG_RETURN(sp->revts[i] - sp->revtis[i].rt_offset + saved_seconds); + } + else + DBUG_RETURN(local_t - sp->revtis[i].rt_offset + saved_seconds); +} + + +/* + End of elsie derived code. +*/ +#endif /* !defined(TZINFO2SQL) */ + + +#if !defined(TESTTIME) && !defined(TZINFO2SQL) + +/* + String with names of SYSTEM time zone. +*/ +static const String tz_SYSTEM_name("SYSTEM", 6, &my_charset_latin1); + + +/* + Instance of this class represents local time zone used on this system + (specified by TZ environment variable or via any other system mechanism). + It uses system functions (localtime_r, my_system_gmt_sec) for conversion + and is always available. Because of this it is used by default - if there + were no explicit time zone specified. On the other hand because of this + conversion methods provided by this class is significantly slower and + possibly less multi-threaded-friendly than corresponding Time_zone_db + methods so the latter should be preffered there it is possible. +*/ +class Time_zone_system : public Time_zone +{ +public: + virtual my_time_t TIME_to_gmt_sec(const TIME *t, + bool *in_dst_time_gap) const; + virtual void gmt_sec_to_TIME(TIME *tmp, my_time_t t) const; + virtual const String * get_name() const; +}; + + +/* + Converts local time in system time zone in TIME representation + to its my_time_t representation. + + SYNOPSIS + TIME_to_gmt_sec() + t - pointer to TIME structure with local time in + broken-down representation. + in_dst_time_gap - pointer to bool which is set to true if datetime + value passed doesn't really exist (i.e. falls into + spring time-gap) and is not touched otherwise. + + DESCRIPTION + This method uses system function (localtime_r()) for conversion + local time in system time zone in TIME structure to its my_time_t + representation. Unlike the same function for Time_zone_db class + it it won't handle unnormalized input properly. Still it will + return lowest possible my_time_t in case of ambiguity or if we + provide time corresponding to the time-gap. + + You should call init_time() function before using this function. 
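
    To show how the Time_zone interface as a whole is meant to be driven
    (a sketch only; taking the pointer from thd->variables.time_zone is an
    assumption based on how global_system_variables.time_zone is set in
    my_tz_init() below):

      Time_zone *tz= thd->variables.time_zone;           // or my_tz_SYSTEM
      bool in_gap= 0;
      my_time_t t= tz->TIME_to_gmt_sec(&ltime, &in_gap);  // local -> UTC seconds
      TIME local;
      tz->gmt_sec_to_TIME(&local, t);                     // UTC seconds -> local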
+ + RETURN VALUE + Corresponding my_time_t value or 0 in case of error +*/ +my_time_t +Time_zone_system::TIME_to_gmt_sec(const TIME *t, bool *in_dst_time_gap) const +{ + long not_used; + return my_system_gmt_sec(t, ¬_used, in_dst_time_gap); +} + + +/* + Converts time from UTC seconds since Epoch (my_time_t) representation + to system local time zone broken-down representation. + + SYNOPSIS + gmt_sec_to_TIME() + tmp - pointer to TIME structure to fill-in + t - my_time_t value to be converted + + NOTE + We assume that value passed to this function will fit into time_t range + supported by localtime_r. This conversion is putting restriction on + TIMESTAMP range in MySQL. If we can get rid of SYSTEM time zone at least + for interaction with client then we can extend TIMESTAMP range down to + the 1902 easily. +*/ +void +Time_zone_system::gmt_sec_to_TIME(TIME *tmp, my_time_t t) const +{ + struct tm tmp_tm; + time_t tmp_t= (time_t)t; + + localtime_r(&tmp_t, &tmp_tm); + localtime_to_TIME(tmp, &tmp_tm); + tmp->time_type= MYSQL_TIMESTAMP_DATETIME; +} + + +/* + Get name of time zone + + SYNOPSIS + get_name() + + RETURN VALUE + Name of time zone as String +*/ +const String * +Time_zone_system::get_name() const +{ + return &tz_SYSTEM_name; +} + + +/* + Instance of this class represents UTC time zone. It uses system gmtime_r + function for conversions and is always available. It is used only for + my_time_t -> TIME conversions in various UTC_... functions, it is not + intended for TIME -> my_time_t conversions and shouldn't be exposed to user. +*/ +class Time_zone_utc : public Time_zone +{ +public: + virtual my_time_t TIME_to_gmt_sec(const TIME *t, + bool *in_dst_time_gap) const; + virtual void gmt_sec_to_TIME(TIME *tmp, my_time_t t) const; + virtual const String * get_name() const; +}; + + +/* + Convert UTC time from TIME representation to its my_time_t representation. + + SYNOPSIS + TIME_to_gmt_sec() + t - pointer to TIME structure with local time + in broken-down representation. + in_dst_time_gap - pointer to bool which is set to true if datetime + value passed doesn't really exist (i.e. falls into + spring time-gap) and is not touched otherwise. + + DESCRIPTION + Since Time_zone_utc is used only internally for my_time_t -> TIME + conversions, this function of Time_zone interface is not implemented for + this class and should not be called. + + RETURN VALUE + 0 +*/ +my_time_t +Time_zone_utc::TIME_to_gmt_sec(const TIME *t, bool *in_dst_time_gap) const +{ + /* Should be never called */ + DBUG_ASSERT(0); + return 0; +} + + +/* + Converts time from UTC seconds since Epoch (my_time_t) representation + to broken-down representation (also in UTC). + + SYNOPSIS + gmt_sec_to_TIME() + tmp - pointer to TIME structure to fill-in + t - my_time_t value to be converted + + NOTE + See note for apropriate Time_zone_system method. +*/ +void +Time_zone_utc::gmt_sec_to_TIME(TIME *tmp, my_time_t t) const +{ + struct tm tmp_tm; + time_t tmp_t= (time_t)t; + gmtime_r(&tmp_t, &tmp_tm); + localtime_to_TIME(tmp, &tmp_tm); + tmp->time_type= MYSQL_TIMESTAMP_DATETIME; +} + + +/* + Get name of time zone + + SYNOPSIS + get_name() + + DESCRIPTION + Since Time_zone_utc is used only internally by SQL's UTC_* functions it + is not accessible directly, and hence this function of Time_zone + interface is not implemented for this class and should not be called. 
+ + RETURN VALUE + 0 +*/ +const String * +Time_zone_utc::get_name() const +{ + /* Should be never called */ + DBUG_ASSERT(0); + return 0; +} + + +/* + Instance of this class represents some time zone which is + described in mysql.time_zone family of tables. +*/ +class Time_zone_db : public Time_zone +{ +public: + Time_zone_db(TIME_ZONE_INFO *tz_info_arg, const String * tz_name_arg); + virtual my_time_t TIME_to_gmt_sec(const TIME *t, + bool *in_dst_time_gap) const; + virtual void gmt_sec_to_TIME(TIME *tmp, my_time_t t) const; + virtual const String * get_name() const; +private: + TIME_ZONE_INFO *tz_info; + const String *tz_name; +}; + + +/* + Initializes object representing time zone described by mysql.time_zone + tables. + + SYNOPSIS + Time_zone_db() + tz_info_arg - pointer to TIME_ZONE_INFO structure which is filled + according to db or other time zone description + (for example by my_tz_init()). + Several Time_zone_db instances can share one + TIME_ZONE_INFO structure. + tz_name_arg - name of time zone. +*/ +Time_zone_db::Time_zone_db(TIME_ZONE_INFO *tz_info_arg, + const String *tz_name_arg): + tz_info(tz_info_arg), tz_name(tz_name_arg) +{ +} + + +/* + Converts local time in time zone described from TIME + representation to its my_time_t representation. + + SYNOPSIS + TIME_to_gmt_sec() + t - pointer to TIME structure with local time + in broken-down representation. + in_dst_time_gap - pointer to bool which is set to true if datetime + value passed doesn't really exist (i.e. falls into + spring time-gap) and is not touched otherwise. + + DESCRIPTION + Please see ::TIME_to_gmt_sec for function description and + parameter restrictions. + + RETURN VALUE + Corresponding my_time_t value or 0 in case of error +*/ +my_time_t +Time_zone_db::TIME_to_gmt_sec(const TIME *t, bool *in_dst_time_gap) const +{ + return ::TIME_to_gmt_sec(t, tz_info, in_dst_time_gap); +} + + +/* + Converts time from UTC seconds since Epoch (my_time_t) representation + to local time zone described in broken-down representation. + + SYNOPSIS + gmt_sec_to_TIME() + tmp - pointer to TIME structure to fill-in + t - my_time_t value to be converted +*/ +void +Time_zone_db::gmt_sec_to_TIME(TIME *tmp, my_time_t t) const +{ + ::gmt_sec_to_TIME(tmp, t, tz_info); +} + + +/* + Get name of time zone + + SYNOPSIS + get_name() + + RETURN VALUE + Name of time zone as ASCIIZ-string +*/ +const String * +Time_zone_db::get_name() const +{ + return tz_name; +} + + +/* + Instance of this class represents time zone which + was specified as offset from UTC. +*/ +class Time_zone_offset : public Time_zone +{ +public: + Time_zone_offset(long tz_offset_arg); + virtual my_time_t TIME_to_gmt_sec(const TIME *t, + bool *in_dst_time_gap) const; + virtual void gmt_sec_to_TIME(TIME *tmp, my_time_t t) const; + virtual const String * get_name() const; + /* + This have to be public because we want to be able to access it from + my_offset_tzs_get_key() function + */ + long offset; +private: + /* Extra reserve because of snprintf */ + char name_buff[7+16]; + String name; +}; + + +/* + Initializes object representing time zone described by its offset from UTC. + + SYNOPSIS + Time_zone_offset() + tz_offset_arg - offset from UTC in seconds. + Positive for direction to east. +*/ +Time_zone_offset::Time_zone_offset(long tz_offset_arg): + offset(tz_offset_arg) +{ + uint hours= abs((int)(offset / SECS_PER_HOUR)); + uint minutes= abs((int)(offset % SECS_PER_HOUR / SECS_PER_MIN)); + ulong length= my_snprintf(name_buff, sizeof(name_buff), "%s%02d:%02d", + (offset>=0) ? 
"+" : "-", hours, minutes); + name.set(name_buff, length, &my_charset_latin1); +} + + +/* + Converts local time in time zone described as offset from UTC + from TIME representation to its my_time_t representation. + + SYNOPSIS + TIME_to_gmt_sec() + t - pointer to TIME structure with local time + in broken-down representation. + in_dst_time_gap - pointer to bool which should be set to true if + datetime value passed doesn't really exist + (i.e. falls into spring time-gap) and is not + touched otherwise. + It is not really used in this class. + + RETURN VALUE + Corresponding my_time_t value or 0 in case of error +*/ +my_time_t +Time_zone_offset::TIME_to_gmt_sec(const TIME *t, bool *in_dst_time_gap) const +{ + return sec_since_epoch(t->year, t->month, t->day, + t->hour, t->minute, t->second) - + offset; +} + + +/* + Converts time from UTC seconds since Epoch (my_time_t) representation + to local time zone described as offset from UTC and in broken-down + representation. + + SYNOPSIS + gmt_sec_to_TIME() + tmp - pointer to TIME structure to fill-in + t - my_time_t value to be converted +*/ +void +Time_zone_offset::gmt_sec_to_TIME(TIME *tmp, my_time_t t) const +{ + sec_to_TIME(tmp, t, offset); +} + + +/* + Get name of time zone + + SYNOPSIS + get_name() + + RETURN VALUE + Name of time zone as pointer to String object +*/ +const String * +Time_zone_offset::get_name() const +{ + return &name; +} + + +static Time_zone_utc tz_UTC; +static Time_zone_system tz_SYSTEM; + +Time_zone *my_tz_UTC= &tz_UTC; +Time_zone *my_tz_SYSTEM= &tz_SYSTEM; + +static HASH tz_names; +static HASH offset_tzs; +static MEM_ROOT tz_storage; + +/* + These mutex protects offset_tzs and tz_storage. + These protection needed only when we are trying to set + time zone which is specified as offset, and searching for existing + time zone in offset_tzs or creating if it didn't existed before in + tz_storage. So contention is low. +*/ +static pthread_mutex_t tz_LOCK; +static bool tz_inited= 0; + +/* + This two static variables are inteded for holding info about leap seconds + shared by all time zones. +*/ +static uint tz_leapcnt= 0; +static LS_INFO *tz_lsis= 0; + +/* + Shows whenever we have found time zone tables during start-up. + Used for avoiding of putting those tables to global table list + for queries that use time zone info. +*/ +static bool time_zone_tables_exist= 1; + + +typedef struct st_tz_names_entry: public Sql_alloc +{ + String name; + Time_zone *tz; +} TZ_NAMES_ENTRY; + + +/* + We are going to call both of these functions from C code so + they should obey C calling conventions. +*/ + +extern "C" byte* my_tz_names_get_key(TZ_NAMES_ENTRY *entry, uint *length, + my_bool not_used __attribute__((unused))) +{ + *length= entry->name.length(); + return (byte*) entry->name.ptr(); +} + +extern "C" byte* my_offset_tzs_get_key(Time_zone_offset *entry, uint *length, + my_bool not_used __attribute__((unused))) +{ + *length= sizeof(long); + return (byte*) &entry->offset; +} + + +/* + Prepare table list with time zone related tables from preallocated array. + + SYNOPSIS + tz_init_table_list() + tz_tabs - pointer to preallocated array of 4 TABLE_LIST objects. + + DESCRIPTION + This function prepares list of TABLE_LIST objects which can be used + for opening of time zone tables from preallocated array. 
+*/ + +void +tz_init_table_list(TABLE_LIST *tz_tabs) +{ + bzero(tz_tabs, sizeof(TABLE_LIST) * 4); + tz_tabs[0].alias= tz_tabs[0].real_name= (char*)"time_zone_name"; + tz_tabs[1].alias= tz_tabs[1].real_name= (char*)"time_zone"; + tz_tabs[2].alias= tz_tabs[2].real_name= (char*)"time_zone_transition_type"; + tz_tabs[3].alias= tz_tabs[3].real_name= (char*)"time_zone_transition"; + tz_tabs[0].next= tz_tabs+1; + tz_tabs[1].next= tz_tabs+2; + tz_tabs[2].next= tz_tabs+3; + tz_tabs[0].lock_type= tz_tabs[1].lock_type= tz_tabs[2].lock_type= + tz_tabs[3].lock_type= TL_READ; + tz_tabs[0].db= tz_tabs[1].db= tz_tabs[2].db= tz_tabs[3].db= (char *)"mysql"; +} + + +/* + Create table list with time zone related tables. + + SYNOPSIS + my_tz_get_table_list() + thd - current thread object + + DESCRIPTION + This function creates list of TABLE_LIST objects allocated in thd's + memroot, which can be used for opening of time zone tables. + + NOTE + my_tz_check_n_skip_implicit_tables() function depends on fact that + elements of list created are allocated as TABLE_LIST[4] array. + + RETURN VALUES + Returns pointer to first TABLE_LIST object, (could be 0 if time zone + tables don't exist) and &fake_time_zone_tables_list in case of error. +*/ + +TABLE_LIST * +my_tz_get_table_list(THD *thd) +{ + TABLE_LIST *tz_tabs; + + if (!time_zone_tables_exist) + return 0; + + if (!(tz_tabs= (TABLE_LIST *)thd->alloc(sizeof(TABLE_LIST) * 4))) + return &fake_time_zone_tables_list; + + tz_init_table_list(tz_tabs); + + return tz_tabs; +} + + +/* + Initialize time zone support infrastructure. + + SYNOPSIS + my_tz_init() + thd - current thread object + default_tzname - default time zone or 0 if none. + bootstrap - indicates whenever we are in bootstrap mode + + DESCRIPTION + This function will init memory structures needed for time zone support, + it will register mandatory SYSTEM time zone in them. It will try to open + mysql.time_zone* tables and load information about default time zone and + information which further will be shared among all time zones loaded. + If system tables with time zone descriptions don't exist it won't fail + (unless default_tzname is time zone from tables). If bootstrap parameter + is true then this routine assumes that we are in bootstrap mode and won't + load time zone descriptions unless someone specifies default time zone + which is supposedly stored in those tables. + It'll also set default time zone if it is specified. 
+ + RETURN VALUES + 0 - ok + 1 - Error +*/ +my_bool +my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap) +{ + THD *thd; + TABLE_LIST *tables= 0; + TABLE_LIST tables_buff[5]; + TABLE *table; + TZ_NAMES_ENTRY *tmp_tzname; + my_bool return_val= 1; + int res; + uint counter; + DBUG_ENTER("my_tz_init"); + + /* + To be able to run this from boot, we allocate a temporary THD + */ + if (!(thd= new THD)) + DBUG_RETURN(1); + thd->store_globals(); + + /* Init all memory structures that require explicit destruction */ + if (hash_init(&tz_names, &my_charset_latin1, 20, + 0, 0, (hash_get_key)my_tz_names_get_key, 0, 0)) + { + sql_print_error("Fatal error: OOM while initializing time zones"); + goto end; + } + if (hash_init(&offset_tzs, &my_charset_latin1, 26, 0, 0, + (hash_get_key)my_offset_tzs_get_key, 0, 0)) + { + sql_print_error("Fatal error: OOM while initializing time zones"); + hash_free(&tz_names); + goto end; + } + init_alloc_root(&tz_storage, 32 * 1024, 0); + VOID(pthread_mutex_init(&tz_LOCK, MY_MUTEX_INIT_FAST)); + tz_inited= 1; + + /* Add 'SYSTEM' time zone to tz_names hash */ + if (!(tmp_tzname= new (&tz_storage) TZ_NAMES_ENTRY())) + { + sql_print_error("Fatal error: OOM while initializing time zones"); + goto end_with_cleanup; + } + tmp_tzname->name.set("SYSTEM", 6, &my_charset_latin1); + tmp_tzname->tz= my_tz_SYSTEM; + if (my_hash_insert(&tz_names, (const byte *)tmp_tzname)) + { + sql_print_error("Fatal error: OOM while initializing time zones"); + goto end_with_cleanup; + } + + if (bootstrap) + { + /* If we are in bootstrap mode we should not load time zone tables */ + return_val= time_zone_tables_exist= 0; + goto end_with_setting_default_tz; + } + + /* + After this point all memory structures are inited and we even can live + without time zone description tables. Now try to load information about + leap seconds shared by all time zones. + */ + + thd->db= my_strdup("mysql",MYF(0)); + thd->db_length= 5; // Safety + bzero((char*) &tables_buff, sizeof(TABLE_LIST)); + tables_buff[0].alias= tables_buff[0].real_name= + (char*)"time_zone_leap_second"; + tables_buff[0].lock_type= TL_READ; + tables_buff[0].db= thd->db; + tables_buff[0].next= tables_buff + 1; + /* Fill TABLE_LIST for rest of the time zone describing tables */ + tz_init_table_list(tables_buff + 1); + + if (open_tables(thd, tables_buff, &counter) || + lock_tables(thd, tables_buff, counter)) + { + sql_print_warning("Can't open and lock time zone table: %s " + "trying to live without them", thd->net.last_error); + /* We will try emulate that everything is ok */ + return_val= time_zone_tables_exist= 0; + goto end_with_setting_default_tz; + } + tables= tables_buff + 1; + + /* + Now we are going to load leap seconds descriptions that are shared + between all time zones that use them. We are using index for getting + records in proper order. Since we share the same MEM_ROOT between + all time zones we just allocate enough memory for it first. + */ + if (!(tz_lsis= (LS_INFO*) alloc_root(&tz_storage, + sizeof(LS_INFO) * TZ_MAX_LEAPS))) + { + sql_print_error("Fatal error: Out of memory while loading " + "mysql.time_zone_leap_second table"); + goto end_with_close; + } + + table= tables_buff[0].table; + /* + It is OK to ignore ha_index_init()/ha_index_end() return values since + mysql.time_zone* tables are MyISAM and these operations always succeed + for MyISAM. 
+ */ + (void)table->file->ha_index_init(0); + tz_leapcnt= 0; + + res= table->file->index_first(table->record[0]); + + while (!res) + { + if (tz_leapcnt + 1 > TZ_MAX_LEAPS) + { + sql_print_error("Fatal error: While loading mysql.time_zone_leap_second" + " table: too much leaps"); + table->file->ha_index_end(); + goto end_with_close; + } + + tz_lsis[tz_leapcnt].ls_trans= (my_time_t)table->field[0]->val_int(); + tz_lsis[tz_leapcnt].ls_corr= (long)table->field[1]->val_int(); + + tz_leapcnt++; + + DBUG_PRINT("info", + ("time_zone_leap_second table: tz_leapcnt=%u tt_time=%lld offset=%ld", + tz_leapcnt, (longlong)tz_lsis[tz_leapcnt-1].ls_trans, + tz_lsis[tz_leapcnt-1].ls_corr)); + + res= table->file->index_next(table->record[0]); + } + + (void)table->file->ha_index_end(); + + if (res != HA_ERR_END_OF_FILE) + { + sql_print_error("Fatal error: Error while loading " + "mysql.time_zone_leap_second table"); + goto end_with_close; + } + + /* + Loading of info about leap seconds succeeded + */ + + return_val= 0; + + +end_with_setting_default_tz: + /* If we have default time zone try to load it */ + if (default_tzname) + { + String tmp_tzname(default_tzname, &my_charset_latin1); + if (!(global_system_variables.time_zone= my_tz_find(&tmp_tzname, tables))) + { + sql_print_error("Fatal error: Illegal or unknown default time zone '%s'", + default_tzname); + return_val= 1; + } + } + +end_with_close: + thd->version--; /* Force close to free memory */ + close_thread_tables(thd); + +end_with_cleanup: + + /* if there were error free time zone describing structs */ + if (return_val) + my_tz_free(); +end: + delete thd; + if (org_thd) + org_thd->store_globals(); /* purecov: inspected */ + else + { + /* Remember that we don't have a THD */ + my_pthread_setspecific_ptr(THR_THD, 0); + my_pthread_setspecific_ptr(THR_MALLOC, 0); + } + DBUG_RETURN(return_val); +} + + +/* + Free resources used by time zone support infrastructure. + + SYNOPSIS + my_tz_free() +*/ + +void my_tz_free() +{ + if (tz_inited) + { + tz_inited= 0; + VOID(pthread_mutex_destroy(&tz_LOCK)); + hash_free(&offset_tzs); + hash_free(&tz_names); + free_root(&tz_storage, MYF(0)); + } +} + + +/* + Load time zone description from system tables. + + SYNOPSIS + tz_load_from_open_tables() + tz_name - name of time zone that should be loaded. + tz_tables - list of tables from which time zone description + should be loaded + + DESCRIPTION + This function will try to load information about time zone specified + from the list of the already opened and locked tables (first table in + tz_tables should be time_zone_name, next time_zone, then + time_zone_transition_type and time_zone_transition should be last). + It will also update information in hash used for time zones lookup. + + RETURN VALUES + Returns pointer to newly created Time_zone object or 0 in case of error. 
+ +*/ + +static Time_zone* +tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) +{ + TABLE *table= 0; + TIME_ZONE_INFO *tz_info; + TZ_NAMES_ENTRY *tmp_tzname; + Time_zone *return_val= 0; + int res; + uint tzid, ttid; + my_time_t ttime; + char buff[MAX_FIELD_WIDTH]; + String abbr(buff, sizeof(buff), &my_charset_latin1); + char *alloc_buff, *tz_name_buff; + /* + Temporary arrays that are used for loading of data for filling + TIME_ZONE_INFO structure + */ + my_time_t ats[TZ_MAX_TIMES]; + unsigned char types[TZ_MAX_TIMES]; + TRAN_TYPE_INFO ttis[TZ_MAX_TYPES]; +#ifdef ABBR_ARE_USED + char chars[max(TZ_MAX_CHARS + 1, (2 * (MY_TZNAME_MAX + 1)))]; +#endif + + DBUG_ENTER("tz_load_from_open_tables"); + + + /* Prepare tz_info for loading also let us make copy of time zone name */ + if (!(alloc_buff= alloc_root(&tz_storage, sizeof(TIME_ZONE_INFO) + + tz_name->length() + 1))) + { + sql_print_error("Out of memory while loading time zone description"); + return 0; + } + tz_info= (TIME_ZONE_INFO *)alloc_buff; + bzero(tz_info, sizeof(TIME_ZONE_INFO)); + tz_name_buff= alloc_buff + sizeof(TIME_ZONE_INFO); + /* + By writing zero to the end we guarantee that we can call ptr() + instead of c_ptr() for time zone name. + */ + strmake(tz_name_buff, tz_name->ptr(), tz_name->length()); + + /* + Let us find out time zone id by its name (there is only one index + and it is specifically for this purpose). + */ + table= tz_tables->table; + tz_tables= tz_tables->next; + table->field[0]->store(tz_name->ptr(), tz_name->length(), &my_charset_latin1); + /* + It is OK to ignore ha_index_init()/ha_index_end() return values since + mysql.time_zone* tables are MyISAM and these operations always succeed + for MyISAM. + */ + (void)table->file->ha_index_init(0); + + if (table->file->index_read(table->record[0], (byte*)table->field[0]->ptr, + 0, HA_READ_KEY_EXACT)) + { +#ifdef EXTRA_DEBUG + /* + Most probably user has mistyped time zone name, so no need to bark here + unless we need it for debugging. + */ + sql_print_error("Can't find description of time zone '%s'", tz_name_buff); +#endif + goto end; + } + + tzid= (uint)table->field[1]->val_int(); + + (void)table->file->ha_index_end(); + + /* + Now we need to lookup record in mysql.time_zone table in order to + understand whenever this timezone uses leap seconds (again we are + using the only index in this table). + */ + table= tz_tables->table; + tz_tables= tz_tables->next; + table->field[0]->store((longlong)tzid); + (void)table->file->ha_index_init(0); + + if (table->file->index_read(table->record[0], (byte*)table->field[0]->ptr, + 0, HA_READ_KEY_EXACT)) + { + sql_print_error("Can't find description of time zone '%u'", tzid); + goto end; + } + + /* If Uses_leap_seconds == 'Y' */ + if (table->field[1]->val_int() == 1) + { + tz_info->leapcnt= tz_leapcnt; + tz_info->lsis= tz_lsis; + } + + (void)table->file->ha_index_end(); + + /* + Now we will iterate through records for out time zone in + mysql.time_zone_transition_type table. Because we want records + only for our time zone guess what are we doing? + Right - using special index. + */ + table= tz_tables->table; + tz_tables= tz_tables->next; + table->field[0]->store((longlong)tzid); + (void)table->file->ha_index_init(0); + + // FIXME Is there any better approach than explicitly specifying 4 ??? 
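+  /*
+    Note: the constant 4 passed to index_read() below is presumably the
+    byte length of the searched key prefix, i.e. the 4-byte Time_zone_id
+    key part.
+  */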
+  res= table->file->index_read(table->record[0], (byte*)table->field[0]->ptr,
+                               4, HA_READ_KEY_EXACT);
+  while (!res)
+  {
+    ttid= (uint)table->field[1]->val_int();
+
+    if (ttid >= TZ_MAX_TYPES)
+    {
+      sql_print_error("Error while loading time zone description from "
+                      "mysql.time_zone_transition_type table: "
+                      "transition type id is too large");
+      goto end;
+    }
+
+    ttis[ttid].tt_gmtoff= (long)table->field[2]->val_int();
+    ttis[ttid].tt_isdst= (table->field[3]->val_int() > 0);
+
+#ifdef ABBR_ARE_USED
+    // FIXME should we do something with duplicates here ?
+    table->field[4]->val_str(&abbr, &abbr);
+    if (tz_info->charcnt + abbr.length() + 1 > sizeof(chars))
+    {
+      sql_print_error("Error while loading time zone description from "
+                      "mysql.time_zone_transition_type table: not enough "
+                      "room for abbreviations");
+      goto end;
+    }
+    ttis[ttid].tt_abbrind= tz_info->charcnt;
+    memcpy(chars + tz_info->charcnt, abbr.ptr(), abbr.length());
+    tz_info->charcnt+= abbr.length();
+    chars[tz_info->charcnt]= 0;
+    tz_info->charcnt++;
+
+    DBUG_PRINT("info",
+      ("time_zone_transition_type table: tz_id=%u tt_id=%u tt_gmtoff=%ld "
+       "abbr='%s' tt_isdst=%u", tzid, ttid, ttis[ttid].tt_gmtoff,
+       chars + ttis[ttid].tt_abbrind, ttis[ttid].tt_isdst));
+#else
+    DBUG_PRINT("info",
+      ("time_zone_transition_type table: tz_id=%u tt_id=%u tt_gmtoff=%ld "
+       "tt_isdst=%u", tzid, ttid, ttis[ttid].tt_gmtoff, ttis[ttid].tt_isdst));
+#endif
+
+    /* ttid is increasing because we are reading using index */
+    DBUG_ASSERT(ttid >= tz_info->typecnt);
+
+    tz_info->typecnt= ttid + 1;
+
+    res= table->file->index_next_same(table->record[0],
+                                      (byte*)table->field[0]->ptr, 4);
+  }
+
+  if (res != HA_ERR_END_OF_FILE)
+  {
+    sql_print_error("Error while loading time zone description from "
+                    "mysql.time_zone_transition_type table");
+    goto end;
+  }
+
+  (void)table->file->ha_index_end();
+
+
+  /*
+    Finally we do the same thing for the records in the
+    mysql.time_zone_transition table. Here we additionally need the
+    records in ascending order, which the index scan also gives us.
+  */
+  table= tz_tables->table;
+  table->field[0]->store((longlong)tzid);
+  (void)table->file->ha_index_init(0);
+
+  // FIXME Is there any better approach than explicitly specifying 4 ???
+  res= table->file->index_read(table->record[0], (byte*)table->field[0]->ptr,
+                               4, HA_READ_KEY_EXACT);
+  while (!res)
+  {
+    ttime= (my_time_t)table->field[1]->val_int();
+    ttid= (uint)table->field[2]->val_int();
+
+    if (tz_info->timecnt + 1 > TZ_MAX_TIMES)
+    {
+      sql_print_error("Error while loading time zone description from "
+                      "mysql.time_zone_transition table: "
+                      "too many transitions");
+      goto end;
+    }
+    if (ttid + 1 > tz_info->typecnt)
+    {
+      sql_print_error("Error while loading time zone description from "
+                      "mysql.time_zone_transition table: "
+                      "bad transition type id");
+      goto end;
+    }
+
+    ats[tz_info->timecnt]= ttime;
+    types[tz_info->timecnt]= ttid;
+    tz_info->timecnt++;
+
+    DBUG_PRINT("info",
+      ("time_zone_transition table: tz_id=%u tt_time=%lld tt_id=%u",
+       tzid, (longlong)ttime, ttid));
+
+    res= table->file->index_next_same(table->record[0],
+                                      (byte*)table->field[0]->ptr, 4);
+  }
+
+  /*
+    We have to allow HA_ERR_KEY_NOT_FOUND because some time zones,
+    for example UTC, have no transitions.
+ */ + if (res != HA_ERR_END_OF_FILE && res != HA_ERR_KEY_NOT_FOUND) + { + sql_print_error("Error while loading time zone description from " + "mysql.time_zone_transition table"); + goto end; + } + + (void)table->file->ha_index_end(); + table= 0; + + /* + Now we will allocate memory and init TIME_ZONE_INFO structure. + */ + if (!(alloc_buff= alloc_root(&tz_storage, + ALIGN_SIZE(sizeof(my_time_t) * + tz_info->timecnt) + + ALIGN_SIZE(tz_info->timecnt) + +#ifdef ABBR_ARE_USED + ALIGN_SIZE(tz_info->charcnt) + +#endif + sizeof(TRAN_TYPE_INFO) * tz_info->typecnt))) + { + sql_print_error("Out of memory while loading time zone description"); + goto end; + } + + + tz_info->ats= (my_time_t *)alloc_buff; + memcpy(tz_info->ats, ats, tz_info->timecnt * sizeof(my_time_t)); + alloc_buff+= ALIGN_SIZE(sizeof(my_time_t) * tz_info->timecnt); + tz_info->types= (unsigned char *)alloc_buff; + memcpy(tz_info->types, types, tz_info->timecnt); + alloc_buff+= ALIGN_SIZE(tz_info->timecnt); +#ifdef ABBR_ARE_USED + tz_info->chars= alloc_buff; + memcpy(tz_info->chars, chars, tz_info->charcnt); + alloc_buff+= ALIGN_SIZE(tz_info->charcnt); +#endif + tz_info->ttis= (TRAN_TYPE_INFO *)alloc_buff; + memcpy(tz_info->ttis, ttis, tz_info->typecnt * sizeof(TRAN_TYPE_INFO)); + + /* + Let us check how correct our time zone description and build + reversed map. We don't check for tz->timecnt < 1 since it ok for GMT. + */ + if (tz_info->typecnt < 1) + { + sql_print_error("loading time zone without transition types"); + goto end; + } + if (prepare_tz_info(tz_info, &tz_storage)) + { + sql_print_error("Unable to build mktime map for time zone"); + goto end; + } + + + if (!(tmp_tzname= new (&tz_storage) TZ_NAMES_ENTRY()) || + !(tmp_tzname->tz= new (&tz_storage) Time_zone_db(tz_info, + &(tmp_tzname->name))) || + (tmp_tzname->name.set(tz_name_buff, tz_name->length(), + &my_charset_latin1), + my_hash_insert(&tz_names, (const byte *)tmp_tzname))) + { + sql_print_error("Out of memory while loading time zone"); + goto end; + } + + /* + Loading of time zone succeeded + */ + return_val= tmp_tzname->tz; + +end: + + if (table) + (void)table->file->ha_index_end(); + + DBUG_RETURN(return_val); +} + + +/* + Parse string that specifies time zone as offset from UTC. + + SYNOPSIS + str_to_offset() + str - pointer to string which contains offset + length - length of string + offset - out parameter for storing found offset in seconds. + + DESCRIPTION + This function parses string which contains time zone offset + in form similar to '+10:00' and converts found value to + seconds from UTC form (east is positive). 
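+
+  EXAMPLE
+    '+10:00' is parsed as an offset of 36000 seconds and '-05:30' as
+    -19800 seconds (east of UTC is positive).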
+ + RETURN VALUE + 0 - Ok + 1 - String doesn't contain valid time zone offset +*/ +my_bool +str_to_offset(const char *str, uint length, long *offset) +{ + const char *end= str + length; + my_bool negative; + ulong number_tmp; + long offset_tmp; + + if (length < 4) + return 1; + + if (*str == '+') + negative= 0; + else if (*str == '-') + negative= 1; + else + return 1; + str++; + + number_tmp= 0; + + while (str < end && my_isdigit(&my_charset_latin1, *str)) + { + number_tmp= number_tmp*10 + *str - '0'; + str++; + } + + if (str + 1 >= end || *str != ':') + return 1; + str++; + + offset_tmp = number_tmp * MINS_PER_HOUR; number_tmp= 0; + + while (str < end && my_isdigit(&my_charset_latin1, *str)) + { + number_tmp= number_tmp * 10 + *str - '0'; + str++; + } + + if (str != end) + return 1; + + offset_tmp= (offset_tmp + number_tmp) * SECS_PER_MIN; + + if (negative) + offset_tmp= -offset_tmp; + + /* + Check if offset is in range prescribed by standard + (from -12:59 to 13:00). + */ + + if (number_tmp > 59 || offset_tmp < -13 * SECS_PER_HOUR + 1 || + offset_tmp > 13 * SECS_PER_HOUR) + return 1; + + *offset= offset_tmp; + + return 0; +} + + +/* + Get Time_zone object for specified time zone. + + SYNOPSIS + my_tz_find() + name - time zone specification + tz_tables - list of opened'n'locked time zone describing tables + + DESCRIPTION + This function checks if name is one of time zones described in db, + predefined SYSTEM time zone or valid time zone specification as + offset from UTC (In last case it will create proper Time_zone_offset + object if there were not any.). If name is ok it returns corresponding + Time_zone object. + + Clients of this function are not responsible for releasing resources + occupied by returned Time_zone object so they can just forget pointers + to Time_zone object if they are not needed longer. + + Other important property of this function: if some Time_zone found once + it will be for sure found later, so this function can also be used for + checking if proper Time_zone object exists (and if there will be error + it will be reported during first call). + + If name pointer is 0 then this function returns 0 (this allows to pass 0 + values as parameter without additional external check and this property + is used by @@time_zone variable handling code). + + It will perform lookup in system tables (mysql.time_zone*) if needed + using tz_tables as list of already opened tables (for info about this + list look at tz_load_from_open_tables() description). It won't perform + such lookup if no time zone describing tables were found during server + start up. + + RETURN VALUE + Pointer to corresponding Time_zone object. 0 - in case of bad time zone + specification or other error. + +*/ +Time_zone * +my_tz_find(const String * name, TABLE_LIST *tz_tables) +{ + TZ_NAMES_ENTRY *tmp_tzname; + Time_zone *result_tz= 0; + long offset; + + DBUG_ENTER("my_tz_find"); + DBUG_PRINT("enter", ("time zone name='%s'", + name ? 
((String *)name)->c_ptr() : "NULL")); + + DBUG_ASSERT(!time_zone_tables_exist || tz_tables); + + if (!name) + DBUG_RETURN(0); + + VOID(pthread_mutex_lock(&tz_LOCK)); + + if (!str_to_offset(name->ptr(), name->length(), &offset)) + { + + if (!(result_tz= (Time_zone_offset *)hash_search(&offset_tzs, + (const byte *)&offset, + sizeof(long)))) + { + DBUG_PRINT("info", ("Creating new Time_zone_offset object")); + + if (!(result_tz= new (&tz_storage) Time_zone_offset(offset)) || + my_hash_insert(&offset_tzs, (const byte *) result_tz)) + { + result_tz= 0; + sql_print_error("Fatal error: Out of memory " + "while setting new time zone"); + } + } + } + else + { + result_tz= 0; + if ((tmp_tzname= (TZ_NAMES_ENTRY *)hash_search(&tz_names, + (const byte *)name->ptr(), + name->length()))) + result_tz= tmp_tzname->tz; + else if (time_zone_tables_exist) + result_tz= tz_load_from_open_tables(name, tz_tables); + } + + VOID(pthread_mutex_unlock(&tz_LOCK)); + + DBUG_RETURN(result_tz); +} + +#endif /* !defined(TESTTIME) && !defined(TZINFO2SQL) */ + + +#ifdef TZINFO2SQL +/* + This code belongs to mysql_tzinfo_to_sql converter command line utility. + This utility should be used by db admin for populating mysql.time_zone + tables. +*/ + + +/* + Print info about time zone described by TIME_ZONE_INFO struct as + SQL statements populating mysql.time_zone* tables. + + SYNOPSIS + print_tz_as_sql() + tz_name - name of time zone + sp - structure describing time zone +*/ +void +print_tz_as_sql(const char* tz_name, const TIME_ZONE_INFO *sp) +{ + uint i; + + /* Here we assume that all time zones have same leap correction tables */ + printf("INSERT INTO time_zone (Use_leap_seconds) VALUES ('%s');\n", + sp->leapcnt ? "Y" : "N"); + printf("SET @time_zone_id= LAST_INSERT_ID();\n"); + printf("INSERT INTO time_zone_name (Name, Time_zone_id) VALUES \ +('%s', @time_zone_id);\n", tz_name); + + if (sp->timecnt) + { + printf("INSERT INTO time_zone_transition \ +(Time_zone_id, Transition_time, Transition_type_id) VALUES\n"); + for (i= 0; i < sp->timecnt; i++) + printf("%s(@time_zone_id, %ld, %u)\n", (i == 0 ? " " : ","), sp->ats[i], + (uint)sp->types[i]); + printf(";\n"); + } + + printf("INSERT INTO time_zone_transition_type \ +(Time_zone_id, Transition_type_id, Offset, Is_DST, Abbreviation) VALUES\n"); + + for (i= 0; i < sp->typecnt; i++) + printf("%s(@time_zone_id, %u, %ld, %d, '%s')\n", (i == 0 ? " " : ","), i, + sp->ttis[i].tt_gmtoff, sp->ttis[i].tt_isdst, + sp->chars + sp->ttis[i].tt_abbrind); + printf(";\n"); +} + + +/* + Print info about leap seconds in time zone as SQL statements + populating mysql.time_zone_leap_second table. + + SYNOPSIS + print_tz_leaps_as_sql() + sp - structure describing time zone +*/ +void +print_tz_leaps_as_sql(const TIME_ZONE_INFO *sp) +{ + uint i; + + /* + We are assuming that there are only one list of leap seconds + For all timezones. + */ + printf("TRUNCATE TABLE time_zone_leap_second;\n"); + + if (sp->leapcnt) + { + printf("INSERT INTO time_zone_leap_second \ +(Transition_time, Correction) VALUES\n"); + for (i= 0; i < sp->leapcnt; i++) + printf("%s(%ld, %ld)\n", (i == 0 ? " " : ","), + sp->lsis[i].ls_trans, sp->lsis[i].ls_corr); + printf(";\n"); + } + + printf("ALTER TABLE time_zone_leap_second ORDER BY Transition_time;\n"); +} + + +/* + Some variables used as temporary or as parameters + in recursive scan_tz_dir() code. 
+*/ +TIME_ZONE_INFO tz_info; +MEM_ROOT tz_storage; +char fullname[FN_REFLEN + 1]; +char *root_name_end; + + +/* + Recursively scan zoneinfo directory and print all found time zone + descriptions as SQL. + + SYNOPSIS + scan_tz_dir() + name_end - pointer to end of path to directory to be searched. + + DESCRIPTION + This auxiliary recursive function also uses several global + variables as in parameters and for storing temporary values. + + fullname - path to directory that should be scanned. + root_name_end - pointer to place in fullname where part with + path to initial directory ends. + current_tz_id - last used time zone id + + RETURN VALUE + 0 - Ok, 1 - Fatal error + +*/ +my_bool +scan_tz_dir(char * name_end) +{ + MY_DIR *cur_dir; + char *name_end_tmp; + uint i; + + if (!(cur_dir= my_dir(fullname, MYF(MY_WANT_STAT)))) + return 1; + + name_end= strmake(name_end, "/", FN_REFLEN - (name_end - fullname)); + + for (i= 0; i < cur_dir->number_off_files; i++) + { + if (cur_dir->dir_entry[i].name[0] != '.') + { + name_end_tmp= strmake(name_end, cur_dir->dir_entry[i].name, + FN_REFLEN - (name_end - fullname)); + + if (MY_S_ISDIR(cur_dir->dir_entry[i].mystat->st_mode)) + { + if (scan_tz_dir(name_end_tmp)) + { + my_dirend(cur_dir); + return 1; + } + } + else if (MY_S_ISREG(cur_dir->dir_entry[i].mystat->st_mode)) + { + init_alloc_root(&tz_storage, 32768, 0); + if (!tz_load(fullname, &tz_info, &tz_storage)) + print_tz_as_sql(root_name_end + 1, &tz_info); + else + fprintf(stderr, + "Warning: Unable to load '%s' as time zone. Skipping it.\n", + fullname); + free_root(&tz_storage, MYF(0)); + } + else + fprintf(stderr, "Warning: '%s' is not regular file or directory\n", + fullname); + } + } + + my_dirend(cur_dir); + + return 0; +} + + +int +main(int argc, char **argv) +{ +#ifndef __NETWARE__ + MY_INIT(argv[0]); + + if (argc != 2 && argc != 3) + { + fprintf(stderr, "Usage:\n"); + fprintf(stderr, " %s timezonedir\n", argv[0]); + fprintf(stderr, " %s timezonefile timezonename\n", argv[0]); + fprintf(stderr, " %s --leap timezonefile\n", argv[0]); + return 1; + } + + if (argc == 2) + { + root_name_end= strmake(fullname, argv[1], FN_REFLEN); + + printf("TRUNCATE TABLE time_zone;\n"); + printf("TRUNCATE TABLE time_zone_name;\n"); + printf("TRUNCATE TABLE time_zone_transition;\n"); + printf("TRUNCATE TABLE time_zone_transition_type;\n"); + + if (scan_tz_dir(root_name_end)) + { + fprintf(stderr, "There were fatal errors during processing " + "of zoneinfo directory\n"); + return 1; + } + + printf("ALTER TABLE time_zone_transition " + "ORDER BY Time_zone_id, Transition_time;\n"); + printf("ALTER TABLE time_zone_transition_type " + "ORDER BY Time_zone_id, Transition_type_id;\n"); + } + else + { + init_alloc_root(&tz_storage, 32768, 0); + + if (strcmp(argv[1], "--leap") == 0) + { + if (tz_load(argv[2], &tz_info, &tz_storage)) + { + fprintf(stderr, "Problems with zoneinfo file '%s'\n", argv[2]); + return 1; + } + print_tz_leaps_as_sql(&tz_info); + } + else + { + if (tz_load(argv[1], &tz_info, &tz_storage)) + { + fprintf(stderr, "Problems with zoneinfo file '%s'\n", argv[2]); + return 1; + } + print_tz_as_sql(argv[2], &tz_info); + } + + free_root(&tz_storage, MYF(0)); + } + +#else + fprintf(stderr, "This tool has not been ported to NetWare\n"); +#endif /* __NETWARE__ */ + + return 0; +} + +#endif /* defined(TZINFO2SQL) */ + + +#ifdef TESTTIME + +/* + Some simple brute-force test wich allowed to catch a pair of bugs. + Also can provide interesting facts about system's time zone support + implementation. 
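+
+  The test below loads the 'MET' zone description from TZDIR and
+  cross-checks gmt_sec_to_TIME()/TIME_to_gmt_sec() against the system
+  localtime_r()/mktime() over several time_t and broken-down time ranges.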
+*/ + +#ifndef CHAR_BIT +#define CHAR_BIT 8 +#endif + +#ifndef TYPE_BIT +#define TYPE_BIT(type) (sizeof (type) * CHAR_BIT) +#endif + +#ifndef TYPE_SIGNED +#define TYPE_SIGNED(type) (((type) -1) < 0) +#endif + +my_bool +is_equal_TIME_tm(const TIME* time_arg, const struct tm * tm_arg) +{ + return (time_arg->year == (uint)tm_arg->tm_year+TM_YEAR_BASE) && + (time_arg->month == (uint)tm_arg->tm_mon+1) && + (time_arg->day == (uint)tm_arg->tm_mday) && + (time_arg->hour == (uint)tm_arg->tm_hour) && + (time_arg->minute == (uint)tm_arg->tm_min) && + (time_arg->second == (uint)tm_arg->tm_sec) && + time_arg->second_part == 0; +} + + +int +main(int argc, char **argv) +{ + my_bool localtime_negative; + TIME_ZONE_INFO tz_info; + struct tm tmp; + TIME time_tmp; + time_t t, t1, t2; + char fullname[FN_REFLEN+1]; + char *str_end; + long not_used; + bool not_used_2; + MEM_ROOT tz_storage; + + MY_INIT(argv[0]); + + init_alloc_root(&tz_storage, 32768, 0); + + /* let us set some well known timezone */ + setenv("TZ", "MET", 1); + tzset(); + + /* Some initial time zone related system info */ + printf("time_t: %s %u bit\n", TYPE_SIGNED(time_t) ? "signed" : "unsigned", + (uint)TYPE_BIT(time_t)); + if (TYPE_SIGNED(time_t)) + { + t= -100; + localtime_negative= test(localtime_r(&t, &tmp) != 0); + printf("localtime_r %s negative params \ + (time_t=%d is %d-%d-%d %d:%d:%d)\n", + (localtime_negative ? "supports" : "doesn't support"), (int)t, + TM_YEAR_BASE + tmp.tm_year, tmp.tm_mon + 1, tmp.tm_mday, + tmp.tm_hour, tmp.tm_min, tmp.tm_sec); + + printf("mktime %s negative results (%d)\n", + (t == mktime(&tmp) ? "doesn't support" : "supports"), + (int)mktime(&tmp)); + } + + tmp.tm_year= 103; tmp.tm_mon= 2; tmp.tm_mday= 30; + tmp.tm_hour= 2; tmp.tm_min= 30; tmp.tm_sec= 0; tmp.tm_isdst= -1; + t= mktime(&tmp); + printf("mktime returns %s for spring time gap (%d)\n", + (t != (time_t)-1 ? "something" : "error"), (int)t); + + tmp.tm_year= 103; tmp.tm_mon= 8; tmp.tm_mday= 1; + tmp.tm_hour= 0; tmp.tm_min= 0; tmp.tm_sec= 0; tmp.tm_isdst= 0; + t= mktime(&tmp); + printf("mktime returns %s for non existing date (%d)\n", + (t != (time_t)-1 ? "something" : "error"), (int)t); + + tmp.tm_year= 103; tmp.tm_mon= 8; tmp.tm_mday= 1; + tmp.tm_hour= 25; tmp.tm_min=0; tmp.tm_sec=0; tmp.tm_isdst=1; + t= mktime(&tmp); + printf("mktime %s unnormalized input (%d)\n", + (t != (time_t)-1 ? "handles" : "doesn't handle"), (int)t); + + tmp.tm_year= 103; tmp.tm_mon= 9; tmp.tm_mday= 26; + tmp.tm_hour= 0; tmp.tm_min= 30; tmp.tm_sec= 0; tmp.tm_isdst= 1; + mktime(&tmp); + tmp.tm_hour= 2; tmp.tm_isdst= -1; + t= mktime(&tmp); + tmp.tm_hour= 4; tmp.tm_isdst= 0; + mktime(&tmp); + tmp.tm_hour= 2; tmp.tm_isdst= -1; + t1= mktime(&tmp); + printf("mktime is %s (%d %d)\n", + (t == t1 ? 
"determenistic" : "is non-determenistic"), + (int)t, (int)t1); + + /* Let us load time zone description */ + str_end= strmake(fullname, TZDIR, FN_REFLEN); + strmake(str_end, "/MET", FN_REFLEN - (str_end - fullname)); + + if (tz_load(fullname, &tz_info, &tz_storage)) + { + printf("Unable to load time zone info from '%s'\n", fullname); + free_root(&tz_storage, MYF(0)); + return 1; + } + + printf("Testing our implementation\n"); + + if (TYPE_SIGNED(time_t) && localtime_negative) + { + for (t= -40000; t < 20000; t++) + { + localtime_r(&t, &tmp); + gmt_sec_to_TIME(&time_tmp, (my_time_t)t, &tz_info); + if (!is_equal_TIME_tm(&time_tmp, &tmp)) + { + printf("Problem with negative time_t = %d\n", (int)t); + free_root(&tz_storage, MYF(0)); + return 1; + } + } + printf("gmt_sec_to_TIME = localtime for time_t in [-40000,20000) range\n"); + } + + for (t= 1000000000; t < 1100000000; t+= 13) + { + localtime_r(&t,&tmp); + gmt_sec_to_TIME(&time_tmp, (my_time_t)t, &tz_info); + + if (!is_equal_TIME_tm(&time_tmp, &tmp)) + { + printf("Problem with time_t = %d\n", (int)t); + free_root(&tz_storage, MYF(0)); + return 1; + } + } + printf("gmt_sec_to_TIME = localtime for time_t in [1000000000,1100000000) range\n"); + + init_time(); + + /* + Be careful here! my_system_gmt_sec doesn't fully handle unnormalized + dates. + */ + for (time_tmp.year= 1980; time_tmp.year < 2010; time_tmp.year++) + for (time_tmp.month= 1; time_tmp.month < 13; time_tmp.month++) + for (time_tmp.day= 1; + time_tmp.day < mon_lengths[isleap(time_tmp.year)][time_tmp.month-1]; + time_tmp.day++) + for (time_tmp.hour= 0; time_tmp.hour < 24; time_tmp.hour++) + for (time_tmp.minute= 0; time_tmp.minute < 60; time_tmp.minute+= 5) + for (time_tmp.second=0; time_tmp.second<60; time_tmp.second+=25) + { + t= (time_t)my_system_gmt_sec(&time_tmp, ¬_used, ¬_used_2); + t1= (time_t)TIME_to_gmt_sec(&time_tmp, &tz_info, ¬_used_2); + if (t != t1) + { + /* + We need special handling during autumn since my_system_gmt_sec + prefers greater time_t values (in MET) for ambiguity. + And BTW that is a bug which should be fixed !!! + */ + tmp.tm_year= time_tmp.year - TM_YEAR_BASE; + tmp.tm_mon= time_tmp.month - 1; + tmp.tm_mday= time_tmp.day; + tmp.tm_hour= time_tmp.hour; + tmp.tm_min= time_tmp.minute; + tmp.tm_sec= time_tmp.second; + tmp.tm_isdst= 1; + + t2= mktime(&tmp); + + if (t1 == t2) + continue; + + printf("Problem: %u/%u/%u %u:%u:%u with times t=%d, t1=%d\n", + time_tmp.year, time_tmp.month, time_tmp.day, + time_tmp.hour, time_tmp.minute, time_tmp.second, + (int)t,(int)t1); + + free_root(&tz_storage, MYF(0)); + return 1; + } + } + + printf("TIME_to_gmt_sec = my_system_gmt_sec for test range\n"); + + free_root(&tz_storage, MYF(0)); + return 0; +} + +#endif /* defined(TESTTIME) */ diff --git a/sql/tztime.h b/sql/tztime.h new file mode 100644 index 00000000000..e1ff71b6703 --- /dev/null +++ b/sql/tztime.h @@ -0,0 +1,104 @@ +/* Copyright (C) 2004 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+#ifdef USE_PRAGMA_INTERFACE
+#pragma interface			/* gcc class interface */
+#endif
+
+#if !defined(TESTTIME) && !defined(TZINFO2SQL)
+
+/*
+  This class represents an abstract time zone and provides the basic
+  interface for TIME <-> my_time_t conversion.
+  Actual time zones, whether specified in the DB, given as an offset or
+  based on system functions, are its descendants.
+*/
+class Time_zone: public Sql_alloc
+{
+public:
+  /*
+    Converts local time in broken-down TIME representation to
+    my_time_t (UTC seconds since Epoch) representation.
+    Returns 0 in case of error. Sets in_dst_time_gap to true if the date
+    provided falls into the spring time-gap (and leaves it untouched
+    otherwise).
+  */
+  virtual my_time_t TIME_to_gmt_sec(const TIME *t,
+                                    bool *in_dst_time_gap) const = 0;
+  /*
+    Converts time in my_time_t representation to local time in
+    broken-down TIME representation.
+  */
+  virtual void gmt_sec_to_TIME(TIME *tmp, my_time_t t) const = 0;
+  /*
+    Because the String returned by get_name() is const, the time zone
+    name has to be zero-terminated already, so that String::ptr() can be
+    used instead of c_ptr().
+  */
+  virtual const String * get_name() const = 0;
+
+  /*
+    We need this only for suppressing warnings; objects of this type are
+    allocated on a MEM_ROOT and should not require destruction.
+  */
+  virtual ~Time_zone() {};
+};
+
+extern Time_zone * my_tz_UTC;
+extern Time_zone * my_tz_SYSTEM;
+extern TABLE_LIST * my_tz_get_table_list(THD *thd);
+extern Time_zone * my_tz_find(const String *name, TABLE_LIST *tz_tables);
+extern my_bool my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap);
+extern void my_tz_free();
+
+
+/*
+  Check if we have a pointer to the beginning of the list of implicitly
+  used time zone tables, set SELECT_ACL for them and fast-forward to its
+  end.
+
+  SYNOPSIS
+    my_tz_check_n_skip_implicit_tables()
+      table     - (in/out) pointer to element of table list to check
+      tz_tables - list of implicitly used time zone tables received
+                  from the my_tz_get_table_list() function.
+
+  NOTE
+    This function relies on the my_tz_get_table_list() implementation.
+
+  RETURN VALUE
+    TRUE  - if table points to the beginning of the tz_tables list
+    FALSE - otherwise.
+*/
+inline bool my_tz_check_n_skip_implicit_tables(TABLE_LIST **table,
+                                               TABLE_LIST *tz_tables)
+{
+  if (*table == tz_tables)
+  {
+    for (int i= 0; i < 4; i++)
+      (*table)[i].grant.privilege= SELECT_ACL;
+    (*table)+= 3;
+    return TRUE;
+  }
+  return FALSE;
+}
+
+/*
+  Maximum length of time zone name that we support
+  (the Time zone name is char(64) in the db).
+*/
+#define MAX_TIME_ZONE_NAME_LENGTH 72
+
+#endif /* !defined(TESTTIME) && !defined(TZINFO2SQL) */
diff --git a/sql/udf_example.cc b/sql/udf_example.cc
index 176ddeb10a3..50de0f187fe 100644
--- a/sql/udf_example.cc
+++ b/sql/udf_example.cc
@@ -126,6 +126,8 @@ typedef long long longlong;
 #include <m_ctype.h>
 #include <m_string.h>		// To get strmov()
+static pthread_mutex_t LOCK_hostname;
+
 #ifdef HAVE_DLOPEN
 /* These must be right or mysqld will not find the symbol!
*/ @@ -147,6 +149,7 @@ longlong sequence(UDF_INIT *initid, UDF_ARGS *args, char *is_null, my_bool avgcost_init( UDF_INIT* initid, UDF_ARGS* args, char* message ); void avgcost_deinit( UDF_INIT* initid ); void avgcost_reset( UDF_INIT* initid, UDF_ARGS* args, char* is_null, char *error ); +void avgcost_clear( UDF_INIT* initid, char* is_null, char *error ); void avgcost_add( UDF_INIT* initid, UDF_ARGS* args, char* is_null, char *error ); double avgcost( UDF_INIT* initid, UDF_ARGS* args, char* is_null, char *error ); } @@ -282,8 +285,8 @@ char *metaphon(UDF_INIT *initid, UDF_ARGS *args, char *result, for (n = ntrans + 1, n_end = ntrans + sizeof(ntrans)-2; word != w_end && n < n_end; word++ ) - if ( isalpha ( *word )) - *n++ = toupper ( *word ); + if ( my_isalpha ( &my_charset_latin1, *word )) + *n++ = my_toupper ( &my_charset_latin1, *word ); if ( n == ntrans + 1 ) /* return empty string if 0 bytes */ { @@ -583,6 +586,8 @@ longlong myfunc_int(UDF_INIT *initid, UDF_ARGS *args, char *is_null, case REAL_RESULT: // Add numers as longlong val += (longlong) *((double*) args->args[i]); break; + default: + break; } } return val; @@ -610,10 +615,12 @@ my_bool sequence_init(UDF_INIT *initid, UDF_ARGS *args, char *message) return 1; } bzero(initid->ptr,sizeof(longlong)); - // Fool MySQL to think that this function is a constant - // This will ensure that MySQL only evalutes the function - // when the rows are sent to the client and not before any ORDER BY - // clauses + /* + Fool MySQL to think that this function is a constant + This will ensure that MySQL only evalutes the function + when the rows are sent to the client and not before any ORDER BY + clauses + */ initid->const_item=1; return 0; } @@ -630,9 +637,10 @@ longlong sequence(UDF_INIT *initid, UDF_ARGS *args, char *is_null, ulonglong val=0; if (args->arg_count) val= *((longlong*) args->args[0]); - return ++ *((longlong*) initid->ptr) + val; + return ++*((longlong*) initid->ptr) + val; } + /**************************************************************************** ** Some functions that handles IP and hostname conversions ** The orignal function was from Zeev Suraski. 
@@ -642,8 +650,6 @@ longlong sequence(UDF_INIT *initid, UDF_ARGS *args, char *is_null, ** ****************************************************************************/ -#if defined(HAVE_GETHOSTBYADDR_R) && defined(HAVE_SOLARIS_STYLE_GETHOST) - #include <sys/socket.h> #include <netinet/in.h> #include <arpa/inet.h> @@ -651,9 +657,11 @@ longlong sequence(UDF_INIT *initid, UDF_ARGS *args, char *is_null, extern "C" { my_bool lookup_init(UDF_INIT *initid, UDF_ARGS *args, char *message); +void lookup_deinit(UDF_INIT *initid); char *lookup(UDF_INIT *initid, UDF_ARGS *args, char *result, unsigned long *length, char *null_value, char *error); my_bool reverse_lookup_init(UDF_INIT *initid, UDF_ARGS *args, char *message); +void reverse_lookup_deinit(UDF_INIT *initid); char *reverse_lookup(UDF_INIT *initid, UDF_ARGS *args, char *result, unsigned long *length, char *null_value, char *error); } @@ -676,9 +684,19 @@ my_bool lookup_init(UDF_INIT *initid, UDF_ARGS *args, char *message) } initid->max_length=11; initid->maybe_null=1; +#if !defined(HAVE_GETHOSTBYADDR_R) || !defined(HAVE_SOLARIS_STYLE_GETHOST) + (void) pthread_mutex_init(&LOCK_hostname,MY_MUTEX_INIT_SLOW); +#endif return 0; } +void lookup_deinit(UDF_INIT *initid) +{ +#if !defined(HAVE_GETHOSTBYADDR_R) || !defined(HAVE_SOLARIS_STYLE_GETHOST) + (void) pthread_mutex_destroy(&LOCK_hostname); +#endif +} + char *lookup(UDF_INIT *initid, UDF_ARGS *args, char *result, unsigned long *res_length, char *null_value, char *error) { @@ -696,13 +714,23 @@ char *lookup(UDF_INIT *initid, UDF_ARGS *args, char *result, length=sizeof(name_buff)-1; memcpy(name_buff,args->args[0],length); name_buff[length]=0; - +#if defined(HAVE_GETHOSTBYADDR_R) && defined(HAVE_SOLARIS_STYLE_GETHOST) if (!(hostent=gethostbyname_r(name_buff,&tmp_hostent,hostname_buff, sizeof(hostname_buff), &tmp_errno))) { *null_value=1; return 0; } +#else + VOID(pthread_mutex_lock(&LOCK_hostname)); + if (!(hostent= gethostbyname((char*) name_buff))) + { + VOID(pthread_mutex_unlock(&LOCK_hostname)); + *null_value= 1; + return 0; + } + VOID(pthread_mutex_unlock(&LOCK_hostname)); +#endif struct in_addr in; memcpy_fixed((char*) &in,(char*) *hostent->h_addr_list, sizeof(in.s_addr)); *res_length= (ulong) (strmov(result, inet_ntoa(in)) - result); @@ -731,9 +759,18 @@ my_bool reverse_lookup_init(UDF_INIT *initid, UDF_ARGS *args, char *message) } initid->max_length=32; initid->maybe_null=1; +#if !defined(HAVE_GETHOSTBYADDR_R) || !defined(HAVE_SOLARIS_STYLE_GETHOST) + (void) pthread_mutex_init(&LOCK_hostname,MY_MUTEX_INIT_SLOW); +#endif return 0; } +void reverse_lookup_deinit(UDF_INIT *initid) +{ +#if !defined(HAVE_GETHOSTBYADDR_R) || !defined(HAVE_SOLARIS_STYLE_GETHOST) + (void) pthread_mutex_destroy(&LOCK_hostname); +#endif +} char *reverse_lookup(UDF_INIT *initid, UDF_ARGS *args, char *result, unsigned long *res_length, char *null_value, char *error) @@ -776,6 +813,7 @@ char *reverse_lookup(UDF_INIT *initid, UDF_ARGS *args, char *result, return 0; } struct hostent *hp; +#if defined(HAVE_GETHOSTBYADDR_R) && defined(HAVE_SOLARIS_STYLE_GETHOST) int tmp_errno; if (!(hp=gethostbyaddr_r((char*) &taddr,sizeof(taddr), AF_INET, &tmp_hostent, name_buff,sizeof(name_buff), @@ -784,10 +822,19 @@ char *reverse_lookup(UDF_INIT *initid, UDF_ARGS *args, char *result, *null_value=1; return 0; } +#else + VOID(pthread_mutex_lock(&LOCK_hostname)); + if (!(hp= gethostbyaddr((char*) &taddr, sizeof(taddr), AF_INET))) + { + VOID(pthread_mutex_unlock(&LOCK_hostname)); + *null_value= 1; + return 0; + } + 
VOID(pthread_mutex_unlock(&LOCK_hostname)); +#endif *res_length=(ulong) (strmov(result,hp->h_name) - result); return result; } -#endif // defined(HAVE_GETHOSTBYADDR_R) && defined(HAVE_SOLARIS_STYLE_GETHOST) /* ** Syntax for the new aggregate commands are: @@ -859,21 +906,29 @@ avgcost_deinit( UDF_INIT* initid ) delete initid->ptr; } + +/* This is only for MySQL 4.0 compability */ void -avgcost_reset( UDF_INIT* initid, UDF_ARGS* args, char* is_null, char* message ) +avgcost_reset(UDF_INIT* initid, UDF_ARGS* args, char* is_null, char* message) { - struct avgcost_data* data = (struct avgcost_data*)initid->ptr; - data->totalprice = 0.0; - data->totalquantity = 0; - data->count = 0; + avgcost_clear(initid, is_null, message); + avgcost_add(initid, args, is_null, message); +} - *is_null = 0; - avgcost_add( initid, args, is_null, message ); +/* This is needed to get things to work in MySQL 4.1.1 and above */ + +void +avgcost_clear(UDF_INIT* initid, char* is_null, char* message) +{ + struct avgcost_data* data = (struct avgcost_data*)initid->ptr; + data->totalprice= 0.0; + data->totalquantity= 0; + data->count= 0; } void -avgcost_add( UDF_INIT* initid, UDF_ARGS* args, char* is_null, char* message ) +avgcost_add(UDF_INIT* initid, UDF_ARGS* args, char* is_null, char* message) { if (args->args[0] && args->args[1]) { diff --git a/sql/uniques.cc b/sql/uniques.cc index 967392d12d5..d060965aa66 100644 --- a/sql/uniques.cc +++ b/sql/uniques.cc @@ -101,12 +101,12 @@ bool Unique::flush() bool Unique::get(TABLE *table) { SORTPARAM sort_param; - table->found_records=elements+tree.elements_in_tree; + table->sort.found_records=elements+tree.elements_in_tree; if (my_b_tell(&file) == 0) { /* Whole tree is in memory; Don't use disk if you don't need to */ - if ((record_pointers=table->record_pointers= (byte*) + if ((record_pointers=table->sort.record_pointers= (byte*) my_malloc(size * tree.elements_in_tree, MYF(0)))) { (void) tree_walk(&tree, (tree_walk_action) unique_write_to_ptrs, @@ -118,7 +118,7 @@ bool Unique::get(TABLE *table) if (flush()) return 1; - IO_CACHE *outfile=table->io_cache; + IO_CACHE *outfile=table->sort.io_cache; BUFFPEK *file_ptr= (BUFFPEK*) file_ptrs.buffer; uint maxbuffer= file_ptrs.elements - 1; uchar *sort_buffer; @@ -126,8 +126,8 @@ bool Unique::get(TABLE *table) bool error=1; /* Open cached file if it isn't open */ - outfile=table->io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE), - MYF(MY_ZEROFILL)); + outfile=table->sort.io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE), + MYF(MY_ZEROFILL)); if (!outfile || ! 
my_b_inited(outfile) && open_cached_file(outfile,mysql_tmpdir,TEMP_PREFIX,READ_RECORD_BUFFER, @@ -138,7 +138,8 @@ bool Unique::get(TABLE *table) bzero((char*) &sort_param,sizeof(sort_param)); sort_param.max_rows= elements; sort_param.sort_form=table; - sort_param.sort_length=sort_param.ref_length=size; + sort_param.rec_length= sort_param.sort_length= sort_param.ref_length= + size; sort_param.keys= max_in_memory_size / sort_param.sort_length; sort_param.not_killable=1; diff --git a/sql/unireg.cc b/sql/unireg.cc index 748e45f8b0a..e3bf763f700 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -29,13 +29,13 @@ #include <m_ctype.h> #include <assert.h> -#define FCOMP 11 /* Byte per packat f{lt */ +#define FCOMP 17 /* Bytes for a packed field */ static uchar * pack_screens(List<create_field> &create_fields, uint *info_length, uint *screens, bool small_file); static uint pack_keys(uchar *keybuff,uint key_count, KEY *key_info, ulong data_offset); -static bool pack_header(uchar *forminfo, enum db_type table_type, +static bool pack_header(uchar *forminfo,enum db_type table_type, List<create_field> &create_fields, uint info_length, uint screens, uint table_options, ulong data_offset, handler *file); @@ -49,11 +49,32 @@ static bool make_empty_rec(int file, enum db_type table_type, uint reclength, uint null_fields, ulong data_offset); +/* + Create a frm (table definition) file + + SYNOPSIS + mysql_create_frm() + thd Thread handler + file_name Name of file (including database and .frm) + db Name of database + table Name of table + create_info create info parameters + create_fields Fields to create + keys number of keys to create + key_info Keys to create + db_file Handler to use. May be zero, in which case we use + create_info->db_type + RETURN + 0 ok + 1 error +*/ -int rea_create_table(my_string file_name, - HA_CREATE_INFO *create_info, - List<create_field> &create_fields, - uint keys, KEY *key_info) +bool mysql_create_frm(THD *thd, my_string file_name, + const char *db, const char *table, + HA_CREATE_INFO *create_info, + List<create_field> &create_fields, + uint keys, KEY *key_info, + handler *db_file) { uint reclength,info_length,screens,key_info_length,maxlength,null_fields; File file; @@ -61,13 +82,13 @@ int rea_create_table(my_string file_name, uchar fileinfo[64],forminfo[288],*keybuff; TYPELIB formnames; uchar *screen_buff; - handler *db_file; - DBUG_ENTER("rea_create_table"); + DBUG_ENTER("mysql_create_frm"); formnames.type_names=0; if (!(screen_buff=pack_screens(create_fields,&info_length,&screens,0))) DBUG_RETURN(1); - db_file=get_new_handler((TABLE*) 0, create_info->db_type); + if (db_file == NULL) + db_file= get_new_handler((TABLE*) 0, create_info->db_type); /* If fixed row records, we need one bit to check for deleted rows */ if (!(create_info->table_options & HA_OPTION_PACK_RECORD)) @@ -78,13 +99,12 @@ int rea_create_table(my_string file_name, screens, create_info->table_options, data_offset, db_file)) { - NET *net=my_pthread_getspecific_ptr(NET*,THR_NET); my_free((gptr) screen_buff,MYF(0)); - if (net->last_errno != ER_TOO_MANY_FIELDS) + if (thd->net.last_errno != ER_TOO_MANY_FIELDS) DBUG_RETURN(1); // Try again without UNIREG screens (to get more columns) - net->last_error[0]=0; + thd->net.last_error[0]=0; if (!(screen_buff=pack_screens(create_fields,&info_length,&screens,1))) DBUG_RETURN(1); if (pack_header(forminfo, create_info->db_type, create_fields,info_length, @@ -97,15 +117,15 @@ int rea_create_table(my_string file_name, reclength=uint2korr(forminfo+266); 
null_fields=uint2korr(forminfo+282); - if ((file=create_frm(file_name, reclength, fileinfo, + if ((file=create_frm(file_name, db, table, reclength, fileinfo, create_info, keys)) < 0) { my_free((gptr) screen_buff,MYF(0)); DBUG_RETURN(1); } - uint key_buff_length=uint2korr(fileinfo+14); - keybuff=(uchar*) my_alloca(key_buff_length); + uint key_buff_length=keys*(7+NAME_LEN+MAX_REF_PARTS*9)+16; + keybuff=(uchar*) my_malloc(key_buff_length, MYF(0)); key_info_length= pack_keys(keybuff, keys, key_info, data_offset); VOID(get_form_pos(file,fileinfo,&formnames)); if (!(filepos=make_new_entry(file,fileinfo,&formnames,""))) @@ -113,6 +133,7 @@ int rea_create_table(my_string file_name, maxlength=(uint) next_io_size((ulong) (uint2korr(forminfo)+1000)); int2store(forminfo+2,maxlength); int4store(fileinfo+10,(ulong) (filepos+maxlength)); + int4store(fileinfo+47,key_buff_length); fileinfo[26]= (uchar) test((create_info->max_rows == 1) && (create_info->min_rows == 1) && (keys == 0)); int2store(fileinfo+28,key_info_length); @@ -160,24 +181,74 @@ int rea_create_table(my_string file_name, #endif my_free((gptr) screen_buff,MYF(0)); - my_afree((gptr) keybuff); + my_free((gptr) keybuff, MYF(0)); if (opt_sync_frm && !(create_info->options & HA_LEX_CREATE_TMP_TABLE) && my_sync(file, MYF(MY_WME))) goto err2; - if (my_close(file,MYF(MY_WME)) || - ha_create_table(file_name,create_info,0)) + if (my_close(file,MYF(MY_WME))) goto err3; + + { + /* Unescape all UCS2 intervals: were escaped in pack_headers */ + List_iterator<create_field> it(create_fields); + create_field *field; + while ((field=it++)) + { + if (field->interval && field->charset->mbminlen > 1) + unhex_type2(field->interval); + } + } DBUG_RETURN(0); err: my_free((gptr) screen_buff,MYF(0)); - my_afree((gptr) keybuff); + my_free((gptr) keybuff, MYF(0)); err2: VOID(my_close(file,MYF(MY_WME))); err3: my_delete(file_name,MYF(0)); DBUG_RETURN(1); +} /* mysql_create_frm */ + + +/* + Create a frm (table definition) file and the tables + + SYNOPSIS + rea_create_table() + thd Thread handler + file_name Name of file (including database and .frm) + db Name of database + table Name of table + create_info create info parameters + create_fields Fields to create + keys number of keys to create + key_info Keys to create + db_file Handler to use. 
May be zero, in which case we use + create_info->db_type + RETURN + 0 ok + 1 error +*/ + +int rea_create_table(THD *thd, my_string file_name, + const char *db, const char *table, + HA_CREATE_INFO *create_info, + List<create_field> &create_fields, + uint keys, KEY *key_info) +{ + DBUG_ENTER("rea_create_table"); + + if (mysql_create_frm(thd, file_name, db, table, create_info, + create_fields, keys, key_info, NULL)) + DBUG_RETURN(1); + if (ha_create_table(file_name,create_info,0)) + { + my_delete(file_name,MYF(0)); + DBUG_RETURN(1); + } + DBUG_RETURN(0); } /* rea_create_table */ @@ -263,7 +334,7 @@ static uint pack_keys(uchar *keybuff, uint key_count, KEY *keyinfo, ulong data_offset) { uint key_parts,length; - uchar *pos, *keyname_pos, *key_alg_pos; + uchar *pos, *keyname_pos; KEY *key,*end; KEY_PART_INFO *key_part,*key_part_end; DBUG_ENTER("pack_keys"); @@ -272,10 +343,12 @@ static uint pack_keys(uchar *keybuff, uint key_count, KEY *keyinfo, key_parts=0; for (key=keyinfo,end=keyinfo+key_count ; key != end ; key++) { - pos[0]=(uchar) (key->flags ^ HA_NOSAME); - int2store(pos+1,key->key_length); - pos[3]=key->key_parts; - pos+=4; + int2store(pos, (key->flags ^ HA_NOSAME)); + int2store(pos+2,key->key_length); + pos[4]= (uchar) key->key_parts; + pos[5]= (uchar) key->algorithm; + pos[6]=pos[7]=0; // For the future + pos+=8; key_parts+=key->key_parts; DBUG_PRINT("loop",("flags: %d key_parts: %d at %lx", key->flags,key->key_parts, @@ -310,18 +383,19 @@ static uint pack_keys(uchar *keybuff, uint key_count, KEY *keyinfo, } *(pos++)=0; - /* For MySQL 4.0; Store key algoritms last */ - key_alg_pos= pos; - for (key=keyinfo ; key != end ; key++) + if (key_count > 127 || key_parts > 127) { - *(pos++)= (uchar) key->algorithm; + keybuff[0]= (key_count & 0x7f) | 0x80; + keybuff[1]= key_count >> 7; + int2store(keybuff+2,key_parts); } - - keybuff[0]=(uchar) key_count; - keybuff[1]=(uchar) key_parts; - length=(uint) (keyname_pos-keybuff); - int2store(keybuff+2,length); - length=(uint) (key_alg_pos-keyname_pos); + else + { + keybuff[0]=(uchar) key_count; + keybuff[1]=(uchar) key_parts; + keybuff[2]= keybuff[3]= 0; + } + length=(uint) (pos-keyname_pos); int2store(keybuff+4,length); DBUG_RETURN((uint) (pos-keybuff)); } /* pack_keys */ @@ -334,9 +408,9 @@ static bool pack_header(uchar *forminfo, enum db_type table_type, uint info_length, uint screens, uint table_options, ulong data_offset, handler *file) { - uint length,int_count,int_length,no_empty, int_parts, - time_stamp_pos,null_fields; - ulong reclength,totlength,n_length; + uint length,int_count,int_length,no_empty, int_parts; + uint time_stamp_pos,null_fields; + ulong reclength, totlength, n_length, com_length; DBUG_ENTER("pack_header"); if (create_fields.elements > MAX_FIELDS) @@ -346,7 +420,8 @@ static bool pack_header(uchar *forminfo, enum db_type table_type, } totlength=reclength=0L; - no_empty=int_count=int_parts=int_length=time_stamp_pos=null_fields=0; + no_empty=int_count=int_parts=int_length=time_stamp_pos=null_fields= + com_length=0; n_length=2L; /* Check fields */ @@ -356,6 +431,7 @@ static bool pack_header(uchar *forminfo, enum db_type table_type, while ((field=it++)) { totlength+= field->length; + com_length+= field->comment.length; if (MTYP_TYPENR(field->unireg_check) == Field::NOEMPTY || field->unireg_check & MTYP_NOEMPTY_BIT) { @@ -363,8 +439,12 @@ static bool pack_header(uchar *forminfo, enum db_type table_type, MTYP_NOEMPTY_BIT); no_empty++; } - if ((MTYP_TYPENR(field->unireg_check) == Field::TIMESTAMP_FIELD || - f_packtype(field->pack_flag) 
== (int) FIELD_TYPE_TIMESTAMP) && + /* + We mark first TIMESTAMP field with NOW() in DEFAULT or ON UPDATE + as auto-update field. + */ + if (field->sql_type == FIELD_TYPE_TIMESTAMP && + MTYP_TYPENR(field->unireg_check) != Field::NONE && !time_stamp_pos) time_stamp_pos= (uint) field->offset+ (uint) data_offset + 1; length=field->pack_length; @@ -375,6 +455,28 @@ static bool pack_header(uchar *forminfo, enum db_type table_type, if (field->interval) { uint old_int_count=int_count; + + if (field->charset->mbminlen > 1) + { + /* Escape UCS2 intervals using HEX notation */ + for (uint pos= 0; pos < field->interval->count; pos++) + { + char *dst; + uint length= field->interval->type_lengths[pos], hex_length; + const char *src= field->interval->type_names[pos]; + const char *srcend= src + length; + hex_length= length * 2; + field->interval->type_lengths[pos]= hex_length; + field->interval->type_names[pos]= dst= sql_alloc(hex_length + 1); + for ( ; src < srcend; src++) + { + *dst++= _dig_vec_upper[((uchar) *src) >> 4]; + *dst++= _dig_vec_upper[((uchar) *src) & 15]; + } + *dst= '\0'; + } + } + field->interval_id=get_interval_id(&int_count,create_fields,field); if (old_int_count != int_count) { @@ -398,14 +500,15 @@ static bool pack_header(uchar *forminfo, enum db_type table_type, /* Hack to avoid bugs with small static rows in MySQL */ reclength=max(file->min_record_length(table_options),reclength); if (info_length+(ulong) create_fields.elements*FCOMP+288+ - n_length+int_length > 65535L || int_count > 255) + n_length+int_length+com_length > 65535L || int_count > 255) { my_error(ER_TOO_MANY_FIELDS,MYF(0)); DBUG_RETURN(1); } bzero((char*)forminfo,288); - length=info_length+create_fields.elements*FCOMP+288+n_length+int_length; + length=(info_length+create_fields.elements*FCOMP+288+n_length+int_length+ + com_length); int2store(forminfo,length); forminfo[256] = (uint8) screens; int2store(forminfo+258,create_fields.elements); @@ -421,6 +524,7 @@ static bool pack_header(uchar *forminfo, enum db_type table_type, int2store(forminfo+278,80); /* Columns needed */ int2store(forminfo+280,22); /* Rows needed */ int2store(forminfo+282,null_fields); + int2store(forminfo+284,com_length); DBUG_RETURN(0); } /* pack_header */ @@ -459,7 +563,7 @@ static bool pack_fields(File file, List<create_field> &create_fields, ulong data_offset) { reg2 uint i; - uint int_count; + uint int_count, comment_length=0; uchar buff[MAX_FIELD_WIDTH]; create_field *field; DBUG_ENTER("pack_fields"); @@ -475,13 +579,27 @@ static bool pack_fields(File file, List<create_field> &create_fields, buff[0]= (uchar) field->row; buff[1]= (uchar) field->col; buff[2]= (uchar) field->sc_length; - buff[3]= (uchar) field->length; + int2store(buff+3, field->length); /* The +1 is here becasue the col offset in .frm file have offset 1 */ recpos= field->offset+1 + (uint) data_offset; - int2store(buff+4,recpos); - int2store(buff+6,field->pack_flag); - int2store(buff+8,field->unireg_check); - buff[10]= (uchar) field->interval_id; + int3store(buff+5,recpos); + int2store(buff+8,field->pack_flag); + int2store(buff+10,field->unireg_check); + buff[12]= (uchar) field->interval_id; + buff[13]= (uchar) field->sql_type; + if (field->sql_type == FIELD_TYPE_GEOMETRY) + { + buff[14]= (uchar) field->geom_type; +#ifndef HAVE_SPATIAL + DBUG_ASSERT(0); // Should newer happen +#endif + } + else if (field->charset) + buff[14]= (uchar) field->charset->number; + else + buff[14]= 0; // Numerical + int2store(buff+15, field->comment.length); + comment_length+= field->comment.length; 
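+    /*
+      The buffer filled in above is the FCOMP (17 byte) packed field
+      descriptor: row, col, sc_length, length (2 bytes), offset (3),
+      pack_flag (2), unireg_check (2), interval_id, sql_type,
+      charset/geom_type and comment length (2).
+    */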
set_if_bigger(int_count,field->interval_id); if (my_write(file,(byte*) buff,FCOMP,MYF_RW)) DBUG_RETURN(1); @@ -507,7 +625,7 @@ static bool pack_fields(File file, List<create_field> &create_fields, /* Write intervals */ if (int_count) { - String tmp((char*) buff,sizeof(buff)); + String tmp((char*) buff,sizeof(buff), &my_charset_bin); tmp.length(0); it.rewind(); int_count=0; @@ -528,6 +646,18 @@ static bool pack_fields(File file, List<create_field> &create_fields, if (my_write(file,(byte*) tmp.ptr(),tmp.length(),MYF_RW)) DBUG_RETURN(1); } + if (comment_length) + { + it.rewind(); + int_count=0; + while ((field=it++)) + { + if (field->comment.length) + if (my_write(file, (byte*) field->comment.str, field->comment.length, + MYF_RW)) + DBUG_RETURN(1); + } + } DBUG_RETURN(0); } @@ -560,6 +690,7 @@ static bool make_empty_rec(File file,enum db_type table_type, DBUG_RETURN(1); } + table.in_use= current_thd; table.db_low_byte_first= handler->low_byte_first(); table.blob_ptr_size=portable_sizeof_char_ptr; @@ -581,10 +712,13 @@ static bool make_empty_rec(File file,enum db_type table_type, 1 << (null_count & 7), field->pack_flag, field->sql_type, + field->charset, + field->geom_type, field->unireg_check, field->interval, field->field_name, &table); + if (!(field->flags & NOT_NULL_FLAG)) null_count++; @@ -597,7 +731,7 @@ static bool make_empty_rec(File file,enum db_type table_type, if (field->def && (regfield->real_type() != FIELD_TYPE_YEAR || field->def->val_int() != 0)) - field->def->save_in_field(regfield, 1); + (void) field->def->save_in_field(regfield, 1); else if (regfield->real_type() == FIELD_TYPE_ENUM && (field->flags & NOT_NULL_FLAG)) { @@ -605,9 +739,9 @@ static bool make_empty_rec(File file,enum db_type table_type, regfield->store((longlong) 1); } else if (type == Field::YES) // Old unireg type - regfield->store(ER(ER_YES),(uint) strlen(ER(ER_YES))); + regfield->store(ER(ER_YES),(uint) strlen(ER(ER_YES)),system_charset_info); else if (type == Field::NO) // Old unireg type - regfield->store(ER(ER_NO), (uint) strlen(ER(ER_NO))); + regfield->store(ER(ER_NO), (uint) strlen(ER(ER_NO)),system_charset_info); else regfield->reset(); delete regfield; diff --git a/sql/unireg.h b/sql/unireg.h index 37157311e15..3fb30315c81 100644 --- a/sql/unireg.h +++ b/sql/unireg.h @@ -45,9 +45,10 @@ #define LIBLEN FN_REFLEN-FN_LEN /* Max l{ngd p} dev */ #define MAX_DBKEY_LENGTH (FN_LEN*2+1+1+4+4) /* extra 4+4 bytes for slave tmp * tables */ +#define MAX_ALIAS_NAME 256 #define MAX_FIELD_NAME 34 /* Max colum name length +2 */ #define MAX_SYS_VAR_LENGTH 32 -#define MAX_KEY 32 /* Max used keys */ +#define MAX_KEY 64 /* Max used keys */ #define MAX_REF_PARTS 16 /* Max parts used as ref */ #define MAX_KEY_LENGTH 1024 /* max possible key */ #if SIZEOF_OFF_T > 4 @@ -57,13 +58,35 @@ #endif #define MAX_HOSTNAME 61 /* len+1 in mysql.user */ -#define MAX_FIELD_WIDTH 256 /* Max column width +1 */ -#define MAX_TABLES (sizeof(table_map)*8-1) /* Max tables in join */ +#define MAX_MBWIDTH 3 /* Max multibyte sequence */ +#define MAX_FIELD_CHARLENGTH 255 +#define CONVERT_IF_BIGGER_TO_BLOB 255 +/* Max column width +1 */ +#define MAX_FIELD_WIDTH (MAX_FIELD_CHARLENGTH*MAX_MBWIDTH+1) + +#define MAX_DATE_WIDTH 10 /* YYYY-MM-DD */ +#define MAX_TIME_WIDTH 23 /* -DDDDDD HH:MM:SS.###### */ +#define MAX_DATETIME_FULL_WIDTH 29 /* YYYY-MM-DD HH:MM:SS.###### AM */ +#define MAX_DATETIME_WIDTH 19 /* YYYY-MM-DD HH:MM:SS */ + +#define MAX_TABLES (sizeof(table_map)*8-3) /* Max tables in join */ +#define PARAM_TABLE_BIT (((table_map) 1) << 
(sizeof(table_map)*8-3)) +#define OUTER_REF_TABLE_BIT (((table_map) 1) << (sizeof(table_map)*8-2)) #define RAND_TABLE_BIT (((table_map) 1) << (sizeof(table_map)*8-1)) +#define PSEUDO_TABLE_BITS (PARAM_TABLE_BIT | OUTER_REF_TABLE_BIT | \ + RAND_TABLE_BIT) #define MAX_FIELDS 4096 /* Limit in the .frm file */ #define MAX_SORT_MEMORY (2048*1024-MALLOC_OVERHEAD) #define MIN_SORT_MEMORY (32*1024-MALLOC_OVERHEAD) + +/* Memory allocated when parsing a statement / saving a statement */ +#define MEM_ROOT_BLOCK_SIZE 8192 +#define MEM_ROOT_PREALLOC 8192 +#define TRANS_MEM_ROOT_BLOCK_SIZE 4096 +#define TRANS_MEM_ROOT_PREALLOC 4096 + +#define DEFAULT_ERROR_COUNT 64 #define EXTRA_RECORDS 10 /* Extra records in sort */ #define SCROLL_EXTRA 5 /* Extra scroll-rows. */ #define FIELD_NAME_USED ((uint) 32768) /* Bit set if fieldname used */ @@ -91,17 +114,18 @@ #define SPECIAL_NO_PRIOR 128 /* Don't prioritize threads */ #define SPECIAL_BIG_SELECTS 256 /* Don't use heap tables */ #define SPECIAL_NO_HOST_CACHE 512 /* Don't cache hosts */ -#define SPECIAL_LONG_LOG_FORMAT 1024 +#define SPECIAL_SHORT_LOG_FORMAT 1024 #define SPECIAL_SAFE_MODE 2048 +#define SPECIAL_LOG_QUERIES_NOT_USING_INDEXES 4096 /* Log q not using indexes */ /* Extern defines */ -#define store_record(A,B) bmove_allign((A)->record[B],(A)->record[0],(size_t) (A)->reclength) -#define restore_record(A,B) bmove_allign((A)->record[0],(A)->record[B],(size_t) (A)->reclength) -#define cmp_record(A,B) memcmp((A)->record[0],(A)->record[B],(size_t) (A)->reclength) +#define store_record(A,B) bmove_align((A)->B,(A)->record[0],(size_t) (A)->reclength) +#define restore_record(A,B) bmove_align((A)->record[0],(A)->B,(size_t) (A)->reclength) +#define cmp_record(A,B) memcmp((A)->record[0],(A)->B,(size_t) (A)->reclength) #define empty_record(A) { \ -bmove_allign((A)->record[0],(A)->record[2],(size_t) (A)->reclength); \ -bfill((A)->null_flags,(A)->null_bytes,255);\ -} + restore_record((A),default_values); \ + bfill((A)->null_flags,(A)->null_bytes,255);\ + } /* Defines for use with openfrm, openprt and openfrd */ @@ -124,10 +148,10 @@ bfill((A)->null_flags,(A)->null_bytes,255);\ #define MTYP_NOEMPTY_BIT 128 /* - * Minimum length pattern before Turbo Boyer-Moore is used - * for SELECT "text" LIKE "%pattern%", excluding the two - * wildcards in class Item_func_like. - */ + Minimum length pattern before Turbo Boyer-Moore is used + for SELECT "text" LIKE "%pattern%", excluding the two + wildcards in class Item_func_like. +*/ #define MIN_TURBOBM_PATTERN_LEN 3 /* @@ -137,8 +161,11 @@ bfill((A)->null_flags,(A)->null_bytes,255);\ */ #define BIN_LOG_HEADER_SIZE 4 +#define FLOATING_POINT_BUFFER 331 + +#define DEFAULT_KEY_CACHE_NAME "default" - /* Include prototypes for unireg */ +/* Include prototypes for unireg */ #include "mysqld_error.h" #include "structs.h" /* All structs we need */ |