-rw-r--r--  .bzrignore | 33
-rwxr-xr-x  BUILD/compile-solaris-sparc | 2
-rwxr-xr-x  BUILD/compile-solaris-sparc-purify | 2
-rwxr-xr-x  BitKeeper/triggers/post-commit | 8
-rw-r--r--  acinclude.m4 | 36
-rw-r--r--  client/errmsg.c | 125
-rw-r--r--  heap/_check.c | 59
-rw-r--r--  heap/hp_delete.c | 4
-rw-r--r--  heap/hp_scan.c | 1
-rw-r--r--  heap/hp_update.c | 4
-rw-r--r--  heap/hp_write.c | 5
-rw-r--r--  include/errmsg.h | 9
-rw-r--r--  include/my_base.h | 3
-rw-r--r--  innobase/row/row0sel.c | 12
-rw-r--r--  isam/extra.c | 6
-rw-r--r--  libmysql/errmsg.c | 17
-rw-r--r--  libmysql/libmysql.c | 12
-rw-r--r--  libmysqld/lib_sql.cc | 40
-rw-r--r--  myisam/mi_check.c | 12
-rw-r--r--  myisam/mi_extra.c | 4
-rw-r--r--  myisam/mi_rnext_same.c | 5
-rw-r--r--  myisam/sort.c | 22
-rw-r--r--  myisammrg/myrg_extra.c | 3
-rw-r--r--  mysql-test/mysql-test-run.sh | 71
-rw-r--r--  mysql-test/r/alter_table.result | 2
-rw-r--r--  mysql-test/r/distinct.result | 24
-rw-r--r--  mysql-test/r/func_math.result | 3
-rw-r--r--  mysql-test/r/group_by.result | 15
-rw-r--r--  mysql-test/r/innodb.result | 37
-rw-r--r--  mysql-test/r/multi_update.result | 118
-rw-r--r--  mysql-test/r/null.result | 19
-rw-r--r--  mysql-test/r/select.result | 13
-rw-r--r--  mysql-test/r/temp_table.result | 5
-rw-r--r--  mysql-test/r/type_timestamp.result | 32
-rw-r--r--  mysql-test/t/alter_table.test | 2
-rw-r--r--  mysql-test/t/distinct.test | 10
-rw-r--r--  mysql-test/t/func_math.test | 1
-rw-r--r--  mysql-test/t/group_by.test | 3
-rw-r--r--  mysql-test/t/innodb.test | 36
-rw-r--r--  mysql-test/t/multi_update.test | 106
-rw-r--r--  mysql-test/t/null.test | 14
-rw-r--r--  mysql-test/t/select.test | 8
-rw-r--r--  mysql-test/t/temp_table.test | 9
-rw-r--r--  mysql-test/t/type_timestamp.test | 21
-rw-r--r--  mysys/mf_iocache.c | 2
-rw-r--r--  sql-bench/crash-me.sh | 173
-rw-r--r--  sql-bench/server-cfg.sh | 44
-rw-r--r--  sql/field.cc | 28
-rw-r--r--  sql/field.h | 2
-rw-r--r--  sql/field_conv.cc | 24
-rw-r--r--  sql/ha_innodb.cc | 108
-rw-r--r--  sql/handler.cc | 4
-rw-r--r--  sql/item.cc | 32
-rw-r--r--  sql/item.h | 27
-rw-r--r--  sql/item_cmpfunc.cc | 44
-rw-r--r--  sql/item_cmpfunc.h | 9
-rw-r--r--  sql/item_func.cc | 25
-rw-r--r--  sql/item_func.h | 3
-rw-r--r--  sql/item_timefunc.cc | 4
-rw-r--r--  sql/item_timefunc.h | 4
-rw-r--r--  sql/log.cc | 70
-rw-r--r--  sql/mysql_priv.h | 25
-rw-r--r--  sql/mysqld.cc | 40
-rw-r--r--  sql/opt_range.cc | 6
-rw-r--r--  sql/password.c | 2
-rw-r--r--  sql/set_var.cc | 19
-rw-r--r--  sql/set_var.h | 16
-rw-r--r--  sql/sql_base.cc | 21
-rw-r--r--  sql/sql_class.cc | 4
-rw-r--r--  sql/sql_class.h | 107
-rw-r--r--  sql/sql_delete.cc | 67
-rw-r--r--  sql/sql_handler.cc | 4
-rw-r--r--  sql/sql_insert.cc | 24
-rw-r--r--  sql/sql_olap.cc | 4
-rw-r--r--  sql/sql_parse.cc | 77
-rw-r--r--  sql/sql_select.cc | 251
-rw-r--r--  sql/sql_select.h | 22
-rw-r--r--  sql/sql_table.cc | 9
-rw-r--r--  sql/sql_udf.cc | 2
-rw-r--r--  sql/sql_union.cc | 8
-rw-r--r--  sql/sql_update.cc | 789
-rw-r--r--  sql/sql_yacc.yy | 113
-rw-r--r--  sql/table.h | 17
-rw-r--r--  sql/unireg.cc | 2
-rw-r--r--  strings/strto.c | 5
-rw-r--r--  support-files/make_mysql_pkg.pl | 89
-rw-r--r--  support-files/mysql.server.sh | 17
-rw-r--r--  vio/vio.c | 3
88 files changed, 2090 insertions, 1229 deletions
diff --git a/.bzrignore b/.bzrignore
index f0fcf652045..20d6e35bb62 100644
--- a/.bzrignore
+++ b/.bzrignore
@@ -75,6 +75,9 @@ Makefile.in'
PENDING/*
TAGS
aclocal.m4
+autom4te-2.53.cache/output.0
+autom4te-2.53.cache/requests
+autom4te-2.53.cache/traces.0
autom4te.cache/*
autom4te.cache/output.0
autom4te.cache/requests
@@ -123,6 +126,9 @@ bdb/db/crdel_auto.c
bdb/db/db_auto.c
bdb/dbinc_auto/*.*
bdb/dbreg/dbreg_auto.c
+bdb/dist/autom4te-2.53.cache/output.0
+bdb/dist/autom4te-2.53.cache/requests
+bdb/dist/autom4te-2.53.cache/traces.0
bdb/dist/autom4te.cache/*
bdb/dist/autom4te.cache/output.0
bdb/dist/autom4te.cache/requests
@@ -205,6 +211,14 @@ bdb/test/include.tcl
bdb/test/logtrack.list
bdb/txn/txn_auto.c
binary/*
+bkpull.log
+bkpull.log.2
+bkpull.log.3
+bkpull.log.4
+bkpull.log.5
+bkpull.log.6
+bkpush.log
+build.log
client/insert_test
client/log_event.cc
client/log_event.h
@@ -232,6 +246,7 @@ config.status
configure
configure.lineno
core
+core.2430
db-*.*.*
dbug/user.t
depcomp
@@ -249,6 +264,9 @@ include/my_config.h
include/my_global.h
include/mysql_version.h
include/widec.h
+innobase/autom4te-2.53.cache/output.0
+innobase/autom4te-2.53.cache/requests
+innobase/autom4te-2.53.cache/traces.0
innobase/autom4te.cache/*
innobase/autom4te.cache/output.0
innobase/autom4te.cache/requests
@@ -313,6 +331,7 @@ libmysqld/item_buff.cc
libmysqld/item_cmpfunc.cc
libmysqld/item_create.cc
libmysqld/item_func.cc
+libmysqld/item_row.cc
libmysqld/item_strfunc.cc
libmysqld/item_sum.cc
libmysqld/item_timefunc.cc
@@ -448,6 +467,7 @@ mysys/test_thr_alarm
mysys/test_thr_lock
mysys/test_vsnprintf
mysys/testhash
+pull.log
regex/re
repl-tests/test-repl-ts/repl-timestamp.master.reject
repl-tests/test-repl/foo-dump-slave.master.
@@ -515,6 +535,7 @@ sql/mysqld-purecov
sql/mysqld-purify
sql/mysqld-quantify
sql/new.cc
+sql/safe_to_cache_query.txt
sql/share/*.sys
sql/share/charsets/gmon.out
sql/share/gmon.out
@@ -524,6 +545,7 @@ sql/share/norwegian/errmsg.sys
sql/sql_select.cc.orig
sql/sql_yacc.cc
sql/sql_yacc.h
+sql/sql_yacc.output
sql/sql_yacc.yy.orig
sql_error.cc
sql_prepare.cc
@@ -557,14 +579,3 @@ vio/test-ssl
vio/test-sslclient
vio/test-sslserver
vio/viotest-ssl
-bkpull.log
-bkpull.log.2
-bkpull.log.3
-build.log
-sql/safe_to_cache_query.txt
-bkpull.log.4
-bkpull.log.5
-bkpull.log.6
-bkpush.log
-sql/sql_yacc.output
-libmysqld/item_row.cc
diff --git a/BUILD/compile-solaris-sparc b/BUILD/compile-solaris-sparc
index 083a6e3d68e..143a4b7867d 100755
--- a/BUILD/compile-solaris-sparc
+++ b/BUILD/compile-solaris-sparc
@@ -11,6 +11,6 @@ then
(cd gemini && aclocal && autoheader && aclocal && automake && autoconf)
fi
-CFLAGS="-g -Wimplicit -Wreturn-type -Wswitch -Wtrigraphs -Wcomment -W -Wchar-subscripts -Wformat -Wparentheses -Wsign-compare -Wwrite-strings -Wunused -O3 -fno-omit-frame-pointer -mcpu=v8 -Wa,-xarch=v8plusa" CXX=gcc CXXFLAGS="-Wimplicit -Wreturn-type -Wid-clash-51 -Wswitch -Wtrigraphs -Wcomment -W -Wchar-subscripts -Wformat -Wparentheses -Wsign-compare -Wwrite-strings -Woverloaded-virtual -Wsign-promo -Wreorder -Wctor-dtor-privacy -Wnon-virtual-dtor -felide-constructors -fno-exceptions -fno-rtti -O3 -fno-omit-frame-pointer -mcpu=v8 -Wa,-xarch=v8plusa -g" ./configure --prefix=/usr/local/mysql --enable-assembler --with-extra-charsets=complex --enable-thread-safe-client
+CFLAGS="-g -Wimplicit -Wreturn-type -Wswitch -Wtrigraphs -Wcomment -W -Wchar-subscripts -Wformat -Wparentheses -Wsign-compare -Wwrite-strings -Wunused -O3 -fno-omit-frame-pointer -mcpu=v8 -Wa,-xarch=v8plusa" CXX=gcc CXXFLAGS="-Wimplicit -Wreturn-type -Wswitch -Wtrigraphs -Wcomment -W -Wchar-subscripts -Wformat -Wparentheses -Wsign-compare -Wwrite-strings -Woverloaded-virtual -Wsign-promo -Wreorder -Wctor-dtor-privacy -Wnon-virtual-dtor -felide-constructors -fno-exceptions -fno-rtti -O3 -fno-omit-frame-pointer -mcpu=v8 -Wa,-xarch=v8plusa -g" ./configure --prefix=/usr/local/mysql --enable-assembler --with-extra-charsets=complex --enable-thread-safe-client
gmake -j 4
diff --git a/BUILD/compile-solaris-sparc-purify b/BUILD/compile-solaris-sparc-purify
index 2fb5c88cd7b..71a60e45cb0 100755
--- a/BUILD/compile-solaris-sparc-purify
+++ b/BUILD/compile-solaris-sparc-purify
@@ -18,7 +18,7 @@ aclocal && autoheader && aclocal && automake && autoconf
(cd bdb/dist && sh s_all)
(cd innobase && aclocal && autoheader && aclocal && automake && autoconf)
-CFLAGS="-g -Wimplicit -Wreturn-type -Wswitch -Wtrigraphs -Wcomment -W -Wchar-subscripts -Wformat -Wimplicit-function-dec -Wimplicit-int -Wparentheses -Wsign-compare -Wwrite-strings -Wunused -DHAVE_purify -DEXTRA_DEBUG -O2" CXX=gcc CXXLD=g++ CXXFLAGS="-g -Wimplicit -Wreturn-type -Wswitch -Wtrigraphs -Wcomment -W -Wchar-subscripts -Wformat -Wparentheses -Wsign-compare -Wwrite-strings -Woverloaded-virtual -Wsign-promo -Wreorder -Wctor-dtor-privacy -Wnon-virtual-dtor -felide-constructors -fno-exceptions -fno-rtti -DHAVE_purify -DEXTRA_DEBUG -O2" ./configure --prefix=/usr/local/mysql --enable-assembler --with-extra-charsets=complex --enable-thread-safe-client --with-berkeley-db --with-innodb $EXTRA_CONFIG_FLAGS
+CFLAGS="-g -Wimplicit -Wreturn-type -Wswitch -Wtrigraphs -Wcomment -W -Wchar-subscripts -Wformat -Wimplicit-int -Wparentheses -Wsign-compare -Wwrite-strings -Wunused -DHAVE_purify -DEXTRA_DEBUG -O2" CXX=gcc CXXLD=g++ CXXFLAGS="-g -Wimplicit -Wreturn-type -Wswitch -Wtrigraphs -Wcomment -W -Wchar-subscripts -Wformat -Wparentheses -Wsign-compare -Wwrite-strings -Woverloaded-virtual -Wsign-promo -Wreorder -Wctor-dtor-privacy -Wnon-virtual-dtor -felide-constructors -fno-exceptions -fno-rtti -DHAVE_purify -DEXTRA_DEBUG -O2" ./configure --prefix=/usr/local/mysql --enable-assembler --with-extra-charsets=complex --enable-thread-safe-client --with-berkeley-db --with-innodb $EXTRA_CONFIG_FLAGS
gmake -j 4
diff --git a/BitKeeper/triggers/post-commit b/BitKeeper/triggers/post-commit
index f8ab599fc98..a2a0ecb2701 100755
--- a/BitKeeper/triggers/post-commit
+++ b/BitKeeper/triggers/post-commit
@@ -19,6 +19,8 @@ BK_STATUS=$BK_STATUS$BK_COMMIT
if [ "$BK_STATUS" = OK ]
then
+CHANGESET=`bk -R prs -r+ -h -d':I:' ChangeSet`
+
#++
# dev-public@
#--
@@ -28,7 +30,7 @@ then
List-ID: <bk.mysql-4.1>
From: $FROM
To: $TO
-Subject: bk commit - 4.1 tree
+Subject: bk commit - 4.1 tree ($CHANGESET)
EOF
bk changes -v -r+
@@ -44,7 +46,7 @@ EOF
List-ID: <bk.mysql-4.1>
From: $FROM
To: $INTERNALS
-Subject: bk commit into 4.1 tree
+Subject: bk commit into 4.1 tree ($CHANGESET)
Below is the list of changes that have just been committed into a local
4.1 repository of $USER. When $USER does a push these changes will
@@ -71,7 +73,7 @@ EOF
List-ID: <bk.mysql-4.1>
From: $FROM
To: $DOCS
-Subject: bk commit - 4.1 tree (Manual)
+Subject: bk commit - 4.1 tree (Manual) ($CHANGESET)
EOF
bk changes -v -r+
diff --git a/acinclude.m4 b/acinclude.m4
index 7190593af27..44436bf0b6d 100644
--- a/acinclude.m4
+++ b/acinclude.m4
@@ -520,7 +520,8 @@ fi
AC_DEFUN(MYSQL_STACK_DIRECTION,
[AC_CACHE_CHECK(stack direction for C alloca, ac_cv_c_stack_direction,
- [AC_TRY_RUN([find_stack_direction ()
+ [AC_TRY_RUN([#include <stdlib.h>
+ int find_stack_direction ()
{
static char *addr = 0;
auto char dummy;
@@ -532,7 +533,7 @@ AC_DEFUN(MYSQL_STACK_DIRECTION,
else
return (&dummy > addr) ? 1 : -1;
}
- main ()
+ int main ()
{
exit (find_stack_direction() < 0);
}], ac_cv_c_stack_direction=1, ac_cv_c_stack_direction=-1,
@@ -1327,5 +1328,36 @@ AC_DEFUN(MYSQL_SYS_LARGEFILE,
fi
])
+
+# Local version of _AC_PROG_CXX_EXIT_DECLARATION that does not
+# include #stdlib.h as this breaks things on Solaris
+# (Conflicts with pthreads and big file handling)
+
+m4_define([_AC_PROG_CXX_EXIT_DECLARATION],
+[for ac_declaration in \
+ ''\
+ 'extern "C" void std::exit (int) throw (); using std::exit;' \
+ 'extern "C" void std::exit (int); using std::exit;' \
+ 'extern "C" void exit (int) throw ();' \
+ 'extern "C" void exit (int);' \
+ 'void exit (int);'
+do
+ _AC_COMPILE_IFELSE([AC_LANG_PROGRAM([@%:@include <stdlib.h>
+$ac_declaration],
+ [exit (42);])],
+ [],
+ [continue])
+ _AC_COMPILE_IFELSE([AC_LANG_PROGRAM([$ac_declaration],
+ [exit (42);])],
+ [break])
+done
+rm -f conftest*
+if test -n "$ac_declaration"; then
+ echo '#ifdef __cplusplus' >>confdefs.h
+ echo $ac_declaration >>confdefs.h
+ echo '#endif' >>confdefs.h
+fi
+])# _AC_PROG_CXX_EXIT_DECLARATION
+
dnl ---------------------------------------------------------------------------
diff --git a/client/errmsg.c b/client/errmsg.c
deleted file mode 100644
index 6cb28f3f53e..00000000000
--- a/client/errmsg.c
+++ /dev/null
@@ -1,125 +0,0 @@
-/* Copyright (C) 2000 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-/* Error messages for MySQL clients */
-/* error messages for the demon is in share/language/errmsg.sys */
-
-#include <my_global.h>
-#include <my_sys.h>
-#include "errmsg.h"
-
-#ifdef GERMAN
-const char *client_errors[]=
-{
- "Unbekannter MySQL Fehler",
- "Kann UNIX-Socket nicht anlegen (%d)",
- "Keine Verbindung zu lokalem MySQL Server, socket: '%-.64s' (%d)",
- "Keine Verbindung zu MySQL Server auf %-.64s (%d)",
- "Kann TCP/IP-Socket nicht anlegen (%d)",
- "Unbekannter MySQL Server Host (%-.64s) (%d)",
- "MySQL Server nicht vorhanden",
- "Protokolle ungleich. Server Version = % d Client Version = %d",
- "MySQL client got out of memory",
- "Wrong host info",
- "Localhost via UNIX socket",
- "%-.64s via TCP/IP",
- "Error in server handshake",
- "Lost connection to MySQL server during query",
- "Commands out of sync; You can't run this command now",
- "Verbindung ueber Named Pipe; Host: %-.64s",
- "Kann nicht auf Named Pipe warten. Host: %-.64s pipe: %-.32s (%lu)",
- "Kann Named Pipe nicht oeffnen. Host: %-.64s pipe: %-.32s (%lu)",
- "Kann den Status der Named Pipe nicht setzen. Host: %-.64s pipe: %-.32s (%lu)",
- "Can't initialize character set %-.64s (path: %-.64s)",
- "Got packet bigger than 'max_allowed_packet'",
- "Embedded server",
- "Error on SHOW SLAVE STATUS:",
- "Error on SHOW SLAVE HOSTS:",
- "Error connecting to slave:",
- "Error connecting to master:"
-
-};
-
-/* Start of code added by Roberto M. Serqueira - martinsc@uol.com.br - 05.24.2001 */
-
-#elif defined PORTUGUESE
-const char *client_errors[]=
-{
- "Erro desconhecido do MySQL",
- "Não pode criar 'UNIX socket' (%d)",
- "Não pode se conectar ao servidor MySQL local através do 'socket' '%-.64s' (%d)",
- "Não pode se conectar ao servidor MySQL em '%-.64s' (%d)",
- "Não pode criar 'socket TCP/IP' (%d)",
- "'Host' servidor MySQL '%-.64s' (%d) desconhecido",
- "Servidor MySQL desapareceu",
- "Incompatibilidade de protocolos. Versão do Servidor: %d - Versão do Cliente: %d",
- "Cliente do MySQL com falta de memória",
- "Informação inválida de 'host'",
- "Localhost via 'UNIX socket'",
- "%-.64s via 'TCP/IP'",
- "Erro na negociação de acesso ao servidor",
- "Conexão perdida com servidor MySQL durante 'query'",
- "Comandos fora de sincronismo. Você não pode executar este comando agora",
- "%-.64s via 'named pipe'",
- "Não pode esperar pelo 'named pipe' para o 'host' %-.64s - 'pipe' %-.32s (%lu)",
- "Não pode abrir 'named pipe' para o 'host' %-.64s - 'pipe' %-.32s (%lu)",
- "Não pode estabelecer o estado do 'named pipe' para o 'host' %-.64s - 'pipe' %-.32s (%lu)",
- "Não pode inicializar conjunto de caracteres %-.64s (caminho %-.64s)",
- "Obteve pacote maior do que 'max_allowed_packet'",
- "Embedded server",
- "Error on SHOW SLAVE STATUS:",
- "Error on SHOW SLAVE HOSTS:",
- "Error connecting to slave:",
- "Error connecting to master:"
-};
-
-#else /* ENGLISH */
-const char *client_errors[]=
-{
- "Unknown MySQL error",
- "Can't create UNIX socket (%d)",
- "Can't connect to local MySQL server through socket '%-.64s' (%d)",
- "Can't connect to MySQL server on '%-.64s' (%d)",
- "Can't create TCP/IP socket (%d)",
- "Unknown MySQL Server Host '%-.64s' (%d)",
- "MySQL server has gone away",
- "Protocol mismatch. Server Version = %d Client Version = %d",
- "MySQL client run out of memory",
- "Wrong host info",
- "Localhost via UNIX socket",
- "%-.64s via TCP/IP",
- "Error in server handshake",
- "Lost connection to MySQL server during query",
- "Commands out of sync; You can't run this command now",
- "%-.64s via named pipe",
- "Can't wait for named pipe to host: %-.64s pipe: %-.32s (%lu)",
- "Can't open named pipe to host: %-.64s pipe: %-.32s (%lu)",
- "Can't set state of named pipe to host: %-.64s pipe: %-.32s (%lu)",
- "Can't initialize character set %-.64s (path: %-.64s)",
- "Got packet bigger than 'max_allowed_packet'",
- "Embedded server",
- "Error on SHOW SLAVE STATUS:",
- "Error on SHOW SLAVE HOSTS:",
- "Error connecting to slave:",
- "Error connecting to master:"
-};
-#endif
-
-
-void init_client_errs(void)
-{
- my_errmsg[CLIENT_ERRMAP] = &client_errors[0];
-}
diff --git a/heap/_check.c b/heap/_check.c
index 4a6482901d9..5ee511bf92a 100644
--- a/heap/_check.c
+++ b/heap/_check.c
@@ -20,19 +20,34 @@
static int check_one_key(HP_KEYDEF *keydef, uint keynr, ulong records,
ulong blength, my_bool print_status);
-static int check_one_rb_key(HP_INFO *info, uint keynr, ulong records,
- my_bool print_status);
-/* Returns 0 if the HEAP is ok */
+
+/*
+ Check if keys and rows are ok in a heap table
+
+ SYNOPSIS
+ heap_check_heap()
+ info Table handler
+ print_status Prints some extra status
+
+ NOTES
+ Doesn't change the state of the table handler
+
+ RETURN VALUES
+ 0 ok
+ 1 error
+*/
int heap_check_heap(HP_INFO *info, my_bool print_status)
{
int error;
uint key;
+ ulong records=0, deleted=0, pos, next_block;
HP_SHARE *share=info->s;
- DBUG_ENTER("heap_check_keys");
+ HP_INFO save_info= *info; /* Needed because scan_init */
+ DBUG_ENTER("heap_check_heap");
- for (error=key=0 ; key < share->keys ; key++)
+ for (error=key= 0 ; key < share->keys ; key++)
{
if (share->keydef[key].algorithm == HA_KEY_ALG_BTREE)
error|= check_one_rb_key(info, key, share->records, print_status);
@@ -40,7 +55,41 @@ int heap_check_heap(HP_INFO *info, my_bool print_status)
error|= check_one_key(share->keydef + key, key, share->records,
share->blength, print_status);
}
+ /*
+ This is basicly the same code as in hp_scan, but we repeat it here to
+ get shorter DBUG log file.
+ */
+ for (pos=next_block= 0 ; ; pos++)
+ {
+ if (pos < next_block)
+ {
+ info->current_ptr+= share->block.recbuffer;
+ }
+ else
+ {
+ next_block+= share->block.records_in_block;
+ if (next_block >= share->records+share->deleted)
+ {
+ next_block= share->records+share->deleted;
+ if (pos >= next_block)
+ break; /* End of file */
+ }
+ }
+ _hp_find_record(info,pos);
+ if (!info->current_ptr[share->reclength])
+ deleted++;
+ else
+ records++;
+ }
+
+ if (records != share->records || deleted != share->deleted)
+ {
+ DBUG_PRINT("error",("Found rows: %lu (%lu) deleted %lu (%lu)",
+ records, share->records, deleted, share->deleted));
+ error= 1;
+ }
+ *info= save_info;
DBUG_RETURN(error);
}
diff --git a/heap/hp_delete.c b/heap/hp_delete.c
index 4ba2f2c5310..73e431e6e66 100644
--- a/heap/hp_delete.c
+++ b/heap/hp_delete.c
@@ -49,6 +49,10 @@ int heap_delete(HP_INFO *info, const byte *record)
pos[share->reclength]=0; /* Record deleted */
share->deleted++;
info->current_hash_ptr=0;
+#if !defined(DBUG_OFF) && defined(EXTRA_HEAP_DEBUG)
+ DBUG_EXECUTE("check_heap",heap_check_heap(info, 0););
+#endif
+
DBUG_RETURN(0);
err:
if (++(share->records) == share->blength)
diff --git a/heap/hp_scan.c b/heap/hp_scan.c
index 487d48c3a95..59e544ca590 100644
--- a/heap/hp_scan.c
+++ b/heap/hp_scan.c
@@ -62,6 +62,7 @@ int heap_scan(register HP_INFO *info, byte *record)
}
if (!info->current_ptr[share->reclength])
{
+ DBUG_PRINT("warning",("Found deleted record"));
info->update= HA_STATE_PREV_FOUND | HA_STATE_NEXT_FOUND;
DBUG_RETURN(my_errno=HA_ERR_RECORD_DELETED);
}
diff --git a/heap/hp_update.c b/heap/hp_update.c
index b789ab82b84..2ed0edf08de 100644
--- a/heap/hp_update.c
+++ b/heap/hp_update.c
@@ -49,6 +49,10 @@ int heap_update(HP_INFO *info, const byte *old, const byte *heap_new)
memcpy(pos,heap_new,(size_t) share->reclength);
if (++(share->records) == share->blength) share->blength+= share->blength;
+
+#if !defined(DBUG_OFF) && defined(EXTRA_HEAP_DEBUG)
+ DBUG_EXECUTE("check_heap",heap_check_heap(info, 0););
+#endif
if (auto_key_changed)
heap_update_auto_increment(info, heap_new);
DBUG_RETURN(0);
diff --git a/heap/hp_write.c b/heap/hp_write.c
index 87211d4c224..f92d8caa633 100644
--- a/heap/hp_write.c
+++ b/heap/hp_write.c
@@ -61,9 +61,13 @@ int heap_write(HP_INFO *info, const byte *record)
info->current_ptr=pos;
info->current_hash_ptr=0;
info->update|=HA_STATE_AKTIV;
+#if !defined(DBUG_OFF) && defined(EXTRA_HEAP_DEBUG)
+ DBUG_EXECUTE("check_heap",heap_check_heap(info, 0););
+#endif
if (share->auto_key)
heap_update_auto_increment(info, record);
DBUG_RETURN(0);
+
err:
DBUG_PRINT("info",("Duplicate key: %d", keydef - share->keydef));
info->errkey= keydef - share->keydef;
@@ -83,6 +87,7 @@ err:
*((byte**) pos)=share->del_link;
share->del_link=pos;
pos[share->reclength]=0; /* Record deleted */
+
DBUG_RETURN(my_errno);
} /* heap_write */
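For context, the heap changes above follow one pattern: the new full-table consistency check (heap_check_heap) is wired into each modifying operation, but only in debug builds compiled with EXTRA_HEAP_DEBUG and only when the check_heap debug keyword is enabled at run time. A self-contained sketch of that debug-only invariant-check idea, using plain assert() instead of the dbug library and hypothetical names throughout:

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-in for HP_SHARE's counters; not the real structure. */
struct heap_counters
{
  unsigned long records, deleted;      /* what the handler believes       */
  unsigned long live_rows, dead_rows;  /* what a full scan actually finds */
};

/* Debug-only invariant check, compiled out of release builds in the same
   spirit as the heap_check_heap() calls above. */
static void check_heap(const struct heap_counters *h)
{
#if !defined(NDEBUG) && defined(EXTRA_HEAP_DEBUG)
  assert(h->live_rows == h->records && h->dead_rows == h->deleted);
#else
  (void) h;
#endif
}

static void write_row(struct heap_counters *h)
{
  h->records++;        /* ... the real insert work would happen here ... */
  h->live_rows++;
  check_heap(h);       /* verify the table after every mutation */
}

int main(void)
{
  struct heap_counters h = {0, 0, 0, 0};
  write_row(&h);
  printf("records=%lu deleted=%lu\n", h.records, h.deleted);
  return 0;
}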
diff --git a/include/errmsg.h b/include/errmsg.h
index 703395a3742..5f462565e33 100644
--- a/include/errmsg.h
+++ b/include/errmsg.h
@@ -28,7 +28,7 @@ extern const char *client_errors[]; /* Error messages */
#define CR_MIN_ERROR 2000 /* For easier client code */
#define CR_MAX_ERROR 2999
-#if defined(OS2) && defined( MYSQL_SERVER)
+#if defined(OS2) && defined(MYSQL_SERVER)
#define CER(X) client_errors[(X)-CR_MIN_ERROR]
#else
#define ER(X) client_errors[(X)-CR_MIN_ERROR]
@@ -51,8 +51,8 @@ extern const char *client_errors[]; /* Error messages */
#define CR_SERVER_LOST 2013
#define CR_COMMANDS_OUT_OF_SYNC 2014
#define CR_NAMEDPIPE_CONNECTION 2015
-#define CR_NAMEDPIPEWAIT_ERROR 2016
-#define CR_NAMEDPIPEOPEN_ERROR 2017
+#define CR_NAMEDPIPEWAIT_ERROR 2016
+#define CR_NAMEDPIPEOPEN_ERROR 2017
#define CR_NAMEDPIPESETSTATE_ERROR 2018
#define CR_CANT_READ_CHARSET 2019
#define CR_NET_PACKET_TOO_LARGE 2020
@@ -62,9 +62,9 @@ extern const char *client_errors[]; /* Error messages */
#define CR_PROBE_SLAVE_CONNECT 2024
#define CR_PROBE_MASTER_CONNECT 2025
#define CR_SSL_CONNECTION_ERROR 2026
+#define CR_MALFORMED_PACKET 2027
/* new 4.1 error codes */
-#define CR_INVALID_CONN_HANDLE 2027
#define CR_NULL_POINTER 2028
#define CR_NO_PREPARE_STMT 2029
#define CR_NOT_ALL_PARAMS_BOUND 2030
@@ -85,3 +85,4 @@ extern const char *client_errors[]; /* Error messages */
#define CR_SHARED_MEMORY_CONNECT_ABANDODED_ERROR 2044
#define CR_SHARED_MEMORY_CONNECT_SET_ERROR 2045
#define CR_CONN_UNKNOW_PROTOCOL 2046
+#define CR_INVALID_CONN_HANDLE 2047
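The renumbering above works because the CR_* codes double as indexes into the client_errors[] array in libmysql/errmsg.c: ER(X) expands to client_errors[(X)-CR_MIN_ERROR], so every code must equal CR_MIN_ERROR plus its position in the array. That is why CR_MALFORMED_PACKET takes over slot 2027 and CR_INVALID_CONN_HANDLE moves to a newly appended slot 2047, with the message tables changed in lockstep. A minimal sketch of the lookup, abbreviated to the two affected entries:

#include <stdio.h>

#define CR_MIN_ERROR            2000
#define CR_MALFORMED_PACKET     2027  /* reuses the old slot */
#define CR_INVALID_CONN_HANDLE  2047  /* appended at the end */

/* Same arithmetic as the ER()/CER() macros; all other entries elided. */
static const char *client_error_demo(int code)
{
  switch (code - CR_MIN_ERROR)
  {
  case 27: return "Malformed packet";
  case 47: return "Invalid connection handle";
  default: return "Unknown MySQL error";
  }
}

int main(void)
{
  printf("%d -> %s\n", CR_MALFORMED_PACKET,
         client_error_demo(CR_MALFORMED_PACKET));
  printf("%d -> %s\n", CR_INVALID_CONN_HANDLE,
         client_error_demo(CR_INVALID_CONN_HANDLE));
  return 0;
}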
diff --git a/include/my_base.h b/include/my_base.h
index b4e39952f22..ccca3f74ec4 100644
--- a/include/my_base.h
+++ b/include/my_base.h
@@ -110,7 +110,8 @@ enum ha_extra_function {
HA_EXTRA_BULK_INSERT_BEGIN,
HA_EXTRA_BULK_INSERT_FLUSH, /* Flush one index */
HA_EXTRA_BULK_INSERT_END,
- HA_EXTRA_PREPARE_FOR_DELETE
+ HA_EXTRA_PREPARE_FOR_DELETE,
+ HA_EXTRA_PREPARE_FOR_UPDATE /* Remove read cache if problems */
};
/* The following is parameter to ha_panic() */
diff --git a/innobase/row/row0sel.c b/innobase/row/row0sel.c
index ce6ed091a48..a3744089258 100644
--- a/innobase/row/row0sel.c
+++ b/innobase/row/row0sel.c
@@ -2702,14 +2702,22 @@ row_search_for_mysql(
unique_search_from_clust_index = TRUE;
- if (prebuilt->select_lock_type == LOCK_NONE
+ if (trx->mysql_n_tables_locked == 0
+ && prebuilt->select_lock_type == LOCK_NONE
&& trx->isolation_level > TRX_ISO_READ_UNCOMMITTED
&& trx->read_view) {
/* This is a SELECT query done as a consistent read,
and the read view has already been allocated:
let us try a search shortcut through the hash
- index */
+ index.
+ NOTE that we must also test that
+ mysql_n_tables_locked == 0, because this might
+ also be INSERT INTO ... SELECT ... or
+ CREATE TABLE ... SELECT ... . Our algorithm is
+ NOT prepared to inserts interleaved with the SELECT,
+ and if we try that, we can deadlock on the adaptive
+ hash index semaphore! */
if (btr_search_latch.writer != RW_LOCK_NOT_LOCKED) {
/* There is an x-latch request: release
diff --git a/isam/extra.c b/isam/extra.c
index 570c396955f..e2f13532ddf 100644
--- a/isam/extra.c
+++ b/isam/extra.c
@@ -123,6 +123,7 @@ int nisam_extra(N_INFO *info, enum ha_extra_function function)
}
#endif
if (!(info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED)))
+ {
if (!(init_io_cache(&info->rec_cache,info->dfile,0,
WRITE_CACHE,info->s->state.data_file_length,
(pbool) (info->lock_type != F_UNLCK),
@@ -131,7 +132,12 @@ int nisam_extra(N_INFO *info, enum ha_extra_function function)
info->opt_flag|=WRITE_CACHE_USED;
info->update&= ~HA_STATE_ROW_CHANGED;
}
+ }
break;
+ case HA_EXTRA_PREPARE_FOR_UPDATE:
+ if (info->s->data_file_type != DYNAMIC_RECORD)
+ break;
+ /* Remove read/write cache if dynamic rows */
case HA_EXTRA_NO_CACHE:
if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED))
{
diff --git a/libmysql/errmsg.c b/libmysql/errmsg.c
index bfd253728ce..2eecb5c2afd 100644
--- a/libmysql/errmsg.c
+++ b/libmysql/errmsg.c
@@ -51,7 +51,7 @@ const char *client_errors[]=
"Error connecting to slave:",
"Error connecting to master:",
"SSL connection error",
- "Invalid connection handle",
+ "Malformed packet",
"Invalid use of null pointer",
"Statement not prepared",
"Not all parameters data supplied",
@@ -70,7 +70,8 @@ const char *client_errors[]=
"Can't open shared memory. %s event don't create for client (%lu)",
"Can't open shared memory. Server abandoded and don't sent the answer event (%lu)",
"Can't open shared memory. Can't send the request event to server (%lu)",
- "Wrong or unknown protocol"
+ "Wrong or unknown protocol",
+ "Invalid connection handle"
};
/* Start of code added by Roberto M. Serqueira - martinsc@uol.com.br - 05.24.2001 */
@@ -83,7 +84,7 @@ const char *client_errors[]=
"Não pode se conectar ao servidor MySQL local através do 'socket' '%-.64s' (%d)",
"Não pode se conectar ao servidor MySQL em '%-.64s' (%d)",
"Não pode criar 'socket TCP/IP' (%d)",
- "'Host' servidor MySQL '%-.64s' (%d) desconhecido",
+ "'Host' servidor MySQL '%-.64s' (%d) desconhecido",
"Servidor MySQL desapareceu",
"Incompatibilidade de protocolos. Versão do Servidor: %d - Versão do Cliente: %d",
"Cliente do MySQL com falta de memória",
@@ -105,7 +106,7 @@ const char *client_errors[]=
"Error connecting to slave:",
"Error connecting to master:",
"SSL connection error",
- "Invalid connection handle",
+ "Malformed packet",
"Invalid use of null pointer",
"Statement not prepared",
"Not all parameters data supplied",
@@ -124,7 +125,8 @@ const char *client_errors[]=
"Can't open shared memory. %s event don't create for client (%lu)",
"Can't open shared memory. Server abandoded and don't sent the answer event (%lu)",
"Can't open shared memory. Can't send the request event to server (%lu)",
- "Wrong or unknown protocol"
+ "Wrong or unknown protocol",
+ "Invalid connection handle"
};
#else /* ENGLISH */
@@ -157,7 +159,7 @@ const char *client_errors[]=
"Error connecting to slave:",
"Error connecting to master:",
"SSL connection error",
- "Invalid connection handle",
+ "Malformed packet",
"Invalid use of null pointer",
"Statement not prepared",
"Not all parameters data supplied",
@@ -176,7 +178,8 @@ const char *client_errors[]=
"Can't open shared memory. %s event don't create for client (%lu)",
"Can't open shared memory. Server abandoded and don't sent the answer event (%lu)",
"Can't open shared memory. Can't send the request event to server (%lu)",
- "Wrong or unknown protocol"
+ "Wrong or unknown protocol",
+ "Invalid connection handle"
};
#endif
diff --git a/libmysql/libmysql.c b/libmysql/libmysql.c
index 94a32477c83..1e33bbb6a93 100644
--- a/libmysql/libmysql.c
+++ b/libmysql/libmysql.c
@@ -554,7 +554,7 @@ net_safe_read(MYSQL *mysql)
DBUG_PRINT("error",("Wrong connection or packet. fd: %s len: %d",
vio_description(net->vio),len));
end_server(mysql);
- net->last_errno=(net->last_errno == ER_NET_PACKET_TOO_LARGE ?
+ net->last_errno=(net->last_errno == ER_NET_PACKET_TOO_LARGE ?
CR_NET_PACKET_TOO_LARGE:
CR_SERVER_LOST);
strmov(net->last_error,ER(net->last_errno));
@@ -1196,7 +1196,7 @@ static MYSQL_DATA *read_rows(MYSQL *mysql,MYSQL_FIELD *mysql_fields,
ulong pkt_len;
ulong len;
uchar *cp;
- char *to;
+ char *to, *end_to;
MYSQL_DATA *result;
MYSQL_ROWS **prev_ptr,*cur;
NET *net = &mysql->net;
@@ -1242,6 +1242,7 @@ static MYSQL_DATA *read_rows(MYSQL *mysql,MYSQL_FIELD *mysql_fields,
*prev_ptr=cur;
prev_ptr= &cur->next;
to= (char*) (cur->data+fields+1);
+ end_to=to+pkt_len-1;
for (field=0 ; field < fields ; field++)
{
if ((len=(ulong) net_field_length(&cp)) == NULL_LENGTH)
@@ -1251,6 +1252,13 @@ static MYSQL_DATA *read_rows(MYSQL *mysql,MYSQL_FIELD *mysql_fields,
else
{
cur->data[field] = to;
+ if (to+len > end_to)
+ {
+ free_rows(result);
+ net->last_errno=CR_MALFORMED_PACKET;
+ strmov(net->last_error,ER(net->last_errno));
+ DBUG_RETURN(0);
+ }
memcpy(to,(char*) cp,len); to[len]=0;
to+=len+1;
cp+=len;
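The new end_to pointer and the to+len > end_to test above guard against a malformed result packet whose length-encoded field claims more bytes than the packet actually contains; instead of overrunning the row buffer, the client now frees the partial result and reports CR_MALFORMED_PACKET. A standalone sketch of that bounds check, with simplified names and none of the real protocol framing:

#include <stdio.h>
#include <string.h>

/* Copy one length-prefixed field into the row buffer, refusing lengths that
   would run past the end of the buffer (leaving room for the NUL). */
static int copy_field(char *to, const char *end_to,
                      const unsigned char *cp, unsigned long len)
{
  if (len >= (unsigned long) (end_to - to))
    return -1;                 /* caller maps this to CR_MALFORMED_PACKET */
  memcpy(to, cp, len);
  to[len] = '\0';
  return 0;
}

int main(void)
{
  char row_buf[16];
  const unsigned char packet[] = "abcd";
  if (copy_field(row_buf, row_buf + sizeof(row_buf), packet, 1000) < 0)
    puts("malformed packet rejected");
  if (copy_field(row_buf, row_buf + sizeof(row_buf), packet, 4) == 0)
    printf("field copied: %s\n", row_buf);
  return 0;
}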
diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc
index 4726cbb44a7..91b815c3e3b 100644
--- a/libmysqld/lib_sql.cc
+++ b/libmysqld/lib_sql.cc
@@ -274,11 +274,39 @@ static bool check_user(THD *thd,enum_server_command command, const char *user,
}
+/*
+ Make a copy of array and the strings array points to
+*/
+
+char **copy_arguments(int argc, char **argv)
+{
+ uint length= 0;
+ char **from, **res, **end= argv+argc;
+
+ for (from=argv ; from != end ; from++)
+ length+= strlen(*from);
+
+ if ((res= (char**) my_malloc(sizeof(argv)*(argc+1)+length+argc,
+ MYF(MY_WME))))
+ {
+ char **to= res, *to_str= (char*) (res+argc+1);
+ for (from=argv ; from != end ;)
+ {
+ *to++= to_str;
+ to_str= strmov(to_str, *from++)+1;
+ }
+ *to= 0; // Last ptr should be null
+ }
+ return res;
+}
+
+
extern "C"
{
static my_bool inited, org_my_init_done;
ulong max_allowed_packet, net_buffer_length;
+char ** copy_arguments_ptr= 0;
int STDCALL mysql_server_init(int argc, char **argv, char **groups)
{
@@ -302,7 +330,7 @@ int STDCALL mysql_server_init(int argc, char **argv, char **groups)
argvp = (char ***) &fake_argv;
}
if (!groups)
- groups = (char**) fake_groups;
+ groups = (char**) fake_groups;
my_umask=0660; // Default umask for new files
my_umask_dir=0700; // Default umask for new directories
@@ -318,6 +346,14 @@ int STDCALL mysql_server_init(int argc, char **argv, char **groups)
MY_INIT((char *)"mysql_embedded"); // init my_sys library & pthreads
}
+ /*
+ Make a copy of the arguments to guard against applications that
+ may change or move the initial arguments.
+ */
+ if (argvp == &argv)
+ if (!(copy_arguments_ptr= argv= copy_arguments(argc, argv)))
+ return 1;
+
tzset(); // Set tzname
start_time=time((time_t*) 0);
@@ -565,6 +601,8 @@ int STDCALL mysql_server_init(int argc, char **argv, char **groups)
void STDCALL mysql_server_end()
{
+ my_free((char*) copy_arguments_ptr, MYF(MY_ALLOW_ZERO_PTR));
+ copy_arguments_ptr=0;
clean_up(0);
#ifdef THREAD
/* Don't call my_thread_end() if the application is using MY_INIT() */
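copy_arguments() above packs the pointer array and the string bytes into one my_malloc() block, so mysql_server_end() can release the whole copy with a single my_free(); keeping a private copy protects the embedded server against applications that later change or move their argv. A standalone sketch of the same single-allocation layout, using plain libc instead of mysys:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char **copy_args(int argc, char **argv)
{
  size_t length = 0;
  for (int i = 0; i < argc; i++)
    length += strlen(argv[i]) + 1;            /* text plus NUL terminators */

  char **res = malloc(sizeof(char *) * (argc + 1) + length);
  if (!res)
    return NULL;

  char *to_str = (char *) (res + argc + 1);   /* strings live after the pointers */
  for (int i = 0; i < argc; i++)
  {
    res[i] = to_str;
    size_t n = strlen(argv[i]) + 1;
    memcpy(to_str, argv[i], n);
    to_str += n;
  }
  res[argc] = NULL;                           /* last pointer is NULL, as above */
  return res;
}

int main(int argc, char **argv)
{
  char **copy = copy_args(argc, argv);
  if (copy)
  {
    for (int i = 0; copy[i]; i++)
      puts(copy[i]);
    free(copy);                               /* one allocation, one free */
  }
  return 0;
}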
diff --git a/myisam/mi_check.c b/myisam/mi_check.c
index d3222a770a8..34222a5703b 100644
--- a/myisam/mi_check.c
+++ b/myisam/mi_check.c
@@ -2329,13 +2329,13 @@ int mi_repair_parallel(MI_CHECK *param, register MI_INFO *info,
for (i=0 ; i < sort_info.total_keys ; i++)
{
sort_param[i].read_cache=param->read_cache;
+ /*
+ two approaches: the same amount of memory for each thread
+ or the memory for the same number of keys for each thread...
+ In the second one all the threads will fill their sort_buffers
+ (and call write_keys) at the same time, putting more stress on i/o.
+ */
sort_param[i].sortbuff_size=
- /*
- two approaches: the same amount of memory for each thread
- or the memory for the same number of keys for each thread...
- In the second one all the threads will fill their sort_buffers
- (and call write_keys) at the same time, putting more stress on i/o.
- */
#ifndef USING_SECOND_APPROACH
param->sort_buffer_length/sort_info.total_keys;
#else
diff --git a/myisam/mi_extra.c b/myisam/mi_extra.c
index 39eb4b0bd99..d7a3aea516d 100644
--- a/myisam/mi_extra.c
+++ b/myisam/mi_extra.c
@@ -165,6 +165,10 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg)
HA_STATE_EXTEND_BLOCK);
}
break;
+ case HA_EXTRA_PREPARE_FOR_UPDATE:
+ if (info->s->data_file_type != DYNAMIC_RECORD)
+ break;
+ /* Remove read/write cache if dynamic rows */
case HA_EXTRA_NO_CACHE:
if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED))
{
diff --git a/myisam/mi_rnext_same.c b/myisam/mi_rnext_same.c
index 88146c89f85..200e715bddc 100644
--- a/myisam/mi_rnext_same.c
+++ b/myisam/mi_rnext_same.c
@@ -28,14 +28,13 @@
int mi_rnext_same(MI_INFO *info, byte *buf)
{
int error;
- uint inx,flag,not_used;
+ uint inx,not_used;
MI_KEYDEF *keyinfo;
DBUG_ENTER("mi_rnext_same");
if ((int) (inx=info->lastinx) < 0 || info->lastpos == HA_OFFSET_ERROR)
DBUG_RETURN(my_errno=HA_ERR_WRONG_INDEX);
keyinfo=info->s->keyinfo+inx;
- flag=SEARCH_BIGGER; /* Read next */
if (fast_mi_readinfo(info))
DBUG_RETURN(my_errno);
@@ -57,7 +56,7 @@ int mi_rnext_same(MI_INFO *info, byte *buf)
for (;;)
{
if ((error=_mi_search_next(info,keyinfo,info->lastkey,
- info->lastkey_length,flag,
+ info->lastkey_length,SEARCH_BIGGER,
info->s->state.key_root[inx])))
break;
if (ha_key_cmp(keyinfo->seg,info->lastkey2,info->lastkey,
diff --git a/myisam/sort.c b/myisam/sort.c
index c9ba0f51d9c..7ad23df9358 100644
--- a/myisam/sort.c
+++ b/myisam/sort.c
@@ -376,7 +376,6 @@ pthread_handler_decl(thr_find_all_keys,arg)
mi_check_print_error(info->sort_info->param,"Sort buffer to small"); /* purecov: tested */
goto err; /* purecov: tested */
}
-// (*info->lock_in_memory)(info->sort_info->param);/* Everything is allocated */
if (info->sort_info->param->testflag & T_VERBOSE)
printf("Key %d - Allocating buffer for %d keys\n",info->key+1,keys);
@@ -456,9 +455,9 @@ int thr_write_keys(MI_SORT_PARAM *sort_param)
byte *mergebuf=0;
LINT_INIT(length);
- for (i=0, sinfo=sort_param ; i<sort_info->total_keys ; i++,
- rec_per_key_part+=sinfo->keyinfo->keysegs,
- sinfo++)
+ for (i= 0, sinfo= sort_param ;
+ i < sort_info->total_keys ;
+ i++, rec_per_key_part+=sinfo->keyinfo->keysegs, sinfo++)
{
if (!sinfo->sort_keys)
{
@@ -484,15 +483,18 @@ int thr_write_keys(MI_SORT_PARAM *sort_param)
}
}
my_free((gptr) sinfo->sort_keys,MYF(0));
- my_free(mi_get_rec_buff_ptr(info, sinfo->rec_buff), MYF(MY_ALLOW_ZERO_PTR));
+ my_free(mi_get_rec_buff_ptr(info, sinfo->rec_buff),
+ MYF(MY_ALLOW_ZERO_PTR));
sinfo->sort_keys=0;
}
- for (i=0, sinfo=sort_param ; i<sort_info->total_keys ; i++,
- delete_dynamic(&sinfo->buffpek),
- close_cached_file(&sinfo->tempfile),
- close_cached_file(&sinfo->tempfile_for_exceptions),
- sinfo++)
+ for (i= 0, sinfo= sort_param ;
+ i < sort_info->total_keys ;
+ i++,
+ delete_dynamic(&sinfo->buffpek),
+ close_cached_file(&sinfo->tempfile),
+ close_cached_file(&sinfo->tempfile_for_exceptions),
+ sinfo++)
{
if (got_error)
continue;
diff --git a/myisammrg/myrg_extra.c b/myisammrg/myrg_extra.c
index ad17b0f82f2..d375b45df99 100644
--- a/myisammrg/myrg_extra.c
+++ b/myisammrg/myrg_extra.c
@@ -38,7 +38,8 @@ int myrg_extra(MYRG_INFO *info,enum ha_extra_function function,
}
else
{
- if (function == HA_EXTRA_NO_CACHE || function == HA_EXTRA_RESET)
+ if (function == HA_EXTRA_NO_CACHE || function == HA_EXTRA_RESET ||
+ function == HA_EXTRA_PREPARE_FOR_UPDATE)
info->cache_in_use=0;
if (function == HA_EXTRA_RESET || function == HA_EXTRA_RESET_STATE)
{
diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh
index 745cde325f9..edfff5c4d46 100644
--- a/mysql-test/mysql-test-run.sh
+++ b/mysql-test/mysql-test-run.sh
@@ -47,13 +47,17 @@ which ()
sleep_until_file_deleted ()
{
- file=$1
+ pid=$1;
+ file=$2
loop=$SLEEP_TIME_FOR_DELETE
while (test $loop -gt 0)
do
if [ ! -r $file ]
then
- sleep $SLEEP_TIME_AFTER_RESTART
+ if test $pid != "0"
+ then
+ wait_for_pid $pid
+ fi
return
fi
sleep 1
@@ -79,6 +83,13 @@ sleep_until_file_created ()
exit 1;
}
+# For the future
+
+wait_for_pid()
+{
+ pid=$1
+}
+
# No paths below as we can't be sure where the program is!
SED=sed
@@ -152,6 +163,7 @@ TOT_TEST=0
USERT=0
SYST=0
REALT=0
+FAST_START=""
MYSQL_TMP_DIR=$MYSQL_TEST_DIR/var/tmp
SLAVE_LOAD_TMPDIR=../../var/tmp #needs to be same length to test logging
RES_SPACE=" "
@@ -319,7 +331,7 @@ while test $# -gt 0; do
VALGRIND="valgrind --alignment=8 --leak-check=yes"
EXTRA_MASTER_MYSQLD_OPT="$EXTRA_MASTER_MYSQLD_OPT --skip-safemalloc"
EXTRA_SLAVE_MYSQLD_OPT="$EXTRA_SLAVE_MYSQLD_OPT --skip-safemalloc"
- SLEEP_TIME_AFTER_RESTART=60
+ #SLEEP_TIME_AFTER_RESTART=120
SLEEP_TIME_FOR_DELETE=60
;;
--valgrind-options=*)
@@ -340,6 +352,9 @@ while test $# -gt 0; do
--debug=d:t:i:O,$MYSQL_TEST_DIR/var/log/slave.trace"
EXTRA_MYSQL_TEST_OPT="$EXTRA_MYSQL_TEST_OPT --debug"
;;
+ --fast)
+ FAST_START=1
+ ;;
-- ) shift; break ;;
--* ) $ECHO "Unrecognized option: $1"; exit 1 ;;
* ) break ;;
@@ -736,12 +751,19 @@ EOF
manager_term()
{
- ident=$1
+ pid=$1
+ ident=$2
shift
if [ $USE_MANAGER = 0 ] ; then
- $MYSQLADMIN --no-defaults -uroot --socket=$MYSQL_TMP_DIR/$ident.sock -O \
- connect_timeout=5 -O shutdown_timeout=20 shutdown >> $MYSQL_MANAGER_LOG 2>&1
- return
+ $MYSQLADMIN --no-defaults -uroot --socket=$MYSQL_TMP_DIR/$ident.sock --connect_timeout=5 --shutdown_timeout=20 shutdown >> $MYSQL_MANAGER_LOG 2>&1
+ res=$?
+ # Some systems require an extra connect
+ $MYSQLADMIN --no-defaults -uroot --socket=$MYSQL_TMP_DIR/$ident.sock --connect_timeout=1 ping >> $MYSQL_MANAGER_LOG 2>&1
+ if test $res = 0
+ then
+ wait_for_pid $pid
+ fi
+ return $res
fi
$MYSQL_MANAGER_CLIENT $MANAGER_QUIET_OPT --user=$MYSQL_MANAGER_USER \
--password=$MYSQL_MANAGER_PW --port=$MYSQL_MANAGER_PORT <<EOF
@@ -983,12 +1005,13 @@ stop_slave ()
fi
if [ x$this_slave_running = x1 ]
then
- manager_term $slave_ident
+ pid=`$CAT $slave_pid`
+ manager_term $pid $slave_ident
if [ $? != 0 ] && [ -f $slave_pid ]
then # try harder!
$ECHO "slave not cooperating with mysqladmin, will try manual kill"
- kill `$CAT $slave_pid`
- sleep_until_file_deleted $slave_pid
+ kill $pid
+ sleep_until_file_deleted $pid $slave_pid
if [ -f $slave_pid ] ; then
$ECHO "slave refused to die. Sending SIGKILL"
kill -9 `$CAT $slave_pid`
@@ -1007,12 +1030,13 @@ stop_master ()
{
if [ x$MASTER_RUNNING = x1 ]
then
- manager_term master
+ pid=`$CAT $MASTER_MYPID`
+ manager_term $pid master
if [ $? != 0 ] && [ -f $MASTER_MYPID ]
then # try harder!
$ECHO "master not cooperating with mysqladmin, will try manual kill"
- kill `$CAT $MASTER_MYPID`
- sleep_until_file_deleted $MASTER_MYPID
+ kill $pid
+ sleep_until_file_deleted $pid $MASTER_MYPID
if [ -f $MASTER_MYPID ] ; then
$ECHO "master refused to die. Sending SIGKILL"
kill -9 `$CAT $MASTER_MYPID`
@@ -1233,14 +1257,19 @@ run_testcase ()
if [ -z "$USE_RUNNING_SERVER" ]
then
- # Ensure that no old mysqld test servers are running
- $MYSQLADMIN --no-defaults --socket=$MASTER_MYSOCK -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
- $MYSQLADMIN --no-defaults --socket=$SLAVE_MYSOCK -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
- $MYSQLADMIN --no-defaults --host=$hostname --port=$MASTER_MYPORT -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
- $MYSQLADMIN --no-defaults --host=$hostname --port=$SLAVE_MYPORT -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
- $MYSQLADMIN --no-defaults --host=$hostname --port=`expr $SLAVE_MYPORT + 1` -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
- sleep_until_file_deleted $MASTER_MYPID
- sleep_until_file_deleted $SLAVE_MYPID
+ if [ -z "$FAST_START" ]
+ then
+ # Ensure that no old mysqld test servers are running
+ $MYSQLADMIN --no-defaults --socket=$MASTER_MYSOCK -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
+ $MYSQLADMIN --no-defaults --socket=$SLAVE_MYSOCK -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
+ $MYSQLADMIN --no-defaults --host=$hostname --port=$MASTER_MYPORT -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
+ $MYSQLADMIN --no-defaults --host=$hostname --port=$SLAVE_MYPORT -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
+ $MYSQLADMIN --no-defaults --host=$hostname --port=`expr $SLAVE_MYPORT + 1` -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
+ sleep_until_file_deleted 0 $MASTER_MYPID
+ sleep_until_file_deleted 0 $SLAVE_MYPID
+ else
+ rm $MASTER_MYPID $SLAVE_MYPID
+ fi
# Kill any running managers
if [ -f "$MANAGER_PID_FILE" ]
diff --git a/mysql-test/r/alter_table.result b/mysql-test/r/alter_table.result
index 159421e8f88..cba486109ac 100644
--- a/mysql-test/r/alter_table.result
+++ b/mysql-test/r/alter_table.result
@@ -1,4 +1,4 @@
-drop table if exists t1;
+drop table if exists t1,t2;
create table t1 (
col1 int not null auto_increment primary key,
col2 varchar(30) not null,
diff --git a/mysql-test/r/distinct.result b/mysql-test/r/distinct.result
index fec5ece8ddf..809eb22f987 100644
--- a/mysql-test/r/distinct.result
+++ b/mysql-test/r/distinct.result
@@ -198,6 +198,30 @@ a
select distinct 1 from t1,t3 where t1.a=t3.a;
1
1
+explain SELECT distinct t1.a from t1;
+table type possible_keys key key_len ref rows Extra
+t1 index NULL PRIMARY 4 NULL 2 Using index
+explain SELECT distinct t1.a from t1 order by a desc;
+table type possible_keys key key_len ref rows Extra
+t1 index NULL PRIMARY 4 NULL 2 Using index
+explain SELECT t1.a from t1 group by a order by a desc;
+table type possible_keys key key_len ref rows Extra
+t1 index NULL PRIMARY 4 NULL 2 Using index
+explain SELECT distinct t1.a from t1 order by a desc limit 1;
+table type possible_keys key key_len ref rows Extra
+t1 index NULL PRIMARY 4 NULL 2 Using index
+explain SELECT distinct a from t3 order by a desc limit 2;
+table type possible_keys key key_len ref rows Extra
+t3 index NULL a 5 NULL 204 Using index
+explain SELECT distinct a,b from t3 order by a+1;
+table type possible_keys key key_len ref rows Extra
+t3 ALL NULL NULL NULL NULL 204 Using temporary; Using filesort
+explain SELECT distinct a,b from t3 order by a limit 10;
+table type possible_keys key key_len ref rows Extra
+t3 index NULL a 5 NULL 204 Using temporary
+explain SELECT a,b from t3 group by a,b order by a+1;
+table type possible_keys key key_len ref rows Extra
+t3 ALL NULL NULL NULL NULL 204 Using temporary; Using filesort
drop table t1,t2,t3,t4;
CREATE TABLE t1 (name varchar(255));
INSERT INTO t1 VALUES ('aa'),('ab'),('ac'),('ad'),('ae');
diff --git a/mysql-test/r/func_math.result b/mysql-test/r/func_math.result
index f1c0de2f88a..811a16fff6c 100644
--- a/mysql-test/r/func_math.result
+++ b/mysql-test/r/func_math.result
@@ -31,9 +31,10 @@ log10(100) log10(18) log10(-4) log10(0) log10(NULL)
select pow(10,log10(10)),power(2,4);
pow(10,log10(10)) power(2,4)
10.000000 16.000000
+set @@rand_seed1=10000000,@@rand_seed2=1000000;
select rand(999999),rand();
rand(999999) rand()
-0.014231365187309 0.8078568166195
+0.014231365187309 0.028870999839968
select pi(),sin(pi()/2),cos(pi()/2),abs(tan(pi())),cot(1),asin(1),acos(0),atan(1);
PI() sin(pi()/2) cos(pi()/2) abs(tan(pi())) cot(1) asin(1) acos(0) atan(1)
3.141593 1.000000 0.000000 0.000000 0.64209262 1.570796 1.570796 0.785398
diff --git a/mysql-test/r/group_by.result b/mysql-test/r/group_by.result
index b5a35b981db..696d8200b70 100644
--- a/mysql-test/r/group_by.result
+++ b/mysql-test/r/group_by.result
@@ -207,6 +207,14 @@ Documentation 0
Host communication 0
kkkkkkkkkkk lllllllllll 3
Test Procedures 0
+select value,description,COUNT(bug_id) from t2 left join t1 on t2.program=t1.product and t2.value=t1.component where program="AAAAA" group by value having COUNT(bug_id) IN (0,2);
+value description COUNT(bug_id)
+BBBBBBBBBBBBB - conversion 2
+BBBBBBBBBBBBB - eeeeeeeee 0
+BBBBBBBBBBBBB - generic 2
+Documentation 0
+Host communication 0
+Test Procedures 0
drop table t1,t2;
create table t1 (foo int);
insert into t1 values (1);
@@ -234,6 +242,13 @@ userid count(*)
3 3
2 1
1 2
+select userid,count(*) from t1 group by userid desc having (count(*)+1) IN (4,3);
+userid count(*)
+3 3
+1 2
+select userid,count(*) from t1 group by userid desc having 3 IN (1,COUNT(*));
+userid count(*)
+3 3
explain select spid,count(*) from t1 where spid between 1 and 2 group by spid desc;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range spID spID 5 NULL 2 Using where; Using index
diff --git a/mysql-test/r/innodb.result b/mysql-test/r/innodb.result
index 81064143872..467436a2f85 100644
--- a/mysql-test/r/innodb.result
+++ b/mysql-test/r/innodb.result
@@ -1021,3 +1021,40 @@ id code name
7 4 Matt
COMMIT;
DROP TABLE t1;
+drop table if exists t1,t2;
+create table t1 (n int(10), d int(10)) type=innodb;
+create table t2 (n int(10), d int(10)) type=innodb;
+insert into t1 values(1,1),(1,2);
+insert into t2 values(1,10),(2,20);
+UPDATE t1,t2 SET t1.d=t2.d,t2.d=30 WHERE t1.n=t2.n;
+select * from t1;
+n d
+1 10
+1 10
+select * from t2;
+n d
+1 30
+2 20
+drop table t1,t2;
+create table t1 (a int, b int) type=innodb;
+insert into t1 values(20,null);
+select t2.b, ifnull(t2.b,"this is null") from t1 as t2 left join t1 as t3 on
+t2.b=t3.a;
+b ifnull(t2.b,"this is null")
+NULL this is null
+select t2.b, ifnull(t2.b,"this is null") from t1 as t2 left join t1 as t3 on
+t2.b=t3.a order by 1;
+b ifnull(t2.b,"this is null")
+NULL this is null
+insert into t1 values(10,null);
+select t2.b, ifnull(t2.b,"this is null") from t1 as t2 left join t1 as t3 on
+t2.b=t3.a order by 1;
+b ifnull(t2.b,"this is null")
+NULL this is null
+NULL this is null
+drop table t1;
+create table t1 (a varchar(10) not null) type=myisam;
+create table t2 (b varchar(10) not null unique) type=innodb;
+select t1.a from t1,t2 where t1.a=t2.b;
+a
+drop table t1,t2;
diff --git a/mysql-test/r/multi_update.result b/mysql-test/r/multi_update.result
index ce3f7e90f6b..7d1f5bd53f6 100644
--- a/mysql-test/r/multi_update.result
+++ b/mysql-test/r/multi_update.result
@@ -20,7 +20,7 @@ count(*)
10
select count(*) from t2 where t = "bbb";
count(*)
-10
+50
select count(*) from t2 where id2 > 90;
count(*)
50
@@ -70,71 +70,61 @@ create table t1(id1 int not null primary key, t varchar(100)) pack_keys = 1;
create table t2(id2 int not null, t varchar(100), index(id2)) pack_keys = 1;
delete t1 from t1,t2 where t1.id1 = t2.id2 and t1.id1 > 500;
drop table t1,t2;
-DROP TABLE IF EXISTS a,b,c;
-CREATE TABLE a (
+CREATE TABLE t1 (
id int(11) NOT NULL default '0',
name varchar(10) default NULL,
PRIMARY KEY (id)
) TYPE=MyISAM;
-INSERT INTO a VALUES (1,'aaa'),(2,'aaa'),(3,'aaa');
-CREATE TABLE b (
+INSERT INTO t1 VALUES (1,'aaa'),(2,'aaa'),(3,'aaa');
+CREATE TABLE t2 (
id int(11) NOT NULL default '0',
name varchar(10) default NULL,
PRIMARY KEY (id)
) TYPE=MyISAM;
-INSERT INTO b VALUES (2,'bbb'),(3,'bbb'),(4,'bbb');
-CREATE TABLE c (
+INSERT INTO t2 VALUES (2,'bbb'),(3,'bbb'),(4,'bbb');
+CREATE TABLE t3 (
id int(11) NOT NULL default '0',
mydate datetime default NULL,
PRIMARY KEY (id)
) TYPE=MyISAM;
-INSERT INTO c VALUES (1,'2002-02-04 00:00:00'),(3,'2002-05-12 00:00:00'),(5,'2002-05-12 00:00:00'),(6,'2002-06-22
+INSERT INTO t3 VALUES (1,'2002-02-04 00:00:00'),(3,'2002-05-12 00:00:00'),(5,'2002-05-12 00:00:00'),(6,'2002-06-22
00:00:00'),(7,'2002-07-22 00:00:00');
-delete a,b,c from a,b,c
-where to_days(now())-to_days(c.mydate)>=30
-and c.id=a.id and c.id=b.id;
-select * from c;
+delete t1,t2,t3 from t1,t2,t3 where to_days(now())-to_days(t3.mydate)>=30 and t3.id=t1.id and t3.id=t2.id;
+select * from t3;
id mydate
1 2002-02-04 00:00:00
5 2002-05-12 00:00:00
6 2002-06-22 00:00:00
7 2002-07-22 00:00:00
-DROP TABLE IF EXISTS a,b,c;
-drop table if exists parent, child;
-CREATE TABLE IF NOT EXISTS `parent` (
+DROP TABLE IF EXISTS t1,t2,t3;
+CREATE TABLE IF NOT EXISTS `t1` (
`id` int(11) NOT NULL auto_increment,
`tst` text,
`tst1` text,
PRIMARY KEY (`id`)
) TYPE=MyISAM;
-CREATE TABLE IF NOT EXISTS `child` (
+CREATE TABLE IF NOT EXISTS `t2` (
`ID` int(11) NOT NULL auto_increment,
`ParId` int(11) default NULL,
`tst` text,
`tst1` text,
PRIMARY KEY (`ID`),
-KEY `IX_ParId_child` (`ParId`),
-FOREIGN KEY (`ParId`) REFERENCES `test.parent` (`id`)
+KEY `IX_ParId_t2` (`ParId`),
+FOREIGN KEY (`ParId`) REFERENCES `t1` (`id`)
) TYPE=MyISAM;
-INSERT INTO parent(tst,tst1)
-VALUES("MySQL","MySQL AB"), ("MSSQL","Microsoft"), ("ORACLE","ORACLE");
-INSERT INTO child(ParId)
-VALUES(1), (2), (3);
-select * from child;
+INSERT INTO t1(tst,tst1) VALUES("MySQL","MySQL AB"), ("MSSQL","Microsoft"), ("ORACLE","ORACLE");
+INSERT INTO t2(ParId) VALUES(1), (2), (3);
+select * from t2;
ID ParId tst tst1
1 1 NULL NULL
2 2 NULL NULL
3 3 NULL NULL
-UPDATE child, parent
-SET child.tst = parent.tst,
-child.tst1 = parent.tst1
-WHERE child.ParId = parent.Id;
-select * from child;
+UPDATE t2, t1 SET t2.tst = t1.tst, t2.tst1 = t1.tst1 WHERE t2.ParId = t1.Id;
+select * from t2;
ID ParId tst tst1
1 1 MySQL MySQL AB
2 2 MSSQL Microsoft
3 3 ORACLE ORACLE
-drop table parent, child;
drop table if exists t1, t2 ;
create table t1 (n numeric(10));
create table t2 (n numeric(10));
@@ -176,3 +166,73 @@ n d
2 20
unlock tables;
drop table t1,t2;
+set sql_safe_updates=1;
+create table t1 (n int(10), d int(10));
+create table t2 (n int(10), d int(10));
+insert into t1 values(1,1);
+insert into t2 values(1,10),(2,20);
+UPDATE t1,t2 SET t1.d=t2.d WHERE t1.n=t2.n;
+You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column
+set sql_safe_updates=0;
+drop table t1,t2;
+set timestamp=1038401397;
+create table t1 (n int(10) not null primary key, d int(10), t timestamp);
+create table t2 (n int(10) not null primary key, d int(10), t timestamp);
+insert into t1 values(1,1,NULL);
+insert into t2 values(1,10,NULL),(2,20,NULL);
+set timestamp=1038000000;
+UPDATE t1,t2 SET t1.d=t2.d WHERE t1.n=t2.n;
+select * from t1;
+n d t
+1 10 20021123002000
+select * from t2;
+n d t
+1 10 20021127154957
+2 20 20021127154957
+UPDATE t1,t2 SET 1=2 WHERE t1.n=t2.n;
+You have an error in your SQL syntax. Check the manual that corresponds to your MySQL server version for the right syntax to use near '1=2 WHERE t1.n=t2.n' at line 1
+drop table t1,t2;
+set timestamp=0;
+set sql_safe_updates=0;
+create table t1 (n int(10) not null primary key, d int(10));
+create table t2 (n int(10) not null primary key, d int(10));
+insert into t1 values(1,1), (3,3);
+insert into t2 values(1,10),(2,20);
+UPDATE t2 left outer join t1 on t1.n=t2.n SET t1.d=t2.d;
+select * from t1;
+n d
+1 10
+3 3
+select * from t2;
+n d
+1 10
+2 20
+drop table t1,t2;
+create table t1 (n int(10), d int(10));
+create table t2 (n int(10), d int(10));
+insert into t1 values(1,1),(1,2);
+insert into t2 values(1,10),(2,20);
+UPDATE t1,t2 SET t1.d=t2.d,t2.d=30 WHERE t1.n=t2.n;
+select * from t1;
+n d
+1 10
+1 10
+select * from t2;
+n d
+1 30
+2 20
+drop table t1,t2;
+create table t1 (n int(10), d int(10));
+create table t2 (n int(10), d int(10));
+insert into t1 values(1,1),(3,2);
+insert into t2 values(1,10),(1,20);
+UPDATE t1,t2 SET t1.d=t2.d,t2.d=30 WHERE t1.n=t2.n;
+select * from t1;
+n d
+1 10
+3 2
+select * from t2;
+n d
+1 30
+1 30
+drop table t1,t2;
diff --git a/mysql-test/r/null.result b/mysql-test/r/null.result
index 07724a56025..cdea66cbf58 100644
--- a/mysql-test/r/null.result
+++ b/mysql-test/r/null.result
@@ -6,7 +6,7 @@ select 1 | NULL,1 & NULL,1+NULL,1-NULL;
NULL NULL NULL NULL
select NULL=NULL,NULL<>NULL,IFNULL(NULL,1.1)+0,IFNULL(NULL,1) | 0;
NULL=NULL NULL<>NULL IFNULL(NULL,1.1)+0 IFNULL(NULL,1) | 0
-NULL NULL 1.1 1
+NULL NULL 1 1
select strcmp("a",NULL),(1<NULL)+0.0,NULL regexp "a",null like "a%","a%" like null;
strcmp("a",NULL) (1<NULL)+0.0 NULL regexp "a" null like "a%" "a%" like null
NULL NULL NULL NULL NULL
@@ -56,3 +56,20 @@ indexed_field
NULL
NULL
DROP TABLE t1;
+create table t1 (a int, b int) type=myisam;
+insert into t1 values(20,null);
+select t2.b, ifnull(t2.b,"this is null") from t1 as t2 left join t1 as t3 on
+t2.b=t3.a;
+b ifnull(t2.b,"this is null")
+NULL this is null
+select t2.b, ifnull(t2.b,"this is null") from t1 as t2 left join t1 as t3 on
+t2.b=t3.a order by 1;
+b ifnull(t2.b,"this is null")
+NULL this is null
+insert into t1 values(10,null);
+select t2.b, ifnull(t2.b,"this is null") from t1 as t2 left join t1 as t3 on
+t2.b=t3.a order by 1;
+b ifnull(t2.b,"this is null")
+NULL this is null
+NULL this is null
+drop table t1;
diff --git a/mysql-test/r/select.result b/mysql-test/r/select.result
index 94c93dbc3dc..46343eb8248 100644
--- a/mysql-test/r/select.result
+++ b/mysql-test/r/select.result
@@ -3478,3 +3478,16 @@ a a a
2 2 2
3 3 3
drop table t1;
+drop table if exists t1,t2;
+CREATE TABLE t1 ( aa char(2), id int(11) NOT NULL auto_increment, t2_id int(11) NOT NULL default '0', PRIMARY KEY (id), KEY replace_id (t2_id)) TYPE=MyISAM;
+INSERT INTO t1 VALUES ("1",8264,2506),("2",8299,2517),("3",8301,2518),("4",8302,2519),("5",8303,2520),("6",8304,2521),("7",8305,2522);
+CREATE TABLE t2 ( id int(11) NOT NULL auto_increment, PRIMARY KEY (id)) TYPE=MyISAM;
+INSERT INTO t2 VALUES (2517), (2518), (2519), (2520), (2521), (2522);
+select * from t1, t2 WHERE t1.t2_id = t2.id and t1.t2_id > 0 order by t1.id LIMIT 0, 5;
+aa id t2_id id
+2 8299 2517 2517
+3 8301 2518 2518
+4 8302 2519 2519
+5 8303 2520 2520
+6 8304 2521 2521
+drop table if exists t1,t2;
diff --git a/mysql-test/r/temp_table.result b/mysql-test/r/temp_table.result
index 45f879e182b..7c8d10cf0a6 100644
--- a/mysql-test/r/temp_table.result
+++ b/mysql-test/r/temp_table.result
@@ -72,6 +72,11 @@ id val elt(two.val,'one','two')
2 1 one
4 2 two
drop table t1,t2;
+create temporary table t1 (a int not null);
+insert into t1 values (1),(1);
+alter table t1 add primary key (a);
+Duplicate entry '1' for key 1
+drop table t1;
drop table if exists t1;
CREATE TABLE t1 (
d datetime default NULL
diff --git a/mysql-test/r/type_timestamp.result b/mysql-test/r/type_timestamp.result
index bd5e9f04992..088f3b205b9 100644
--- a/mysql-test/r/type_timestamp.result
+++ b/mysql-test/r/type_timestamp.result
@@ -1,11 +1,31 @@
-drop table if exists t1;
-CREATE TABLE t1 ( t timestamp);
+drop table if exists t1,t2;
+CREATE TABLE t1 (a int, t timestamp);
+CREATE TABLE t2 (a int, t datetime);
SET TIMESTAMP=1234;
-insert into t1 values(NULL);
+insert into t1 values(1,NULL);
+insert into t1 values(2,"2002-03-03");
+SET TIMESTAMP=1235;
+insert into t1 values(3,NULL);
+SET TIMESTAMP=1236;
+insert into t1 (a) values(4);
+insert into t2 values(5,"2002-03-04"),(6,NULL),(7,"2002-03-05"),(8,"00-00-00");
+SET TIMESTAMP=1237;
+insert into t1 select * from t2;
+SET TIMESTAMP=1238;
+insert into t1 (a) select a+1 from t2 where a=8;
select * from t1;
-t
-19700101032034
-drop table t1;
+a t
+1 19700101032034
+2 20020303000000
+3 19700101032035
+4 19700101032036
+5 20020304000000
+6 19700101032037
+7 20020305000000
+8 00000000000000
+9 19700101032038
+drop table t1,t2;
+SET TIMESTAMP=1234;
CREATE TABLE t1 (value TEXT NOT NULL, id VARCHAR(32) NOT NULL, stamp timestamp, PRIMARY KEY (id));
INSERT INTO t1 VALUES ("my value", "myKey","1999-04-02 00:00:00");
SELECT stamp FROM t1 WHERE id="myKey";
diff --git a/mysql-test/t/alter_table.test b/mysql-test/t/alter_table.test
index 2b329f3ec6e..1c3987e2a31 100644
--- a/mysql-test/t/alter_table.test
+++ b/mysql-test/t/alter_table.test
@@ -2,7 +2,7 @@
# Test of alter table
#
-drop table if exists t1;
+drop table if exists t1,t2;
create table t1 (
col1 int not null auto_increment primary key,
col2 varchar(30) not null,
diff --git a/mysql-test/t/distinct.test b/mysql-test/t/distinct.test
index aaffea3c5a5..7f75b6b1687 100644
--- a/mysql-test/t/distinct.test
+++ b/mysql-test/t/distinct.test
@@ -88,6 +88,16 @@ select distinct t1.a from t1,t3 where t1.a=t3.a;
#flush status;
select distinct 1 from t1,t3 where t1.a=t3.a;
#show status like 'Handler%';
+
+explain SELECT distinct t1.a from t1;
+explain SELECT distinct t1.a from t1 order by a desc;
+explain SELECT t1.a from t1 group by a order by a desc;
+explain SELECT distinct t1.a from t1 order by a desc limit 1;
+explain SELECT distinct a from t3 order by a desc limit 2;
+explain SELECT distinct a,b from t3 order by a+1;
+explain SELECT distinct a,b from t3 order by a limit 10;
+explain SELECT a,b from t3 group by a,b order by a+1;
+
drop table t1,t2,t3,t4;
CREATE TABLE t1 (name varchar(255));
diff --git a/mysql-test/t/func_math.test b/mysql-test/t/func_math.test
index 74e8a5ce092..bd125dafd53 100644
--- a/mysql-test/t/func_math.test
+++ b/mysql-test/t/func_math.test
@@ -13,6 +13,7 @@ select ln(exp(10)),exp(ln(sqrt(10))*2),ln(-1),ln(0),ln(NULL);
select log2(8),log2(15),log2(-2),log2(0),log2(NULL);
select log10(100),log10(18),log10(-4),log10(0),log10(NULL);
select pow(10,log10(10)),power(2,4);
+set @@rand_seed1=10000000,@@rand_seed2=1000000;
select rand(999999),rand();
select pi(),sin(pi()/2),cos(pi()/2),abs(tan(pi())),cot(1),asin(1),acos(0),atan(1);
select degrees(pi()),radians(360);
diff --git a/mysql-test/t/group_by.test b/mysql-test/t/group_by.test
index 4670feca500..4682463c11f 100644
--- a/mysql-test/t/group_by.test
+++ b/mysql-test/t/group_by.test
@@ -208,6 +208,7 @@ INSERT INTO t2 VALUES ('Web Interface','AAAAAAAA-AAA','id0001','','');
INSERT INTO t2 VALUES ('Host communication','AAAAA','id0001','','');
select value,description,bug_id from t2 left join t1 on t2.program=t1.product and t2.value=t1.component where program="AAAAA";
select value,description,COUNT(bug_id) from t2 left join t1 on t2.program=t1.product and t2.value=t1.component where program="AAAAA" group by value;
+select value,description,COUNT(bug_id) from t2 left join t1 on t2.program=t1.product and t2.value=t1.component where program="AAAAA" group by value having COUNT(bug_id) IN (0,2);
drop table t1,t2;
@@ -236,6 +237,8 @@ CREATE TABLE t1 (
INSERT INTO t1 VALUES (1,1,1),(2,2,2),(2,1,1),(3,3,3),(4,3,3),(5,3,3);
explain select userid,count(*) from t1 group by userid desc;
select userid,count(*) from t1 group by userid desc;
+select userid,count(*) from t1 group by userid desc having (count(*)+1) IN (4,3);
+select userid,count(*) from t1 group by userid desc having 3 IN (1,COUNT(*));
explain select spid,count(*) from t1 where spid between 1 and 2 group by spid desc;
explain select spid,count(*) from t1 where spid between 1 and 2 group by spid;
select spid,count(*) from t1 where spid between 1 and 2 group by spid;
diff --git a/mysql-test/t/innodb.test b/mysql-test/t/innodb.test
index e6d57899082..eb5b0c9efd2 100644
--- a/mysql-test/t/innodb.test
+++ b/mysql-test/t/innodb.test
@@ -660,3 +660,39 @@ insert into t1 (code, name) values (3, 'Jeremy'), (4, 'Matt');
select id, code, name from t1 order by id;
COMMIT;
DROP TABLE t1;
+
+#
+# Test of multi-table-update
+#
+drop table if exists t1,t2;
+create table t1 (n int(10), d int(10)) type=innodb;
+create table t2 (n int(10), d int(10)) type=innodb;
+insert into t1 values(1,1),(1,2);
+insert into t2 values(1,10),(2,20);
+UPDATE t1,t2 SET t1.d=t2.d,t2.d=30 WHERE t1.n=t2.n;
+select * from t1;
+select * from t2;
+drop table t1,t2;
+
+#
+# Testing of IFNULL
+#
+create table t1 (a int, b int) type=innodb;
+insert into t1 values(20,null);
+select t2.b, ifnull(t2.b,"this is null") from t1 as t2 left join t1 as t3 on
+t2.b=t3.a;
+select t2.b, ifnull(t2.b,"this is null") from t1 as t2 left join t1 as t3 on
+t2.b=t3.a order by 1;
+insert into t1 values(10,null);
+select t2.b, ifnull(t2.b,"this is null") from t1 as t2 left join t1 as t3 on
+t2.b=t3.a order by 1;
+drop table t1;
+
+#
+# Test of read_through not existing const_table
+#
+
+create table t1 (a varchar(10) not null) type=myisam;
+create table t2 (b varchar(10) not null unique) type=innodb;
+select t1.a from t1,t2 where t1.a=t2.b;
+drop table t1,t2;
diff --git a/mysql-test/t/multi_update.test b/mysql-test/t/multi_update.test
index b3a51ff65bc..b79b0749c82 100644
--- a/mysql-test/t/multi_update.test
+++ b/mysql-test/t/multi_update.test
@@ -1,9 +1,6 @@
#
-# Only run the test if we are using --big-test, because this test takes a
-# long time
+# Test of update statement that uses many tables.
#
-#-- require r/big_test.require
-#eval select $BIG_TEST as using_big_test;
drop table if exists t1,t2,t3;
create table t1(id1 int not null auto_increment primary key, t char(12));
@@ -80,67 +77,59 @@ while ($1)
enable_query_log;
delete t1 from t1,t2 where t1.id1 = t2.id2 and t1.id1 > 500;
drop table t1,t2;
-DROP TABLE IF EXISTS a,b,c;
-CREATE TABLE a (
+
+CREATE TABLE t1 (
id int(11) NOT NULL default '0',
name varchar(10) default NULL,
PRIMARY KEY (id)
) TYPE=MyISAM;
-INSERT INTO a VALUES (1,'aaa'),(2,'aaa'),(3,'aaa');
-CREATE TABLE b (
+INSERT INTO t1 VALUES (1,'aaa'),(2,'aaa'),(3,'aaa');
+CREATE TABLE t2 (
id int(11) NOT NULL default '0',
name varchar(10) default NULL,
PRIMARY KEY (id)
) TYPE=MyISAM;
-INSERT INTO b VALUES (2,'bbb'),(3,'bbb'),(4,'bbb');
-CREATE TABLE c (
+INSERT INTO t2 VALUES (2,'bbb'),(3,'bbb'),(4,'bbb');
+CREATE TABLE t3 (
id int(11) NOT NULL default '0',
mydate datetime default NULL,
PRIMARY KEY (id)
) TYPE=MyISAM;
-INSERT INTO c VALUES (1,'2002-02-04 00:00:00'),(3,'2002-05-12 00:00:00'),(5,'2002-05-12 00:00:00'),(6,'2002-06-22
+INSERT INTO t3 VALUES (1,'2002-02-04 00:00:00'),(3,'2002-05-12 00:00:00'),(5,'2002-05-12 00:00:00'),(6,'2002-06-22
00:00:00'),(7,'2002-07-22 00:00:00');
-delete a,b,c from a,b,c
-where to_days(now())-to_days(c.mydate)>=30
-and c.id=a.id and c.id=b.id;
-select * from c;
-DROP TABLE IF EXISTS a,b,c;
-drop table if exists parent, child;
-CREATE TABLE IF NOT EXISTS `parent` (
+delete t1,t2,t3 from t1,t2,t3 where to_days(now())-to_days(t3.mydate)>=30 and t3.id=t1.id and t3.id=t2.id;
+select * from t3;
+DROP TABLE IF EXISTS t1,t2,t3;
+
+CREATE TABLE IF NOT EXISTS `t1` (
`id` int(11) NOT NULL auto_increment,
`tst` text,
`tst1` text,
PRIMARY KEY (`id`)
) TYPE=MyISAM;
-CREATE TABLE IF NOT EXISTS `child` (
+CREATE TABLE IF NOT EXISTS `t2` (
`ID` int(11) NOT NULL auto_increment,
`ParId` int(11) default NULL,
`tst` text,
`tst1` text,
PRIMARY KEY (`ID`),
- KEY `IX_ParId_child` (`ParId`),
- FOREIGN KEY (`ParId`) REFERENCES `test.parent` (`id`)
+ KEY `IX_ParId_t2` (`ParId`),
+ FOREIGN KEY (`ParId`) REFERENCES `t1` (`id`)
) TYPE=MyISAM;
-INSERT INTO parent(tst,tst1)
-VALUES("MySQL","MySQL AB"), ("MSSQL","Microsoft"), ("ORACLE","ORACLE");
+INSERT INTO t1(tst,tst1) VALUES("MySQL","MySQL AB"), ("MSSQL","Microsoft"), ("ORACLE","ORACLE");
-INSERT INTO child(ParId)
-VALUES(1), (2), (3);
+INSERT INTO t2(ParId) VALUES(1), (2), (3);
-select * from child;
-
-UPDATE child, parent
-SET child.tst = parent.tst,
-child.tst1 = parent.tst1
-WHERE child.ParId = parent.Id;
+select * from t2;
-select * from child;
+UPDATE t2, t1 SET t2.tst = t1.tst, t2.tst1 = t1.tst1 WHERE t2.ParId = t1.Id;
+select * from t2;
-drop table parent, child;
drop table if exists t1, t2 ;
+
create table t1 (n numeric(10));
create table t2 (n numeric(10));
insert into t2 values (1),(2),(4),(8),(16),(32);
@@ -174,3 +163,54 @@ select * from t1;
select * from t2;
unlock tables;
drop table t1,t2;
+
+#
+# Test safe updates and timestamps
+#
+set sql_safe_updates=1;
+create table t1 (n int(10), d int(10));
+create table t2 (n int(10), d int(10));
+insert into t1 values(1,1);
+insert into t2 values(1,10),(2,20);
+--error 1175
+UPDATE t1,t2 SET t1.d=t2.d WHERE t1.n=t2.n;
+set sql_safe_updates=0;
+drop table t1,t2;
+set timestamp=1038401397;
+create table t1 (n int(10) not null primary key, d int(10), t timestamp);
+create table t2 (n int(10) not null primary key, d int(10), t timestamp);
+insert into t1 values(1,1,NULL);
+insert into t2 values(1,10,NULL),(2,20,NULL);
+set timestamp=1038000000;
+UPDATE t1,t2 SET t1.d=t2.d WHERE t1.n=t2.n;
+select * from t1;
+select * from t2;
+--error 1064
+UPDATE t1,t2 SET 1=2 WHERE t1.n=t2.n;
+drop table t1,t2;
+set timestamp=0;
+set sql_safe_updates=0;
+create table t1 (n int(10) not null primary key, d int(10));
+create table t2 (n int(10) not null primary key, d int(10));
+insert into t1 values(1,1), (3,3);
+insert into t2 values(1,10),(2,20);
+UPDATE t2 left outer join t1 on t1.n=t2.n SET t1.d=t2.d;
+select * from t1;
+select * from t2;
+drop table t1,t2;
+create table t1 (n int(10), d int(10));
+create table t2 (n int(10), d int(10));
+insert into t1 values(1,1),(1,2);
+insert into t2 values(1,10),(2,20);
+UPDATE t1,t2 SET t1.d=t2.d,t2.d=30 WHERE t1.n=t2.n;
+select * from t1;
+select * from t2;
+drop table t1,t2;
+create table t1 (n int(10), d int(10));
+create table t2 (n int(10), d int(10));
+insert into t1 values(1,1),(3,2);
+insert into t2 values(1,10),(1,20);
+UPDATE t1,t2 SET t1.d=t2.d,t2.d=30 WHERE t1.n=t2.n;
+select * from t1;
+select * from t2;
+drop table t1,t2;
diff --git a/mysql-test/t/null.test b/mysql-test/t/null.test
index 8bd9e806118..ad32e0be6ff 100644
--- a/mysql-test/t/null.test
+++ b/mysql-test/t/null.test
@@ -34,3 +34,17 @@ SELECT * FROM t1 WHERE indexed_field=NULL;
SELECT * FROM t1 WHERE indexed_field IS NULL;
SELECT * FROM t1 WHERE indexed_field<=>NULL;
DROP TABLE t1;
+
+#
+# Testing of IFNULL
+#
+create table t1 (a int, b int) type=myisam;
+insert into t1 values(20,null);
+select t2.b, ifnull(t2.b,"this is null") from t1 as t2 left join t1 as t3 on
+t2.b=t3.a;
+select t2.b, ifnull(t2.b,"this is null") from t1 as t2 left join t1 as t3 on
+t2.b=t3.a order by 1;
+insert into t1 values(10,null);
+select t2.b, ifnull(t2.b,"this is null") from t1 as t2 left join t1 as t3 on
+t2.b=t3.a order by 1;
+drop table t1;
diff --git a/mysql-test/t/select.test b/mysql-test/t/select.test
index 34ad496f285..64287dc4170 100644
--- a/mysql-test/t/select.test
+++ b/mysql-test/t/select.test
@@ -1799,5 +1799,11 @@ select * from t1 natural right join (t1 as t2 left join t1 as t3 using (a));
# natural join
select * from t1 natural join (t1 as t2 left join t1 as t3 using (a));
select * from (t1 as t2 left join t1 as t3 using (a)) natural join t1;
-
drop table t1;
+drop table if exists t1,t2;
+CREATE TABLE t1 ( aa char(2), id int(11) NOT NULL auto_increment, t2_id int(11) NOT NULL default '0', PRIMARY KEY (id), KEY replace_id (t2_id)) TYPE=MyISAM;
+INSERT INTO t1 VALUES ("1",8264,2506),("2",8299,2517),("3",8301,2518),("4",8302,2519),("5",8303,2520),("6",8304,2521),("7",8305,2522);
+CREATE TABLE t2 ( id int(11) NOT NULL auto_increment, PRIMARY KEY (id)) TYPE=MyISAM;
+INSERT INTO t2 VALUES (2517), (2518), (2519), (2520), (2521), (2522);
+select * from t1, t2 WHERE t1.t2_id = t2.id and t1.t2_id > 0 order by t1.id LIMIT 0, 5;
+drop table if exists t1,t2;
diff --git a/mysql-test/t/temp_table.test b/mysql-test/t/temp_table.test
index 10168cf13c7..665e690a322 100644
--- a/mysql-test/t/temp_table.test
+++ b/mysql-test/t/temp_table.test
@@ -62,6 +62,15 @@ select one.id, two.val, elt(two.val,'one','two') from t1 one, t2 two where two.i
drop table t1,t2;
#
+# Test of failed ALTER TABLE on temporary table
+#
+create temporary table t1 (a int not null);
+insert into t1 values (1),(1);
+-- error 1062
+alter table t1 add primary key (a);
+drop table t1;
+
+#
# In MySQL 4.0.4 doing a GROUP BY on a NULL column created a disk based
# temporary table when a memory based one would be good enough.
diff --git a/mysql-test/t/type_timestamp.test b/mysql-test/t/type_timestamp.test
index 19af6b0c49c..2929184df93 100644
--- a/mysql-test/t/type_timestamp.test
+++ b/mysql-test/t/type_timestamp.test
@@ -2,14 +2,25 @@
# Test timestamp
#
-drop table if exists t1;
-CREATE TABLE t1 ( t timestamp);
+drop table if exists t1,t2;
+CREATE TABLE t1 (a int, t timestamp);
+CREATE TABLE t2 (a int, t datetime);
SET TIMESTAMP=1234;
-insert into t1 values(NULL);
+insert into t1 values(1,NULL);
+insert into t1 values(2,"2002-03-03");
+SET TIMESTAMP=1235;
+insert into t1 values(3,NULL);
+SET TIMESTAMP=1236;
+insert into t1 (a) values(4);
+insert into t2 values(5,"2002-03-04"),(6,NULL),(7,"2002-03-05"),(8,"00-00-00");
+SET TIMESTAMP=1237;
+insert into t1 select * from t2;
+SET TIMESTAMP=1238;
+insert into t1 (a) select a+1 from t2 where a=8;
select * from t1;
-drop table t1;
-
+drop table t1,t2;
+SET TIMESTAMP=1234;
CREATE TABLE t1 (value TEXT NOT NULL, id VARCHAR(32) NOT NULL, stamp timestamp, PRIMARY KEY (id));
INSERT INTO t1 VALUES ("my value", "myKey","1999-04-02 00:00:00");
SELECT stamp FROM t1 WHERE id="myKey";
diff --git a/mysys/mf_iocache.c b/mysys/mf_iocache.c
index 43b3d30915f..e9c35175bf9 100644
--- a/mysys/mf_iocache.c
+++ b/mysys/mf_iocache.c
@@ -445,6 +445,8 @@ void init_io_cache_share(IO_CACHE *info, IO_CACHE_SHARE *s, uint num_threads)
s->active=0; /* to catch errors */
info->share=s;
info->read_function=_my_b_read_r;
+  /* Ensure that the code doesn't use a pointer to the IO_CACHE object */
+ info->current_pos= info->current_end= 0;
}
/*
diff --git a/sql-bench/crash-me.sh b/sql-bench/crash-me.sh
index 5fa67773566..6c3ee9bd0dc 100644
--- a/sql-bench/crash-me.sh
+++ b/sql-bench/crash-me.sh
@@ -280,13 +280,14 @@ report("Order by function","order_by_function",
"select a from crash_me order by a+1");
report("Order by on unused column",'order_on_unused',
"select b from crash_me order by a");
-check_and_report("Order by DESC is remembered",'order_by_remember_desc',
- ["create table crash_q (s int,s1 int)",
- "insert into crash_q values(1,1)",
- "insert into crash_q values(3,1)",
- "insert into crash_q values(2,1)"],
- "select s,s1 from crash_q order by s1 DESC,s",
- ["drop table crash_q $drop_attr"],[3,2,1],7,undef(),3);
+# a little bit deprecated
+#check_and_report("Order by DESC is remembered",'order_by_remember_desc',
+# ["create table crash_q (s int,s1 int)",
+# "insert into crash_q values(1,1)",
+# "insert into crash_q values(3,1)",
+# "insert into crash_q values(2,1)"],
+# "select s,s1 from crash_q order by s1 DESC,s",
+# ["drop table crash_q $drop_attr"],[3,2,1],7,undef(),3);
report("Compute",'compute',
"select a from crash_me order by a compute sum(a) by a");
report("INSERT with Value lists",'insert_multi_value',
@@ -844,7 +845,6 @@ try_and_report("Automatic row id", "automatic_rowid",
["DAYOFWEEK","dayofweek","dayofweek(DATE '1997-02-01')",7,0],
["DAYOFYEAR","dayofyear","dayofyear(DATE '1997-02-01')",32,0],
["QUARTER","quarter","quarter(DATE '1997-02-01')",1,0],
- ["WEEK","week","week(DATE '1997-02-01')",5,0],
["YEAR","year","year(DATE '1997-02-01')",1997,0],
["CURTIME","curtime","curtime()",0,2],
["HOUR","hour","hour('12:13:14')",12,0],
@@ -980,7 +980,6 @@ try_and_report("Automatic row id", "automatic_rowid",
["ASCII in string cast",'ascii_string',"ascii('a')",'a',1],
["EBCDIC in string cast",'ebcdic_string',"ebcdic('a')",'a',1],
["TRUNC (1 arg)",'trunc1arg',"trunc(222.6)",222,0],
- ["NOROUND",'noround',"noround(222.6)",222.6,0],
["FIXED",'fixed',"fixed(222.6666,10,2)",'222.67',0],
["FLOAT",'float',"float(6666.66,4)",6667,0],
["LENGTH",'length',"length(1)",2,0],
@@ -1187,12 +1186,31 @@ else
}
-if ($limits{'func_extra_noround'} eq 'yes')
+# Test: NOROUND
{
- report("Ignoring NOROUND","ignoring_noround",
- "create table crash_q (a int)",
- "insert into crash_q values(noround(10.22))",
- "drop table crash_q $drop_attr");
+ my $resultat = 'undefined';
+ my $error;
+ print "NOROUND: ";
+ save_incomplete('func_extra_noround','Function NOROUND');
+
+# 1) check if noround() function is supported
+ $error = safe_query("select noround(22.6) $end_query");
+ if ($error ne 1) # syntax error -- noround is not supported
+ {
+ $resultat = 'no'
+ } else # Ok, now check if it really works
+ {
+ $error=safe_query( "create table crash_me_nr (a int)",
+ "insert into crash_me_nr values(noround(10.2))",
+ "drop table crash_me_nr $drop_attr");
+ if ($error eq 1) {
+ $resultat = "syntax only";
+ } else {
+ $resultat = 'yes';
+ }
+ }
+ print "$resultat\n";
+ save_config_data('func_extra_noround',$resultat,"Function NOROUND");
}
check_parenthesis("func_sql_","CURRENT_USER");
@@ -1200,6 +1218,32 @@ check_parenthesis("func_sql_","SESSION_USER");
check_parenthesis("func_sql_","SYSTEM_USER");
check_parenthesis("func_sql_","USER");
+# Test: WEEK()
+{
+ my $explain="";
+ my $resultat="no";
+ my $error;
+ print "WEEK:";
+ save_incomplete('func_odbc_week','WEEK');
+ $error = safe_query_result("select week(DATE '1997-02-01') $end_query",5,0);
+ # actually this query must return 4 or 5 in the $last_result,
+  # $error can be 1 (not supported at all), -1 (probably USA weeks)
+ # and 0 - EURO weeks
+ if ($error == -1) {
+ if ($last_result == 4) {
+ $resultat = 'USA';
+ $explain = ' started from Sunday';
+ } else {
+ $resultat='error';
+ $explain = " must return 4 or 5, but $last_error";
+ }
+ } elsif ($error == 0) {
+ $resultat = 'EURO';
+ $explain = ' started from Monday';
+ }
+ print " $resultat\n";
+ save_config_data('func_odbc_week',$resultat,"WEEK $explain");
+}
report("LIKE on numbers","like_with_number",
"create table crash_q (a int,b int)",
@@ -1682,28 +1726,36 @@ report("views","views",
"create view crash_q as select a from crash_me",
"drop view crash_q $drop_attr");
-report("foreign key syntax","foreign_key_syntax",
- create_table("crash_q",["a integer not null"],["primary key (a)"]),
- create_table("crash_q2",["a integer not null",
- "foreign key (a) references crash_q (a)"],
- []),
- "insert into crash_q values (1)",
- "insert into crash_q2 values (1)",
- "drop table crash_q2 $drop_attr",
- "drop table crash_q $drop_attr");
-
-if ($limits{'foreign_key_syntax'} eq 'yes')
+# Test: foreign key
{
- report_fail("foreign keys","foreign_key",
- create_table("crash_q",["a integer not null"],
- ["primary key (a)"]),
- create_table("crash_q2",["a integer not null",
- "foreign key (a) references crash_q (a)"],
- []),
- "insert into crash_q values (1)",
- "insert into crash_q2 values (2)",
- "drop table crash_q2 $drop_attr",
- "drop table crash_q $drop_attr");
+ my $resultat = 'undefined';
+ my $error;
+ print "foreign keys: ";
+ save_incomplete('foreign_key','foreign keys');
+
+# 1) check if foreign keys are supported
+ safe_query(create_table("crash_me_qf",["a integer not null"],
+ ["primary key (a)"]));
+ $error = safe_query( create_table("crash_me_qf2",["a integer not null",
+ "foreign key (a) references crash_me_qf (a)"], []));
+
+ if ($error eq 1) # OK -- syntax is supported
+ {
+ $resultat = 'error';
+ # now check if foreign key really works
+ safe_query( "insert into crash_me_qf values (1)");
+ if (safe_query( "insert into crash_me_qf2 values (2)") eq 1) {
+ $resultat = 'syntax only';
+ } else {
+ $resultat = 'yes';
+ }
+
+ } else {
+ $resultat = "no";
+ }
+ safe_query( "drop table crash_me_qf2 $drop_attr","drop table crash_me_qf $drop_attr");
+ print "$resultat\n";
+ save_config_data('foreign_key',$resultat,"foreign keys");
}
report("Create SCHEMA","create_schema",
@@ -1720,32 +1772,22 @@ if ($limits{'foreign_key'} eq 'yes')
}
}
-report("Column constraints","constraint_check",
- "create table crash_q (a int check (a>0))",
- "drop table crash_q $drop_attr");
+check_constraint("Column constraints","constraint_check",
+ "create table crash_q (a int check (a>0))",
+ "insert into crash_q values(0)",
+ "drop table crash_q $drop_attr");
-report("Ignoring column constraints","ignoring_constraint_check",
- "create table crash_q (a int check (a>0))",
- "insert into crash_q values(0)",
- "drop table crash_q $drop_attr") if ($limits{'constraint_check'} eq 'yes');
-report("Table constraints","constraint_check_table",
- "create table crash_q (a int ,b int, check (a>b))",
- "drop table crash_q $drop_attr");
-
-report("Ignoring table constraints","ignoring_constraint_check_table",
+check_constraint("Table constraints","constraint_check_table",
"create table crash_q (a int ,b int, check (a>b))",
"insert into crash_q values(0,0)",
- "drop table crash_q $drop_attr") if ($limits{'constraint_check_table'} eq 'yes');
-
-report("Named constraints","constraint_check_named",
- "create table crash_q (a int ,b int, constraint abc check (a>b))",
"drop table crash_q $drop_attr");
-report("Ignoring named constraints","ignoring_constraint_check_named",
+check_constraint("Named constraints","constraint_check_named",
"create table crash_q (a int ,b int, constraint abc check (a>b))",
"insert into crash_q values(0,0)",
- "drop table crash_q $drop_attr") if ($limits{'constraint_check_named'} eq 'yes');
+ "drop table crash_q $drop_attr");
+
report("NULL constraint (SyBase style)","constraint_null",
"create table crash_q (a int null)",
@@ -2236,6 +2278,29 @@ sub check_parenthesis {
save_config_data($param_name,$resultat,$fn);
}
+sub check_constraint {
+ my $prompt = shift;
+ my $key = shift;
+ my $create = shift;
+ my $check = shift;
+ my $drop = shift;
+ save_incomplete($key,$prompt);
+ print "$prompt=";
+ my $res = 'no';
+
+ if ( ($t=safe_query($create)) == 1)
+ {
+ $res='yes';
+ if (safe_query($check) == 1)
+ {
+ $res='syntax only';
+ }
+ }
+ safe_query($drop);
+ save_config_data($key,$res,$prompt);
+ print "$res\n";
+}
+
sub usage
{
print <<EOF;
@@ -2317,7 +2382,7 @@ $0 takes the following options:
--user='user_name'
User name to log into the SQL server.
---start-cmd='command to restart server'
+--db-start-cmd='command to restart server'
  Automatically restarts the server with this command if the database server dies.
--sleep='time in seconds' (Default $opt_sleep)
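
#
# Illustrative sketch, not part of this patch: the WEEK probe added above
# can be reproduced by hand against a MySQL server.  By default MySQL
# counts weeks from Sunday, so '1997-02-01' (a Saturday) is expected to
# land in week 4 (the "USA" answer) rather than the ODBC/EURO value 5.
#
select week('1997-02-01');
select dayname('1997-02-01');
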
diff --git a/sql-bench/server-cfg.sh b/sql-bench/server-cfg.sh
index 213ecacea6f..7f96c06ef99 100644
--- a/sql-bench/server-cfg.sh
+++ b/sql-bench/server-cfg.sh
@@ -216,6 +216,7 @@ sub version
}
$sth->finish;
$dbh->disconnect;
+ $version .= "/ODBC" if ($self->{'data_source'} =~ /:ODBC:/);
return $version;
}
@@ -251,7 +252,7 @@ sub create
$query="create table $table_name (";
foreach $field (@$fields)
{
- $field =~ s/ decimal/ double(10,2)/i;
+# $field =~ s/ decimal/ double(10,2)/i;
$field =~ s/ big_decimal/ double(10,2)/i;
$query.= $field . ',';
}
@@ -431,6 +432,8 @@ sub version
{ # Strip pre- and endspace
$tmp=$1;
$tmp =~ s/\s+/ /g; # Remove unnecessary spaces
+ $tmp .= "/ODBC" if ($self->{'data_source'} =~ /:ODBC:/);
+
return $tmp;
}
}
@@ -619,6 +622,7 @@ sub new
sub version
{
my ($version,$dir);
+ $version = "PostgreSQL version ???";
foreach $dir ($ENV{'PGDATA'},"/usr/local/pgsql/data", "/usr/local/pg/data")
{
if ($dir && -e "$dir/PG_VERSION")
@@ -627,11 +631,13 @@ sub version
if ($? == 0)
{
chomp($version);
+ $version .= "/ODBC" if ($self->{'data_source'} =~ /:ODBC:/);
return "PostgreSQL $version";
}
}
}
- return "PostgreSQL version ???";
+ $version .= "/ODBC" if ($self->{'data_source'} =~ /:ODBC:/);
+ return $version;
}
@@ -895,6 +901,7 @@ sub new
sub version
{
my ($version,$dir);
+ $version="Solid version ??";
foreach $dir ($ENV{'SOLIDDIR'},"/usr/local/solid", "/my/local/solid")
{
if ($dir && -e "$dir/bin/solcon")
@@ -903,11 +910,13 @@ sub version
if ($? == 0)
{
chomp($version);
+ $version .= "/ODBC" if ($self->{'data_source'} =~ /:ODBC:/);
return $version;
}
}
}
- return "Solid version ???";
+ $version .= "/ODBC" if ($self->{'data_source'} =~ /:ODBC:/);
+ return $version;
}
sub connect
@@ -1136,6 +1145,8 @@ sub version
{
$version="Empress version ???";
}
+
+ $version .= "/ODBC" if ($self->{'data_source'} =~ /:ODBC:/);
return $version;
}
@@ -1403,6 +1414,7 @@ sub version
}
$sth->finish;
$dbh->disconnect;
+ $version .= "/ODBC" if ($self->{'data_source'} =~ /:ODBC:/);
return $version;
}
@@ -1647,6 +1659,7 @@ sub version
}
$sth->finish;
$dbh->disconnect;
+ $version .= "/ODBC" if ($self->{'data_source'} =~ /:ODBC:/);
return $version;
}
@@ -1846,7 +1859,9 @@ sub new
sub version
{
my ($self)=@_;
- return "Access 2000"; #DBI/ODBC can't return the server version
+ my $version="Access 2000";
+ $version .= "/ODBC" if ($self->{'data_source'} =~ /:ODBC:/);
+ return $version; #DBI/ODBC can't return the server version
}
sub connect
@@ -2028,7 +2043,8 @@ sub new
sub version
{
my ($self)=@_;
- my($sth,@row);
+ my($sth,@row, $version);
+ $version='MS SQL server ?';
$dbh=$self->connect();
$sth = $dbh->prepare("SELECT \@\@VERSION") or die $DBI::errstr;
$sth->execute or die $DBI::errstr;
@@ -2036,10 +2052,11 @@ sub version
if ($row[0]) {
@server = split(/\n/,$row[0]);
chomp(@server);
- return "$server[0]";
- } else {
- return "Microsoft SQL server ?";
- }
+ $version= "$server[0]";
+ }
+ $sth->finish;
+ $version .= "/ODBC" if ($self->{'data_source'} =~ /:ODBC:/);
+ return $version;
}
sub connect
@@ -2232,8 +2249,8 @@ sub version
}
$sth->finish;
$dbh->disconnect;
+ $version .= "/ODBC" if ($self->{'data_source'} =~ /:ODBC:/);
return $version;
-
}
sub connect
@@ -2466,6 +2483,7 @@ sub version
}
$sth->finish;
$dbh->disconnect;
+ $version .= "/ODBC" if ($self->{'data_source'} =~ /:ODBC:/);
return $version;
}
@@ -2842,6 +2860,7 @@ sub version
#
$version = $dbh->func(18, GetInfo);
$dbh->disconnect;
+ $version .= "/ODBC" if ($self->{'data_source'} =~ /:ODBC:/);
return $version;
}
@@ -3041,6 +3060,7 @@ sub version
# $version =~ s/.*version \"(.*)\"$/$1/;
$dbh->disconnect;
$version = "6.0Beta";
+ $version .= "/ODBC" if ($self->{'data_source'} =~ /:ODBC:/);
return $version;
}
@@ -3246,6 +3266,7 @@ sub version
#$version = $dbh->func(18, GetInfo);
$version="FrontBase 3.3";
# $dbh->disconnect;
+ $version .= "/ODBC" if ($self->{'data_source'} =~ /:ODBC:/);
return $version;
}
@@ -3365,7 +3386,7 @@ sub fix_for_insert
# Configuration for SAPDB
#############################################################################
-package db_Sapdb;
+package db_sapdb;
sub new
{
@@ -3453,6 +3474,7 @@ sub version
}
$sth->finish;
$dbh->disconnect;
+ $version .= "/ODBC" if ($self->{'data_source'} =~ /:ODBC:/);
return $version;
}
diff --git a/sql/field.cc b/sql/field.cc
index f0f3b22f1cc..7b3b88a69f1 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -1576,11 +1576,14 @@ void Field_medium::sql_type(String &res) const
int Field_long::store(const char *from,uint len,CHARSET_INFO *cs)
{
+ char *end;
while (len && my_isspace(system_charset_info,*from))
{
len--; from++;
}
long tmp;
+ String tmp_str(from,len);
+ from= tmp_str.c_ptr(); // Add end null if needed
int error= 0;
errno=0;
if (unsigned_flag)
@@ -1592,12 +1595,13 @@ int Field_long::store(const char *from,uint len,CHARSET_INFO *cs)
error= 1;
}
else
- tmp=(long) my_strntoul(cs,from,len,NULL,10);
+ tmp=(long) my_strntoul(cs,from,len,&end,10);
}
else
- tmp=my_strntol(cs,from,len,NULL,10);
- if (errno || current_thd->count_cuted_fields && !test_if_int(from,len))
- {
+ tmp=my_strntol(cs,from,len,&end,10);
+ if (errno ||
+ (from+len != end && current_thd->count_cuted_fields &&
+ !test_if_int(from,len)))
current_thd->cuted_fields++;
error= 1;
}
@@ -1821,11 +1825,14 @@ void Field_long::sql_type(String &res) const
int Field_longlong::store(const char *from,uint len,CHARSET_INFO *cs)
{
+ char *end;
while (len && my_isspace(system_charset_info,*from))
{ // For easy error check
len--; from++;
}
longlong tmp;
+ String tmp_str(from,len);
+ from= tmp_str.c_ptr(); // Add end null if needed
int error= 0;
errno=0;
if (unsigned_flag)
@@ -1837,15 +1844,14 @@ int Field_longlong::store(const char *from,uint len,CHARSET_INFO *cs)
error= 1;
}
else
- tmp=(longlong) my_strntoull(cs,from,len,NULL,10);
+ tmp=(longlong) my_strntoull(cs,from,len,&end,10);
}
else
- tmp=my_strntoll(cs,from,len,NULL,10);
- if (errno || current_thd->count_cuted_fields && !test_if_int(from,len))
- {
- current_thd->cuted_fields++;
- error= 1;
- }
+ tmp=my_strntoll(cs,from,len,&end,10);
+ if (errno ||
+ (from+len != end && current_thd->count_cuted_fields &&
+ !test_if_int(from,len)))
+ current_thd->cuted_fields++;
#ifdef WORDS_BIGENDIAN
if (table->db_low_byte_first)
{
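
#
# Illustrative mysql-test style sketch (not from this patch) of what the
# end-pointer check above is meant to catch: an integer string with
# trailing garbage stored into an INT/BIGINT column should be counted as a
# cut field, while a clean numeric string should not.  Table name t1 is
# made up.
#
create table t1 (a int, b bigint);
insert into t1 values ("10", "10");
insert into t1 values ("10x", "10y");
select * from t1;
drop table t1;
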
diff --git a/sql/field.h b/sql/field.h
index 9fc72cf56ec..16929a363dd 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -1069,7 +1069,7 @@ Field *make_field(char *ptr, uint32 field_length,
uint pack_length_to_packflag(uint type);
uint32 calc_pack_length(enum_field_types type,uint32 length);
bool set_field_to_null(Field *field);
-bool set_field_to_null_with_conversions(Field *field);
+bool set_field_to_null_with_conversions(Field *field, bool no_conversions);
uint find_enum(TYPELIB *typelib,const char *x, uint length);
ulonglong find_set(TYPELIB *typelib,const char *x, uint length);
bool test_if_int(const char *str,int length);
diff --git a/sql/field_conv.cc b/sql/field_conv.cc
index ab71f324732..409c22d61d4 100644
--- a/sql/field_conv.cc
+++ b/sql/field_conv.cc
@@ -122,8 +122,26 @@ set_field_to_null(Field *field)
}
+/*
+  Set field to NULL, or to its automatic default (TIMESTAMP or next
+  auto_increment number) when NULL is not allowed
+
+  SYNOPSIS
+    set_field_to_null_with_conversions()
+    field		Field to update
+    no_conversions	Set to 1 if we should return 1 when the field can't
+			take NULL values.
+			If set to 0 we will store the 'default value'
+			if the field is a special field. If not, we will
+			give an error.
+
+  RETURN VALUES
+    0	Field could take NULL or an automatic conversion was used
+    1	Field could not take NULL and no conversion was used.
+	If no_conversions was not set, an error message is printed
+*/
+
bool
-set_field_to_null_with_conversions(Field *field)
+set_field_to_null_with_conversions(Field *field, bool no_conversions)
{
if (field->real_maybe_null())
{
@@ -131,6 +149,8 @@ set_field_to_null_with_conversions(Field *field)
field->reset();
return 0;
}
+ if (no_conversions)
+ return 1;
/*
    Check if this is a special type, which will get a special value
@@ -156,8 +176,6 @@ set_field_to_null_with_conversions(Field *field)
}
-
-
static void do_skip(Copy_field *copy __attribute__((unused)))
{
}
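
#
# Illustrative sketch (not from this patch) of the conversions described in
# the comment above: with no_conversions=0, storing NULL into a NOT NULL
# auto_increment or TIMESTAMP column falls back to the automatic value
# instead of raising an error.  Table name t1 is made up.
#
SET TIMESTAMP=1234;
create table t1 (id int not null auto_increment primary key, t timestamp);
insert into t1 values (NULL, NULL);
select * from t1;
drop table t1;
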
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index 7787b543f34..14810bada31 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -90,10 +90,11 @@ long innobase_mirrored_log_groups, innobase_log_files_in_group,
are determined in innobase_init below: */
char* innobase_data_home_dir = NULL;
+char* innobase_data_file_path = NULL;
char* innobase_log_group_home_dir = NULL;
char* innobase_log_arch_dir = NULL;
-/* The following has a midleading name: starting from 4.0.5 this also
-affects Windows */
+/* The following has a misleading name: starting from 4.0.5, this also
+affects Windows: */
char* innobase_unix_file_flush_method = NULL;
/* Below we have boolean-valued start-up parameters, and their default
@@ -104,14 +105,7 @@ my_bool innobase_log_archive = FALSE;
my_bool innobase_use_native_aio = FALSE;
my_bool innobase_fast_shutdown = TRUE;
-/*
- Set default InnoDB data file size to 10 MB and let it be
- auto-extending. Thus users can use InnoDB without having to
- specify any startup options.
-*/
-
-char *innobase_data_file_path= (char*) "ibdata1:10M:autoextend";
-static char *internal_innobase_data_file_path=0;
+static char *internal_innobase_data_file_path = NULL;
/* The following counter is used to convey information to InnoDB
about server activity: in selects it is not sensible to call
@@ -650,46 +644,59 @@ innobase_init(void)
DBUG_ENTER("innobase_init");
- os_innodb_umask = (ulint)my_umask;
+ os_innodb_umask = (ulint)my_umask;
- /*
- When using the embedded server, the datadirectory is not
- in the current directory.
- */
- if (mysql_embedded)
- default_path=mysql_real_data_home;
- else
- {
- /* It's better to use current lib, to keep path's short */
- current_dir[0] = FN_CURLIB;
- current_dir[1] = FN_LIBCHAR;
- current_dir[2] = 0;
- default_path=current_dir;
+ /* First calculate the default path for innodb_data_home_dir etc.,
+ in case the user has not given any value.
+
+ Note that when using the embedded server, the datadirectory is not
+ necessarily the current directory of this program. */
+
+ if (mysql_embedded) {
+ default_path = mysql_real_data_home;
+ } else {
+ /* It's better to use current lib, to keep paths short */
+ current_dir[0] = FN_CURLIB;
+ current_dir[1] = FN_LIBCHAR;
+ current_dir[2] = 0;
+ default_path = current_dir;
}
+ ut_a(default_path);
+
if (specialflag & SPECIAL_NO_PRIOR) {
srv_set_thread_priorities = FALSE;
} else {
srv_set_thread_priorities = TRUE;
srv_query_thread_priority = QUERY_PRIOR;
}
+
+ /* Set InnoDB initialization parameters according to the values
+ read from MySQL .cnf file */
- /*
- Set InnoDB initialization parameters according to the values
- read from MySQL .cnf file
- */
+ /*--------------- Data files -------------------------*/
- // Make a copy of innobase_data_file_path to not modify the original
- internal_innobase_data_file_path=my_strdup(innobase_data_file_path,
- MYF(MY_WME));
+ /* The default dir for data files is the datadir of MySQL */
srv_data_home = (innobase_data_home_dir ? innobase_data_home_dir :
default_path);
- srv_arch_dir = (innobase_log_arch_dir ? innobase_log_arch_dir :
- default_path);
- ret = (bool)
- srv_parse_data_file_paths_and_sizes(internal_innobase_data_file_path,
+ /* Set default InnoDB data file size to 10 MB and let it be
+ auto-extending. Thus users can use InnoDB in >= 4.0 without having
+ to specify any startup options. */
+
+ if (!innobase_data_file_path) {
+ innobase_data_file_path = (char*) "ibdata1:10M:autoextend";
+ }
+
+ /* Since InnoDB edits the argument in the next call, we make another
+ copy of it: */
+
+ internal_innobase_data_file_path = my_strdup(innobase_data_file_path,
+ MYF(MY_WME));
+
+ ret = (bool) srv_parse_data_file_paths_and_sizes(
+ internal_innobase_data_file_path,
&srv_data_file_names,
&srv_data_file_sizes,
&srv_data_file_is_raw_partition,
@@ -697,12 +704,26 @@ innobase_init(void)
&srv_auto_extend_last_data_file,
&srv_last_file_size_max);
if (ret == FALSE) {
- sql_print_error("InnoDB: syntax error in innodb_data_file_path");
- DBUG_RETURN(TRUE);
+ sql_print_error(
+ "InnoDB: syntax error in innodb_data_file_path");
+ DBUG_RETURN(TRUE);
}
- if (!innobase_log_group_home_dir)
- innobase_log_group_home_dir= default_path;
+ /* -------------- Log files ---------------------------*/
+
+ /* The default dir for log files is the datadir of MySQL */
+
+ if (!innobase_log_group_home_dir) {
+ innobase_log_group_home_dir = default_path;
+ }
+
+ /* Since innodb_log_arch_dir has no relevance under MySQL,
+ starting from 4.0.6 we always set it the same as
+ innodb_log_group_home_dir: */
+
+ innobase_log_arch_dir = innobase_log_group_home_dir;
+
+ srv_arch_dir = innobase_log_arch_dir;
ret = (bool)
srv_parse_log_group_home_dirs(innobase_log_group_home_dir,
@@ -716,9 +737,9 @@ innobase_init(void)
DBUG_RETURN(TRUE);
}
- srv_file_flush_method_str = (innobase_unix_file_flush_method ?
- innobase_unix_file_flush_method :
- NULL);
+ /* --------------------------------------------------*/
+
+ srv_file_flush_method_str = innobase_unix_file_flush_method;
srv_n_log_groups = (ulint) innobase_mirrored_log_groups;
srv_n_log_files = (ulint) innobase_log_files_in_group;
@@ -741,7 +762,9 @@ innobase_init(void)
srv_fast_shutdown = (ibool) innobase_fast_shutdown;
srv_print_verbose_log = mysql_embedded ? 0 : 1;
+
if (strcmp(default_charset_info->name, "latin1") == 0) {
+
/* Store the character ordering table to InnoDB.
For non-latin1 charsets we use the MySQL comparison
functions, and consequently we do not need to know
@@ -4179,3 +4202,4 @@ ha_innobase::get_auto_increment()
}
#endif /* HAVE_INNOBASE_DB */
+
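
#
# Illustrative check, not part of this patch: with no innodb_data_file_path
# given in my.cnf, the comment above says the server now falls back to a
# 10 MB auto-extending data file, which should be visible as:
#
show variables like "innodb_data_file_path";
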
diff --git a/sql/handler.cc b/sql/handler.cc
index cdd007f2cc2..a36a77484e5 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -883,13 +883,13 @@ int ha_create_table(const char *name, HA_CREATE_INFO *create_info,
void ha_key_cache(void)
{
if (keybuff_size)
- (void) init_key_cache(keybuff_size);
+ (void) init_key_cache((ulong) keybuff_size);
}
void ha_resize_key_cache(void)
{
- (void) resize_key_cache(keybuff_size);
+ (void) resize_key_cache((ulong) keybuff_size);
}
diff --git a/sql/item.cc b/sql/item.cc
index 1c46f9abb7e..875eeb4d940 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -701,7 +701,7 @@ void Item_field::save_org_in_field(Field *to)
if (field->is_null())
{
null_value=1;
- set_field_to_null_with_conversions(to);
+ set_field_to_null_with_conversions(to, 1);
}
else
{
@@ -711,12 +711,12 @@ void Item_field::save_org_in_field(Field *to)
}
}
-int Item_field::save_in_field(Field *to)
+int Item_field::save_in_field(Field *to, bool no_conversions)
{
if (result_field->is_null())
{
null_value=1;
- return set_field_to_null_with_conversions(to);
+ return set_field_to_null_with_conversions(to, no_conversions);
}
else
{
@@ -744,9 +744,9 @@ int Item_field::save_in_field(Field *to)
1 Field doesn't support NULL values and can't handle 'field = NULL'
*/
-int Item_null::save_in_field(Field *field)
+int Item_null::save_in_field(Field *field, bool no_conversions)
{
- return set_field_to_null_with_conversions(field);
+ return set_field_to_null_with_conversions(field, no_conversions);
}
@@ -768,7 +768,7 @@ int Item_null::save_safe_in_field(Field *field)
}
-int Item::save_in_field(Field *field)
+int Item::save_in_field(Field *field, bool no_conversions)
{
int error;
if (result_type() == STRING_RESULT ||
@@ -781,7 +781,7 @@ int Item::save_in_field(Field *field)
str_value.set_quick(buff,sizeof(buff),cs);
result=val_str(&str_value);
if (null_value)
- return set_field_to_null_with_conversions(field);
+ return set_field_to_null_with_conversions(field, no_conversions);
field->set_notnull();
error=field->store(result->ptr(),result->length(),cs);
str_value.set_quick(0, 0, cs);
@@ -798,14 +798,15 @@ int Item::save_in_field(Field *field)
{
longlong nr=val_int();
if (null_value)
- return set_field_to_null_with_conversions(field);
+ return set_field_to_null_with_conversions(field, no_conversions);
field->set_notnull();
error=field->store(nr);
}
return (error) ? -1 : 0;
}
-int Item_string::save_in_field(Field *field)
+
+int Item_string::save_in_field(Field *field, bool no_conversions)
{
String *result;
result=val_str(&str_value);
@@ -815,7 +816,8 @@ int Item_string::save_in_field(Field *field)
return (field->store(result->ptr(),result->length(),charset())) ? -1 : 0;
}
-int Item_int::save_in_field(Field *field)
+
+int Item_int::save_in_field(Field *field, bool no_conversions)
{
longlong nr=val_int();
if (null_value)
@@ -824,7 +826,8 @@ int Item_int::save_in_field(Field *field)
return (field->store(nr)) ? -1 : 0;
}
-int Item_real::save_in_field(Field *field)
+
+int Item_real::save_in_field(Field *field, bool no_conversions)
{
double nr=val();
if (null_value)
@@ -877,7 +880,7 @@ longlong Item_varbinary::val_int()
}
-int Item_varbinary::save_in_field(Field *field)
+int Item_varbinary::save_in_field(Field *field, bool no_conversions)
{
int error;
field->set_notnull();
@@ -1030,9 +1033,10 @@ bool Item_ref::check_loop(uint id)
DBUG_RETURN((*ref)->check_loop(id));
}
+
/*
-** If item is a const function, calculate it and return a const item
-** The original item is freed if not returned
+ If item is a const function, calculate it and return a const item
+ The original item is freed if not returned
*/
Item_result item_cmp_type(Item_result a,Item_result b)
diff --git a/sql/item.h b/sql/item.h
index 11b141613f3..5321c5874a4 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -54,11 +54,11 @@ public:
void set_name(const char *str,uint length=0);
void init_make_field(Send_field *tmp_field,enum enum_field_types type);
virtual bool fix_fields(THD *, struct st_table_list *, Item **);
- virtual int save_in_field(Field *field);
+ virtual int save_in_field(Field *field, bool no_conversions);
virtual void save_org_in_field(Field *field)
- { (void) save_in_field(field); }
+ { (void) save_in_field(field, 1); }
virtual int save_safe_in_field(Field *field)
- { return save_in_field(field); }
+ { return save_in_field(field, 1); }
virtual bool send(THD *thd, String *str);
virtual bool eq(const Item *, bool binary_cmp) const;
virtual Item_result result_type () const { return REAL_RESULT; }
@@ -194,7 +194,7 @@ public:
}
void make_field(Send_field *field);
bool fix_fields(THD *, struct st_table_list *, Item **);
- int save_in_field(Field *field);
+ int save_in_field(Field *field,bool no_conversions);
void save_org_in_field(Field *field);
table_map used_tables() const;
enum Item_result result_type () const
@@ -219,7 +219,7 @@ public:
longlong val_int();
String *val_str(String *str);
void make_field(Send_field *field);
- int save_in_field(Field *field);
+ int save_in_field(Field *field, bool no_conversions);
int save_safe_in_field(Field *field);
enum Item_result result_type () const
{ return STRING_RESULT; }
@@ -289,7 +289,7 @@ public:
double val() { return (double) value; }
String *val_str(String*);
void make_field(Send_field *field);
- int save_in_field(Field *field);
+ int save_in_field(Field *field, bool no_conversions);
bool basic_const_item() const { return 1; }
Item *new_item() { return new Item_int(name,value,max_length); }
void print(String *str);
@@ -329,7 +329,7 @@ public:
max_length=length;
}
Item_real(double value_par) :value(value_par) {}
- int save_in_field(Field *field);
+ int save_in_field(Field *field, bool no_conversions);
enum Type type() const { return REAL_ITEM; }
double val() { return value; }
longlong val_int() { return (longlong) (value+(value > 0 ? 0.5 : -0.5));}
@@ -372,7 +372,7 @@ public:
double val() { return atof(str_value.ptr()); }
longlong val_int() { return strtoll(str_value.ptr(),(char**) 0,10); }
String *val_str(String*) { return (String*) &str_value; }
- int save_in_field(Field *field);
+ int save_in_field(Field *field, bool no_conversions);
void make_field(Send_field *field);
enum Item_result result_type () const { return STRING_RESULT; }
bool basic_const_item() const { return 1; }
@@ -392,7 +392,7 @@ public:
Item_default() { name= (char*) "DEFAULT"; }
enum Type type() const { return DEFAULT_ITEM; }
void make_field(Send_field *field) {}
- int save_in_field(Field *field)
+ int save_in_field(Field *field, bool no_conversions)
{
field->set_default();
return 0;
@@ -430,7 +430,7 @@ public:
double val() { return (double) Item_varbinary::val_int(); }
longlong val_int();
String *val_str(String*) { return &str_value; }
- int save_in_field(Field *field);
+ int save_in_field(Field *field, bool no_conversions);
void make_field(Send_field *field);
enum Item_result result_type () const { return INT_RESULT; }
};
@@ -490,7 +490,8 @@ public:
bool send(THD *thd, String *tmp) { return (*ref)->send(thd, tmp); }
void make_field(Send_field *field) { (*ref)->make_field(field); }
bool fix_fields(THD *, struct st_table_list *, Item **);
- int save_in_field(Field *field) { return (*ref)->save_in_field(field); }
+ int save_in_field(Field *field, bool no_conversions)
+ { return (*ref)->save_in_field(field, no_conversions); }
void save_org_in_field(Field *field) { (*ref)->save_org_in_field(field); }
enum Item_result result_type () const { return (*ref)->result_type(); }
table_map used_tables() const { return (*ref)->used_tables(); }
@@ -510,9 +511,9 @@ class Item_int_with_ref :public Item_int
public:
Item_int_with_ref(longlong i, Item *ref_arg) :Item_int(i), ref(ref_arg)
{}
- int save_in_field(Field *field)
+ int save_in_field(Field *field, bool no_conversions)
{
- return ref->save_in_field(field);
+ return ref->save_in_field(field, no_conversions);
}
};
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index 146758600c0..1c72ee56212 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -77,7 +77,7 @@ static bool convert_constant_item(Field *field, Item **item)
{
if ((*item)->const_item() && (*item)->type() != Item::INT_ITEM)
{
- if (!(*item)->save_in_field(field) && !((*item)->null_value))
+ if (!(*item)->save_in_field(field, 1) && !((*item)->null_value))
{
Item *tmp=new Item_int_with_ref(field->val_int(), *item);
if (tmp)
@@ -519,15 +519,29 @@ longlong Item_func_between::val_int()
return 0;
}
+static Item_result item_store_type(Item_result a,Item_result b)
+{
+ if (a == STRING_RESULT || b == STRING_RESULT)
+ return STRING_RESULT;
+ else if (a == REAL_RESULT || b == REAL_RESULT)
+ return REAL_RESULT;
+ else
+ return INT_RESULT;
+}
+
void
Item_func_ifnull::fix_length_and_dec()
{
maybe_null=args[1]->maybe_null;
max_length=max(args[0]->max_length,args[1]->max_length);
decimals=max(args[0]->decimals,args[1]->decimals);
- cached_result_type=args[0]->result_type();
+ if ((cached_result_type=item_store_type(args[0]->result_type(),
+ args[1]->result_type())) !=
+ REAL_RESULT)
+ decimals= 0;
}
+
double
Item_func_ifnull::val()
{
@@ -1163,6 +1177,18 @@ void Item_func_in::update_used_tables()
const_item_cache&=item->const_item();
}
+void Item_func_in::split_sum_func(List<Item> &fields)
+{
+ if (item->with_sum_func && item->type() != SUM_FUNC_ITEM)
+ item->split_sum_func(fields);
+ else if (item->used_tables() || item->type() == SUM_FUNC_ITEM)
+ {
+ fields.push_front(item);
+ item=new Item_ref((Item**) fields.head_ref(),0,item->name);
+ }
+ Item_func::split_sum_func(fields);
+}
+
longlong Item_func_bit_or::val_int()
{
@@ -1394,15 +1420,15 @@ longlong Item_cond_or::val_int()
Item *and_expressions(Item *a, Item *b, Item **org_item)
{
if (!a)
- return (*org_item= b);
+ return (*org_item= (Item*) b);
if (a == *org_item)
{
Item_cond *res;
- if ((res= new Item_cond_and(a, b)))
+ if ((res= new Item_cond_and(a, (Item*) b)))
res->used_tables_cache= a->used_tables() | b->used_tables();
return res;
}
- if (((Item_cond_and*) a)->add(b))
+ if (((Item_cond_and*) a)->add((Item*) b))
return 0;
((Item_cond_and*) a)->used_tables_cache|= b->used_tables();
return a;
@@ -1797,7 +1823,7 @@ bool Item_func_like::turboBM_matches(const char* text, int text_len) const
i -= u;
}
if (i < 0)
- return true;
+ return 1;
register const int v = plm1 - i;
turboShift = u - v;
@@ -1814,7 +1840,7 @@ bool Item_func_like::turboBM_matches(const char* text, int text_len) const
}
j += shift;
}
- return false;
+ return 0;
}
else
{
@@ -1828,7 +1854,7 @@ bool Item_func_like::turboBM_matches(const char* text, int text_len) const
i -= u;
}
if (i < 0)
- return true;
+ return 1;
register const int v = plm1 - i;
turboShift = u - v;
@@ -1845,7 +1871,7 @@ bool Item_func_like::turboBM_matches(const char* text, int text_len) const
}
j += shift;
}
- return false;
+ return 0;
}
}
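
#
# Illustrative sketch (not from this patch) of the result-type promotion
# done by item_store_type() above: IFNULL on an INT column with a string
# default should give a string result, matching the new
# ifnull(t2.b,"this is null") tests.  Table names t1/t2 are made up.
#
create table t1 (a int, b int);
insert into t1 values (20, NULL);
select b, ifnull(b, "this is null") from t1;
create table t2 select ifnull(b, "this is null") as c from t1;
show columns from t2;
drop table t1, t2;
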
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index 74c9dec7ef8..d3e83a55add 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -435,9 +435,11 @@ class Item_func_in :public Item_int_func
longlong val_int();
bool fix_fields(THD *thd, struct st_table_list *tlist, Item **ref)
{
- return (item->check_cols(1) ||
- item->fix_fields(thd, tlist, &item) ||
- Item_func::fix_fields(thd, tlist, ref));
+ bool res=(item->check_cols(1) ||
+ item->fix_fields(thd, tlist, &item) ||
+ Item_func::fix_fields(thd, tlist, ref));
+ with_sum_func= with_sum_func || item->with_sum_func;
+ return res;
}
void fix_length_and_dec();
~Item_func_in() { delete item; delete array; delete in_item; }
@@ -448,6 +450,7 @@ class Item_func_in :public Item_int_func
enum Functype functype() const { return IN_FUNC; }
const char *func_name() const { return " IN "; }
void update_used_tables();
+ void split_sum_func(List<Item> &fields);
bool check_loop(uint id)
{
DBUG_ENTER("Item_func_in::check_loop");
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 1611b5f2257..c84b554b522 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -779,21 +779,20 @@ double Item_func_round::val()
}
-double Item_func_rand::val()
+void Item_func_rand::fix_length_and_dec()
{
- THD* thd = current_thd;
+ decimals=NOT_FIXED_DEC;
+ max_length=float_length(decimals);
if (arg_count)
{ // Only use argument once in query
uint32 tmp= (uint32) (args[0]->val_int());
- randominit(&thd->rand,(uint32) (tmp*0x10001L+55555555L),
- (uint32) (tmp*0x10000001L));
-#ifdef DELETE_ITEMS
- delete args[0];
-#endif
- arg_count=0;
+ if ((rand= (struct rand_struct*) sql_alloc(sizeof(*rand))))
+ randominit(rand,(uint32) (tmp*0x10001L+55555555L),
+ (uint32) (tmp*0x10000001L));
}
- else if (!thd->rand_used)
+ else
{
+ THD *thd= current_thd;
/*
No need to send a Rand log event if seed was given eg: RAND(seed),
as it will be replicated in the query as such.
@@ -805,8 +804,14 @@ double Item_func_rand::val()
thd->rand_used=1;
thd->rand_saved_seed1=thd->rand.seed1;
thd->rand_saved_seed2=thd->rand.seed2;
+ rand= &thd->rand;
}
- return rnd(&thd->rand);
+}
+
+
+double Item_func_rand::val()
+{
+ return rnd(rand);
}
longlong Item_func_sign::val_int()
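
#
# Illustrative sketch, not part of this patch: with the seeding moved into
# fix_length_and_dec(), each RAND(seed) item gets its own generator that is
# initialised once per statement, so two occurrences of the same literal
# seed in one row are expected to produce the same value, while plain
# RAND() keeps using the session generator (which func_math.test now pins
# through @@rand_seed1/@@rand_seed2).
#
select rand(999999), rand(999999);
set @@rand_seed1=10000000, @@rand_seed2=1000000;
select rand();
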
diff --git a/sql/item_func.h b/sql/item_func.h
index 67c088f2bd9..771881a0465 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -522,14 +522,15 @@ public:
class Item_func_rand :public Item_real_func
{
+ struct rand_struct *rand;
public:
Item_func_rand(Item *a) :Item_real_func(a) {}
Item_func_rand() :Item_real_func() {}
double val();
const char *func_name() const { return "rand"; }
- void fix_length_and_dec() { decimals=NOT_FIXED_DEC; max_length=float_length(decimals); }
bool const_item() const { return 0; }
table_map used_tables() const { return RAND_TABLE_BIT; }
+ void fix_length_and_dec();
};
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index 7c085a1b25a..7e2e8f7cfbd 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -429,7 +429,7 @@ String *Item_date::val_str(String *str)
}
-int Item_date::save_in_field(Field *field)
+int Item_date::save_in_field(Field *field, bool no_conversions)
{
TIME ltime;
timestamp_type t_type=TIMESTAMP_FULL;
@@ -567,7 +567,7 @@ bool Item_func_now::get_date(TIME *res,
}
-int Item_func_now::save_in_field(Field *to)
+int Item_func_now::save_in_field(Field *to, bool no_conversions)
{
to->set_notnull();
to->store_time(&ltime,TIMESTAMP_FULL);
diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h
index f9b987324f0..40397351c18 100644
--- a/sql/item_timefunc.h
+++ b/sql/item_timefunc.h
@@ -325,7 +325,7 @@ public:
decimals=0;
max_length=10*thd_charset()->mbmaxlen;
}
- int save_in_field(Field *to);
+ int save_in_field(Field *to, bool no_conversions);
void make_field(Send_field *tmp_field)
{
init_make_field(tmp_field,FIELD_TYPE_DATE);
@@ -406,7 +406,7 @@ public:
enum Item_result result_type () const { return STRING_RESULT; }
double val() { return (double) value; }
longlong val_int() { return value; }
- int save_in_field(Field *to);
+ int save_in_field(Field *to, bool no_conversions);
String *val_str(String *str);
const char *func_name() const { return "now"; }
void fix_length_and_dec();
diff --git a/sql/log.cc b/sql/log.cc
index 0e1af8e5dae..dc7b5789efb 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -1062,40 +1062,44 @@ bool MYSQL_LOG::write(Log_event* event_info)
No check for auto events flag here - this write method should
never be called if auto-events are enabled
*/
- if (thd && thd->last_insert_id_used)
+ if (thd)
{
- Intvar_log_event e(thd,(uchar)LAST_INSERT_ID_EVENT,thd->last_insert_id);
- e.set_log_pos(this);
- if (thd->server_id)
- e.server_id = thd->server_id;
- if (e.write(file))
- goto err;
- }
- if (thd && thd->insert_id_used)
- {
- Intvar_log_event e(thd,(uchar)INSERT_ID_EVENT,thd->last_insert_id);
- e.set_log_pos(this);
- if (thd->server_id)
- e.server_id = thd->server_id;
- if (e.write(file))
- goto err;
- }
- if (thd && thd->rand_used)
- {
- Rand_log_event e(thd,thd->rand_saved_seed1,thd->rand_saved_seed2);
- e.set_log_pos(this);
- if (e.write(file))
- goto err;
- }
- if (thd && thd->variables.convert_set)
- {
- char buf[1024] = "SET CHARACTER SET ";
- char* p = strend(buf);
- p = strmov(p, thd->variables.convert_set->name);
- Query_log_event e(thd, buf, (ulong)(p - buf), 0);
- e.set_log_pos(this);
- if (e.write(file))
- goto err;
+ if (thd->last_insert_id_used)
+ {
+ Intvar_log_event e(thd,(uchar) LAST_INSERT_ID_EVENT,
+ thd->last_insert_id);
+ e.set_log_pos(this);
+ if (thd->server_id)
+ e.server_id = thd->server_id;
+ if (e.write(file))
+ goto err;
+ }
+ if (thd->insert_id_used)
+ {
+ Intvar_log_event e(thd,(uchar) INSERT_ID_EVENT,thd->last_insert_id);
+ e.set_log_pos(this);
+ if (thd->server_id)
+ e.server_id = thd->server_id;
+ if (e.write(file))
+ goto err;
+ }
+ if (thd->rand_used)
+ {
+ Rand_log_event e(thd,thd->rand_saved_seed1,thd->rand_saved_seed2);
+ e.set_log_pos(this);
+ if (e.write(file))
+ goto err;
+ }
+ if (thd->variables.convert_set)
+ {
+ char buf[256], *p;
+ p= strmov(strmov(buf, "SET CHARACTER SET "),
+ thd->variables.convert_set->name);
+ Query_log_event e(thd, buf, (ulong) (p - buf), 0);
+ e.set_log_pos(this);
+ if (e.write(file))
+ goto err;
+ }
}
event_info->set_log_pos(this);
if (event_info->write(file) ||
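
#
# Illustrative sketch (not from this patch) of why the Intvar/Rand events
# above are written before the query event: a statement that depends on
# session state, such as the insert below, is then preceded in the binlog
# by the matching LAST_INSERT_ID and RAND seed events, so replaying the log
# reproduces the same values.  Table name t1 is made up.
#
create table t1 (id bigint, r double);
insert into t1 values (last_insert_id(), rand());
drop table t1;
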
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index f4b556248da..cb9e3a362b4 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -249,6 +249,20 @@ typedef struct st_sql_list {
uint elements;
byte *first;
byte **next;
+
+ inline void empty()
+ {
+ elements=0;
+ first=0;
+ next= &first;
+ }
+ inline void link_in_list(byte *element,byte **next_ptr)
+ {
+ elements++;
+ (*next)=element;
+ next= next_ptr;
+ *next=0;
+ }
} SQL_LIST;
@@ -443,6 +457,10 @@ int mysql_update(THD *thd,TABLE_LIST *tables,List<Item> &fields,
List<Item> &values,COND *conds,
ORDER *order, ha_rows limit,
enum enum_duplicates handle_duplicates);
+int mysql_multi_update(THD *thd, TABLE_LIST *table_list,
+ List<Item> *fields, List<Item> *values,
+ COND *conds, ulong options,
+ enum enum_duplicates handle_duplicates);
int mysql_insert(THD *thd,TABLE_LIST *table,List<Item> &fields,
List<List_item> &values, List<Item> &update_fields,
List<Item> &update_values, enum_duplicates flag);
@@ -545,7 +563,7 @@ void store_position_for_column(const char *name);
bool add_to_list(SQL_LIST &list,Item *group,bool asc=0);
void add_join_on(TABLE_LIST *b,Item *expr);
void add_join_natural(TABLE_LIST *a,TABLE_LIST *b);
-bool add_proc_to_list(Item *item);
+bool add_proc_to_list(THD *thd, Item *item);
TABLE *unlink_open_table(THD *thd,TABLE *list,TABLE *find);
SQL_SELECT *make_select(TABLE *head, table_map const_tables,
@@ -664,6 +682,7 @@ extern char glob_hostname[FN_REFLEN], mysql_home[FN_REFLEN];
extern char pidfile_name[FN_REFLEN], time_zone[30], *opt_init_file;
extern char blob_newline;
extern double log_10[32];
+extern ulonglong keybuff_size;
extern ulong refresh_version,flush_version, thread_id,query_id,opened_tables;
extern ulong created_tmp_tables, created_tmp_disk_tables;
extern ulong aborted_threads,aborted_connects;
@@ -682,8 +701,7 @@ extern ulong ha_read_count, ha_write_count, ha_delete_count, ha_update_count;
extern ulong ha_read_key_count, ha_read_next_count, ha_read_prev_count;
extern ulong ha_read_first_count, ha_read_last_count;
extern ulong ha_read_rnd_count, ha_read_rnd_next_count;
-extern ulong ha_commit_count, ha_rollback_count;
-extern ulong keybuff_size,table_cache_size;
+extern ulong ha_commit_count, ha_rollback_count,table_cache_size;
extern ulong max_connections,max_connect_errors, connect_timeout;
extern ulong max_insert_delayed_threads, max_user_connections;
extern ulong long_query_count, what_to_log,flush_time,opt_sql_mode;
@@ -732,6 +750,7 @@ extern SHOW_COMP_OPTION have_innodb;
extern SHOW_COMP_OPTION have_berkeley_db;
extern struct system_variables global_system_variables;
extern struct system_variables max_system_variables;
+extern struct rand_struct sql_rand;
/* optional things, have_* variables */
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 3df75b3643d..81676b61b1f 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -32,6 +32,7 @@
#include <nisam.h>
#include <thr_alarm.h>
#include <ft_global.h>
+#include <assert.h>
#ifndef DBUG_OFF
#define ONE_THREAD
@@ -322,7 +323,8 @@ ulong thd_startup_options=(OPTION_UPDATE_LOG | OPTION_AUTO_IS_NULL |
uint protocol_version=PROTOCOL_VERSION;
struct system_variables global_system_variables;
struct system_variables max_system_variables;
-ulong keybuff_size,table_cache_size,
+ulonglong keybuff_size;
+ulong table_cache_size,
thread_stack,
thread_stack_min,what_to_log= ~ (1L << (uint) COM_TIME),
query_buff_size,
@@ -451,7 +453,7 @@ pthread_attr_t connection_attrib;
#include <process.h>
#if !defined(EMBEDDED_LIBRARY)
HANDLE hEventShutdown;
-static char *event_name;
+static char shutdown_event_name[40];
#include "nt_servc.h"
static NTService Service; // Service object for WinNT
#endif
@@ -1019,6 +1021,7 @@ static void set_root(const char *path)
sql_perror("chroot");
unireg_abort(1);
}
+ my_setwd("/", MYF(0));
#endif
}
@@ -1391,7 +1394,7 @@ or misconfigured. This error can also be caused by malfunctioning hardware.\n",
We will try our best to scrape up some info that will hopefully help diagnose\n\
the problem, but since we have already crashed, something is definitely wrong\n\
and this may fail.\n\n");
- fprintf(stderr, "key_buffer_size=%ld\n", keybuff_size);
+ fprintf(stderr, "key_buffer_size=%lu\n", (ulong) keybuff_size);
fprintf(stderr, "read_buffer_size=%ld\n", global_system_variables.read_buff_size);
fprintf(stderr, "sort_buffer_size=%ld\n", thd->variables.sortbuff_size);
fprintf(stderr, "max_used_connections=%ld\n", max_used_connections);
@@ -1399,8 +1402,9 @@ and this may fail.\n\n");
fprintf(stderr, "threads_connected=%d\n", thread_count);
fprintf(stderr, "It is possible that mysqld could use up to \n\
key_buffer_size + (read_buffer_size + sort_buffer_size)*max_connections = %ld K\n\
-bytes of memory\n", (keybuff_size + (global_system_variables.read_buff_size +
- thd->variables.sortbuff_size) *
+bytes of memory\n", ((ulong) keybuff_size +
+ (global_system_variables.read_buff_size +
+ thd->variables.sortbuff_size) *
max_connections)/ 1024);
fprintf(stderr, "Hope that's ok; if not, decrease some variables in the equation.\n\n");
@@ -2102,6 +2106,7 @@ int main(int argc, char **argv)
(void) grant_init((THD*) 0);
init_max_user_conn();
init_update_queries();
+ DBUG_ASSERT(current_thd == 0);
#ifdef HAVE_DLOPEN
if (!opt_noacl)
@@ -2110,6 +2115,7 @@ int main(int argc, char **argv)
/* init_slave() must be called after the thread keys are created */
init_slave();
+ DBUG_ASSERT(current_thd == 0);
if (opt_bin_log && !server_id)
{
server_id= !master_host ? 1 : 2;
@@ -2346,6 +2352,14 @@ bool default_service_handling(char **argv,
int main(int argc, char **argv)
{
+
+  /* When several instances are running on the same machine, we
+     need a uniquely named hEventShutdown event derived from the
+     application PID, e.g.: MySQLShutdown1890; MySQLShutdown2342
+ */
+ int2str((int) GetCurrentProcessId(),strmov(shutdown_event_name,
+ "MySQLShutdown"), 10);
+
if (Service.GetOS()) /* true NT family */
{
char file_path[FN_REFLEN];
@@ -2360,10 +2374,9 @@ int main(int argc, char **argv)
if (Service.IsService(argv[1]))
{
/* start an optional service */
- event_name= argv[1];
- load_default_groups[0]= argv[1];
+ load_default_groups[0]= argv[1];
start_mode= 1;
- Service.Init(event_name, mysql_service);
+ Service.Init(argv[1], mysql_service);
return 0;
}
}
@@ -2382,9 +2395,8 @@ int main(int argc, char **argv)
use_opt_args=1;
opt_argc=argc;
opt_argv=argv;
- event_name= argv[2];
start_mode= 1;
- Service.Init(event_name, mysql_service);
+ Service.Init(argv[2], mysql_service);
return 0;
}
}
@@ -2404,7 +2416,6 @@ int main(int argc, char **argv)
{
/* start the default service */
start_mode= 1;
- event_name= "MySqlShutdown";
Service.Init(MYSQL_SERVICENAME, mysql_service);
return 0;
}
@@ -3764,8 +3775,9 @@ struct my_option my_long_options[] =
IO_SIZE, 0},
{"key_buffer_size", OPT_KEY_BUFFER_SIZE,
"The size of the buffer used for index blocks. Increase this to get better index handling (for all reads and multiple writes) to as much as you can afford; 64M on a 256M machine that mainly runs MySQL is quite common.",
- (gptr*) &keybuff_size, (gptr*) &keybuff_size, 0, GET_ULONG, REQUIRED_ARG,
- KEY_CACHE_SIZE, MALLOC_OVERHEAD, (long) ~0, MALLOC_OVERHEAD, IO_SIZE, 0},
+ (gptr*) &keybuff_size, (gptr*) &keybuff_size, 0, GET_ULL,
+ REQUIRED_ARG, KEY_CACHE_SIZE, MALLOC_OVERHEAD, (long) ~0, MALLOC_OVERHEAD,
+ IO_SIZE, 0},
{"long_query_time", OPT_LONG_QUERY_TIME,
"Log all queries that have taken more than long_query_time seconds to execute to file.",
(gptr*) &global_system_variables.long_query_time,
@@ -4268,6 +4280,8 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
break;
case 'h':
strmake(mysql_real_data_home,argument, sizeof(mysql_real_data_home)-1);
+ /* Correct pointer set by my_getopt (for embedded library) */
+ mysql_data_home= mysql_real_data_home;
break;
case 'L':
strmake(language, argument, sizeof(language)-1);
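
The main() hunk above names the Windows shutdown event per server process, so several instances on one host no longer collide on a single "MySqlShutdown" event. A minimal sketch of the same naming scheme, using standard C++ formatting instead of the strmov()/int2str() pair from the patch; the buffer size and PID below are illustrative only:

// Sketch: build a per-process event name such as "MySQLShutdown1890".
// std::snprintf() replaces the server's strmov()/int2str() helpers here.
#include <cstddef>
#include <cstdio>

static void make_shutdown_event_name(char *buf, std::size_t len,
                                     unsigned long pid)
{
  std::snprintf(buf, len, "MySQLShutdown%lu", pid);
}

int main()
{
  char name[40];                                  // matches the 40-byte buffer
  make_shutdown_event_name(name, sizeof(name), 1890UL);
  std::printf("%s\n", name);                      // MySQLShutdown1890
  return 0;
}
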
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 0fad5769998..d76737e8e31 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -1019,7 +1019,7 @@ get_mm_leaf(PARAM *param, Field *field, KEY_PART *key_part,
field->cmp_type() != value->result_type())
DBUG_RETURN(0);
- if (value->save_in_field(field) > 0)
+ if (value->save_in_field(field, 1) > 0)
{
/* This happens when we try to insert a NULL field in a not null column */
// TODO; Check if we can we remove the following block.
@@ -1028,9 +1028,9 @@ get_mm_leaf(PARAM *param, Field *field, KEY_PART *key_part,
/* convert column_name <=> NULL -> column_name IS NULL */
// Get local copy of key
char *str= (char*) alloc_root(param->mem_root,1);
- if (!*str)
+ if (!str)
DBUG_RETURN(0);
- *str = 1;
+ *str= 1;
DBUG_RETURN(new SEL_ARG(field,str,str));
}
DBUG_RETURN(&null_element); // cmp with NULL is never true
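
The get_mm_leaf() hunk above fixes a reversed null check: the old code read the byte behind the freshly allocated pointer (!*str) before verifying the allocation itself (!str). A standalone illustration of the corrected pattern, with malloc() standing in for the server's alloc_root():

// Sketch: test the pointer returned by the allocator, not the
// uninitialized byte it points at. malloc() stands in for alloc_root().
#include <cstdio>
#include <cstdlib>

int main()
{
  char *str= static_cast<char*>(std::malloc(1));
  if (!str)                      // correct: did the allocation fail?
    return 1;                    // (the old code tested !*str here)
  *str= 1;                       // only now is it safe to write through it
  std::printf("flag byte: %d\n", *str);
  std::free(str);
  return 0;
}
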
diff --git a/sql/password.c b/sql/password.c
index 48181ea18e6..318c8e84db3 100644
--- a/sql/password.c
+++ b/sql/password.c
@@ -43,7 +43,7 @@
void randominit(struct rand_struct *rand_st,ulong seed1, ulong seed2)
{ /* For mysql 3.21.# */
#ifdef HAVE_purify
- bzero((char*) rand_st,sizeof(*rand_st)); /* Avoid UMC varnings */
+  bzero((char*) rand_st,sizeof(*rand_st));	/* Avoid UMC warnings */
#endif
rand_st->max_value= 0x3FFFFFFFL;
rand_st->max_value_dbl=(double) rand_st->max_value;
diff --git a/sql/set_var.cc b/sql/set_var.cc
index 566ca6da860..691add191b2 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -122,7 +122,7 @@ sys_var_thd_ulong sys_interactive_timeout("interactive_timeout",
&SV::net_interactive_timeout);
sys_var_thd_ulong sys_join_buffer_size("join_buffer_size",
&SV::join_buff_size);
-sys_var_long_ptr sys_key_buffer_size("key_buffer_size",
+sys_var_ulonglong_ptr sys_key_buffer_size("key_buffer_size",
&keybuff_size,
fix_key_buffer_size);
sys_var_bool_ptr sys_local_infile("local_infile",
@@ -693,6 +693,23 @@ void sys_var_long_ptr::set_default(THD *thd, enum_var_type type)
}
+bool sys_var_ulonglong_ptr::update(THD *thd, set_var *var)
+{
+ ulonglong tmp= var->value->val_int();
+ if (option_limits)
+ *value= (ulonglong) getopt_ull_limit_value(tmp, option_limits);
+ else
+ *value= (ulonglong) tmp;
+ return 0;
+}
+
+
+void sys_var_ulonglong_ptr::set_default(THD *thd, enum_var_type type)
+{
+ *value= (ulonglong) option_limits->def_value;
+}
+
+
bool sys_var_bool_ptr::update(THD *thd, set_var *var)
{
*value= (my_bool) var->save_result.ulong_value;
diff --git a/sql/set_var.h b/sql/set_var.h
index de1e27e0da8..39a5995e30f 100644
--- a/sql/set_var.h
+++ b/sql/set_var.h
@@ -86,6 +86,22 @@ public:
};
+class sys_var_ulonglong_ptr :public sys_var
+{
+public:
+ ulonglong *value;
+ sys_var_ulonglong_ptr(const char *name_arg, ulonglong *value_ptr)
+ :sys_var(name_arg),value(value_ptr) {}
+ sys_var_ulonglong_ptr(const char *name_arg, ulonglong *value_ptr,
+ sys_after_update_func func)
+ :sys_var(name_arg,func), value(value_ptr) {}
+ bool update(THD *thd, set_var *var);
+ void set_default(THD *thd, enum_var_type type);
+ SHOW_TYPE type() { return SHOW_LONGLONG; }
+ byte *value_ptr(THD *thd, enum_var_type type) { return (byte*) value; }
+};
+
+
class sys_var_bool_ptr :public sys_var
{
public:
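
The sys_var_ulonglong_ptr class above lets key_buffer_size carry a full 64-bit value, and its update() method clamps the requested value against the option limits before storing it. A rough sketch of that clamping under assumed bounds; the real server derives them from the my_option table via getopt_ull_limit_value():

// Sketch: clamp a 64-bit option value to [min, max] and keep it block
// aligned, roughly what the option-limit handling does. The bounds and
// block size below are illustrative, not the server's actual limits.
#include <cstdint>
#include <cstdio>

static std::uint64_t clamp_option(std::uint64_t val, std::uint64_t min_val,
                                  std::uint64_t max_val, std::uint64_t block)
{
  if (val < min_val) val= min_val;
  if (val > max_val) val= max_val;
  return val - (val % block);          // round down to a whole block
}

int main()
{
  std::uint64_t keybuff_size=
    clamp_option(6ULL * 1024 * 1024 * 1024,     // requested: 6GB
                 8ULL * 1024,                   // assumed minimum
                 UINT64_MAX,                    // assumed maximum
                 4096);                         // assumed block size
  std::printf("key_buffer_size=%llu\n",
              static_cast<unsigned long long>(keybuff_size));
  return 0;
}
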
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 63affe5fde6..e2b36106fb0 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -2224,8 +2224,8 @@ static key_map get_key_map_from_key_list(TABLE *table,
}
/****************************************************************************
-** This just drops in all fields instead of current '*' field
-** Returns pointer to last inserted field if ok
+ This just drops in all fields instead of current '*' field
+ Returns pointer to last inserted field if ok
****************************************************************************/
bool
@@ -2239,21 +2239,26 @@ insert_fields(THD *thd,TABLE_LIST *tables, const char *db_name,
for (; tables ; tables=tables->next)
{
TABLE *table=tables->table;
- if (grant_option && !thd->master_access &&
- check_grant_all_columns(thd,SELECT_ACL,table) )
- DBUG_RETURN(-1);
if (!table_name || (!strcmp(table_name,tables->alias) &&
(!db_name || !strcmp(tables->db,db_name))))
{
+      /* Ensure that we have access rights to all columns */
+ if (grant_option && !thd->master_access &&
+ check_grant_all_columns(thd,SELECT_ACL,table) )
+ DBUG_RETURN(-1);
Field **ptr=table->field,*field;
thd->used_tables|=table->map;
while ((field = *ptr++))
{
Item_field *item= new Item_field(field);
if (!found++)
- (void) it->replace(item);
+ (void) it->replace(item); // Replace '*'
else
it->after(item);
+ /*
+	  Mark if the field was used before in this select.
+	  Used by 'insert' to verify if a field name is used twice.
+ */
if (field->query_id == thd->query_id)
thd->dupp_field=field;
field->query_id=thd->query_id;
@@ -2377,7 +2382,7 @@ fill_record(List<Item> &fields,List<Item> &values)
while ((field=(Item_field*) f++))
{
value=v++;
- if (value->save_in_field(field->field) > 0)
+ if (value->save_in_field(field->field, 0) > 0)
DBUG_RETURN(1);
}
DBUG_RETURN(0);
@@ -2395,7 +2400,7 @@ fill_record(Field **ptr,List<Item> &values)
while ((field = *ptr++))
{
value=v++;
- if (value->save_in_field(field) == 1)
+ if (value->save_in_field(field, 0) == 1)
DBUG_RETURN(1);
}
DBUG_RETURN(0);
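
The insert_fields() hunk above stamps every expanded field with the current query id and records a field in thd->dupp_field when the same stamp is seen twice; check_insert_fields() later reports that as ER_FIELD_SPECIFIED_TWICE. A small self-contained sketch of the idea, with a stand-in Field struct rather than the server's Field class:

// Sketch of the query_id trick: stamping each field with the current
// query id makes "column mentioned twice" a single comparison.
#include <cstdio>

struct Field
{
  const char *name;
  unsigned long query_id;        // id of the last query that used this field
};

// Returns the field that was named twice in this query, or nullptr.
static Field *mark_fields(Field **list, int count, unsigned long query_id)
{
  Field *dupp_field= nullptr;
  for (int i= 0; i < count; i++)
  {
    if (list[i]->query_id == query_id)   // already marked in this query
      dupp_field= list[i];
    list[i]->query_id= query_id;
  }
  return dupp_field;
}

int main()
{
  Field a= {"a", 0}, b= {"b", 0};
  Field *insert_list[]= { &a, &b, &a };  // column "a" given twice
  if (Field *dupp= mark_fields(insert_list, 3, 42))
    std::printf("column '%s' specified twice\n", dupp->name);
  return 0;
}
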
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 57cd0e7a13d..ebd1d9d2b3c 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -37,7 +37,6 @@
#include <mysys_err.h>
#include <assert.h>
-extern struct rand_struct sql_rand;
/*****************************************************************************
** Instansiate templates
@@ -172,9 +171,8 @@ THD::THD():user_time(0), fatal_error(0),
{
pthread_mutex_lock(&LOCK_thread_count);
ulong tmp=(ulong) (rnd(&sql_rand) * 3000000);
- randominit(&rand, tmp + (ulong) start_time,
- tmp + (ulong) thread_id);
pthread_mutex_unlock(&LOCK_thread_count);
+ randominit(&rand, tmp + (ulong) &rand, tmp + (ulong) ::query_id);
}
}
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 5326b66e56e..e04c92cffb0 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -679,7 +679,7 @@ public:
}
virtual bool send_fields(List<Item> &list,uint flag)=0;
virtual bool send_data(List<Item> &items)=0;
- virtual void initialize_tables (JOIN *join=0) {}
+ virtual bool initialize_tables (JOIN *join=0) { return 0; }
virtual void send_error(uint errcode,const char *err)
{
my_message(errcode, err, MYF(0));
@@ -743,10 +743,10 @@ class select_insert :public select_result {
List<Item> *fields;
ulonglong last_insert_id;
COPY_INFO info;
- uint save_time_stamp;
select_insert(TABLE *table_par,List<Item> *fields_par,enum_duplicates duplic)
- :table(table_par),fields(fields_par), last_insert_id(0), save_time_stamp(0) {
+ :table(table_par),fields(fields_par), last_insert_id(0)
+ {
bzero((char*) &info,sizeof(info));
info.handle_duplicates=duplic;
}
@@ -790,8 +790,8 @@ class select_union :public select_result {
public:
TABLE *table;
COPY_INFO info;
- uint save_time_stamp;
TMP_TABLE_PARAM *tmp_table_param;
+ bool not_describe;
select_union(TABLE *table_par);
~select_union();
@@ -923,58 +923,61 @@ public:
friend int unique_write_to_ptrs(gptr key, element_count count, Unique *unique);
};
- class multi_delete : public select_result {
- TABLE_LIST *delete_tables, *table_being_deleted;
+class multi_delete : public select_result
+{
+ TABLE_LIST *delete_tables, *table_being_deleted;
#ifdef SINISAS_STRIP
- IO_CACHE **tempfiles;
- byte *memory_lane;
+ IO_CACHE **tempfiles;
+ byte *memory_lane;
#else
- Unique **tempfiles;
+ Unique **tempfiles;
#endif
- THD *thd;
- ha_rows deleted;
- uint num_of_tables;
- int error;
- bool do_delete, transactional_tables, log_delayed, normal_tables;
- public:
- multi_delete(THD *thd, TABLE_LIST *dt, uint num_of_tables);
- ~multi_delete();
- int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
- bool send_fields(List<Item> &list,
- uint flag) { return 0; }
- bool send_data(List<Item> &items);
- void initialize_tables (JOIN *join);
- void send_error(uint errcode,const char *err);
- int do_deletes (bool from_send_error);
- bool send_eof();
- };
-
- class multi_update : public select_result {
- TABLE_LIST *update_tables, *table_being_updated;
- COPY_INFO *infos;
- TABLE **tmp_tables;
- THD *thd;
- ha_rows updated, found;
- List<Item> fields;
- List <Item> **fields_by_tables;
- enum enum_duplicates dupl;
- uint num_of_tables, num_fields, num_updated, *save_time_stamps, *field_sequence;
- int error;
- bool do_update, not_trans_safe;
- public:
- multi_update(THD *thd_arg, TABLE_LIST *ut, List<Item> &fs,
- enum enum_duplicates handle_duplicates,
- uint num);
- ~multi_update();
- int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
- bool send_fields(List<Item> &list,
+ THD *thd;
+ ha_rows deleted;
+ uint num_of_tables;
+ int error;
+ bool do_delete, transactional_tables, log_delayed, normal_tables;
+public:
+ multi_delete(THD *thd, TABLE_LIST *dt, uint num_of_tables);
+ ~multi_delete();
+ int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
+ bool send_fields(List<Item> &list,
uint flag) { return 0; }
- bool send_data(List<Item> &items);
- void initialize_tables (JOIN *join);
- void send_error(uint errcode,const char *err);
- int do_updates (bool from_send_error);
- bool send_eof();
- };
+ bool send_data(List<Item> &items);
+ bool initialize_tables (JOIN *join);
+ void send_error(uint errcode,const char *err);
+ int do_deletes (bool from_send_error);
+ bool send_eof();
+};
+
+
+class multi_update : public select_result
+{
+ TABLE_LIST *all_tables, *update_tables, *table_being_updated;
+ THD *thd;
+ TABLE **tmp_tables, *main_table;
+ TMP_TABLE_PARAM *tmp_table_param;
+ ha_rows updated, found;
+ List <Item> *fields, *values;
+ List <Item> **fields_for_table, **values_for_table;
+ uint table_count;
+ Copy_field *copy_field;
+ enum enum_duplicates handle_duplicates;
+ bool do_update, trans_safe, transactional_tables, log_delayed;
+
+public:
+ multi_update(THD *thd_arg, TABLE_LIST *ut, List<Item> *fields,
+ List<Item> *values, enum_duplicates handle_duplicates);
+ ~multi_update();
+ int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
+ bool send_fields(List<Item> &list, uint flag) { return 0; }
+ bool send_data(List<Item> &items);
+ bool initialize_tables (JOIN *join);
+ void send_error(uint errcode,const char *err);
+ int do_updates (bool from_send_error);
+ bool send_eof();
+};
+
class select_dumpvar :public select_result {
ha_rows row_count;
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index d35790da1b0..6440838ae94 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -226,12 +226,13 @@ cleanup:
extern "C" int refposcmp2(void* arg, const void *a,const void *b)
{
+ /* arg is a pointer to file->ref_length */
return memcmp(a,b, *(int*) arg);
}
multi_delete::multi_delete(THD *thd_arg, TABLE_LIST *dt,
uint num_of_tables_arg)
- : delete_tables (dt), thd(thd_arg), deleted(0),
+ : delete_tables(dt), thd(thd_arg), deleted(0),
num_of_tables(num_of_tables_arg), error(0),
do_delete(0), transactional_tables(0), log_delayed(0), normal_tables(0)
{
@@ -244,31 +245,22 @@ multi_delete::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
{
DBUG_ENTER("multi_delete::prepare");
unit= u;
- do_delete = true;
+ do_delete= 1;
thd->proc_info="deleting from main table";
-
- if (thd->options & OPTION_SAFE_UPDATES)
- {
- TABLE_LIST *table_ref;
- for (table_ref=delete_tables; table_ref; table_ref=table_ref->next)
- {
- TABLE *table=table_ref->table;
- if ((thd->options & OPTION_SAFE_UPDATES) && !table->quick_keys)
- {
- my_error(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE,MYF(0));
- DBUG_RETURN(1);
- }
- }
- }
DBUG_RETURN(0);
}
-void
+bool
multi_delete::initialize_tables(JOIN *join)
{
- int counter=0;
TABLE_LIST *walk;
+ Unique **tempfiles_ptr;
+ DBUG_ENTER("initialize_tables");
+
+ if ((thd->options & OPTION_SAFE_UPDATES) && error_if_full_join(join))
+ DBUG_RETURN(1);
+
table_map tables_to_delete_from=0;
for (walk= delete_tables ; walk ; walk=walk->next)
tables_to_delete_from|= walk->table->map;
@@ -282,9 +274,10 @@ multi_delete::initialize_tables(JOIN *join)
{
/* We are going to delete from this table */
TABLE *tbl=walk->table=tab->table;
+ walk=walk->next;
/* Don't use KEYREAD optimization on this table */
tbl->no_keyread=1;
- walk=walk->next;
+ tbl->used_keys= 0;
if (tbl->file->has_transactions())
log_delayed= transactional_tables= 1;
else if (tbl->tmp_table != NO_TMP_TABLE)
@@ -294,19 +287,17 @@ multi_delete::initialize_tables(JOIN *join)
}
}
walk= delete_tables;
- walk->table->used_keys=0;
- for (walk=walk->next ; walk ; walk=walk->next, counter++)
+ tempfiles_ptr= tempfiles;
+ for (walk=walk->next ; walk ; walk=walk->next)
{
- tables_to_delete_from|= walk->table->map;
TABLE *table=walk->table;
- /* Don't use key read with MULTI-TABLE-DELETE */
- table->used_keys=0;
- tempfiles[counter] = new Unique (refposcmp2,
- (void *) &table->file->ref_length,
- table->file->ref_length,
- MEM_STRIP_BUF_SIZE);
+ *tempfiles_ptr++= new Unique (refposcmp2,
+ (void *) &table->file->ref_length,
+ table->file->ref_length,
+ MEM_STRIP_BUF_SIZE);
}
init_ftfuncs(thd, thd->lex.current_select->select_lex(), 1);
+ DBUG_RETURN(thd->fatal_error != 0);
}
@@ -321,7 +312,7 @@ multi_delete::~multi_delete()
t->no_keyread=0;
}
- for (uint counter = 0; counter < num_of_tables-1; counter++)
+ for (uint counter= 0; counter < num_of_tables-1; counter++)
{
if (tempfiles[counter])
delete tempfiles[counter];
@@ -428,7 +419,7 @@ int multi_delete::do_deletes(bool from_send_error)
else
table_being_deleted = delete_tables;
- do_delete = false;
+ do_delete= 0;
for (table_being_deleted=table_being_deleted->next;
table_being_deleted ;
table_being_deleted=table_being_deleted->next, counter++)
@@ -483,7 +474,7 @@ bool multi_delete::send_eof()
was a non-transaction-safe table involved, since
modifications in it cannot be rolled back.
*/
- if (deleted)
+ if (deleted && (error <= 0 || normal_tables))
{
mysql_update_log.write(thd,thd->query,thd->query_length);
if (mysql_bin_log.is_open())
@@ -493,11 +484,17 @@ bool multi_delete::send_eof()
if (mysql_bin_log.write(&qinfo) && !normal_tables)
local_error=1; // Log write failed: roll back the SQL statement
}
- /* Commit or rollback the current SQL statement */
- VOID(ha_autocommit_or_rollback(thd,local_error > 0));
-
- query_cache_invalidate3(thd, delete_tables, 1);
+ if (!log_delayed)
+ thd->options|=OPTION_STATUS_NO_TRANS_UPDATE;
}
+ /* Commit or rollback the current SQL statement */
+ if (transactional_tables)
+ if (ha_autocommit_or_rollback(thd,local_error > 0))
+ local_error=1;
+
+ if (deleted)
+ query_cache_invalidate3(thd, delete_tables, 1);
+
if (local_error)
::send_error(thd);
else
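
The multi_delete changes above gather row references per secondary table in Unique objects (refposcmp2() is plain memcmp() over file->ref_length bytes) and only run the actual deletes in do_deletes(), so the scan of the first table is never disturbed. A sketch of that collect-then-delete shape, with std::set standing in for Unique and made-up row references:

// Sketch: collect fixed-length row references during the join, delete
// afterwards. std::set<std::string> stands in for the server's Unique
// class; ref_length and the row ids are invented for illustration.
#include <cstddef>
#include <cstdio>
#include <set>
#include <string>

int main()
{
  const std::size_t ref_length= 4;             // assumed handler ref size
  std::set<std::string> pending_deletes;       // deduplicated row refs

  const unsigned char rows[][4]= { {0,0,0,1}, {0,0,0,2}, {0,0,0,1} };
  for (const auto &ref : rows)                 // "send_data" phase
    pending_deletes.insert(std::string(reinterpret_cast<const char*>(ref),
                                       ref_length));

  // "do_deletes" phase: each stored reference is deleted exactly once.
  std::printf("unique rows queued for delete: %zu\n", pending_deletes.size());
  return 0;
}
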
diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc
index 909e1643fe5..6ea319a72e4 100644
--- a/sql/sql_handler.cc
+++ b/sql/sql_handler.cc
@@ -180,10 +180,10 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables,
Item *item;
for (key_len=0 ; (item=it_ke++) ; key_part++)
{
- (void) item->save_in_field(key_part->field);
+ (void) item->save_in_field(key_part->field, 1);
key_len+=key_part->store_length;
}
- if (!(key= (byte*) sql_calloc(ALIGN_SIZE(key_len))))
+ if (!(key= (byte*) thd->calloc(ALIGN_SIZE(key_len))))
{
send_error(thd,ER_OUTOFMEMORY);
goto err;
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 15c6df0398d..4ad9b6bae95 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -41,7 +41,8 @@ static void unlink_blobs(register TABLE *table);
/*
Check if insert fields are correct
- Resets form->time_stamp if a timestamp value is set
+  Updates table->time_stamp to point to the timestamp field or 0, depending
+  on whether the timestamp should be updated or not.
*/
int
@@ -87,11 +88,12 @@ check_insert_fields(THD *thd,TABLE *table,List<Item> &fields,
my_error(ER_FIELD_SPECIFIED_TWICE,MYF(0), thd->dupp_field->field_name);
return -1;
}
+ table->time_stamp=0;
if (table->timestamp_field && // Don't set timestamp if used
- table->timestamp_field->query_id == thd->query_id)
- table->time_stamp=0; // This should be saved
+ table->timestamp_field->query_id != thd->query_id)
+ table->time_stamp= table->timestamp_field->offset()+1;
}
- // For the values we need select_priv
+ // For the values we need select_priv
table->grant.want_privilege=(SELECT_ACL & ~table->grant.privilege);
return 0;
}
@@ -109,7 +111,6 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list,
!(thd->master_access & SUPER_ACL));
bool transactional_table, log_delayed, bulk_insert=0;
uint value_count;
- uint save_time_stamp;
ulong counter = 1;
ulonglong id;
COPY_INFO info;
@@ -167,7 +168,6 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list,
table= table_list->table;
thd->proc_info="init";
thd->used_tables=0;
- save_time_stamp=table->time_stamp;
values= its++;
if (check_insert_fields(thd,table,fields,*values,1) ||
setup_tables(insert_table_list) ||
@@ -175,10 +175,7 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list,
(duplic == DUP_UPDATE &&
(setup_fields(thd, insert_table_list, update_fields, 0, 0, 0) ||
setup_fields(thd, insert_table_list, update_values, 0, 0, 0))))
- {
- table->time_stamp= save_time_stamp;
goto abort;
- }
if (find_real_table_in_list(table_list->next,
table_list->db, table_list->real_name))
{
@@ -195,14 +192,10 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list,
my_printf_error(ER_WRONG_VALUE_COUNT_ON_ROW,
ER(ER_WRONG_VALUE_COUNT_ON_ROW),
MYF(0),counter);
- table->time_stamp=save_time_stamp;
goto abort;
}
if (setup_fields(thd,insert_table_list,*values,0,0,0))
- {
- table->time_stamp= save_time_stamp;
goto abort;
- }
}
its.rewind ();
/*
@@ -364,7 +357,6 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list,
}
}
thd->proc_info="end";
- table->time_stamp=save_time_stamp; // Restore auto timestamp ptr
table->next_number_field=0;
thd->count_cuted_fields=0;
thd->next_insert_id=0; // Reset this if wrongly used
@@ -1339,7 +1331,6 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
DBUG_ENTER("select_insert::prepare");
unit= u;
- save_time_stamp=table->time_stamp;
if (check_insert_fields(thd,table,*fields,values,1))
DBUG_RETURN(1);
@@ -1360,8 +1351,6 @@ select_insert::~select_insert()
{
if (table)
{
- if (save_time_stamp)
- table->time_stamp=save_time_stamp;
table->next_number_field=0;
table->file->extra(HA_EXTRA_RESET);
}
@@ -1467,7 +1456,6 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
/* First field to copy */
field=table->field+table->fields - values.elements;
- save_time_stamp=table->time_stamp;
if (table->timestamp_field) // Don't set timestamp if used
{
table->timestamp_field->set_time();
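
Several hunks above (check_insert_fields() here, mysql_update() and mysql_multi_update() further down) drop the old save/restore of table->time_stamp in favour of a simple encoding: 0 means the statement assigns the TIMESTAMP column itself, any other value is the column offset plus one. A sketch of that convention with stand-in types:

// Sketch of the time_stamp encoding: 0 = "leave the TIMESTAMP column
// alone", otherwise the value is the column offset + 1. The Table
// struct is a stand-in, not the server's TABLE.
#include <cstdio>

struct Table
{
  unsigned timestamp_offset;     // byte offset of the TIMESTAMP column
  unsigned time_stamp;           // 0, or timestamp_offset + 1
};

static void setup_timestamp(Table *t, bool column_set_by_user)
{
  t->time_stamp= column_set_by_user ? 0 : t->timestamp_offset + 1;
}

int main()
{
  Table t= { 16, 0 };
  setup_timestamp(&t, false);    // user did not mention the column
  if (t.time_stamp)
    std::printf("auto-set TIMESTAMP at offset %u\n", t.time_stamp - 1);
  setup_timestamp(&t, true);     // user assigned the column explicitly
  if (!t.time_stamp)
    std::printf("TIMESTAMP left to the user-supplied value\n");
  return 0;
}
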
diff --git a/sql/sql_olap.cc b/sql/sql_olap.cc
index 930e052ab90..6eb4fbcaaf6 100644
--- a/sql/sql_olap.cc
+++ b/sql/sql_olap.cc
@@ -75,7 +75,7 @@ static int make_new_olap_select(LEX *lex, SELECT_LEX *select_lex, List<Item> new
!strcmp(((Item_field*)new_item)->table_name,iif->table_name) &&
!strcmp(((Item_field*)new_item)->field_name,iif->field_name))
{
- not_found=false;
+ not_found= 0;
((Item_field*)new_item)->db_name=iif->db_name;
Item_field *new_one=new Item_field(iif->db_name, iif->table_name, iif->field_name);
privlist.push_back(new_one);
@@ -151,7 +151,7 @@ int handle_olaps(LEX *lex, SELECT_LEX *select_lex)
if (cursor->do_redirect)
{
cursor->table= ((TABLE_LIST*) cursor->table)->table;
- cursor->do_redirect=false;
+ cursor->do_redirect= 0;
}
}
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 7f3a4986038..8e8e2c44e01 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -193,6 +193,8 @@ static bool check_user(THD *thd,enum_server_command command, const char *user,
thd->db_length=0;
USER_RESOURCES ur;
+ if (passwd[0] && strlen(passwd) != SCRAMBLE_LENGTH)
+ return 1;
if (!(thd->user = my_strdup(user, MYF(0))))
{
send_error(thd,ER_OUT_OF_RESOURCES);
@@ -419,7 +421,7 @@ end:
}
-static void reset_mqh(THD *thd, LEX_USER *lu, bool get_them=false)
+static void reset_mqh(THD *thd, LEX_USER *lu, bool get_them= 0)
{
(void) pthread_mutex_lock(&LOCK_user_conn);
@@ -593,8 +595,6 @@ check_connections(THD *thd)
char *user= (char*) net->read_pos+5;
char *passwd= strend(user)+1;
char *db=0;
- if (passwd[0] && strlen(passwd) != SCRAMBLE_LENGTH)
- return ER_HANDSHAKE_ERROR;
if (thd->client_capabilities & CLIENT_CONNECT_WITH_DB)
db=strend(passwd)+1;
if (thd->client_capabilities & CLIENT_INTERACTIVE)
@@ -1914,59 +1914,24 @@ mysql_execute_command(THD *thd)
DBUG_VOID_RETURN;
}
{
- multi_update *result;
- uint table_count;
- TABLE_LIST *auxi;
- const char *msg=0;
-
- for (auxi= (TABLE_LIST*) tables, table_count=0 ; auxi ; auxi=auxi->next)
- table_count++;
-
+ const char *msg= 0;
if (select_lex->order_list.elements)
- msg="ORDER BY";
+ msg= "ORDER BY";
else if (select_lex->select_limit && select_lex->select_limit !=
HA_POS_ERROR)
- msg="LIMIT";
+ msg= "LIMIT";
if (msg)
{
net_printf(thd, ER_WRONG_USAGE, "UPDATE", msg);
res= 1;
break;
}
-
- tables->grant.want_privilege=(SELECT_ACL & ~tables->grant.privilege);
- if ((res=open_and_lock_tables(thd,tables)))
- break;
- unit->select_limit_cnt= HA_POS_ERROR;
- if (!setup_fields(thd,tables,select_lex->item_list,1,0,0) &&
- !setup_fields(thd,tables,lex->value_list,0,0,0) &&
- !thd->fatal_error &&
- (result=new multi_update(thd,tables,select_lex->item_list,
- lex->duplicates, table_count)))
- {
- List <Item> total_list;
- List_iterator <Item> field_list(select_lex->item_list);
- List_iterator <Item> value_list(lex->value_list);
- Item *item;
- while ((item=field_list++))
- total_list.push_back(item);
- while ((item=value_list++))
- total_list.push_back(item);
-
- res= mysql_select(thd, tables, total_list,
- select_lex->where,
- (ORDER *)NULL, (ORDER *)NULL, (Item *)NULL,
- (ORDER *)NULL,
- select_lex->options | thd->options |
- SELECT_NO_JOIN_CACHE,
- result, unit, select_lex, 0);
- delete result;
- if (thd->net.report_error)
- res= -1;
- }
- else
- res= -1; // Error is not sent
- close_thread_tables(thd);
+ res= mysql_multi_update(thd,tables,
+ &select_lex->item_list,
+ &lex->value_list,
+ select_lex->where,
+ select_lex->options,
+ lex->duplicates, unit, select_lex);
}
break;
case SQLCOM_REPLACE:
@@ -3022,16 +2987,6 @@ mysql_parse(THD *thd, char *inBuf, uint length)
}
-inline static void
-link_in_list(SQL_LIST *list,byte *element,byte **next)
-{
- list->elements++;
- (*list->next)=element;
- list->next=next;
- *next=0;
-}
-
-
/*****************************************************************************
** Store field definition for create
** Return 0 if ok
@@ -3344,7 +3299,7 @@ void store_position_for_column(const char *name)
}
bool
-add_proc_to_list(Item *item)
+add_proc_to_list(THD* thd, Item *item)
{
ORDER *order;
Item **item_ptr;
@@ -3355,7 +3310,7 @@ add_proc_to_list(Item *item)
*item_ptr= item;
order->item=item_ptr;
order->free_me=0;
- link_in_list(&current_lex->proc_list,(byte*) order,(byte**) &order->next);
+ thd->lex.proc_list.link_in_list((byte*) order,(byte**) &order->next);
return 0;
}
@@ -3409,7 +3364,7 @@ bool add_to_list(SQL_LIST &list,Item *item,bool asc)
order->asc = asc;
order->free_me=0;
order->used=0;
- link_in_list(&list,(byte*) order,(byte**) &order->next);
+ list.link_in_list((byte*) order,(byte**) &order->next);
DBUG_RETURN(0);
}
@@ -3500,7 +3455,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(Table_ident *table,
}
}
}
- link_in_list(&table_list, (byte*) ptr, (byte**) &ptr->next);
+ table_list.link_in_list((byte*) ptr, (byte**) &ptr->next);
DBUG_RETURN(ptr);
}
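
The sql_parse.cc hunks above retire the file-local link_in_list() in favour of a method on SQL_LIST; the operation itself is an O(1) append on an intrusive singly linked list that keeps a pointer to the tail's next slot. A minimal standalone sketch of that structure (Node and List below are illustrative, not the server's types):

// Sketch of an intrusive singly linked list with O(1) append, the shape
// behind SQL_LIST::link_in_list().
#include <cstdio>

struct Node { int value; Node *next; };

struct List
{
  unsigned elements= 0;
  Node *first= nullptr;
  Node **next= &first;           // points at the last 'next' slot

  void link_in_list(Node *element)
  {
    elements++;
    *next= element;              // hook the element into the tail slot
    next= &element->next;        // its 'next' field becomes the new tail slot
    element->next= nullptr;
  }
};

int main()
{
  List list;
  Node a{1, nullptr}, b{2, nullptr};
  list.link_in_list(&a);
  list.link_in_list(&b);
  for (Node *n= list.first; n; n= n->next)
    std::printf("%d\n", n->value);     // prints 1 then 2
  return 0;
}
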
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 50820b931b6..a5c69763863 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -132,7 +132,9 @@ static void read_cached_record(JOIN_TAB *tab);
static bool cmp_buffer_with_ref(JOIN_TAB *tab);
static bool setup_new_fields(THD *thd,TABLE_LIST *tables,List<Item> &fields,
List<Item> &all_fields,ORDER *new_order);
-static ORDER *create_distinct_group(ORDER *order, List<Item> &fields);
+static ORDER *create_distinct_group(THD *thd, ORDER *order,
+ List<Item> &fields,
+ bool *all_order_by_fields_used);
static bool test_if_subpart(ORDER *a,ORDER *b);
static TABLE *get_sort_by_table(ORDER *a,ORDER *b,TABLE_LIST *tables);
static void calc_group_buffer(JOIN *join,ORDER *group);
@@ -248,7 +250,7 @@ JOIN::prepare(TABLE_LIST *tables_init,
if (!fake_select_lex)
select_lex->join= this;
union_part= (unit->first_select()->next_select() != 0);
-
+
/* Check that all tables, fields, conds and order are ok */
if (setup_tables(tables_list) ||
@@ -343,6 +345,10 @@ JOIN::prepare(TABLE_LIST *tables_init,
this->group= group_list != 0;
row_limit= ((select_distinct || order || group_list) ? HA_POS_ERROR :
unit->select_limit_cnt);
+ /* select_limit is used to decide if we are likely to scan the whole table */
+ select_limit= unit->select_limit_cnt;
+ if (having || (select_options & OPTION_FOUND_ROWS))
+ select_limit= HA_POS_ERROR;
do_send_rows = (unit->select_limit_cnt) ? 1 : 0;
this->unit= unit;
@@ -371,6 +377,7 @@ JOIN::prepare(TABLE_LIST *tables_init,
int
JOIN::optimize()
{
+ ha_rows select_limit;
DBUG_ENTER("JOIN::optimize");
#ifdef HAVE_REF_TO_FIELDS // Not done yet
@@ -403,7 +410,8 @@ JOIN::optimize()
// normal error processing & cleanup
DBUG_RETURN(-1);
- if (cond_value == Item::COND_FALSE || (!unit->select_limit_cnt && !(select_options & OPTION_FOUND_ROWS)))
+ if (cond_value == Item::COND_FALSE ||
+ (!unit->select_limit_cnt && !(select_options & OPTION_FOUND_ROWS)))
{ /* Impossible cond */
zero_result_cause= "Impossible WHERE";
DBUG_RETURN(0);
@@ -451,11 +459,12 @@ JOIN::optimize()
found_const_table_map= 0;
}
thd->proc_info= "preparing";
- result->initialize_tables(this);
+ if (result->initialize_tables(this))
+ DBUG_RETURN(-1);
if (const_table_map != found_const_table_map &&
!(select_options & SELECT_DESCRIBE))
{
- zero_result_cause= "";
+ zero_result_cause= "no matching row in const table";
select_options= 0; //TODO why option in return_zero_rows was droped
DBUG_RETURN(0);
}
@@ -513,21 +522,46 @@ JOIN::optimize()
if (! hidden_group_fields)
select_distinct=0;
}
- else if (select_distinct && tables - const_tables == 1 &&
- (unit->select_limit_cnt == HA_POS_ERROR ||
- (select_options & OPTION_FOUND_ROWS) ||
- order &&
- !(skip_sort_order=
- test_if_skip_sort_order(&join_tab[const_tables],
- order,
- unit->select_limit_cnt,
- 1))))
+ else if (select_distinct && tables - const_tables == 1)
{
- if ((group_list= create_distinct_group(order, fields_list)))
- {
- select_distinct= 0;
- no_order= !order;
- group= 1; // For end_write_group
+ /*
+ We are only using one table. In this case we change DISTINCT to a
+ GROUP BY query if:
+ - The GROUP BY can be done through indexes (no sort) and the ORDER
+ BY only uses selected fields.
+ (In this case we can later optimize away GROUP BY and ORDER BY)
+ - We are scanning the whole table without LIMIT
+ This can happen if:
+ - We are using CALC_FOUND_ROWS
+ - We are using an ORDER BY that can't be optimized away.
+
+ We don't want to use this optimization when we are using LIMIT
+ because in this case we can just create a temporary table that
+ holds LIMIT rows and stop when this table is full.
+ */
+ JOIN_TAB *tab= &join_tab[const_tables];
+ bool all_order_fields_used;
+ if (order)
+ skip_sort_order= test_if_skip_sort_order(tab, order, select_limit, 1);
+ if ((group=create_distinct_group(thd, order, fields_list,
+ &all_order_fields_used)))
+ {
+ bool skip_group= (skip_sort_order &&
+ test_if_skip_sort_order(tab, group, select_limit,
+ 1) != 0);
+ if ((skip_group && all_order_fields_used) ||
+ select_limit == HA_POS_ERROR ||
+ (order && !skip_sort_order))
+ {
+ /* Change DISTINCT to GROUP BY */
+ select_distinct= 0;
+ no_order= !order;
+ if (all_order_fields_used)
+ order=0;
+ group=1; // For end_write_group
+ }
+ else
+ group= 0;
}
else if (thd->fatal_error) // End of memory
DBUG_RETURN(-1);
@@ -727,11 +761,9 @@ JOIN::exec()
order=group_list;
if (order &&
(const_tables == tables ||
- (simple_order &&
+ ((simple_order || skip_sort_order) &&
test_if_skip_sort_order(&join_tab[const_tables], order,
- (select_options & OPTION_FOUND_ROWS) ?
- HA_POS_ERROR : unit->select_limit_cnt,
- 0))))
+ select_limit, 0))))
order=0;
select_describe(this, need_tmp,
order != 0 && !skip_sort_order,
@@ -759,7 +791,7 @@ JOIN::exec()
group_list ? 0 : select_distinct,
group_list && simple_group,
(order == 0 || skip_sort_order) &&
- !(select_options & OPTION_FOUND_ROWS),
+ select_limit != HA_POS_ERROR,
select_options, unit)))
DBUG_VOID_RETURN;
@@ -813,9 +845,10 @@ JOIN::exec()
/* Optimize "select distinct b from t1 order by key_part_1 limit #" */
if (order && skip_sort_order)
{
- (void) test_if_skip_sort_order(&this->join_tab[const_tables],
- order, unit->select_limit_cnt, 0);
- order=0;
+ /* Should always succeed */
+ if (test_if_skip_sort_order(&this->join_tab[const_tables],
+ order, unit->select_limit_cnt, 0))
+ order=0;
}
}
@@ -989,8 +1022,7 @@ JOIN::exec()
}
}
{
- ha_rows select_limit= unit->select_limit_cnt;
- if (having || group || (select_options & OPTION_FOUND_ROWS))
+ if (group)
select_limit= HA_POS_ERROR;
else
{
@@ -1002,7 +1034,13 @@ JOIN::exec()
JOIN_TAB *end_table= &join_tab[tables];
for (; table < end_table ; table++)
{
- if (table->select_cond)
+ /*
+ table->keyuse is set in the case there was an original WHERE clause
+ on the table that was optimized away.
+ table->on_expr tells us that it was a LEFT JOIN and there will be
+ at least one row generated from the table.
+ */
+ if (table->select_cond || (table->keyuse && !table->on_expr))
{
/* We have to sort all rows */
select_limit= HA_POS_ERROR;
@@ -1186,13 +1224,21 @@ static ha_rows get_quick_record_count(SQL_SELECT *select,TABLE *table,
}
+/*
+ Calculate the best possible join and initialize the join structure
+
+ RETURN VALUES
+ 0 ok
+ 1 Fatal error
+*/
+
static bool
make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
DYNAMIC_ARRAY *keyuse_array)
{
int error;
uint i,table_count,const_count,found_ref,refs,key,const_ref,eq_part;
- table_map const_table_map,found_const_table_map,all_table_map;
+ table_map found_const_table_map,all_table_map;
TABLE **table_vector;
JOIN_TAB *stat,*stat_end,*s,**stat_ref;
SQL_SELECT *select;
@@ -1212,7 +1258,7 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
join->best_ref=stat_vector;
stat_end=stat+table_count;
- const_table_map=found_const_table_map=all_table_map=0;
+ found_const_table_map=all_table_map=0;
const_count=0;
for (s=stat,i=0 ; tables ; s++,tables=tables->next,i++)
@@ -1303,7 +1349,7 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
DBUG_RETURN(1);
/* Read tables with 0 or 1 rows (system tables) */
- join->const_table_map=const_table_map;
+ join->const_table_map= 0;
for (POSITION *p_pos=join->positions, *p_end=p_pos+const_count;
p_pos < p_end ;
@@ -1340,16 +1386,16 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
if (s->dependent) // If dependent on some table
{
// All dep. must be constants
- if (s->dependent & ~(join->const_table_map))
+ if (s->dependent & ~(found_const_table_map))
continue;
if (table->file->records <= 1L &&
!(table->file->table_flags() & HA_NOT_EXACT_COUNT))
{ // system table
- int tmp;
+ int tmp= 0;
s->type=JT_SYSTEM;
join->const_table_map|=table->map;
set_position(join,const_count++,s,(KEYUSE*) 0);
- if ((tmp=join_read_const_table(s,join->positions+const_count-1)))
+ if ((tmp= join_read_const_table(s,join->positions+const_count-1)))
{
if (tmp > 0)
DBUG_RETURN(1); // Fatal error
@@ -1374,7 +1420,7 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
{
if (keyuse->val->type() != Item::NULL_ITEM)
{
- if (!((~join->const_table_map) & keyuse->used_tables))
+ if (!((~found_const_table_map) & keyuse->used_tables))
const_ref|= (key_map) 1 << keyuse->keypart;
else
refs|=keyuse->used_tables;
@@ -1395,7 +1441,7 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
join->const_table_map|=table->map;
set_position(join,const_count++,s,start_keyuse);
if (create_ref_for_key(join, s, start_keyuse,
- join->const_table_map))
+ found_const_table_map))
DBUG_RETURN(1);
if ((tmp=join_read_const_table(s,
join->positions+const_count-1)))
@@ -1443,8 +1489,8 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
{
ha_rows records;
if (!select)
- select=make_select(s->table, join->const_table_map,
- join->const_table_map,
+ select=make_select(s->table, found_const_table_map,
+ found_const_table_map,
and_conds(conds,s->on_expr),&error);
records=get_quick_record_count(select,s->table, s->const_keys,
join->row_limit);
@@ -2607,12 +2653,13 @@ get_store_key(THD *thd, KEYUSE *keyuse, table_map used_tables,
bool
store_val_in_field(Field *field,Item *item)
{
+ bool error;
THD *thd=current_thd;
ha_rows cuted_fields=thd->cuted_fields;
thd->count_cuted_fields=1;
- (void) item->save_in_field(field);
+ error= item->save_in_field(field, 1);
thd->count_cuted_fields=0;
- return cuted_fields != thd->cuted_fields;
+ return error || cuted_fields != thd->cuted_fields;
}
@@ -2957,6 +3004,38 @@ make_join_readinfo(JOIN *join, uint options)
}
+/*
+  Give an error if some tables are joined with a full join
+
+ SYNOPSIS
+ error_if_full_join()
+ join Join condition
+
+ USAGE
+ This is used by multi_table_update and multi_table_delete when running
+ in safe mode
+
+ RETURN VALUES
+ 0 ok
+ 1 Error (full join used)
+*/
+
+bool error_if_full_join(JOIN *join)
+{
+ for (JOIN_TAB *tab=join->join_tab, *end=join->join_tab+join->tables;
+ tab < end;
+ tab++)
+ {
+ if (tab->type == JT_ALL && (!tab->select || !tab->select->quick))
+ {
+ my_error(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE,MYF(0));
+ return(1);
+ }
+ }
+ return(0);
+}
+
+
static void
join_free(JOIN *join)
{
@@ -3021,9 +3100,7 @@ join_free(JOIN *join)
}
join->group_fields.delete_elements();
join->tmp_table_param.copy_funcs.delete_elements();
- if (join->tmp_table_param.copy_field) // Because of bug in ecc
- delete [] join->tmp_table_param.copy_field;
- join->tmp_table_param.copy_field=0;
+ join->tmp_table_param.cleanup();
DBUG_VOID_RETURN;
}
@@ -3655,12 +3732,34 @@ const_expression_in_where(COND *cond, Item *comp_item, Item **const_item)
/****************************************************************************
- Create a temp table according to a field list.
- Set distinct if duplicates could be removed
- Given fields field pointers are changed to point at tmp_table
- for send_fields
+ Create internal temporary table
****************************************************************************/
+/*
+ Create field for temporary table
+
+ SYNOPSIS
+ create_tmp_field()
+ thd Thread handler
+ table Temporary table
+ item Item to create a field for
+ type Type of item (normally item->type)
+ copy_func If set and item is a function, store copy of item
+ in this array
+ group 1 if we are going to do a relative group by on result
+ modify_item 1 if item->result_field should point to new item.
+ This is relevent for how fill_record() is going to
+ work:
+ If modify_item is 1 then fill_record() will update
+ the record in the original table.
+			This is relevant for how fill_record() is going to
+ the temporary table
+
+ RETURN
+ 0 on error
+    newly created field
+*/
+
Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
Item_result_field ***copy_func, Field **from_field,
bool group, bool modify_item)
@@ -3778,6 +3877,13 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
}
+/*
+ Create a temp table according to a field list.
+ Set distinct if duplicates could be removed
+ Given fields field pointers are changed to point at tmp_table
+ for send_fields
+*/
+
TABLE *
create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
ORDER *group, bool distinct, bool save_sum_fields,
@@ -3853,13 +3959,13 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
NullS))
{
bitmap_clear_bit(&temp_pool, temp_pool_slot);
- DBUG_RETURN(NULL); /* purecov: inspected */
+ DBUG_RETURN(NULL); /* purecov: inspected */
}
if (!(param->copy_field=copy=new Copy_field[field_count]))
{
bitmap_clear_bit(&temp_pool, temp_pool_slot);
- my_free((gptr) table,MYF(0)); /* purecov: inspected */
- DBUG_RETURN(NULL); /* purecov: inspected */
+ my_free((gptr) table,MYF(0)); /* purecov: inspected */
+ DBUG_RETURN(NULL); /* purecov: inspected */
}
param->funcs=copy_func;
strmov(tmpname,path);
@@ -3940,9 +4046,19 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
}
else
{
+ /*
+ The last parameter to create_tmp_field() is a bit tricky:
+
+ We need to set it to 0 in union, to get fill_record() to modify the
+ temporary table.
+ We need to set it to 1 on multi-table-update and in select to
+ write rows to the temporary table.
+      Here we distinguish between UNION and multi-table updates by the fact
+      that in the latter case group is set to the row pointer.
+ */
Field *new_field=create_tmp_field(thd, table, item,type, &copy_func,
tmp_from_field, group != 0,
- not_all_columns);
+ not_all_columns || group !=0);
if (!new_field)
{
if (thd->fatal_error)
@@ -4258,7 +4374,6 @@ static bool open_tmp_table(TABLE *table)
table->db_stat=0;
return(1);
}
- /* VOID(ha_lock(table,F_WRLCK)); */ /* Single thread table */
(void) table->file->extra(HA_EXTRA_QUICK); /* Faster */
return(0);
}
@@ -4411,12 +4526,11 @@ free_tmp_table(THD *thd, TABLE *entry)
* If a HEAP table gets full, create a MyISAM table and copy all rows to this
*/
-bool create_myisam_from_heap(TABLE *table, TMP_TABLE_PARAM *param, int error,
- bool ignore_last_dupp_key_error)
+bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
+ int error, bool ignore_last_dupp_key_error)
{
TABLE new_table;
const char *save_proc_info;
- THD *thd=current_thd;
int write_err;
DBUG_ENTER("create_myisam_from_heap");
@@ -5392,7 +5506,8 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
if (error == HA_ERR_FOUND_DUPP_KEY ||
error == HA_ERR_FOUND_DUPP_UNIQUE)
goto end;
- if (create_myisam_from_heap(table, &join->tmp_table_param, error,1))
+ if (create_myisam_from_heap(join->thd, table, &join->tmp_table_param,
+ error,1))
DBUG_RETURN(-1); // Not a table_is_full error
table->uniques=0; // To ensure rows are the same
}
@@ -5469,7 +5584,8 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
copy_funcs(join->tmp_table_param.funcs);
if ((error=table->file->write_row(table->record[0])))
{
- if (create_myisam_from_heap(table, &join->tmp_table_param, error, 0))
+ if (create_myisam_from_heap(join->thd, table, &join->tmp_table_param,
+ error, 0))
DBUG_RETURN(-1); // Not a table_is_full error
/* Change method to update rows */
table->file->index_init(0);
@@ -5563,7 +5679,8 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
{
if ((error=table->file->write_row(table->record[0])))
{
- if (create_myisam_from_heap(table, &join->tmp_table_param,
+      if (create_myisam_from_heap(join->thd, table,
+ &join->tmp_table_param,
error, 0))
DBUG_RETURN(-1); // Not a table_is_full error
}
@@ -6769,12 +6886,14 @@ setup_new_fields(THD *thd,TABLE_LIST *tables,List<Item> &fields,
*/
static ORDER *
-create_distinct_group(ORDER *order_list,List<Item> &fields)
+create_distinct_group(THD *thd, ORDER *order_list, List<Item> &fields,
+ bool *all_order_by_fields_used)
{
List_iterator<Item> li(fields);
Item *item;
ORDER *order,*group,**prev;
+ *all_order_by_fields_used= 1;
while ((item=li++))
item->marker=0; /* Marker that field is not used */
@@ -6783,13 +6902,15 @@ create_distinct_group(ORDER *order_list,List<Item> &fields)
{
if (order->in_field_list)
{
- ORDER *ord=(ORDER*) sql_memdup(order,sizeof(ORDER));
+ ORDER *ord=(ORDER*) thd->memdup((char*) order,sizeof(ORDER));
if (!ord)
return 0;
*prev=ord;
prev= &ord->next;
(*ord->item)->marker=1;
}
+ else
+ *all_order_by_fields_used= 0;
}
li.rewind();
@@ -6799,7 +6920,7 @@ create_distinct_group(ORDER *order_list,List<Item> &fields)
continue;
if (!item->marker)
{
- ORDER *ord=(ORDER*) sql_calloc(sizeof(ORDER));
+ ORDER *ord=(ORDER*) thd->calloc(sizeof(ORDER));
if (!ord)
return 0;
ord->item=li.ref();
@@ -7251,7 +7372,7 @@ copy_sum_funcs(Item_sum **func_ptr)
{
Item_sum *func;
for (; (func = *func_ptr) ; func_ptr++)
- (void) func->save_in_field(func->result_field);
+ (void) func->save_in_field(func->result_field, 1);
return;
}
@@ -7282,7 +7403,7 @@ copy_funcs(Item_result_field **func_ptr)
{
Item_result_field *func;
for (; (func = *func_ptr) ; func_ptr++)
- (void) func->save_in_field(func->result_field);
+ (void) func->save_in_field(func->result_field, 1);
return;
}
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 3b89c1ce0d3..31693628be5 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -115,7 +115,8 @@ typedef struct st_position { /* Used in find_best */
/* Param to create temporary tables when doing SELECT:s */
-class TMP_TABLE_PARAM {
+class TMP_TABLE_PARAM :public Sql_alloc
+{
public:
List<Item> copy_funcs;
List_iterator_fast<Item> copy_funcs_it;
@@ -321,12 +322,12 @@ class store_key_field: public store_key
copy_field.set(to_field,from_field,0);
}
}
- bool copy()
- {
- copy_field.do_copy(&copy_field);
- return err != 0;
- }
- const char *name() const { return field_name; }
+ bool copy()
+ {
+ copy_field.do_copy(&copy_field);
+ return err != 0;
+ }
+ const char *name() const { return field_name; }
};
@@ -343,8 +344,7 @@ public:
{}
bool copy()
{
- (void) item->save_in_field(to_field);
- return err != 0;
+ return item->save_in_field(to_field, 1) || err != 0;
}
const char *name() const { return "func"; }
};
@@ -367,7 +367,8 @@ public:
if (!inited)
{
inited=1;
- (void)item->save_in_field(to_field);
+ if (item->save_in_field(to_field, 1))
+ err= 1;
}
return err != 0;
}
@@ -375,3 +376,4 @@ public:
};
bool cp_buffer_from_ref(TABLE_REF *ref);
+bool error_if_full_join(JOIN *join);
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 00077bda39f..6d4669894b9 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -1885,8 +1885,14 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name,
/* We changed a temporary table */
if (error)
{
+ /*
+	 * The following function call will also free the
+	 * new_table pointer.
+	 * Therefore, the new_table pointer is not freed here, as it is
+	 * freed in close_temporary(), which is called by the
+	 * close_temporary_table() function.
+ */
close_temporary_table(thd,new_db,tmp_name);
- my_free((gptr) new_table,MYF(0));
goto err;
}
/* Close lock if this is a transactional table */
@@ -2206,7 +2212,6 @@ copy_data_between_tables(TABLE *from,TABLE *to,
if (to->file->external_lock(thd,F_UNLCK))
error=1;
err:
- tmp_error = ha_recovery_logging(thd,TRUE);
free_io_cache(from);
*copied= found_count;
*deleted=delete_count;
diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc
index 35e33caf572..51d43b41833 100644
--- a/sql/sql_udf.cc
+++ b/sql/sql_udf.cc
@@ -203,6 +203,8 @@ void udf_init()
new_thd->version--; // Force close to free memory
close_thread_tables(new_thd);
delete new_thd;
+ /* Remember that we don't have a THD */
+ my_pthread_setspecific_ptr(THR_THD, 0);
DBUG_VOID_RETURN;
}
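
udf_init() above builds a temporary THD to read the mysql.func table and deletes it; the added my_pthread_setspecific_ptr(THR_THD, 0) clears the thread-specific slot so nothing can later reach the dead THD through current_thd. A standalone sketch of the same discipline with raw pthread keys (the key below is a stand-in for THR_THD):

// Sketch: after deleting an object stored in thread-local storage, reset
// the slot so no dangling pointer can be read through it.
#include <pthread.h>
#include <cstdio>

struct Thd { int id; };
static pthread_key_t thr_thd_key;

int main()
{
  pthread_key_create(&thr_thd_key, nullptr);

  Thd *tmp_thd= new Thd{1};
  pthread_setspecific(thr_thd_key, tmp_thd);   // "current_thd" is now valid

  delete tmp_thd;
  pthread_setspecific(thr_thd_key, nullptr);   // remember we have no THD

  if (!pthread_getspecific(thr_thd_key))
    std::printf("no current THD installed\n");

  pthread_key_delete(thr_thd_key);
  return 0;
}
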
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index e170f6c040e..705152ee9f2 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -41,7 +41,7 @@ int mysql_union(THD *thd, LEX *lex, select_result *result)
***************************************************************************/
select_union::select_union(TABLE *table_par)
- :table(table_par)
+ :table(table_par), not_describe(0)
{
bzero((char*) &info,sizeof(info));
/*
@@ -59,7 +59,7 @@ select_union::~select_union()
int select_union::prepare(List<Item> &list, SELECT_LEX_UNIT *u)
{
unit= u;
- if (save_time_stamp && list.elements != table->fields)
+ if (not_describe && list.elements != table->fields)
{
my_message(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT,
ER(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT),MYF(0));
@@ -117,7 +117,7 @@ int st_select_lex_unit::prepare(THD *thd, select_result *result)
prepared= 1;
union_result=0;
res= 0;
- found_rows_for_union= false;
+ found_rows_for_union= 0;
TMP_TABLE_PARAM tmp_table_param;
this->thd= thd;
this->result= result;
@@ -165,7 +165,7 @@ int st_select_lex_unit::prepare(THD *thd, select_result *result)
if (!(union_result=new select_union(table)))
goto err;
- union_result->save_time_stamp=1;
+ union_result->not_describe=1;
union_result->tmp_table_param=&tmp_table_param;
// prepare selects
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index c3ae435d851..3aab5cd30a9 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -29,10 +29,12 @@ static bool compare_record(TABLE *table, ulong query_id)
{
if (!table->blob_fields)
return cmp_record(table,1);
+ /* Compare null bits */
if (memcmp(table->null_flags,
table->null_flags+table->rec_buff_length,
table->null_bytes))
return 1; // Diff in NULL value
+ /* Compare updated fields */
for (Field **ptr=table->field ; *ptr ; ptr++)
{
if ((*ptr)->query_id == query_id &&
@@ -56,7 +58,7 @@ int mysql_update(THD *thd,
bool safe_update= thd->options & OPTION_SAFE_UPDATES;
bool used_key_is_modified, transactional_table, log_delayed;
int error=0;
- uint save_time_stamp, used_index, want_privilege;
+ uint used_index, want_privilege;
ulong query_id=thd->query_id, timestamp_query_id;
key_map old_used_keys;
TABLE *table;
@@ -73,7 +75,6 @@ int mysql_update(THD *thd,
fix_tables_pointers(thd->lex.all_selects_list);
table= table_list->table;
- save_time_stamp=table->time_stamp;
table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
thd->proc_info="init";
@@ -103,6 +104,7 @@ int mysql_update(THD *thd,
{
timestamp_query_id=table->timestamp_field->query_id;
table->timestamp_field->query_id=thd->query_id-1;
+ table->time_stamp= table->timestamp_field->offset() +1;
}
/* Check the fields we are going to modify */
@@ -122,7 +124,6 @@ int mysql_update(THD *thd,
table->grant.want_privilege=(SELECT_ACL & ~table->grant.privilege);
if (setup_fields(thd,update_table_list,values,0,0,0))
{
- table->time_stamp=save_time_stamp; // Restore timestamp pointer
DBUG_RETURN(-1); /* purecov: inspected */
}
@@ -133,7 +134,6 @@ int mysql_update(THD *thd,
(select && select->check_quick(safe_update, limit)) || !limit)
{
delete select;
- table->time_stamp=save_time_stamp; // Restore timestamp pointer
if (error)
{
DBUG_RETURN(-1); // Error in where
@@ -148,7 +148,6 @@ int mysql_update(THD *thd,
if (safe_update && !using_limit)
{
delete select;
- table->time_stamp=save_time_stamp;
send_error(thd,ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE);
DBUG_RETURN(1);
}
@@ -167,8 +166,8 @@ int mysql_update(THD *thd,
if (used_key_is_modified || order)
{
/*
- ** We can't update table directly; We must first search after all
- ** matching rows before updating the table!
+    We can't update the table directly; we must first find all
+    matching rows before updating the table!
*/
table->file->extra(HA_EXTRA_DONT_USE_CURSOR_TO_UPDATE);
IO_CACHE tempfile;
@@ -176,7 +175,6 @@ int mysql_update(THD *thd,
DISK_BUFFER_SIZE, MYF(MY_WME)))
{
delete select; /* purecov: inspected */
- table->time_stamp=save_time_stamp; // Restore timestamp pointer /* purecov: inspected */
DBUG_RETURN(-1);
}
if (old_used_keys & ((key_map) 1 << used_index))
@@ -207,7 +205,6 @@ int mysql_update(THD *thd,
== HA_POS_ERROR)
{
delete select;
- table->time_stamp=save_time_stamp; // Restore timestamp pointer
DBUG_RETURN(-1);
}
}
@@ -261,7 +258,6 @@ int mysql_update(THD *thd,
if (error >= 0)
{
delete select;
- table->time_stamp=save_time_stamp; // Restore timestamp pointer
DBUG_RETURN(-1);
}
}
@@ -311,7 +307,6 @@ int mysql_update(THD *thd,
end_read_record(&info);
thd->proc_info="end";
VOID(table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY));
- table->time_stamp=save_time_stamp; // Restore auto timestamp pointer
transactional_table= table->file->has_transactions();
log_delayed= (transactional_table || table->tmp_table);
if (updated && (error <= 0 || !transactional_table))
@@ -365,331 +360,344 @@ int mysql_update(THD *thd,
DBUG_RETURN(0);
}
+
/***************************************************************************
Update multiple tables from join
***************************************************************************/
-multi_update::multi_update(THD *thd_arg, TABLE_LIST *ut, List<Item> &fs,
- enum enum_duplicates handle_duplicates,
- uint num)
- : update_tables (ut), thd(thd_arg), updated(0), found(0), fields(fs),
- dupl(handle_duplicates), num_of_tables(num), num_fields(0), num_updated(0),
- error(0), do_update(false)
+/*
+ Setup multi-update handling and call SELECT to do the join
+*/
+
+int mysql_multi_update(THD *thd,
+ TABLE_LIST *table_list,
+ List<Item> *fields,
+ List<Item> *values,
+ COND *conds,
+ ulong options,
+ enum enum_duplicates handle_duplicates,
+ SELECT_LEX_UNIT *unit, SELECT_LEX *select_lex)
{
- save_time_stamps = (uint *) sql_calloc (sizeof(uint) * num_of_tables);
- tmp_tables = (TABLE **)NULL;
- int counter=0;
- ulong timestamp_query_id;
- not_trans_safe=false;
- for (TABLE_LIST *dt=ut ; dt ; dt=dt->next,counter++)
+ int res;
+ multi_update *result;
+ TABLE_LIST *tl;
+ DBUG_ENTER("mysql_multi_update");
+
+ table_list->grant.want_privilege=(SELECT_ACL & ~table_list->grant.privilege);
+ if ((res=open_and_lock_tables(thd,table_list)))
+ DBUG_RETURN(res);
+
+ thd->select_limit=HA_POS_ERROR;
+ if (setup_fields(thd, table_list, *fields, 1, 0, 0))
+ DBUG_RETURN(-1);
+
+ /*
+ Count tables and setup timestamp handling
+ */
+ for (tl= (TABLE_LIST*) table_list ; tl ; tl=tl->next)
{
- TABLE *table=ut->table;
- // (void) ut->table->file->extra(HA_EXTRA_NO_KEYREAD);
- dt->table->used_keys=0;
+ TABLE *table= tl->table;
if (table->timestamp_field)
{
- // Don't set timestamp column if this is modified
- timestamp_query_id=table->timestamp_field->query_id;
- table->timestamp_field->query_id=thd->query_id-1;
- if (table->timestamp_field->query_id == thd->query_id)
- table->time_stamp=0;
- else
- table->timestamp_field->query_id=timestamp_query_id;
+ table->time_stamp=0;
+ // Only set timestamp column if this is not modified
+ if (table->timestamp_field->query_id != thd->query_id)
+ table->time_stamp= table->timestamp_field->offset() +1;
}
- save_time_stamps[counter]=table->time_stamp;
}
- error = 1; // In case we do not reach prepare we have to reset timestamps
+
+ if (!(result=new multi_update(thd, table_list, fields, values,
+ handle_duplicates)))
+ DBUG_RETURN(-1);
+
+ List<Item> total_list;
+ res= mysql_select(thd,table_list,total_list,
+ conds, (ORDER *) NULL, (ORDER *)NULL, (Item *) NULL,
+ (ORDER *)NULL,
+ options | SELECT_NO_JOIN_CACHE,
+ result, unit, select_lex, 0);
+
+end:
+ delete result;
+ DBUG_RETURN(res);
}
-int
-multi_update::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
+
+multi_update::multi_update(THD *thd_arg, TABLE_LIST *table_list,
+ List<Item> *field_list, List<Item> *value_list,
+ enum enum_duplicates handle_duplicates_arg)
+ :all_tables(table_list), update_tables(0), thd(thd_arg), tmp_tables(0),
+ updated(0), found(0), fields(field_list), values(value_list),
+ table_count(0), copy_field(0), handle_duplicates(handle_duplicates_arg),
+ do_update(1), trans_safe(0)
+{}
+
+
+/*
+ Connect fields with tables and create list of tables that are updated
+*/
+
+int multi_update::prepare(List<Item> &not_used_values)
{
+ TABLE_LIST *table_ref;
+ SQL_LIST update;
+ table_map tables_to_update= 0;
+ Item_field *item;
+ List_iterator_fast<Item> field_it(*fields);
+ List_iterator_fast<Item> value_it(*values);
+ uint i, max_fields;
DBUG_ENTER("multi_update::prepare");
- unit= u;
- do_update = true;
+
thd->count_cuted_fields=1;
thd->cuted_fields=0L;
- thd->proc_info="updating the main table";
- TABLE_LIST *table_ref;
+ thd->proc_info="updating main table";
+
+ while ((item= (Item_field *) field_it++))
+ tables_to_update|= item->used_tables();
- if (thd->options & OPTION_SAFE_UPDATES)
+ if (!tables_to_update)
{
- for (table_ref=update_tables; table_ref; table_ref=table_ref->next)
- {
- TABLE *table=table_ref->table;
- if ((thd->options & OPTION_SAFE_UPDATES) && !table->quick_keys)
- {
- my_error(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE,MYF(0));
- DBUG_RETURN(1);
- }
- }
+ my_error(ER_NOT_SUPPORTED_YET, MYF(0),
+ "You didn't specify any tables to UPDATE");
+ DBUG_RETURN(1);
}
+
/*
- Here I have to connect fields with tables and only update tables that
- need to be updated.
- I calculate num_updated and fill-up table_sequence
- Set table_list->shared to true or false, depending on whether table is
- to be updated or not
+ We have to check values after setup_tables to get used_keys right in
+ reference tables
*/
- Item_field *item;
- List_iterator<Item> it(fields);
- num_fields=fields.elements;
- field_sequence = (uint *) sql_alloc(sizeof(uint)*num_fields);
- uint *int_ptr=field_sequence;
- while ((item= (Item_field *)it++))
- {
- unsigned int counter=0;
- for (table_ref=update_tables; table_ref;
- table_ref=table_ref->next, counter++)
- {
- if (table_ref->table == item->field->table)
- {
- if (!table_ref->shared)
- {
- TABLE *tbl=table_ref->table;
- num_updated++;
- table_ref->shared=1;
- if (!not_trans_safe && !table_ref->table->file->has_transactions())
- not_trans_safe=true;
- // to be moved if initialize_tables has to be used
- tbl->no_keyread=1;
- tbl->used_keys=0;
- }
- break;
- }
- }
- if (!table_ref)
- {
- net_printf(thd, ER_NOT_SUPPORTED_YET, "JOIN SYNTAX WITH MULTI-TABLE UPDATES");
- DBUG_RETURN(1);
- }
- else
- *int_ptr++=counter;
- }
- if (!num_updated--)
- {
- net_printf(thd, ER_NOT_SUPPORTED_YET, "SET CLAUSE MUST CONTAIN TABLE.FIELD REFERENCE");
+ if (setup_fields(thd, all_tables, *values, 1,0,0))
DBUG_RETURN(1);
- }
/*
- Here, I have to allocate the array of temporary tables
- I have to treat a case of num_updated=1 differently in send_data() method.
+    Save tables being updated in update_tables
+    update_table->shared is the position of the table
+ Don't use key read on tables that are updated
*/
- if (num_updated)
+
+ update.empty();
+ for (table_ref= all_tables; table_ref; table_ref=table_ref->next)
{
- tmp_tables = (TABLE **) sql_calloc(sizeof(TABLE *) * num_updated);
- infos = (COPY_INFO *) sql_calloc(sizeof(COPY_INFO) * num_updated);
- fields_by_tables = (List_item **)sql_calloc(sizeof(List_item *) * (num_updated + 1));
- unsigned int counter;
- List<Item> *temp_fields;
- for (table_ref=update_tables, counter = 0; table_ref; table_ref=table_ref->next)
+ TABLE *table=table_ref->table;
+ if (tables_to_update & table->map)
{
- if (!table_ref->shared)
- continue;
- // Here we have to add row offset as an additional field ...
- if (!(temp_fields = (List_item *)sql_calloc(sizeof(List_item))))
- {
- error = 1; // A proper error message is due here
+ TABLE_LIST *tl= (TABLE_LIST*) thd->memdup((char*) table_ref,
+ sizeof(*tl));
+ if (!tl)
DBUG_RETURN(1);
- }
- temp_fields->empty();
- it.rewind(); int_ptr=field_sequence;
- while ((item= (Item_field *)it++))
- {
- if (*int_ptr++ == counter)
- temp_fields->push_back(item);
- }
- if (counter)
- {
- Field_string offset(table_ref->table->file->ref_length, false,
- "offset", table_ref->table, my_charset_bin);
- temp_fields->push_front(new Item_field(((Field *)&offset)));
-
- // Make a temporary table
- int cnt=counter-1;
- TMP_TABLE_PARAM tmp_table_param;
- bzero((char*) &tmp_table_param,sizeof(tmp_table_param));
- tmp_table_param.field_count=temp_fields->elements;
- if (!(tmp_tables[cnt]=create_tmp_table(thd, &tmp_table_param,
- *temp_fields,
- (ORDER*) 0, 1, 0, 0,
- TMP_TABLE_ALL_COLUMNS,
- unit)))
- {
- error = 1; // A proper error message is due here
- DBUG_RETURN(1);
- }
- tmp_tables[cnt]->file->extra(HA_EXTRA_WRITE_CACHE);
- tmp_tables[cnt]->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
- infos[cnt].handle_duplicates=DUP_IGNORE;
- temp_fields->pop(); // because we shall use those for values only ...
- }
- fields_by_tables[counter]=temp_fields;
- counter++;
+ update.link_in_list((byte*) tl, (byte**) &tl->next);
+ tl->shared= table_count++;
+ table->no_keyread=1;
+ table->used_keys=0;
+ table->pos_in_table_list= tl;
}
}
- init_ftfuncs(thd, thd->lex.current_select->select_lex(), 1);
- error = 0; // Timestamps do not need to be restored, so far ...
- DBUG_RETURN(0);
+ table_count= update.elements;
+ update_tables= (TABLE_LIST*) update.first;
+
+ tmp_tables = (TABLE **) thd->calloc(sizeof(TABLE *) * table_count);
+ tmp_table_param = (TMP_TABLE_PARAM*) thd->calloc(sizeof(TMP_TABLE_PARAM) *
+ table_count);
+ fields_for_table= (List_item **) thd->alloc(sizeof(List_item *) *
+ table_count);
+ values_for_table= (List_item **) thd->alloc(sizeof(List_item *) *
+ table_count);
+ if (thd->fatal_error)
+ DBUG_RETURN(1);
+ for (i=0 ; i < table_count ; i++)
+ {
+ fields_for_table[i]= new List_item;
+ values_for_table[i]= new List_item;
+ }
+ if (thd->fatal_error)
+ DBUG_RETURN(1);
+
+ /* Split fields into fields_for_table[] and values_for_table[] */
+
+ field_it.rewind();
+ while ((item= (Item_field *) field_it++))
+ {
+ Item *value= value_it++;
+ uint offset= item->field->table->pos_in_table_list->shared;
+ fields_for_table[offset]->push_back(item);
+ values_for_table[offset]->push_back(value);
+ }
+ if (thd->fatal_error)
+ DBUG_RETURN(1);
+
+ /* Allocate copy fields */
+ max_fields=0;
+ for (i=0 ; i < table_count ; i++)
+ set_if_bigger(max_fields, fields_for_table[i]->elements);
+ copy_field= new Copy_field[max_fields];
+ init_ftfuncs(thd,1);
+ DBUG_RETURN(thd->fatal_error != 0);
}
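
A note on the new multi_update::prepare() above: its main job is to split the SET list into one list of fields and one list of values per updated table, indexed by that table's position (the shared member) in update_tables. A minimal standalone sketch of that split, using made-up table offsets and assignments rather than real MySQL types:

    // Standalone illustration only -- none of these names are MySQL code.
    #include <cstdio>
    #include <string>
    #include <vector>

    struct Assignment {
      int table_offset;        // position of the target table in update_tables
      std::string column;      // field being assigned
      std::string value_expr;  // expression it is assigned
    };

    int main() {
      const int table_count = 2;
      std::vector<Assignment> set_list = {
          {0, "a", "a+1"}, {1, "b", "10"}, {0, "c", "c*2"}};

      // Analogue of fields_for_table[] / values_for_table[]: one bucket per table.
      std::vector<std::vector<Assignment>> per_table(table_count);
      for (const Assignment &as : set_list)
        per_table[as.table_offset].push_back(as);

      for (int i = 0; i < table_count; i++) {
        std::printf("table %d:", i);
        for (const Assignment &as : per_table[i])
          std::printf(" %s=%s", as.column.c_str(), as.value_expr.c_str());
        std::printf("\n");
      }
      return 0;
    }
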
-void
+/*
+ Store first used table in main_table as this should be updated first.
+ This is because we know that no row in this table will be read twice.
+
+ Create temporary tables to store changed values for all other tables
+ that are updated.
+*/
+
+bool
multi_update::initialize_tables(JOIN *join)
{
-#ifdef NOT_YET
- We skip it as it only makes a mess ...........
- TABLE_LIST *walk;
- table_map tables_to_update_from=0;
- for (walk= update_tables ; walk ; walk=walk->next)
- tables_to_update_from|= walk->table->map;
-
- walk= update_tables;
- for (JOIN_TAB *tab=join->join_tab, *end=join->join_tab+join->tables;
- tab < end;
- tab++)
+ TABLE_LIST *table_ref;
+ DBUG_ENTER("initialize_tables");
+
+ if ((thd->options & OPTION_SAFE_UPDATES) && error_if_full_join(join))
+ DBUG_RETURN(1);
+ main_table=join->join_tab->table;
+ trans_safe= transactional_tables= main_table->file->has_transactions();
+ log_delayed= trans_safe || main_table->tmp_table != NO_TMP_TABLE;
+
+ /* Create a temporary table for each updated table except the main table */
+ for (table_ref= update_tables; table_ref; table_ref=table_ref->next)
{
- if (tab->table->map & tables_to_update_from)
+ TABLE *table=table_ref->table;
+ if (table != main_table)
{
-// We are going to update from this table
- TABLE *tbl=walk->table=tab->table;
- /* Don't use KEYREAD optimization on this table */
- tbl->no_keyread=1;
- walk=walk->next;
+ uint cnt= table_ref->shared;
+ ORDER group;
+ List<Item> temp_fields= *fields_for_table[cnt];
+ TMP_TABLE_PARAM *tmp_param= tmp_table_param+cnt;
+
+ /*
+ Create a temporary table to store all fields that are changed for this
+ table. The first field in the temporary table is a pointer to the
+ original row so that we can find and update it
+ */
+
+ /* ok to be on stack as this is not referenced outside of this func */
+ Field_string offset(table->file->ref_length, 0, "offset",
+ table, 1);
+ if (temp_fields.push_front(new Item_field(((Field *) &offset))))
+ DBUG_RETURN(1);
+
+ /* Make a unique key over the first field to avoid duplicate updates */
+ bzero((char*) &group, sizeof(group));
+ group.asc= 1;
+ group.item= (Item**) temp_fields.head_ref();
+
+ tmp_param->quick_group=1;
+ tmp_param->field_count=temp_fields.elements;
+ tmp_param->group_parts=1;
+ tmp_param->group_length= table->file->ref_length;
+ if (!(tmp_tables[cnt]=create_tmp_table(thd,
+ tmp_param,
+ temp_fields,
+ (ORDER*) &group, 0, 0, 0,
+ TMP_TABLE_ALL_COLUMNS)))
+ DBUG_RETURN(1);
+ tmp_tables[cnt]->file->extra(HA_EXTRA_WRITE_CACHE);
}
}
-#endif
+ DBUG_RETURN(0);
}
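
The temporary tables built in initialize_tables() above buffer the changes for every updated table except the first one, and the unique key over the stored row reference ("offset") is what keeps a row that the join reaches several times from being updated twice. A standalone sketch of that deduplication idea, with an ordinary keyed container standing in for the temp table (all names are illustrative):

    // Illustration only: a keyed container stands in for the temporary table.
    #include <cstdio>
    #include <map>
    #include <string>

    int main() {
      // key   = row reference (the "offset" field of the real temp table)
      // value = packed new column values for that row
      std::map<long, std::string> buffered;

      auto buffer_change = [&](long row_ref, const std::string &new_values) {
        // emplace() refuses duplicate keys, mirroring the unique key on "offset"
        buffered.emplace(row_ref, new_values);
      };

      buffer_change(42, "x=1");
      buffer_change(42, "x=1");   // same row reached again by the join: ignored
      buffer_change(7,  "x=9");

      for (const auto &kv : buffered)
        std::printf("apply to row %ld: %s\n", kv.first, kv.second.c_str());
      return 0;
    }
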
multi_update::~multi_update()
{
- int counter = 0;
- for (table_being_updated=update_tables ;
- table_being_updated ;
- counter++, table_being_updated=table_being_updated->next)
+ TABLE_LIST *table;
+ for (table= update_tables ; table; table= table->next)
+ table->table->no_keyread=0;
+
+ if (tmp_tables)
{
- TABLE *table=table_being_updated->table;
- table->no_keyread=0;
- if (error)
- table->time_stamp=save_time_stamps[counter];
+ for (uint cnt = 0; cnt < table_count; cnt++)
+ {
+ if (tmp_tables[cnt])
+ {
+ free_tmp_table(thd, tmp_tables[cnt]);
+ tmp_table_param[cnt].cleanup();
+ }
+ }
}
- if (tmp_tables)
- for (uint counter = 0; counter < num_updated; counter++)
- if (tmp_tables[counter])
- free_tmp_table(thd,tmp_tables[counter]);
+ if (copy_field)
+ delete [] copy_field;
+ thd->count_cuted_fields=0; // Restore this setting
+ if (!trans_safe)
+ thd->options|=OPTION_STATUS_NO_TRANS_UPDATE;
}
-bool multi_update::send_data(List<Item> &values)
+bool multi_update::send_data(List<Item> &not_used_values)
{
- List<Item> real_values(values);
- for (uint counter = 0; counter < fields.elements; counter++)
- real_values.pop();
- // We have skipped fields ....
- if (!num_updated)
+ TABLE_LIST *cur_table;
+ DBUG_ENTER("multi_update::send_data");
+
+ found++;
+ for (cur_table= update_tables; cur_table ; cur_table= cur_table->next)
{
- for (table_being_updated=update_tables ;
- table_being_updated ;
- table_being_updated=table_being_updated->next)
+ TABLE *table= cur_table->table;
+ /* Check if we are using outer join and we didn't find the row */
+ if (table->status & (STATUS_NULL_ROW | STATUS_UPDATED))
+ continue;
+
+ uint offset= cur_table->shared;
+ table->file->position(table->record[0]);
+ if (table == main_table)
{
- if (!table_being_updated->shared)
- continue;
- TABLE *table=table_being_updated->table;
- /* Check if we are using outer join and we didn't find the row */
- if (table->status & (STATUS_NULL_ROW | STATUS_UPDATED))
- return 0;
- table->file->position(table->record[0]);
- // Only one table being updated receives a completely different treatment
table->status|= STATUS_UPDATED;
- store_record(table,1);
- if (fill_record(fields,real_values) || thd->net.report_error)
- return 1;
- found++;
- if (/* compare_record(table, query_id) && */
- !(error=table->file->update_row(table->record[1], table->record[0])))
- updated++;
- table->file->extra(HA_EXTRA_NO_CACHE);
- return error;
- }
- }
- else
- {
- int secure_counter= -1;
- for (table_being_updated=update_tables ;
- table_being_updated ;
- table_being_updated=table_being_updated->next, secure_counter++)
- {
- if (!table_being_updated->shared)
- continue;
-
- TABLE *table=table_being_updated->table;
- /* Check if we are using outer join and we didn't find the row */
- if (table->status & (STATUS_NULL_ROW | STATUS_UPDATED))
- continue;
- table->file->position(table->record[0]);
- Item *item;
- List_iterator<Item> it(real_values);
- List <Item> values_by_table;
- uint *int_ptr=field_sequence;
- while ((item= (Item *)it++))
- {
- if (*int_ptr++ == (uint) (secure_counter + 1))
- values_by_table.push_back(item);
- }
- // Here I am breaking values as per each table
- if (secure_counter < 0)
+ store_record(table,1);
+ if (fill_record(*fields_for_table[offset], *values_for_table[offset]))
+ DBUG_RETURN(1);
+ if (compare_record(table, thd->query_id))
{
- table->status|= STATUS_UPDATED;
- store_record(table,1);
- if (fill_record(*fields_by_tables[0], values_by_table) ||
- thd->net.report_error)
- return 1;
- found++;
- if (/*compare_record(table, query_id) && */
- !(error=table->file->update_row(table->record[1], table->record[0])))
+ int error;
+ if (!updated++)
{
- updated++;
- table->file->extra(HA_EXTRA_NO_CACHE);
+ /*
+ Inform the main table that we are going to update the table even
+ while we may be scanning it. This will flush the read cache
+ if it's used.
+ */
+ main_table->file->extra(HA_EXTRA_PREPARE_FOR_UPDATE);
}
- else
+ if ((error=table->file->update_row(table->record[1],
+ table->record[0])))
{
table->file->print_error(error,MYF(0));
- if (!error) error=1;
- return 1;
+ updated--;
+ DBUG_RETURN(1);
}
}
- else
+ }
+ else
+ {
+ int error;
+ TABLE *tmp_table= tmp_tables[offset];
+ fill_record(tmp_table->field+1, *values_for_table[offset]);
+
+ /* Store pointer to row */
+ memcpy((char*) tmp_table->field[0]->ptr,
+ (char*) table->file->ref, table->file->ref_length);
+ /* Write row, ignoring duplicated updates to a row */
+ if ((error= tmp_table->file->write_row(tmp_table->record[0])) &&
+ (error != HA_ERR_FOUND_DUPP_KEY &&
+ error != HA_ERR_FOUND_DUPP_UNIQUE))
{
- // Here we insert into each temporary table
- values_by_table.push_front(new Item_string((char*) table->file->ref,
- table->file->ref_length,
- system_charset_info));
- fill_record(tmp_tables[secure_counter]->field,values_by_table);
- error= thd->net.report_error ||
- write_record(tmp_tables[secure_counter], &(infos[secure_counter]));
- if (error)
+ if (create_myisam_from_heap(table, tmp_table_param + offset, error, 1))
{
- error=-1;
- return 1;
+ do_update=0;
+ DBUG_RETURN(1); // Not a table_is_full error
}
}
}
}
- return 0;
+ DBUG_RETURN(0);
}
+
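
So send_data() above has two paths per joined row: the main table is updated in place right away, every other table only has its change buffered for do_updates(), and rows not found on the outer side of an outer join are skipped. A reduced sketch of that control flow with hypothetical data:

    // Illustration only: hypothetical join output, not MySQL structures.
    #include <cstdio>
    #include <vector>

    struct RowChange {
      int table;      // which update table this row belongs to
      long row_ref;   // handler row reference (file->ref in the real code)
      bool null_row;  // outer join produced no matching row
    };

    int main() {
      const int main_table = 0;
      std::vector<RowChange> join_output = {
          {0, 1, false}, {1, 5, false}, {1, 6, true}};

      for (const RowChange &c : join_output) {
        if (c.null_row)            // STATUS_NULL_ROW: nothing to update
          continue;
        if (c.table == main_table)
          std::printf("update row %ld of the main table immediately\n", c.row_ref);
        else
          std::printf("buffer row %ld of table %d for do_updates()\n",
                      c.row_ref, c.table);
      }
      return 0;
    }
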
void multi_update::send_error(uint errcode,const char *err)
{
-
- //TODO error should be sent at the query processing end
/* First send error what ever it is ... */
- ::send_error(thd,errcode,err);
-
- /* reset used flags */
- // update_tables->table->no_keyread=0;
+ ::send_error(&thd->net,errcode,err);
/* If nothing updated return */
if (!updated)
@@ -698,99 +706,124 @@ void multi_update::send_error(uint errcode,const char *err)
/* Something already updated so we have to invalidate cache */
query_cache_invalidate3(thd, update_tables, 1);
- /* Below can happen when thread is killed early ... */
- if (!table_being_updated)
- table_being_updated=update_tables;
-
/*
- If rows from the first table only has been updated and it is transactional,
- just do rollback.
- The same if all tables are transactional, regardless of where we are.
- In all other cases do attempt updates ...
+ If all tables that have been updated are trans safe then just do rollback.
+ If not, attempt to do the remaining updates.
*/
- if ((table_being_updated->table->file->has_transactions() &&
- table_being_updated == update_tables) || !not_trans_safe)
+
+ if (trans_safe)
ha_rollback_stmt(thd);
- else if (do_update && num_updated)
- VOID(do_updates(true));
+ else if (do_update && table_count > 1)
+ {
+ /* Add warning here */
+ VOID(do_updates(0));
+ }
}
-int multi_update::do_updates (bool from_send_error)
+int multi_update::do_updates(bool from_send_error)
{
- int local_error= 0, counter= 0;
-
- if (from_send_error)
+ TABLE_LIST *cur_table;
+ int local_error;
+ ha_rows org_updated;
+ TABLE *table;
+ DBUG_ENTER("do_updates");
+
+ do_update= 0; // Don't retry this function
+ for (cur_table= update_tables; cur_table ; cur_table= cur_table->next)
{
- /* Found out table number for 'table_being_updated' */
- for (TABLE_LIST *aux=update_tables;
- aux != table_being_updated;
- aux=aux->next)
- counter++;
- }
- else
- table_being_updated = update_tables;
-
- do_update = false;
- for (table_being_updated=table_being_updated->next;
- table_being_updated ;
- table_being_updated=table_being_updated->next, counter++)
- {
- if (!table_being_updated->shared)
- continue;
+ table = cur_table->table;
+ if (table == main_table)
+ continue; // Already updated
- TABLE *table = table_being_updated->table;
- TABLE *tmp_table=tmp_tables[counter];
- if (tmp_table->file->extra(HA_EXTRA_NO_CACHE))
- {
- local_error=1;
- break;
- }
- List<Item> list;
- Field **ptr=tmp_table->field,*field;
- // This is supposed to be something like insert_fields
- thd->used_tables|=tmp_table->map;
- while ((field = *ptr++))
+ org_updated= updated;
+ byte *ref_pos;
+ TABLE *tmp_table= tmp_tables[cur_table->shared];
+ tmp_table->file->extra(HA_EXTRA_CACHE); // Change to read cache
+ table->file->extra(HA_EXTRA_NO_CACHE);
+
+ /*
+ Setup copy functions to copy fields from temporary table
+ */
+ List_iterator_fast<Item> field_it(*fields_for_table[cur_table->shared]);
+ Field **field= tmp_table->field+1; // Skip row pointer
+ Copy_field *copy_field_ptr= copy_field, *copy_field_end;
+ for ( ; *field ; field++)
{
- list.push_back((Item *)new Item_field(field));
- if (field->query_id == thd->query_id)
- thd->dupp_field=field;
- field->query_id=thd->query_id;
- tmp_table->used_keys&=field->part_of_key;
+ Item_field *item= (Item_field* ) field_it++;
+ (copy_field_ptr++)->set(item->field, *field, 0);
}
- tmp_table->used_fields=tmp_table->fields;
- local_error=0;
- list.pop(); // we get position some other way ...
- local_error = tmp_table->file->rnd_init(1);
- if (local_error)
- return local_error;
- while (!(local_error=tmp_table->file->rnd_next(tmp_table->record[0])) &&
- (!thd->killed || from_send_error || not_trans_safe))
+ copy_field_end=copy_field_ptr;
+
+ if ((local_error = tmp_table->file->rnd_init(1)))
+ goto err;
+
+ ref_pos= (byte*) tmp_table->field[0]->ptr;
+ for (;;)
{
- found++;
- local_error= table->file->rnd_pos(table->record[0],
- (byte*) (*(tmp_table->field))->ptr);
- if (local_error)
- return local_error;
+ if (thd->killed && trans_safe)
+ goto err;
+ if ((local_error=tmp_table->file->rnd_next(tmp_table->record[0])))
+ {
+ if (local_error == HA_ERR_END_OF_FILE)
+ break;
+ if (local_error == HA_ERR_RECORD_DELETED)
+ continue; // May happen on dup key
+ goto err;
+ }
+ found++;
+ if ((local_error= table->file->rnd_pos(table->record[0], ref_pos)))
+ goto err;
table->status|= STATUS_UPDATED;
- store_record(table,1);
- local_error= (fill_record(*fields_by_tables[counter + 1],list) ||
- thd->net.report_error ||
- /* compare_record(table, query_id) || */
- table->file->update_row(table->record[1],
- table->record[0]));
- if (local_error)
+ store_record(table,1);
+
+ /* Copy data from temporary table to current table */
+ for (copy_field_ptr=copy_field;
+ copy_field_ptr != copy_field_end;
+ copy_field_ptr++)
+ (*copy_field_ptr->do_copy)(copy_field_ptr);
+
+ if (compare_record(table, thd->query_id))
{
- table->file->print_error(local_error,MYF(0));
- break;
+ if ((local_error=table->file->update_row(table->record[1],
+ table->record[0])))
+ {
+ if (local_error != HA_ERR_FOUND_DUPP_KEY ||
+ handle_duplicates != DUP_IGNORE)
+ goto err;
+ }
+ updated++;
+ if (table->tmp_table != NO_TMP_TABLE)
+ log_delayed= 1;
}
+ }
+
+ if (updated != org_updated)
+ {
+ if (table->tmp_table != NO_TMP_TABLE)
+ log_delayed= 1; // Tmp tables force delayed logging
+ if (table->file->has_transactions())
+ log_delayed= transactional_tables= 1;
else
- updated++;
+ trans_safe= 0; // Can't do safe rollback
}
- if (local_error == HA_ERR_END_OF_FILE)
- local_error = 0;
}
- return local_error;
+ DBUG_RETURN(0);
+
+err:
+ if (!from_send_error)
+ table->file->print_error(local_error,MYF(0));
+
+ if (updated != org_updated)
+ {
+ if (table->tmp_table != NO_TMP_TABLE)
+ log_delayed= 1;
+ if (table->file->has_transactions())
+ log_delayed= transactional_tables= 1;
+ else
+ trans_safe= 0;
+ }
+ DBUG_RETURN(1);
}
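
do_updates() above then replays the buffered changes table by table: scan that table's temp table, reposition on the original row via the stored reference (rnd_pos), copy the saved values over, and write the row only if something actually changed (the compare_record() test). A standalone sketch of the replay loop, with a plain map standing in for the handler interface:

    // Illustration only: a plain map stands in for the handler interface.
    #include <cstdio>
    #include <map>

    int main() {
      std::map<long, int> table    = {{1, 10}, {2, 20}, {3, 30}};  // row_ref -> column value
      std::map<long, int> buffered = {{1, 11}, {3, 30}};           // pending changes

      int updated = 0;
      for (const auto &change : buffered) {
        auto row = table.find(change.first);  // like rnd_pos() on the stored reference
        if (row == table.end())
          continue;                           // row not found: skip
        if (row->second == change.second)
          continue;                           // identical, like compare_record()
        row->second = change.second;          // like update_row()
        ++updated;
      }
      std::printf("updated %d row(s)\n", updated);
      return 0;
    }
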
@@ -798,61 +831,57 @@ int multi_update::do_updates (bool from_send_error)
bool multi_update::send_eof()
{
- thd->proc_info="updating the reference tables";
+ char buff[80];
+ thd->proc_info="updating reference tables";
/* Does updates for the last n - 1 tables, returns 0 if ok */
- int local_error = (num_updated) ? do_updates(false) : 0;
-
- /* reset used flags */
-#ifndef NOT_USED
- update_tables->table->no_keyread=0;
-#endif
- if (local_error == -1)
- local_error= 0;
+ int local_error = (table_count) ? do_updates(0) : 0;
thd->proc_info= "end";
- // TODO: Error should be sent at the query processing end
- if (local_error)
- send_error(local_error, "An error occured in multi-table update");
/*
Write the SQL statement to the binlog if we updated
- rows and we succeeded, or also in an error case when there
- was a non-transaction-safe table involved, since
- modifications in it cannot be rolled back.
+ rows and we succeeded, or if we updated some
+ non-transactional tables
*/
- if (updated || not_trans_safe)
+ if (updated && (local_error <= 0 || !trans_safe))
{
mysql_update_log.write(thd,thd->query,thd->query_length);
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0);
-
- /*
- mysql_bin_log is not open if binlogging or replication
- is not used
- */
+ if (mysql_bin_log.is_open())
+ {
+ Query_log_event qinfo(thd, thd->query, thd->query_length,
+ log_delayed);
+ if (mysql_bin_log.write(&qinfo) && trans_safe)
+ local_error= 1; // Rollback update
+ }
+ if (!log_delayed)
+ thd->options|=OPTION_STATUS_NO_TRANS_UPDATE;
+ }
- if (mysql_bin_log.is_open() && mysql_bin_log.write(&qinfo) &&
- !not_trans_safe)
- local_error=1; /* Log write failed: roll back the SQL statement */
+ if (transactional_tables)
+ {
+ if (ha_autocommit_or_rollback(thd, local_error != 0))
+ local_error=1;
+ }
- /* Commit or rollback the current SQL statement */
- VOID(ha_autocommit_or_rollback(thd, local_error > 0));
+ if (local_error > 0) // an error was detected above
+ {
+ /* Safety: If we haven't got an error before (should not happen) */
+ my_message(ER_UNKNOWN_ERROR, "An error occurred in multi-table update",
+ MYF(0));
+ ::send_error(&thd->net);
+ return 1;
}
- else
- local_error= 0; // this can happen only if it is end of file error
- if (!local_error) // if the above log write did not fail ...
+
+
+ sprintf(buff,ER(ER_UPDATE_INFO), (long) found, (long) updated,
+ (long) thd->cuted_fields);
+ if (updated)
{
- char buff[80];
- sprintf(buff,ER(ER_UPDATE_INFO), (long) found, (long) updated,
- (long) thd->cuted_fields);
- if (updated)
- {
- query_cache_invalidate3(thd, update_tables, 1);
- }
- ::send_ok(thd,
- (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated,
- thd->insert_id_used ? thd->insert_id() : 0L,buff);
+ query_cache_invalidate3(thd, update_tables, 1);
}
- thd->count_cuted_fields=0;
+ ::send_ok(&thd->net,
+ (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated,
+ thd->insert_id_used ? thd->insert_id() : 0L,buff);
return 0;
}
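
The binary-log handling in the new send_eof() above reduces to one rule: write the statement if any rows were changed and either everything succeeded or a non-transactional table was modified, since those changes cannot be rolled back. A sketch of that predicate with plain values (not MySQL code):

    // Illustration only; the real decision also involves log_delayed and the
    // success of the binlog write itself.
    #include <cstdio>

    static bool should_log(long updated_rows, int error, bool trans_safe) {
      // Log when rows changed and either no error occurred or a
      // non-transactional table was touched (cannot be rolled back).
      return updated_rows > 0 && (error <= 0 || !trans_safe);
    }

    int main() {
      std::printf("%d\n", (int) should_log(5, 0, true));   // clean run: log
      std::printf("%d\n", (int) should_log(5, 1, false));  // failed, non-trans table touched: log
      std::printf("%d\n", (int) should_log(0, 0, true));   // nothing changed: do not log
      return 0;
    }
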
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 62090873178..3109aadca38 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -749,7 +749,10 @@ change:
LEX *lex = Lex;
lex->sql_command = SQLCOM_CHANGE_MASTER;
bzero((char*) &lex->mi, sizeof(lex->mi));
- } master_defs;
+ }
+ master_defs
+ {}
+ ;
master_defs:
master_def
@@ -830,7 +833,7 @@ create:
lex->create_info.table_charset=thd->db_charset?thd->db_charset:default_charset_info;
}
create2
-
+ {}
| CREATE opt_unique_or_fulltext INDEX ident key_alg ON table_ident
{
LEX *lex=Lex;
@@ -1382,8 +1385,9 @@ alter:
lex->alter_keys_onoff=LEAVE_AS_IS;
lex->simple_alter=1;
}
- alter_list;
-
+ alter_list
+ {}
+ ;
| ALTER DATABASE ident opt_db_default_character_set
{
LEX *lex=Lex;
@@ -1549,7 +1553,9 @@ repair:
lex->sql_command = SQLCOM_REPAIR;
lex->check_opt.init();
}
- table_list opt_mi_repair_type;
+ table_list opt_mi_repair_type
+ {}
+ ;
opt_mi_repair_type:
/* empty */ { Lex->check_opt.flags = T_MEDIUM; }
@@ -1571,7 +1577,9 @@ analyze:
lex->sql_command = SQLCOM_ANALYZE;
lex->check_opt.init();
}
- table_list opt_mi_check_type;
+ table_list opt_mi_check_type
+ {}
+ ;
check:
CHECK_SYM table_or_tables
@@ -1580,7 +1588,9 @@ check:
lex->sql_command = SQLCOM_CHECK;
lex->check_opt.init();
}
- table_list opt_mi_check_type;
+ table_list opt_mi_check_type
+ {}
+ ;
opt_mi_check_type:
/* empty */ { Lex->check_opt.flags = T_MEDIUM; }
@@ -1604,14 +1614,18 @@ optimize:
lex->sql_command = SQLCOM_OPTIMIZE;
lex->check_opt.init();
}
- table_list opt_mi_check_type;
+ table_list opt_mi_check_type
+ {}
+ ;
rename:
RENAME table_or_tables
{
Lex->sql_command=SQLCOM_RENAME_TABLE;
}
- table_to_table_list;
+ table_to_table_list
+ {}
+ ;
table_to_table_list:
table_to_table
@@ -1642,7 +1656,7 @@ select_init:
{
LEX *lex= Lex;
SELECT_LEX_NODE * sel= lex->current_select;
- if (sel->set_braces(true))
+ if (sel->set_braces(1))
{
send_error(lex->thd, ER_SYNTAX_ERROR);
YYABORT;
@@ -1656,7 +1670,7 @@ select_init2:
select_part2
{
LEX *lex= Lex;
- if (lex->current_select->set_braces(false))
+ if (lex->current_select->set_braces(0))
{
send_error(lex->thd, ER_SYNTAX_ERROR);
YYABORT;
@@ -2482,7 +2496,7 @@ join_table:
select_derived:
{
LEX *lex= Lex;
- lex->derived_tables= true;
+ lex->derived_tables= 1;
if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE ||
mysql_new_select(lex, 1))
YYABORT;
@@ -2618,7 +2632,7 @@ olap_opt:
| WITH CUBE_SYM
{
LEX *lex=Lex;
- lex->olap = true;
+ lex->olap= 1;
if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE)
{
net_printf(lex->thd, ER_WRONG_USAGE, "WITH CUBE",
@@ -2632,7 +2646,7 @@ olap_opt:
| WITH ROLLUP_SYM
{
LEX *lex= Lex;
- lex->olap= true;
+ lex->olap= 1;
if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE)
{
net_printf(lex->thd, ER_WRONG_USAGE, "WITH ROLLUP",
@@ -2699,6 +2713,7 @@ limit_clause:
}
}
limit_options
+ {}
;
limit_options:
@@ -2753,7 +2768,7 @@ procedure_clause:
lex->proc_list.elements=0;
lex->proc_list.first=0;
lex->proc_list.next= (byte**) &lex->proc_list.first;
- if (add_proc_to_list(new Item_field(NULL,NULL,$2.str)))
+ if (add_proc_to_list(lex->thd, new Item_field(NULL,NULL,$2.str)))
YYABORT;
Lex->safe_to_cache_query=0;
}
@@ -2771,10 +2786,11 @@ procedure_list2:
procedure_item:
remember_name expr
{
- if (add_proc_to_list($2))
+ LEX *lex= Lex;
+ if (add_proc_to_list(lex->thd, $2))
YYABORT;
if (!$2->name)
- $2->set_name($1,(uint) ((char*) Lex->tok_end - $1));
+ $2->set_name($1,(uint) ((char*) lex->tok_end - $1));
}
;
@@ -2842,7 +2858,10 @@ do: DO_SYM
if (!(lex->insert_list = new List_item))
YYABORT;
}
- values;
+ values
+ {}
+ ;
+
/*
Drop : delete tables or index
*/
@@ -2928,6 +2947,8 @@ replace:
Select->set_lock_for_tables($3);
}
insert_field_spec
+ {}
+ {}
;
insert_lock_option:
@@ -3122,13 +3143,15 @@ single_multi:
YYABORT;
}
where_clause opt_order_clause
- delete_limit_clause
+ delete_limit_clause {}
| table_wild_list
{ mysql_init_multi_delete(Lex); }
FROM join_table_list where_clause
| FROM table_wild_list
{ mysql_init_multi_delete(Lex); }
- USING join_table_list where_clause;
+ USING join_table_list where_clause
+ {}
+ ;
table_wild_list:
table_wild_one {}
@@ -3184,7 +3207,9 @@ show: SHOW
lex->wild=0;
bzero((char*) &lex->create_info,sizeof(lex->create_info));
}
- show_param;
+ show_param
+ {}
+ ;
show_param:
DATABASES wild
@@ -3354,13 +3379,13 @@ describe:
if (!Select->add_table_to_list($2, NULL,0))
YYABORT;
}
- opt_describe_column
+ opt_describe_column {}
| describe_command { Lex->describe=1; } select
{
LEX *lex=Lex;
lex->select_lex.options|= SELECT_DESCRIBE;
- };
-
+ }
+ ;
describe_command:
DESC
@@ -3381,14 +3406,16 @@ flush:
LEX *lex=Lex;
lex->sql_command= SQLCOM_FLUSH; lex->type=0;
}
- flush_options;
+ flush_options
+ {}
+ ;
flush_options:
flush_options ',' flush_option
| flush_option;
flush_option:
- table_or_tables { Lex->type|= REFRESH_TABLES; } opt_table_list
+ table_or_tables { Lex->type|= REFRESH_TABLES; } opt_table_list {}
| TABLES WITH READ_SYM LOCK_SYM { Lex->type|= REFRESH_TABLES | REFRESH_READ_LOCK; }
| QUERY_SYM CACHE_SYM { Lex->type|= REFRESH_QUERY_CACHE_FREE; }
| HOSTS_SYM { Lex->type|= REFRESH_HOSTS; }
@@ -3409,7 +3436,10 @@ reset:
{
LEX *lex=Lex;
lex->sql_command= SQLCOM_RESET; lex->type=0;
- } reset_options;
+ } reset_options
+ {}
+ ;
+
reset_options:
reset_options ',' reset_option
| reset_option;
@@ -3840,7 +3870,9 @@ set:
lex->option_type=OPT_DEFAULT;
lex->var_list.empty();
}
- option_value_list;
+ option_value_list
+ {}
+ ;
opt_option:
/* empty */ {}
@@ -3964,7 +3996,9 @@ lock:
{
Lex->sql_command=SQLCOM_LOCK_TABLES;
}
- table_lock_list;
+ table_lock_list
+ {}
+ ;
table_or_tables:
TABLE_SYM
@@ -4074,7 +4108,9 @@ revoke:
lex->ssl_cipher= lex->x509_subject= lex->x509_issuer= 0;
bzero((char*) &lex->mqh, sizeof(lex->mqh));
}
- grant_privileges ON opt_table FROM user_list;
+ grant_privileges ON opt_table FROM user_list
+ {}
+ ;
grant:
GRANT
@@ -4090,7 +4126,9 @@ grant:
bzero(&(lex->mqh),sizeof(lex->mqh));
}
grant_privileges ON opt_table TO_SYM user_list
- require_clause grant_options;
+ require_clause grant_options
+ {}
+ ;
grant_privileges:
grant_privilege_list {}
@@ -4103,10 +4141,10 @@ grant_privilege_list:
| grant_privilege_list ',' grant_privilege;
grant_privilege:
- SELECT_SYM { Lex->which_columns = SELECT_ACL;} opt_column_list
- | INSERT { Lex->which_columns = INSERT_ACL;} opt_column_list
- | UPDATE_SYM { Lex->which_columns = UPDATE_ACL; } opt_column_list
- | REFERENCES { Lex->which_columns = REFERENCES_ACL;} opt_column_list
+ SELECT_SYM { Lex->which_columns = SELECT_ACL;} opt_column_list {}
+ | INSERT { Lex->which_columns = INSERT_ACL;} opt_column_list {}
+ | UPDATE_SYM { Lex->which_columns = UPDATE_ACL; } opt_column_list {}
+ | REFERENCES { Lex->which_columns = REFERENCES_ACL;} opt_column_list {}
| DELETE_SYM { Lex->grant |= DELETE_ACL;}
| USAGE {}
| INDEX { Lex->grant |= INDEX_ACL;}
@@ -4333,7 +4371,8 @@ grant_option:
;
begin:
- BEGIN_SYM { Lex->sql_command = SQLCOM_BEGIN;} opt_work;
+ BEGIN_SYM { Lex->sql_command = SQLCOM_BEGIN;} opt_work {}
+ ;
opt_work:
/* empty */ {}
@@ -4376,7 +4415,7 @@ union_list:
YYABORT;
lex->current_select->linkage=UNION_TYPE;
}
- select_init
+ select_init {}
;
union_opt:
diff --git a/sql/table.h b/sql/table.h
index 18079e183ce..149cc6bca13 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -118,21 +118,22 @@ struct st_table {
table_map map; /* ID bit of table (1,2,4,8,16...) */
ulong version,flush_version;
uchar *null_flags;
- IO_CACHE *io_cache; /* If sorted trough file*/
- byte *record_pointers; /* If sorted in memory */
- ha_rows found_records; /* How many records in sort */
+ IO_CACHE *io_cache; /* If sorted trough file*/
+ byte *record_pointers; /* If sorted in memory */
+ ha_rows found_records; /* How many records in sort */
ORDER *group;
ha_rows quick_rows[MAX_KEY];
uint quick_key_parts[MAX_KEY];
key_part_map const_key_parts[MAX_KEY];
ulong query_id;
- uint temp_pool_slot;
-
+ union /* Temporary variables */
+ {
+ uint temp_pool_slot; /* Used by intern temp tables */
+ struct st_table_list *pos_in_table_list;
+ };
/* number of select if it is derived table */
uint derived_select_number;
-
- THD *in_use; /* Which thread uses this */
+ THD *in_use; /* Which thread uses this */
struct st_table *next,*prev;
};
@@ -161,10 +162,10 @@ typedef struct st_table_list
GRANT_INFO grant;
thr_lock_type lock_type;
uint outer_join; /* Which join type */
+ uint shared; /* Used in union or in multi-upd */
uint32 db_length, real_name_length;
bool straight; /* optimize with prev table */
bool updating; /* for replicate-do/ignore table */
- bool shared; /* Used twice in union */
bool do_redirect; /* To get the struct in UNION's */
} TABLE_LIST;
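
The st_table change above puts temp_pool_slot and pos_in_table_list into an anonymous union, which is safe because the two members are never in use at the same time (internal temporary tables versus multi-table UPDATE bookkeeping) and it keeps the struct from growing. A minimal sketch of the same trick on a hypothetical struct:

    // Illustration only: a hypothetical struct, not the real st_table.
    #include <cstdio>

    struct table_list;                  // stand-in for st_table_list

    struct table_stub {
      union {                           // temporary variables, never live together
        unsigned int temp_pool_slot;    // used by internal temporary tables
        table_list *pos_in_table_list;  // used by multi-table UPDATE
      };
    };

    int main() {
      table_stub t;
      t.temp_pool_slot = 3;
      std::printf("slot=%u, struct size=%zu bytes\n",
                  t.temp_pool_slot, sizeof(t));
      return 0;
    }
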
diff --git a/sql/unireg.cc b/sql/unireg.cc
index 81310c4a863..a171ba42ff3 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -590,7 +590,7 @@ static bool make_empty_rec(File file,enum db_type table_type,
if (field->def &&
(regfield->real_type() != FIELD_TYPE_YEAR ||
field->def->val_int() != 0))
- (void) field->def->save_in_field(regfield);
+ (void) field->def->save_in_field(regfield, 1);
else if (regfield->real_type() == FIELD_TYPE_ENUM &&
(field->flags & NOT_NULL_FLAG))
{
diff --git a/strings/strto.c b/strings/strto.c
index cd70616d294..cd8dee53692 100644
--- a/strings/strto.c
+++ b/strings/strto.c
@@ -103,6 +103,7 @@ function (const char *nptr,char **endptr,int base)
}
/* Check for a sign. */
+ negative= 0;
if (*s == '-')
{
negative = 1;
@@ -110,11 +111,9 @@ function (const char *nptr,char **endptr,int base)
}
else if (*s == '+')
{
- negative = 0;
++s;
}
- else
- negative = 0;
+
if (base == 16 && s[0] == '0' && my_toupper (system_charset_info, s[1]) == 'X')
s += 2;
diff --git a/support-files/make_mysql_pkg.pl b/support-files/make_mysql_pkg.pl
index 75345c7275b..22283d57098 100644
--- a/support-files/make_mysql_pkg.pl
+++ b/support-files/make_mysql_pkg.pl
@@ -17,16 +17,30 @@
# History:
#
# When Who What
-# -------------------------------------------------------------
+# ------------------------------------------------------------------
# 2001-09-16 Marc Liyanage First version
+# 2001-11-18 Marc Liyanage Improved configure directory options
+#
use strict;
use DirHandle;
my $data = {};
-$data->{PREFIX_DIR} = "/usr/local";
-$data->{CONFIG} = "--prefix=$data->{PREFIX_DIR} --with-innodb";
+$data->{PREFIX_DIR} = "/usr/local/mysql";
+$data->{CONFIG} = join(" ",
+ "--prefix=$data->{PREFIX_DIR}",
+ "--localstatedir=$data->{PREFIX_DIR}/data",
+ "--libdir=$data->{PREFIX_DIR}/lib",
+ "--includedir=$data->{PREFIX_DIR}/include",
+ "--with-named-z-libs=/usr/local/libz.a",
+ "--with-innodb",
+ "--with-server-suffix='-entropy.ch'",
+ "--with-comment='http://www.entropy.ch/software/macosx/mysql/'",
+ "--with-mysqld-user=mysql",
+ "--enable-assembler",
+ "CFLAGS=\"-DHAVE_BROKEN_REALPATH -lncurses\"",
+);
@@ -177,8 +191,7 @@ sub make_binary_distribution {
# Now we build a fake /usr/local directory hierarchy.
-# This will be fed to the pax tool to create
-# the archive.
+# This will be fed to the pax tool to create the archive.
#
sub create_pax_root {
@@ -190,7 +203,7 @@ sub create_pax_root {
chdir($data->{PAXROOT_DIR});
my $tarfile = "$data->{OLDWD}/$data->{BINARY_TARBALL_FILENAME}";
- if(system("tar -xzf $tarfile")) {
+ if (system("tar -xzf $tarfile")) {
abort($data, "Unable to extract $tarfile inside $data->{PAXROOT_DIR}");
}
@@ -213,14 +226,35 @@ sub create_pax_root {
# First create the symlinks in the bin directory
#
+ # 2001-02-13: we no longer use symlinks for the binaries, we
+ # use small dummy scripts instead because the
+ # mysql scripts do a lot of guesswork with their
+ # own path and that will not work when called via the symlink
+ #
+# symlink("../mysql/bin/$_", "$_") foreach (grep {$_ !~ /^\.+$/} DirHandle->new("../mysql/bin")->read());
+
chdir("bin");
- symlink("../mysql/bin/$_", "$_") foreach (grep {$_ !~ /^\.+$/} DirHandle->new("../mysql/bin")->read());
+
+ foreach my $command (grep {$_ !~ /^\.+$/} DirHandle->new("../mysql/bin")->read()) {
+
+ my $scriptcode = qq+#!/bin/sh\n# Part of the entropy.ch mysql package\ncd /usr/local/mysql/\nexec ./bin/$command "\$\@"\n+;
+ open(SCRIPTFILE, ">$command") or die "Unable to write open $command\n";
+ print SCRIPTFILE $scriptcode;
+ close(SCRIPTFILE);
+ chmod(0755, $command);
+
+ }
+
+
+
+
+
# Now include the man pages. Two problems here:
# 1.) the make_binary_distribution script does not seem
# to include the man pages, so we have to copy them over
- # now.
+ # now. [outdated, was fixed by MySQL!]
# 2.) The man pages could be in different sections, so
# we have to recursively copy *and* symlink them.
#
@@ -230,7 +264,7 @@ sub create_pax_root {
# arrays which in turn will be stored in a hash, using
# the section numbers as hash keys.
#
- chdir($data->{OLDWD});
+ chdir("$data->{PAXROOT_DIR}/mysql");
my %man_sections;
foreach my $manpage (grep {$_ =~ /^.+\.(\d+)$/} DirHandle->new("man")->read()) {
@@ -249,14 +283,12 @@ sub create_pax_root {
foreach my $section (keys(%man_sections)) {
- system("mkdir -p $data->{PAXROOT_DIR}/mysql/man/man$section/");
system("mkdir -p man$section");
chdir("man$section");
foreach my $manpage (@{$man_sections{$section}}) {
- system("cp $data->{OLDWD}/man/$manpage $data->{PAXROOT_DIR}/mysql/man/man$section/");
- symlink("../../../mysql/man/man$section/$manpage", $manpage)
+ symlink("../../../mysql/man/$manpage", $manpage)
}
@@ -265,6 +297,35 @@ sub create_pax_root {
}
+
+ # Fix up the library and lib directories. They are packed up wrong in the
+ # binary distribution tarball.
+ #
+ # (no longer needed as of 3.23.47)
+ # (oops, still needed because 3.23.47 is broken...)
+ #
+# if (-d "$data->{PAXROOT_DIR}/mysql/lib/mysql") {
+# abort($data, "$data->{PAXROOT_DIR}/mysql/lib/mysql exists, layout has changed!");
+# }
+# chdir("$data->{PAXROOT_DIR}/mysql/lib/");
+# system("mkdir -p mysql");
+# system("mv * mysql");
+
+# if (-d "$data->{PAXROOT_DIR}/mysql/include/mysql") {
+# abort($data, "$data->{PAXROOT_DIR}/mysql/include/mysql exists, layout has changed!");
+# }
+# chdir("$data->{PAXROOT_DIR}/mysql/include/");
+# system("mkdir -p mysql");
+# system("mv * mysql");
+
+
+
+
+
+
+
+
+
}
@@ -310,7 +371,7 @@ sub create_package {
my $size_compressed = `du -sk $data->{PACKAGE_DIR} | cut -f 1`;
chomp($size_compressed);
- my $numfiles = `find /tmp/mysql-3.23.42-paxroot/ | wc -l`;
+ my $numfiles = `find /tmp/mysql-$data->{VERSION}-paxroot | wc -l`;
$numfiles--;
open(SIZESFILE, ">$data->{PKG_RESOURCES_DIR}/mysql-$data->{VERSION}.sizes") or abort("Unable to write open sizes file $data->{PKG_RESOURCES_DIR}/mysql-$data->{VERSION}.sizes");
@@ -411,4 +472,4 @@ Relocatable NO
Required NO
InstallOnly NO
RequiresReboot NO
-InstallFat NO
\ No newline at end of file
+InstallFat NO
diff --git a/support-files/mysql.server.sh b/support-files/mysql.server.sh
index 139bbb3fd6b..d52ade03836 100644
--- a/support-files/mysql.server.sh
+++ b/support-files/mysql.server.sh
@@ -2,7 +2,7 @@
# Copyright Abandoned 1996 TCX DataKonsult AB & Monty Program KB & Detron HB
# This file is public domain and comes with NO WARRANTY of any kind
-# Mysql daemon start/stop script.
+# MySQL daemon start/stop script.
# Usually this is put in /etc/init.d (at least on machines SYSV R4 based
# systems) and linked to /etc/rc3.d/S99mysql and /etc/rc0.d/K01mysql.
@@ -20,17 +20,17 @@
# Required-Stop: $local_fs $network $remote_fs
# Default-Start: 3 5
# Default-Stop: 3 5
-# Short-Description: start and stop MySLQ
+# Short-Description: start and stop MySQL
# Description: MySQL is a very fast and reliable SQL database engine.
### END INIT INFO
# If you install MySQL on some other places than @prefix@, then you
-# have to do one of the following thing for this script to work:
+# have to do one of the following things for this script to work:
#
-# - Run this script from the MySQL installation directory
+# - Run this script from within the MySQL installation directory
# - Create a /etc/my.cnf file with the following information:
# [mysqld]
-# basedir=path-to-mysql-installation-directory
+# basedir=<path-to-mysql-installation-directory>
# - Add the above to any other configuration file (for example ~/.my.ini)
# and copy my_print_defaults to /usr/bin
# - Add the path to the mysql-installation-directory to the basedir variable
@@ -79,7 +79,8 @@ parse_arguments() {
done
}
-# Get arguments from the my.cnf file, groups [mysqld] and [mysql_server]
+# Get arguments from the my.cnf file,
+# groups [mysqld] [mysql_server] and [mysql.server]
if test -x ./bin/my_print_defaults
then
print_defaults="./bin/my_print_defaults"
@@ -117,7 +118,7 @@ else
test -z "$print_defaults" && print_defaults="my_print_defaults"
fi
-parse_arguments `$print_defaults $defaults mysqld mysql_server`
+parse_arguments `$print_defaults mysqld mysql_server mysql.server`
# Safeguard (relative paths, core dumps..)
cd $basedir
@@ -154,7 +155,7 @@ case "$mode" in
sleep 1
while [ -s $pid_file -a "$flags" != aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ]
do
- [ -z "$flags" ] && echo -n "Wait for mysqld to exit" || echo -n "."
+ [ -z "$flags" ] && echo "Wait for mysqld to exit\c" || echo ".\c"
flags=a$flags
sleep 1
done
diff --git a/vio/vio.c b/vio/vio.c
index b1eb86fc948..e629cd043b5 100644
--- a/vio/vio.c
+++ b/vio/vio.c
@@ -134,6 +134,9 @@ Vio *vio_new(my_socket sd, enum enum_vio_type type, my_bool localhost)
vio->sd);
#if !defined(___WIN__) && !defined(__EMX__) && !defined(OS2)
#if !defined(NO_FCNTL_NONBLOCK)
+#if defined(__FreeBSD__)
+ fcntl(sd, F_SETFL, vio->fcntl_mode); /* Yahoo! FreeBSD patch */
+#endif
vio->fcntl_mode = fcntl(sd, F_GETFL);
#elif defined(HAVE_SYS_IOCTL_H) /* hpux */
/* Non blocking sockets doesn't work good on HPUX 11.0 */