author     unknown <igor@rurik.mysql.com>  2006-07-01 15:31:28 -0700
committer  unknown <igor@rurik.mysql.com>  2006-07-01 15:31:28 -0700
commit     912e54d7b8452d74a459907a5023f2b7c6ab025c (patch)
tree       75f44943450160b535b36e44ec1ced13b9a08364
parent     1b66efe9d625df7d4d88dbacb8b97a9976bcf449 (diff)
parent     3cf92fb7d67b22f33f4846ecf68c3f56c301cc20 (diff)
download   mariadb-git-912e54d7b8452d74a459907a5023f2b7c6ab025c.tar.gz
Merge ibabaev@bk-internal.mysql.com:/home/bk/mysql-5.0-opt
into rurik.mysql.com:/home/igor/mysql-5.0-opt
-rw-r--r--  .bzrignore  1
-rw-r--r--  VC++Files/mysql.sln  21
-rw-r--r--  VC++Files/mysys/mysys.vcproj  102
-rw-r--r--  VC++Files/sql/mysqld.vcproj  2
-rw-r--r--  client/mysqldump.c  24
-rw-r--r--  config/ac-macros/compiler_flag.m4  22
-rw-r--r--  configure.in  8
-rw-r--r--  include/Makefile.am  2
-rw-r--r--  include/my_libwrap.h  28
-rw-r--r--  include/my_sys.h  10
-rw-r--r--  include/sql_common.h  1
-rw-r--r--  libmysqld/lib_sql.cc  7
-rw-r--r--  libmysqld/libmysqld.c  48
-rwxr-xr-x  mysql-test/mysql-test-run.pl  3
-rw-r--r--  mysql-test/r/auto_increment.result  2
-rw-r--r--  mysql-test/r/bdb.result  34
-rw-r--r--  mysql-test/r/create.result  2
-rw-r--r--  mysql-test/r/ctype_ucs2_def.result  3
-rw-r--r--  mysql-test/r/distinct.result  51
-rw-r--r--  mysql-test/r/federated.result  88
-rw-r--r--  mysql-test/r/func_compress.result  4
-rw-r--r--  mysql-test/r/func_math.result  2
-rw-r--r--  mysql-test/r/func_sapdb.result  6
-rw-r--r--  mysql-test/r/func_system.result  2
-rw-r--r--  mysql-test/r/func_time.result  10
-rw-r--r--  mysql-test/r/func_timestamp.result  2
-rw-r--r--  mysql-test/r/gis.result  10
-rw-r--r--  mysql-test/r/information_schema.result  20
-rw-r--r--  mysql-test/r/key.result  10
-rw-r--r--  mysql-test/r/lock_multi.result  15
-rw-r--r--  mysql-test/r/mysqldump.result  28
-rw-r--r--  mysql-test/r/ps.result  105
-rw-r--r--  mysql-test/r/query_cache.result  2
-rw-r--r--  mysql-test/r/rpl_get_lock.result  2
-rw-r--r--  mysql-test/r/rpl_master_pos_wait.result  2
-rw-r--r--  mysql-test/r/show_check.result  60
-rw-r--r--  mysql-test/r/sp-prelocking.result  18
-rw-r--r--  mysql-test/r/sp-security.result  31
-rw-r--r--  mysql-test/r/sp.result  67
-rw-r--r--  mysql-test/r/subselect.result  8
-rw-r--r--  mysql-test/r/symlink.result  6
-rw-r--r--  mysql-test/r/trigger.result  14
-rw-r--r--  mysql-test/r/type_blob.result  2
-rw-r--r--  mysql-test/r/type_timestamp.result  2
-rw-r--r--  mysql-test/r/udf.result  18
-rw-r--r--  mysql-test/r/variables.result  28
-rw-r--r--  mysql-test/r/view.result  14
-rw-r--r--  mysql-test/t/bdb.test  35
-rw-r--r--  mysql-test/t/create.test  2
-rw-r--r--  mysql-test/t/ctype_ucs2_def-master.opt  2
-rw-r--r--  mysql-test/t/ctype_ucs2_def.test  5
-rw-r--r--  mysql-test/t/distinct.test  28
-rw-r--r--  mysql-test/t/federated.test  55
-rw-r--r--  mysql-test/t/func_sapdb.test  2
-rw-r--r--  mysql-test/t/func_time.test  13
-rw-r--r--  mysql-test/t/func_timestamp.test  6
-rw-r--r--  mysql-test/t/gis.test  7
-rw-r--r--  mysql-test/t/key.test  11
-rw-r--r--  mysql-test/t/lock_multi.test  50
-rw-r--r--  mysql-test/t/mysqldump.test  35
-rw-r--r--  mysql-test/t/ps.test  118
-rw-r--r--  mysql-test/t/rpl_openssl.test  4
-rw-r--r--  mysql-test/t/show_check.test  72
-rw-r--r--  mysql-test/t/sp-prelocking.test  31
-rw-r--r--  mysql-test/t/sp-security.test  46
-rw-r--r--  mysql-test/t/sp.test  73
-rw-r--r--  mysql-test/t/trigger.test  108
-rw-r--r--  mysql-test/t/type_timestamp.test  6
-rw-r--r--  mysql-test/t/udf.test  12
-rw-r--r--  mysql-test/t/variables.test  16
-rw-r--r--  mysql-test/t/wait_timeout.test  1
-rw-r--r--  mysys/Makefile.am  2
-rw-r--r--  mysys/mf_dirname.c  4
-rw-r--r--  mysys/my_delete.c  51
-rw-r--r--  mysys/my_init.c  24
-rw-r--r--  mysys/my_lib.c  2
-rw-r--r--  mysys/my_libwrap.c  42
-rw-r--r--  mysys/my_malloc.c  2
-rw-r--r--  mysys/safemalloc.c  2
-rw-r--r--  mysys/thr_lock.c  2
-rw-r--r--  ndb/include/kernel/GlobalSignalNumbers.h  16
-rw-r--r--  ndb/include/kernel/signaldata/AlterTable.hpp  1
-rw-r--r--  ndb/include/kernel/signaldata/BackupContinueB.hpp  3
-rw-r--r--  ndb/include/kernel/signaldata/BackupImpl.hpp  22
-rw-r--r--  ndb/include/kernel/signaldata/BackupSignalData.hpp  8
-rw-r--r--  ndb/include/kernel/signaldata/CreateTable.hpp  1
-rw-r--r--  ndb/include/kernel/signaldata/DictLock.hpp  78
-rw-r--r--  ndb/include/kernel/signaldata/DictTabInfo.hpp  11
-rw-r--r--  ndb/include/kernel/signaldata/DropTable.hpp  1
-rw-r--r--  ndb/include/kernel/signaldata/LqhFrag.hpp  33
-rw-r--r--  ndb/include/kernel/signaldata/TupFrag.hpp  15
-rw-r--r--  ndb/include/ndb_version.h.in  2
-rw-r--r--  ndb/include/ndbapi/NdbDictionary.hpp  14
-rw-r--r--  ndb/src/common/debugger/SignalLoggerManager.cpp  2
-rw-r--r--  ndb/src/common/debugger/signaldata/BackupImpl.cpp  6
-rw-r--r--  ndb/src/common/debugger/signaldata/BackupSignalData.cpp  6
-rw-r--r--  ndb/src/common/debugger/signaldata/DictTabInfo.cpp  8
-rw-r--r--  ndb/src/common/debugger/signaldata/LqhFrag.cpp  6
-rw-r--r--  ndb/src/common/debugger/signaldata/SignalNames.cpp  6
-rw-r--r--  ndb/src/kernel/blocks/ERROR_codes.txt  6
-rw-r--r--  ndb/src/kernel/blocks/backup/Backup.cpp  163
-rw-r--r--  ndb/src/kernel/blocks/backup/Backup.hpp  15
-rw-r--r--  ndb/src/kernel/blocks/backup/BackupFormat.hpp  17
-rw-r--r--  ndb/src/kernel/blocks/backup/BackupInit.cpp  3
-rw-r--r--  ndb/src/kernel/blocks/dbdict/Dbdict.cpp  346
-rw-r--r--  ndb/src/kernel/blocks/dbdict/Dbdict.hpp  107
-rw-r--r--  ndb/src/kernel/blocks/dbdict/DictLock.txt  94
-rw-r--r--  ndb/src/kernel/blocks/dbdih/Dbdih.hpp  28
-rw-r--r--  ndb/src/kernel/blocks/dbdih/DbdihInit.cpp  6
-rw-r--r--  ndb/src/kernel/blocks/dbdih/DbdihMain.cpp  177
-rw-r--r--  ndb/src/kernel/blocks/dblqh/Dblqh.hpp  10
-rw-r--r--  ndb/src/kernel/blocks/dblqh/DblqhMain.cpp  94
-rw-r--r--  ndb/src/kernel/blocks/dbtup/Dbtup.hpp  5
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp  79
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp  6
-rw-r--r--  ndb/src/kernel/blocks/qmgr/QmgrMain.cpp  4
-rw-r--r--  ndb/src/kernel/main.cpp  4
-rw-r--r--  ndb/src/kernel/vm/DLFifoList.hpp  14
-rw-r--r--  ndb/src/kernel/vm/pc.hpp  2
-rw-r--r--  ndb/src/mgmsrv/ConfigInfo.cpp  2
-rw-r--r--  ndb/src/mgmsrv/MgmtSrvr.cpp  10
-rw-r--r--  ndb/src/mgmsrv/MgmtSrvr.hpp  4
-rw-r--r--  ndb/src/ndbapi/NdbDictionary.cpp  24
-rw-r--r--  ndb/src/ndbapi/NdbDictionaryImpl.cpp  20
-rw-r--r--  ndb/src/ndbapi/NdbDictionaryImpl.hpp  3
-rw-r--r--  ndb/src/ndbapi/ndberror.c  1
-rw-r--r--  ndb/test/ndbapi/testDict.cpp  321
-rw-r--r--  ndb/tools/restore/Restore.cpp  58
-rw-r--r--  ndb/tools/restore/Restore.hpp  15
-rw-r--r--  ndb/tools/restore/consumer_restore.cpp  10
-rw-r--r--  scripts/Makefile.am  6
-rw-r--r--  scripts/make_binary_distribution.sh  20
-rw-r--r--  scripts/mysql_upgrade_shell.sh (renamed from scripts/mysql_upgrade.sh)  0
-rw-r--r--  server-tools/instance-manager/instance_options.cc  9
-rw-r--r--  sql-common/client.c  86
-rw-r--r--  sql/ha_federated.cc  416
-rw-r--r--  sql/ha_federated.h  18
-rw-r--r--  sql/ha_ndbcluster.cc  113
-rw-r--r--  sql/item_geofunc.h  2
-rw-r--r--  sql/item_strfunc.cc  4
-rw-r--r--  sql/item_timefunc.cc  11
-rw-r--r--  sql/lock.cc  44
-rw-r--r--  sql/log.cc  26
-rw-r--r--  sql/log_event.cc  79
-rw-r--r--  sql/log_event.h  21
-rw-r--r--  sql/mysql_priv.h  1
-rw-r--r--  sql/mysqld.cc  25
-rw-r--r--  sql/opt_range.cc  4
-rw-r--r--  sql/set_var.cc  4
-rw-r--r--  sql/set_var.h  2
-rw-r--r--  sql/share/errmsg.txt  2
-rw-r--r--  sql/slave.cc  34
-rw-r--r--  sql/slave.h  5
-rw-r--r--  sql/sp.cc  107
-rw-r--r--  sql/sp.h  14
-rw-r--r--  sql/sp_head.cc  74
-rw-r--r--  sql/sp_head.h  10
-rw-r--r--  sql/sql_acl.cc  59
-rw-r--r--  sql/sql_base.cc  14
-rw-r--r--  sql/sql_class.h  47
-rw-r--r--  sql/sql_db.cc  164
-rw-r--r--  sql/sql_handler.cc  11
-rw-r--r--  sql/sql_insert.cc  41
-rw-r--r--  sql/sql_lex.cc  3
-rw-r--r--  sql/sql_lex.h  13
-rw-r--r--  sql/sql_parse.cc  417
-rw-r--r--  sql/sql_select.cc  189
-rw-r--r--  sql/sql_show.cc  40
-rw-r--r--  sql/sql_table.cc  29
-rw-r--r--  sql/sql_trigger.cc  22
-rw-r--r--  sql/sql_udf.cc  6
-rw-r--r--  sql/sql_udf.h  2
-rw-r--r--  sql/sql_view.cc  9
-rw-r--r--  sql/sql_yacc.yy  63
-rw-r--r--  sql/table.cc  29
-rw-r--r--  sql/table.h  3
-rw-r--r--  sql/tztime.cc  6
-rw-r--r--  strings/Makefile.am  6
-rw-r--r--  strings/ctype-mb.c  20
-rw-r--r--  support-files/mysql.spec.sh  29
180 files changed, 4847 insertions, 1215 deletions
diff --git a/.bzrignore b/.bzrignore
index eafc384fda5..ef02a085144 100644
--- a/.bzrignore
+++ b/.bzrignore
@@ -1285,3 +1285,4 @@ vio/viotest-sslconnect.cpp
vio/viotest.cpp
zlib/*.ds?
zlib/*.vcproj
+BitKeeper/etc/RESYNC_TREE
diff --git a/VC++Files/mysql.sln b/VC++Files/mysql.sln
index 3e3e4c67e17..bd0cae1d5d8 100644
--- a/VC++Files/mysql.sln
+++ b/VC++Files/mysql.sln
@@ -1110,8 +1110,8 @@ Global
{DB28DE80-837F-4497-9AA9-CC0A20584C98}.Release.Build.0 = Release|Win32
{44D9C7DC-6636-4B82-BD01-6876C64017DF}.classic.ActiveCfg = TLS|Win32
{44D9C7DC-6636-4B82-BD01-6876C64017DF}.classic.Build.0 = TLS|Win32
- {44D9C7DC-6636-4B82-BD01-6876C64017DF}.classic nt.ActiveCfg = Release|Win32
- {44D9C7DC-6636-4B82-BD01-6876C64017DF}.classic nt.Build.0 = Release|Win32
+ {44D9C7DC-6636-4B82-BD01-6876C64017DF}.classic nt.ActiveCfg = nt|Win32
+ {44D9C7DC-6636-4B82-BD01-6876C64017DF}.classic nt.Build.0 = nt|Win32
{44D9C7DC-6636-4B82-BD01-6876C64017DF}.Debug.ActiveCfg = Debug|Win32
{44D9C7DC-6636-4B82-BD01-6876C64017DF}.Debug.Build.0 = Debug|Win32
{44D9C7DC-6636-4B82-BD01-6876C64017DF}.Embedded_Classic.ActiveCfg = TLS|Win32
@@ -1126,18 +1126,18 @@ Global
{44D9C7DC-6636-4B82-BD01-6876C64017DF}.Embedded_Release.Build.0 = TLS|Win32
{44D9C7DC-6636-4B82-BD01-6876C64017DF}.Max.ActiveCfg = Max|Win32
{44D9C7DC-6636-4B82-BD01-6876C64017DF}.Max.Build.0 = Max|Win32
- {44D9C7DC-6636-4B82-BD01-6876C64017DF}.Max nt.ActiveCfg = Max|Win32
- {44D9C7DC-6636-4B82-BD01-6876C64017DF}.Max nt.Build.0 = Max|Win32
- {44D9C7DC-6636-4B82-BD01-6876C64017DF}.nt.ActiveCfg = Release|Win32
- {44D9C7DC-6636-4B82-BD01-6876C64017DF}.nt.Build.0 = Release|Win32
+ {44D9C7DC-6636-4B82-BD01-6876C64017DF}.Max nt.ActiveCfg = Max nt|Win32
+ {44D9C7DC-6636-4B82-BD01-6876C64017DF}.Max nt.Build.0 = Max nt|Win32
+ {44D9C7DC-6636-4B82-BD01-6876C64017DF}.nt.ActiveCfg = nt|Win32
+ {44D9C7DC-6636-4B82-BD01-6876C64017DF}.nt.Build.0 = nt|Win32
{44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro.ActiveCfg = Release|Win32
{44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro.Build.0 = Release|Win32
{44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro gpl.ActiveCfg = Release|Win32
{44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro gpl.Build.0 = Release|Win32
- {44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro gpl nt.ActiveCfg = Release|Win32
- {44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro gpl nt.Build.0 = Release|Win32
- {44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro nt.ActiveCfg = Release|Win32
- {44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro nt.Build.0 = Release|Win32
+ {44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro gpl nt.ActiveCfg = nt|Win32
+ {44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro gpl nt.Build.0 = nt|Win32
+ {44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro nt.ActiveCfg = nt|Win32
+ {44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro nt.Build.0 = nt|Win32
{44D9C7DC-6636-4B82-BD01-6876C64017DF}.Release.ActiveCfg = Release|Win32
{44D9C7DC-6636-4B82-BD01-6876C64017DF}.Release.Build.0 = Release|Win32
{AC47623D-933C-4A80-83BB-B6AF7CB28B4B}.classic.ActiveCfg = classic|Win32
@@ -1427,6 +1427,7 @@ Global
{DA224DAB-5006-42BE-BB77-16E8BE5326D5}.Max.ActiveCfg = Release|Win32
{DA224DAB-5006-42BE-BB77-16E8BE5326D5}.Max.Build.0 = Release|Win32
{DA224DAB-5006-42BE-BB77-16E8BE5326D5}.Max nt.ActiveCfg = Release|Win32
+ {DA224DAB-5006-42BE-BB77-16E8BE5326D5}.Max nt.Build.0 = Release|Win32
{DA224DAB-5006-42BE-BB77-16E8BE5326D5}.nt.ActiveCfg = Release|Win32
{DA224DAB-5006-42BE-BB77-16E8BE5326D5}.nt.Build.0 = Release|Win32
{DA224DAB-5006-42BE-BB77-16E8BE5326D5}.pro.ActiveCfg = Release|Win32
diff --git a/VC++Files/mysys/mysys.vcproj b/VC++Files/mysys/mysys.vcproj
index 1053b605119..3885e18cea8 100644
--- a/VC++Files/mysys/mysys.vcproj
+++ b/VC++Files/mysys/mysys.vcproj
@@ -22,7 +22,7 @@
Optimization="0"
OptimizeForProcessor="2"
AdditionalIncludeDirectories="../include,../zlib"
- PreprocessorDefinitions="_DEBUG;SAFEMALLOC;SAFE_MUTEX;_WINDOWS;USE_SYMDIR"
+ PreprocessorDefinitions="__NT__;_DEBUG;SAFEMALLOC;SAFE_MUTEX;_WINDOWS;USE_SYMDIR"
RuntimeLibrary="1"
PrecompiledHeaderFile=".\debug/mysys.pch"
AssemblerListingLocation=".\debug/"
@@ -109,6 +109,56 @@
Name="VCAuxiliaryManagedWrapperGeneratorTool"/>
</Configuration>
<Configuration
+ Name="Max nt|Win32"
+ OutputDirectory=".\max_nt"
+ IntermediateDirectory=".\max_nt"
+ ConfigurationType="4"
+ UseOfMFC="0"
+ ATLMinimizesCRunTimeLibraryUsage="FALSE">
+ <Tool
+ Name="VCCLCompilerTool"
+ Optimization="2"
+ InlineFunctionExpansion="1"
+ OptimizeForProcessor="2"
+ AdditionalIncludeDirectories="../include,../zlib"
+ PreprocessorDefinitions="__NT__;USE_SYMDIR;NDEBUG;DBUG_OFF;_WINDOWS"
+ StringPooling="TRUE"
+ RuntimeLibrary="0"
+ EnableFunctionLevelLinking="TRUE"
+ PrecompiledHeaderFile=".\max_nt/mysys.pch"
+ AssemblerListingLocation=".\max_nt/"
+ ObjectFile=".\max_nt/"
+ ProgramDataBaseFileName=".\max_nt/"
+ WarningLevel="3"
+ SuppressStartupBanner="TRUE"
+ CompileAs="0"/>
+ <Tool
+ Name="VCCustomBuildTool"/>
+ <Tool
+ Name="VCLibrarianTool"
+ OutputFile="..\lib_release\mysys-max-nt.lib"
+ SuppressStartupBanner="TRUE"/>
+ <Tool
+ Name="VCMIDLTool"/>
+ <Tool
+ Name="VCPostBuildEventTool"/>
+ <Tool
+ Name="VCPreBuildEventTool"/>
+ <Tool
+ Name="VCPreLinkEventTool"/>
+ <Tool
+ Name="VCResourceCompilerTool"
+ Culture="1033"/>
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"/>
+ <Tool
+ Name="VCXMLDataGeneratorTool"/>
+ <Tool
+ Name="VCManagedWrapperGeneratorTool"/>
+ <Tool
+ Name="VCAuxiliaryManagedWrapperGeneratorTool"/>
+ </Configuration>
+ <Configuration
Name="Release|Win32"
OutputDirectory=".\release"
IntermediateDirectory=".\release"
@@ -159,6 +209,56 @@
Name="VCAuxiliaryManagedWrapperGeneratorTool"/>
</Configuration>
<Configuration
+ Name="nt|Win32"
+ OutputDirectory=".\nt"
+ IntermediateDirectory=".\nt"
+ ConfigurationType="4"
+ UseOfMFC="0"
+ ATLMinimizesCRunTimeLibraryUsage="FALSE">
+ <Tool
+ Name="VCCLCompilerTool"
+ Optimization="2"
+ InlineFunctionExpansion="1"
+ OptimizeForProcessor="2"
+ AdditionalIncludeDirectories="../include,../zlib"
+ PreprocessorDefinitions="__NT__;DBUG_OFF;_WINDOWS;NDEBUG"
+ StringPooling="TRUE"
+ RuntimeLibrary="0"
+ EnableFunctionLevelLinking="TRUE"
+ PrecompiledHeaderFile=".\nt/mysys.pch"
+ AssemblerListingLocation=".\nt/"
+ ObjectFile=".\nt/"
+ ProgramDataBaseFileName=".\nt/"
+ WarningLevel="3"
+ SuppressStartupBanner="TRUE"
+ CompileAs="0"/>
+ <Tool
+ Name="VCCustomBuildTool"/>
+ <Tool
+ Name="VCLibrarianTool"
+ OutputFile="..\lib_release\mysys-nt.lib"
+ SuppressStartupBanner="TRUE"/>
+ <Tool
+ Name="VCMIDLTool"/>
+ <Tool
+ Name="VCPostBuildEventTool"/>
+ <Tool
+ Name="VCPreBuildEventTool"/>
+ <Tool
+ Name="VCPreLinkEventTool"/>
+ <Tool
+ Name="VCResourceCompilerTool"
+ Culture="1033"/>
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"/>
+ <Tool
+ Name="VCXMLDataGeneratorTool"/>
+ <Tool
+ Name="VCManagedWrapperGeneratorTool"/>
+ <Tool
+ Name="VCAuxiliaryManagedWrapperGeneratorTool"/>
+ </Configuration>
+ <Configuration
Name="TLS_DEBUG|Win32"
OutputDirectory=".\mysys___Win32_TLS_DEBUG"
IntermediateDirectory=".\mysys___Win32_TLS_DEBUG"
diff --git a/VC++Files/sql/mysqld.vcproj b/VC++Files/sql/mysqld.vcproj
index 3f20cffec0a..c9675f3fd8a 100644
--- a/VC++Files/sql/mysqld.vcproj
+++ b/VC++Files/sql/mysqld.vcproj
@@ -85,7 +85,7 @@
InlineFunctionExpansion="1"
OptimizeForProcessor="2"
AdditionalIncludeDirectories="../bdb/build_win32,../include,../regex,../extra/yassl/include,../zlib"
- PreprocessorDefinitions="NDEBUG;DBUG_OFF;USE_SYMDIR;HAVE_INNOBASE_DB;HAVE_BERKELEY_DB;HAVE_ARCHIVE_DB;HAVE_BLACKHOLE_DB;HAVE_EXAMPLE_DB;HAVE_FEDERATED_DB;MYSQL_SERVER;_WINDOWS;_CONSOLE;HAVE_DLOPEN"
+ PreprocessorDefinitions="__NT__;NDEBUG;DBUG_OFF;USE_SYMDIR;HAVE_INNOBASE_DB;HAVE_BERKELEY_DB;HAVE_ARCHIVE_DB;HAVE_BLACKHOLE_DB;HAVE_EXAMPLE_DB;HAVE_FEDERATED_DB;MYSQL_SERVER;_WINDOWS;_CONSOLE;HAVE_DLOPEN"
StringPooling="TRUE"
RuntimeLibrary="0"
EnableFunctionLevelLinking="TRUE"
diff --git a/client/mysqldump.c b/client/mysqldump.c
index e89e8064c9a..53cb06be6f3 100644
--- a/client/mysqldump.c
+++ b/client/mysqldump.c
@@ -1503,9 +1503,15 @@ static uint get_table_structure(char *table, char *db, char *table_type,
field= mysql_fetch_field_direct(result, 0);
if (strcmp(field->name, "View") == 0)
{
+ char *scv_buff = NULL;
+
if (verbose)
fprintf(stderr, "-- It's a view, create dummy table for view\n");
+ /* save "show create" statement for later */
+ if ((row= mysql_fetch_row(result)) && (scv_buff=row[1]))
+ scv_buff= my_strdup(scv_buff, MYF(0));
+
mysql_free_result(result);
/*
@@ -1523,9 +1529,22 @@ static uint get_table_structure(char *table, char *db, char *table_type,
"SHOW FIELDS FROM %s", result_table);
if (mysql_query_with_error_report(sock, 0, query_buff))
{
+ /*
+ View references invalid or privileged table/col/fun (err 1356),
+ so we cannot create a stand-in table. Be defensive and dump
+ a comment with the view's 'show create' statement. (Bug #17371)
+ */
+
+ if (mysql_errno(sock) == ER_VIEW_INVALID)
+ fprintf(sql_file, "\n-- failed on view %s: %s\n\n", result_table, scv_buff ? scv_buff : "");
+
+ my_free(scv_buff, MYF(MY_ALLOW_ZERO_PTR));
+
safe_exit(EX_MYSQLERR);
- DBUG_RETURN(0);
+ DBUG_RETURN(0);
}
+ else
+ my_free(scv_buff, MYF(MY_ALLOW_ZERO_PTR));
if ((result= mysql_store_result(sock)))
{
@@ -1566,6 +1585,9 @@ static uint get_table_structure(char *table, char *db, char *table_type,
}
mysql_free_result(result);
+ if (path)
+ my_fclose(sql_file, MYF(MY_WME));
+
seen_views= 1;
DBUG_RETURN(0);
}
diff --git a/config/ac-macros/compiler_flag.m4 b/config/ac-macros/compiler_flag.m4
index a236f61a198..88097c7a62e 100644
--- a/config/ac-macros/compiler_flag.m4
+++ b/config/ac-macros/compiler_flag.m4
@@ -38,3 +38,25 @@ AC_DEFUN([AC_SYS_OS_COMPILER_FLAG],
fi
])
+AC_DEFUN([AC_CHECK_NOEXECSTACK],
+[
+ AC_CACHE_CHECK(whether --noexecstack is desirable for .S files,
+ mysql_cv_as_noexecstack, [dnl
+ cat > conftest.c <<EOF
+void foo (void) { }
+EOF
+ if AC_TRY_COMMAND([${CC-cc} $CFLAGS $CPPFLAGS
+ -S -o conftest.s conftest.c 1>&AS_MESSAGE_LOG_FD]) \
+ && grep .note.GNU-stack conftest.s >/dev/null \
+ && AC_TRY_COMMAND([${CC-cc} $CCASFLAGS $CPPFLAGS -Wa,--noexecstack
+ -c -o conftest.o conftest.s 1>&AS_MESSAGE_LOG_FD])
+ then
+ mysql_cv_as_noexecstack=yes
+ else
+ mysql_cv_as_noexecstack=no
+ fi
+ rm -f conftest*])
+ if test $mysql_cv_as_noexecstack = yes; then
+ CCASFLAGS="$CCASFLAGS -Wa,--noexecstack"
+ fi
+])
diff --git a/configure.in b/configure.in
index fd2cf3baf97..48454a11309 100644
--- a/configure.in
+++ b/configure.in
@@ -7,7 +7,7 @@ AC_INIT(sql/mysqld.cc)
AC_CANONICAL_SYSTEM
# The Docs Makefile.am parses this line!
# remember to also change ndb version below and update version.c in ndb
-AM_INIT_AUTOMAKE(mysql, 5.0.23)
+AM_INIT_AUTOMAKE(mysql, 5.0.24)
AM_CONFIG_HEADER(config.h)
PROTOCOL_VERSION=10
@@ -19,7 +19,7 @@ SHARED_LIB_VERSION=$SHARED_LIB_MAJOR_VERSION:0:0
# ndb version
NDB_VERSION_MAJOR=5
NDB_VERSION_MINOR=0
-NDB_VERSION_BUILD=23
+NDB_VERSION_BUILD=24
NDB_VERSION_STATUS=""
# Set all version vars based on $VERSION. How do we do this more elegant ?
@@ -515,6 +515,10 @@ AM_PROG_CC_STDC
# We need an assembler, too
AM_PROG_AS
+CCASFLAGS="$CCASFLAGS $ASFLAGS"
+
+# Check if we need noexec stack for assembler
+AC_CHECK_NOEXECSTACK
if test "$am_cv_prog_cc_stdc" = "no"
then
diff --git a/include/Makefile.am b/include/Makefile.am
index 07c32e3127b..2dbea3fe07f 100644
--- a/include/Makefile.am
+++ b/include/Makefile.am
@@ -31,7 +31,7 @@ noinst_HEADERS = config-win.h config-os2.h config-netware.h \
my_aes.h my_tree.h hash.h thr_alarm.h \
thr_lock.h t_ctype.h violite.h md5.h base64.h \
mysql_version.h.in my_handler.h my_time.h decimal.h \
- my_user.h
+ my_user.h my_libwrap.h
# mysql_version.h are generated
CLEANFILES = mysql_version.h my_config.h readline openssl
diff --git a/include/my_libwrap.h b/include/my_libwrap.h
new file mode 100644
index 00000000000..6437cbaed84
--- /dev/null
+++ b/include/my_libwrap.h
@@ -0,0 +1,28 @@
+/* Copyright (C) 2000 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifdef HAVE_LIBWRAP
+#include <tcpd.h>
+#include <syslog.h>
+#ifdef NEED_SYS_SYSLOG_H
+#include <sys/syslog.h>
+#endif /* NEED_SYS_SYSLOG_H */
+
+extern void my_fromhost(struct request_info *req);
+extern int my_hosts_access(struct request_info *req);
+extern char *my_eval_client(struct request_info *req);
+
+#endif /* HAVE_LIBWRAP */
diff --git a/include/my_sys.h b/include/my_sys.h
index 229389f1ac5..b00b59c4779 100644
--- a/include/my_sys.h
+++ b/include/my_sys.h
@@ -157,7 +157,7 @@ extern gptr my_realloc(gptr oldpoint,uint Size,myf MyFlags);
extern void my_no_flags_free(gptr ptr);
extern gptr my_memdup(const byte *from,uint length,myf MyFlags);
extern char *my_strdup(const char *from,myf MyFlags);
-extern char *my_strdup_with_length(const byte *from, uint length,
+extern char *my_strdup_with_length(const char *from, uint length,
myf MyFlags);
/* we do use FG (as a no-op) in below so that a typo on FG is caught */
#define my_free(PTR,FG) ((void)FG,my_no_flags_free(PTR))
@@ -541,6 +541,7 @@ typedef int (*Process_option_func)(void *ctx, const char *group_name,
#include <my_alloc.h>
+
/* Prototypes for mysys and my_func functions */
extern int my_copy(const char *from,const char *to,myf MyFlags);
@@ -613,6 +614,13 @@ extern File my_sopen(const char *path, int oflag, int shflag, int pmode);
#endif
extern int check_if_legal_filename(const char *path);
+#if defined(__WIN__) && defined(__NT__)
+extern int nt_share_delete(const char *name,myf MyFlags);
+#define my_delete_allow_opened(fname,flags) nt_share_delete((fname),(flags))
+#else
+#define my_delete_allow_opened(fname,flags) my_delete((fname),(flags))
+#endif
+
#ifndef TERMINATE
extern void TERMINATE(FILE *file);
#endif
diff --git a/include/sql_common.h b/include/sql_common.h
index c07a4a831bb..9fc8d4f457b 100644
--- a/include/sql_common.h
+++ b/include/sql_common.h
@@ -22,6 +22,7 @@ extern const char *not_error_sqlstate;
extern "C" {
#endif
+extern CHARSET_INFO *default_client_charset_info;
MYSQL_FIELD *unpack_fields(MYSQL_DATA *data,MEM_ROOT *alloc,uint fields,
my_bool default_value, uint server_capabilities);
void free_rows(MYSQL_DATA *cur);
diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc
index a2fdae994b1..a2c570c2fbc 100644
--- a/libmysqld/lib_sql.cc
+++ b/libmysqld/lib_sql.cc
@@ -37,6 +37,8 @@ extern "C"
int check_user(THD *thd, enum enum_server_command command,
const char *passwd, uint passwd_len, const char *db,
bool check_count);
+void thd_init_client_charset(THD *thd, uint cs_number);
+
C_MODE_START
#include <mysql.h>
@@ -600,11 +602,14 @@ err:
return NULL;
}
+
#ifdef NO_EMBEDDED_ACCESS_CHECKS
int check_embedded_connection(MYSQL *mysql)
{
int result;
THD *thd= (THD*)mysql->thd;
+ thd_init_client_charset(thd, mysql->charset->number);
+ thd->update_charset();
Security_context *sctx= thd->security_ctx;
sctx->host_or_ip= sctx->host= (char*) my_localhost;
strmake(sctx->priv_host, (char*) my_localhost, MAX_HOSTNAME-1);
@@ -623,6 +628,8 @@ int check_embedded_connection(MYSQL *mysql)
char scramble_buff[SCRAMBLE_LENGTH];
int passwd_len;
+ thd_init_client_charset(thd, mysql->charset->number);
+ thd->update_charset();
if (mysql->options.client_ip)
{
sctx->host= my_strdup(mysql->options.client_ip, MYF(0));
diff --git a/libmysqld/libmysqld.c b/libmysqld/libmysqld.c
index cad1bd4c47b..5df61783451 100644
--- a/libmysqld/libmysqld.c
+++ b/libmysqld/libmysqld.c
@@ -90,49 +90,7 @@ static void end_server(MYSQL *mysql)
}
-static int mysql_init_charset(MYSQL *mysql)
-{
- char charset_name_buff[16], *charset_name;
-
- if ((charset_name=mysql->options.charset_name))
- {
- const char *save=charsets_dir;
- if (mysql->options.charset_dir)
- charsets_dir=mysql->options.charset_dir;
- mysql->charset=get_charset_by_name(mysql->options.charset_name,
- MYF(MY_WME));
- charsets_dir=save;
- }
- else if (mysql->server_language)
- {
- charset_name=charset_name_buff;
- sprintf(charset_name,"%d",mysql->server_language); /* In case of errors */
- mysql->charset=get_charset((uint8) mysql->server_language, MYF(MY_WME));
- }
- else
- mysql->charset=default_charset_info;
-
- if (!mysql->charset)
- {
- mysql->net.last_errno=CR_CANT_READ_CHARSET;
- strmov(mysql->net.sqlstate, "HY0000");
- if (mysql->options.charset_dir)
- sprintf(mysql->net.last_error,ER(mysql->net.last_errno),
- charset_name ? charset_name : "unknown",
- mysql->options.charset_dir);
- else
- {
- char cs_dir_name[FN_REFLEN];
- get_charsets_dir(cs_dir_name);
- sprintf(mysql->net.last_error,ER(mysql->net.last_errno),
- charset_name ? charset_name : "unknown",
- cs_dir_name);
- }
- return mysql->net.last_errno;
- }
- return 0;
-}
-
+int mysql_init_character_set(MYSQL *mysql);
MYSQL * STDCALL
mysql_real_connect(MYSQL *mysql,const char *host, const char *user,
@@ -222,10 +180,10 @@ mysql_real_connect(MYSQL *mysql,const char *host, const char *user,
init_embedded_mysql(mysql, client_flag, db_name);
- if (check_embedded_connection(mysql))
+ if (mysql_init_character_set(mysql))
goto error;
- if (mysql_init_charset(mysql))
+ if (check_embedded_connection(mysql))
goto error;
mysql->server_status= SERVER_STATUS_AUTOCOMMIT;
diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl
index 3293487a0ac..5226e6f80df 100755
--- a/mysql-test/mysql-test-run.pl
+++ b/mysql-test/mysql-test-run.pl
@@ -1033,7 +1033,8 @@ sub executable_setup () {
$path_client_bindir= mtr_path_exists("$glob_basedir/client_release",
"$glob_basedir/client_debug",
"$glob_basedir/bin",);
- $exe_mysqld= mtr_exe_exists ("$path_client_bindir/mysqld-max",
+ $exe_mysqld= mtr_exe_exists ("$path_client_bindir/mysqld-max-nt",
+ "$path_client_bindir/mysqld-max",
"$path_client_bindir/mysqld-nt",
"$path_client_bindir/mysqld",
"$path_client_bindir/mysqld-debug",
diff --git a/mysql-test/r/auto_increment.result b/mysql-test/r/auto_increment.result
index 3797af11a11..afbff905699 100644
--- a/mysql-test/r/auto_increment.result
+++ b/mysql-test/r/auto_increment.result
@@ -143,7 +143,7 @@ explain extended select last_insert_id();
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select sql_no_cache last_insert_id() AS `last_insert_id()`
+Note 1003 select last_insert_id() AS `last_insert_id()`
insert into t1 set i = 254;
ERROR 23000: Duplicate entry '254' for key 1
select last_insert_id();
diff --git a/mysql-test/r/bdb.result b/mysql-test/r/bdb.result
index af6319afe99..ee7cdceefda 100644
--- a/mysql-test/r/bdb.result
+++ b/mysql-test/r/bdb.result
@@ -1928,4 +1928,38 @@ create table t1 (a int) engine=bdb;
commit;
alter table t1 add primary key(a);
drop table t1;
+set autocommit=1;
+reset master;
+create table bug16206 (a int);
+insert into bug16206 values(1);
+start transaction;
+insert into bug16206 values(2);
+commit;
+show binlog events;
+Log_name Pos Event_type Server_id End_log_pos Info
+f n Format_desc 1 n Server ver: VERSION, Binlog ver: 4
+f n Query 1 n use `test`; create table bug16206 (a int)
+f n Query 1 n use `test`; insert into bug16206 values(1)
+f n Query 1 n use `test`; insert into bug16206 values(2)
+drop table bug16206;
+reset master;
+create table bug16206 (a int) engine= bdb;
+insert into bug16206 values(0);
+insert into bug16206 values(1);
+start transaction;
+insert into bug16206 values(2);
+commit;
+insert into bug16206 values(3);
+show binlog events;
+Log_name Pos Event_type Server_id End_log_pos Info
+f n Format_desc 1 n Server ver: VERSION, Binlog ver: 4
+f n Query 1 n use `test`; create table bug16206 (a int) engine= bdb
+f n Query 1 n use `test`; insert into bug16206 values(0)
+f n Query 1 n use `test`; insert into bug16206 values(1)
+f n Query 1 n use `test`; BEGIN
+f n Query 1 n use `test`; insert into bug16206 values(2)
+f n Query 1 n use `test`; COMMIT
+f n Query 1 n use `test`; insert into bug16206 values(3)
+drop table bug16206;
+set autocommit=0;
End of 5.0 tests
diff --git a/mysql-test/r/create.result b/mysql-test/r/create.result
index 27a6c8a9d03..c5b77ea4925 100644
--- a/mysql-test/r/create.result
+++ b/mysql-test/r/create.result
@@ -607,7 +607,7 @@ create database mysqltest;
use mysqltest;
drop database mysqltest;
create table test.t1 like x;
-ERROR 42000: Incorrect database name 'NULL'
+ERROR 3D000: No database selected
drop table if exists test.t1;
create database mysqltest;
use mysqltest;
diff --git a/mysql-test/r/ctype_ucs2_def.result b/mysql-test/r/ctype_ucs2_def.result
index 897dbac251c..2f9dc4ae616 100644
--- a/mysql-test/r/ctype_ucs2_def.result
+++ b/mysql-test/r/ctype_ucs2_def.result
@@ -1,3 +1,6 @@
+show variables like 'collation_server';
+Variable_name Value
+collation_server ucs2_unicode_ci
show variables like "%character_set_ser%";
Variable_name Value
character_set_server ucs2
diff --git a/mysql-test/r/distinct.result b/mysql-test/r/distinct.result
index 89b17d69f40..a3d1e8bf3bb 100644
--- a/mysql-test/r/distinct.result
+++ b/mysql-test/r/distinct.result
@@ -504,6 +504,57 @@ a 2 b
2 2 4
3 2 5
DROP TABLE t1,t2;
+CREATE TABLE t1(a INT PRIMARY KEY, b INT);
+INSERT INTO t1 VALUES (1,1), (2,1), (3,1);
+EXPLAIN SELECT DISTINCT a FROM t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL PRIMARY 4 NULL 3 Using index
+EXPLAIN SELECT DISTINCT a,b FROM t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3
+EXPLAIN SELECT DISTINCT t1_1.a, t1_1.b FROM t1 t1_1, t1 t1_2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1_1 ALL NULL NULL NULL NULL 3 Using temporary
+1 SIMPLE t1_2 index NULL PRIMARY 4 NULL 3 Using index; Distinct
+EXPLAIN SELECT DISTINCT t1_1.a, t1_1.b FROM t1 t1_1, t1 t1_2
+WHERE t1_1.a = t1_2.a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1_1 ALL PRIMARY NULL NULL NULL 3 Using temporary
+1 SIMPLE t1_2 eq_ref PRIMARY PRIMARY 4 test.t1_1.a 1 Using index; Distinct
+EXPLAIN SELECT a FROM t1 GROUP BY a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL PRIMARY 4 NULL 3 Using index
+EXPLAIN SELECT a,b FROM t1 GROUP BY a,b;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3
+EXPLAIN SELECT DISTINCT a,b FROM t1 GROUP BY a,b;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3
+CREATE TABLE t2(a INT, b INT, c INT, d INT, PRIMARY KEY (a,b));
+INSERT INTO t2 VALUES (1,1,1,50), (1,2,3,40), (2,1,3,4);
+EXPLAIN SELECT DISTINCT a FROM t2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 index NULL PRIMARY 8 NULL 3 Using index
+EXPLAIN SELECT DISTINCT a,a FROM t2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 index NULL PRIMARY 8 NULL 3 Using index
+EXPLAIN SELECT DISTINCT b,a FROM t2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 index NULL PRIMARY 8 NULL 3 Using index
+EXPLAIN SELECT DISTINCT a,c FROM t2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 ALL NULL NULL NULL NULL 3 Using temporary
+EXPLAIN SELECT DISTINCT c,a,b FROM t2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 ALL NULL NULL NULL NULL 3
+EXPLAIN SELECT DISTINCT a,b,d FROM t2 GROUP BY c,b,d;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 ALL NULL NULL NULL NULL 3 Using temporary; Using filesort
+CREATE UNIQUE INDEX c_b_unq ON t2 (c,b);
+EXPLAIN SELECT DISTINCT a,b,d FROM t2 GROUP BY c,b,d;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 ALL NULL NULL NULL NULL 3
+DROP TABLE t1,t2;
create table t1 (id int, dsc varchar(50));
insert into t1 values (1, "line number one"), (2, "line number two"), (3, "line number three");
select distinct id, IFNULL(dsc, '-') from t1;
diff --git a/mysql-test/r/federated.result b/mysql-test/r/federated.result
index f11da4ee62f..2eb0c81ec2e 100644
--- a/mysql-test/r/federated.result
+++ b/mysql-test/r/federated.result
@@ -1558,6 +1558,8 @@ id
3
4
5
+DROP TABLE federated.t1;
+DROP TABLE federated.t1;
DROP TABLE IF EXISTS federated.bug_17377_table;
CREATE TABLE federated.bug_17377_table (
`fld_cid` bigint(20) NOT NULL auto_increment,
@@ -1601,6 +1603,92 @@ fld_cid fld_name fld_parentid fld_delt
5 Torkel 0 0
DROP TABLE federated.t1;
DROP TABLE federated.bug_17377_table;
+create table federated.t1 (i1 int, i2 int, i3 int);
+create table federated.t2 (id int, c1 varchar(20), c2 varchar(20));
+create table federated.t1 (i1 int, i2 int, i3 int) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1';
+create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t2';
+insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2);
+insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test");
+select * from federated.t1 order by i1;
+i1 i2 i3
+1 5 10
+2 2 2
+3 7 12
+4 5 2
+9 10 15
+select * from federated.t2;
+id c1 c2
+9 abc def
+5 opq lmn
+2 test t t test
+update federated.t1,federated.t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id;
+select * from federated.t1 order by i1;
+i1 i2 i3
+1 5 10
+2 15 2
+3 7 12
+4 5 2
+9 15 15
+select * from federated.t2 order by id;
+id c1 c2
+2 test t ppc
+5 opq lmn
+9 abc ppc
+delete federated.t1.*,federated.t2.* from federated.t1,federated.t2 where t1.i2=t2.id;
+select * from federated.t1 order by i1;
+i1 i2 i3
+2 15 2
+3 7 12
+9 15 15
+select * from federated.t2 order by id;
+id c1 c2
+2 test t ppc
+9 abc ppc
+drop table federated.t1, federated.t2;
+drop table federated.t1, federated.t2;
+create table federated.t1 (i1 int, i2 int, i3 int, primary key (i1));
+create table federated.t2 (id int, c1 varchar(20), c2 varchar(20), primary key (id));
+create table federated.t1 (i1 int auto_increment not null, i2 int, i3 int, primary key (i1)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1';
+create table federated.t2 (id int auto_increment not null, c1 varchar(20), c2 varchar(20), primary key(id)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t2';
+insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2);
+insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test");
+select * from federated.t1 order by i1;
+i1 i2 i3
+1 5 10
+2 2 2
+3 7 12
+4 5 2
+9 10 15
+select * from federated.t2 order by id;
+id c1 c2
+2 test t t test
+5 opq lmn
+9 abc def
+update federated.t1,federated.t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id;
+select * from federated.t1 order by i1;
+i1 i2 i3
+1 5 10
+2 15 2
+3 7 12
+4 5 2
+9 15 15
+select * from federated.t2 order by id;
+id c1 c2
+2 test t ppc
+5 opq lmn
+9 abc ppc
+delete federated.t1.*,federated.t2.* from federated.t1,federated.t2 where t1.i2=t2.id;
+select * from federated.t1 order by i1;
+i1 i2 i3
+2 15 2
+3 7 12
+9 15 15
+select * from federated.t2 order by id;
+id c1 c2
+2 test t ppc
+9 abc ppc
+drop table federated.t1, federated.t2;
+drop table federated.t1, federated.t2;
DROP TABLE IF EXISTS federated.t1;
DROP DATABASE IF EXISTS federated;
DROP TABLE IF EXISTS federated.t1;
diff --git a/mysql-test/r/func_compress.result b/mysql-test/r/func_compress.result
index 8d6fa9927ce..e3d31566741 100644
--- a/mysql-test/r/func_compress.result
+++ b/mysql-test/r/func_compress.result
@@ -11,7 +11,7 @@ explain extended select uncompress(compress(@test_compress_string));
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select sql_no_cache uncompress(compress((@test_compress_string))) AS `uncompress(compress(@test_compress_string))`
+Note 1003 select uncompress(compress((@test_compress_string))) AS `uncompress(compress(@test_compress_string))`
select uncompressed_length(compress(@test_compress_string))=length(@test_compress_string);
uncompressed_length(compress(@test_compress_string))=length(@test_compress_string)
1
@@ -19,7 +19,7 @@ explain extended select uncompressed_length(compress(@test_compress_string))=len
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select sql_no_cache (uncompressed_length(compress((@test_compress_string))) = length((@test_compress_string))) AS `uncompressed_length(compress(@test_compress_string))=length(@test_compress_string)`
+Note 1003 select (uncompressed_length(compress((@test_compress_string))) = length((@test_compress_string))) AS `uncompressed_length(compress(@test_compress_string))=length(@test_compress_string)`
select uncompressed_length(compress(@test_compress_string));
uncompressed_length(compress(@test_compress_string))
117
diff --git a/mysql-test/r/func_math.result b/mysql-test/r/func_math.result
index 43748257203..fc9bfb3b612 100644
--- a/mysql-test/r/func_math.result
+++ b/mysql-test/r/func_math.result
@@ -90,7 +90,7 @@ explain extended select rand(999999),rand();
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select sql_no_cache rand(999999) AS `rand(999999)`,rand() AS `rand()`
+Note 1003 select rand(999999) AS `rand(999999)`,rand() AS `rand()`
select pi(),format(sin(pi()/2),6),format(cos(pi()/2),6),format(abs(tan(pi())),6),format(cot(1),6),format(asin(1),6),format(acos(0),6),format(atan(1),6);
pi() format(sin(pi()/2),6) format(cos(pi()/2),6) format(abs(tan(pi())),6) format(cot(1),6) format(asin(1),6) format(acos(0),6) format(atan(1),6)
3.141593 1.000000 0.000000 0.000000 0.642093 1.570796 1.570796 0.785398
diff --git a/mysql-test/r/func_sapdb.result b/mysql-test/r/func_sapdb.result
index d984eee80fa..64eb6eefd1a 100644
--- a/mysql-test/r/func_sapdb.result
+++ b/mysql-test/r/func_sapdb.result
@@ -81,6 +81,12 @@ makedate(1997,1)
select makedate(1997,0);
makedate(1997,0)
NULL
+select makedate(9999,365);
+makedate(9999,365)
+9999-12-31
+select makedate(9999,366);
+makedate(9999,366)
+NULL
select addtime("1997-12-31 23:59:59.999999", "1 1:1:1.000002");
addtime("1997-12-31 23:59:59.999999", "1 1:1:1.000002")
1998-01-02 01:01:01.000001
diff --git a/mysql-test/r/func_system.result b/mysql-test/r/func_system.result
index 1c1c6dff21e..00bef09715d 100644
--- a/mysql-test/r/func_system.result
+++ b/mysql-test/r/func_system.result
@@ -41,7 +41,7 @@ explain extended select database(), user();
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select sql_no_cache database() AS `database()`,user() AS `user()`
+Note 1003 select database() AS `database()`,user() AS `user()`
create table t1 (version char(40)) select database(), user(), version() as 'version';
show create table t1;
Table Create Table
diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result
index 593ce7b26c8..96591086f00 100644
--- a/mysql-test/r/func_time.result
+++ b/mysql-test/r/func_time.result
@@ -1,4 +1,5 @@
drop table if exists t1,t2,t3;
+set time_zone="+03:00";
select from_days(to_days("960101")),to_days(960201)-to_days("19960101"),to_days(date_add(curdate(), interval 1 day))-to_days(curdate()),weekday("1997-11-29");
from_days(to_days("960101")) to_days(960201)-to_days("19960101") to_days(date_add(curdate(), interval 1 day))-to_days(curdate()) weekday("1997-11-29")
1996-01-01 31 1 5
@@ -360,6 +361,12 @@ extract(SECOND FROM "1999-01-02 10:11:12")
select extract(MONTH FROM "2001-02-00");
extract(MONTH FROM "2001-02-00")
2
+SELECT DATE_SUB(str_to_date('9999-12-31 00:01:00','%Y-%m-%d %H:%i:%s'), INTERVAL 1 MINUTE);
+DATE_SUB(str_to_date('9999-12-31 00:01:00','%Y-%m-%d %H:%i:%s'), INTERVAL 1 MINUTE)
+9999-12-31 00:00:00
+SELECT DATE_ADD(str_to_date('9999-12-30 23:59:00','%Y-%m-%d %H:%i:%s'), INTERVAL 1 MINUTE);
+DATE_ADD(str_to_date('9999-12-30 23:59:00','%Y-%m-%d %H:%i:%s'), INTERVAL 1 MINUTE)
+9999-12-31 00:00:00
SELECT EXTRACT(QUARTER FROM '2004-01-15') AS quarter;
quarter
1
@@ -722,7 +729,7 @@ explain extended select period_add("9602",-12),period_diff(199505,"9404"),from_d
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select sql_no_cache period_add(_latin1'9602',-(12)) AS `period_add("9602",-12)`,period_diff(199505,_latin1'9404') AS `period_diff(199505,"9404")`,from_days(to_days(_latin1'960101')) AS `from_days(to_days("960101"))`,dayofmonth(_latin1'1997-01-02') AS `dayofmonth("1997-01-02")`,month(_latin1'1997-01-02') AS `month("1997-01-02")`,monthname(_latin1'1972-03-04') AS `monthname("1972-03-04")`,dayofyear(_latin1'0000-00-00') AS `dayofyear("0000-00-00")`,hour(_latin1'1997-03-03 23:03:22') AS `HOUR("1997-03-03 23:03:22")`,minute(_latin1'23:03:22') AS `MINUTE("23:03:22")`,second(230322) AS `SECOND(230322)`,quarter(980303) AS `QUARTER(980303)`,week(_latin1'1998-03-03',0) AS `WEEK("1998-03-03")`,yearweek(_latin1'2000-01-01',1) AS `yearweek("2000-01-01",1)`,week(19950101,1) AS `week(19950101,1)`,year(_latin1'98-02-03') AS `year("98-02-03")`,(weekday(curdate()) - weekday(now())) AS `weekday(curdate())-weekday(now())`,dayname(_latin1'1962-03-03') AS `dayname("1962-03-03")`,unix_timestamp() AS `unix_timestamp()`,sec_to_time((time_to_sec(_latin1'0:30:47') / 6.21)) AS `sec_to_time(time_to_sec("0:30:47")/6.21)`,curtime() AS `curtime()`,utc_time() AS `utc_time()`,curdate() AS `curdate()`,utc_date() AS `utc_date()`,utc_timestamp() AS `utc_timestamp()`,date_format(_latin1'1997-01-02 03:04:05',_latin1'%M %W %D %Y %y %m %d %h %i %s %w') AS `date_format("1997-01-02 03:04:05", "%M %W %D %Y %y %m %d %h %i %s %w")`,from_unixtime(unix_timestamp(_latin1'1994-03-02 10:11:12')) AS `from_unixtime(unix_timestamp("1994-03-02 10:11:12"))`,(_latin1'1997-12-31 23:59:59' + interval 1 second) AS `"1997-12-31 23:59:59" + INTERVAL 1 SECOND`,(_latin1'1998-01-01 00:00:00' - interval 1 second) AS `"1998-01-01 00:00:00" - INTERVAL 1 SECOND`,(_latin1'1997-12-31' + interval 1 day) AS `INTERVAL 1 DAY + "1997-12-31"`,extract(year from _latin1'1999-01-02 10:11:12') AS `extract(YEAR FROM "1999-01-02 10:11:12")`,(_latin1'1997-12-31 23:59:59' + interval 1 second) AS `date_add("1997-12-31 23:59:59",INTERVAL 1 SECOND)`
+Note 1003 select period_add(_latin1'9602',-(12)) AS `period_add("9602",-12)`,period_diff(199505,_latin1'9404') AS `period_diff(199505,"9404")`,from_days(to_days(_latin1'960101')) AS `from_days(to_days("960101"))`,dayofmonth(_latin1'1997-01-02') AS `dayofmonth("1997-01-02")`,month(_latin1'1997-01-02') AS `month("1997-01-02")`,monthname(_latin1'1972-03-04') AS `monthname("1972-03-04")`,dayofyear(_latin1'0000-00-00') AS `dayofyear("0000-00-00")`,hour(_latin1'1997-03-03 23:03:22') AS `HOUR("1997-03-03 23:03:22")`,minute(_latin1'23:03:22') AS `MINUTE("23:03:22")`,second(230322) AS `SECOND(230322)`,quarter(980303) AS `QUARTER(980303)`,week(_latin1'1998-03-03',0) AS `WEEK("1998-03-03")`,yearweek(_latin1'2000-01-01',1) AS `yearweek("2000-01-01",1)`,week(19950101,1) AS `week(19950101,1)`,year(_latin1'98-02-03') AS `year("98-02-03")`,(weekday(curdate()) - weekday(now())) AS `weekday(curdate())-weekday(now())`,dayname(_latin1'1962-03-03') AS `dayname("1962-03-03")`,unix_timestamp() AS `unix_timestamp()`,sec_to_time((time_to_sec(_latin1'0:30:47') / 6.21)) AS `sec_to_time(time_to_sec("0:30:47")/6.21)`,curtime() AS `curtime()`,utc_time() AS `utc_time()`,curdate() AS `curdate()`,utc_date() AS `utc_date()`,utc_timestamp() AS `utc_timestamp()`,date_format(_latin1'1997-01-02 03:04:05',_latin1'%M %W %D %Y %y %m %d %h %i %s %w') AS `date_format("1997-01-02 03:04:05", "%M %W %D %Y %y %m %d %h %i %s %w")`,from_unixtime(unix_timestamp(_latin1'1994-03-02 10:11:12')) AS `from_unixtime(unix_timestamp("1994-03-02 10:11:12"))`,(_latin1'1997-12-31 23:59:59' + interval 1 second) AS `"1997-12-31 23:59:59" + INTERVAL 1 SECOND`,(_latin1'1998-01-01 00:00:00' - interval 1 second) AS `"1998-01-01 00:00:00" - INTERVAL 1 SECOND`,(_latin1'1997-12-31' + interval 1 day) AS `INTERVAL 1 DAY + "1997-12-31"`,extract(year from _latin1'1999-01-02 10:11:12') AS `extract(YEAR FROM "1999-01-02 10:11:12")`,(_latin1'1997-12-31 23:59:59' + interval 1 second) AS `date_add("1997-12-31 23:59:59",INTERVAL 1 SECOND)`
SET @TMP=NOW();
CREATE TABLE t1 (d DATETIME);
INSERT INTO t1 VALUES (NOW());
@@ -945,3 +952,4 @@ id day id day
1 2005-06-01 3 2005-07-15
3 2005-07-01 3 2005-07-15
DROP TABLE t1,t2;
+set time_zone= @@global.time_zone;
diff --git a/mysql-test/r/func_timestamp.result b/mysql-test/r/func_timestamp.result
index d9912f08b72..495fedea9e6 100644
--- a/mysql-test/r/func_timestamp.result
+++ b/mysql-test/r/func_timestamp.result
@@ -1,4 +1,5 @@
drop table if exists t1;
+set time_zone="+03:00";
create table t1 (Zeit time, Tag tinyint not null, Monat tinyint not null,
Jahr smallint not null, index(Tag), index(Monat), index(Jahr) );
insert into t1 values ("09:26:00",16,9,1998),("09:26:00",16,9,1998);
@@ -9,3 +10,4 @@ Date Unix
1998-9-16 09:26:00 905927160
1998-9-16 09:26:00 905927160
drop table t1;
+set time_zone= @@global.time_zone;
diff --git a/mysql-test/r/gis.result b/mysql-test/r/gis.result
index 13e2d56d83e..7a0f689df36 100644
--- a/mysql-test/r/gis.result
+++ b/mysql-test/r/gis.result
@@ -694,3 +694,13 @@ alter table t1 add primary key pti(pt);
ERROR 42000: BLOB/TEXT column 'pt' used in key specification without a key length
alter table t1 add primary key pti(pt(20));
drop table t1;
+create table t1 (g GEOMETRY);
+select * from t1;
+Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
+def test t1 t1 g g 255 4294967295 0 Y 144 0 63
+g
+select asbinary(g) from t1;
+Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
+def asbinary(g) 252 8192 0 Y 128 0 63
+asbinary(g)
+drop table t1;
diff --git a/mysql-test/r/information_schema.result b/mysql-test/r/information_schema.result
index 6da07922251..64969fcdf44 100644
--- a/mysql-test/r/information_schema.result
+++ b/mysql-test/r/information_schema.result
@@ -369,11 +369,11 @@ show keys from v4;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment
select * from information_schema.views where TABLE_NAME like "v%";
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME VIEW_DEFINITION CHECK_OPTION IS_UPDATABLE DEFINER SECURITY_TYPE
-NULL test v0 select sql_no_cache `schemata`.`SCHEMA_NAME` AS `c` from `information_schema`.`schemata` NONE NO root@localhost DEFINER
-NULL test v1 select sql_no_cache `tables`.`TABLE_NAME` AS `c` from `information_schema`.`tables` where (`tables`.`TABLE_NAME` = _utf8'v1') NONE NO root@localhost DEFINER
-NULL test v2 select sql_no_cache `columns`.`COLUMN_NAME` AS `c` from `information_schema`.`columns` where (`columns`.`TABLE_NAME` = _utf8'v2') NONE NO root@localhost DEFINER
-NULL test v3 select sql_no_cache `character_sets`.`CHARACTER_SET_NAME` AS `c` from `information_schema`.`character_sets` where (`character_sets`.`CHARACTER_SET_NAME` like _utf8'latin1%') NONE NO root@localhost DEFINER
-NULL test v4 select sql_no_cache `collations`.`COLLATION_NAME` AS `c` from `information_schema`.`collations` where (`collations`.`COLLATION_NAME` like _utf8'latin1%') NONE NO root@localhost DEFINER
+NULL test v0 /* ALGORITHM=UNDEFINED */ select `schemata`.`SCHEMA_NAME` AS `c` from `information_schema`.`schemata` NONE NO root@localhost DEFINER
+NULL test v1 /* ALGORITHM=UNDEFINED */ select `tables`.`TABLE_NAME` AS `c` from `information_schema`.`tables` where (`tables`.`TABLE_NAME` = _utf8'v1') NONE NO root@localhost DEFINER
+NULL test v2 /* ALGORITHM=UNDEFINED */ select `columns`.`COLUMN_NAME` AS `c` from `information_schema`.`columns` where (`columns`.`TABLE_NAME` = _utf8'v2') NONE NO root@localhost DEFINER
+NULL test v3 /* ALGORITHM=UNDEFINED */ select `character_sets`.`CHARACTER_SET_NAME` AS `c` from `information_schema`.`character_sets` where (`character_sets`.`CHARACTER_SET_NAME` like _utf8'latin1%') NONE NO root@localhost DEFINER
+NULL test v4 /* ALGORITHM=UNDEFINED */ select `collations`.`COLLATION_NAME` AS `c` from `information_schema`.`collations` where (`collations`.`COLLATION_NAME` like _utf8'latin1%') NONE NO root@localhost DEFINER
drop view v0, v1, v2, v3, v4;
create table t1 (a int);
grant select,update,insert on t1 to mysqltest_1@localhost;
@@ -464,9 +464,9 @@ create view v2 (c) as select a from t1 WITH LOCAL CHECK OPTION;
create view v3 (c) as select a from t1 WITH CASCADED CHECK OPTION;
select * from information_schema.views;
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME VIEW_DEFINITION CHECK_OPTION IS_UPDATABLE DEFINER SECURITY_TYPE
-NULL test v1 select `test`.`t1`.`a` AS `c` from `test`.`t1` CASCADED YES root@localhost DEFINER
-NULL test v2 select `test`.`t1`.`a` AS `c` from `test`.`t1` LOCAL YES root@localhost DEFINER
-NULL test v3 select `test`.`t1`.`a` AS `c` from `test`.`t1` CASCADED YES root@localhost DEFINER
+NULL test v1 /* ALGORITHM=UNDEFINED */ select `test`.`t1`.`a` AS `c` from `test`.`t1` CASCADED YES root@localhost DEFINER
+NULL test v2 /* ALGORITHM=UNDEFINED */ select `test`.`t1`.`a` AS `c` from `test`.`t1` LOCAL YES root@localhost DEFINER
+NULL test v3 /* ALGORITHM=UNDEFINED */ select `test`.`t1`.`a` AS `c` from `test`.`t1` CASCADED YES root@localhost DEFINER
grant select (a) on test.t1 to joe@localhost with grant option;
select * from INFORMATION_SCHEMA.COLUMN_PRIVILEGES;
GRANTEE TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME PRIVILEGE_TYPE IS_GRANTABLE
@@ -687,7 +687,7 @@ Warnings:
Warning 1356 View 'test.v2' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them
show create table v3;
View Create View
-v3 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v3` AS select sql_no_cache `test`.`sub1`(1) AS `c`
+v3 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v3` AS select `test`.`sub1`(1) AS `c`
Warnings:
Warning 1356 View 'test.v3' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them
drop view v2;
@@ -1121,7 +1121,7 @@ select * from information_schema.views
where table_name='v1' or table_name='v2';
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME VIEW_DEFINITION CHECK_OPTION IS_UPDATABLE DEFINER SECURITY_TYPE
NULL test v1 NONE YES root@localhost DEFINER
-NULL test v2 select 1 AS `1` NONE NO mysqltest_1@localhost DEFINER
+NULL test v2 /* ALGORITHM=UNDEFINED */ select 1 AS `1` NONE NO mysqltest_1@localhost DEFINER
drop view v1, v2;
drop table t1;
drop user mysqltest_1@localhost;
diff --git a/mysql-test/r/key.result b/mysql-test/r/key.result
index bc9d3935bc4..a6f05143b3e 100644
--- a/mysql-test/r/key.result
+++ b/mysql-test/r/key.result
@@ -330,6 +330,16 @@ alter table t1 add key (c1,c1,c2);
ERROR 42S21: Duplicate column name 'c1'
drop table t1;
create table t1 (
+i1 INT NOT NULL,
+i2 INT NOT NULL,
+UNIQUE i1idx (i1),
+UNIQUE i2idx (i2));
+desc t1;
+Field Type Null Key Default Extra
+i1 int(11) NO UNI
+i2 int(11) NO UNI
+drop table t1;
+create table t1 (
c1 int,
c2 varchar(20) not null,
primary key (c1),
diff --git a/mysql-test/r/lock_multi.result b/mysql-test/r/lock_multi.result
index 2188d58e526..c80108f723a 100644
--- a/mysql-test/r/lock_multi.result
+++ b/mysql-test/r/lock_multi.result
@@ -67,6 +67,21 @@ Select_priv
N
use test;
use test;
+CREATE TABLE t1 (c1 int);
+LOCK TABLE t1 WRITE;
+ FLUSH TABLES WITH READ LOCK;
+CREATE TABLE t2 (c1 int);
+UNLOCK TABLES;
+UNLOCK TABLES;
+DROP TABLE t1, t2;
+CREATE TABLE t1 (c1 int);
+LOCK TABLE t1 WRITE;
+ FLUSH TABLES WITH READ LOCK;
+CREATE TABLE t2 AS SELECT * FROM t1;
+ERROR HY000: Table 't2' was not locked with LOCK TABLES
+UNLOCK TABLES;
+UNLOCK TABLES;
+DROP TABLE t1;
create table t1 (f1 int(12) unsigned not null auto_increment, primary key(f1)) engine=innodb;
lock tables t1 write;
alter table t1 auto_increment=0; alter table t1 auto_increment=0; alter table t1 auto_increment=0; alter table t1 auto_increment=0; alter table t1 auto_increment=0; //
diff --git a/mysql-test/r/mysqldump.result b/mysql-test/r/mysqldump.result
index e496e1ef35d..f9714e067e6 100644
--- a/mysql-test/r/mysqldump.result
+++ b/mysql-test/r/mysqldump.result
@@ -2743,6 +2743,25 @@ end AFTER # root@localhost
drop trigger tr1;
drop trigger tr2;
drop table t1, t2;
+create table t (qty int, price int);
+insert into t values(3, 50);
+insert into t values(5, 51);
+create view v1 as select qty, price, qty*price as value from t;
+create view v2 as select qty from v1;
+mysqldump {
+/*!50001 CREATE ALGORITHM=UNDEFINED */
+/*!50013 DEFINER=`root`@`localhost` SQL SECURITY DEFINER */
+/*!50001 VIEW `v1` AS select `t`.`qty` AS `qty`,`t`.`price` AS `price`,(`t`.`qty` * `t`.`price`) AS `value` from `t` */;
+
+} mysqldump {
+/*!50001 CREATE ALGORITHM=UNDEFINED */
+/*!50013 DEFINER=`root`@`localhost` SQL SECURITY DEFINER */
+/*!50001 VIEW `v2` AS select `v1`.`qty` AS `qty` from `v1` */;
+
+} mysqldump
+drop view v1;
+drop view v2;
+drop table t;
/*!50003 CREATE FUNCTION `f`() RETURNS bigint(20)
return 42 */|
/*!50003 CREATE PROCEDURE `p`()
@@ -2757,6 +2776,15 @@ p CREATE DEFINER=`root`@`localhost` PROCEDURE `p`()
select 42
drop function f;
drop procedure p;
+create table t1 ( id serial );
+create view v1 as select * from t1;
+drop table t1;
+mysqldump {
+
+-- failed on view `v1`: CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `test`.`t1`.`id` AS `id` from `t1`
+
+} mysqldump
+drop view v1;
create database mysqldump_test_db;
use mysqldump_test_db;
create table t1 (id int);
diff --git a/mysql-test/r/ps.result b/mysql-test/r/ps.result
index abebfc8cd93..3ce2f5169e2 100644
--- a/mysql-test/r/ps.result
+++ b/mysql-test/r/ps.result
@@ -1158,3 +1158,108 @@ Warnings:
Error 1146 Table 'test.t4' doesn't exist
deallocate prepare stmt;
drop table t1, t2, t3;
+create database mysqltest_long_database_name_to_thrash_heap;
+use test;
+create table t1 (i int);
+prepare stmt from "alter table test.t1 rename t1";
+use mysqltest_long_database_name_to_thrash_heap;
+execute stmt;
+show tables like 't1';
+Tables_in_mysqltest_long_database_name_to_thrash_heap (t1)
+prepare stmt from "alter table test.t1 rename t1";
+use test;
+execute stmt;
+show tables like 't1';
+Tables_in_test (t1)
+use mysqltest_long_database_name_to_thrash_heap;
+show tables like 't1';
+Tables_in_mysqltest_long_database_name_to_thrash_heap (t1)
+t1
+deallocate prepare stmt;
+use mysqltest_long_database_name_to_thrash_heap;
+prepare stmt_create from "create table t1 (i int)";
+prepare stmt_insert from "insert into t1 (i) values (1)";
+prepare stmt_update from "update t1 set i=2";
+prepare stmt_delete from "delete from t1 where i=2";
+prepare stmt_select from "select * from t1";
+prepare stmt_alter from "alter table t1 add column (b int)";
+prepare stmt_alter1 from "alter table t1 drop column b";
+prepare stmt_analyze from "analyze table t1";
+prepare stmt_optimize from "optimize table t1";
+prepare stmt_show from "show tables like 't1'";
+prepare stmt_truncate from "truncate table t1";
+prepare stmt_drop from "drop table t1";
+drop table t1;
+use test;
+execute stmt_create;
+show tables like 't1';
+Tables_in_test (t1)
+use mysqltest_long_database_name_to_thrash_heap;
+show tables like 't1';
+Tables_in_mysqltest_long_database_name_to_thrash_heap (t1)
+t1
+use test;
+execute stmt_insert;
+select * from mysqltest_long_database_name_to_thrash_heap.t1;
+i
+1
+execute stmt_update;
+select * from mysqltest_long_database_name_to_thrash_heap.t1;
+i
+2
+execute stmt_delete;
+execute stmt_select;
+i
+execute stmt_alter;
+show columns from mysqltest_long_database_name_to_thrash_heap.t1;
+Field Type Null Key Default Extra
+i int(11) YES NULL
+b int(11) YES NULL
+execute stmt_alter1;
+show columns from mysqltest_long_database_name_to_thrash_heap.t1;
+Field Type Null Key Default Extra
+i int(11) YES NULL
+execute stmt_analyze;
+Table Op Msg_type Msg_text
+mysqltest_long_database_name_to_thrash_heap.t1 analyze status Table is already up to date
+execute stmt_optimize;
+Table Op Msg_type Msg_text
+mysqltest_long_database_name_to_thrash_heap.t1 optimize status Table is already up to date
+execute stmt_show;
+Tables_in_mysqltest_long_database_name_to_thrash_heap (t1)
+t1
+execute stmt_truncate;
+execute stmt_drop;
+show tables like 't1';
+Tables_in_test (t1)
+use mysqltest_long_database_name_to_thrash_heap;
+show tables like 't1';
+Tables_in_mysqltest_long_database_name_to_thrash_heap (t1)
+drop database mysqltest_long_database_name_to_thrash_heap;
+prepare stmt_create from "create table t1 (i int)";
+ERROR 3D000: No database selected
+prepare stmt_insert from "insert into t1 (i) values (1)";
+ERROR 3D000: No database selected
+prepare stmt_update from "update t1 set i=2";
+ERROR 3D000: No database selected
+prepare stmt_delete from "delete from t1 where i=2";
+ERROR 3D000: No database selected
+prepare stmt_select from "select * from t1";
+ERROR 3D000: No database selected
+prepare stmt_alter from "alter table t1 add column (b int)";
+ERROR 3D000: No database selected
+prepare stmt_alter1 from "alter table t1 drop column b";
+ERROR 3D000: No database selected
+prepare stmt_analyze from "analyze table t1";
+ERROR 3D000: No database selected
+prepare stmt_optimize from "optimize table t1";
+ERROR 3D000: No database selected
+prepare stmt_show from "show tables like 't1'";
+ERROR 3D000: No database selected
+prepare stmt_truncate from "truncate table t1";
+ERROR 3D000: No database selected
+prepare stmt_drop from "drop table t1";
+ERROR 3D000: No database selected
+create temporary table t1 (i int);
+ERROR 3D000: No database selected
+use test;
diff --git a/mysql-test/r/query_cache.result b/mysql-test/r/query_cache.result
index 4bf4ebb910d..926a980f9c4 100644
--- a/mysql-test/r/query_cache.result
+++ b/mysql-test/r/query_cache.result
@@ -231,7 +231,7 @@ explain extended select benchmark(1,1) from t1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 system NULL NULL NULL NULL 0 const row not found
Warnings:
-Note 1003 select sql_no_cache benchmark(1,1) AS `benchmark(1,1)` from `test`.`t1`
+Note 1003 select benchmark(1,1) AS `benchmark(1,1)` from `test`.`t1`
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
diff --git a/mysql-test/r/rpl_get_lock.result b/mysql-test/r/rpl_get_lock.result
index 26f33bfb42c..da300d99964 100644
--- a/mysql-test/r/rpl_get_lock.result
+++ b/mysql-test/r/rpl_get_lock.result
@@ -25,7 +25,7 @@ explain extended select is_free_lock("lock"), is_used_lock("lock");
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select sql_no_cache is_free_lock(_latin1'lock') AS `is_free_lock("lock")`,is_used_lock(_latin1'lock') AS `is_used_lock("lock")`
+Note 1003 select is_free_lock(_latin1'lock') AS `is_free_lock("lock")`,is_used_lock(_latin1'lock') AS `is_used_lock("lock")`
select is_free_lock("lock2");
is_free_lock("lock2")
1
diff --git a/mysql-test/r/rpl_master_pos_wait.result b/mysql-test/r/rpl_master_pos_wait.result
index e92d1ffa361..2f3e47999cf 100644
--- a/mysql-test/r/rpl_master_pos_wait.result
+++ b/mysql-test/r/rpl_master_pos_wait.result
@@ -11,7 +11,7 @@ explain extended select master_pos_wait('master-bin.999999',0,2);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select sql_no_cache master_pos_wait(_latin1'master-bin.999999',0,2) AS `master_pos_wait('master-bin.999999',0,2)`
+Note 1003 select master_pos_wait(_latin1'master-bin.999999',0,2) AS `master_pos_wait('master-bin.999999',0,2)`
select master_pos_wait('master-bin.999999',0);
stop slave sql_thread;
master_pos_wait('master-bin.999999',0)
diff --git a/mysql-test/r/show_check.result b/mysql-test/r/show_check.result
index 61a820b4469..994501767ba 100644
--- a/mysql-test/r/show_check.result
+++ b/mysql-test/r/show_check.result
@@ -565,3 +565,63 @@ DROP TABLE tyt2;
DROP TABLE urkunde;
SHOW TABLES FROM non_existing_database;
ERROR 42000: Unknown database 'non_existing_database'
+DROP VIEW IF EXISTS v1;
+DROP PROCEDURE IF EXISTS p1;
+CREATE VIEW v1 AS SELECT 1;
+SHOW CREATE VIEW v1;
+View Create View
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select 1 AS `1`
+DROP VIEW v1;
+CREATE VIEW v1 AS SELECT SQL_CACHE 1;
+SHOW CREATE VIEW v1;
+View Create View
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_cache 1 AS `1`
+DROP VIEW v1;
+CREATE VIEW v1 AS SELECT SQL_NO_CACHE 1;
+SHOW CREATE VIEW v1;
+View Create View
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache 1 AS `1`
+DROP VIEW v1;
+CREATE VIEW v1 AS SELECT NOW();
+SHOW CREATE VIEW v1;
+View Create View
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select now() AS `NOW()`
+DROP VIEW v1;
+CREATE VIEW v1 AS SELECT SQL_CACHE NOW();
+SHOW CREATE VIEW v1;
+View Create View
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_cache now() AS `NOW()`
+DROP VIEW v1;
+CREATE VIEW v1 AS SELECT SQL_NO_CACHE NOW();
+SHOW CREATE VIEW v1;
+View Create View
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache now() AS `NOW()`
+DROP VIEW v1;
+CREATE VIEW v1 AS SELECT SQL_CACHE SQL_NO_CACHE NOW();
+SHOW CREATE VIEW v1;
+View Create View
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache now() AS `NOW()`
+DROP VIEW v1;
+CREATE VIEW v1 AS SELECT SQL_NO_CACHE SQL_CACHE NOW();
+SHOW CREATE VIEW v1;
+View Create View
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache now() AS `NOW()`
+DROP VIEW v1;
+CREATE VIEW v1 AS SELECT SQL_CACHE SQL_NO_CACHE SQL_CACHE NOW();
+SHOW CREATE VIEW v1;
+View Create View
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache now() AS `NOW()`
+DROP VIEW v1;
+CREATE PROCEDURE p1()
+BEGIN
+SET @s= 'CREATE VIEW v1 AS SELECT SQL_CACHE 1';
+PREPARE stmt FROM @s;
+EXECUTE stmt;
+DROP PREPARE stmt;
+END |
+CALL p1();
+SHOW CREATE VIEW v1;
+View Create View
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_cache 1 AS `1`
+DROP PROCEDURE p1;
+DROP VIEW v1;
diff --git a/mysql-test/r/sp-prelocking.result b/mysql-test/r/sp-prelocking.result
index 2335513b28a..7d8dd862748 100644
--- a/mysql-test/r/sp-prelocking.result
+++ b/mysql-test/r/sp-prelocking.result
@@ -237,3 +237,21 @@ deallocate prepare stmt;
drop table t1;
drop view v1, v2, v3;
drop function bug15683;
+drop table if exists t1, t2, t3;
+drop function if exists bug19634;
+create table t1 (id int, data int);
+create table t2 (id int);
+create table t3 (data int);
+create function bug19634() returns int return (select count(*) from t3);
+prepare stmt from "delete t1 from t1, t2 where t1.id = t2.id and bug19634()";
+execute stmt;
+execute stmt;
+deallocate prepare stmt;
+create trigger t1_bi before delete on t1 for each row insert into t3 values (old.data);
+prepare stmt from "delete t1 from t1, t2 where t1.id = t2.id";
+execute stmt;
+execute stmt;
+deallocate prepare stmt;
+drop function bug19634;
+drop table t1, t2, t3;
+End of 5.0 tests
diff --git a/mysql-test/r/sp-security.result b/mysql-test/r/sp-security.result
index 04f2f58ba37..a53b4c4d246 100644
--- a/mysql-test/r/sp-security.result
+++ b/mysql-test/r/sp-security.result
@@ -420,3 +420,34 @@ ERROR HY000: There is no 'mysqltest_1'@'localhost' registered
---> connection: root
DROP USER mysqltest_2@localhost;
DROP DATABASE mysqltest;
+GRANT USAGE ON *.* TO user19857@localhost IDENTIFIED BY 'meow';
+GRANT SELECT, INSERT, UPDATE, DELETE, CREATE ROUTINE, ALTER ROUTINE ON test.* TO
+user19857@localhost;
+SELECT Host,User,Password FROM mysql.user WHERE User='user19857';
+Host User Password
+localhost user19857 *82DC221D557298F6CE9961037DB1C90604792F5C
+
+---> connection: mysqltest_2_con
+use test;
+CREATE PROCEDURE sp19857() DETERMINISTIC
+BEGIN
+DECLARE a INT;
+SET a=1;
+SELECT a;
+END //
+SHOW CREATE PROCEDURE test.sp19857;
+Procedure sql_mode Create Procedure
+sp19857 CREATE DEFINER=`user19857`@`localhost` PROCEDURE `sp19857`()
+ DETERMINISTIC
+BEGIN
+DECLARE a INT;
+SET a=1;
+SELECT a;
+END
+DROP PROCEDURE IF EXISTS test.sp19857;
+
+---> connection: root
+SELECT Host,User,Password FROM mysql.user WHERE User='user19857';
+Host User Password
+localhost user19857 *82DC221D557298F6CE9961037DB1C90604792F5C
+DROP USER user19857@localhost;
diff --git a/mysql-test/r/sp.result b/mysql-test/r/sp.result
index ff378f1f43b..96bf2f01f86 100644
--- a/mysql-test/r/sp.result
+++ b/mysql-test/r/sp.result
@@ -4990,4 +4990,71 @@ CALL bug18037_p2()|
DROP FUNCTION bug18037_f1|
DROP PROCEDURE bug18037_p1|
DROP PROCEDURE bug18037_p2|
+use test|
+create table t3 (i int)|
+insert into t3 values (1), (2)|
+create database mysqltest1|
+use mysqltest1|
+create function bug17199() returns varchar(2) deterministic return 'ok'|
+use test|
+select *, mysqltest1.bug17199() from t3|
+i mysqltest1.bug17199()
+1 ok
+2 ok
+use mysqltest1|
+create function bug18444(i int) returns int no sql deterministic return i + 1|
+use test|
+select mysqltest1.bug18444(i) from t3|
+mysqltest1.bug18444(i)
+2
+3
+drop database mysqltest1|
+create database mysqltest1 charset=utf8|
+create database mysqltest2 charset=utf8|
+create procedure mysqltest1.p1()
+begin
+-- alters the character set of the current default database
+alter database character set koi8r;
+end|
+use mysqltest1|
+call p1()|
+show create database mysqltest1|
+Database Create Database
+mysqltest1 CREATE DATABASE `mysqltest1` /*!40100 DEFAULT CHARACTER SET koi8r */
+show create database mysqltest2|
+Database Create Database
+mysqltest2 CREATE DATABASE `mysqltest2` /*!40100 DEFAULT CHARACTER SET utf8 */
+alter database mysqltest1 character set utf8|
+use mysqltest2|
+call mysqltest1.p1()|
+show create database mysqltest1|
+Database Create Database
+mysqltest1 CREATE DATABASE `mysqltest1` /*!40100 DEFAULT CHARACTER SET koi8r */
+show create database mysqltest2|
+Database Create Database
+mysqltest2 CREATE DATABASE `mysqltest2` /*!40100 DEFAULT CHARACTER SET utf8 */
+drop database mysqltest1|
+drop database mysqltest2|
+use test|
+drop table if exists t3|
+drop procedure if exists bug15217|
+create table t3 as select 1|
+create procedure bug15217()
+begin
+declare var1 char(255);
+declare cur1 cursor for select * from t3;
+open cur1;
+fetch cur1 into var1;
+select concat('data was: /', var1, '/');
+close cur1;
+end |
+call bug15217()|
+concat('data was: /', var1, '/')
+data was: /1/
+flush tables |
+call bug15217()|
+concat('data was: /', var1, '/')
+data was: /1/
+drop table t3|
+drop procedure bug15217|
drop table t1,t2;
diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result
index e4bc59e4d19..07630ffee0f 100644
--- a/mysql-test/r/subselect.result
+++ b/mysql-test/r/subselect.result
@@ -1019,19 +1019,19 @@ id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system NULL NULL NULL NULL 0 const row not found
2 UNCACHEABLE SUBQUERY t1 system NULL NULL NULL NULL 0 const row not found
Warnings:
-Note 1003 select sql_no_cache (select sql_no_cache rand() AS `RAND()` from `test`.`t1`) AS `(SELECT RAND() FROM t1)` from `test`.`t1`
+Note 1003 select (select rand() AS `RAND()` from `test`.`t1`) AS `(SELECT RAND() FROM t1)` from `test`.`t1`
EXPLAIN EXTENDED SELECT (SELECT ENCRYPT('test') FROM t1) FROM t1;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system NULL NULL NULL NULL 0 const row not found
2 UNCACHEABLE SUBQUERY t1 system NULL NULL NULL NULL 0 const row not found
Warnings:
-Note 1003 select sql_no_cache (select sql_no_cache encrypt(_latin1'test') AS `ENCRYPT('test')` from `test`.`t1`) AS `(SELECT ENCRYPT('test') FROM t1)` from `test`.`t1`
+Note 1003 select (select encrypt(_latin1'test') AS `ENCRYPT('test')` from `test`.`t1`) AS `(SELECT ENCRYPT('test') FROM t1)` from `test`.`t1`
EXPLAIN EXTENDED SELECT (SELECT BENCHMARK(1,1) FROM t1) FROM t1;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system NULL NULL NULL NULL 0 const row not found
2 UNCACHEABLE SUBQUERY t1 system NULL NULL NULL NULL 0 const row not found
Warnings:
-Note 1003 select sql_no_cache (select sql_no_cache benchmark(1,1) AS `BENCHMARK(1,1)` from `test`.`t1`) AS `(SELECT BENCHMARK(1,1) FROM t1)` from `test`.`t1`
+Note 1003 select (select benchmark(1,1) AS `BENCHMARK(1,1)` from `test`.`t1`) AS `(SELECT BENCHMARK(1,1) FROM t1)` from `test`.`t1`
drop table t1;
CREATE TABLE `t1` (
`mot` varchar(30) character set latin1 NOT NULL default '',
@@ -1126,7 +1126,7 @@ id select_type table type possible_keys key key_len ref rows Extra
2 UNCACHEABLE SUBQUERY t1 ALL NULL NULL NULL NULL 3
3 UNCACHEABLE SUBQUERY t1 ALL NULL NULL NULL NULL 3
Warnings:
-Note 1003 select sql_no_cache `test`.`t1`.`a` AS `a`,(select sql_no_cache (select sql_no_cache rand() AS `rand()` from `test`.`t1` limit 1) AS `(select rand() from t1 limit 1)` from `test`.`t1` limit 1) AS `(select (select rand() from t1 limit 1) from t1 limit 1)` from `test`.`t1`
+Note 1003 select `test`.`t1`.`a` AS `a`,(select (select rand() AS `rand()` from `test`.`t1` limit 1) AS `(select rand() from t1 limit 1)` from `test`.`t1` limit 1) AS `(select (select rand() from t1 limit 1) from t1 limit 1)` from `test`.`t1`
drop table t1;
select t1.Continent, t2.Name, t2.Population from t1 LEFT JOIN t2 ON t1.Code = t2.Country where t2.Population IN (select max(t2.Population) AS Population from t2, t1 where t2.Country = t1.Code group by Continent);
ERROR 42S02: Table 'test.t1' doesn't exist
diff --git a/mysql-test/r/symlink.result b/mysql-test/r/symlink.result
index 3ce7cc0c835..272836c450a 100644
--- a/mysql-test/r/symlink.result
+++ b/mysql-test/r/symlink.result
@@ -74,18 +74,24 @@ t9 CREATE TABLE `t9` (
) ENGINE=MyISAM AUTO_INCREMENT=16725 DEFAULT CHARSET=latin1 DATA DIRECTORY='MYSQLTEST_VARDIR/tmp/' INDEX DIRECTORY='MYSQLTEST_VARDIR/run/'
drop database mysqltest;
create table t1 (a int not null) engine=myisam;
+Warnings:
+Warning 0 DATA DIRECTORY option ignored
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) NOT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
alter table t1 add b int;
+Warnings:
+Warning 0 DATA DIRECTORY option ignored
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) NOT NULL,
`b` int(11) default NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
+Warnings:
+Warning 0 INDEX DIRECTORY option ignored
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
diff --git a/mysql-test/r/trigger.result b/mysql-test/r/trigger.result
index d4791c6b117..f3e797d2344 100644
--- a/mysql-test/r/trigger.result
+++ b/mysql-test/r/trigger.result
@@ -295,7 +295,7 @@ create trigger trg before insert on t1 for each row set @a:=1;
create trigger trg after insert on t1 for each row set @a:=1;
ERROR HY000: Trigger already exists
create trigger trg2 before insert on t1 for each row set @a:=1;
-ERROR HY000: Trigger already exists
+ERROR 42000: This version of MySQL doesn't yet support 'multiple triggers with the same action time and event for one table'
create trigger trg before insert on t3 for each row set @a:=1;
ERROR HY000: Trigger already exists
create trigger trg2 before insert on t3 for each row set @a:=1;
@@ -1078,3 +1078,15 @@ i1
43
51
DROP TABLE t1;
+create trigger wont_work after update on mysql.user for each row
+begin
+set @a:= 1;
+end|
+ERROR HY000: Triggers can not be created on system tables
+use mysql|
+create trigger wont_work after update on event for each row
+begin
+set @a:= 1;
+end|
+ERROR HY000: Triggers can not be created on system tables
+End of 5.0 tests
diff --git a/mysql-test/r/type_blob.result b/mysql-test/r/type_blob.result
index b366b1ed755..4fd220045c2 100644
--- a/mysql-test/r/type_blob.result
+++ b/mysql-test/r/type_blob.result
@@ -517,7 +517,7 @@ coercibility(load_file('../../std_data/words.dat'));
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select sql_no_cache charset(load_file(_latin1'../../std_data/words.dat')) AS `charset(load_file('../../std_data/words.dat'))`,collation(load_file(_latin1'../../std_data/words.dat')) AS `collation(load_file('../../std_data/words.dat'))`,coercibility(load_file(_latin1'../../std_data/words.dat')) AS `coercibility(load_file('../../std_data/words.dat'))`
+Note 1003 select charset(load_file(_latin1'../../std_data/words.dat')) AS `charset(load_file('../../std_data/words.dat'))`,collation(load_file(_latin1'../../std_data/words.dat')) AS `collation(load_file('../../std_data/words.dat'))`,coercibility(load_file(_latin1'../../std_data/words.dat')) AS `coercibility(load_file('../../std_data/words.dat'))`
update t1 set imagem=load_file('../../std_data/words.dat') where id=1;
select if(imagem is null, "ERROR", "OK"),length(imagem) from t1 where id = 1;
if(imagem is null, "ERROR", "OK") length(imagem)
diff --git a/mysql-test/r/type_timestamp.result b/mysql-test/r/type_timestamp.result
index 61ed6bbabf3..0817cc3b6c7 100644
--- a/mysql-test/r/type_timestamp.result
+++ b/mysql-test/r/type_timestamp.result
@@ -1,4 +1,5 @@
drop table if exists t1,t2;
+set time_zone="+03:00";
CREATE TABLE t1 (a int, t timestamp);
CREATE TABLE t2 (a int, t datetime);
SET TIMESTAMP=1234;
@@ -491,3 +492,4 @@ a b c
5 NULL 2001-09-09 04:46:59
6 NULL 2006-06-06 06:06:06
drop table t1;
+set time_zone= @@global.time_zone;
diff --git a/mysql-test/r/udf.result b/mysql-test/r/udf.result
index be52fd7f87c..484c42c41bf 100644
--- a/mysql-test/r/udf.result
+++ b/mysql-test/r/udf.result
@@ -76,6 +76,24 @@ call XXX2();
metaphon(testval)
HL
drop procedure xxx2;
+CREATE TABLE bug19904(n INT, v varchar(10));
+INSERT INTO bug19904 VALUES (1,'one'),(2,'two'),(NULL,NULL),(3,'three'),(4,'four');
+SELECT myfunc_double(n) AS f FROM bug19904;
+f
+49.00
+50.00
+NULL
+51.00
+52.00
+SELECT metaphon(v) AS f FROM bug19904;
+f
+ON
+TW
+NULL
+0R
+FR
+DROP TABLE bug19904;
+End of 5.0 tests.
DROP FUNCTION metaphon;
DROP FUNCTION myfunc_double;
DROP FUNCTION myfunc_nonexist;
diff --git a/mysql-test/r/variables.result b/mysql-test/r/variables.result
index 8cee60cf49a..a0e516d2397 100644
--- a/mysql-test/r/variables.result
+++ b/mysql-test/r/variables.result
@@ -75,7 +75,7 @@ explain extended select @t1:=(@t2:=1)+@t3:=4,@t1,@t2,@t3;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select sql_no_cache (@t1:=((@t2:=1) + (@t3:=4))) AS `@t1:=(@t2:=1)+@t3:=4`,(@t1) AS `@t1`,(@t2) AS `@t2`,(@t3) AS `@t3`
+Note 1003 select (@t1:=((@t2:=1) + (@t3:=4))) AS `@t1:=(@t2:=1)+@t3:=4`,(@t1) AS `@t1`,(@t2) AS `@t2`,(@t3) AS `@t3`
select @t5;
@t5
1.23456
@@ -135,7 +135,7 @@ explain extended select last_insert_id(345);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select sql_no_cache last_insert_id(345) AS `last_insert_id(345)`
+Note 1003 select last_insert_id(345) AS `last_insert_id(345)`
select @@IDENTITY,last_insert_id(), @@identity;
@@IDENTITY last_insert_id() @@identity
345 345 345
@@ -143,7 +143,7 @@ explain extended select @@IDENTITY,last_insert_id(), @@identity;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select sql_no_cache 345 AS `@@IDENTITY`,last_insert_id() AS `last_insert_id()`,345 AS `@@identity`
+Note 1003 select 345 AS `@@IDENTITY`,last_insert_id() AS `last_insert_id()`,345 AS `@@identity`
set big_tables=OFF, big_tables=ON, big_tables=0, big_tables=1, big_tables="OFF", big_tables="ON";
set global concurrent_insert=2;
show variables like 'concurrent_insert';
@@ -421,6 +421,28 @@ set tmp_table_size=100;
set tx_isolation="READ-COMMITTED";
set wait_timeout=100;
set log_warnings=1;
+select @@session.insert_id;
+@@session.insert_id
+1
+set @save_insert_id=@@session.insert_id;
+set session insert_id=20;
+select @@session.insert_id;
+@@session.insert_id
+20
+set session last_insert_id=100;
+select @@session.insert_id;
+@@session.insert_id
+20
+select @@session.last_insert_id;
+@@session.last_insert_id
+100
+select @@session.insert_id;
+@@session.insert_id
+20
+set @@session.insert_id=@save_insert_id;
+select @@session.insert_id;
+@@session.insert_id
+1
create table t1 (a int not null auto_increment, primary key(a));
create table t2 (a int not null auto_increment, primary key(a));
insert into t1 values(null),(null),(null);
diff --git a/mysql-test/r/view.result b/mysql-test/r/view.result
index 5bb407f4256..72cffb9531c 100644
--- a/mysql-test/r/view.result
+++ b/mysql-test/r/view.result
@@ -672,7 +672,7 @@ drop table t1;
CREATE VIEW v1 (f1,f2,f3,f4) AS SELECT connection_id(), pi(), current_user(), version();
SHOW CREATE VIEW v1;
View Create View
-v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache connection_id() AS `f1`,pi() AS `f2`,current_user() AS `f3`,version() AS `f4`
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select connection_id() AS `f1`,pi() AS `f2`,current_user() AS `f3`,version() AS `f4`
drop view v1;
create table t1 (s1 int);
create table t2 (s2 int);
@@ -787,7 +787,7 @@ create function `f``1` () returns int return 5;
create view v1 as select test.`f``1` ();
show create view v1;
View Create View
-v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache `test`.`f``1`() AS `test.``f````1`` ()`
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `test`.`f``1`() AS `test.``f````1`` ()`
select * from v1;
test.`f``1` ()
5
@@ -1868,14 +1868,14 @@ create table t2 (b timestamp default now());
create view v1 as select a,b,t1.a < now() from t1,t2 where t1.a < now();
SHOW CREATE VIEW v1;
View Create View
-v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache `t1`.`a` AS `a`,`t2`.`b` AS `b`,(`t1`.`a` < now()) AS `t1.a < now()` from (`t1` join `t2`) where (`t1`.`a` < now())
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`a` AS `a`,`t2`.`b` AS `b`,(`t1`.`a` < now()) AS `t1.a < now()` from (`t1` join `t2`) where (`t1`.`a` < now())
drop view v1;
drop table t1, t2;
CREATE TABLE t1 ( a varchar(50) );
CREATE VIEW v1 AS SELECT * FROM t1 WHERE a = CURRENT_USER();
SHOW CREATE VIEW v1;
View Create View
-v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache `t1`.`a` AS `a` from `t1` where (`t1`.`a` = current_user())
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`a` AS `a` from `t1` where (`t1`.`a` = current_user())
DROP VIEW v1;
CREATE VIEW v1 AS SELECT * FROM t1 WHERE a = VERSION();
SHOW CREATE VIEW v1;
@@ -1885,7 +1885,7 @@ DROP VIEW v1;
CREATE VIEW v1 AS SELECT * FROM t1 WHERE a = DATABASE();
SHOW CREATE VIEW v1;
View Create View
-v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache `t1`.`a` AS `a` from `t1` where (`t1`.`a` = database())
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`a` AS `a` from `t1` where (`t1`.`a` = database())
DROP VIEW v1;
DROP TABLE t1;
CREATE TABLE t1 (col1 time);
@@ -2538,7 +2538,7 @@ show create view v1;
drop view v1;
//
View Create View
-v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache `test`.`t1`.`id` AS `id` from `t1`
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `test`.`t1`.`id` AS `id` from `t1`
create table t1(f1 int, f2 int);
create view v1 as select ta.f1 as a, tb.f1 as b from t1 ta, t1 tb where ta.f1=tb
.f1 and ta.f2=tb.f2;
@@ -2683,7 +2683,7 @@ SELECT (year(now())-year(DOB)) AS Age
FROM t1 HAVING Age < 75;
SHOW CREATE VIEW v1;
View Create View
-v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache (year(now()) - year(`t1`.`DOB`)) AS `Age` from `t1` having (`Age` < 75)
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select (year(now()) - year(`t1`.`DOB`)) AS `Age` from `t1` having (`Age` < 75)
SELECT (year(now())-year(DOB)) AS Age FROM t1 HAVING Age < 75;
Age
42
diff --git a/mysql-test/t/bdb.test b/mysql-test/t/bdb.test
index d017d91bfb1..ec05eeb3c34 100644
--- a/mysql-test/t/bdb.test
+++ b/mysql-test/t/bdb.test
@@ -1019,4 +1019,39 @@ commit;
alter table t1 add primary key(a);
drop table t1;
+
+#
+# Bug #16206: Superfluous COMMIT event in binlog when updating BDB in autocommit mode
+#
+set autocommit=1;
+
+let $VERSION=`select version()`;
+
+reset master;
+create table bug16206 (a int);
+insert into bug16206 values(1);
+start transaction;
+insert into bug16206 values(2);
+commit;
+--replace_result $VERSION VERSION
+--replace_column 1 f 2 n 5 n
+show binlog events;
+drop table bug16206;
+
+reset master;
+create table bug16206 (a int) engine= bdb;
+insert into bug16206 values(0);
+insert into bug16206 values(1);
+start transaction;
+insert into bug16206 values(2);
+commit;
+insert into bug16206 values(3);
+--replace_result $VERSION VERSION
+--replace_column 1 f 2 n 5 n
+show binlog events;
+drop table bug16206;
+
+set autocommit=0;
+
+
--echo End of 5.0 tests
diff --git a/mysql-test/t/create.test b/mysql-test/t/create.test
index e22c2b5c426..07edbf206fe 100644
--- a/mysql-test/t/create.test
+++ b/mysql-test/t/create.test
@@ -517,7 +517,7 @@ DROP TABLE t12913;
create database mysqltest;
use mysqltest;
drop database mysqltest;
---error 1102
+--error ER_NO_DB_ERROR
create table test.t1 like x;
--disable_warnings
drop table if exists test.t1;
diff --git a/mysql-test/t/ctype_ucs2_def-master.opt b/mysql-test/t/ctype_ucs2_def-master.opt
index 1f884ff1d67..a0b5b061860 100644
--- a/mysql-test/t/ctype_ucs2_def-master.opt
+++ b/mysql-test/t/ctype_ucs2_def-master.opt
@@ -1 +1 @@
---default-character-set=ucs2 --default-collation=ucs2_unicode_ci
+--default-collation=ucs2_unicode_ci --default-character-set=ucs2
diff --git a/mysql-test/t/ctype_ucs2_def.test b/mysql-test/t/ctype_ucs2_def.test
index fb174d551cf..00f636d79dc 100644
--- a/mysql-test/t/ctype_ucs2_def.test
+++ b/mysql-test/t/ctype_ucs2_def.test
@@ -1,4 +1,9 @@
#
+# MySQL Bug#15276: MySQL ignores collation-server
+#
+show variables like 'collation_server';
+
+#
# Bug#18004 Connecting crashes server when default charset is UCS2
#
show variables like "%character_set_ser%";
diff --git a/mysql-test/t/distinct.test b/mysql-test/t/distinct.test
index 09f07c2852f..61250a7105e 100644
--- a/mysql-test/t/distinct.test
+++ b/mysql-test/t/distinct.test
@@ -349,6 +349,34 @@ SELECT DISTINCT 2, a, b FROM t2;
SELECT DISTINCT a, 2, b FROM t2;
DROP TABLE t1,t2;
+#
+# Bug#16458: Simple SELECT FOR UPDATE causes "Result Set not updatable"
+# error.
+#
+CREATE TABLE t1(a INT PRIMARY KEY, b INT);
+INSERT INTO t1 VALUES (1,1), (2,1), (3,1);
+EXPLAIN SELECT DISTINCT a FROM t1;
+EXPLAIN SELECT DISTINCT a,b FROM t1;
+EXPLAIN SELECT DISTINCT t1_1.a, t1_1.b FROM t1 t1_1, t1 t1_2;
+EXPLAIN SELECT DISTINCT t1_1.a, t1_1.b FROM t1 t1_1, t1 t1_2
+ WHERE t1_1.a = t1_2.a;
+EXPLAIN SELECT a FROM t1 GROUP BY a;
+EXPLAIN SELECT a,b FROM t1 GROUP BY a,b;
+EXPLAIN SELECT DISTINCT a,b FROM t1 GROUP BY a,b;
+
+CREATE TABLE t2(a INT, b INT, c INT, d INT, PRIMARY KEY (a,b));
+INSERT INTO t2 VALUES (1,1,1,50), (1,2,3,40), (2,1,3,4);
+EXPLAIN SELECT DISTINCT a FROM t2;
+EXPLAIN SELECT DISTINCT a,a FROM t2;
+EXPLAIN SELECT DISTINCT b,a FROM t2;
+EXPLAIN SELECT DISTINCT a,c FROM t2;
+EXPLAIN SELECT DISTINCT c,a,b FROM t2;
+
+EXPLAIN SELECT DISTINCT a,b,d FROM t2 GROUP BY c,b,d;
+CREATE UNIQUE INDEX c_b_unq ON t2 (c,b);
+EXPLAIN SELECT DISTINCT a,b,d FROM t2 GROUP BY c,b,d;
+
+DROP TABLE t1,t2;
# Bug 9784 DISTINCT IFNULL truncates data
#
diff --git a/mysql-test/t/federated.test b/mysql-test/t/federated.test
index 80b31c610a2..a8b16edc80a 100644
--- a/mysql-test/t/federated.test
+++ b/mysql-test/t/federated.test
@@ -1254,6 +1254,10 @@ SELECT LAST_INSERT_ID();
INSERT INTO federated.t1 VALUES ();
SELECT LAST_INSERT_ID();
SELECT * FROM federated.t1;
+DROP TABLE federated.t1;
+
+connection slave;
+DROP TABLE federated.t1;
#
# Bug#17377 Federated Engine returns wrong Data, always the rows
@@ -1309,5 +1313,56 @@ DROP TABLE federated.t1;
connection slave;
DROP TABLE federated.bug_17377_table;
+#
+# BUG 19773 Crash when using multi-table updates, deletes
+# with federated tables
+#
+connection slave;
+create table federated.t1 (i1 int, i2 int, i3 int);
+create table federated.t2 (id int, c1 varchar(20), c2 varchar(20));
+
+connection master;
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+eval create table federated.t1 (i1 int, i2 int, i3 int) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1';
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+eval create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t2';
+insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2);
+insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test");
+select * from federated.t1 order by i1;
+select * from federated.t2;
+update federated.t1,federated.t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id;
+select * from federated.t1 order by i1;
+select * from federated.t2 order by id;
+delete federated.t1.*,federated.t2.* from federated.t1,federated.t2 where t1.i2=t2.id;
+select * from federated.t1 order by i1;
+select * from federated.t2 order by id;
+drop table federated.t1, federated.t2;
+connection slave;
+drop table federated.t1, federated.t2;
+
+# Test multi updates and deletes with keys
+connection slave;
+create table federated.t1 (i1 int, i2 int, i3 int, primary key (i1));
+create table federated.t2 (id int, c1 varchar(20), c2 varchar(20), primary key (id));
+
+connection master;
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+eval create table federated.t1 (i1 int auto_increment not null, i2 int, i3 int, primary key (i1)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1';
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+eval create table federated.t2 (id int auto_increment not null, c1 varchar(20), c2 varchar(20), primary key(id)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t2';
+insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2);
+insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test");
+select * from federated.t1 order by i1;
+select * from federated.t2 order by id;
+update federated.t1,federated.t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id;
+select * from federated.t1 order by i1;
+select * from federated.t2 order by id;
+delete federated.t1.*,federated.t2.* from federated.t1,federated.t2 where t1.i2=t2.id;
+select * from federated.t1 order by i1;
+select * from federated.t2 order by id;
+drop table federated.t1, federated.t2;
+
+connection slave;
+drop table federated.t1, federated.t2;
source include/federated_cleanup.inc;
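
For background, the CONNECTION strings used in the federated.test additions above follow the FEDERATED engine's URL scheme, mysql://user[:password]@host[:port]/db_name/tbl_name. A minimal sketch (illustrative only, with a fixed port standing in for $SLAVE_MYPORT):

  create table federated.t1 (i1 int, i2 int, i3 int)
    engine=federated
    connection='mysql://root@127.0.0.1:3306/federated/t1';
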
diff --git a/mysql-test/t/func_sapdb.test b/mysql-test/t/func_sapdb.test
index 6189712b5fe..97101fba615 100644
--- a/mysql-test/t/func_sapdb.test
+++ b/mysql-test/t/func_sapdb.test
@@ -43,6 +43,8 @@ select weekofyear("1997-11-30 23:59:59.000001");
select makedate(1997,1);
select makedate(1997,0);
+select makedate(9999,365);
+select makedate(9999,366);
#Time functions
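
The two makedate() calls added above probe the upper boundary of the DATE range; a minimal sketch of the intended post-fix behaviour (illustrative only, not taken from the result file):

  select makedate(9999,365);  -- 9999-12-31, the last representable DATE
  select makedate(9999,366);  -- NULL, the result would fall outside the DATE range
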
diff --git a/mysql-test/t/func_time.test b/mysql-test/t/func_time.test
index d817d016881..559f6c13d4a 100644
--- a/mysql-test/t/func_time.test
+++ b/mysql-test/t/func_time.test
@@ -5,6 +5,9 @@
drop table if exists t1,t2,t3;
--enable_warnings
+# Set the time zone to +03:00 (POSIX TZ "GMT-3") so that "interval 3 hour" can be used
+set time_zone="+03:00";
+
select from_days(to_days("960101")),to_days(960201)-to_days("19960101"),to_days(date_add(curdate(), interval 1 day))-to_days(curdate()),weekday("1997-11-29");
select period_add("9602",-12),period_diff(199505,"9404") ;
@@ -140,6 +143,12 @@ select extract(SECOND FROM "1999-01-02 10:11:12");
select extract(MONTH FROM "2001-02-00");
#
+# MySQL Bugs: #12356: DATE_SUB or DATE_ADD incorrectly returns null
+#
+SELECT DATE_SUB(str_to_date('9999-12-31 00:01:00','%Y-%m-%d %H:%i:%s'), INTERVAL 1 MINUTE);
+SELECT DATE_ADD(str_to_date('9999-12-30 23:59:00','%Y-%m-%d %H:%i:%s'), INTERVAL 1 MINUTE);
+
+#
# test EXTRACT QUARTER (Bug #18100)
#
@@ -335,6 +344,7 @@ select last_day("1997-12-1")+0.0;
# Test SAPDB UTC_% functions. This part is TZ dependent (it assumes that the
# TZ variable is set to GMT-3, i.e. local time is UTC+3)
+
select strcmp(date_sub(localtimestamp(), interval 3 hour), utc_timestamp())=0;
select strcmp(date_format(date_sub(localtimestamp(), interval 3 hour),"%T"), utc_time())=0;
select strcmp(date_format(date_sub(localtimestamp(), interval 3 hour),"%Y-%m-%d"), utc_date())=0;
@@ -513,3 +523,6 @@ SELECT * FROM t1, t2
DROP TABLE t1,t2;
# End of 5.0 tests
+
+# Restore timezone to default
+set time_zone= @@global.time_zone;
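
For context, the reason the tests above pin the session time zone: with time_zone set to "+03:00", the local-time functions run exactly three hours ahead of their UTC counterparts, so the "interval 3 hour" comparisons are deterministic on any host. A minimal sketch (illustrative only):

  set time_zone="+03:00";
  select timediff(now(), utc_timestamp());  -- always 03:00:00
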
diff --git a/mysql-test/t/func_timestamp.test b/mysql-test/t/func_timestamp.test
index e1bb7e878ee..05a91b06d28 100644
--- a/mysql-test/t/func_timestamp.test
+++ b/mysql-test/t/func_timestamp.test
@@ -6,6 +6,9 @@
drop table if exists t1;
--enable_warnings
+# Set the time zone to +03:00 (POSIX TZ "GMT-3") so that "interval 3 hour" can be used
+set time_zone="+03:00";
+
create table t1 (Zeit time, Tag tinyint not null, Monat tinyint not null,
Jahr smallint not null, index(Tag), index(Monat), index(Jahr) );
insert into t1 values ("09:26:00",16,9,1998),("09:26:00",16,9,1998);
@@ -15,3 +18,6 @@ FROM t1;
drop table t1;
# End of 4.1 tests
+
+# Restore timezone to default
+set time_zone= @@global.time_zone;
diff --git a/mysql-test/t/gis.test b/mysql-test/t/gis.test
index bb3f621d194..4c6ff9b2fe7 100644
--- a/mysql-test/t/gis.test
+++ b/mysql-test/t/gis.test
@@ -409,3 +409,10 @@ create table t1(pt GEOMETRY);
alter table t1 add primary key pti(pt);
alter table t1 add primary key pti(pt(20));
drop table t1;
+
+--enable_metadata
+create table t1 (g GEOMETRY);
+select * from t1;
+select asbinary(g) from t1;
+--disable_metadata
+drop table t1;
diff --git a/mysql-test/t/key.test b/mysql-test/t/key.test
index 31763b84379..e7072ae29f6 100644
--- a/mysql-test/t/key.test
+++ b/mysql-test/t/key.test
@@ -326,6 +326,17 @@ alter table t1 add key (c1,c1,c2);
drop table t1;
#
+# Bug#11228: DESC shows arbitrary column as "PRI"
+#
+create table t1 (
+ i1 INT NOT NULL,
+ i2 INT NOT NULL,
+ UNIQUE i1idx (i1),
+ UNIQUE i2idx (i2));
+desc t1;
+drop table t1;
+
+#
# Bug#12565 - ERROR 1034 when running simple UPDATE or DELETE
# on large MyISAM table
#
diff --git a/mysql-test/t/lock_multi.test b/mysql-test/t/lock_multi.test
index 905d0699e6a..627c33b3d82 100644
--- a/mysql-test/t/lock_multi.test
+++ b/mysql-test/t/lock_multi.test
@@ -142,6 +142,7 @@ disconnect con2;
--error ER_DB_DROP_EXISTS
DROP DATABASE mysqltest_1;
+#
# Bug#16986 - Deadlock condition with MyISAM tables
#
connection locker;
@@ -170,6 +171,55 @@ connection locker;
use test;
#
connection default;
+#
+# Test if CREATE TABLE with LOCK TABLE deadlocks.
+#
+connection writer;
+CREATE TABLE t1 (c1 int);
+LOCK TABLE t1 WRITE;
+#
+# This waits until t1 is unlocked.
+connection locker;
+send FLUSH TABLES WITH READ LOCK;
+--sleep 1
+#
+# This must not block.
+connection writer;
+CREATE TABLE t2 (c1 int);
+UNLOCK TABLES;
+#
+# This awakes now.
+connection locker;
+reap;
+UNLOCK TABLES;
+#
+connection default;
+DROP TABLE t1, t2;
+#
+# Test if CREATE TABLE SELECT with LOCK TABLE deadlocks.
+#
+connection writer;
+CREATE TABLE t1 (c1 int);
+LOCK TABLE t1 WRITE;
+#
+# This waits until t1 is unlocked.
+connection locker;
+send FLUSH TABLES WITH READ LOCK;
+--sleep 1
+#
+# This must not block.
+connection writer;
+--error 1100
+CREATE TABLE t2 AS SELECT * FROM t1;
+UNLOCK TABLES;
+#
+# This awakes now.
+connection locker;
+reap;
+UNLOCK TABLES;
+#
+connection default;
+DROP TABLE t1;
#
# Bug #17264: MySQL Server freeze
diff --git a/mysql-test/t/mysqldump.test b/mysql-test/t/mysqldump.test
index ab865594d42..7e4fedb297d 100644
--- a/mysql-test/t/mysqldump.test
+++ b/mysql-test/t/mysqldump.test
@@ -610,7 +610,7 @@ drop database db1;
# BUG#15328 Segmentation fault occured if my.cnf is invalid for escape sequence
#
---exec $MYSQL_MY_PRINT_DEFAULTS --defaults-extra-file=$MYSQL_TEST_DIR/std_data/bug15328.cnf mysqldump
+--exec $MYSQL_MY_PRINT_DEFAULTS --config-file=$MYSQL_TEST_DIR/std_data/bug15328.cnf mysqldump
#
@@ -1146,6 +1146,27 @@ drop table t1, t2;
#
+# Bug#18462 mysqldump does not dump view structures correctly
+#
+#
+create table t (qty int, price int);
+insert into t values(3, 50);
+insert into t values(5, 51);
+create view v1 as select qty, price, qty*price as value from t;
+create view v2 as select qty from v1;
+--echo mysqldump {
+--exec $MYSQL_DUMP --compact -F --tab . test
+--exec cat v1.sql
+--echo } mysqldump {
+--exec cat v2.sql
+--echo } mysqldump
+--rm v.sql t.sql t.txt
+drop view v1;
+drop view v2;
+drop table t;
+
+
+#
# Bug#14857 Reading dump files with single statement stored routines fails.
# fixed by patch for bug#16878
#
@@ -1162,6 +1183,18 @@ drop function f;
drop procedure p;
#
+# Bug #17371 Unable to dump a schema with invalid views
+#
+#
+create table t1 ( id serial );
+create view v1 as select * from t1;
+drop table t1;
+# mysqldump gets error 1356 from the server, but exits with code 2
+--echo mysqldump {
+--error 2
+--exec $MYSQL_DUMP --force -N --compact --skip-comments test
+--echo } mysqldump
+drop view v1;
# BUG#17201 Spurious 'DROP DATABASE' in output,
# also confusion between tables and views.
# Example code from Markus Popp
diff --git a/mysql-test/t/ps.test b/mysql-test/t/ps.test
index e3f3e37cd4c..ff66b265fae 100644
--- a/mysql-test/t/ps.test
+++ b/mysql-test/t/ps.test
@@ -1146,4 +1146,122 @@ execute stmt;
execute stmt;
deallocate prepare stmt;
drop table t1, t2, t3;
+
+#
+# Bug#17199 "Table not found" error occurs if the query contains a call
+# to a function from another database.
+# Test prepared statement-related behaviour.
+#
+#
+# ALTER TABLE RENAME and Prepared Statements: wrong DB name buffer was used
+# in ALTER ... RENAME which caused memory corruption in prepared statements.
+# No need to fix this problem in 4.1 as ALTER TABLE is not allowed in
+# Prepared Statements in 4.1.
+#
+create database mysqltest_long_database_name_to_thrash_heap;
+use test;
+create table t1 (i int);
+prepare stmt from "alter table test.t1 rename t1";
+use mysqltest_long_database_name_to_thrash_heap;
+execute stmt;
+show tables like 't1';
+prepare stmt from "alter table test.t1 rename t1";
+use test;
+execute stmt;
+show tables like 't1';
+use mysqltest_long_database_name_to_thrash_heap;
+show tables like 't1';
+deallocate prepare stmt;
+#
+# Check that a prepared statement initializes its current database at
+# PREPARE, and then works correctly even if the current database has been
+# changed.
+#
+use mysqltest_long_database_name_to_thrash_heap;
+# Necessary for preparation of INSERT/UPDATE/DELETE to succeed
+prepare stmt_create from "create table t1 (i int)";
+prepare stmt_insert from "insert into t1 (i) values (1)";
+prepare stmt_update from "update t1 set i=2";
+prepare stmt_delete from "delete from t1 where i=2";
+prepare stmt_select from "select * from t1";
+prepare stmt_alter from "alter table t1 add column (b int)";
+prepare stmt_alter1 from "alter table t1 drop column b";
+prepare stmt_analyze from "analyze table t1";
+prepare stmt_optimize from "optimize table t1";
+prepare stmt_show from "show tables like 't1'";
+prepare stmt_truncate from "truncate table t1";
+prepare stmt_drop from "drop table t1";
+# Drop the table that was used to prepare INSERT/UPDATE/DELETE: we will
+# create a new one by executing stmt_create
+drop table t1;
+# Switch the current database
+use test;
+# Check that all prepared statements operate on the database that was
+# active at PREPARE
+execute stmt_create;
+# should return empty set
+show tables like 't1';
+use mysqltest_long_database_name_to_thrash_heap;
+show tables like 't1';
+use test;
+execute stmt_insert;
+select * from mysqltest_long_database_name_to_thrash_heap.t1;
+execute stmt_update;
+select * from mysqltest_long_database_name_to_thrash_heap.t1;
+execute stmt_delete;
+execute stmt_select;
+execute stmt_alter;
+show columns from mysqltest_long_database_name_to_thrash_heap.t1;
+execute stmt_alter1;
+show columns from mysqltest_long_database_name_to_thrash_heap.t1;
+execute stmt_analyze;
+execute stmt_optimize;
+execute stmt_show;
+execute stmt_truncate;
+execute stmt_drop;
+show tables like 't1';
+use mysqltest_long_database_name_to_thrash_heap;
+show tables like 't1';
+#
+# Attempt a statement PREPARE when there is no current database:
+# it is expected to return an error.
+#
+drop database mysqltest_long_database_name_to_thrash_heap;
+--error ER_NO_DB_ERROR
+prepare stmt_create from "create table t1 (i int)";
+--error ER_NO_DB_ERROR
+prepare stmt_insert from "insert into t1 (i) values (1)";
+--error ER_NO_DB_ERROR
+prepare stmt_update from "update t1 set i=2";
+--error ER_NO_DB_ERROR
+prepare stmt_delete from "delete from t1 where i=2";
+--error ER_NO_DB_ERROR
+prepare stmt_select from "select * from t1";
+--error ER_NO_DB_ERROR
+prepare stmt_alter from "alter table t1 add column (b int)";
+--error ER_NO_DB_ERROR
+prepare stmt_alter1 from "alter table t1 drop column b";
+--error ER_NO_DB_ERROR
+prepare stmt_analyze from "analyze table t1";
+--error ER_NO_DB_ERROR
+prepare stmt_optimize from "optimize table t1";
+--error ER_NO_DB_ERROR
+prepare stmt_show from "show tables like 't1'";
+--error ER_NO_DB_ERROR
+prepare stmt_truncate from "truncate table t1";
+--error ER_NO_DB_ERROR
+prepare stmt_drop from "drop table t1";
+#
+# The above has automatically deallocated all our statements.
+#
+# Attempt to CREATE a temporary table when no DB is used: it should fail.
+# This proves that no table can be used without explicit specification of
+# its database if there is no current database.
+#
+--error ER_NO_DB_ERROR
+create temporary table t1 (i int);
+#
+# Restore the old environment
+#
+use test;
# End of 5.0 tests
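
As background for the ER_NO_DB_ERROR checks above (illustrative only, assuming test.t1 exists; tmpdb and s are throwaway names): once the session's default database is dropped, any statement with an unqualified table name already fails at PREPARE, while fully qualified names keep working:

  create database tmpdb;
  use tmpdb;
  drop database tmpdb;                       -- the session now has no default database
  prepare s from "select * from t1";         -- ERROR 1046 (3D000): No database selected
  prepare s from "select * from test.t1";    -- OK, the table name is fully qualified
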
diff --git a/mysql-test/t/rpl_openssl.test b/mysql-test/t/rpl_openssl.test
index 7d769ad448e..af70a1a9453 100644
--- a/mysql-test/t/rpl_openssl.test
+++ b/mysql-test/t/rpl_openssl.test
@@ -1,3 +1,7 @@
+# TODO: THIS TEST DOES NOT WORK ON WINDOWS
+# This should be fixed.
+--source include/not_windows.inc
+
source include/have_openssl.inc;
source include/master-slave.inc;
diff --git a/mysql-test/t/show_check.test b/mysql-test/t/show_check.test
index 52d262677ff..6937cbe949d 100644
--- a/mysql-test/t/show_check.test
+++ b/mysql-test/t/show_check.test
@@ -424,3 +424,75 @@ DROP TABLE urkunde;
#
--error 1049
SHOW TABLES FROM non_existing_database;
+
+
+#
+# Bug#17203: "sql_no_cache sql_cache" in views created from prepared
+# statement
+#
+# The problem was that the initial user setting was forgotten, and the current
+# runtime-determined values of the flags were shown instead.
+#
+--disable_warnings
+DROP VIEW IF EXISTS v1;
+DROP PROCEDURE IF EXISTS p1;
+--enable_warnings
+
+# Check that SHOW CREATE VIEW shows the SQL_CACHE flag exactly as
+# specified by the user.
+CREATE VIEW v1 AS SELECT 1;
+SHOW CREATE VIEW v1;
+DROP VIEW v1;
+
+CREATE VIEW v1 AS SELECT SQL_CACHE 1;
+SHOW CREATE VIEW v1;
+DROP VIEW v1;
+
+CREATE VIEW v1 AS SELECT SQL_NO_CACHE 1;
+SHOW CREATE VIEW v1;
+DROP VIEW v1;
+
+# Usage of NOW() disables caching, but we still have to show what the
+# user has specified.
+CREATE VIEW v1 AS SELECT NOW();
+SHOW CREATE VIEW v1;
+DROP VIEW v1;
+
+CREATE VIEW v1 AS SELECT SQL_CACHE NOW();
+SHOW CREATE VIEW v1;
+DROP VIEW v1;
+
+CREATE VIEW v1 AS SELECT SQL_NO_CACHE NOW();
+SHOW CREATE VIEW v1;
+DROP VIEW v1;
+
+# Check that SQL_NO_CACHE always wins.
+CREATE VIEW v1 AS SELECT SQL_CACHE SQL_NO_CACHE NOW();
+SHOW CREATE VIEW v1;
+DROP VIEW v1;
+
+CREATE VIEW v1 AS SELECT SQL_NO_CACHE SQL_CACHE NOW();
+SHOW CREATE VIEW v1;
+DROP VIEW v1;
+
+CREATE VIEW v1 AS SELECT SQL_CACHE SQL_NO_CACHE SQL_CACHE NOW();
+SHOW CREATE VIEW v1;
+DROP VIEW v1;
+
+# Check CREATE VIEW in a prepared statement in a procedure.
+delimiter |;
+CREATE PROCEDURE p1()
+BEGIN
+ SET @s= 'CREATE VIEW v1 AS SELECT SQL_CACHE 1';
+ PREPARE stmt FROM @s;
+ EXECUTE stmt;
+ DROP PREPARE stmt;
+END |
+delimiter ;|
+CALL p1();
+SHOW CREATE VIEW v1;
+
+DROP PROCEDURE p1;
+DROP VIEW v1;
+
+# End of 5.0 tests.
diff --git a/mysql-test/t/sp-prelocking.test b/mysql-test/t/sp-prelocking.test
index a7215462afb..b94de6236d3 100644
--- a/mysql-test/t/sp-prelocking.test
+++ b/mysql-test/t/sp-prelocking.test
@@ -272,3 +272,34 @@ drop table t1;
drop view v1, v2, v3;
drop function bug15683;
+
+#
+# Bug#19634 "Re-execution of multi-delete which involve trigger/stored
+# function crashes server"
+#
+--disable_warnings
+drop table if exists t1, t2, t3;
+drop function if exists bug19634;
+--enable_warnings
+create table t1 (id int, data int);
+create table t2 (id int);
+create table t3 (data int);
+create function bug19634() returns int return (select count(*) from t3);
+prepare stmt from "delete t1 from t1, t2 where t1.id = t2.id and bug19634()";
+# This should not crash server
+execute stmt;
+execute stmt;
+deallocate prepare stmt;
+
+create trigger t1_bi before delete on t1 for each row insert into t3 values (old.data);
+prepare stmt from "delete t1 from t1, t2 where t1.id = t2.id";
+
+execute stmt;
+execute stmt;
+deallocate prepare stmt;
+
+drop function bug19634;
+drop table t1, t2, t3;
+
+
+--echo End of 5.0 tests
diff --git a/mysql-test/t/sp-security.test b/mysql-test/t/sp-security.test
index a8c3c0a22eb..d323b180216 100644
--- a/mysql-test/t/sp-security.test
+++ b/mysql-test/t/sp-security.test
@@ -744,4 +744,50 @@ DROP USER mysqltest_2@localhost;
DROP DATABASE mysqltest;
+#
+# Bug#19857 - When a user with CREATE ROUTINE priv creates a routine,
+# it results in NULL p/w
+#
+
+# Can't test with embedded server that doesn't support grants
+
+GRANT USAGE ON *.* TO user19857@localhost IDENTIFIED BY 'meow';
+GRANT SELECT, INSERT, UPDATE, DELETE, CREATE ROUTINE, ALTER ROUTINE ON test.* TO
+user19857@localhost;
+SELECT Host,User,Password FROM mysql.user WHERE User='user19857';
+
+--connect (mysqltest_2_con,localhost,user19857,meow,test)
+--echo
+--echo ---> connection: mysqltest_2_con
+--connection mysqltest_2_con
+
+use test;
+
+DELIMITER //;
+ CREATE PROCEDURE sp19857() DETERMINISTIC
+ BEGIN
+ DECLARE a INT;
+ SET a=1;
+ SELECT a;
+ END //
+DELIMITER ;//
+
+SHOW CREATE PROCEDURE test.sp19857;
+
+--disconnect mysqltest_2_con
+--connect (mysqltest_2_con,localhost,user19857,meow,test)
+--connection mysqltest_2_con
+
+DROP PROCEDURE IF EXISTS test.sp19857;
+
+--echo
+--echo ---> connection: root
+--connection con1root
+
+--disconnect mysqltest_2_con
+
+SELECT Host,User,Password FROM mysql.user WHERE User='user19857';
+
+DROP USER user19857@localhost;
+
# End of 5.0 bugs.
diff --git a/mysql-test/t/sp.test b/mysql-test/t/sp.test
index 1d21a5da187..25c96042e6f 100644
--- a/mysql-test/t/sp.test
+++ b/mysql-test/t/sp.test
@@ -5888,6 +5888,79 @@ DROP FUNCTION bug18037_f1|
DROP PROCEDURE bug18037_p1|
DROP PROCEDURE bug18037_p2|
+#
+# Bug#17199: "Table not found" error occurs if the query contains a call
+# to a function from another database.
+# See also ps.test for an additional test case for this bug.
+#
+use test|
+create table t3 (i int)|
+insert into t3 values (1), (2)|
+create database mysqltest1|
+use mysqltest1|
+create function bug17199() returns varchar(2) deterministic return 'ok'|
+use test|
+select *, mysqltest1.bug17199() from t3|
+#
+# Bug#18444: Fully qualified stored function names don't work correctly
+# in select statements
+#
+use mysqltest1|
+create function bug18444(i int) returns int no sql deterministic return i + 1|
+use test|
+select mysqltest1.bug18444(i) from t3|
+drop database mysqltest1|
+#
+# Check that the current database has no influence on a stored procedure
+#
+create database mysqltest1 charset=utf8|
+create database mysqltest2 charset=utf8|
+create procedure mysqltest1.p1()
+begin
+-- alters the character set of the current default database
+ alter database character set koi8r;
+end|
+use mysqltest1|
+call p1()|
+show create database mysqltest1|
+show create database mysqltest2|
+alter database mysqltest1 character set utf8|
+use mysqltest2|
+call mysqltest1.p1()|
+show create database mysqltest1|
+show create database mysqltest2|
+drop database mysqltest1|
+drop database mysqltest2|
+#
+# Restore the old environment
+use test|
+#
+# Bug#15217 "Using a SP cursor on a table created with PREPARE fails with
+# weird error". Check that the code that is supposed to work at
+# the first execution of a stored procedure actually works for
+# sp_instr_copen.
+
+--disable_warnings
+drop table if exists t3|
+drop procedure if exists bug15217|
+--enable_warnings
+create table t3 as select 1|
+create procedure bug15217()
+begin
+ declare var1 char(255);
+ declare cur1 cursor for select * from t3;
+ open cur1;
+ fetch cur1 into var1;
+ select concat('data was: /', var1, '/');
+ close cur1;
+end |
+# Returns expected result
+call bug15217()|
+flush tables |
+# Before the fix this returned an error with garbage as the column name
+call bug15217()|
+drop table t3|
+drop procedure bug15217|
#
# BUG#NNNN: New bug synopsis
diff --git a/mysql-test/t/trigger.test b/mysql-test/t/trigger.test
index 3743d8f5c76..95e8eaae83e 100644
--- a/mysql-test/t/trigger.test
+++ b/mysql-test/t/trigger.test
@@ -237,7 +237,7 @@ begin
end|
delimiter ;|
insert into t3 values (1);
---error 1048
+--error ER_BAD_NULL_ERROR
insert into t1 values (4, "four", 1), (5, "five", 2);
select * from t1;
select * from t2;
@@ -295,19 +295,19 @@ drop table t1, t2;
create table t1 (i int);
create table t3 (i int);
---error 1363
+--error ER_TRG_NO_SUCH_ROW_IN_TRG
create trigger trg before insert on t1 for each row set @a:= old.i;
---error 1363
+--error ER_TRG_NO_SUCH_ROW_IN_TRG
create trigger trg before delete on t1 for each row set @a:= new.i;
---error 1362
+--error ER_TRG_CANT_CHANGE_ROW
create trigger trg before update on t1 for each row set old.i:=1;
---error 1363
+--error ER_TRG_NO_SUCH_ROW_IN_TRG
create trigger trg before delete on t1 for each row set new.i:=1;
---error 1362
+--error ER_TRG_CANT_CHANGE_ROW
create trigger trg after update on t1 for each row set new.i:=1;
---error 1054
+--error ER_BAD_FIELD_ERROR
create trigger trg before update on t1 for each row set new.j:=1;
---error 1054
+--error ER_BAD_FIELD_ERROR
create trigger trg before update on t1 for each row set @a:=old.j;
@@ -315,25 +315,25 @@ create trigger trg before update on t1 for each row set @a:=old.j;
# Let us test various trigger creation errors
# Also quickly test table namespace (bug#5892/6182)
#
---error 1146
+--error ER_NO_SUCH_TABLE
create trigger trg before insert on t2 for each row set @a:=1;
create trigger trg before insert on t1 for each row set @a:=1;
---error 1359
+--error ER_TRG_ALREADY_EXISTS
create trigger trg after insert on t1 for each row set @a:=1;
---error 1359
+--error ER_NOT_SUPPORTED_YET
create trigger trg2 before insert on t1 for each row set @a:=1;
---error 1359
+--error ER_TRG_ALREADY_EXISTS
create trigger trg before insert on t3 for each row set @a:=1;
create trigger trg2 before insert on t3 for each row set @a:=1;
drop trigger trg2;
drop trigger trg;
---error 1360
+--error ER_TRG_DOES_NOT_EXIST
drop trigger trg;
create view v1 as select * from t1;
---error 1347
+--error ER_WRONG_OBJECT
create trigger trg before insert on v1 for each row set @a:=1;
drop view v1;
@@ -341,7 +341,7 @@ drop table t1;
drop table t3;
create temporary table t1 (i int);
---error 1361
+--error ER_TRG_ON_VIEW_OR_TEMP_TABLE
create trigger trg before insert on t1 for each row set @a:=1;
drop table t1;
@@ -495,47 +495,47 @@ select * from t1;
# their main effect. This is because the operation on the table row is
# executed before the "after" trigger, and its effect cannot be rolled back
# when the whole statement fails, because t1 is a MyISAM table.
---error 1054
+--error ER_BAD_FIELD_ERROR
insert into t1 values (2, 1);
select * from t1;
---error 1054
+--error ER_BAD_FIELD_ERROR
update t1 set k = 2 where i = 2;
select * from t1;
---error 1054
+--error ER_BAD_FIELD_ERROR
delete from t1 where i = 2;
select * from t1;
# Should fail and insert only 1 row
---error 1054
+--error ER_BAD_FIELD_ERROR
load data infile '../std_data_ln/loaddata5.dat' into table t1 fields terminated by '' enclosed by '' (i, k);
select * from t1;
---error 1054
+--error ER_BAD_FIELD_ERROR
insert into t1 select 3, 3;
select * from t1;
# Multi-update working on the fly, again it will update only
# one row even if more matches
---error 1054
+--error ER_BAD_FIELD_ERROR
update t1, t2 set k = k + 10 where t1.i = t2.i;
select * from t1;
# The same for multi-update via temp table
---error 1054
+--error ER_BAD_FIELD_ERROR
update t1, t2 set k = k + 10 where t1.i = t2.i and k < 3;
select * from t1;
# Multi-delete on the fly
---error 1054
+--error ER_BAD_FIELD_ERROR
delete t1, t2 from t1 straight_join t2 where t1.i = t2.i;
select * from t1;
# And via temporary storage
---error 1054
+--error ER_BAD_FIELD_ERROR
delete t2, t1 from t2 straight_join t1 where t1.i = t2.i;
select * from t1;
# Prepare table for testing of REPLACE and INSERT ... ON DUPLICATE KEY UPDATE
alter table t1 add primary key (i);
---error 1054
+--error ER_BAD_FIELD_ERROR
insert into t1 values (3, 4) on duplicate key update k= k + 10;
select * from t1;
# The following statement will delete old row and won't
# insert new one since after delete trigger will fail.
---error 1054
+--error ER_BAD_FIELD_ERROR
replace into t1 values (3, 3);
select * from t1;
# Also drops all triggers
@@ -553,33 +553,33 @@ alter table t1 drop column bt;
# The following statements changing t1 should fail and should not
# cause any effect on table, since "before" trigger is executed
# before operation on the table row.
---error 1054
+--error ER_BAD_FIELD_ERROR
insert into t1 values (3, 3);
select * from t1;
---error 1054
+--error ER_BAD_FIELD_ERROR
update t1 set i = 2;
select * from t1;
---error 1054
+--error ER_BAD_FIELD_ERROR
delete from t1;
select * from t1;
---error 1054
+--error ER_BAD_FIELD_ERROR
load data infile '../std_data_ln/loaddata5.dat' into table t1 fields terminated by '' enclosed by '' (i, k);
select * from t1;
---error 1054
+--error ER_BAD_FIELD_ERROR
insert into t1 select 3, 3;
select * from t1;
# Both types of multi-update (on the fly and via temp table)
---error 1054
+--error ER_BAD_FIELD_ERROR
update t1, t2 set k = k + 10 where t1.i = t2.i;
select * from t1;
---error 1054
+--error ER_BAD_FIELD_ERROR
update t1, t2 set k = k + 10 where t1.i = t2.i and k < 2;
select * from t1;
# Both types of multi-delete
---error 1054
+--error ER_BAD_FIELD_ERROR
delete t1, t2 from t1 straight_join t2 where t1.i = t2.i;
select * from t1;
---error 1054
+--error ER_BAD_FIELD_ERROR
delete t2, t1 from t2 straight_join t1 where t1.i = t2.i;
select * from t1;
# Let us test REPLACE/INSERT ... ON DUPLICATE KEY UPDATE.
@@ -587,10 +587,10 @@ select * from t1;
# in ordinary INSERT we need to drop "before insert" trigger.
alter table t1 add primary key (i);
drop trigger bi;
---error 1054
+--error ER_BAD_FIELD_ERROR
insert into t1 values (2, 4) on duplicate key update k= k + 10;
select * from t1;
---error 1054
+--error ER_BAD_FIELD_ERROR
replace into t1 values (2, 4);
select * from t1;
# Also drops all triggers
@@ -608,7 +608,7 @@ insert into t1 values (1, 2);
create function bug5893 () returns int return 5;
create trigger t1_bu before update on t1 for each row set new.col1= bug5893();
drop function bug5893;
---error 1305
+--error ER_SP_DOES_NOT_EXIST
update t1 set col2 = 4;
# This should not crash server too.
drop trigger t1_bu;
@@ -908,9 +908,9 @@ create trigger t1_bi after insert on t1 for each row insert into t3 values (new.
# Until we implement a proper mechanism for invalidation of PS/SP when tables
# or SPs are changed, these two statements will fail with a 'Table ... was
# not locked' error (this mechanism should be based on the new TDC).
---error 1100
+--error 1100 #ER_TABLE_NOT_LOCKED
execute stmt1;
---error 1100
+--error 1100 #ER_TABLE_NOT_LOCKED
call p1();
deallocate prepare stmt1;
drop procedure p1;
@@ -1186,7 +1186,7 @@ INSERT INTO t1 VALUES (@x);
SELECT @x;
SET @x=2;
---error 1365
+--error ER_DIVISION_BY_ZERO
UPDATE t1 SET i1 = @x;
SELECT @x;
@@ -1197,7 +1197,7 @@ INSERT INTO t1 VALUES (@x);
SELECT @x;
SET @x=4;
---error 1365
+--error ER_DIVISION_BY_ZERO
UPDATE t1 SET i1 = @x;
SELECT @x;
@@ -1281,4 +1281,26 @@ SELECT * FROM t1;
DROP TABLE t1;
-# End of 5.0 tests
+#
+# Bug #18005: Creating a trigger on mysql.event leads to server crash on
+# scheduler startup
+#
+# Bug #18361: Triggers on mysql.user table cause server crash
+#
+# We don't allow triggers on the mysql schema
+delimiter |;
+--error ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA
+create trigger wont_work after update on mysql.user for each row
+begin
+ set @a:= 1;
+end|
+# Try when we're already using the mysql schema
+use mysql|
+--error ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA
+create trigger wont_work after update on event for each row
+begin
+ set @a:= 1;
+end|
+delimiter ;|
+
+--echo End of 5.0 tests
diff --git a/mysql-test/t/type_timestamp.test b/mysql-test/t/type_timestamp.test
index f96beedbebc..ddfc3f11665 100644
--- a/mysql-test/t/type_timestamp.test
+++ b/mysql-test/t/type_timestamp.test
@@ -6,6 +6,9 @@
drop table if exists t1,t2;
--enable_warnings
+# Set timezone to GMT-3, to make it possible to use "interval 3 hour"
+set time_zone="+03:00";
+
CREATE TABLE t1 (a int, t timestamp);
CREATE TABLE t2 (a int, t datetime);
SET TIMESTAMP=1234;
@@ -322,3 +325,6 @@ select * from t1;
drop table t1;
# End of 4.1 tests
+
+# Restore timezone to default
+set time_zone= @@global.time_zone;
diff --git a/mysql-test/t/udf.test b/mysql-test/t/udf.test
index e2556692612..f3be08c8537 100644
--- a/mysql-test/t/udf.test
+++ b/mysql-test/t/udf.test
@@ -99,6 +99,17 @@ delimiter ;//
call XXX2();
drop procedure xxx2;
+#
+# Bug#19904: UDF: not initialized *is_null per row
+#
+
+CREATE TABLE bug19904(n INT, v varchar(10));
+INSERT INTO bug19904 VALUES (1,'one'),(2,'two'),(NULL,NULL),(3,'three'),(4,'four');
+SELECT myfunc_double(n) AS f FROM bug19904;
+SELECT metaphon(v) AS f FROM bug19904;
+DROP TABLE bug19904;
+
+--echo End of 5.0 tests.
#
# Drop the example functions from udf_example
@@ -114,3 +125,4 @@ DROP FUNCTION lookup;
DROP FUNCTION reverse_lookup;
DROP FUNCTION avgcost;
+
diff --git a/mysql-test/t/variables.test b/mysql-test/t/variables.test
index be1731e7493..68efcafd1e0 100644
--- a/mysql-test/t/variables.test
+++ b/mysql-test/t/variables.test
@@ -302,6 +302,22 @@ set wait_timeout=100;
set log_warnings=1;
#
+# Bug #20392: INSERT_ID session variable has weird value
+#
+select @@session.insert_id;
+set @save_insert_id=@@session.insert_id;
+set session insert_id=20;
+select @@session.insert_id;
+
+set session last_insert_id=100;
+select @@session.insert_id;
+select @@session.last_insert_id;
+select @@session.insert_id;
+
+set @@session.insert_id=@save_insert_id;
+select @@session.insert_id;
+
+#
# key buffer
#
diff --git a/mysql-test/t/wait_timeout.test b/mysql-test/t/wait_timeout.test
index 9310c3502b9..8387c08c902 100644
--- a/mysql-test/t/wait_timeout.test
+++ b/mysql-test/t/wait_timeout.test
@@ -11,6 +11,7 @@
connect (wait_con,localhost,root,,test,,);
flush status; # Reset counters
connection wait_con;
+set session wait_timeout=100;
let $retries=300;
let $aborted_clients = `SHOW STATUS LIKE 'aborted_clients'`;
set @aborted_clients= 0;
diff --git a/mysys/Makefile.am b/mysys/Makefile.am
index d046b2fa3f8..bc84f44cd29 100644
--- a/mysys/Makefile.am
+++ b/mysys/Makefile.am
@@ -56,7 +56,7 @@ libmysys_a_SOURCES = my_init.c my_getwd.c mf_getdate.c my_mmap.c \
my_gethostbyname.c rijndael.c my_aes.c sha1.c \
my_handler.c my_netware.c my_largepage.c \
my_memmem.c \
- my_windac.c my_access.c base64.c
+ my_windac.c my_access.c base64.c my_libwrap.c
EXTRA_DIST = thr_alarm.c thr_lock.c my_pthread.c my_thr_init.c \
thr_mutex.c thr_rwlock.c
libmysys_a_LIBADD = @THREAD_LOBJECTS@
diff --git a/mysys/mf_dirname.c b/mysys/mf_dirname.c
index 9206aa28078..4d78f039799 100644
--- a/mysys/mf_dirname.c
+++ b/mysys/mf_dirname.c
@@ -72,7 +72,9 @@ uint dirname_part(my_string to, const char *name)
SYNOPSIS
convert_dirname()
- to Store result here
+ to Store result here. Must be at least of size
+ min(FN_REFLEN, strlen(from) + 1) to make room
+ for adding FN_LIBCHAR at the end.
from Original filename
from_end Pointer at end of filename (normally end \0)
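
A brief usage sketch of the buffer-size contract documented above; convert_dirname(), FN_REFLEN and NullS are the real mysys names, while the demo function itself is illustrative only.

  #include <my_global.h>
  #include <my_sys.h>

  /* Illustrative only: a destination buffer sized per the contract above. */
  static void dirname_demo(const char *from)
  {
    char dirbuf[FN_REFLEN];        /* >= min(FN_REFLEN, strlen(from) + 1) */
    char *end= convert_dirname(dirbuf, from, NullS);
    /* dirbuf now ends in FN_LIBCHAR and *end == '\0' */
  }
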
diff --git a/mysys/my_delete.c b/mysys/my_delete.c
index 5670f03da64..de2a9814a56 100644
--- a/mysys/my_delete.c
+++ b/mysys/my_delete.c
@@ -32,3 +32,54 @@ int my_delete(const char *name, myf MyFlags)
}
DBUG_RETURN(err);
} /* my_delete */
+
+#if defined(__WIN__) && defined(__NT__)
+/*
+ Delete file which is possibly not closed.
+
+ This function is intended to be used exclusively as a temporary solution
+ for Win NT when a possibly not-closed file needs to be deleted (note
+ that the file must be opened everywhere with FILE_SHARE_DELETE mode).
+ Deleting not-closed files cannot be supported on Win 98|ME (and is
+ therefore considered harmful there).
+
+ The function renames the file before deleting it. This is because a
+ deleted-but-not-closed share-delete file still lives on disk until it
+ has been closed everywhere, which may conflict with an attempt to create
+ a new file with the same name. The deleted file is renamed to
+ <name>.<num>.deleted, where <name> is the initial name of the file and
+ <num> is a hexadecimal number chosen to make the temporary name unique.
+*/
+int nt_share_delete(const char *name, myf MyFlags)
+{
+ char buf[MAX_PATH + 20];
+ ulong cnt;
+ DBUG_ENTER("nt_share_delete");
+ DBUG_PRINT("my",("name %s MyFlags %d", name, MyFlags));
+
+ for (cnt= GetTickCount(); cnt; cnt--)
+ {
+ sprintf(buf, "%s.%08X.deleted", name, cnt);
+ if (MoveFile(name, buf))
+ break;
+
+ if ((errno= GetLastError()) == ERROR_ALREADY_EXISTS)
+ continue;
+
+ DBUG_PRINT("warning", ("Failed to rename %s to %s, errno: %d",
+ name, buf, errno));
+ break;
+ }
+
+ if (DeleteFile(buf))
+ DBUG_RETURN(0);
+
+ my_errno= GetLastError();
+ if (MyFlags & (MY_FAE+MY_WME))
+ my_error(EE_DELETE, MYF(ME_BELL + ME_WAITTANG + (MyFlags & ME_NOINPUT)),
+ name, my_errno);
+
+ DBUG_RETURN(-1);
+}
+#endif
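
A hedged sketch of how a caller could pick between the two delete paths; the wrapper name is hypothetical and not part of this patch, and it assumes nt_share_delete() is declared in a header visible to the caller.

  #include <my_global.h>
  #include <my_sys.h>

  /* Hypothetical wrapper, for illustration only. */
  static int delete_possibly_open_file(const char *name, myf flags)
  {
  #if defined(__WIN__) && defined(__NT__)
    return nt_share_delete(name, flags);  /* rename to <name>.<num>.deleted, then delete */
  #else
    return my_delete(name, flags);        /* plain delete everywhere else */
  #endif
  }
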
diff --git a/mysys/my_init.c b/mysys/my_init.c
index 9b8d4db172f..8346fab95da 100644
--- a/mysys/my_init.c
+++ b/mysys/my_init.c
@@ -245,6 +245,22 @@ void setEnvString(char *ret, const char *name, const char *value)
DBUG_VOID_RETURN ;
}
+/*
+ my_parameter_handler
+ Invalid parameter handler that we use instead of the one "baked" into the
+ CRT for MSC v8. This one just prints out which invalid parameter was
+ encountered. By providing this routine, routines like lseek will return -1
+ when we expect them to, instead of crashing.
+*/
+void my_parameter_handler(const wchar_t * expression, const wchar_t * function,
+ const wchar_t * file, unsigned int line,
+ uintptr_t pReserved)
+{
+ DBUG_PRINT("my",("Expression: %s function: %s file: %s, line: %d",
+ expression, function, file, line));
+}
+
+
static void my_win_init(void)
{
HKEY hSoftMysql ;
@@ -262,12 +278,18 @@ static void my_win_init(void)
setlocale(LC_CTYPE, ""); /* To get right sortorder */
-#if defined(_MSC_VER) && (_MSC_VER < 1300)
+#if defined(_MSC_VER)
+#if _MSC_VER < 1300
/*
Clear the OS system variable TZ and avoid the 100% CPU usage
Only for old versions of Visual C++
*/
_putenv( "TZ=" );
+#endif
+#if _MSC_VER >= 1400
+ /* this is required to make crt functions return -1 appropriately */
+ _set_invalid_parameter_handler(my_parameter_handler);
+#endif
#endif
_tzset();
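
A minimal standalone sketch of the behaviour the comment above describes, assuming a Windows build with MSC v8 or later; quiet_handler is a hypothetical stand-in for my_parameter_handler.

  #include <stdlib.h>
  #include <stdio.h>
  #include <errno.h>
  #include <io.h>

  /* Without a handler the VS2005 CRT aborts on an invalid argument;
     with one installed the call fails softly and returns -1. */
  static void quiet_handler(const wchar_t *expression, const wchar_t *function,
                            const wchar_t *file, unsigned int line,
                            uintptr_t reserved)
  {
    /* swallow the report, as my_parameter_handler does */
  }

  int main(void)
  {
    _set_invalid_parameter_handler(quiet_handler);
    int rc= _close(-1);                    /* deliberately invalid descriptor */
    printf("rc=%d errno=%d\n", rc, errno); /* expect rc=-1 instead of a crash */
    return 0;
  }
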
diff --git a/mysys/my_lib.c b/mysys/my_lib.c
index 03f2d91916d..1c5630ad14e 100644
--- a/mysys/my_lib.c
+++ b/mysys/my_lib.c
@@ -501,7 +501,7 @@ MY_DIR *my_dir(const char *path, myf MyFlags)
if (!(MyFlags & MY_DONT_SORT))
qsort((void *) result->dir_entry, result->number_off_files,
sizeof(FILEINFO), (qsort_cmp) comp_names);
- DBUG_PRINT(exit, ("found %d files", result->number_off_files));
+ DBUG_PRINT("exit", ("found %d files", result->number_off_files));
DBUG_RETURN(result);
error:
my_errno=errno;
diff --git a/mysys/my_libwrap.c b/mysys/my_libwrap.c
new file mode 100644
index 00000000000..be8adbab0a1
--- /dev/null
+++ b/mysys/my_libwrap.c
@@ -0,0 +1,42 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/*
+ This is needed to be able to compile with original libwrap header
+ files that don't have the prototypes
+*/
+
+#include <my_global.h>
+#include <my_libwrap.h>
+
+#ifdef HAVE_LIBWRAP
+
+void my_fromhost(struct request_info *req)
+{
+ fromhost(req);
+}
+
+int my_hosts_access(struct request_info *req)
+{
+ return hosts_access(req);
+}
+
+char *my_eval_client(struct request_info *req)
+{
+ return eval_client(req);
+}
+
+#endif /* HAVE_LIBWRAP */
diff --git a/mysys/my_malloc.c b/mysys/my_malloc.c
index 3f601a42dc9..f33db2655c4 100644
--- a/mysys/my_malloc.c
+++ b/mysys/my_malloc.c
@@ -83,7 +83,7 @@ char *my_strdup(const char *from, myf my_flags)
}
-char *my_strdup_with_length(const byte *from, uint length, myf my_flags)
+char *my_strdup_with_length(const char *from, uint length, myf my_flags)
{
gptr ptr;
if ((ptr=my_malloc(length+1,my_flags)) != 0)
diff --git a/mysys/safemalloc.c b/mysys/safemalloc.c
index 6cdf98c5f5f..f6d6644859e 100644
--- a/mysys/safemalloc.c
+++ b/mysys/safemalloc.c
@@ -525,7 +525,7 @@ char *_my_strdup(const char *from, const char *filename, uint lineno,
} /* _my_strdup */
-char *_my_strdup_with_length(const byte *from, uint length,
+char *_my_strdup_with_length(const char *from, uint length,
const char *filename, uint lineno,
myf MyFlags)
{
diff --git a/mysys/thr_lock.c b/mysys/thr_lock.c
index f5a8b618949..51df50a4926 100644
--- a/mysys/thr_lock.c
+++ b/mysys/thr_lock.c
@@ -204,6 +204,8 @@ static void check_locks(THR_LOCK *lock, const char *where,
{
if ((int) data->type == (int) TL_READ_NO_INSERT)
count++;
+ /* Protect against infinite loop. */
+ DBUG_ASSERT(count <= lock->read_no_write_count);
}
if (count != lock->read_no_write_count)
{
diff --git a/ndb/include/kernel/GlobalSignalNumbers.h b/ndb/include/kernel/GlobalSignalNumbers.h
index 98b6ce7d949..ca82806f4b1 100644
--- a/ndb/include/kernel/GlobalSignalNumbers.h
+++ b/ndb/include/kernel/GlobalSignalNumbers.h
@@ -507,16 +507,12 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
#define GSN_TEST_ORD 407
#define GSN_TESTSIG 408
#define GSN_TIME_SIGNAL 409
-/* 410 unused */
-/* 411 unused */
-/* 412 unused */
#define GSN_TUP_ABORTREQ 414
#define GSN_TUP_ADD_ATTCONF 415
#define GSN_TUP_ADD_ATTRREF 416
#define GSN_TUP_ADD_ATTRREQ 417
#define GSN_TUP_ATTRINFO 418
#define GSN_TUP_COMMITREQ 419
-/* 420 unused */
#define GSN_TUP_LCPCONF 421
#define GSN_TUP_LCPREF 422
#define GSN_TUP_LCPREQ 423
@@ -611,8 +607,6 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
#define GSN_WAIT_GCP_REF 500
#define GSN_WAIT_GCP_CONF 501
-/* 502 not used */
-
/**
* Trigger and index signals
*/
@@ -682,6 +676,8 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
#define GSN_BACKUP_FRAGMENT_REF 546
#define GSN_BACKUP_FRAGMENT_CONF 547
+#define GSN_BACKUP_FRAGMENT_COMPLETE_REP 575
+
#define GSN_STOP_BACKUP_REQ 548
#define GSN_STOP_BACKUP_REF 549
#define GSN_STOP_BACKUP_CONF 550
@@ -731,7 +727,7 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
#define GSN_SUB_STOP_REQ 572
#define GSN_SUB_STOP_REF 573
#define GSN_SUB_STOP_CONF 574
-/* 575 unused */
+/* 575 used */
#define GSN_SUB_CREATE_REQ 576
#define GSN_SUB_CREATE_REF 577
#define GSN_SUB_CREATE_CONF 578
@@ -938,4 +934,10 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
#define GSN_ACC_LOCKREQ 711
#define GSN_READ_PSUEDO_REQ 712
+/* DICT LOCK signals */
+#define GSN_DICT_LOCK_REQ 410
+#define GSN_DICT_LOCK_CONF 411
+#define GSN_DICT_LOCK_REF 412
+#define GSN_DICT_UNLOCK_ORD 420
+
#endif
diff --git a/ndb/include/kernel/signaldata/AlterTable.hpp b/ndb/include/kernel/signaldata/AlterTable.hpp
index 16c9eb204c9..f5006c27fdb 100644
--- a/ndb/include/kernel/signaldata/AlterTable.hpp
+++ b/ndb/include/kernel/signaldata/AlterTable.hpp
@@ -114,6 +114,7 @@ public:
InvalidTableVersion = 241,
DropInProgress = 283,
Busy = 701,
+ BusyWithNR = 711,
NotMaster = 702,
InvalidFormat = 703,
AttributeNameTooLong = 704,
diff --git a/ndb/include/kernel/signaldata/BackupContinueB.hpp b/ndb/include/kernel/signaldata/BackupContinueB.hpp
index d3d3f79f310..fe3f48444ec 100644
--- a/ndb/include/kernel/signaldata/BackupContinueB.hpp
+++ b/ndb/include/kernel/signaldata/BackupContinueB.hpp
@@ -31,7 +31,8 @@ private:
BUFFER_UNDERFLOW = 1,
BUFFER_FULL_SCAN = 2,
BUFFER_FULL_FRAG_COMPLETE = 3,
- BUFFER_FULL_META = 4
+ BUFFER_FULL_META = 4,
+ BACKUP_FRAGMENT_INFO = 5
};
};
diff --git a/ndb/include/kernel/signaldata/BackupImpl.hpp b/ndb/include/kernel/signaldata/BackupImpl.hpp
index 298440ad377..07ab5bc543b 100644
--- a/ndb/include/kernel/signaldata/BackupImpl.hpp
+++ b/ndb/include/kernel/signaldata/BackupImpl.hpp
@@ -258,15 +258,31 @@ class BackupFragmentConf {
friend bool printBACKUP_FRAGMENT_CONF(FILE *, const Uint32 *, Uint32, Uint16);
public:
- STATIC_CONST( SignalLength = 6 );
+ STATIC_CONST( SignalLength = 8 );
private:
Uint32 backupId;
Uint32 backupPtr;
Uint32 tableId;
Uint32 fragmentNo;
- Uint32 noOfRecords;
- Uint32 noOfBytes;
+ Uint32 noOfRecordsLow;
+ Uint32 noOfBytesLow;
+ Uint32 noOfRecordsHigh;
+ Uint32 noOfBytesHigh;
+};
+
+class BackupFragmentCompleteRep {
+public:
+ STATIC_CONST( SignalLength = 8 );
+
+ Uint32 backupId;
+ Uint32 backupPtr;
+ Uint32 tableId;
+ Uint32 fragmentNo;
+ Uint32 noOfTableRowsLow;
+ Uint32 noOfFragmentRowsLow;
+ Uint32 noOfTableRowsHigh;
+ Uint32 noOfFragmentRowsHigh;
};
class StopBackupReq {
diff --git a/ndb/include/kernel/signaldata/BackupSignalData.hpp b/ndb/include/kernel/signaldata/BackupSignalData.hpp
index e1b8c6203a1..9e34ea3a211 100644
--- a/ndb/include/kernel/signaldata/BackupSignalData.hpp
+++ b/ndb/include/kernel/signaldata/BackupSignalData.hpp
@@ -201,17 +201,19 @@ class BackupCompleteRep {
friend bool printBACKUP_COMPLETE_REP(FILE *, const Uint32 *, Uint32, Uint16);
public:
- STATIC_CONST( SignalLength = 8 + NdbNodeBitmask::Size );
+ STATIC_CONST( SignalLength = 10 + NdbNodeBitmask::Size );
private:
Uint32 senderData;
Uint32 backupId;
Uint32 startGCP;
Uint32 stopGCP;
- Uint32 noOfBytes;
- Uint32 noOfRecords;
+ Uint32 noOfBytesLow;
+ Uint32 noOfRecordsLow;
Uint32 noOfLogBytes;
Uint32 noOfLogRecords;
NdbNodeBitmask nodes;
+ Uint32 noOfBytesHigh;
+ Uint32 noOfRecordsHigh;
};
/**
diff --git a/ndb/include/kernel/signaldata/CreateTable.hpp b/ndb/include/kernel/signaldata/CreateTable.hpp
index 481b323fdb0..7d3189cc126 100644
--- a/ndb/include/kernel/signaldata/CreateTable.hpp
+++ b/ndb/include/kernel/signaldata/CreateTable.hpp
@@ -77,6 +77,7 @@ public:
enum ErrorCode {
NoError = 0,
Busy = 701,
+ BusyWithNR = 711,
NotMaster = 702,
InvalidFormat = 703,
AttributeNameTooLong = 704,
diff --git a/ndb/include/kernel/signaldata/DictLock.hpp b/ndb/include/kernel/signaldata/DictLock.hpp
new file mode 100644
index 00000000000..3e29d762962
--- /dev/null
+++ b/ndb/include/kernel/signaldata/DictLock.hpp
@@ -0,0 +1,78 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef DICT_LOCK_HPP
+#define DICT_LOCK_HPP
+
+#include "SignalData.hpp"
+
+// see comments in Dbdict.hpp
+
+class DictLockReq {
+ friend class Dbdict;
+ friend class Dbdih;
+public:
+ STATIC_CONST( SignalLength = 3 );
+ enum LockType {
+ NoLock = 0,
+ NodeRestartLock = 1
+ };
+private:
+ Uint32 userPtr;
+ Uint32 lockType;
+ Uint32 userRef;
+};
+
+class DictLockConf {
+ friend class Dbdict;
+ friend class Dbdih;
+public:
+ STATIC_CONST( SignalLength = 3 );
+private:
+ Uint32 userPtr;
+ Uint32 lockType;
+ Uint32 lockPtr;
+};
+
+class DictLockRef {
+ friend class Dbdict;
+ friend class Dbdih;
+public:
+ STATIC_CONST( SignalLength = 3 );
+ enum ErrorCode {
+ NotMaster = 1,
+ InvalidLockType = 2,
+ BadUserRef = 3,
+ TooLate = 4,
+ TooManyRequests = 5
+ };
+private:
+ Uint32 userPtr;
+ Uint32 lockType;
+ Uint32 errorCode;
+};
+
+class DictUnlockOrd {
+ friend class Dbdict;
+ friend class Dbdih;
+public:
+ STATIC_CONST( SignalLength = 2 );
+private:
+ Uint32 lockPtr;
+ Uint32 lockType;
+};
+
+#endif
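
For orientation, a hedged sketch of how the requesting side could fill in DictLockReq. The real sender is Dbdih::sendDictLockReq(), which this patch only declares elsewhere, so the member-function framing, calcDictBlockRef() and cmasterNodeId are assumptions here.

  // Sketch only -- must live in a friend class (Dbdih) to reach the private fields.
  void
  Dbdih::sendDictLockReq_sketch(Signal* signal, Uint32 lockSlavePtrI)
  {
    DictLockReq* req = (DictLockReq*)&signal->theData[0];

    req->userPtr  = lockSlavePtrI;                 // echoed back in CONF/REF
    req->lockType = DictLockReq::NodeRestartLock;  // the only lock type defined so far
    req->userRef  = reference();                   // master verifies this is the sender

    sendSignal(calcDictBlockRef(cmasterNodeId), GSN_DICT_LOCK_REQ, signal,
               DictLockReq::SignalLength, JBB);
  }
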
diff --git a/ndb/include/kernel/signaldata/DictTabInfo.hpp b/ndb/include/kernel/signaldata/DictTabInfo.hpp
index bc4817f0cf3..0a7f6aa3fb3 100644
--- a/ndb/include/kernel/signaldata/DictTabInfo.hpp
+++ b/ndb/include/kernel/signaldata/DictTabInfo.hpp
@@ -117,9 +117,16 @@ public:
CustomTriggerId = 25,
FrmLen = 26,
FrmData = 27,
+
FragmentCount = 128, // No of fragments in table (!fragment replicas)
FragmentDataLen = 129,
FragmentData = 130, // CREATE_FRAGMENTATION reply
+
+ MaxRowsLow = 139,
+ MaxRowsHigh = 140,
+ MinRowsLow = 143,
+ MinRowsHigh = 144,
+
TableEnd = 999,
AttributeName = 1000, // String, Mandatory
@@ -263,6 +270,10 @@ public:
Uint32 FragmentCount;
Uint32 FragmentDataLen;
Uint16 FragmentData[(MAX_FRAGMENT_DATA_BYTES+1)/2];
+ Uint32 MaxRowsLow;
+ Uint32 MaxRowsHigh;
+ Uint32 MinRowsLow;
+ Uint32 MinRowsHigh;
void init();
};
diff --git a/ndb/include/kernel/signaldata/DropTable.hpp b/ndb/include/kernel/signaldata/DropTable.hpp
index cae6aff8754..e762446d2b8 100644
--- a/ndb/include/kernel/signaldata/DropTable.hpp
+++ b/ndb/include/kernel/signaldata/DropTable.hpp
@@ -53,6 +53,7 @@ public:
enum ErrorCode {
Busy = 701,
+ BusyWithNR = 711,
NotMaster = 702,
NoSuchTable = 709,
InvalidTableVersion = 241,
diff --git a/ndb/include/kernel/signaldata/LqhFrag.hpp b/ndb/include/kernel/signaldata/LqhFrag.hpp
index 13dfafcc653..72c1537854c 100644
--- a/ndb/include/kernel/signaldata/LqhFrag.hpp
+++ b/ndb/include/kernel/signaldata/LqhFrag.hpp
@@ -104,7 +104,7 @@ class LqhFragReq {
friend bool printLQH_FRAG_REQ(FILE *, const Uint32 *, Uint32, Uint16);
public:
- STATIC_CONST( SignalLength = 25 );
+ STATIC_CONST( SignalLength = 23 );
enum RequestInfo {
CreateInRunning = 0x8000000,
@@ -116,27 +116,32 @@ private:
Uint32 senderRef;
Uint32 fragmentId;
Uint32 requestInfo;
- Uint32 tableId;
- Uint32 localKeyLength;
Uint32 maxLoadFactor;
Uint32 minLoadFactor;
Uint32 kValue;
- Uint32 lh3DistrBits;
- Uint32 lh3PageBits;
- Uint32 noOfAttributes;
- Uint32 noOfNullAttributes;
- Uint32 noOfPagesToPreAllocate;
Uint32 schemaVersion;
- Uint32 keyLength;
Uint32 nextLCP;
- Uint32 noOfKeyAttr;
- Uint32 noOfNewAttr; // noOfCharsets in upper half
- Uint32 checksumIndicator;
- Uint32 noOfAttributeGroups;
- Uint32 GCPIndicator;
+ Uint16 noOfNewAttr;
+ Uint16 noOfCharsets;
Uint32 startGci;
Uint32 tableType; // DictTabInfo::TableType
Uint32 primaryTableId; // table of index or RNIL
+ Uint16 tableId;
+ Uint16 localKeyLength;
+ Uint16 lh3DistrBits;
+ Uint16 lh3PageBits;
+ Uint16 noOfAttributes;
+ Uint16 noOfNullAttributes;
+ Uint16 noOfPagesToPreAllocate;
+ Uint16 keyLength;
+ Uint16 noOfKeyAttr;
+ Uint8 checksumIndicator;
+ Uint8 GCPIndicator;
+ Uint32 noOfAttributeGroups;
+ Uint32 maxRowsLow;
+ Uint32 maxRowsHigh;
+ Uint32 minRowsLow;
+ Uint32 minRowsHigh;
};
class LqhFragConf {
diff --git a/ndb/include/kernel/signaldata/TupFrag.hpp b/ndb/include/kernel/signaldata/TupFrag.hpp
index 5fb9d7bcf42..c9f2ad5382f 100644
--- a/ndb/include/kernel/signaldata/TupFrag.hpp
+++ b/ndb/include/kernel/signaldata/TupFrag.hpp
@@ -30,7 +30,7 @@ class TupFragReq {
friend class Dblqh;
friend class Dbtup;
public:
- STATIC_CONST( SignalLength = 14 );
+ STATIC_CONST( SignalLength = 17 );
private:
Uint32 userPtr;
Uint32 userRef;
@@ -38,7 +38,18 @@ private:
Uint32 tableId;
Uint32 noOfAttr;
Uint32 fragId;
- Uint32 todo[8];
+ Uint32 maxRowsLow;
+ Uint32 maxRowsHigh;
+ Uint32 minRowsLow;
+ Uint32 minRowsHigh;
+ Uint32 noOfNullAttr;
+ Uint32 schemaVersion;
+ Uint32 noOfKeyAttr;
+ Uint16 noOfNewAttr;
+ Uint16 noOfCharsets;
+ Uint32 checksumIndicator;
+ Uint32 noOfAttributeGroups;
+ Uint32 globalCheckpointIdIndicator;
};
class TupFragConf {
diff --git a/ndb/include/ndb_version.h.in b/ndb/include/ndb_version.h.in
index 38b72306d03..7e878803f46 100644
--- a/ndb/include/ndb_version.h.in
+++ b/ndb/include/ndb_version.h.in
@@ -60,5 +60,7 @@ char ndb_version_string_buf[NDB_VERSION_STRING_BUF_SZ];
#define NDBD_INCL_NODECONF_VERSION_4 MAKE_VERSION(4,1,17)
#define NDBD_INCL_NODECONF_VERSION_5 MAKE_VERSION(5,0,18)
+#define NDBD_DICT_LOCK_VERSION_5 MAKE_VERSION(5,0,23)
+
#endif
diff --git a/ndb/include/ndbapi/NdbDictionary.hpp b/ndb/include/ndbapi/NdbDictionary.hpp
index 1413931035d..e67a0253096 100644
--- a/ndb/include/ndbapi/NdbDictionary.hpp
+++ b/ndb/include/ndbapi/NdbDictionary.hpp
@@ -722,6 +722,20 @@ public:
*/
void setObjectType(Object::Type type);
+ /**
+ * Set/Get Maximum number of rows in table (only used to calculate
+ * number of partitions).
+ */
+ void setMaxRows(Uint64 maxRows);
+ Uint64 getMaxRows() const;
+
+ /**
+ * Set/Get Minimum number of rows in table (only used to calculate
+ * number of partitions).
+ */
+ void setMinRows(Uint64 minRows);
+ Uint64 getMinRows() const;
+
/** @} *******************************************************************/
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
diff --git a/ndb/src/common/debugger/SignalLoggerManager.cpp b/ndb/src/common/debugger/SignalLoggerManager.cpp
index d8710d2058f..67e13dc805a 100644
--- a/ndb/src/common/debugger/SignalLoggerManager.cpp
+++ b/ndb/src/common/debugger/SignalLoggerManager.cpp
@@ -139,7 +139,7 @@ SignalLoggerManager::log(LogMode logMode, const char * params)
} else {
for (int i = 0; i < count; ++i){
BlockNumber number = getBlockNo(blocks[i]);
- cnt += log(SLM_ON, number-MIN_BLOCK_NO, logMode);
+ cnt += log(SLM_ON, number, logMode);
}
}
for(int i = 0; i<count; i++){
diff --git a/ndb/src/common/debugger/signaldata/BackupImpl.cpp b/ndb/src/common/debugger/signaldata/BackupImpl.cpp
index e9b0188d93b..855db0834bc 100644
--- a/ndb/src/common/debugger/signaldata/BackupImpl.cpp
+++ b/ndb/src/common/debugger/signaldata/BackupImpl.cpp
@@ -100,8 +100,10 @@ printBACKUP_FRAGMENT_CONF(FILE * out, const Uint32 * data, Uint32 l, Uint16 b){
BackupFragmentConf* sig = (BackupFragmentConf*)data;
fprintf(out, " backupPtr: %d backupId: %d\n",
sig->backupPtr, sig->backupId);
- fprintf(out, " tableId: %d fragmentNo: %d records: %d bytes: %d\n",
- sig->tableId, sig->fragmentNo, sig->noOfRecords, sig->noOfBytes);
+ fprintf(out, " tableId: %d fragmentNo: %d records: %llu bytes: %llu\n",
+ sig->tableId, sig->fragmentNo,
+ sig->noOfRecordsLow + (((Uint64)sig->noOfRecordsHigh) << 32),
+ sig->noOfBytesLow + (((Uint64)sig->noOfBytesHigh) << 32));
return true;
}
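
All of the noOfRecords/noOfBytes conversions in this patch follow the same split-and-recombine pattern; a hedged helper sketch (the helper names are illustrative, not from the patch):

  #include <ndb_global.h>   /* Uint32 / Uint64, as used throughout the NDB code */

  /* Split a 64-bit counter into the Low/High words carried by the signals. */
  static inline void split64(Uint64 v, Uint32 & low, Uint32 & high)
  {
    low  = (Uint32)(v & 0xFFFFFFFF);
    high = (Uint32)(v >> 32);
  }

  /* Recombine them, exactly as printBACKUP_FRAGMENT_CONF() above does. */
  static inline Uint64 join64(Uint32 low, Uint32 high)
  {
    return low + (((Uint64)high) << 32);
  }
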
diff --git a/ndb/src/common/debugger/signaldata/BackupSignalData.cpp b/ndb/src/common/debugger/signaldata/BackupSignalData.cpp
index 4b0a0e07b66..27fed22ac72 100644
--- a/ndb/src/common/debugger/signaldata/BackupSignalData.cpp
+++ b/ndb/src/common/debugger/signaldata/BackupSignalData.cpp
@@ -72,11 +72,11 @@ printBACKUP_ABORT_REP(FILE * out, const Uint32 * data, Uint32 len, Uint16 bno){
bool
printBACKUP_COMPLETE_REP(FILE * out, const Uint32 * data, Uint32 len, Uint16 b){
BackupCompleteRep* sig = (BackupCompleteRep*)data;
- fprintf(out, " senderData: %d backupId: %d records: %d bytes: %d\n",
+ fprintf(out, " senderData: %d backupId: %d records: %llu bytes: %llu\n",
sig->senderData,
sig->backupId,
- sig->noOfRecords,
- sig->noOfBytes);
+ sig->noOfRecordsLow + (((Uint64)sig->noOfRecordsHigh) << 32),
+ sig->noOfBytesLow + (((Uint64)sig->noOfBytesHigh) << 32));
return true;
}
diff --git a/ndb/src/common/debugger/signaldata/DictTabInfo.cpp b/ndb/src/common/debugger/signaldata/DictTabInfo.cpp
index 43c129347c0..a1d8d82474d 100644
--- a/ndb/src/common/debugger/signaldata/DictTabInfo.cpp
+++ b/ndb/src/common/debugger/signaldata/DictTabInfo.cpp
@@ -48,6 +48,10 @@ DictTabInfo::TableMapping[] = {
DTIMAP(Table, FragmentCount, FragmentCount),
DTIMAP2(Table, FragmentDataLen, FragmentDataLen, 0, MAX_FRAGMENT_DATA_BYTES),
DTIMAPB(Table, FragmentData, FragmentData, 0, MAX_FRAGMENT_DATA_BYTES, FragmentDataLen),
+ DTIMAP(Table, MaxRowsLow, MaxRowsLow),
+ DTIMAP(Table, MaxRowsHigh, MaxRowsHigh),
+ DTIMAP(Table, MinRowsLow, MinRowsLow),
+ DTIMAP(Table, MinRowsHigh, MinRowsHigh),
DTIBREAK(AttributeName)
};
@@ -124,6 +128,10 @@ DictTabInfo::Table::init(){
FragmentCount = 0;
FragmentDataLen = 0;
memset(FragmentData, 0, sizeof(FragmentData));
+ MaxRowsLow = 0;
+ MaxRowsHigh = 0;
+ MinRowsLow = 0;
+ MinRowsHigh = 0;
}
void
diff --git a/ndb/src/common/debugger/signaldata/LqhFrag.cpp b/ndb/src/common/debugger/signaldata/LqhFrag.cpp
index 6d727959a67..3175582c3a2 100644
--- a/ndb/src/common/debugger/signaldata/LqhFrag.cpp
+++ b/ndb/src/common/debugger/signaldata/LqhFrag.cpp
@@ -37,8 +37,10 @@ printLQH_FRAG_REQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 recB
fprintf(output, " noOfAttributes: %d noOfNullAttributes: %d keyLength: %d\n",
sig->noOfAttributes, sig->noOfNullAttributes, sig->keyLength);
- fprintf(output, " noOfPagesToPreAllocate: %d schemaVersion: %d nextLCP: %d\n",
- sig->noOfPagesToPreAllocate, sig->schemaVersion, sig->nextLCP);
+ fprintf(output, " maxRowsLow/High: %u/%u minRowsLow/High: %u/%u\n",
+ sig->maxRowsLow, sig->maxRowsHigh, sig->minRowsLow, sig->minRowsHigh);
+ fprintf(output, " schemaVersion: %d nextLCP: %d\n",
+ sig->schemaVersion, sig->nextLCP);
return true;
}
diff --git a/ndb/src/common/debugger/signaldata/SignalNames.cpp b/ndb/src/common/debugger/signaldata/SignalNames.cpp
index 984d28819c0..5162679017a 100644
--- a/ndb/src/common/debugger/signaldata/SignalNames.cpp
+++ b/ndb/src/common/debugger/signaldata/SignalNames.cpp
@@ -647,6 +647,12 @@ const GsnName SignalNames [] = {
,{ GSN_TUX_MAINT_REF, "TUX_MAINT_REF" }
,{ GSN_TUX_BOUND_INFO, "TUX_BOUND_INFO" }
,{ GSN_ACC_LOCKREQ, "ACC_LOCKREQ" }
+
+ /* DICT LOCK */
+ ,{ GSN_DICT_LOCK_REQ, "DICT_LOCK_REQ" }
+ ,{ GSN_DICT_LOCK_CONF, "DICT_LOCK_CONF" }
+ ,{ GSN_DICT_LOCK_REF, "DICT_LOCK_REF" }
+ ,{ GSN_DICT_UNLOCK_ORD, "DICT_UNLOCK_ORD" }
};
const unsigned short NO_OF_SIGNAL_NAMES = sizeof(SignalNames)/sizeof(GsnName);
diff --git a/ndb/src/kernel/blocks/ERROR_codes.txt b/ndb/src/kernel/blocks/ERROR_codes.txt
index f8e3d11f222..ddb99cb6b56 100644
--- a/ndb/src/kernel/blocks/ERROR_codes.txt
+++ b/ndb/src/kernel/blocks/ERROR_codes.txt
@@ -5,7 +5,7 @@ Next DBACC 3002
Next DBTUP 4013
Next DBLQH 5043
Next DBDICT 6007
-Next DBDIH 7174
+Next DBDIH 7177
Next DBTC 8037
Next CMVMI 9000
Next BACKUP 10022
@@ -312,6 +312,10 @@ Test Crashes in handling node restarts
7170: Crash when receiving START_PERMREF (InitialStartRequired)
+7174: Crash starting node before sending DICT_LOCK_REQ
+7175: Master sends one fake START_PERMREF (ZNODE_ALREADY_STARTING_ERROR)
+7176: Slave NR pretends master does not support DICT lock (rolling upgrade)
+
DICT:
6000 Crash during NR when receiving DICTSTARTREQ
6001 Crash during NR when receiving SCHEMA_INFO
diff --git a/ndb/src/kernel/blocks/backup/Backup.cpp b/ndb/src/kernel/blocks/backup/Backup.cpp
index f9089355475..43c1de5e2b3 100644
--- a/ndb/src/kernel/blocks/backup/Backup.cpp
+++ b/ndb/src/kernel/blocks/backup/Backup.cpp
@@ -266,6 +266,65 @@ Backup::execCONTINUEB(Signal* signal)
const Uint32 Tdata2 = signal->theData[2];
switch(Tdata0) {
+ case BackupContinueB::BACKUP_FRAGMENT_INFO:
+ {
+ const Uint32 ptr_I = Tdata1;
+ Uint32 tabPtr_I = Tdata2;
+ Uint32 fragPtr_I = signal->theData[3];
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, ptr_I);
+ TablePtr tabPtr;
+ ptr.p->tables.getPtr(tabPtr, tabPtr_I);
+ FragmentPtr fragPtr;
+ tabPtr.p->fragments.getPtr(fragPtr, fragPtr_I);
+
+ BackupFilePtr filePtr;
+ ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr);
+
+ const Uint32 sz = sizeof(BackupFormat::CtlFile::FragmentInfo) >> 2;
+ Uint32 * dst;
+ if (!filePtr.p->operation.dataBuffer.getWritePtr(&dst, sz))
+ {
+ sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, 100, 4);
+ return;
+ }
+
+ BackupFormat::CtlFile::FragmentInfo * fragInfo =
+ (BackupFormat::CtlFile::FragmentInfo*)dst;
+ fragInfo->SectionType = htonl(BackupFormat::FRAGMENT_INFO);
+ fragInfo->SectionLength = htonl(sz);
+ fragInfo->TableId = htonl(fragPtr.p->tableId);
+ fragInfo->FragmentNo = htonl(fragPtr_I);
+ fragInfo->NoOfRecordsLow = htonl(fragPtr.p->noOfRecords & 0xFFFFFFFF);
+ fragInfo->NoOfRecordsHigh = htonl(fragPtr.p->noOfRecords >> 32);
+ fragInfo->FilePosLow = htonl(0 & 0xFFFFFFFF);
+ fragInfo->FilePosHigh = htonl(0 >> 32);
+
+ filePtr.p->operation.dataBuffer.updateWritePtr(sz);
+
+ fragPtr_I++;
+ if (fragPtr_I == tabPtr.p->fragments.getSize())
+ {
+ signal->theData[0] = tabPtr.p->tableId;
+ signal->theData[1] = 0; // unlock
+ EXECUTE_DIRECT(DBDICT, GSN_BACKUP_FRAGMENT_REQ, signal, 2);
+
+ fragPtr_I = 0;
+ ptr.p->tables.next(tabPtr);
+ if ((tabPtr_I = tabPtr.i) == RNIL)
+ {
+ closeFiles(signal, ptr);
+ return;
+ }
+ }
+ signal->theData[0] = BackupContinueB::BACKUP_FRAGMENT_INFO;
+ signal->theData[1] = ptr_I;
+ signal->theData[2] = tabPtr_I;
+ signal->theData[3] = fragPtr_I;
+ sendSignal(BACKUP_REF, GSN_CONTINUEB, signal, 4, JBB);
+ return;
+ }
case BackupContinueB::START_FILE_THREAD:
case BackupContinueB::BUFFER_UNDERFLOW:
{
@@ -455,7 +514,7 @@ Backup::findTable(const BackupRecordPtr & ptr,
return false;
}
-static Uint32 xps(Uint32 x, Uint64 ms)
+static Uint32 xps(Uint64 x, Uint64 ms)
{
float fx = x;
float fs = ms;
@@ -469,9 +528,9 @@ static Uint32 xps(Uint32 x, Uint64 ms)
}
struct Number {
- Number(Uint32 r) { val = r;}
- Number & operator=(Uint32 r) { val = r; return * this; }
- Uint32 val;
+ Number(Uint64 r) { val = r;}
+ Number & operator=(Uint64 r) { val = r; return * this; }
+ Uint64 val;
};
NdbOut &
@@ -545,8 +604,10 @@ Backup::execBACKUP_COMPLETE_REP(Signal* signal)
startTime = NdbTick_CurrentMillisecond() - startTime;
ndbout_c("Backup %d has completed", rep->backupId);
- const Uint32 bytes = rep->noOfBytes;
- const Uint32 records = rep->noOfRecords;
+ const Uint64 bytes =
+ rep->noOfBytesLow + (((Uint64)rep->noOfBytesHigh) << 32);
+ const Uint64 records =
+ rep->noOfRecordsLow + (((Uint64)rep->noOfRecordsHigh) << 32);
Number rps = xps(records, startTime);
Number bps = xps(bytes, startTime);
@@ -1905,8 +1966,10 @@ Backup::execBACKUP_FRAGMENT_CONF(Signal* signal)
const Uint32 tableId = conf->tableId;
const Uint32 fragmentNo = conf->fragmentNo;
const Uint32 nodeId = refToNode(signal->senderBlockRef());
- const Uint32 noOfBytes = conf->noOfBytes;
- const Uint32 noOfRecords = conf->noOfRecords;
+ const Uint64 noOfBytes =
+ conf->noOfBytesLow + (((Uint64)conf->noOfBytesHigh) << 32);
+ const Uint64 noOfRecords =
+ conf->noOfRecordsLow + (((Uint64)conf->noOfRecordsHigh) << 32);
BackupRecordPtr ptr;
c_backupPool.getPtr(ptr, ptrI);
@@ -1918,9 +1981,13 @@ Backup::execBACKUP_FRAGMENT_CONF(Signal* signal)
TablePtr tabPtr;
ndbrequire(findTable(ptr, tabPtr, tableId));
+ tabPtr.p->noOfRecords += noOfRecords;
+
FragmentPtr fragPtr;
tabPtr.p->fragments.getPtr(fragPtr, fragmentNo);
+ fragPtr.p->noOfRecords = noOfRecords;
+
ndbrequire(fragPtr.p->scanned == 0);
ndbrequire(fragPtr.p->scanning == 1);
ndbrequire(fragPtr.p->node == nodeId);
@@ -1944,6 +2011,24 @@ Backup::execBACKUP_FRAGMENT_CONF(Signal* signal)
}
else
{
+ NodeBitmask nodes = ptr.p->nodes;
+ nodes.clear(getOwnNodeId());
+ if (!nodes.isclear())
+ {
+ BackupFragmentCompleteRep *rep =
+ (BackupFragmentCompleteRep*)signal->getDataPtrSend();
+ rep->backupId = ptr.p->backupId;
+ rep->backupPtr = ptr.i;
+ rep->tableId = tableId;
+ rep->fragmentNo = fragmentNo;
+ rep->noOfTableRowsLow = (Uint32)(tabPtr.p->noOfRecords & 0xFFFFFFFF);
+ rep->noOfTableRowsHigh = (Uint32)(tabPtr.p->noOfRecords >> 32);
+ rep->noOfFragmentRowsLow = (Uint32)(noOfRecords & 0xFFFFFFFF);
+ rep->noOfFragmentRowsHigh = (Uint32)(noOfRecords >> 32);
+ NodeReceiverGroup rg(BACKUP, ptr.p->nodes);
+ sendSignal(rg, GSN_BACKUP_FRAGMENT_COMPLETE_REP, signal,
+ BackupFragmentCompleteRep::SignalLength, JBB);
+ }
nextFragment(signal, ptr);
}
}
@@ -2006,6 +2091,29 @@ err:
execABORT_BACKUP_ORD(signal);
}
+void
+Backup::execBACKUP_FRAGMENT_COMPLETE_REP(Signal* signal)
+{
+ jamEntry();
+ BackupFragmentCompleteRep * rep =
+ (BackupFragmentCompleteRep*)signal->getDataPtr();
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, rep->backupPtr);
+
+ TablePtr tabPtr;
+ ndbrequire(findTable(ptr, tabPtr, rep->tableId));
+
+ tabPtr.p->noOfRecords =
+ rep->noOfTableRowsLow + (((Uint64)rep->noOfTableRowsHigh) << 32);
+
+ FragmentPtr fragPtr;
+ tabPtr.p->fragments.getPtr(fragPtr, rep->fragmentNo);
+
+ fragPtr.p->noOfRecords =
+ rep->noOfFragmentRowsLow + (((Uint64)rep->noOfFragmentRowsHigh) << 32);
+}
+
/*****************************************************************************
*
* Master functionality - Drop triggers
@@ -2206,8 +2314,10 @@ Backup::stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId)
rep->senderData = ptr.p->clientData;
rep->startGCP = ptr.p->startGCP;
rep->stopGCP = ptr.p->stopGCP;
- rep->noOfBytes = ptr.p->noOfBytes;
- rep->noOfRecords = ptr.p->noOfRecords;
+ rep->noOfBytesLow = (Uint32)(ptr.p->noOfBytes & 0xFFFFFFFF);
+ rep->noOfRecordsLow = (Uint32)(ptr.p->noOfRecords & 0xFFFFFFFF);
+ rep->noOfBytesHigh = (Uint32)(ptr.p->noOfBytes >> 32);
+ rep->noOfRecordsHigh = (Uint32)(ptr.p->noOfRecords >> 32);
rep->noOfLogBytes = ptr.p->noOfLogBytes;
rep->noOfLogRecords = ptr.p->noOfLogRecords;
rep->nodes = ptr.p->nodes;
@@ -2220,12 +2330,14 @@ Backup::stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId)
signal->theData[2] = ptr.p->backupId;
signal->theData[3] = ptr.p->startGCP;
signal->theData[4] = ptr.p->stopGCP;
- signal->theData[5] = ptr.p->noOfBytes;
- signal->theData[6] = ptr.p->noOfRecords;
+ signal->theData[5] = (Uint32)(ptr.p->noOfBytes & 0xFFFFFFFF);
+ signal->theData[6] = (Uint32)(ptr.p->noOfRecords & 0xFFFFFFFF);
signal->theData[7] = ptr.p->noOfLogBytes;
signal->theData[8] = ptr.p->noOfLogRecords;
ptr.p->nodes.copyto(NdbNodeBitmask::Size, signal->theData+9);
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 9+NdbNodeBitmask::Size, JBB);
+ signal->theData[9+NdbNodeBitmask::Size] = (Uint32)(ptr.p->noOfBytes >> 32);
+ signal->theData[10+NdbNodeBitmask::Size] = (Uint32)(ptr.p->noOfRecords >> 32);
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 11+NdbNodeBitmask::Size, JBB);
}
else
{
@@ -2988,6 +3100,7 @@ Backup::parseTableDescription(Signal* signal, BackupRecordPtr ptr, Uint32 len)
/**
* Initialize table object
*/
+ tabPtr.p->noOfRecords = 0;
tabPtr.p->schemaVersion = tmpTab.TableVersion;
tabPtr.p->noOfAttributes = tmpTab.NoOfAttributes;
tabPtr.p->noOfNull = 0;
@@ -3695,8 +3808,10 @@ Backup::fragmentCompleted(Signal* signal, BackupFilePtr filePtr)
conf->backupPtr = ptr.i;
conf->tableId = filePtr.p->tableId;
conf->fragmentNo = filePtr.p->fragmentNo;
- conf->noOfRecords = op.noOfRecords;
- conf->noOfBytes = op.noOfBytes;
+ conf->noOfRecordsLow = (Uint32)(op.noOfRecords & 0xFFFFFFFF);
+ conf->noOfRecordsHigh = (Uint32)(op.noOfRecords >> 32);
+ conf->noOfBytesLow = (Uint32)(op.noOfBytes & 0xFFFFFFFF);
+ conf->noOfBytesHigh = (Uint32)(op.noOfBytes >> 32);
sendSignal(ptr.p->masterRef, GSN_BACKUP_FRAGMENT_CONF, signal,
BackupFragmentConf::SignalLength, JBB);
@@ -4123,20 +4238,18 @@ Backup::execSTOP_BACKUP_REQ(Signal* signal)
gcp->StartGCP = htonl(startGCP);
gcp->StopGCP = htonl(stopGCP - 1);
filePtr.p->operation.dataBuffer.updateWritePtr(gcpSz);
- }
- {
- TablePtr tabPtr;
- for(ptr.p->tables.first(tabPtr); tabPtr.i != RNIL;
- ptr.p->tables.next(tabPtr))
{
- signal->theData[0] = tabPtr.p->tableId;
- signal->theData[1] = 0; // unlock
- EXECUTE_DIRECT(DBDICT, GSN_BACKUP_FRAGMENT_REQ, signal, 2);
+ TablePtr tabPtr;
+ ptr.p->tables.first(tabPtr);
+
+ signal->theData[0] = BackupContinueB::BACKUP_FRAGMENT_INFO;
+ signal->theData[1] = ptr.i;
+ signal->theData[2] = tabPtr.i;
+ signal->theData[3] = 0;
+ sendSignal(BACKUP_REF, GSN_CONTINUEB, signal, 4, JBB);
}
}
-
- closeFiles(signal, ptr);
}
void
diff --git a/ndb/src/kernel/blocks/backup/Backup.hpp b/ndb/src/kernel/blocks/backup/Backup.hpp
index c455e32fa67..e37923da749 100644
--- a/ndb/src/kernel/blocks/backup/Backup.hpp
+++ b/ndb/src/kernel/blocks/backup/Backup.hpp
@@ -68,6 +68,7 @@ protected:
void execBACKUP_DATA(Signal* signal);
void execSTART_BACKUP_REQ(Signal* signal);
void execBACKUP_FRAGMENT_REQ(Signal* signal);
+ void execBACKUP_FRAGMENT_COMPLETE_REP(Signal* signal);
void execSTOP_BACKUP_REQ(Signal* signal);
void execBACKUP_STATUS_REQ(Signal* signal);
void execABORT_BACKUP_ORD(Signal* signal);
@@ -183,10 +184,12 @@ public:
typedef Ptr<Attribute> AttributePtr;
struct Fragment {
+ Uint64 noOfRecords;
Uint32 tableId;
- Uint32 node;
- Uint16 scanned; // 0 = not scanned x = scanned by node x
- Uint16 scanning; // 0 = not scanning x = scanning on node x
+ Uint8 node;
+ Uint8 scanned; // 0 = not scanned x = scanned by node x
+ Uint8 scanning; // 0 = not scanning x = scanning on node x
+ Uint8 unused1;
Uint32 nextPool;
};
typedef Ptr<Fragment> FragmentPtr;
@@ -194,6 +197,8 @@ public:
struct Table {
Table(ArrayPool<Attribute> &, ArrayPool<Fragment> &);
+ Uint64 noOfRecords;
+
Uint32 tableId;
Uint32 schemaVersion;
Uint32 tableType;
@@ -269,8 +274,8 @@ public:
Uint32 tablePtr; // Ptr.i to current table
FsBuffer dataBuffer;
- Uint32 noOfRecords;
- Uint32 noOfBytes;
+ Uint64 noOfRecords;
+ Uint64 noOfBytes;
Uint32 maxRecordSize;
private:
diff --git a/ndb/src/kernel/blocks/backup/BackupFormat.hpp b/ndb/src/kernel/blocks/backup/BackupFormat.hpp
index 65dd2ad9053..b8ffff3a294 100644
--- a/ndb/src/kernel/blocks/backup/BackupFormat.hpp
+++ b/ndb/src/kernel/blocks/backup/BackupFormat.hpp
@@ -32,7 +32,8 @@ struct BackupFormat {
FRAGMENT_FOOTER = 3,
TABLE_LIST = 4,
TABLE_DESCRIPTION = 5,
- GCP_ENTRY = 6
+ GCP_ENTRY = 6,
+ FRAGMENT_INFO = 7
};
struct FileHeader {
@@ -126,6 +127,20 @@ struct BackupFormat {
Uint32 StartGCP;
Uint32 StopGCP;
};
+
+ /**
+ * Fragment Info
+ */
+ struct FragmentInfo {
+ Uint32 SectionType;
+ Uint32 SectionLength;
+ Uint32 TableId;
+ Uint32 FragmentNo;
+ Uint32 NoOfRecordsLow;
+ Uint32 NoOfRecordsHigh;
+ Uint32 FilePosLow;
+ Uint32 FilePosHigh;
+ };
};
/**
diff --git a/ndb/src/kernel/blocks/backup/BackupInit.cpp b/ndb/src/kernel/blocks/backup/BackupInit.cpp
index 4c734d58c8e..96c11468939 100644
--- a/ndb/src/kernel/blocks/backup/BackupInit.cpp
+++ b/ndb/src/kernel/blocks/backup/BackupInit.cpp
@@ -97,6 +97,9 @@ Backup::Backup(const Configuration & conf) :
addRecSignal(GSN_BACKUP_FRAGMENT_REQ, &Backup::execBACKUP_FRAGMENT_REQ);
addRecSignal(GSN_BACKUP_FRAGMENT_REF, &Backup::execBACKUP_FRAGMENT_REF);
addRecSignal(GSN_BACKUP_FRAGMENT_CONF, &Backup::execBACKUP_FRAGMENT_CONF);
+
+ addRecSignal(GSN_BACKUP_FRAGMENT_COMPLETE_REP,
+ &Backup::execBACKUP_FRAGMENT_COMPLETE_REP);
addRecSignal(GSN_STOP_BACKUP_REQ, &Backup::execSTOP_BACKUP_REQ);
addRecSignal(GSN_STOP_BACKUP_REF, &Backup::execSTOP_BACKUP_REF);
diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
index ca9daca428b..133b4d75d8e 100644
--- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
+++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
@@ -203,6 +203,11 @@ void Dbdict::execCONTINUEB(Signal* signal)
sendGetTabResponse(signal);
break;
+ case ZDICT_LOCK_POLL:
+ jam();
+ checkDictLockQueue(signal, true);
+ break;
+
default :
ndbrequire(false);
break;
@@ -281,6 +286,10 @@ Dbdict::packTableIntoPagesImpl(SimpleProperties::Writer & w,
w.add(DictTabInfo::TableKValue, tablePtr.p->kValue);
w.add(DictTabInfo::FragmentTypeVal, tablePtr.p->fragmentType);
w.add(DictTabInfo::TableTypeVal, tablePtr.p->tableType);
+ w.add(DictTabInfo::MaxRowsLow, tablePtr.p->maxRowsLow);
+ w.add(DictTabInfo::MaxRowsHigh, tablePtr.p->maxRowsHigh);
+ w.add(DictTabInfo::MinRowsLow, tablePtr.p->minRowsLow);
+ w.add(DictTabInfo::MinRowsHigh, tablePtr.p->minRowsHigh);
if(!signal)
{
@@ -1208,7 +1217,9 @@ Dbdict::Dbdict(const class Configuration & conf):
c_opCreateTrigger(c_opRecordPool),
c_opDropTrigger(c_opRecordPool),
c_opAlterTrigger(c_opRecordPool),
- c_opRecordSequence(0)
+ c_opRecordSequence(0),
+ c_dictLockQueue(c_dictLockPool),
+ c_dictLockPoll(false)
{
BLOCK_CONSTRUCTOR(Dbdict);
@@ -1352,6 +1363,9 @@ Dbdict::Dbdict(const class Configuration & conf):
addRecSignal(GSN_DROP_TAB_CONF, &Dbdict::execDROP_TAB_CONF);
addRecSignal(GSN_BACKUP_FRAGMENT_REQ, &Dbdict::execBACKUP_FRAGMENT_REQ);
+
+ addRecSignal(GSN_DICT_LOCK_REQ, &Dbdict::execDICT_LOCK_REQ);
+ addRecSignal(GSN_DICT_UNLOCK_ORD, &Dbdict::execDICT_UNLOCK_ORD);
}//Dbdict::Dbdict()
Dbdict::~Dbdict()
@@ -1525,6 +1539,10 @@ void Dbdict::initialiseTableRecord(TableRecordPtr tablePtr)
tablePtr.p->minLoadFactor = 70;
tablePtr.p->noOfPrimkey = 1;
tablePtr.p->tupKeyLength = 1;
+ tablePtr.p->maxRowsLow = 0;
+ tablePtr.p->maxRowsHigh = 0;
+ tablePtr.p->minRowsLow = 0;
+ tablePtr.p->minRowsHigh = 0;
tablePtr.p->storedTable = true;
tablePtr.p->tableType = DictTabInfo::UserTable;
tablePtr.p->primaryTableId = RNIL;
@@ -1764,6 +1782,8 @@ void Dbdict::execREAD_CONFIG_REQ(Signal* signal)
c_opCreateTrigger.setSize(8);
c_opDropTrigger.setSize(8);
c_opAlterTrigger.setSize(8);
+
+ c_dictLockPool.setSize(32);
// Initialize schema file copies
c_schemaFile[0].schemaPage =
@@ -2821,6 +2841,10 @@ void Dbdict::execNODE_FAILREP(Signal* signal)
c_blockState = BS_NODE_FAILURE;
ok = true;
break;
+ case BS_NODE_RESTART:
+ jam();
+ ok = true;
+ break;
}
ndbrequire(ok);
@@ -2843,6 +2867,15 @@ void Dbdict::execNODE_FAILREP(Signal* signal)
}//if
}//for
+ /*
+ * NODE_FAILREP guarantees that no "in flight" signal from
+ * a dead node is accepted, and also that the job buffer contains
+ * no such (un-executed) signals. Therefore no DICT_UNLOCK_ORD
+ * from a dead node (leading to master crash) is possible after
+ * this clean-up removes the lock record.
+ */
+ removeStaleDictLocks(signal, theFailedNodes);
+
}//execNODE_FAILREP()
@@ -2911,6 +2944,12 @@ Dbdict::execCREATE_TABLE_REQ(Signal* signal){
break;
}
+ if (c_blockState == BS_NODE_RESTART){
+ jam();
+ parseRecord.errorCode = CreateTableRef::BusyWithNR;
+ break;
+ }
+
if (c_blockState != BS_IDLE){
jam();
parseRecord.errorCode = CreateTableRef::Busy;
@@ -3060,6 +3099,12 @@ Dbdict::execALTER_TABLE_REQ(Signal* signal)
return;
}
+ if(c_blockState == BS_NODE_RESTART){
+ jam();
+ alterTableRef(signal, req, AlterTableRef::BusyWithNR);
+ return;
+ }
+
if(c_blockState != BS_IDLE){
jam();
alterTableRef(signal, req, AlterTableRef::Busy);
@@ -4464,6 +4509,13 @@ Dbdict::execADD_FRAGREQ(Signal* signal) {
Uint32 lhPageBits = 0;
::calcLHbits(&lhPageBits, &lhDistrBits, fragId, fragCount);
+ Uint64 maxRows = tabPtr.p->maxRowsLow +
+ (((Uint64)tabPtr.p->maxRowsHigh) << 32);
+ Uint64 minRows = tabPtr.p->minRowsLow +
+ (((Uint64)tabPtr.p->minRowsHigh) << 32);
+ maxRows = (maxRows + fragCount - 1) / fragCount;
+ minRows = (minRows + fragCount - 1) / fragCount;
+
{
LqhFragReq* req = (LqhFragReq*)signal->getDataPtrSend();
req->senderData = senderData;
@@ -4479,7 +4531,10 @@ Dbdict::execADD_FRAGREQ(Signal* signal) {
req->lh3PageBits = 0; //lhPageBits;
req->noOfAttributes = tabPtr.p->noOfAttributes;
req->noOfNullAttributes = tabPtr.p->noOfNullBits;
- req->noOfPagesToPreAllocate = 0;
+ req->maxRowsLow = maxRows & 0xFFFFFFFF;
+ req->maxRowsHigh = maxRows >> 32;
+ req->minRowsLow = minRows & 0xFFFFFFFF;
+ req->minRowsHigh = minRows >> 32;
req->schemaVersion = tabPtr.p->tableVersion;
Uint32 keyLen = tabPtr.p->tupKeyLength;
req->keyLength = keyLen; // wl-2066 no more "long keys"
@@ -4487,8 +4542,7 @@ Dbdict::execADD_FRAGREQ(Signal* signal) {
req->noOfKeyAttr = tabPtr.p->noOfPrimkey;
req->noOfNewAttr = 0;
- // noOfCharsets passed to TUP in upper half
- req->noOfNewAttr |= (tabPtr.p->noOfCharsets << 16);
+ req->noOfCharsets = tabPtr.p->noOfCharsets;
req->checksumIndicator = 1;
req->noOfAttributeGroups = 1;
req->GCPIndicator = 0;
@@ -5054,6 +5108,15 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
tablePtr.p->tableType = (DictTabInfo::TableType)tableDesc.TableType;
tablePtr.p->kValue = tableDesc.TableKValue;
tablePtr.p->fragmentCount = tableDesc.FragmentCount;
+ tablePtr.p->maxRowsLow = tableDesc.MaxRowsLow;
+ tablePtr.p->maxRowsHigh = tableDesc.MaxRowsHigh;
+ tablePtr.p->minRowsLow = tableDesc.MinRowsLow;
+ tablePtr.p->minRowsHigh = tableDesc.MinRowsHigh;
+
+ Uint64 maxRows =
+ (((Uint64)tablePtr.p->maxRowsHigh) << 32) + tablePtr.p->maxRowsLow;
+ Uint64 minRows =
+ (((Uint64)tablePtr.p->minRowsHigh) << 32) + tablePtr.p->minRowsLow;
tablePtr.p->frmLen = tableDesc.FrmLen;
memcpy(tablePtr.p->frmData, tableDesc.FrmData, tableDesc.FrmLen);
@@ -5372,6 +5435,12 @@ Dbdict::execDROP_TABLE_REQ(Signal* signal){
return;
}
+ if(c_blockState == BS_NODE_RESTART){
+ jam();
+ dropTableRef(signal, req, DropTableRef::BusyWithNR);
+ return;
+ }
+
if(c_blockState != BS_IDLE){
jam();
dropTableRef(signal, req, DropTableRef::Busy);
@@ -12170,6 +12239,275 @@ Dbdict::getIndexAttrMask(TableRecordPtr indexPtr, AttributeMask& mask)
}
}
+// DICT lock master
+
+const Dbdict::DictLockType*
+Dbdict::getDictLockType(Uint32 lockType)
+{
+ static const DictLockType lt[] = {
+ { DictLockReq::NodeRestartLock, BS_NODE_RESTART, "NodeRestart" }
+ };
+ for (int i = 0; i < sizeof(lt)/sizeof(lt[0]); i++) {
+ if (lt[i].lockType == lockType)
+ return &lt[i];
+ }
+ return NULL;
+}
+
+void
+Dbdict::sendDictLockInfoEvent(Uint32 pollCount)
+{
+ DictLockPtr loopPtr;
+ c_dictLockQueue.first(loopPtr);
+ unsigned count = 0;
+
+ char queue_buf[100];
+ char *p = &queue_buf[0];
+ const char *const q = &queue_buf[sizeof(queue_buf)];
+ *p = 0;
+
+ while (loopPtr.i != RNIL) {
+ jam();
+ my_snprintf(p, q-p, "%s%u%s",
+ ++count == 1 ? "" : " ",
+ (unsigned)refToNode(loopPtr.p->req.userRef),
+ loopPtr.p->locked ? "L" : "");
+ p += strlen(p);
+ c_dictLockQueue.next(loopPtr);
+ }
+
+ infoEvent("DICT: lock bs: %d ops: %d poll: %d cnt: %d queue: %s",
+ (int)c_blockState,
+ c_opRecordPool.getSize() - c_opRecordPool.getNoOfFree(),
+ c_dictLockPoll, (int)pollCount, queue_buf);
+}
+
+void
+Dbdict::sendDictLockInfoEvent(DictLockPtr lockPtr, const char* text)
+{
+ infoEvent("DICT: %s %u for %s",
+ text,
+ (unsigned)refToNode(lockPtr.p->req.userRef), lockPtr.p->lt->text);
+}
+
+void
+Dbdict::execDICT_LOCK_REQ(Signal* signal)
+{
+ jamEntry();
+ const DictLockReq* req = (const DictLockReq*)&signal->theData[0];
+
+ // make sure bad request crashes slave, not master (us)
+
+ if (getOwnNodeId() != c_masterNodeId) {
+ jam();
+ sendDictLockRef(signal, *req, DictLockRef::NotMaster);
+ return;
+ }
+
+ const DictLockType* lt = getDictLockType(req->lockType);
+ if (lt == NULL) {
+ jam();
+ sendDictLockRef(signal, *req, DictLockRef::InvalidLockType);
+ return;
+ }
+
+ if (req->userRef != signal->getSendersBlockRef() ||
+ getNodeInfo(refToNode(req->userRef)).m_type != NodeInfo::DB) {
+ jam();
+ sendDictLockRef(signal, *req, DictLockRef::BadUserRef);
+ return;
+ }
+
+ if (c_aliveNodes.get(refToNode(req->userRef))) {
+ jam();
+ sendDictLockRef(signal, *req, DictLockRef::TooLate);
+ return;
+ }
+
+ DictLockPtr lockPtr;
+ if (! c_dictLockQueue.seize(lockPtr)) {
+ jam();
+ sendDictLockRef(signal, *req, DictLockRef::TooManyRequests);
+ return;
+ }
+
+ lockPtr.p->req = *req;
+ lockPtr.p->locked = false;
+ lockPtr.p->lt = lt;
+
+ checkDictLockQueue(signal, false);
+
+ if (! lockPtr.p->locked)
+ sendDictLockInfoEvent(lockPtr, "lock request by node");
+}
+
+void
+Dbdict::checkDictLockQueue(Signal* signal, bool poll)
+{
+ Uint32 pollCount = ! poll ? 0 : signal->theData[1];
+
+ DictLockPtr lockPtr;
+
+ do {
+ if (! c_dictLockQueue.first(lockPtr)) {
+ jam();
+ setDictLockPoll(signal, false, pollCount);
+ return;
+ }
+
+ if (lockPtr.p->locked) {
+ jam();
+ ndbrequire(c_blockState == lockPtr.p->lt->blockState);
+ break;
+ }
+
+ if (c_opRecordPool.getNoOfFree() != c_opRecordPool.getSize()) {
+ jam();
+ break;
+ }
+
+ ndbrequire(c_blockState == BS_IDLE);
+ lockPtr.p->locked = true;
+ c_blockState = lockPtr.p->lt->blockState;
+ sendDictLockConf(signal, lockPtr);
+
+ sendDictLockInfoEvent(lockPtr, "locked by node");
+ } while (0);
+
+ // poll while first request is open
+ // this routine is called again when it is removed for any reason
+
+ bool on = ! lockPtr.p->locked;
+ setDictLockPoll(signal, on, pollCount);
+}
+
+void
+Dbdict::execDICT_UNLOCK_ORD(Signal* signal)
+{
+ jamEntry();
+ const DictUnlockOrd* ord = (const DictUnlockOrd*)&signal->theData[0];
+
+ DictLockPtr lockPtr;
+ c_dictLockQueue.getPtr(lockPtr, ord->lockPtr);
+ ndbrequire(lockPtr.p->lt->lockType == ord->lockType);
+
+ if (lockPtr.p->locked) {
+ jam();
+ ndbrequire(c_blockState == lockPtr.p->lt->blockState);
+ ndbrequire(c_opRecordPool.getNoOfFree() == c_opRecordPool.getSize());
+ ndbrequire(! c_dictLockQueue.hasPrev(lockPtr));
+
+ c_blockState = BS_IDLE;
+ sendDictLockInfoEvent(lockPtr, "unlocked by node");
+ } else {
+ sendDictLockInfoEvent(lockPtr, "lock request removed by node");
+ }
+
+ c_dictLockQueue.release(lockPtr);
+
+ checkDictLockQueue(signal, false);
+}
+
+void
+Dbdict::sendDictLockConf(Signal* signal, DictLockPtr lockPtr)
+{
+ DictLockConf* conf = (DictLockConf*)&signal->theData[0];
+ const DictLockReq& req = lockPtr.p->req;
+
+ conf->userPtr = req.userPtr;
+ conf->lockType = req.lockType;
+ conf->lockPtr = lockPtr.i;
+
+ sendSignal(req.userRef, GSN_DICT_LOCK_CONF, signal,
+ DictLockConf::SignalLength, JBB);
+}
+
+void
+Dbdict::sendDictLockRef(Signal* signal, DictLockReq req, Uint32 errorCode)
+{
+ DictLockRef* ref = (DictLockRef*)&signal->theData[0];
+
+ ref->userPtr = req.userPtr;
+ ref->lockType = req.lockType;
+ ref->errorCode = errorCode;
+
+ sendSignal(req.userRef, GSN_DICT_LOCK_REF, signal,
+ DictLockRef::SignalLength, JBB);
+}
+
+// control polling
+
+void
+Dbdict::setDictLockPoll(Signal* signal, bool on, Uint32 pollCount)
+{
+ if (on) {
+ jam();
+ signal->theData[0] = ZDICT_LOCK_POLL;
+ signal->theData[1] = pollCount + 1;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 2);
+ }
+
+ bool change = (c_dictLockPoll != on);
+
+ if (change) {
+ jam();
+ c_dictLockPoll = on;
+ }
+
+ // avoid too many messages if master is stuck busy (BS_NODE_FAILURE)
+ bool periodic =
+ pollCount < 8 ||
+ pollCount < 64 && pollCount % 8 == 0 ||
+ pollCount < 512 && pollCount % 64 == 0 ||
+ pollCount < 4096 && pollCount % 512 == 0 ||
+ pollCount % 4096 == 0; // about every 6 minutes
+
+ if (change || periodic)
+ sendDictLockInfoEvent(pollCount);
+}
+
+// NF handling
+
+void
+Dbdict::removeStaleDictLocks(Signal* signal, const Uint32* theFailedNodes)
+{
+ DictLockPtr loopPtr;
+ c_dictLockQueue.first(loopPtr);
+
+ if (getOwnNodeId() != c_masterNodeId) {
+ ndbrequire(loopPtr.i == RNIL);
+ return;
+ }
+
+ while (loopPtr.i != RNIL) {
+ jam();
+ DictLockPtr lockPtr = loopPtr;
+ c_dictLockQueue.next(loopPtr);
+
+ Uint32 nodeId = refToNode(lockPtr.p->req.userRef);
+
+ if (NodeBitmask::get(theFailedNodes, nodeId)) {
+ if (lockPtr.p->locked) {
+ jam();
+ ndbrequire(c_blockState == lockPtr.p->lt->blockState);
+ ndbrequire(c_opRecordPool.getNoOfFree() == c_opRecordPool.getSize());
+ ndbrequire(! c_dictLockQueue.hasPrev(lockPtr));
+
+ c_blockState = BS_IDLE;
+
+ sendDictLockInfoEvent(lockPtr, "remove lock by failed node");
+ } else {
+ sendDictLockInfoEvent(lockPtr, "remove lock request by failed node");
+ }
+
+ c_dictLockQueue.release(lockPtr);
+ }
+ }
+
+ checkDictLockQueue(signal, false);
+}
+
+
/* **************************************************************** */
/* ---------------------------------------------------------------- */
/* MODULE: STORE/RESTORE SCHEMA FILE---------------------- */
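
As a worked example of the setDictLockPoll() backoff added above, with its 100 ms CONTINUEB delay: an info event is logged on every poll for the first ~0.8 s, then on every 8th poll (0.8 s apart) up to poll 64, on every 64th poll (6.4 s apart) up to poll 512, on every 512th poll (51.2 s apart) up to poll 4096, and on every 4096th poll (about every 6.8 minutes) after that, which matches the "about every 6 minutes" note in the code.
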
diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
index 6b78fb86534..0fa984a4c61 100644
--- a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
+++ b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
@@ -26,6 +26,7 @@
#include <pc.hpp>
#include <ArrayList.hpp>
#include <DLHashTable.hpp>
+#include <DLFifoList.hpp>
#include <CArray.hpp>
#include <KeyTable2.hpp>
#include <SimulatedBlock.hpp>
@@ -50,6 +51,7 @@
#include <signaldata/CreateTrig.hpp>
#include <signaldata/DropTrig.hpp>
#include <signaldata/AlterTrig.hpp>
+#include <signaldata/DictLock.hpp>
#include "SchemaFile.hpp"
#include <blocks/mutexes.hpp>
#include <SafeCounter.hpp>
@@ -63,6 +65,7 @@
/*--------------------------------------------------------------*/
#define ZPACK_TABLE_INTO_PAGES 0
#define ZSEND_GET_TAB_RESPONSE 3
+#define ZDICT_LOCK_POLL 4
/*--------------------------------------------------------------*/
@@ -131,6 +134,10 @@ public:
* on disk. Index trigger ids are volatile.
*/
struct TableRecord : public MetaData::Table {
+ Uint32 maxRowsLow;
+ Uint32 maxRowsHigh;
+ Uint32 minRowsLow;
+ Uint32 minRowsHigh;
/****************************************************
* Support variables for table handling
****************************************************/
@@ -587,6 +594,9 @@ private:
void execALTER_TAB_CONF(Signal* signal);
bool check_ndb_versions() const;
+ void execDICT_LOCK_REQ(Signal* signal);
+ void execDICT_UNLOCK_ORD(Signal* signal);
+
/*
* 2.4 COMMON STORED VARIABLES
*/
@@ -817,12 +827,43 @@ private:
// State variables
/* ----------------------------------------------------------------------- */
+#ifndef ndb_dbdict_log_block_state
enum BlockState {
BS_IDLE = 0,
BS_CREATE_TAB = 1,
BS_BUSY = 2,
- BS_NODE_FAILURE = 3
+ BS_NODE_FAILURE = 3,
+ BS_NODE_RESTART = 4
+ };
+#else // quick hack to log changes
+ enum {
+ BS_IDLE = 0,
+ BS_CREATE_TAB = 1,
+ BS_BUSY = 2,
+ BS_NODE_FAILURE = 3,
+ BS_NODE_RESTART = 4
};
+ struct BlockState;
+ friend struct BlockState;
+ struct BlockState {
+ BlockState() :
+ m_value(BS_IDLE) {
+ }
+ BlockState(int value) :
+ m_value(value) {
+ }
+ operator int() const {
+ return m_value;
+ }
+ BlockState& operator=(const BlockState& bs) {
+ Dbdict* dict = (Dbdict*)globalData.getBlock(DBDICT);
+ dict->infoEvent("DICT: bs %d->%d", m_value, bs.m_value);
+ m_value = bs.m_value;
+ return *this;
+ }
+ int m_value;
+ };
+#endif
BlockState c_blockState;
struct PackTable {
@@ -1722,6 +1763,70 @@ private:
// Unique key for operation XXX move to some system table
Uint32 c_opRecordSequence;
+ /*
+ * Master DICT can be locked in 2 mutually exclusive ways:
+ *
+ * 1) for schema ops, via operation records
+ * 2) against schema ops, via a lock queue
+ *
+ * Current use of 2) is by a starting node, to prevent schema ops
+ * until started. The ops are refused (BlockState != BS_IDLE),
+ * not queued.
+ *
+ * Master failure is not handled, in node start case the starting
+ * node will crash too anyway. Use lock table in future..
+ *
+ * The lock queue is "serial" but other behaviour is possible
+ * by checking lock types e.g. to allow parallel node starts.
+ *
+ * Checking release of last op record is not convenient with
+ * current structure (5.0). Instead we poll via continueB.
+ *
+ * XXX only table ops check BlockState
+ */
+ struct DictLockType;
+ friend struct DictLockType;
+
+ struct DictLockType {
+ DictLockReq::LockType lockType;
+ BlockState blockState;
+ const char* text;
+ };
+
+ struct DictLockRecord;
+ friend struct DictLockRecord;
+
+ struct DictLockRecord {
+ DictLockReq req;
+ const DictLockType* lt;
+ bool locked;
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+ Uint32 prevList;
+ };
+
+ typedef Ptr<DictLockRecord> DictLockPtr;
+ ArrayPool<DictLockRecord> c_dictLockPool;
+ DLFifoList<DictLockRecord> c_dictLockQueue;
+ bool c_dictLockPoll;
+
+ static const DictLockType* getDictLockType(Uint32 lockType);
+ void sendDictLockInfoEvent(Uint32 pollCount);
+ void sendDictLockInfoEvent(DictLockPtr lockPtr, const char* text);
+
+ void checkDictLockQueue(Signal* signal, bool poll);
+ void sendDictLockConf(Signal* signal, DictLockPtr lockPtr);
+ void sendDictLockRef(Signal* signal, DictLockReq req, Uint32 errorCode);
+
+ // control polling i.e. continueB loop
+ void setDictLockPoll(Signal* signal, bool on, Uint32 pollCount);
+
+ // NF handling
+ void removeStaleDictLocks(Signal* signal, const Uint32* theFailedNodes);
+
+
// Statement blocks
/* ------------------------------------------------------------ */
diff --git a/ndb/src/kernel/blocks/dbdict/DictLock.txt b/ndb/src/kernel/blocks/dbdict/DictLock.txt
new file mode 100644
index 00000000000..17f24119e9d
--- /dev/null
+++ b/ndb/src/kernel/blocks/dbdict/DictLock.txt
@@ -0,0 +1,94 @@
+Lock master DICT against schema operations
+
+Implementation
+--------------
+
+[ see comments in Dbdict.hpp ]
+
+Use case: Node startup INR / NR
+-------------------------------
+
+Master DICT (like any block) keeps a list of alive nodes (c_aliveNodes).
+These are participants in schema ops.
+
+(1) c_aliveNodes is initialized when DICT starts
+ in sp3 in READ_NODESCONF from CNTR
+
+(2) when slave node fails (in any sp of the slave node)
+ it is removed from c_aliveNodes in NODE_FAILREP
+
+(3) when slave starts, it is added to c_aliveNodes
+ in sp4 of the starting node in INCL_NODEREQ
+
+Slave DIH locks master DICT in sp2 and releases the lock when started.
+This is based on the following constraints:
+
+- the lock is taken when master DICT is known
+ DIH reads this in sp2 in READ_NODESCONF
+
+- the lock is taken before (3)
+
+- the lock is taken before copying starts and held until it is done
+ in sp4 DIH meta, DICT meta, tuple data
+
+- on INR in sp2 in START_PERMREQ the LCP info of the slave is erased
+ in all DIH in invalidateNodeLCP() - not safe under schema ops
+
+Signals:
+
+All but DICT_LOCK are standard v5.0 signals.
+s=starting node, m=master, a=all participants, l=local block.
+
+* sp2 - DICT_LOCK and START_PERM
+
+DIH/s
+ DICT_LOCK_REQ
+ DICT/m
+ DICT_LOCK_CONF
+DIH/s
+ START_PERMREQ
+ DIH/m
+ START_INFOREQ
+ DIH/a
+ invalidateNodeLCP() if INR
+ DIH/a
+ START_INFOCONF
+ DIH/m
+ START_PERMCONF
+DIH/s
+
+* sp4 - START_ME (copy metadata, no changes)
+
+DIH/s
+ START_MEREQ
+ DIH/m
+ COPY_TABREQ
+ DIH/s
+ COPY_TABCONF
+ DIH/m
+ DICTSTARTREQ
+ DICT/s
+ GET_SCHEMA_INFOREQ
+ DICT/m
+ SCHEMA_INFO
+ DICT/s
+ DICTSTARTCONF
+ DIH/m
+ INCL_NODEREQ
+ DIH/a
+ INCL_NODEREQ
+ ANY/l
+ INCL_NODECONF
+ DIH/a
+ INCL_NODECONF
+ DIH/m
+ START_MECONF
+DIH/s
+
+* sp7 - release DICT lock
+
+DIH/s
+ DICT_UNLOCK_ORD
+ DICT/m
+
+# vim: set et sw=4:
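From the starting node's point of view the sequence above boils down to: take the lock in sp2, hold it across the sp4 copy, release it in sp7. Below is a toy sketch of that lifetime, assuming an invented RAII-style guard purely to make the ordering visible - the real blocks exchange explicit DICT_LOCK_REQ/CONF and DICT_UNLOCK_ORD signals and hold no such C++ object across phases.

// Illustration only: models the lock lifetime described above with an
// invented guard object; the kernel uses explicit signals, not RAII.
#include <iostream>

struct MasterDict {
  bool locked = false;
  void lock()   { locked = true;  std::cout << "sp2: DICT_LOCK_REQ -> DICT_LOCK_CONF (locked)\n"; }
  void unlock() { locked = false; std::cout << "sp7: DICT_UNLOCK_ORD (released)\n"; }
  bool schemaOpAllowed() const { return !locked; }
};

struct NodeRestartLockGuard {
  explicit NodeRestartLockGuard(MasterDict& d) : dict(d) { dict.lock(); }
  ~NodeRestartLockGuard() { dict.unlock(); }
  MasterDict& dict;
};

int main() {
  MasterDict master;
  {
    NodeRestartLockGuard guard(master);      // sp2: taken before START_PERMREQ
    std::cout << "sp2: START_PERMREQ / START_PERMCONF\n";
    std::cout << "sp4: START_MEREQ, copy DIH/DICT meta and tuple data\n";
    std::cout << "schema ops allowed while copying? "
              << std::boolalpha << master.schemaOpAllowed() << "\n";  // false
  }                                          // sp7: lock released
  std::cout << "schema ops allowed after start? "
            << master.schemaOpAllowed() << "\n";                      // true
  return 0;
}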
diff --git a/ndb/src/kernel/blocks/dbdih/Dbdih.hpp b/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
index 78acf1ffd19..f4a33df9805 100644
--- a/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
+++ b/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
@@ -718,6 +718,9 @@ private:
void checkPrepDropTabComplete(Signal *, TabRecordPtr tabPtr);
void checkWaitDropTabFailedLqh(Signal *, Uint32 nodeId, Uint32 tableId);
+ void execDICT_LOCK_CONF(Signal* signal);
+ void execDICT_LOCK_REF(Signal* signal);
+
// Statement blocks
//------------------------------------
// Methods that send signals
@@ -935,6 +938,7 @@ private:
void initialStartCompletedLab(Signal *);
void allNodesLcpCompletedLab(Signal *);
void nodeRestartPh2Lab(Signal *);
+ void nodeRestartPh2Lab2(Signal *);
void initGciFilesLab(Signal *);
void dictStartConfLab(Signal *);
void nodeDictStartConfLab(Signal *);
@@ -1594,6 +1598,30 @@ private:
* Reply from nodeId
*/
void startInfoReply(Signal *, Uint32 nodeId);
+
+ /*
+ * Lock master DICT. Only current use is by starting node
+ * during NR. A pool of slave records is convenient anyway.
+ */
+ struct DictLockSlaveRecord {
+ Uint32 lockPtr;
+ Uint32 lockType;
+ bool locked;
+ Callback callback;
+ Uint32 nextPool;
+ };
+
+ typedef Ptr<DictLockSlaveRecord> DictLockSlavePtr;
+ ArrayPool<DictLockSlaveRecord> c_dictLockSlavePool;
+
+ // slave
+ void sendDictLockReq(Signal* signal, Uint32 lockType, Callback c);
+ void recvDictLockConf(Signal* signal);
+ void sendDictUnlockOrd(Signal* signal, Uint32 lockSlavePtrI);
+
+ // NR
+ Uint32 c_dictLockSlavePtrI_nodeRestart; // userPtr for NR
+ void recvDictLockConf_nodeRestart(Signal* signal, Uint32 data, Uint32 ret);
};
#if (DIH_CDATA_SIZE < _SYSFILE_SIZE32)
diff --git a/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp b/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
index cd987048577..2b878034258 100644
--- a/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
+++ b/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
@@ -66,6 +66,9 @@ void Dbdih::initData()
waitGCPProxyPool.setSize(ZPROXY_FILE_SIZE);
waitGCPMasterPool.setSize(ZPROXY_MASTER_FILE_SIZE);
+ c_dictLockSlavePool.setSize(1); // assert single usage
+ c_dictLockSlavePtrI_nodeRestart = RNIL;
+
cgcpOrderBlocked = 0;
c_lcpState.ctcCounter = 0;
cwaitLcpSr = false;
@@ -264,6 +267,9 @@ Dbdih::Dbdih(const class Configuration & config):
addRecSignal(GSN_CREATE_FRAGMENTATION_REQ,
&Dbdih::execCREATE_FRAGMENTATION_REQ);
+ addRecSignal(GSN_DICT_LOCK_CONF, &Dbdih::execDICT_LOCK_CONF);
+ addRecSignal(GSN_DICT_LOCK_REF, &Dbdih::execDICT_LOCK_REF);
+
apiConnectRecord = 0;
connectRecord = 0;
fileRecord = 0;
diff --git a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
index dcb5d201d7f..c8a33715b9c 100644
--- a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
+++ b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
@@ -67,6 +67,7 @@
#include <signaldata/CreateFragmentation.hpp>
#include <signaldata/LqhFrag.hpp>
#include <signaldata/FsOpenReq.hpp>
+#include <signaldata/DictLock.hpp>
#include <DebuggerNames.hpp>
#include <EventLogger.hpp>
@@ -544,7 +545,7 @@ void Dbdih::execCONTINUEB(Signal* signal)
break;
case DihContinueB::ZSTART_PERMREQ_AGAIN:
jam();
- nodeRestartPh2Lab(signal);
+ nodeRestartPh2Lab2(signal);
return;
break;
case DihContinueB::SwitchReplica:
@@ -1284,6 +1285,7 @@ void Dbdih::execNDB_STTOR(Signal* signal)
case NodeState::ST_INITIAL_NODE_RESTART:
case NodeState::ST_NODE_RESTART:
jam();
+
/***********************************************************************
* When starting nodes while system is operational we must be controlled
* by the master since only one node restart is allowed at a time.
@@ -1294,7 +1296,7 @@ void Dbdih::execNDB_STTOR(Signal* signal)
req->startingRef = reference();
req->startingVersion = 0; // Obsolete
sendSignal(cmasterdihref, GSN_START_MEREQ, signal,
- StartMeReq::SignalLength, JBB);
+ StartMeReq::SignalLength, JBB);
return;
}
ndbrequire(false);
@@ -1354,6 +1356,24 @@ void Dbdih::execNDB_STTOR(Signal* signal)
}
ndbrequire(false);
break;
+ case ZNDB_SPH7:
+ jam();
+ switch (typestart) {
+ case NodeState::ST_INITIAL_START:
+ case NodeState::ST_SYSTEM_RESTART:
+ jam();
+ ndbsttorry10Lab(signal, __LINE__);
+ return;
+ case NodeState::ST_NODE_RESTART:
+ case NodeState::ST_INITIAL_NODE_RESTART:
+ jam();
+ sendDictUnlockOrd(signal, c_dictLockSlavePtrI_nodeRestart);
+ c_dictLockSlavePtrI_nodeRestart = RNIL;
+ ndbsttorry10Lab(signal, __LINE__);
+ return;
+ }
+ ndbrequire(false);
+ break;
default:
jam();
ndbsttorry10Lab(signal, __LINE__);
@@ -1564,6 +1584,34 @@ void Dbdih::execREAD_NODESCONF(Signal* signal)
/*---------------------------------------------------------------------------*/
void Dbdih::nodeRestartPh2Lab(Signal* signal)
{
+ /*
+ * Lock master DICT to avoid metadata operations during INR/NR.
+ * Done just before START_PERMREQ.
+ *
+ * It would be more elegant to do this just before START_MEREQ.
+ * The problem is that on INR we end up in a massive invalidateNodeLCP,
+ * which is not fully protected against metadata ops.
+ */
+ ndbrequire(c_dictLockSlavePtrI_nodeRestart == RNIL);
+
+ // check that we are not yet taking part in schema ops
+ CRASH_INSERTION(7174);
+
+ Uint32 lockType = DictLockReq::NodeRestartLock;
+ Callback c = { safe_cast(&Dbdih::recvDictLockConf_nodeRestart), 0 };
+ sendDictLockReq(signal, lockType, c);
+}
+
+void Dbdih::recvDictLockConf_nodeRestart(Signal* signal, Uint32 data, Uint32 ret)
+{
+ ndbrequire(c_dictLockSlavePtrI_nodeRestart == RNIL);
+ c_dictLockSlavePtrI_nodeRestart = data;
+
+ nodeRestartPh2Lab2(signal);
+}
+
+void Dbdih::nodeRestartPh2Lab2(Signal* signal)
+{
/*------------------------------------------------------------------------*/
// REQUEST FOR PERMISSION FROM MASTER TO START A NODE IN AN ALREADY
// RUNNING SYSTEM.
@@ -1574,7 +1622,7 @@ void Dbdih::nodeRestartPh2Lab(Signal* signal)
req->nodeId = cownNodeId;
req->startType = cstarttype;
sendSignal(cmasterdihref, GSN_START_PERMREQ, signal, 3, JBB);
-}//Dbdih::nodeRestartPh2Lab()
+}
void Dbdih::execSTART_PERMCONF(Signal* signal)
{
@@ -1696,12 +1744,12 @@ void Dbdih::execSTART_PERMREQ(Signal* signal)
const BlockReference retRef = req->blockRef;
const Uint32 nodeId = req->nodeId;
const Uint32 typeStart = req->startType;
-
CRASH_INSERTION(7122);
ndbrequire(isMaster());
ndbrequire(refToNode(retRef) == nodeId);
if ((c_nodeStartMaster.activeState) ||
- (c_nodeStartMaster.wait != ZFALSE)) {
+ (c_nodeStartMaster.wait != ZFALSE) ||
+ ERROR_INSERTED_CLEAR(7175)) {
jam();
signal->theData[0] = nodeId;
signal->theData[1] = StartPermRef::ZNODE_ALREADY_STARTING_ERROR;
@@ -10448,6 +10496,10 @@ void Dbdih::crashSystemAtGcpStop(Signal* signal)
c_copyGCIMaster.m_copyReason,
c_copyGCIMaster.m_waiting);
break;
+ case GCP_READY: // shut up lint
+ case GCP_PREPARE_SENT:
+ case GCP_COMMIT_SENT:
+ break;
}
ndbout_c("c_copyGCISlave: sender{Data, Ref} %d %x reason: %d nextWord: %d",
@@ -14639,3 +14691,118 @@ Dbdih::NodeRecord::NodeRecord(){
copyCompleted = false;
allowNodeStart = true;
}
+
+// DICT lock slave
+
+void
+Dbdih::sendDictLockReq(Signal* signal, Uint32 lockType, Callback c)
+{
+ DictLockReq* req = (DictLockReq*)&signal->theData[0];
+ DictLockSlavePtr lockPtr;
+
+ c_dictLockSlavePool.seize(lockPtr);
+ ndbrequire(lockPtr.i != RNIL);
+
+ req->userPtr = lockPtr.i;
+ req->lockType = lockType;
+ req->userRef = reference();
+
+ lockPtr.p->lockPtr = RNIL;
+ lockPtr.p->lockType = lockType;
+ lockPtr.p->locked = false;
+ lockPtr.p->callback = c;
+
+ // handle rolling upgrade
+ {
+ Uint32 masterVersion = getNodeInfo(cmasterNodeId).m_version;
+
+ unsigned int get_major = getMajor(masterVersion);
+ unsigned int get_minor = getMinor(masterVersion);
+ unsigned int get_build = getBuild(masterVersion);
+
+ ndbrequire(get_major == 4 || get_major == 5);
+
+ if (masterVersion < NDBD_DICT_LOCK_VERSION_5 ||
+ ERROR_INSERTED(7176)) {
+ jam();
+
+ infoEvent("DIH: detect upgrade: master node %u old version %u.%u.%u",
+ (unsigned int)cmasterNodeId, get_major, get_minor, get_build);
+
+ DictLockConf* conf = (DictLockConf*)&signal->theData[0];
+ conf->userPtr = lockPtr.i;
+ conf->lockType = lockType;
+ conf->lockPtr = ZNIL;
+
+ sendSignal(reference(), GSN_DICT_LOCK_CONF, signal,
+ DictLockConf::SignalLength, JBB);
+ return;
+ }
+ }
+
+ BlockReference dictMasterRef = calcDictBlockRef(cmasterNodeId);
+ sendSignal(dictMasterRef, GSN_DICT_LOCK_REQ, signal,
+ DictLockReq::SignalLength, JBB);
+}
+
+void
+Dbdih::execDICT_LOCK_CONF(Signal* signal)
+{
+ jamEntry();
+ recvDictLockConf(signal);
+}
+
+void
+Dbdih::execDICT_LOCK_REF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}
+
+void
+Dbdih::recvDictLockConf(Signal* signal)
+{
+ const DictLockConf* conf = (const DictLockConf*)&signal->theData[0];
+
+ DictLockSlavePtr lockPtr;
+ c_dictLockSlavePool.getPtr(lockPtr, conf->userPtr);
+
+ lockPtr.p->lockPtr = conf->lockPtr;
+ ndbrequire(lockPtr.p->lockType == conf->lockType);
+ ndbrequire(lockPtr.p->locked == false);
+ lockPtr.p->locked = true;
+
+ lockPtr.p->callback.m_callbackData = lockPtr.i;
+ execute(signal, lockPtr.p->callback, 0);
+}
+
+void
+Dbdih::sendDictUnlockOrd(Signal* signal, Uint32 lockSlavePtrI)
+{
+ DictUnlockOrd* ord = (DictUnlockOrd*)&signal->theData[0];
+
+ DictLockSlavePtr lockPtr;
+ c_dictLockSlavePool.getPtr(lockPtr, lockSlavePtrI);
+
+ ord->lockPtr = lockPtr.p->lockPtr;
+ ord->lockType = lockPtr.p->lockType;
+
+ c_dictLockSlavePool.release(lockPtr);
+
+ // handle rolling upgrade
+ {
+ Uint32 masterVersion = getNodeInfo(cmasterNodeId).m_version;
+
+ unsigned int get_major = getMajor(masterVersion);
+ ndbrequire(get_major == 4 || get_major == 5);
+
+ if (masterVersion < NDBD_DICT_LOCK_VERSION_5 ||
+ ERROR_INSERTED(7176)) {
+ return;
+ }
+ }
+
+ BlockReference dictMasterRef = calcDictBlockRef(cmasterNodeId);
+ sendSignal(dictMasterRef, GSN_DICT_UNLOCK_ORD, signal,
+ DictUnlockOrd::SignalLength, JBB);
+}
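sendDictLockReq() and sendDictUnlockOrd() above also handle rolling upgrade: if the master's version predates DICT lock support, the slave answers itself with a DICT_LOCK_CONF so the rest of the start sequence runs unchanged. Below is a generic sketch of that "synthesize the reply locally when the peer is too old" pattern; the version packing, cut-off value and names are assumptions for illustration, not the NDB encoding.

// Sketch of the compatibility pattern used above: if the peer is too old to
// understand a new request, answer it locally so the caller's code path is
// identical either way. Version packing and cut-off are hypothetical.
#include <cstdint>
#include <functional>
#include <iostream>

constexpr uint32_t makeVersion(uint32_t major, uint32_t minor, uint32_t build) {
  return (major << 16) | (minor << 8) | build;
}
constexpr uint32_t FEATURE_SINCE = makeVersion(5, 1, 12);  // hypothetical cut-off

void requestLock(uint32_t masterVersion,
                 const std::function<void(bool real)>& onConf) {
  if (masterVersion < FEATURE_SINCE) {
    // Master cannot grant the lock; pretend it did so the caller proceeds.
    onConf(false);
    return;
  }
  // Normally: send the request to the master and run onConf when CONF arrives.
  onConf(true);
}

int main() {
  requestLock(makeVersion(5, 0, 22), [](bool real) {
    std::cout << (real ? "real" : "synthesized") << " lock conf\n";  // synthesized
  });
  requestLock(makeVersion(5, 1, 19), [](bool real) {
    std::cout << (real ? "real" : "synthesized") << " lock conf\n";  // real
  });
  return 0;
}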
diff --git a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
index 1ed383853ba..f8e6292f7f2 100644
--- a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
+++ b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
@@ -443,7 +443,6 @@ public:
UintR dictConnectptr;
UintR fragmentPtr;
UintR nextAddfragrec;
- UintR noOfAllocPages;
UintR schemaVer;
UintR tup1Connectptr;
UintR tup2Connectptr;
@@ -465,12 +464,17 @@ public:
Uint16 totalAttrReceived;
Uint16 fragCopyCreation;
Uint16 noOfKeyAttr;
- Uint32 noOfNewAttr; // noOfCharsets in upper half
+ Uint16 noOfNewAttr;
+ Uint16 noOfCharsets;
Uint16 noOfAttributeGroups;
Uint16 lh3DistrBits;
Uint16 tableType;
Uint16 primaryTableId;
- };// Size 108 bytes
+ Uint32 maxRowsLow;
+ Uint32 maxRowsHigh;
+ Uint32 minRowsLow;
+ Uint32 minRowsHigh;
+ };// Size 124 bytes
typedef Ptr<AddFragRecord> AddFragRecordPtr;
/* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
diff --git a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
index 56e93e6ee01..ecb67d04050 100644
--- a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
+++ b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
@@ -939,12 +939,16 @@ void Dblqh::execLQHFRAGREQ(Signal* signal)
Uint8 tlh = req->lh3PageBits;
Uint32 tnoOfAttr = req->noOfAttributes;
Uint32 tnoOfNull = req->noOfNullAttributes;
- Uint32 noOfAlloc = req->noOfPagesToPreAllocate;
+ Uint32 maxRowsLow = req->maxRowsLow;
+ Uint32 maxRowsHigh = req->maxRowsHigh;
+ Uint32 minRowsLow = req->minRowsLow;
+ Uint32 minRowsHigh = req->minRowsHigh;
Uint32 tschemaVersion = req->schemaVersion;
Uint32 ttupKeyLength = req->keyLength;
Uint32 nextLcp = req->nextLCP;
Uint32 noOfKeyAttr = req->noOfKeyAttr;
Uint32 noOfNewAttr = req->noOfNewAttr;
+ Uint32 noOfCharsets = req->noOfCharsets;
Uint32 checksumIndicator = req->checksumIndicator;
Uint32 noOfAttributeGroups = req->noOfAttributeGroups;
Uint32 gcpIndicator = req->GCPIndicator;
@@ -1042,7 +1046,10 @@ void Dblqh::execLQHFRAGREQ(Signal* signal)
addfragptr.p->m_senderAttrPtr = RNIL;
addfragptr.p->noOfAttr = tnoOfAttr;
addfragptr.p->noOfNull = tnoOfNull;
- addfragptr.p->noOfAllocPages = noOfAlloc;
+ addfragptr.p->maxRowsLow = maxRowsLow;
+ addfragptr.p->maxRowsHigh = maxRowsHigh;
+ addfragptr.p->minRowsLow = minRowsLow;
+ addfragptr.p->minRowsHigh = minRowsHigh;
addfragptr.p->tabId = tabptr.i;
addfragptr.p->totalAttrReceived = 0;
addfragptr.p->attrSentToTup = ZNIL;/* TO FIND PROGRAMMING ERRORS QUICKLY */
@@ -1052,6 +1059,7 @@ void Dblqh::execLQHFRAGREQ(Signal* signal)
addfragptr.p->addfragErrorCode = 0;
addfragptr.p->noOfKeyAttr = noOfKeyAttr;
addfragptr.p->noOfNewAttr = noOfNewAttr;
+ addfragptr.p->noOfCharsets = noOfCharsets;
addfragptr.p->checksumIndicator = checksumIndicator;
addfragptr.p->noOfAttributeGroups = noOfAttributeGroups;
addfragptr.p->GCPIndicator = gcpIndicator;
@@ -1221,47 +1229,56 @@ Dblqh::sendAddFragReq(Signal* signal)
ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
if (addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUP ||
addfragptr.p->addfragStatus == AddFragRecord::WAIT_ONE_TUP) {
+ TupFragReq* const tupFragReq = (TupFragReq*)signal->getDataPtrSend();
if (DictTabInfo::isTable(addfragptr.p->tableType) ||
DictTabInfo::isHashIndex(addfragptr.p->tableType)) {
jam();
- signal->theData[0] = addfragptr.i;
- signal->theData[1] = cownref;
- signal->theData[2] = 0; /* ADD TABLE */
- signal->theData[3] = addfragptr.p->tabId;
- signal->theData[4] = addfragptr.p->noOfAttr;
- signal->theData[5] =
+ tupFragReq->userPtr = addfragptr.i;
+ tupFragReq->userRef = cownref;
+ tupFragReq->reqInfo = 0; /* ADD TABLE */
+ tupFragReq->tableId = addfragptr.p->tabId;
+ tupFragReq->noOfAttr = addfragptr.p->noOfAttr;
+ tupFragReq->fragId =
addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUP
? addfragptr.p->fragid1 : addfragptr.p->fragid2;
- signal->theData[6] = (addfragptr.p->noOfAllocPages >> 1) + 1;
- signal->theData[7] = addfragptr.p->noOfNull;
- signal->theData[8] = addfragptr.p->schemaVer;
- signal->theData[9] = addfragptr.p->noOfKeyAttr;
- signal->theData[10] = addfragptr.p->noOfNewAttr;
- signal->theData[11] = addfragptr.p->checksumIndicator;
- signal->theData[12] = addfragptr.p->noOfAttributeGroups;
- signal->theData[13] = addfragptr.p->GCPIndicator;
+ tupFragReq->maxRowsLow = addfragptr.p->maxRowsLow;
+ tupFragReq->maxRowsHigh = addfragptr.p->maxRowsHigh;
+ tupFragReq->minRowsLow = addfragptr.p->minRowsLow;
+ tupFragReq->minRowsHigh = addfragptr.p->minRowsHigh;
+ tupFragReq->noOfNullAttr = addfragptr.p->noOfNull;
+ tupFragReq->schemaVersion = addfragptr.p->schemaVer;
+ tupFragReq->noOfKeyAttr = addfragptr.p->noOfKeyAttr;
+ tupFragReq->noOfNewAttr = addfragptr.p->noOfNewAttr;
+ tupFragReq->noOfCharsets = addfragptr.p->noOfCharsets;
+ tupFragReq->checksumIndicator = addfragptr.p->checksumIndicator;
+ tupFragReq->noOfAttributeGroups = addfragptr.p->noOfAttributeGroups;
+ tupFragReq->globalCheckpointIdIndicator = addfragptr.p->GCPIndicator;
sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ,
signal, TupFragReq::SignalLength, JBB);
return;
}
if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType)) {
jam();
- signal->theData[0] = addfragptr.i;
- signal->theData[1] = cownref;
- signal->theData[2] = 0; /* ADD TABLE */
- signal->theData[3] = addfragptr.p->tabId;
- signal->theData[4] = 1; /* ordered index: one array attr */
- signal->theData[5] =
+ tupFragReq->userPtr = addfragptr.i;
+ tupFragReq->userRef = cownref;
+ tupFragReq->reqInfo = 0; /* ADD TABLE */
+ tupFragReq->tableId = addfragptr.p->tabId;
+ tupFragReq->noOfAttr = 1; /* ordered index: one array attr */
+ tupFragReq->fragId =
addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUP
? addfragptr.p->fragid1 : addfragptr.p->fragid2;
- signal->theData[6] = (addfragptr.p->noOfAllocPages >> 1) + 1;
- signal->theData[7] = 0; /* ordered index: no nullable */
- signal->theData[8] = addfragptr.p->schemaVer;
- signal->theData[9] = 1; /* ordered index: one key */
- signal->theData[10] = addfragptr.p->noOfNewAttr;
- signal->theData[11] = addfragptr.p->checksumIndicator;
- signal->theData[12] = addfragptr.p->noOfAttributeGroups;
- signal->theData[13] = addfragptr.p->GCPIndicator;
+ tupFragReq->maxRowsLow = addfragptr.p->maxRowsLow;
+ tupFragReq->maxRowsHigh = addfragptr.p->maxRowsHigh;
+ tupFragReq->minRowsLow = addfragptr.p->minRowsLow;
+ tupFragReq->minRowsHigh = addfragptr.p->minRowsHigh;
+ tupFragReq->noOfNullAttr = 0; /* ordered index: no nullable */
+ tupFragReq->schemaVersion = addfragptr.p->schemaVer;
+ tupFragReq->noOfKeyAttr = 1; /* ordered index: one key */
+ tupFragReq->noOfNewAttr = addfragptr.p->noOfNewAttr;
+ tupFragReq->noOfCharsets = addfragptr.p->noOfCharsets;
+ tupFragReq->checksumIndicator = addfragptr.p->checksumIndicator;
+ tupFragReq->noOfAttributeGroups = addfragptr.p->noOfAttributeGroups;
+ tupFragReq->globalCheckpointIdIndicator = addfragptr.p->GCPIndicator;
sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ,
signal, TupFragReq::SignalLength, JBB);
return;
@@ -1580,28 +1597,35 @@ void Dblqh::abortAddFragOps(Signal* signal)
{
fragptr.i = addfragptr.p->fragmentPtr;
ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- signal->theData[0] = (Uint32)-1;
if (addfragptr.p->tup1Connectptr != RNIL) {
jam();
- signal->theData[1] = addfragptr.p->tup1Connectptr;
+ TupFragReq* const tupFragReq = (TupFragReq*)signal->getDataPtrSend();
+ tupFragReq->userPtr = (Uint32)-1;
+ tupFragReq->userRef = addfragptr.p->tup1Connectptr;
sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, 2, JBB);
addfragptr.p->tup1Connectptr = RNIL;
}
if (addfragptr.p->tup2Connectptr != RNIL) {
jam();
- signal->theData[1] = addfragptr.p->tup2Connectptr;
+ TupFragReq* const tupFragReq = (TupFragReq*)signal->getDataPtrSend();
+ tupFragReq->userPtr = (Uint32)-1;
+ tupFragReq->userRef = addfragptr.p->tup2Connectptr;
sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, 2, JBB);
addfragptr.p->tup2Connectptr = RNIL;
}
if (addfragptr.p->tux1Connectptr != RNIL) {
jam();
- signal->theData[1] = addfragptr.p->tux1Connectptr;
+ TuxFragReq* const tuxFragReq = (TuxFragReq*)signal->getDataPtrSend();
+ tuxFragReq->userPtr = (Uint32)-1;
+ tuxFragReq->userRef = addfragptr.p->tux1Connectptr;
sendSignal(fragptr.p->tuxBlockref, GSN_TUXFRAGREQ, signal, 2, JBB);
addfragptr.p->tux1Connectptr = RNIL;
}
if (addfragptr.p->tux2Connectptr != RNIL) {
jam();
- signal->theData[1] = addfragptr.p->tux2Connectptr;
+ TuxFragReq* const tuxFragReq = (TuxFragReq*)signal->getDataPtrSend();
+ tuxFragReq->userPtr = (Uint32)-1;
+ tuxFragReq->userRef = addfragptr.p->tux2Connectptr;
sendSignal(fragptr.p->tuxBlockref, GSN_TUXFRAGREQ, signal, 2, JBB);
addfragptr.p->tux2Connectptr = RNIL;
}
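The hunks above replace positional writes into signal->theData[] with named fields on a TupFragReq/TuxFragReq struct obtained from getDataPtrSend(). Here is a self-contained sketch of that refactoring, using invented stand-ins for the kernel's Signal and request structs; the real code relies on the fixed word layout of each signal.

// Sketch of the refactoring above: overlay a struct with named fields on the
// signal word buffer instead of writing words by index. FakeSignal and
// FakeTupFragReq are invented stand-ins for the kernel types.
#include <cstdint>
#include <iostream>

struct FakeSignal {
  uint32_t theData[25];
  void* getDataPtrSend() { return theData; }
};

struct FakeTupFragReq {      // field order defines the word layout
  uint32_t userPtr;
  uint32_t userRef;
  uint32_t tableId;
  uint32_t maxRowsLow;
  uint32_t maxRowsHigh;
};

int main() {
  FakeSignal signal{};

  // Positional style: the meaning of each index is implicit and easy to mix up.
  signal.theData[0] = 17;        // userPtr
  signal.theData[3] = 1000000;   // maxRowsLow

  // Named-field style: the same words, but self-documenting.
  FakeTupFragReq* req = static_cast<FakeTupFragReq*>(signal.getDataPtrSend());
  req->userPtr = 17;
  req->userRef = 0xF00D;
  req->tableId = 3;
  req->maxRowsLow = 1000000;
  req->maxRowsHigh = 0;

  std::cout << "word 0 = " << signal.theData[0]
            << ", word 3 = " << signal.theData[3] << "\n";   // 17, 1000000
  return 0;
}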
diff --git a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
index cf3c6056d65..41194fba82c 100644
--- a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
+++ b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
@@ -496,7 +496,8 @@ struct DiskBufferSegmentInfo {
typedef Ptr<DiskBufferSegmentInfo> DiskBufferSegmentInfoPtr;
struct Fragoperrec {
- bool definingFragment;
+ Uint64 minRows;
+ Uint64 maxRows;
Uint32 nextFragoprec;
Uint32 lqhPtrFrag;
Uint32 fragidFrag;
@@ -509,6 +510,7 @@ struct Fragoperrec {
Uint32 charsetIndex;
BlockReference lqhBlockrefFrag;
bool inUse;
+ bool definingFragment;
};
typedef Ptr<Fragoperrec> FragoperrecPtr;
@@ -560,6 +562,7 @@ struct Fragrecord {
Uint32 currentPageRange;
Uint32 rootPageRange;
Uint32 noOfPages;
+ Uint32 noOfPagesToGrow;
Uint32 emptyPrimPage;
Uint32 firstusedOprec;
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
index bacba2a880c..12cd61a17a6 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
@@ -41,7 +41,8 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
{
ljamEntry();
- if (signal->theData[0] == (Uint32)-1) {
+ TupFragReq* tupFragReq = (TupFragReq*)signal->getDataPtr();
+ if (tupFragReq->userPtr == (Uint32)-1) {
ljam();
abortAddFragOp(signal);
return;
@@ -51,30 +52,34 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
FragrecordPtr regFragPtr;
TablerecPtr regTabPtr;
- Uint32 userptr = signal->theData[0];
- Uint32 userblockref = signal->theData[1];
- Uint32 reqinfo = signal->theData[2];
- regTabPtr.i = signal->theData[3];
- Uint32 noOfAttributes = signal->theData[4];
- Uint32 fragId = signal->theData[5];
- Uint32 noOfNullAttr = signal->theData[7];
- /* Uint32 schemaVersion = signal->theData[8];*/
- Uint32 noOfKeyAttr = signal->theData[9];
+ Uint32 userptr = tupFragReq->userPtr;
+ Uint32 userblockref = tupFragReq->userRef;
+ Uint32 reqinfo = tupFragReq->reqInfo;
+ regTabPtr.i = tupFragReq->tableId;
+ Uint32 noOfAttributes = tupFragReq->noOfAttr;
+ Uint32 fragId = tupFragReq->fragId;
+ Uint32 noOfNullAttr = tupFragReq->noOfNullAttr;
+ /* Uint32 schemaVersion = tupFragReq->schemaVersion;*/
+ Uint32 noOfKeyAttr = tupFragReq->noOfKeyAttr;
- Uint32 noOfNewAttr = (signal->theData[10] & 0xFFFF);
- /* DICT sends number of character sets in upper half */
- Uint32 noOfCharsets = (signal->theData[10] >> 16);
+ Uint32 noOfNewAttr = tupFragReq->noOfNewAttr;
+ Uint32 noOfCharsets = tupFragReq->noOfCharsets;
- Uint32 checksumIndicator = signal->theData[11];
- Uint32 noOfAttributeGroups = signal->theData[12];
- Uint32 globalCheckpointIdIndicator = signal->theData[13];
+ Uint32 checksumIndicator = tupFragReq->checksumIndicator;
+ Uint32 noOfAttributeGroups = tupFragReq->noOfAttributeGroups;
+ Uint32 globalCheckpointIdIndicator = tupFragReq->globalCheckpointIdIndicator;
+
+ Uint64 maxRows =
+ (((Uint64)tupFragReq->maxRowsHigh) << 32) + tupFragReq->maxRowsLow;
+ Uint64 minRows =
+ (((Uint64)tupFragReq->minRowsHigh) << 32) + tupFragReq->minRowsLow;
#ifndef VM_TRACE
// config mismatch - do not crash if release compiled
if (regTabPtr.i >= cnoOfTablerec) {
ljam();
- signal->theData[0] = userptr;
- signal->theData[1] = 800;
+ tupFragReq->userPtr = userptr;
+ tupFragReq->userRef = 800;
sendSignal(userblockref, GSN_TUPFRAGREF, signal, 2, JBB);
return;
}
@@ -83,8 +88,8 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec);
if (cfirstfreeFragopr == RNIL) {
ljam();
- signal->theData[0] = userptr;
- signal->theData[1] = ZNOFREE_FRAGOP_ERROR;
+ tupFragReq->userPtr = userptr;
+ tupFragReq->userRef = ZNOFREE_FRAGOP_ERROR;
sendSignal(userblockref, GSN_TUPFRAGREF, signal, 2, JBB);
return;
}//if
@@ -100,6 +105,9 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
fragOperPtr.p->noOfNewAttrCount = noOfNewAttr;
fragOperPtr.p->charsetIndex = 0;
fragOperPtr.p->currNullBit = 0;
+ // in 5.0 there are 2 fragments per fragment, so halve the row counts here (remove in 5.1)
+ fragOperPtr.p->minRows = (minRows + 1)/2;
+ fragOperPtr.p->maxRows = (maxRows + 1)/2;
ndbrequire(reqinfo == ZADDFRAG);
@@ -141,16 +149,6 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
regFragPtr.p->fragmentId = fragId;
regFragPtr.p->checkpointVersion = RNIL;
- Uint32 noAllocatedPages = 2;
- noAllocatedPages = allocFragPages(regFragPtr.p, noAllocatedPages);
-
- if (noAllocatedPages == 0) {
- ljam();
- terrorCode = ZNO_PAGES_ALLOCATED_ERROR;
- fragrefuse3Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId);
- return;
- }//if
-
if (ERROR_INSERTED(4007) && regTabPtr.p->fragid[0] == fragId ||
ERROR_INSERTED(4008) && regTabPtr.p->fragid[1] == fragId) {
ljam();
@@ -407,6 +405,27 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
CLEAR_ERROR_INSERT_VALUE;
return;
}
+
+ if (lastAttr)
+ {
+ ljam();
+ Uint32 noRowsPerPage = ZWORDS_ON_PAGE/regTabPtr.p->tupheadsize;
+ Uint32 noAllocatedPages =
+ (fragOperPtr.p->minRows + noRowsPerPage - 1 )/ noRowsPerPage;
+ if (fragOperPtr.p->minRows == 0)
+ noAllocatedPages = 2;
+ else if (noAllocatedPages == 0)
+ noAllocatedPages = 2;
+ noAllocatedPages = allocFragPages(regFragPtr.p, noAllocatedPages);
+
+ if (noAllocatedPages == 0) {
+ ljam();
+ terrorCode = ZNO_PAGES_ALLOCATED_ERROR;
+ addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId);
+ return;
+ }//if
+ }
+
/* **************************************************************** */
/* ************** TUP_ADD_ATTCONF ****************** */
/* **************************************************************** */
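The block added above defers the initial page allocation to the last TUP_ADD_ATTRREQ, when the tuple header size is known, and sizes it from MinRows instead of a fixed 2 pages: rows per page is ZWORDS_ON_PAGE / tupheadsize, the page count is ceil(minRows / rowsPerPage) with a floor of 2 pages, and the per-fragment minimum is halved because of the two-fragments-per-fragment split in 5.0. A worked version of that arithmetic follows; the words-per-page and tuple-size values are placeholders, not the NDB constants.

// Worked example of the initial page-count computation introduced above.
// The words-per-page and tuple header size are placeholder values.
#include <cstdint>
#include <iostream>

uint64_t initialPages(uint64_t minRows, uint32_t wordsOnPage, uint32_t tupheadsize) {
  // In 5.0 each table fragment is split in two, so the per-fragment minimum
  // is roughly half of the table-level MinRows (rounded up).
  uint64_t minRowsPerFrag = (minRows + 1) / 2;

  uint32_t rowsPerPage = wordsOnPage / tupheadsize;
  uint64_t pages = (minRowsPerFrag + rowsPerPage - 1) / rowsPerPage;  // ceil

  if (minRowsPerFrag == 0 || pages == 0)
    pages = 2;                       // fall back to the old default of 2 pages
  return pages;
}

int main() {
  // e.g. MinRows = 1,000,000 rows, 8192 words per page, 32-word tuples:
  // 500,000 rows per fragment / 256 rows per page -> 1954 pages (ceil).
  std::cout << initialPages(1000000, 8192, 32) << "\n";  // 1954
  std::cout << initialPages(0, 8192, 32) << "\n";        // 2 (default)
  return 0;
}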
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp b/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
index 1f674876642..acdb73704cb 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
@@ -332,6 +332,7 @@ void Dbtup::initFragRange(Fragrecord* const regFragPtr)
regFragPtr->rootPageRange = RNIL;
regFragPtr->currentPageRange = RNIL;
regFragPtr->noOfPages = 0;
+ regFragPtr->noOfPagesToGrow = 2;
regFragPtr->nextStartRange = 0;
}//initFragRange()
@@ -393,9 +394,10 @@ Uint32 Dbtup::allocFragPages(Fragrecord* const regFragPtr, Uint32 tafpNoAllocReq
void Dbtup::allocMoreFragPages(Fragrecord* const regFragPtr)
{
- Uint32 noAllocPages = regFragPtr->noOfPages >> 3; // 12.5%
- noAllocPages += regFragPtr->noOfPages >> 4; // 6.25%
+ Uint32 noAllocPages = regFragPtr->noOfPagesToGrow >> 3; // 12.5%
+ noAllocPages += regFragPtr->noOfPagesToGrow >> 4; // 6.25%
noAllocPages += 2;
+ regFragPtr->noOfPagesToGrow += noAllocPages;
/* -----------------------------------------------------------------*/
// We will grow by 18.75% plus two more additional pages to grow
// a little bit quicker in the beginning.
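allocMoreFragPages() now grows relative to a separate noOfPagesToGrow counter rather than the fragment's current noOfPages. In the hunks shown, that counter starts at 2 and is bumped only by each grow call, so a fragment that was pre-allocated large via MinRows still grows gently (about 18.75% of the counter plus 2) instead of by a large fraction of its existing size. A small comparison of the two rules, under that reading of the patch:

// Compare the old growth rule (based on the current page count) with the new
// one (based on a separate noOfPagesToGrow counter), as in the hunk above.
#include <cstdint>
#include <iostream>

uint32_t growOld(uint32_t noOfPages) {
  return (noOfPages >> 3) + (noOfPages >> 4) + 2;       // 12.5% + 6.25% + 2
}

uint32_t growNew(uint32_t& noOfPagesToGrow) {
  uint32_t alloc = (noOfPagesToGrow >> 3) + (noOfPagesToGrow >> 4) + 2;
  noOfPagesToGrow += alloc;                             // compounds independently
  return alloc;
}

int main() {
  // A fragment pre-allocated to 2000 pages by MinRows:
  std::cout << "old first grow: " << growOld(2000) << " pages\n";   // 377

  uint32_t toGrow = 2;          // new counter starts at 2 regardless of size
  for (int i = 0; i < 3; i++)
    std::cout << "new grow " << i << ": " << growNew(toGrow) << " pages\n";
  // new grow 0..2: 2 pages each -- the counter has to catch up before the
  // 18.75% term contributes, instead of jumping by hundreds of pages at once.
  return 0;
}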
diff --git a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
index 9a7256b4a55..b9bf522f7c8 100644
--- a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
+++ b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
@@ -2477,7 +2477,7 @@ void Qmgr::execDISCONNECT_REP(Signal* signal)
{
jam();
CRASH_INSERTION(932);
- BaseString::snprintf(buf, 100, "Node %u disconected", nodeId);
+ BaseString::snprintf(buf, 100, "Node %u disconnected", nodeId);
progError(__LINE__, NDBD_EXIT_SR_OTHERNODEFAILED, buf);
ndbrequire(false);
}
@@ -2500,7 +2500,7 @@ void Qmgr::execDISCONNECT_REP(Signal* signal)
ndbrequire(false);
case ZAPI_INACTIVE:
{
- BaseString::snprintf(buf, 100, "Node %u disconected", nodeId);
+ BaseString::snprintf(buf, 100, "Node %u disconnected", nodeId);
progError(__LINE__, NDBD_EXIT_SR_OTHERNODEFAILED, buf);
ndbrequire(false);
}
diff --git a/ndb/src/kernel/main.cpp b/ndb/src/kernel/main.cpp
index 7c1763485ce..649ae7cae3f 100644
--- a/ndb/src/kernel/main.cpp
+++ b/ndb/src/kernel/main.cpp
@@ -420,6 +420,10 @@ int main(int argc, char** argv)
FILE * signalLog = fopen(buf, "a");
globalSignalLoggers.setOwnNodeId(globalData.ownId);
globalSignalLoggers.setOutputStream(signalLog);
+#if 0 // to log startup
+ globalSignalLoggers.log(SignalLoggerManager::LogInOut, "BLOCK=DBDICT,DBDIH");
+ globalData.testOn = 1;
+#endif
#endif
catchsigs(false);
diff --git a/ndb/src/kernel/vm/DLFifoList.hpp b/ndb/src/kernel/vm/DLFifoList.hpp
index b139ade831d..963ab007b65 100644
--- a/ndb/src/kernel/vm/DLFifoList.hpp
+++ b/ndb/src/kernel/vm/DLFifoList.hpp
@@ -115,6 +115,13 @@ public:
*/
bool hasNext(const Ptr<T> &) const;
+ /**
+ * Check if prev exists i.e. this is not first
+ *
+ * NOTE ptr must be both p & i
+ */
+ bool hasPrev(const Ptr<T> &) const;
+
Uint32 noOfElements() const {
Uint32 c = 0;
Uint32 i = head.firstItem;
@@ -357,4 +364,11 @@ DLFifoList<T>::hasNext(const Ptr<T> & p) const {
return p.p->nextList != RNIL;
}
+template <class T>
+inline
+bool
+DLFifoList<T>::hasPrev(const Ptr<T> & p) const {
+ return p.p->prevList != RNIL;
+}
+
#endif
diff --git a/ndb/src/kernel/vm/pc.hpp b/ndb/src/kernel/vm/pc.hpp
index 6aeda59224f..95839c48e4e 100644
--- a/ndb/src/kernel/vm/pc.hpp
+++ b/ndb/src/kernel/vm/pc.hpp
@@ -125,11 +125,13 @@
#ifdef ERROR_INSERT
#define ERROR_INSERT_VARIABLE UintR cerrorInsert
#define ERROR_INSERTED(x) (cerrorInsert == (x))
+#define ERROR_INSERTED_CLEAR(x) (cerrorInsert == (x) ? (cerrorInsert = 0, true) : false)
#define SET_ERROR_INSERT_VALUE(x) cerrorInsert = x
#define CLEAR_ERROR_INSERT_VALUE cerrorInsert = 0
#else
#define ERROR_INSERT_VARIABLE typedef void * cerrorInsert // Will generate compiler error if used
#define ERROR_INSERTED(x) false
+#define ERROR_INSERTED_CLEAR(x) false
#define SET_ERROR_INSERT_VALUE(x)
#define CLEAR_ERROR_INSERT_VALUE
#endif
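ERROR_INSERTED_CLEAR() is a one-shot variant of ERROR_INSERTED(): it evaluates to true exactly once for a matching inserted error code and clears the variable as a side effect, which is how error 7175 above yields a single fake START_PERMREF rather than refusing every START_PERMREQ. A trivial standalone reproduction of that behaviour:

// One-shot error-insert check, mirroring the ERROR_INSERTED_CLEAR macro above.
#include <cstdint>
#include <iostream>

static uint32_t cerrorInsert = 0;

#define ERROR_INSERTED(x)       (cerrorInsert == (x))
#define ERROR_INSERTED_CLEAR(x) (cerrorInsert == (x) ? (cerrorInsert = 0, true) : false)

int main() {
  cerrorInsert = 7175;                                  // SET_ERROR_INSERT_VALUE

  std::cout << std::boolalpha;
  std::cout << ERROR_INSERTED_CLEAR(7175) << "\n";      // true  (and clears)
  std::cout << ERROR_INSERTED_CLEAR(7175) << "\n";      // false (already cleared)
  std::cout << ERROR_INSERTED(7175) << "\n";            // false
  return 0;
}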
diff --git a/ndb/src/mgmsrv/ConfigInfo.cpp b/ndb/src/mgmsrv/ConfigInfo.cpp
index dfe4f9aa63e..629ddf7a655 100644
--- a/ndb/src/mgmsrv/ConfigInfo.cpp
+++ b/ndb/src/mgmsrv/ConfigInfo.cpp
@@ -853,7 +853,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
false,
ConfigInfo::CI_INT,
"8",
- "1",
+ "3",
STR_VALUE(MAX_INT_RNIL) },
{
diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp
index b9466ed1173..69c0286a1de 100644
--- a/ndb/src/mgmsrv/MgmtSrvr.cpp
+++ b/ndb/src/mgmsrv/MgmtSrvr.cpp
@@ -2380,14 +2380,20 @@ MgmtSrvr::startBackup(Uint32& backupId, int waitCompleted)
event.Event = BackupEvent::BackupCompleted;
event.Completed.BackupId = rep->backupId;
- event.Completed.NoOfBytes = rep->noOfBytes;
+ event.Completed.NoOfBytes = rep->noOfBytesLow;
event.Completed.NoOfLogBytes = rep->noOfLogBytes;
- event.Completed.NoOfRecords = rep->noOfRecords;
+ event.Completed.NoOfRecords = rep->noOfRecordsLow;
event.Completed.NoOfLogRecords = rep->noOfLogRecords;
event.Completed.stopGCP = rep->stopGCP;
event.Completed.startGCP = rep->startGCP;
event.Nodes = rep->nodes;
+ if (signal->header.theLength >= BackupCompleteRep::SignalLength)
+ {
+ event.Completed.NoOfBytes += ((Uint64)rep->noOfBytesHigh) << 32;
+ event.Completed.NoOfRecords += ((Uint64)rep->noOfRecordsHigh) << 32;
+ }
+
backupId = rep->backupId;
return 0;
}
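The backup-completed report now carries byte and record totals as 32-bit low/high word pairs, and the management server widens its counters to Uint64 but only folds in the high words when the received signal is long enough to contain them (an older data node sends the short form). A sketch of that decoding, with an invented stand-in for the report layout:

// Recombining 64-bit counters sent as 32-bit low/high words, only trusting the
// high words when the sender actually included them.
#include <cstdint>
#include <iostream>

struct FakeBackupCompleteRep {        // stand-in for the real signal layout
  uint32_t noOfBytesLow;
  uint32_t noOfRecordsLow;
  uint32_t noOfBytesHigh;
  uint32_t noOfRecordsHigh;
  static constexpr uint32_t SignalLength = 4;   // full (new) length in words
};

void decode(const FakeBackupCompleteRep& rep, uint32_t receivedLength,
            uint64_t& bytes, uint64_t& records) {
  bytes   = rep.noOfBytesLow;
  records = rep.noOfRecordsLow;
  if (receivedLength >= FakeBackupCompleteRep::SignalLength) {
    // Sender is new enough to have filled in the high words.
    bytes   += static_cast<uint64_t>(rep.noOfBytesHigh)   << 32;
    records += static_cast<uint64_t>(rep.noOfRecordsHigh) << 32;
  }
}

int main() {
  FakeBackupCompleteRep rep{0x00000010u, 5u, 0x00000002u, 0u};
  uint64_t bytes, records;

  decode(rep, 4, bytes, records);   // new sender: high words are valid
  std::cout << bytes << " bytes, " << records << " records\n";  // 8589934608, 5

  decode(rep, 2, bytes, records);   // old sender: only the low words exist
  std::cout << bytes << " bytes, " << records << " records\n";  // 16, 5
  return 0;
}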
diff --git a/ndb/src/mgmsrv/MgmtSrvr.hpp b/ndb/src/mgmsrv/MgmtSrvr.hpp
index 7811cf0e5d1..187f225470a 100644
--- a/ndb/src/mgmsrv/MgmtSrvr.hpp
+++ b/ndb/src/mgmsrv/MgmtSrvr.hpp
@@ -323,9 +323,9 @@ public:
Uint32 ErrorCode;
} FailedToStart ;
struct {
+ Uint64 NoOfBytes;
+ Uint64 NoOfRecords;
Uint32 BackupId;
- Uint32 NoOfBytes;
- Uint32 NoOfRecords;
Uint32 NoOfLogBytes;
Uint32 NoOfLogRecords;
Uint32 startGCP;
diff --git a/ndb/src/ndbapi/NdbDictionary.cpp b/ndb/src/ndbapi/NdbDictionary.cpp
index a342a5d5926..a0a3dd431b8 100644
--- a/ndb/src/ndbapi/NdbDictionary.cpp
+++ b/ndb/src/ndbapi/NdbDictionary.cpp
@@ -385,6 +385,30 @@ NdbDictionary::Table::getNoOfPrimaryKeys() const {
return m_impl.m_noOfKeys;
}
+void
+NdbDictionary::Table::setMaxRows(Uint64 maxRows)
+{
+ m_impl.m_max_rows = maxRows;
+}
+
+Uint64
+NdbDictionary::Table::getMaxRows() const
+{
+ return m_impl.m_max_rows;
+}
+
+void
+NdbDictionary::Table::setMinRows(Uint64 minRows)
+{
+ m_impl.m_min_rows = minRows;
+}
+
+Uint64
+NdbDictionary::Table::getMinRows() const
+{
+ return m_impl.m_min_rows;
+}
+
const char*
NdbDictionary::Table::getPrimaryKey(int no) const {
int count = 0;
diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp
index bd50440b3c0..ce348b616c9 100644
--- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp
+++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp
@@ -319,6 +319,8 @@ NdbTableImpl::init(){
m_noOfDistributionKeys= 0;
m_noOfBlobs= 0;
m_replicaCount= 0;
+ m_min_rows = 0;
+ m_max_rows = 0;
}
bool
@@ -416,6 +418,9 @@ NdbTableImpl::assign(const NdbTableImpl& org)
m_version = org.m_version;
m_status = org.m_status;
+
+ m_max_rows = org.m_max_rows;
+ m_min_rows = org.m_min_rows;
}
void NdbTableImpl::setName(const char * name)
@@ -1302,6 +1307,12 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
fragmentTypeMapping,
(Uint32)NdbDictionary::Object::FragUndefined);
+ Uint64 max_rows = ((Uint64)tableDesc.MaxRowsHigh) << 32;
+ max_rows += tableDesc.MaxRowsLow;
+ impl->m_max_rows = max_rows;
+ Uint64 min_rows = ((Uint64)tableDesc.MinRowsHigh) << 32;
+ min_rows += tableDesc.MinRowsLow;
+ impl->m_min_rows = min_rows;
impl->m_logging = tableDesc.TableLoggedFlag;
impl->m_kvalue = tableDesc.TableKValue;
impl->m_minLoadFactor = tableDesc.MinLoadFactor;
@@ -1630,7 +1641,16 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
tmpTab.MaxLoadFactor = impl.m_maxLoadFactor;
tmpTab.TableType = DictTabInfo::UserTable;
tmpTab.NoOfAttributes = sz;
+ tmpTab.MaxRowsHigh = (Uint32)(impl.m_max_rows >> 32);
+ tmpTab.MaxRowsLow = (Uint32)(impl.m_max_rows & 0xFFFFFFFF);
+ tmpTab.MinRowsHigh = (Uint32)(impl.m_min_rows >> 32);
+ tmpTab.MinRowsLow = (Uint32)(impl.m_min_rows & 0xFFFFFFFF);
+ Uint64 maxRows =
+ (((Uint64)tmpTab.MaxRowsHigh) << 32) + tmpTab.MaxRowsLow;
+ Uint64 minRows =
+ (((Uint64)tmpTab.MinRowsHigh) << 32) + tmpTab.MinRowsLow;
+
tmpTab.FragmentType = getKernelConstant(impl.m_fragmentType,
fragmentTypeMapping,
DictTabInfo::AllNodesSmallTable);
diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/ndb/src/ndbapi/NdbDictionaryImpl.hpp
index bc9894497f8..dfccf120228 100644
--- a/ndb/src/ndbapi/NdbDictionaryImpl.hpp
+++ b/ndb/src/ndbapi/NdbDictionaryImpl.hpp
@@ -130,6 +130,9 @@ public:
Uint32 m_hashpointerValue;
Vector<Uint16> m_fragments;
+ Uint64 m_max_rows;
+ Uint64 m_min_rows;
+
bool m_logging;
int m_kvalue;
int m_minLoadFactor;
diff --git a/ndb/src/ndbapi/ndberror.c b/ndb/src/ndbapi/ndberror.c
index 91d58f515aa..946e6fe4cc2 100644
--- a/ndb/src/ndbapi/ndberror.c
+++ b/ndb/src/ndbapi/ndberror.c
@@ -326,6 +326,7 @@ ErrorBundle ErrorCodes[] = {
* SchemaError
*/
{ 701, SE, "System busy with other schema operation" },
+ { 711, SE, "System busy with node restart, schema operations not allowed" },
{ 703, SE, "Invalid table format" },
{ 704, SE, "Attribute name too long" },
{ 705, SE, "Table name too long" },
diff --git a/ndb/test/ndbapi/testDict.cpp b/ndb/test/ndbapi/testDict.cpp
index 710a47bf3dc..397f41b3d4e 100644
--- a/ndb/test/ndbapi/testDict.cpp
+++ b/ndb/test/ndbapi/testDict.cpp
@@ -1551,6 +1551,299 @@ end:
return result;
}
+// NFNR
+
+// Restarter controls dict ops : 1-run 2-pause 3-stop
+// synced by polling...
+
+static bool
+send_dict_ops_cmd(NDBT_Context* ctx, Uint32 cmd)
+{
+ ctx->setProperty("DictOps_CMD", cmd);
+ while (1) {
+ if (ctx->isTestStopped())
+ return false;
+ if (ctx->getProperty("DictOps_ACK") == cmd)
+ break;
+ NdbSleep_MilliSleep(100);
+ }
+ return true;
+}
+
+static bool
+recv_dict_ops_run(NDBT_Context* ctx)
+{
+ while (1) {
+ if (ctx->isTestStopped())
+ return false;
+ Uint32 cmd = ctx->getProperty("DictOps_CMD");
+ ctx->setProperty("DictOps_ACK", cmd);
+ if (cmd == 1)
+ break;
+ if (cmd == 3)
+ return false;
+ NdbSleep_MilliSleep(100);
+ }
+ return true;
+}
+
+int
+runRestarts(NDBT_Context* ctx, NDBT_Step* step)
+{
+ static int errlst_master[] = { // non-crashing
+ 7175, // send one fake START_PERMREF
+ 0
+ };
+ static int errlst_node[] = {
+ 7174, // crash before sending DICT_LOCK_REQ
+ 7176, // pretend master does not support DICT lock
+ 7121, // crash at receive START_PERMCONF
+ 0
+ };
+ const uint errcnt_master = sizeof(errlst_master)/sizeof(errlst_master[0]);
+ const uint errcnt_node = sizeof(errlst_node)/sizeof(errlst_node[0]);
+
+ myRandom48Init(NdbTick_CurrentMillisecond());
+ NdbRestarter restarter;
+ int result = NDBT_OK;
+ const int loops = ctx->getNumLoops();
+
+ for (int l = 0; l < loops && result == NDBT_OK; l++) {
+ g_info << "1: === loop " << l << " ===" << endl;
+
+ // assuming 2-way replicated
+
+ int numnodes = restarter.getNumDbNodes();
+ CHECK(numnodes >= 1);
+ if (numnodes == 1)
+ break;
+
+ int masterNodeId = restarter.getMasterNodeId();
+ CHECK(masterNodeId != -1);
+
+ // for more complex cases need more restarter support methods
+
+ int nodeIdList[2] = { 0, 0 };
+ int nodeIdCnt = 0;
+
+ if (numnodes >= 2) {
+ int rand = myRandom48(numnodes);
+ int nodeId = restarter.getRandomNotMasterNodeId(rand);
+ CHECK(nodeId != -1);
+ nodeIdList[nodeIdCnt++] = nodeId;
+ }
+
+ if (numnodes >= 4 && myRandom48(2) == 0) {
+ int rand = myRandom48(numnodes);
+ int nodeId = restarter.getRandomNodeOtherNodeGroup(nodeIdList[0], rand);
+ CHECK(nodeId != -1);
+ if (nodeId != masterNodeId)
+ nodeIdList[nodeIdCnt++] = nodeId;
+ }
+
+ g_info << "1: master=" << masterNodeId << " nodes=" << nodeIdList[0] << "," << nodeIdList[1] << endl;
+
+ const uint timeout = 60; //secs for node wait
+ const unsigned maxsleep = 2000; //ms
+
+ bool NF_ops = ctx->getProperty("Restart_NF_ops");
+ uint NF_type = ctx->getProperty("Restart_NF_type");
+ bool NR_ops = ctx->getProperty("Restart_NR_ops");
+ bool NR_error = ctx->getProperty("Restart_NR_error");
+
+ g_info << "1: " << (NF_ops ? "run" : "pause") << " dict ops" << endl;
+ if (! send_dict_ops_cmd(ctx, NF_ops ? 1 : 2))
+ break;
+ NdbSleep_MilliSleep(myRandom48(maxsleep));
+
+ {
+ for (int i = 0; i < nodeIdCnt; i++) {
+ int nodeId = nodeIdList[i];
+
+ bool nostart = true;
+ bool abort = NF_type == 0 ? myRandom48(2) : (NF_type == 2);
+ bool initial = myRandom48(2);
+
+ char flags[40];
+ strcpy(flags, "flags: nostart");
+ if (abort)
+ strcat(flags, ",abort");
+ if (initial)
+ strcat(flags, ",initial");
+
+ g_info << "1: restart " << nodeId << " " << flags << endl;
+ CHECK(restarter.restartOneDbNode(nodeId, initial, nostart, abort) == 0);
+ }
+ }
+
+ g_info << "1: wait for nostart" << endl;
+ CHECK(restarter.waitNodesNoStart(nodeIdList, nodeIdCnt, timeout) == 0);
+ NdbSleep_MilliSleep(myRandom48(maxsleep));
+
+ int err_master = 0;
+ int err_node[2] = { 0, 0 };
+
+ if (NR_error) {
+ err_master = errlst_master[l % errcnt_master];
+
+ // limitation: cannot have 2 node restarts and crash_insert
+ // one node may die for real (NF during startup)
+
+ for (int i = 0; i < nodeIdCnt && nodeIdCnt == 1; i++) {
+ err_node[i] = errlst_node[l % errcnt_node];
+
+ // 7176 - no DICT lock protection
+
+ if (err_node[i] == 7176) {
+ g_info << "1: no dict ops due to error insert "
+ << err_node[i] << endl;
+ NR_ops = false;
+ }
+ }
+ }
+
+ g_info << "1: " << (NR_ops ? "run" : "pause") << " dict ops" << endl;
+ if (! send_dict_ops_cmd(ctx, NR_ops ? 1 : 2))
+ break;
+ NdbSleep_MilliSleep(myRandom48(maxsleep));
+
+ g_info << "1: start nodes" << endl;
+ CHECK(restarter.startNodes(nodeIdList, nodeIdCnt) == 0);
+
+ if (NR_error) {
+ {
+ int err = err_master;
+ if (err != 0) {
+ g_info << "1: insert master error " << err << endl;
+ CHECK(restarter.insertErrorInNode(masterNodeId, err) == 0);
+ }
+ }
+
+ for (int i = 0; i < nodeIdCnt; i++) {
+ int nodeId = nodeIdList[i];
+
+ int err = err_node[i];
+ if (err != 0) {
+ g_info << "1: insert node " << nodeId << " error " << err << endl;
+ CHECK(restarter.insertErrorInNode(nodeId, err) == 0);
+ }
+ }
+ }
+ NdbSleep_MilliSleep(myRandom48(maxsleep));
+
+ g_info << "1: wait cluster started" << endl;
+ CHECK(restarter.waitClusterStarted(timeout) == 0);
+ NdbSleep_MilliSleep(myRandom48(maxsleep));
+
+ g_info << "1: restart done" << endl;
+ }
+
+ g_info << "1: stop dict ops" << endl;
+ send_dict_ops_cmd(ctx, 3);
+
+ return result;
+}
+
+int
+runDictOps(NDBT_Context* ctx, NDBT_Step* step)
+{
+ myRandom48Init(NdbTick_CurrentMillisecond());
+ int result = NDBT_OK;
+
+ for (int l = 0; result == NDBT_OK; l++) {
+ if (! recv_dict_ops_run(ctx))
+ break;
+
+ g_info << "2: === loop " << l << " ===" << endl;
+
+ Ndb* pNdb = GETNDB(step);
+ NdbDictionary::Dictionary* pDic = pNdb->getDictionary();
+ const NdbDictionary::Table* pTab = ctx->getTab();
+ const char* tabName = pTab->getName();
+
+ const unsigned long maxsleep = 100; //ms
+
+ g_info << "2: create table" << endl;
+ {
+ uint count = 0;
+ try_create:
+ count++;
+ if (pDic->createTable(*pTab) != 0) {
+ const NdbError err = pDic->getNdbError();
+ if (count == 1)
+ g_err << "2: " << tabName << ": create failed: " << err << endl;
+ if (err.code != 711) {
+ result = NDBT_FAILED;
+ break;
+ }
+ NdbSleep_MilliSleep(myRandom48(maxsleep));
+ goto try_create;
+ }
+ }
+ NdbSleep_MilliSleep(myRandom48(maxsleep));
+
+ g_info << "2: verify create" << endl;
+ const NdbDictionary::Table* pTab2 = pDic->getTable(tabName);
+ if (pTab2 == NULL) {
+ const NdbError err = pDic->getNdbError();
+ g_err << "2: " << tabName << ": verify create: " << err << endl;
+ result = NDBT_FAILED;
+ break;
+ }
+ NdbSleep_MilliSleep(myRandom48(maxsleep));
+
+ // replace by the Retrieved table
+ pTab = pTab2;
+
+ int records = myRandom48(ctx->getNumRecords());
+ g_info << "2: load " << records << " records" << endl;
+ HugoTransactions hugoTrans(*pTab);
+ if (hugoTrans.loadTable(pNdb, records) != 0) {
+ // XXX get error code from hugo
+ g_err << "2: " << tabName << ": load failed" << endl;
+ result = NDBT_FAILED;
+ break;
+ }
+ NdbSleep_MilliSleep(myRandom48(maxsleep));
+
+ g_info << "2: drop" << endl;
+ {
+ uint count = 0;
+ try_drop:
+ count++;
+ if (pDic->dropTable(tabName) != 0) {
+ const NdbError err = pDic->getNdbError();
+ if (count == 1)
+ g_err << "2: " << tabName << ": drop failed: " << err << endl;
+ if (err.code != 711) {
+ result = NDBT_FAILED;
+ break;
+ }
+ NdbSleep_MilliSleep(myRandom48(maxsleep));
+ goto try_drop;
+ }
+ }
+ NdbSleep_MilliSleep(myRandom48(maxsleep));
+
+ g_info << "2: verify drop" << endl;
+ const NdbDictionary::Table* pTab3 = pDic->getTable(tabName);
+ if (pTab3 != NULL) {
+ g_err << "2: " << tabName << ": verify drop: table exists" << endl;
+ result = NDBT_FAILED;
+ break;
+ }
+ if (pDic->getNdbError().code != 709) {
+ const NdbError err = pDic->getNdbError();
+ g_err << "2: " << tabName << ": verify drop: " << err << endl;
+ result = NDBT_FAILED;
+ break;
+ }
+ NdbSleep_MilliSleep(myRandom48(maxsleep));
+ }
+
+ return result;
+}
+
NDBT_TESTSUITE(testDict);
TESTCASE("CreateAndDrop",
"Try to create and drop the table loop number of times\n"){
@@ -1655,6 +1948,34 @@ TESTCASE("FailAddFragment",
"Fail add fragment or attribute in ACC or TUP or TUX\n"){
INITIALIZER(runFailAddFragment);
}
+TESTCASE("Restart_NF1",
+ "DICT ops during node graceful shutdown (not master)"){
+ TC_PROPERTY("Restart_NF_ops", 1);
+ TC_PROPERTY("Restart_NF_type", 1);
+ STEP(runRestarts);
+ STEP(runDictOps);
+}
+TESTCASE("Restart_NF2",
+ "DICT ops during node shutdown abort (not master)"){
+ TC_PROPERTY("Restart_NF_ops", 1);
+ TC_PROPERTY("Restart_NF_type", 2);
+ STEP(runRestarts);
+ STEP(runDictOps);
+}
+TESTCASE("Restart_NR1",
+ "DICT ops during node startup (not master)"){
+ TC_PROPERTY("Restart_NR_ops", 1);
+ STEP(runRestarts);
+ STEP(runDictOps);
+}
+TESTCASE("Restart_NR2",
+ "DICT ops during node startup with crash inserts (not master)"){
+ TC_PROPERTY("Restart_NR_ops", 1);
+ TC_PROPERTY("Restart_NR_error", 1);
+ STEP(runRestarts);
+ STEP(runDictOps);
+}
+
NDBT_TESTSUITE_END(testDict);
int main(int argc, const char** argv){
diff --git a/ndb/tools/restore/Restore.cpp b/ndb/tools/restore/Restore.cpp
index 6ac06f8a6f8..a808a48b558 100644
--- a/ndb/tools/restore/Restore.cpp
+++ b/ndb/tools/restore/Restore.cpp
@@ -80,7 +80,12 @@ RestoreMetaData::RestoreMetaData(const char* path, Uint32 nodeId, Uint32 bNo) {
RestoreMetaData::~RestoreMetaData(){
for(Uint32 i= 0; i < allTables.size(); i++)
- delete allTables[i];
+ {
+ TableS *table = allTables[i];
+ for(Uint32 j= 0; j < table->m_fragmentInfo.size(); j++)
+ delete table->m_fragmentInfo[j];
+ delete table;
+ }
allTables.clear();
}
@@ -111,6 +116,9 @@ RestoreMetaData::loadContent()
}
if(!readGCPEntry())
return 0;
+
+ if(!readFragmentInfo())
+ return 0;
return 1;
}
@@ -192,6 +200,52 @@ RestoreMetaData::readGCPEntry() {
return true;
}
+bool
+RestoreMetaData::readFragmentInfo()
+{
+ BackupFormat::CtlFile::FragmentInfo fragInfo;
+ TableS * table = 0;
+ Uint32 tableId = RNIL;
+
+ while (buffer_read(&fragInfo, 4, 2) == 2)
+ {
+ fragInfo.SectionType = ntohl(fragInfo.SectionType);
+ fragInfo.SectionLength = ntohl(fragInfo.SectionLength);
+
+ if (fragInfo.SectionType != BackupFormat::FRAGMENT_INFO)
+ {
+ err << "readFragmentInfo invalid section type: " <<
+ fragInfo.SectionType << endl;
+ return false;
+ }
+
+ if (buffer_read(&fragInfo.TableId, (fragInfo.SectionLength-2)*4, 1) != 1)
+ {
+ err << "readFragmentInfo invalid section length: " <<
+ fragInfo.SectionLength << endl;
+ return false;
+ }
+
+ fragInfo.TableId = ntohl(fragInfo.TableId);
+ if (fragInfo.TableId != tableId)
+ {
+ tableId = fragInfo.TableId;
+ table = getTable(tableId);
+ }
+
+ FragmentInfo * tmp = new FragmentInfo;
+ tmp->fragmentNo = ntohl(fragInfo.FragmentNo);
+ tmp->noOfRecords = ntohl(fragInfo.NoOfRecordsLow) +
+ (((Uint64)ntohl(fragInfo.NoOfRecordsHigh)) << 32);
+ tmp->filePosLow = ntohl(fragInfo.FilePosLow);
+ tmp->filePosHigh = ntohl(fragInfo.FilePosHigh);
+
+ table->m_fragmentInfo.push_back(tmp);
+ table->m_noOfRecords += tmp->noOfRecords;
+ }
+ return true;
+}
+
TableS::TableS(Uint32 version, NdbTableImpl* tableImpl)
: m_dictTable(tableImpl)
{
@@ -199,6 +253,7 @@ TableS::TableS(Uint32 version, NdbTableImpl* tableImpl)
m_noOfNullable = m_nullBitmaskSize = 0;
m_auto_val_id= ~(Uint32)0;
m_max_auto_val= 0;
+ m_noOfRecords= 0;
backupVersion = version;
for (int i = 0; i < tableImpl->getNoOfColumns(); i++)
@@ -937,4 +992,5 @@ operator<<(NdbOut& ndbout, const TableS & table){
template class Vector<TableS*>;
template class Vector<AttributeS*>;
template class Vector<AttributeDesc*>;
+template class Vector<FragmentInfo*>;
diff --git a/ndb/tools/restore/Restore.hpp b/ndb/tools/restore/Restore.hpp
index 85793baf9df..cf8feb7125c 100644
--- a/ndb/tools/restore/Restore.hpp
+++ b/ndb/tools/restore/Restore.hpp
@@ -114,6 +114,14 @@ public:
AttributeData * getData(int i) const;
}; // class TupleS
+struct FragmentInfo
+{
+ Uint32 fragmentNo;
+ Uint64 noOfRecords;
+ Uint32 filePosLow;
+ Uint32 filePosHigh;
+};
+
class TableS {
friend class TupleS;
@@ -136,6 +144,9 @@ class TableS {
int pos;
+ Uint64 m_noOfRecords;
+ Vector<FragmentInfo *> m_fragmentInfo;
+
void createAttr(NdbDictionary::Column *column);
public:
@@ -146,6 +157,9 @@ public:
Uint32 getTableId() const {
return m_dictTable->getTableId();
}
+ Uint32 getNoOfRecords() const {
+ return m_noOfRecords;
+ }
/*
void setMysqlTableName(char * tableName) {
strpcpy(mysqlTableName, tableName);
@@ -274,6 +288,7 @@ class RestoreMetaData : public BackupFile {
bool readMetaTableDesc();
bool readGCPEntry();
+ bool readFragmentInfo();
Uint32 readMetaTableList();
Uint32 m_startGCP;
diff --git a/ndb/tools/restore/consumer_restore.cpp b/ndb/tools/restore/consumer_restore.cpp
index d62ca3f610a..bff63c28716 100644
--- a/ndb/tools/restore/consumer_restore.cpp
+++ b/ndb/tools/restore/consumer_restore.cpp
@@ -193,6 +193,16 @@ BackupRestore::table(const TableS & table){
copy.setName(split[2].c_str());
+ /*
+ Update min and max rows to reflect the table; this is to
+ ensure that memory is allocated properly in the ndb kernel.
+ */
+ copy.setMinRows(table.getNoOfRecords());
+ if (table.getNoOfRecords() > copy.getMaxRows())
+ {
+ copy.setMaxRows(table.getNoOfRecords());
+ }
+
if (dict->createTable(copy) == -1)
{
err << "Create table " << table.getTableName() << " failed: "
diff --git a/scripts/Makefile.am b/scripts/Makefile.am
index 0f68b484f41..a339ebc5b8f 100644
--- a/scripts/Makefile.am
+++ b/scripts/Makefile.am
@@ -32,7 +32,7 @@ bin_SCRIPTS = @server_scripts@ \
mysqldumpslow \
mysql_explain_log \
mysql_tableinfo \
- mysql_upgrade \
+ mysql_upgrade_shell \
mysqld_multi \
mysql_create_system_tables
@@ -60,7 +60,7 @@ EXTRA_SCRIPTS = make_binary_distribution.sh \
mysql_explain_log.sh \
mysqld_multi.sh \
mysql_tableinfo.sh \
- mysql_upgrade.sh \
+ mysql_upgrade_shell.sh \
mysqld_safe.sh \
mysql_create_system_tables.sh
@@ -89,7 +89,7 @@ CLEANFILES = @server_scripts@ \
mysqldumpslow \
mysql_explain_log \
mysql_tableinfo \
- mysql_upgrade \
+ mysql_upgrade_shell \
mysqld_multi \
make_win_src_distribution \
mysql_create_system_tables
diff --git a/scripts/make_binary_distribution.sh b/scripts/make_binary_distribution.sh
index 36c941ef6aa..c344cf3e93a 100644
--- a/scripts/make_binary_distribution.sh
+++ b/scripts/make_binary_distribution.sh
@@ -134,7 +134,7 @@ BIN_FILES="extra/comp_err$BS extra/replace$BS extra/perror$BS \
client/mysql$BS client/mysqlshow$BS client/mysqladmin$BS \
client/mysqldump$BS client/mysqlimport$BS \
client/mysqltest$BS client/mysqlcheck$BS \
- client/mysqlbinlog$BS \
+ client/mysqlbinlog$BS client/mysql_upgrade$BS \
tests/mysql_client_test$BS \
libmysqld/examples/mysql_client_test_embedded$BS \
libmysqld/examples/mysqltest_embedded$BS \
@@ -178,11 +178,21 @@ if [ $BASE_SYSTEM = "netware" ] ; then
fi
copyfileto $BASE/lib \
- libmysql/.libs/libmysqlclient.a libmysql/.libs/libmysqlclient.so* \
- libmysql/libmysqlclient.* libmysql_r/.libs/libmysqlclient_r.a \
- libmysql_r/.libs/libmysqlclient_r.so* libmysql_r/libmysqlclient_r.* \
+ libmysql/.libs/libmysqlclient.a \
+ libmysql/.libs/libmysqlclient.so* \
+ libmysql/.libs/libmysqlclient.sl* \
+ libmysql/.libs/libmysqlclient*.dylib \
+ libmysql/libmysqlclient.* \
+ libmysql_r/.libs/libmysqlclient_r.a \
+ libmysql_r/.libs/libmysqlclient_r.so* \
+ libmysql_r/.libs/libmysqlclient_r.sl* \
+ libmysql_r/.libs/libmysqlclient_r*.dylib \
+ libmysql_r/libmysqlclient_r.* \
+ libmysqld/.libs/libmysqld.a \
+ libmysqld/.libs/libmysqld.so* \
+ libmysqld/.libs/libmysqld.sl* \
+ libmysqld/.libs/libmysqld*.dylib \
mysys/libmysys.a strings/libmystrings.a dbug/libdbug.a \
- libmysqld/.libs/libmysqld.a libmysqld/.libs/libmysqld.so* \
libmysqld/libmysqld.a netware/libmysql.imp \
zlib/.libs/libz.a
diff --git a/scripts/mysql_upgrade.sh b/scripts/mysql_upgrade_shell.sh
index c9f375b6c5b..c9f375b6c5b 100644
--- a/scripts/mysql_upgrade.sh
+++ b/scripts/mysql_upgrade_shell.sh
diff --git a/server-tools/instance-manager/instance_options.cc b/server-tools/instance-manager/instance_options.cc
index 9389694822a..72621ed1662 100644
--- a/server-tools/instance-manager/instance_options.cc
+++ b/server-tools/instance-manager/instance_options.cc
@@ -391,8 +391,13 @@ int Instance_options::complete_initialization(const char *default_path,
const char *tmp;
char *end;
- if (!mysqld_path && !(mysqld_path= strdup_root(&alloc, default_path)))
- goto err;
+ if (!mysqld_path)
+ {
+ // Need one extra byte, as convert_dirname() adds a slash at the end.
+ if (!(mysqld_path= alloc_root(&alloc, strlen(default_path) + 2)))
+ goto err;
+ strcpy((char *)mysqld_path, default_path);
+ }
// it's safe to cast this to char* since this is a buffer we are allocating
end= convert_dirname((char*)mysqld_path, mysqld_path, NullS);
diff --git a/sql-common/client.c b/sql-common/client.c
index 26ebc9cc6b0..56a5862c90e 100644
--- a/sql-common/client.c
+++ b/sql-common/client.c
@@ -130,6 +130,8 @@ static void mysql_close_free(MYSQL *mysql);
static int wait_for_data(my_socket fd, uint timeout);
#endif
+CHARSET_INFO *default_client_charset_info = &my_charset_latin1;
+
/****************************************************************************
A modified version of connect(). my_connect() allows you to specify
@@ -1431,7 +1433,7 @@ mysql_init(MYSQL *mysql)
bzero((char*) (mysql), sizeof(*(mysql)));
mysql->options.connect_timeout= CONNECT_TIMEOUT;
mysql->last_used_con= mysql->next_slave= mysql->master = mysql;
- mysql->charset=default_charset_info;
+ mysql->charset=default_client_charset_info;
strmov(mysql->net.sqlstate, not_error_sqlstate);
/*
By default, we are a replication pivot. The caller must reset it
@@ -1660,7 +1662,51 @@ static MYSQL_METHODS client_methods=
#endif
};
-MYSQL *
+C_MODE_START
+int mysql_init_character_set(MYSQL *mysql)
+{
+ NET *net= &mysql->net;
+ /* Set character set */
+ if (!mysql->options.charset_name &&
+ !(mysql->options.charset_name=
+ my_strdup(MYSQL_DEFAULT_CHARSET_NAME,MYF(MY_WME))))
+ return 1;
+
+ {
+ const char *save= charsets_dir;
+ if (mysql->options.charset_dir)
+ charsets_dir=mysql->options.charset_dir;
+ mysql->charset=get_charset_by_csname(mysql->options.charset_name,
+ MY_CS_PRIMARY, MYF(MY_WME));
+ charsets_dir= save;
+ }
+
+ if (!mysql->charset)
+ {
+ net->last_errno=CR_CANT_READ_CHARSET;
+ strmov(net->sqlstate, unknown_sqlstate);
+ if (mysql->options.charset_dir)
+ my_snprintf(net->last_error, sizeof(net->last_error)-1,
+ ER(net->last_errno),
+ mysql->options.charset_name,
+ mysql->options.charset_dir);
+ else
+ {
+ char cs_dir_name[FN_REFLEN];
+ get_charsets_dir(cs_dir_name);
+ my_snprintf(net->last_error, sizeof(net->last_error)-1,
+ ER(net->last_errno),
+ mysql->options.charset_name,
+ cs_dir_name);
+ }
+ return 1;
+ }
+ return 0;
+}
+C_MODE_END
+
+
+MYSQL * STDCALL
CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user,
const char *passwd, const char *db,
uint port, const char *unix_socket,ulong client_flag)
@@ -1997,42 +2043,8 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user,
goto error;
}
- /* Set character set */
- if (!mysql->options.charset_name &&
- !(mysql->options.charset_name=
- my_strdup(MYSQL_DEFAULT_CHARSET_NAME,MYF(MY_WME))))
- goto error;
-
- {
- const char *save= charsets_dir;
- if (mysql->options.charset_dir)
- charsets_dir=mysql->options.charset_dir;
- mysql->charset=get_charset_by_csname(mysql->options.charset_name,
- MY_CS_PRIMARY, MYF(MY_WME));
- charsets_dir= save;
- }
-
- if (!mysql->charset)
- {
- net->last_errno=CR_CANT_READ_CHARSET;
- strmov(net->sqlstate, unknown_sqlstate);
- if (mysql->options.charset_dir)
- my_snprintf(net->last_error, sizeof(net->last_error)-1,
- ER(net->last_errno),
- mysql->options.charset_name,
- mysql->options.charset_dir);
- else
- {
- char cs_dir_name[FN_REFLEN];
- get_charsets_dir(cs_dir_name);
- my_snprintf(net->last_error, sizeof(net->last_error)-1,
- ER(net->last_errno),
- mysql->options.charset_name,
- cs_dir_name);
- }
+ if (mysql_init_character_set(mysql))
goto error;
- }
-
/* Save connection information */
if (!my_multi_malloc(MYF(0),
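The charset resolution that used to be inline in CLI_MYSQL_REAL_CONNECT() is now the reusable mysql_init_character_set(), and unconnected handles default to default_client_charset_info (latin1) rather than the server's default_charset_info. From an application's side, the charset that this code resolves is chosen with mysql_options() before connecting; a minimal client sketch with placeholder credentials:

// How a client selects the charset that mysql_init_character_set() resolves:
// set it on the handle before connecting. Host and credentials are placeholders.
#include <mysql.h>
#include <cstdio>

int main() {
  MYSQL mysql;
  mysql_init(&mysql);

  // Without this, the connection uses the client library default
  // (latin1 via default_client_charset_info after this patch).
  mysql_options(&mysql, MYSQL_SET_CHARSET_NAME, "utf8");

  if (!mysql_real_connect(&mysql, "localhost", "user", "password",
                          "test", 0, NULL, 0)) {
    std::fprintf(stderr, "connect failed: %s\n", mysql_error(&mysql));
    return 1;
  }
  std::printf("connection charset: %s\n", mysql_character_set_name(&mysql));
  mysql_close(&mysql);
  return 0;
}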
diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc
index c6d5c77803b..11f676d9cf6 100644
--- a/sql/ha_federated.cc
+++ b/sql/ha_federated.cc
@@ -32,13 +32,14 @@
so to read, that data has to be parsed into fields, to write, fields have to
be stored in this format to write to this data file.
- With MySQL Federated storage engine, there will be no local files for each
- table's data (such as .MYD). A foreign database will store the data that would
- normally be in this file. This will necessitate the use of MySQL client API
- to read, delete, update, insert this data. The data will have to be retrieve
- via an SQL call "SELECT * FROM users". Then, to read this data, it will have
- to be retrieved via mysql_fetch_row one row at a time, then converted from
- the column in this select into the format that the handler expects.
+ With MySQL Federated storage engine, there will be no local files
+ for each table's data (such as .MYD). A foreign database will store
+ the data that would normally be in this file. This will necessitate
+ the use of MySQL client API to read, delete, update, insert this
+ data. The data will have to be retrieved via an SQL call "SELECT *
+ FROM users". Then, to read this data, it will have to be retrieved
+ via mysql_fetch_row one row at a time, then converted from the
+ column in this select into the format that the handler expects.
The create table will simply create the .frm file, and within the
"CREATE TABLE" SQL, there SHALL be any of the following :
@@ -395,8 +396,8 @@ handlerton federated_hton= {
static byte *federated_get_key(FEDERATED_SHARE *share, uint *length,
my_bool not_used __attribute__ ((unused)))
{
- *length= share->table_name_length;
- return (byte*) share->table_name;
+ *length= share->connect_string_length;
+ return (byte*) share->scheme;
}
/*
@@ -416,7 +417,7 @@ bool federated_db_init()
DBUG_ENTER("federated_db_init");
if (pthread_mutex_init(&federated_mutex, MY_MUTEX_INIT_FAST))
goto error;
- if (hash_init(&federated_open_tables, system_charset_info, 32, 0, 0,
+ if (hash_init(&federated_open_tables, &my_charset_bin, 32, 0, 0,
(hash_get_key) federated_get_key, 0, 0))
{
VOID(pthread_mutex_destroy(&federated_mutex));
@@ -513,6 +514,7 @@ static int check_foreign_data_source(FEDERATED_SHARE *share,
}
else
{
+ int escaped_table_name_length= 0;
/*
Since we do not support transactions at this version, we can let the
client API silently reconnect. For future versions, we will need more
@@ -531,17 +533,16 @@ static int check_foreign_data_source(FEDERATED_SHARE *share,
query.append(FEDERATED_STAR);
query.append(FEDERATED_FROM);
query.append(FEDERATED_BTICK);
- escape_string_for_mysql(&my_charset_bin, (char *)escaped_table_name,
+ escaped_table_name_length=
+ escape_string_for_mysql(&my_charset_bin, (char*)escaped_table_name,
sizeof(escaped_table_name),
share->table_name,
share->table_name_length);
- query.append(escaped_table_name);
+ query.append(escaped_table_name, escaped_table_name_length);
query.append(FEDERATED_BTICK);
query.append(FEDERATED_WHERE);
query.append(FEDERATED_FALSE);
- DBUG_PRINT("info", ("check_foreign_data_source query %s",
- query.c_ptr_quick()));
if (mysql_real_query(mysql, query.ptr(), query.length()))
{
error_code= table_create_flag ?
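The hunk above appends exactly the number of bytes reported by escape_string_for_mysql() instead of treating the escape buffer as NUL-terminated. A generic sketch of the same idea; escape_identifier() here is a made-up stand-in, not the server function:

  #include <cstddef>
  #include <string>

  /* Hypothetical escaper: doubles backticks and reports how many bytes it wrote. */
  static std::size_t escape_identifier(char *to, std::size_t to_size,
                                       const char *from, std::size_t from_len)
  {
    std::size_t out= 0;
    for (std::size_t i= 0; i < from_len && out + 2 <= to_size; i++)
    {
      if (from[i] == '`')
        to[out++]= '`';              /* escape by doubling */
      to[out++]= from[i];
    }
    return out;                      /* no trailing '\0' is guaranteed */
  }

  static void append_escaped(std::string &query, const char *name, std::size_t name_len)
  {
    char buf[256];
    std::size_t len= escape_identifier(buf, sizeof(buf), name, name_len);
    query.append(buf, len);          /* append exactly 'len' bytes, never rely on NUL */
  }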
@@ -632,13 +633,11 @@ static int parse_url(FEDERATED_SHARE *share, TABLE *table,
DBUG_PRINT("info", ("Length %d \n", table->s->connect_string.length));
DBUG_PRINT("info", ("String %.*s \n", table->s->connect_string.length,
table->s->connect_string.str));
- share->scheme= my_strdup_with_length((const byte*)table->s->
- connect_string.str,
+ share->scheme= my_strdup_with_length(table->s->connect_string.str,
table->s->connect_string.length,
MYF(0));
- // Add a null for later termination of table name
- share->scheme[table->s->connect_string.length]= 0;
+ share->connect_string_length= table->s->connect_string.length;
DBUG_PRINT("info",("parse_url alloced share->scheme %lx", share->scheme));
/*
@@ -704,7 +703,7 @@ static int parse_url(FEDERATED_SHARE *share, TABLE *table,
share->table_name++;
share->table_name_length= strlen(share->table_name);
-
+
/* make sure there's not an extra / */
if ((strchr(share->table_name, '/')))
goto error;
@@ -740,8 +739,7 @@ error:
ha_federated::ha_federated(TABLE *table_arg)
:handler(&federated_hton, table_arg),
- mysql(0), stored_result(0),
- ref_length(sizeof(MYSQL_ROW_OFFSET)), current_position(0)
+ mysql(0), stored_result(0)
{}
@@ -752,6 +750,7 @@ ha_federated::ha_federated(TABLE *table_arg)
convert_row_to_internal_format()
record Byte pointer to record
row MySQL result set row from fetchrow()
+ result Result set to use
DESCRIPTION
This method simply iterates through a row returned via fetchrow with
@@ -764,14 +763,15 @@ ha_federated::ha_federated(TABLE *table_arg)
0 After fields have had field values stored from record
*/
-uint ha_federated::convert_row_to_internal_format(byte *record, MYSQL_ROW row)
+uint ha_federated::convert_row_to_internal_format(byte *record,
+ MYSQL_ROW row,
+ MYSQL_RES *result)
{
ulong *lengths;
Field **field;
DBUG_ENTER("ha_federated::convert_row_to_internal_format");
- lengths= mysql_fetch_lengths(stored_result);
- memset(record, 0, table->s->null_bytes);
+ lengths= mysql_fetch_lengths(result);
for (field= table->field; *field; field++)
{
@@ -1299,12 +1299,11 @@ next_loop:
static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table)
{
- char *select_query, *tmp_table_name;
+ char *select_query;
char query_buffer[FEDERATED_QUERY_BUFFER_SIZE];
- uint tmp_table_name_length;
Field **field;
String query(query_buffer, sizeof(query_buffer), &my_charset_bin);
- FEDERATED_SHARE *share;
+ FEDERATED_SHARE *share= NULL, tmp_share;
/*
In order to use this string, we must first zero it's length,
or it will contain garbage
@@ -1312,12 +1311,15 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table)
query.length(0);
pthread_mutex_lock(&federated_mutex);
- tmp_table_name= (char *)table->s->table_name;
- tmp_table_name_length= (uint) strlen(tmp_table_name);
+ if (parse_url(&tmp_share, table, 0))
+ goto error;
+
+ /* TODO: change tmp_share.scheme to LEX_STRING object */
if (!(share= (FEDERATED_SHARE *) hash_search(&federated_open_tables,
- (byte*) table_name,
- strlen(table_name))))
+ (byte*) tmp_share.scheme,
+ tmp_share.
+ connect_string_length)))
{
query.set_charset(system_charset_info);
query.append(FEDERATED_SELECT);
@@ -1335,24 +1337,20 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table)
if (!(share= (FEDERATED_SHARE *)
my_multi_malloc(MYF(MY_WME),
&share, sizeof(*share),
- &tmp_table_name, tmp_table_name_length+ 1,
&select_query,
query.length()+table->s->connect_string.length+1,
NullS)))
- {
- pthread_mutex_unlock(&federated_mutex);
- return NULL;
- }
-
- if (parse_url(share, table, 0))
goto error;
+ memcpy(share, &tmp_share, sizeof(tmp_share));
+
+ share->table_name_length= strlen(share->table_name);
+ /* TODO: share->table_name to LEX_STRING object */
query.append(share->table_name, share->table_name_length);
query.append(FEDERATED_BTICK);
share->select_query= select_query;
strmov(share->select_query, query.ptr());
share->use_count= 0;
- share->table_name_length= strlen(share->table_name);
DBUG_PRINT("info",
("share->select_query %s", share->select_query));
@@ -1368,11 +1366,8 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table)
error:
pthread_mutex_unlock(&federated_mutex);
- if (share->scheme)
- {
- my_free((gptr) share->scheme, MYF(0));
- share->scheme= 0;
- }
+ my_free((gptr) tmp_share.scheme, MYF(MY_ALLOW_ZERO_PTR));
+ my_free((gptr) share, MYF(MY_ALLOW_ZERO_PTR));
return NULL;
}
@@ -1392,13 +1387,7 @@ static int free_share(FEDERATED_SHARE *share)
{
hash_delete(&federated_open_tables, (byte*) share);
my_free((gptr) share->scheme, MYF(MY_ALLOW_ZERO_PTR));
- share->scheme= 0;
- if (share->socket)
- {
- my_free((gptr) share->socket, MYF(MY_ALLOW_ZERO_PTR));
- share->socket= 0;
- }
-
+ my_free((gptr) share->socket, MYF(MY_ALLOW_ZERO_PTR));
thr_lock_delete(&share->lock);
VOID(pthread_mutex_destroy(&share->mutex));
my_free((gptr) share, MYF(0));
@@ -1460,22 +1449,29 @@ int ha_federated::open(const char *name, int mode, uint test_if_locked)
/* Connect to foreign database mysql_real_connect() */
mysql= mysql_init(0);
- if (!mysql_real_connect(mysql,
- share->hostname,
- share->username,
- share->password,
- share->database,
- share->port,
- share->socket, 0))
+ if (!mysql || !mysql_real_connect(mysql,
+ share->hostname,
+ share->username,
+ share->password,
+ share->database,
+ share->port,
+ share->socket, 0))
{
+ free_share(share);
DBUG_RETURN(stash_remote_error());
}
/*
Since we do not support transactions at this version, we can let the client
- API silently reconnect. For future versions, we will need more logic to deal
- with transactions
+ API silently reconnect. For future versions, we will need more logic to
+ deal with transactions
*/
mysql->reconnect= 1;
+
+ ref_length= (table->s->primary_key != MAX_KEY ?
+ table->key_info[table->s->primary_key].key_length :
+ table->s->reclength);
+ DBUG_PRINT("info", ("ref_length: %u", ref_length));
+
DBUG_RETURN(0);
}
@@ -1499,13 +1495,12 @@ int ha_federated::close(void)
/* free the result set */
if (stored_result)
{
- DBUG_PRINT("info",
- ("mysql_free_result result at address %lx", stored_result));
mysql_free_result(stored_result);
stored_result= 0;
}
/* Disconnect from mysql */
- mysql_close(mysql);
+ if (mysql) // QQ is this really needed
+ mysql_close(mysql);
retval= free_share(share);
DBUG_RETURN(retval);
@@ -1695,15 +1690,13 @@ int ha_federated::write_row(byte *buf)
/* add the values */
insert_string.append(values_string);
- DBUG_PRINT("info", ("insert query %s", insert_string.c_ptr_quick()));
-
if (mysql_real_query(mysql, insert_string.ptr(), insert_string.length()))
{
DBUG_RETURN(stash_remote_error());
}
/*
- If the table we've just written a record to contains an auto_increment field,
- then store the last_insert_id() value from the foreign server
+ If the table we've just written a record to contains an auto_increment
+ field, then store the last_insert_id() value from the foreign server
*/
if (table->next_number_field)
update_auto_increment();
@@ -1772,7 +1765,7 @@ int ha_federated::repair(THD* thd, HA_CHECK_OPT* check_opt)
query.append(FEDERATED_EXTENDED);
if (check_opt->sql_flags & TT_USEFRM)
query.append(FEDERATED_USE_FRM);
-
+
if (mysql_real_query(mysql, query.ptr(), query.length()))
{
DBUG_RETURN(stash_remote_error());
@@ -1924,7 +1917,7 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
/*
This will delete a row. 'buf' will contain a copy of the row to be =deleted.
The server will call this right after the current row has been called (from
- either a previous rnd_nexT() or index call).
+ either a previous rnd_next() or index call).
If you keep a pointer to the last row or can access a primary key it will
make doing the deletion quite a bit easier.
Keep in mind that the server does no guarentee consecutive deletions.
@@ -1984,6 +1977,7 @@ int ha_federated::delete_row(const byte *buf)
DBUG_RETURN(stash_remote_error());
}
deleted+= mysql->affected_rows;
+ records-= mysql->affected_rows;
DBUG_PRINT("info",
("rows deleted %d rows deleted for all time %d",
int(mysql->affected_rows), deleted));
@@ -2000,12 +1994,15 @@ int ha_federated::delete_row(const byte *buf)
*/
int ha_federated::index_read(byte *buf, const byte *key,
- uint key_len, enum ha_rkey_function find_flag)
+ uint key_len, ha_rkey_function find_flag)
{
- int retval;
DBUG_ENTER("ha_federated::index_read");
- retval= index_read_idx(buf, active_index, key, key_len, find_flag);
- DBUG_RETURN(retval);
+
+ if (stored_result)
+ mysql_free_result(stored_result);
+ DBUG_RETURN(index_read_idx_with_result_set(buf, active_index, key,
+ key_len, find_flag,
+ &stored_result));
}
@@ -2014,26 +2011,60 @@ int ha_federated::index_read(byte *buf, const byte *key,
row if any. This is only used to read whole keys.
This method is called via index_read in the case of a WHERE clause using
- a regular non-primary key index, OR is called DIRECTLY when the WHERE clause
+ a primary key index OR is called DIRECTLY when the WHERE clause
uses a PRIMARY KEY index.
+
+ NOTES
+ This uses an internal result set that is deleted before the function
+ returns. It must be callable from ha_rnd_pos()
*/
int ha_federated::index_read_idx(byte *buf, uint index, const byte *key,
uint key_len, enum ha_rkey_function find_flag)
{
int retval;
+ MYSQL_RES *mysql_result;
+ DBUG_ENTER("ha_federated::index_read_idx");
+
+ if ((retval= index_read_idx_with_result_set(buf, index, key,
+ key_len, find_flag,
+ &mysql_result)))
+ DBUG_RETURN(retval);
+ mysql_free_result(mysql_result);
+ DBUG_RETURN(retval);
+}
+
+
+/*
+ Create result set for rows matching query and return first row
+
+ RESULT
+ 0 ok In this case *result will contain the result set
+ table->status == 0
+ # error In this case *result will contain 0
+ table->status == STATUS_NOT_FOUND
+*/
+
+int ha_federated::index_read_idx_with_result_set(byte *buf, uint index,
+ const byte *key,
+ uint key_len,
+ ha_rkey_function find_flag,
+ MYSQL_RES **result)
+{
+ int retval;
char error_buffer[FEDERATED_QUERY_BUFFER_SIZE];
char index_value[STRING_BUFFER_USUAL_SIZE];
char sql_query_buffer[FEDERATED_QUERY_BUFFER_SIZE];
- String index_string(index_value,
+ String index_string(index_value,
sizeof(index_value),
&my_charset_bin);
String sql_query(sql_query_buffer,
sizeof(sql_query_buffer),
&my_charset_bin);
key_range range;
- DBUG_ENTER("ha_federated::index_read_idx");
+ DBUG_ENTER("ha_federated::index_read_idx_with_result_set");
+ *result= 0; // In case of errors
index_string.length(0);
sql_query.length(0);
statistic_increment(table->in_use->status_var.ha_read_key_count,
@@ -2050,20 +2081,6 @@ int ha_federated::index_read_idx(byte *buf, uint index, const byte *key,
NULL, 0);
sql_query.append(index_string);
- DBUG_PRINT("info",
- ("current key %d key value %s index_string value %s length %d",
- index, (char*) key, index_string.c_ptr_quick(),
- index_string.length()));
-
- DBUG_PRINT("info",
- ("current position %d sql_query %s", current_position,
- sql_query.c_ptr_quick()));
-
- if (stored_result)
- {
- mysql_free_result(stored_result);
- stored_result= 0;
- }
if (mysql_real_query(mysql, sql_query.ptr(), sql_query.length()))
{
my_sprintf(error_buffer, (error_buffer, "error: %d '%s'",
@@ -2071,53 +2088,44 @@ int ha_federated::index_read_idx(byte *buf, uint index, const byte *key,
retval= ER_QUERY_ON_FOREIGN_DATA_SOURCE;
goto error;
}
- stored_result= mysql_store_result(mysql);
-
- if (!stored_result)
+ if (!(*result= mysql_store_result(mysql)))
{
retval= HA_ERR_END_OF_FILE;
goto error;
}
- /*
- This basically says that the record in table->record[0] is legal,
- and that it is ok to use this record, for whatever reason, such
- as with a join (without it, joins will not work)
- */
- table->status= 0;
+ if (!(retval= read_next(buf, *result)))
+ DBUG_RETURN(retval);
- retval= rnd_next(buf);
+ mysql_free_result(*result);
+ *result= 0;
+ table->status= STATUS_NOT_FOUND;
DBUG_RETURN(retval);
error:
- if (stored_result)
- {
- mysql_free_result(stored_result);
- stored_result= 0;
- }
table->status= STATUS_NOT_FOUND;
my_error(retval, MYF(0), error_buffer);
DBUG_RETURN(retval);
}
+
/* Initialized at each key walk (called multiple times unlike rnd_init()) */
+
int ha_federated::index_init(uint keynr)
{
DBUG_ENTER("ha_federated::index_init");
- DBUG_PRINT("info",
- ("table: '%s' key: %d", table->s->table_name, keynr));
+ DBUG_PRINT("info", ("table: '%s' key: %u", table->s->table_name, keynr));
active_index= keynr;
DBUG_RETURN(0);
}
-/*
- int read_range_first(const key_range *start_key,
- const key_range *end_key,
- bool eq_range, bool sorted);
+/*
+ Read first range
*/
+
int ha_federated::read_range_first(const key_range *start_key,
- const key_range *end_key,
- bool eq_range, bool sorted)
+ const key_range *end_key,
+ bool eq_range, bool sorted)
{
char sql_query_buffer[FEDERATED_QUERY_BUFFER_SIZE];
int retval;
@@ -2126,8 +2134,7 @@ int ha_federated::read_range_first(const key_range *start_key,
&my_charset_bin);
DBUG_ENTER("ha_federated::read_range_first");
- if (start_key == NULL && end_key == NULL)
- DBUG_RETURN(0);
+ DBUG_ASSERT(!(start_key == NULL && end_key == NULL));
sql_query.length(0);
sql_query.append(share->select_query);
@@ -2135,6 +2142,11 @@ int ha_federated::read_range_first(const key_range *start_key,
&table->key_info[active_index],
start_key, end_key, 0);
+ if (stored_result)
+ {
+ mysql_free_result(stored_result);
+ stored_result= 0;
+ }
if (mysql_real_query(mysql, sql_query.ptr(), sql_query.length()))
{
retval= ER_QUERY_ON_FOREIGN_DATA_SOURCE;
@@ -2142,38 +2154,21 @@ int ha_federated::read_range_first(const key_range *start_key,
}
sql_query.length(0);
- if (stored_result)
- {
- DBUG_PRINT("info",
- ("mysql_free_result address %lx", stored_result));
- mysql_free_result(stored_result);
- stored_result= 0;
- }
- stored_result= mysql_store_result(mysql);
-
- if (!stored_result)
+ if (!(stored_result= mysql_store_result(mysql)))
{
retval= HA_ERR_END_OF_FILE;
goto error;
}
-
- /* This was successful, please let it be known! */
- table->status= 0;
- retval= rnd_next(table->record[0]);
+ retval= read_next(table->record[0], stored_result);
DBUG_RETURN(retval);
error:
- table->status= STATUS_NOT_FOUND;
- if (stored_result)
- {
- DBUG_PRINT("info", ("mysql_free_result address %lx", stored_result));
- mysql_free_result(stored_result);
- stored_result= 0;
- }
- DBUG_RETURN(retval);
+ table->status= STATUS_NOT_FOUND;
+ DBUG_RETURN(retval);
}
+
int ha_federated::read_range_next()
{
int retval;
@@ -2186,13 +2181,13 @@ int ha_federated::read_range_next()
/* Used to read forward through the index. */
int ha_federated::index_next(byte *buf)
{
- int retval;
DBUG_ENTER("ha_federated::index_next");
statistic_increment(table->in_use->status_var.ha_read_next_count,
&LOCK_status);
- retval= rnd_next(buf);
- DBUG_RETURN(retval);
+ DBUG_RETURN(read_next(buf, stored_result));
}
+
+
/*
rnd_init() is called when the system wants the storage engine to do a table
scan.
@@ -2246,11 +2241,8 @@ int ha_federated::rnd_init(bool scan)
if (scan)
{
- DBUG_PRINT("info", ("share->select_query %s", share->select_query));
if (stored_result)
{
- DBUG_PRINT("info",
- ("mysql_free_result address %lx", stored_result));
mysql_free_result(stored_result);
stored_result= 0;
}
@@ -2267,27 +2259,25 @@ int ha_federated::rnd_init(bool scan)
DBUG_RETURN(0);
error:
- DBUG_RETURN(stash_remote_error());
+ DBUG_RETURN(stash_remote_error());
}
+
int ha_federated::rnd_end()
{
- int retval;
DBUG_ENTER("ha_federated::rnd_end");
+ DBUG_RETURN(index_end());
+}
+
+int ha_federated::index_end(void)
+{
+ DBUG_ENTER("ha_federated::index_end");
if (stored_result)
{
- DBUG_PRINT("info", ("mysql_free_result address %lx", stored_result));
mysql_free_result(stored_result);
stored_result= 0;
}
- retval= index_end();
- DBUG_RETURN(retval);
-}
-
-int ha_federated::index_end(void)
-{
- DBUG_ENTER("ha_federated::index_end");
active_index= MAX_KEY;
DBUG_RETURN(0);
}
@@ -2304,8 +2294,6 @@ int ha_federated::index_end(void)
int ha_federated::rnd_next(byte *buf)
{
- int retval;
- MYSQL_ROW row;
DBUG_ENTER("ha_federated::rnd_next");
if (stored_result == 0)
@@ -2313,32 +2301,60 @@ int ha_federated::rnd_next(byte *buf)
/*
Return value of rnd_init is not always checked (see records.cc),
so we can get here _even_ if there is _no_ pre-fetched result-set!
- TODO: fix it.
- */
+ TODO: fix it. We can delete this in 5.1 when rnd_init() is checked.
+ */
DBUG_RETURN(1);
}
-
+ DBUG_RETURN(read_next(buf, stored_result));
+}
+
+
+/*
+ ha_federated::read_next
+
+ reads from a result set and converts to mysql internal
+ format
+
+ SYNOPSIS
+ read_next()
+ buf byte pointer to record
+ result mysql result set
+
+ DESCRIPTION
+ This method is a wrapper method that reads one record from a result
+ set and converts it to the internal table format
+
+ RETURN VALUE
+ 1 error
+ 0 no error
+*/
+
+int ha_federated::read_next(byte *buf, MYSQL_RES *result)
+{
+ int retval;
+ my_ulonglong num_rows;
+ MYSQL_ROW row;
+ DBUG_ENTER("ha_federated::read_next");
+
+ table->status= STATUS_NOT_FOUND; // For easier return
+
/* Fetch a row, insert it back in a row format. */
- current_position= stored_result->data_cursor;
- DBUG_PRINT("info", ("current position %d", current_position));
- if (!(row= mysql_fetch_row(stored_result)))
+ if (!(row= mysql_fetch_row(result)))
DBUG_RETURN(HA_ERR_END_OF_FILE);
- retval= convert_row_to_internal_format(buf, row);
+ if (!(retval= convert_row_to_internal_format(buf, row, result)))
+ table->status= 0;
+
DBUG_RETURN(retval);
}
/*
- 'position()' is called after each call to rnd_next() if the data needs to be
- ordered. You can do something like the following to store the position:
- my_store_ptr(ref, ref_length, current_position);
+ store reference to current row so that we can later find it for
+ a re-read, update or delete.
- The server uses ref to store data. ref_length in the above case is the size
- needed to store current_position. ref is just a byte array that the server
- will maintain. If you are using offsets to mark rows, then current_position
- should be the offset. If it is a primary key like in BDB, then it needs to
- be a primary key.
+ In case of federated, a reference is either a primary key or
+ the whole record.
Called from filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc.
*/
@@ -2346,32 +2362,44 @@ int ha_federated::rnd_next(byte *buf)
void ha_federated::position(const byte *record)
{
DBUG_ENTER("ha_federated::position");
- /* my_store_ptr Add seek storage */
- *(MYSQL_ROW_OFFSET *) ref= current_position; // ref is always aligned
+ if (table->s->primary_key != MAX_KEY)
+ key_copy(ref, (byte *)record, table->key_info + table->s->primary_key,
+ ref_length);
+ else
+ memcpy(ref, record, ref_length);
DBUG_VOID_RETURN;
}
/*
This is like rnd_next, but you are given a position to use to determine the
- row. The position will be of the type that you stored in ref. You can use
- ha_get_ptr(pos,ref_length) to retrieve whatever key or position you saved
- when position() was called.
+ row. The position will be of the type that you stored in ref.
- This method is required for an ORDER BY.
+ This method is required for an ORDER BY
Called from filesort.cc records.cc sql_insert.cc sql_select.cc sql_update.cc.
*/
+
int ha_federated::rnd_pos(byte *buf, byte *pos)
{
+ int result;
DBUG_ENTER("ha_federated::rnd_pos");
-
statistic_increment(table->in_use->status_var.ha_read_rnd_count,
&LOCK_status);
- memcpy_fixed(&current_position, pos, sizeof(MYSQL_ROW_OFFSET));
- stored_result->current_row= 0;
- stored_result->data_cursor= current_position;
- DBUG_RETURN(rnd_next(buf));
+ if (table->s->primary_key != MAX_KEY)
+ {
+ /* We have a primary key, so use index_read_idx to find row */
+ result= index_read_idx(buf, table->s->primary_key, pos,
+ ref_length, HA_READ_KEY_EXACT);
+ }
+ else
+ {
+ /* otherwise, get the old record ref as obtained in ::position */
+ memcpy(buf, pos, ref_length);
+ result= 0;
+ }
+ table->status= result ? STATUS_NOT_FOUND : 0;
+ DBUG_RETURN(result);
}
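The reworked position()/rnd_pos() treat the row reference as either the primary key (the row is re-read by key later) or the whole record. A simplified, self-contained sketch of that convention with stand-in types, not the handler classes themselves:

  #include <cstddef>
  #include <cstring>

  struct Row { int id; char payload[32]; };    /* stand-in for table->record[0] */

  static const bool        have_primary_key= true;   /* table->s->primary_key != MAX_KEY */
  static const std::size_t ref_length= have_primary_key ? sizeof(int) : sizeof(Row);

  /* position(): remember how to find this row again later */
  static void save_position(char *ref, const Row *rec)
  {
    if (have_primary_key)
      std::memcpy(ref, &rec->id, ref_length);  /* key_copy() of the PK in the real code */
    else
      std::memcpy(ref, rec, ref_length);       /* the whole record is the reference */
  }

  /* rnd_pos(): restore the row from the saved reference */
  static void restore_position(Row *buf, const char *ref)
  {
    if (have_primary_key)
      std::memcpy(&buf->id, ref, ref_length);  /* then re-read by key (index_read_idx) */
    else
      std::memcpy(buf, ref, ref_length);       /* the reference already is the record */
  }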
@@ -2476,18 +2504,22 @@ void ha_federated::info(uint flag)
delete_length = ?
*/
if (row[4] != NULL)
- records= (ha_rows) my_strtoll10(row[4], (char**) 0, &error);
- if (row[5] != NULL)
- mean_rec_length= (ha_rows) my_strtoll10(row[5], (char**) 0, &error);
+ records= (ha_rows) my_strtoll10(row[4], (char**) 0, &error);
+
+ mean_rec_length= table->s->reclength;
+ data_file_length= records * mean_rec_length;
+
if (row[12] != NULL)
- update_time= (ha_rows) my_strtoll10(row[12], (char**) 0, &error);
+ update_time= (ha_rows) my_strtoll10(row[12], (char**) 0, &error);
if (row[13] != NULL)
- check_time= (ha_rows) my_strtoll10(row[13], (char**) 0, &error);
- }
- if (flag & HA_STATUS_CONST)
- {
- block_size= 4096;
+ check_time= (ha_rows) my_strtoll10(row[13], (char**) 0, &error);
}
+
+ /*
+ size of IO operations (This is based on a good guess, no high science
+ involved)
+ */
+ block_size= 4096;
}
if (result)
@@ -2498,6 +2530,7 @@ void ha_federated::info(uint flag)
error:
if (result)
mysql_free_result(result);
+
my_sprintf(error_buffer, (error_buffer, ": %d : %s",
mysql_errno(mysql), mysql_error(mysql)));
my_error(error_code, MYF(0), error_buffer);
@@ -2578,6 +2611,7 @@ THR_LOCK_DATA **ha_federated::store_lock(THD *thd,
THR_LOCK_DATA **to,
enum thr_lock_type lock_type)
{
+ DBUG_ENTER("ha_federated::store_lock");
if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
{
/*
@@ -2607,7 +2641,7 @@ THR_LOCK_DATA **ha_federated::store_lock(THD *thd,
*to++= &lock;
- return to;
+ DBUG_RETURN(to);
}
/*
diff --git a/sql/ha_federated.h b/sql/ha_federated.h
index cafd1fe59a5..85474d142a3 100644
--- a/sql/ha_federated.h
+++ b/sql/ha_federated.h
@@ -78,7 +78,7 @@
#define FEDERATED_VALUES_LEN sizeof(FEDERATED_VALUES)
#define FEDERATED_UPDATE "UPDATE "
#define FEDERATED_UPDATE_LEN sizeof(FEDERATED_UPDATE)
-#define FEDERATED_SET "SET "
+#define FEDERATED_SET " SET "
#define FEDERATED_SET_LEN sizeof(FEDERATED_SET)
#define FEDERATED_AND " AND "
#define FEDERATED_AND_LEN sizeof(FEDERATED_AND)
@@ -130,6 +130,7 @@ typedef struct st_federated_share {
remote host info, parse_url supplies
*/
char *scheme;
+ char *connect_string;
char *hostname;
char *username;
char *password;
@@ -139,7 +140,7 @@ typedef struct st_federated_share {
char *socket;
char *sport;
ushort port;
- uint table_name_length, use_count;
+ uint table_name_length, connect_string_length, use_count;
pthread_mutex_t mutex;
THR_LOCK lock;
} FEDERATED_SHARE;
@@ -153,7 +154,6 @@ class ha_federated: public handler
FEDERATED_SHARE *share; /* Shared lock info */
MYSQL *mysql; /* MySQL connection */
MYSQL_RES *stored_result;
- uint ref_length;
uint fetch_num; // stores the fetch num
MYSQL_ROW_OFFSET current_position; // Current position used by ::position()
int remote_error_number;
@@ -164,8 +164,9 @@ private:
return 0 on success
return errorcode otherwise
*/
- uint convert_row_to_internal_format(byte *buf, MYSQL_ROW row);
- bool create_where_from_key(String *to, KEY *key_info,
+ uint convert_row_to_internal_format(byte *buf, MYSQL_ROW row,
+ MYSQL_RES *result);
+ bool create_where_from_key(String *to, KEY *key_info,
const key_range *start_key,
const key_range *end_key,
bool records_in_range);
@@ -298,6 +299,13 @@ public:
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
enum thr_lock_type lock_type); //required
virtual bool get_error_message(int error, String *buf);
+
+ int read_next(byte *buf, MYSQL_RES *result);
+ int index_read_idx_with_result_set(byte *buf, uint index,
+ const byte *key,
+ uint key_len,
+ ha_rkey_function find_flag,
+ MYSQL_RES **result);
};
bool federated_db_init(void);
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 46ab5b88624..b36099cf25c 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -160,8 +160,8 @@ static int update_status_variables(Ndb_cluster_connection *c)
struct show_var_st ndb_status_variables[]= {
{"cluster_node_id", (char*) &ndb_cluster_node_id, SHOW_LONG},
- {"connected_host", (char*) &ndb_connected_host, SHOW_CHAR_PTR},
- {"connected_port", (char*) &ndb_connected_port, SHOW_LONG},
+ {"config_from_host", (char*) &ndb_connected_host, SHOW_CHAR_PTR},
+ {"config_from_port", (char*) &ndb_connected_port, SHOW_LONG},
// {"number_of_replicas", (char*) &ndb_number_of_replicas, SHOW_LONG},
{"number_of_storage_nodes",(char*) &ndb_number_of_storage_nodes, SHOW_LONG},
{NullS, NullS, SHOW_LONG}
@@ -363,6 +363,7 @@ void ha_ndbcluster::records_update()
{
Ndb *ndb= get_ndb();
struct Ndb_statistics stat;
+ ndb->setDatabaseName(m_dbname);
if (ndb_get_table_statistics(ndb, m_tabname, &stat) == 0){
mean_rec_length= stat.row_size;
data_file_length= stat.fragment_memory;
@@ -3081,6 +3082,7 @@ void ha_ndbcluster::info(uint flag)
DBUG_VOID_RETURN;
Ndb *ndb= get_ndb();
struct Ndb_statistics stat;
+ ndb->setDatabaseName(m_dbname);
if (current_thd->variables.ndb_use_exact_count &&
ndb_get_table_statistics(ndb, m_tabname, &stat) == 0)
{
@@ -4116,7 +4118,11 @@ static int create_ndb_column(NDBCOL &col,
static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length)
{
- if (form->s->max_rows == (ha_rows) 0) /* default setting, don't set fragmentation */
+ ha_rows max_rows= form->s->max_rows;
+ ha_rows min_rows= form->s->min_rows;
+ if (max_rows < min_rows)
+ max_rows= min_rows;
+ if (max_rows == (ha_rows)0) /* default setting, don't set fragmentation */
return;
/**
* get the number of fragments right
@@ -4134,7 +4140,6 @@ static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length)
acc_row_size+= 4 + /*safety margin*/ 4;
#endif
ulonglong acc_fragment_size= 512*1024*1024;
- ulonglong max_rows= form->s->max_rows;
#if MYSQL_VERSION_ID >= 50100
no_fragments= (max_rows*acc_row_size)/acc_fragment_size+1;
#else
@@ -4158,6 +4163,8 @@ static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length)
ftype= NDBTAB::FragAllSmall;
tab.setFragmentType(ftype);
}
+ tab.setMaxRows(max_rows);
+ tab.setMinRows(min_rows);
}
int ha_ndbcluster::create(const char *name,
@@ -5837,62 +5844,60 @@ ndb_get_table_statistics(Ndb* ndb, const char * table,
DBUG_ENTER("ndb_get_table_statistics");
DBUG_PRINT("enter", ("table: %s", table));
NdbTransaction* pTrans;
+ NdbError error;
int retries= 10;
int retry_sleep= 30 * 1000; /* 30 milliseconds */
do
{
- pTrans= ndb->startTransaction();
- if (pTrans == NULL)
+ Uint64 rows, commits, mem;
+ Uint32 size;
+ Uint32 count= 0;
+ Uint64 sum_rows= 0;
+ Uint64 sum_commits= 0;
+ Uint64 sum_row_size= 0;
+ Uint64 sum_mem= 0;
+ NdbScanOperation*pOp;
+ NdbResultSet *rs;
+ int check;
+
+ if ((pTrans= ndb->startTransaction()) == NULL)
{
- if (ndb->getNdbError().status == NdbError::TemporaryError &&
- retries--)
- {
- my_sleep(retry_sleep);
- continue;
- }
- break;
+ error= ndb->getNdbError();
+ goto retry;
+ }
+
+ if ((pOp= pTrans->getNdbScanOperation(table)) == NULL)
+ {
+ error= pTrans->getNdbError();
+ goto retry;
}
-
- NdbScanOperation* pOp= pTrans->getNdbScanOperation(table);
- if (pOp == NULL)
- break;
if (pOp->readTuples(NdbOperation::LM_CommittedRead))
- break;
+ {
+ error= pOp->getNdbError();
+ goto retry;
+ }
- int check= pOp->interpret_exit_last_row();
- if (check == -1)
- break;
+ if (pOp->interpret_exit_last_row() == -1)
+ {
+ error= pOp->getNdbError();
+ goto retry;
+ }
- Uint64 rows, commits, mem;
- Uint32 size;
pOp->getValue(NdbDictionary::Column::ROW_COUNT, (char*)&rows);
pOp->getValue(NdbDictionary::Column::COMMIT_COUNT, (char*)&commits);
pOp->getValue(NdbDictionary::Column::ROW_SIZE, (char*)&size);
pOp->getValue(NdbDictionary::Column::FRAGMENT_MEMORY, (char*)&mem);
- check= pTrans->execute(NdbTransaction::NoCommit,
- NdbTransaction::AbortOnError,
- TRUE);
- if (check == -1)
+ if (pTrans->execute(NdbTransaction::NoCommit,
+ NdbTransaction::AbortOnError,
+ TRUE) == -1)
{
- if (pTrans->getNdbError().status == NdbError::TemporaryError &&
- retries--)
- {
- ndb->closeTransaction(pTrans);
- pTrans= 0;
- my_sleep(retry_sleep);
- continue;
- }
- break;
+ error= pTrans->getNdbError();
+ goto retry;
}
-
- Uint32 count= 0;
- Uint64 sum_rows= 0;
- Uint64 sum_commits= 0;
- Uint64 sum_row_size= 0;
- Uint64 sum_mem= 0;
+
while ((check= pOp->nextResult(TRUE, TRUE)) == 0)
{
sum_rows+= rows;
@@ -5904,7 +5909,10 @@ ndb_get_table_statistics(Ndb* ndb, const char * table,
}
if (check == -1)
- break;
+ {
+ error= pOp->getNdbError();
+ goto retry;
+ }
pOp->close(TRUE);
@@ -5921,12 +5929,21 @@ ndb_get_table_statistics(Ndb* ndb, const char * table,
sum_mem, count));
DBUG_RETURN(0);
+retry:
+ if (pTrans)
+ {
+ ndb->closeTransaction(pTrans);
+ pTrans= NULL;
+ }
+ if (error.status == NdbError::TemporaryError && retries--)
+ {
+ my_sleep(retry_sleep);
+ continue;
+ }
+ break;
} while(1);
-
- if (pTrans)
- ndb->closeTransaction(pTrans);
- DBUG_PRINT("exit", ("failed"));
- DBUG_RETURN(-1);
+ DBUG_PRINT("exit", ("failed, error %u(%s)", error.code, error.message));
+ ERR_RETURN(error);
}
/*
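The rewritten ndb_get_table_statistics() routes every failure through a single retry label that releases the transaction and retries only on temporary errors. A generic sketch of that control flow; the step functions, error codes and sleep interval are invented for illustration:

  #include <unistd.h>

  enum ErrStatus { ERR_NONE, ERR_TEMPORARY, ERR_PERMANENT };

  static ErrStatus do_step_one() { return ERR_NONE; }   /* stand-in: start transaction */
  static ErrStatus do_step_two() { return ERR_NONE; }   /* stand-in: define and run scan */
  static void      cleanup()     {}                     /* stand-in: closeTransaction() */

  static int run_with_retries()
  {
    int retries= 10;
    ErrStatus error;
    do
    {
      if ((error= do_step_one()) != ERR_NONE)
        goto retry;
      if ((error= do_step_two()) != ERR_NONE)
        goto retry;
      cleanup();
      return 0;                    /* success */

  retry:
      cleanup();                   /* always release resources before deciding */
      if (error == ERR_TEMPORARY && retries--)
      {
        usleep(30 * 1000);         /* 30 ms, like the original retry_sleep */
        continue;
      }
      break;                       /* permanent error or retries exhausted */
    } while (1);
    return -1;
  }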
diff --git a/sql/item_geofunc.h b/sql/item_geofunc.h
index c45fb88a48a..1f64fdba609 100644
--- a/sql/item_geofunc.h
+++ b/sql/item_geofunc.h
@@ -32,6 +32,7 @@ public:
Item_geometry_func(Item *a,Item *b,Item *c) :Item_str_func(a,b,c) {}
Item_geometry_func(List<Item> &list) :Item_str_func(list) {}
void fix_length_and_dec();
+ enum_field_types field_type() const { return MYSQL_TYPE_GEOMETRY; }
};
class Item_func_geometry_from_text: public Item_geometry_func
@@ -67,6 +68,7 @@ public:
Item_func_as_wkb(Item *a): Item_geometry_func(a) {}
const char *func_name() const { return "aswkb"; }
String *val_str(String *);
+ enum_field_types field_type() const { return MYSQL_TYPE_BLOB; }
};
class Item_func_geometry_type: public Item_str_func
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index a51ebd39147..7a35dedc08a 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -1667,13 +1667,13 @@ String *Item_func_database::val_str(String *str)
{
DBUG_ASSERT(fixed == 1);
THD *thd= current_thd;
- if (!thd->db)
+ if (thd->db == NULL)
{
null_value= 1;
return 0;
}
else
- str->copy((const char*) thd->db,(uint) strlen(thd->db),system_charset_info);
+ str->copy(thd->db, thd->db_length, system_charset_info);
return str;
}
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index d8b309978d5..f741984e05f 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -27,6 +27,7 @@
/* TODO: Move month and days to language files */
+/* Day number for Dec 31st, 9999 */
#define MAX_DAY_NUMBER 3652424L
static const char *month_names[]=
@@ -408,7 +409,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
if (yearday > 0)
{
uint days= calc_daynr(l_time->year,1,1) + yearday - 1;
- if (days <= 0 || days >= MAX_DAY_NUMBER)
+ if (days <= 0 || days > MAX_DAY_NUMBER)
goto err;
get_date_from_daynr(days,&l_time->year,&l_time->month,&l_time->day);
}
@@ -454,7 +455,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
(weekday - 1);
}
- if (days <= 0 || days >= MAX_DAY_NUMBER)
+ if (days <= 0 || days > MAX_DAY_NUMBER)
goto err;
get_date_from_daynr(days,&l_time->year,&l_time->month,&l_time->day);
}
@@ -2035,7 +2036,7 @@ bool Item_date_add_interval::get_date(TIME *ltime, uint fuzzy_date)
ltime->hour= (uint) (sec/3600);
daynr= calc_daynr(ltime->year,ltime->month,1) + days;
/* Day number from year 0 to 9999-12-31 */
- if ((ulonglong) daynr >= MAX_DAY_NUMBER)
+ if ((ulonglong) daynr > MAX_DAY_NUMBER)
goto invalid_date;
get_date_from_daynr((long) daynr, &ltime->year, &ltime->month,
&ltime->day);
@@ -2046,7 +2047,7 @@ bool Item_date_add_interval::get_date(TIME *ltime, uint fuzzy_date)
period= (calc_daynr(ltime->year,ltime->month,ltime->day) +
sign * (long) interval.day);
/* Daynumber from year 0 to 9999-12-31 */
- if ((ulong) period >= MAX_DAY_NUMBER)
+ if ((ulong) period > MAX_DAY_NUMBER)
goto invalid_date;
get_date_from_daynr((long) period,&ltime->year,&ltime->month,&ltime->day);
break;
@@ -2570,7 +2571,7 @@ String *Item_func_makedate::val_str(String *str)
days= calc_daynr(yearnr,1,1) + daynr - 1;
/* Day number from year 0 to 9999-12-31 */
- if (days >= 0 && days < MAX_DAY_NUMBER)
+ if (days >= 0 && days <= MAX_DAY_NUMBER)
{
null_value=0;
get_date_from_daynr(days,&l_time.year,&l_time.month,&l_time.day);
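The comparisons in this file change from >= to > because MAX_DAY_NUMBER is itself the day number of 9999-12-31 and therefore still a valid date. A minimal sketch of the corrected range check; the helper name is hypothetical:

  #define MAX_DAY_NUMBER 3652424L   /* day number of 9999-12-31 */

  /* days == MAX_DAY_NUMBER is the last representable date and must be accepted;
     only values strictly beyond it are out of range. */
  static int daynr_in_range(long days)
  {
    return days > 0 && days <= MAX_DAY_NUMBER;
  }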
diff --git a/sql/lock.cc b/sql/lock.cc
index 71384fe7fc6..97a080c5634 100644
--- a/sql/lock.cc
+++ b/sql/lock.cc
@@ -905,7 +905,7 @@ void unlock_table_name(THD *thd, TABLE_LIST *table_list)
if (table_list->table)
{
hash_delete(&open_cache, (byte*) table_list->table);
- (void) pthread_cond_broadcast(&COND_refresh);
+ broadcast_refresh();
}
}
@@ -997,9 +997,9 @@ end:
(default 0, which will unlock all tables)
NOTES
- One must have a lock on LOCK_open when calling this
- This function will send a COND_refresh signal to inform other threads
- that the name locks are removed
+ One must have a lock on LOCK_open when calling this.
+ This function will broadcast refresh signals to inform other threads
+ that the name locks are removed.
RETURN
0 ok
@@ -1013,7 +1013,7 @@ void unlock_table_names(THD *thd, TABLE_LIST *table_list,
table != last_table;
table= table->next_local)
unlock_table_name(thd,table);
- pthread_cond_broadcast(&COND_refresh);
+ broadcast_refresh();
}
@@ -1304,3 +1304,37 @@ bool make_global_read_lock_block_commit(THD *thd)
}
+/*
+ Broadcast COND_refresh and COND_global_read_lock.
+
+ SYNOPSIS
+ broadcast_refresh()
+ void No parameters.
+
+ DESCRIPTION
+ Due to a bug in a threading library it could happen that a signal
+ did not reach its target. A condition for this was that the same
+ condition variable was used with different mutexes in
+ pthread_cond_wait(). Some time ago we changed LOCK_open to
+ LOCK_global_read_lock in global read lock handling. So COND_refresh
+ was used with LOCK_open and LOCK_global_read_lock.
+
+ We have now also changed from COND_refresh to COND_global_read_lock
+ in global read lock handling. But now it is necessary to signal
+ both conditions at the same time.
+
+ NOTE
+ When signalling COND_global_read_lock within the global read lock
+ handling, it is not necessary to also signal COND_refresh.
+
+ RETURN
+ void
+*/
+
+void broadcast_refresh(void)
+{
+ VOID(pthread_cond_broadcast(&COND_refresh));
+ VOID(pthread_cond_broadcast(&COND_global_read_lock));
+}
+
+
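A generic pthread sketch of why both condition variables have to be broadcast: with two independent wait sites, waking only one condition can leave the other group of waiters asleep. Names and predicates below are illustrative, not server internals:

  #include <pthread.h>

  static pthread_mutex_t mtx_one=  PTHREAD_MUTEX_INITIALIZER;
  static pthread_mutex_t mtx_two=  PTHREAD_MUTEX_INITIALIZER;
  static pthread_cond_t  cond_one= PTHREAD_COND_INITIALIZER;
  static pthread_cond_t  cond_two= PTHREAD_COND_INITIALIZER;
  static int ready_one= 0, ready_two= 0;

  /* One group of threads waits like this on cond_one; a second group waits
     the same way on cond_two under mtx_two. */
  static void wait_on_one(void)
  {
    pthread_mutex_lock(&mtx_one);
    while (!ready_one)
      pthread_cond_wait(&cond_one, &mtx_one);
    pthread_mutex_unlock(&mtx_one);
  }

  /* Waking only one of the two conditions could leave the other group
     sleeping forever, so both are broadcast. */
  static void wake_everyone(void)
  {
    pthread_mutex_lock(&mtx_one);
    ready_one= 1;
    pthread_cond_broadcast(&cond_one);
    pthread_mutex_unlock(&mtx_one);

    pthread_mutex_lock(&mtx_two);
    ready_two= 1;
    pthread_cond_broadcast(&cond_two);
    pthread_mutex_unlock(&mtx_two);
  }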
diff --git a/sql/log.cc b/sql/log.cc
index ba02c9ba082..ebd1d10d8b7 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -36,6 +36,8 @@
MYSQL_LOG mysql_log, mysql_slow_log, mysql_bin_log;
ulong sync_binlog_counter= 0;
+static Muted_query_log_event invisible_commit;
+
static bool test_if_number(const char *str,
long *res, bool allow_wildcards);
static bool binlog_init();
@@ -94,7 +96,9 @@ static int binlog_end_trans(THD *thd, IO_CACHE *trans_log, Log_event *end_ev)
{
int error=0;
DBUG_ENTER("binlog_end_trans");
- if (end_ev)
+
+ /* NULL denotes ROLLBACK with nothing to replicate */
+ if (end_ev != NULL)
error= mysql_bin_log.write(thd, trans_log, end_ev);
statistic_increment(binlog_cache_use, &LOCK_status);
@@ -126,14 +130,19 @@ static int binlog_commit(THD *thd, bool all)
DBUG_ASSERT(mysql_bin_log.is_open() &&
(all || !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))));
- if (!my_b_tell(trans_log))
+ if (my_b_tell(trans_log) == 0)
{
// we're here because trans_log was flushed in MYSQL_LOG::log()
DBUG_RETURN(0);
}
- Query_log_event qev(thd, STRING_WITH_LEN("COMMIT"), TRUE, FALSE);
- qev.error_code= 0; // see comment in MYSQL_LOG::write(THD, IO_CACHE)
- DBUG_RETURN(binlog_end_trans(thd, trans_log, &qev));
+ if (all)
+ {
+ Query_log_event qev(thd, STRING_WITH_LEN("COMMIT"), TRUE, FALSE);
+ qev.error_code= 0; // see comment in MYSQL_LOG::write(THD, IO_CACHE)
+ DBUG_RETURN(binlog_end_trans(thd, trans_log, &qev));
+ }
+ else
+ DBUG_RETURN(binlog_end_trans(thd, trans_log, &invisible_commit));
}
static int binlog_rollback(THD *thd, bool all)
@@ -959,14 +968,14 @@ bool MYSQL_LOG::reset_logs(THD* thd)
for (;;)
{
- my_delete(linfo.log_file_name, MYF(MY_WME));
+ my_delete_allow_opened(linfo.log_file_name, MYF(MY_WME));
if (find_next_log(&linfo, 0))
break;
}
/* Start logging with a new file */
close(LOG_CLOSE_INDEX);
- my_delete(index_file_name, MYF(MY_WME)); // Reset (open will update)
+ my_delete_allow_opened(index_file_name, MYF(MY_WME)); // Reset (open will update)
if (!thd->slave_thread)
need_start_event=1;
if (!open_index_file(index_file_name, 0))
@@ -1813,6 +1822,9 @@ bool MYSQL_LOG::write(THD *thd, IO_CACHE *cache, Log_event *commit_event)
DBUG_ENTER("MYSQL_LOG::write(THD *, IO_CACHE *, Log_event *)");
VOID(pthread_mutex_lock(&LOCK_log));
+ /* NULL would represent nothing to replicate after ROLLBACK */
+ DBUG_ASSERT(commit_event != NULL);
+
if (likely(is_open())) // Should always be true
{
uint length;
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 266d6b064bd..cf5dbb1e77c 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -1229,6 +1229,18 @@ bool Query_log_event::write(IO_CACHE* file)
my_b_safe_write(file, (byte*) query, q_len)) ? 1 : 0;
}
+/*
+ Query_log_event::Query_log_event()
+
+ The simplest constructor that could possibly work. This is used for
+ creating static objects that have a special meaning and are invisible
+ to the log.
+*/
+Query_log_event::Query_log_event()
+ :Log_event(), data_buf(0)
+{
+}
+
/*
Query_log_event::Query_log_event()
@@ -1623,14 +1635,33 @@ void Query_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
*/
#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
+
+static const char *rewrite_db(const char *db)
+{
+ if (replicate_rewrite_db.is_empty() || db == NULL)
+ return db;
+ I_List_iterator<i_string_pair> it(replicate_rewrite_db);
+ i_string_pair* tmp;
+
+ while ((tmp=it++))
+ {
+ if (strcmp(tmp->key, db) == 0)
+ return tmp->val;
+ }
+ return db;
+}
+
+
int Query_log_event::exec_event(struct st_relay_log_info* rli)
{
return exec_event(rli, query, q_len);
}
-int Query_log_event::exec_event(struct st_relay_log_info* rli, const char *query_arg, uint32 q_len_arg)
+int Query_log_event::exec_event(struct st_relay_log_info* rli,
+ const char *query_arg, uint32 q_len_arg)
{
+ const char *new_db= rewrite_db(db);
int expected_error,actual_error= 0;
/*
Colleagues: please never free(thd->catalog) in MySQL. This would lead to
@@ -1639,8 +1670,7 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli, const char *query
Thank you.
*/
thd->catalog= catalog_len ? (char *) catalog : (char *)"";
- thd->db_length= db_len;
- thd->db= (char*) rewrite_db(db, &thd->db_length);
+ thd->set_db(new_db, strlen(new_db)); /* allocates a copy of 'db' */
thd->variables.auto_increment_increment= auto_increment_increment;
thd->variables.auto_increment_offset= auto_increment_offset;
@@ -1856,9 +1886,10 @@ end:
don't suffer from these assignments to 0 as DROP TEMPORARY
TABLE uses the db.table syntax.
*/
- thd->db= thd->catalog= 0; // prevent db from being freed
+ thd->catalog= 0;
+ thd->set_db(NULL, 0); /* will free the current database */
thd->query= 0; // just to be sure
- thd->query_length= thd->db_length =0;
+ thd->query_length= 0;
VOID(pthread_mutex_unlock(&LOCK_thread_count));
close_thread_tables(thd);
free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC));
@@ -1876,6 +1907,21 @@ end:
/**************************************************************************
+ Muted_query_log_event methods
+**************************************************************************/
+
+#ifndef MYSQL_CLIENT
+/*
+ Muted_query_log_event::Muted_query_log_event()
+*/
+Muted_query_log_event::Muted_query_log_event()
+ :Query_log_event()
+{
+}
+#endif
+
+
+/**************************************************************************
Start_log_event_v3 methods
**************************************************************************/
@@ -2789,8 +2835,8 @@ void Load_log_event::set_fields(const char* affected_db,
int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli,
bool use_rli_only_for_errors)
{
- thd->db_length= db_len;
- thd->db= (char*) rewrite_db(db, &thd->db_length);
+ const char *new_db= rewrite_db(db);
+ thd->set_db(new_db, strlen(new_db));
DBUG_ASSERT(thd->query == 0);
thd->query_length= 0; // Should not be needed
thd->query_error= 0;
@@ -2845,7 +2891,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli,
TABLE_LIST tables;
bzero((char*) &tables,sizeof(tables));
- tables.db = thd->db;
+ tables.db= thd->strmake(thd->db, thd->db_length);
tables.alias = tables.table_name = (char*) table_name;
tables.lock_type = TL_WRITE;
tables.updating= 1;
@@ -2940,7 +2986,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli,
ex.skip_lines = skip_lines;
List<Item> field_list;
thd->main_lex.select_lex.context.resolve_in_table_list_only(&tables);
- set_fields(thd->db, field_list, &thd->main_lex.select_lex.context);
+ set_fields(tables.db, field_list, &thd->main_lex.select_lex.context);
thd->variables.pseudo_thread_id= thread_id;
List<Item> set_fields;
if (net)
@@ -2987,11 +3033,12 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli,
error:
thd->net.vio = 0;
- char *save_db= thd->db;
+ const char *remember_db= thd->db;
VOID(pthread_mutex_lock(&LOCK_thread_count));
- thd->db= thd->catalog= 0;
+ thd->catalog= 0;
+ thd->set_db(NULL, 0); /* will free the current database */
thd->query= 0;
- thd->query_length= thd->db_length= 0;
+ thd->query_length= 0;
VOID(pthread_mutex_unlock(&LOCK_thread_count));
close_thread_tables(thd);
if (thd->query_error)
@@ -3008,7 +3055,7 @@ error:
}
slave_print_error(rli,sql_errno,"\
Error '%s' running LOAD DATA INFILE on table '%s'. Default database: '%s'",
- err, (char*)table_name, print_slave_db_safe(save_db));
+ err, (char*)table_name, print_slave_db_safe(remember_db));
free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC));
return 1;
}
@@ -3018,7 +3065,7 @@ Error '%s' running LOAD DATA INFILE on table '%s'. Default database: '%s'",
{
slave_print_error(rli,ER_UNKNOWN_ERROR, "\
Fatal error running LOAD DATA INFILE on table '%s'. Default database: '%s'",
- (char*)table_name, print_slave_db_safe(save_db));
+ (char*)table_name, print_slave_db_safe(remember_db));
return 1;
}
@@ -3094,7 +3141,7 @@ Rotate_log_event::Rotate_log_event(THD* thd_arg,
llstr(pos_arg, buff), flags));
#endif
if (flags & DUP_NAME)
- new_log_ident= my_strdup_with_length((const byte*) new_log_ident_arg,
+ new_log_ident= my_strdup_with_length(new_log_ident_arg,
ident_len, MYF(MY_WME));
DBUG_VOID_RETURN;
}
@@ -3118,7 +3165,7 @@ Rotate_log_event::Rotate_log_event(const char* buf, uint event_len,
(header_size+post_header_len));
ident_offset = post_header_len;
set_if_smaller(ident_len,FN_REFLEN-1);
- new_log_ident= my_strdup_with_length((byte*) buf + ident_offset,
+ new_log_ident= my_strdup_with_length(buf + ident_offset,
(uint) ident_len,
MYF(MY_WME));
DBUG_VOID_RETURN;
diff --git a/sql/log_event.h b/sql/log_event.h
index 0e1eb7cd13c..f1b441dedb1 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -783,6 +783,7 @@ public:
void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0);
#endif
+ Query_log_event();
Query_log_event(const char* buf, uint event_len,
const Format_description_log_event *description_event,
Log_event_type event_type);
@@ -806,6 +807,26 @@ public:
/* Writes derived event-specific part of post header. */
};
+
+/*****************************************************************************
+
+ Muted Query Log Event class
+
+ Pretends to log SQL queries, but doesn't actually do so.
+
+ ****************************************************************************/
+class Muted_query_log_event: public Query_log_event
+{
+public:
+#ifndef MYSQL_CLIENT
+ Muted_query_log_event();
+
+ bool write(IO_CACHE* file) { return(false); };
+ virtual bool write_post_header_for_derived(IO_CACHE* file) { return FALSE; }
+#endif
+};
+
+
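Muted_query_log_event acts as a null object: callers treat it like any other event, but its write() emits nothing and reports success. A generic sketch of the pattern (not the server's Log_event hierarchy; true means error, as in the real write() methods):

  #include <cstdio>

  struct LogEvent
  {
    virtual bool write(FILE *log)            /* true means error */
    {
      return fputs("COMMIT\n", log) == EOF;
    }
    virtual ~LogEvent() {}
  };

  /* Behaves like a real event to its callers but never emits anything. */
  struct MutedLogEvent : LogEvent
  {
    bool write(FILE *) { return false; }     /* "success", zero bytes written */
  };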
#ifdef HAVE_REPLICATION
/*****************************************************************************
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 3bb371b6004..3c58f2cbc6b 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -1344,6 +1344,7 @@ void start_waiting_global_read_lock(THD *thd);
bool make_global_read_lock_block_commit(THD *thd);
bool set_protect_against_global_read_lock(void);
void unset_protect_against_global_read_lock(void);
+void broadcast_refresh(void);
/* Lock based on name */
int lock_and_wait_for_table_name(THD *thd, TABLE_LIST *table_list);
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index d7a38d6b715..429bdee17d6 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -120,16 +120,7 @@ extern "C" { // Because of SCO 3.2V4.2
#include <sys/utsname.h>
#endif /* __WIN__ */
-#ifdef HAVE_LIBWRAP
-#include <tcpd.h>
-#include <syslog.h>
-#ifdef NEED_SYS_SYSLOG_H
-#include <sys/syslog.h>
-#endif /* NEED_SYS_SYSLOG_H */
-int allow_severity = LOG_INFO;
-int deny_severity = LOG_WARNING;
-
-#endif /* HAVE_LIBWRAP */
+#include <my_libwrap.h>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
@@ -323,6 +314,7 @@ static char *default_character_set_name;
static char *character_set_filesystem_name;
static char *my_bind_addr_str;
static char *default_collation_name;
+static char compiled_default_collation_name[]= MYSQL_DEFAULT_COLLATION_NAME;
static char mysql_data_home_buff[2];
static struct passwd *user_info;
static I_List<THD> thread_cache;
@@ -591,6 +583,8 @@ static const char* default_dbug_option;
#endif
#ifdef HAVE_LIBWRAP
const char *libwrapName= NULL;
+int allow_severity = LOG_INFO;
+int deny_severity = LOG_WARNING;
#endif
#ifdef HAVE_QUERY_CACHE
static ulong query_cache_limit= 0;
@@ -4072,8 +4066,8 @@ pthread_handler_t handle_connections_sockets(void *arg __attribute__((unused)))
struct request_info req;
signal(SIGCHLD, SIG_DFL);
request_init(&req, RQ_DAEMON, libwrapName, RQ_FILE, new_sock, NULL);
- fromhost(&req);
- if (!hosts_access(&req))
+ my_fromhost(&req);
+ if (!my_hosts_access(&req))
{
/*
This may be stupid but refuse() includes an exit(0)
@@ -4081,7 +4075,7 @@ pthread_handler_t handle_connections_sockets(void *arg __attribute__((unused)))
clean_exit() - same stupid thing ...
*/
syslog(deny_severity, "refused connect from %s",
- eval_client(&req));
+ my_eval_client(&req));
/*
C++ sucks (the gibberish in front just translates the supplied
@@ -6389,7 +6383,7 @@ static void mysql_init_variables(void)
/* Variables in libraries */
charsets_dir= 0;
default_character_set_name= (char*) MYSQL_DEFAULT_CHARSET_NAME;
- default_collation_name= (char*) MYSQL_DEFAULT_COLLATION_NAME;
+ default_collation_name= compiled_default_collation_name;
sys_charset_system.value= (char*) system_charset_info->csname;
character_set_filesystem_name= (char*) "binary";
@@ -6551,7 +6545,8 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
strmake(mysql_home,argument,sizeof(mysql_home)-1);
break;
case 'C':
- default_collation_name= 0;
+ if (default_collation_name == compiled_default_collation_name)
+ default_collation_name= 0;
break;
case 'l':
opt_log=1;
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index cb0f35a425e..72ead07059b 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -80,8 +80,8 @@ public:
SEL_ARG(Field *field, uint8 part, char *min_value, char *max_value,
uint8 min_flag, uint8 max_flag, uint8 maybe_flag);
SEL_ARG(enum Type type_arg)
- :elements(1),use_count(1),left(0),next_key_part(0),color(BLACK),
- type(type_arg),min_flag(0)
+ :min_flag(0),elements(1),use_count(1),left(0),next_key_part(0),
+ color(BLACK), type(type_arg)
{}
inline bool is_same(SEL_ARG *arg)
{
diff --git a/sql/set_var.cc b/sql/set_var.cc
index 003dd4a8ab3..1cb3878ac70 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -1133,7 +1133,7 @@ bool update_sys_var_str(sys_var_str *var_str, rw_lock_t *var_mutex,
uint new_length= (var ? var->value->str_value.length() : 0);
if (!old_value)
old_value= (char*) "";
- if (!(res= my_strdup_with_length((byte*)old_value, new_length, MYF(0))))
+ if (!(res= my_strdup_with_length(old_value, new_length, MYF(0))))
return 1;
/*
Replace the old value in such a way that the any thread using
@@ -2578,7 +2578,7 @@ bool sys_var_insert_id::update(THD *thd, set_var *var)
byte *sys_var_insert_id::value_ptr(THD *thd, enum_var_type type,
LEX_STRING *base)
{
- return (byte*) &thd->current_insert_id;
+ return (byte*) &thd->next_insert_id;
}
diff --git a/sql/set_var.h b/sql/set_var.h
index 8e5a94b1e1b..b048428219d 100644
--- a/sql/set_var.h
+++ b/sql/set_var.h
@@ -935,7 +935,7 @@ public:
uint name_length_arg, gptr data_arg)
:name_length(name_length_arg), data(data_arg)
{
- name= my_strdup_with_length((byte*) name_arg, name_length, MYF(MY_WME));
+ name= my_strdup_with_length(name_arg, name_length, MYF(MY_WME));
links->push_back(this);
}
inline bool cmp(const char *name_cmp, uint length)
diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt
index 4e7b9200d88..9b20c37ece2 100644
--- a/sql/share/errmsg.txt
+++ b/sql/share/errmsg.txt
@@ -5619,3 +5619,5 @@ ER_NON_GROUPING_FIELD_USED 42000
eng "non-grouping field '%-.64s' is used in %-.64s clause"
ER_TABLE_CANT_HANDLE_SPKEYS
eng "The used table type doesn't support SPATIAL indexes"
+ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA
+ eng "Triggers can not be created on system tables"
diff --git a/sql/slave.cc b/sql/slave.cc
index 2b31d722f26..b284f4a6a16 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -1177,24 +1177,6 @@ bool net_request_file(NET* net, const char* fname)
}
-const char *rewrite_db(const char* db, uint32 *new_len)
-{
- if (replicate_rewrite_db.is_empty() || !db)
- return db;
- I_List_iterator<i_string_pair> it(replicate_rewrite_db);
- i_string_pair* tmp;
-
- while ((tmp=it++))
- {
- if (!strcmp(tmp->key, db))
- {
- *new_len= (uint32)strlen(tmp->val);
- return tmp->val;
- }
- }
- return db;
-}
-
/*
From other comments and tests in code, it looks like
sometimes Query_log_event and Load_log_event can have db == 0
@@ -1581,9 +1563,8 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db,
// save old db in case we are creating in a different database
save_db = thd->db;
save_db_length= thd->db_length;
- thd->db = (char*)db;
- DBUG_ASSERT(thd->db != 0);
- thd->db_length= strlen(thd->db);
+ DBUG_ASSERT(db != 0);
+ thd->reset_db((char*)db, strlen(db));
mysql_parse(thd, thd->query, packet_len); // run create table
thd->db = save_db; // leave things the way the were before
thd->db_length= save_db_length;
@@ -3713,8 +3694,9 @@ err:
sql_print_information("Slave I/O thread exiting, read up to log '%s', position %s",
IO_RPL_LOG_NAME, llstr(mi->master_log_pos,llbuff));
VOID(pthread_mutex_lock(&LOCK_thread_count));
- thd->query = thd->db = 0; // extra safety
- thd->query_length= thd->db_length= 0;
+ thd->query= 0; // extra safety
+ thd->query_length= 0;
+ thd->reset_db(NULL, 0);
VOID(pthread_mutex_unlock(&LOCK_thread_count));
if (mysql)
{
@@ -3932,8 +3914,10 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \
should already have done these assignments (each event which sets these
variables is supposed to set them to 0 before terminating)).
*/
- thd->query= thd->db= thd->catalog= 0;
- thd->query_length= thd->db_length= 0;
+ thd->catalog= 0;
+ thd->reset_db(NULL, 0);
+ thd->query= 0;
+ thd->query_length= 0;
VOID(pthread_mutex_unlock(&LOCK_thread_count));
thd->proc_info = "Waiting for slave mutex on exit";
pthread_mutex_lock(&rli->run_lock);
diff --git a/sql/slave.h b/sql/slave.h
index 040ce4eaf85..c355f7172a9 100644
--- a/sql/slave.h
+++ b/sql/slave.h
@@ -526,10 +526,6 @@ int start_slave_thread(pthread_handler h_func, pthread_mutex_t* start_lock,
MASTER_INFO* mi,
bool high_priority);
-/* If fd is -1, dump to NET */
-int mysql_table_dump(THD* thd, const char* db,
- const char* tbl_name, int fd = -1);
-
/* retrieve table from master and copy to slave*/
int fetch_master_table(THD* thd, const char* db_name, const char* table_name,
MASTER_INFO* mi, MYSQL* mysql, bool overwrite);
@@ -554,7 +550,6 @@ int add_table_rule(HASH* h, const char* table_spec);
int add_wild_table_rule(DYNAMIC_ARRAY* a, const char* table_spec);
void init_table_rule_hash(HASH* h, bool* h_inited);
void init_table_rule_array(DYNAMIC_ARRAY* a, bool* a_inited);
-const char *rewrite_db(const char* db, uint32 *new_db_len);
const char *print_slave_db_safe(const char *db);
int check_expected_error(THD* thd, RELAY_LOG_INFO* rli, int error_code);
void skip_load_data_infile(NET* net);
diff --git a/sql/sp.cc b/sql/sp.cc
index cae7a56fa57..553465ebff8 100644
--- a/sql/sp.cc
+++ b/sql/sp.cc
@@ -404,7 +404,8 @@ db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp,
{
LEX *old_lex= thd->lex, newlex;
String defstr;
- char olddb[128];
+ char old_db_buf[NAME_LEN+1];
+ LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) };
bool dbchanged;
ulong old_sql_mode= thd->variables.sql_mode;
ha_rows old_select_limit= thd->variables.select_limit;
@@ -450,9 +451,7 @@ db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp,
goto end;
}
- dbchanged= FALSE;
- if ((ret= sp_use_new_db(thd, name->m_db.str, olddb, sizeof(olddb),
- 1, &dbchanged)))
+ if ((ret= sp_use_new_db(thd, name->m_db, &old_db, 1, &dbchanged)))
goto end;
lex_start(thd, (uchar*)defstr.c_ptr(), defstr.length());
@@ -462,14 +461,14 @@ db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp,
{
sp_head *sp= newlex.sphead;
- if (dbchanged && (ret= mysql_change_db(thd, olddb, 1)))
+ if (dbchanged && (ret= mysql_change_db(thd, old_db.str, 1)))
goto end;
delete sp;
ret= SP_PARSE_ERROR;
}
else
{
- if (dbchanged && (ret= mysql_change_db(thd, olddb, 1)))
+ if (dbchanged && (ret= mysql_change_db(thd, old_db.str, 1)))
goto end;
*sphp= newlex.sphead;
(*sphp)->set_definer(&definer_user_name, &definer_host_name);
@@ -505,15 +504,14 @@ db_create_routine(THD *thd, int type, sp_head *sp)
int ret;
TABLE *table;
char definer[USER_HOST_BUFF_SIZE];
- char olddb[128];
+ char old_db_buf[NAME_LEN+1];
+ LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) };
bool dbchanged;
DBUG_ENTER("db_create_routine");
DBUG_PRINT("enter", ("type: %d name: %.*s",type,sp->m_name.length,
sp->m_name.str));
- dbchanged= FALSE;
- if ((ret= sp_use_new_db(thd, sp->m_db.str, olddb, sizeof(olddb),
- 0, &dbchanged)))
+ if ((ret= sp_use_new_db(thd, sp->m_db, &old_db, 0, &dbchanged)))
{
ret= SP_NO_DB_ERROR;
goto done;
@@ -641,7 +639,7 @@ db_create_routine(THD *thd, int type, sp_head *sp)
done:
close_thread_tables(thd);
if (dbchanged)
- (void)mysql_change_db(thd, olddb, 1);
+ (void) mysql_change_db(thd, old_db.str, 1);
DBUG_RETURN(ret);
}
@@ -1814,49 +1812,76 @@ create_string(THD *thd, String *buf,
}
-//
-// Utilities...
-//
+
+/*
+ Change the current database if needed.
+
+ SYNOPSIS
+ sp_use_new_db()
+ thd thread handle
+
+ new_db new database name (a string and its length)
+
+ old_db [IN] str points to a buffer in which to store the old
+ database, length contains the size of the buffer
+ [OUT] if old db was not NULL, its name is copied
+ to the buffer pointed at by str and length is updated
+ accordingly. Otherwise str[0] is set to '\0' and length
+ is set to 0. The out parameter should be used only if
+ the database name has been changed (see dbchangedp).
+
+ dbchangedp [OUT] is set to TRUE if the current database is changed,
+ FALSE otherwise. A database is not changed if the old
+ name is the same as the new one, both names are empty,
+ or an error has occurred.
+
+ RETURN VALUE
+ 0 success
+ 1 access denied or out of memory (the error message is
+ set in THD)
+*/
int
-sp_use_new_db(THD *thd, char *newdb, char *olddb, uint olddblen,
+sp_use_new_db(THD *thd, LEX_STRING new_db, LEX_STRING *old_db,
bool no_access_check, bool *dbchangedp)
{
- bool changeit;
+ int ret;
+ static char empty_c_string[1]= {0}; /* used for not defined db */
DBUG_ENTER("sp_use_new_db");
- DBUG_PRINT("enter", ("newdb: %s", newdb));
+ DBUG_PRINT("enter", ("newdb: %s", new_db.str));
- if (! newdb)
- newdb= (char *)"";
- if (thd->db && thd->db[0])
+ /*
+ Set new_db to an empty string if it's NULL, because mysql_change_db
+ requires a non-NULL argument.
+ new_db.str can be NULL only if we're restoring the old database after
+ execution of a stored procedure and there were no current database
+ selected. The stored procedure itself must always have its database
+ initialized.
+ */
+ if (new_db.str == NULL)
+ new_db.str= empty_c_string;
+
+ if (thd->db)
{
- if (my_strcasecmp(system_charset_info, thd->db, newdb) == 0)
- changeit= 0;
- else
- {
- changeit= 1;
- strnmov(olddb, thd->db, olddblen);
- }
+ old_db->length= (strmake(old_db->str, thd->db, old_db->length) -
+ old_db->str);
}
else
- { // thd->db empty
- if (newdb[0])
- changeit= 1;
- else
- changeit= 0;
- olddb[0] = '\0';
+ {
+ old_db->str[0]= '\0';
+ old_db->length= 0;
}
- if (!changeit)
+
+ /* Don't change the database if the new name is the same as the old one. */
+ if (my_strcasecmp(system_charset_info, old_db->str, new_db.str) == 0)
{
*dbchangedp= FALSE;
DBUG_RETURN(0);
}
- else
- {
- int ret= mysql_change_db(thd, newdb, no_access_check);
- if (! ret)
- *dbchangedp= TRUE;
- DBUG_RETURN(ret);
- }
+ ret= mysql_change_db(thd, new_db.str, no_access_check);
+
+ *dbchangedp= ret == 0;
+ DBUG_RETURN(ret);
}
+
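Editorial note: the new calling convention above is easiest to read as a save/switch/restore pattern. The sketch below is not part of the patch; it mirrors how db_load_routine(), db_create_routine() and sp_head::execute() use the LEX_STRING-based interface (thd and sp are assumed from the surrounding server context, and it is not compilable on its own):

    /* Caller-side sketch of the sp_use_new_db() contract. */
    char old_db_buf[NAME_LEN+1];
    LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) };
    bool dbchanged= FALSE;

    /* Switch to the routine's database; the previous name, if any, is
       copied into old_db and dbchanged tells us whether to restore it. */
    if (sp_use_new_db(thd, sp->m_db, &old_db, 0, &dbchanged))
      DBUG_RETURN(TRUE);               /* access denied or out of memory */

    /* ... execute the routine body ... */

    if (dbchanged)
      (void) mysql_change_db(thd, old_db.str, 1); /* no access check going back */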
diff --git a/sql/sp.h b/sql/sp.h
index 2587a9b115a..631b8a87aa2 100644
--- a/sql/sp.h
+++ b/sql/sp.h
@@ -104,15 +104,15 @@ extern "C" byte* sp_sroutine_key(const byte *ptr, uint *plen, my_bool first);
TABLE *open_proc_table_for_read(THD *thd, Open_tables_state *backup);
void close_proc_table(THD *thd, Open_tables_state *backup);
-//
-// Utilities...
-//
-// Do a "use newdb". The current db is stored at olddb.
-// If newdb is the same as the current one, nothing is changed.
-// dbchangedp is set to true if the db was actually changed.
+/*
+ Do a "use new_db". The current db is stored at old_db. If new_db is the
+ same as the current one, nothing is changed. dbchangedp is set to true if
+ the db was actually changed.
+*/
+
int
-sp_use_new_db(THD *thd, char *newdb, char *olddb, uint olddbmax,
+sp_use_new_db(THD *thd, LEX_STRING new_db, LEX_STRING *old_db,
bool no_access_check, bool *dbchangedp);
#endif /* _SP_H_ */
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index 3b29a841966..b3b99557b63 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -376,24 +376,6 @@ sp_name::init_qname(THD *thd)
m_name.length, m_name.str);
}
-sp_name *
-sp_name_current_db_new(THD *thd, LEX_STRING name)
-{
- sp_name *qname;
-
- if (! thd->db)
- qname= new sp_name(name);
- else
- {
- LEX_STRING db;
-
- db.length= strlen(thd->db);
- db.str= thd->strmake(thd->db, db.length);
- qname= new sp_name(db, name);
- }
- qname->init_qname(thd);
- return qname;
-}
/*
Check that the name 'ident' is ok. It's assumed to be an 'ident'
@@ -504,27 +486,20 @@ sp_head::init_strings(THD *thd, LEX *lex, sp_name *name)
/* During parsing, we must use thd->mem_root */
MEM_ROOT *root= thd->mem_root;
+ DBUG_ASSERT(name);
+ /* Must be initialized in the parser */
+ DBUG_ASSERT(name->m_db.str && name->m_db.length);
+
/* We have to copy strings to get them into the right memroot */
- if (name)
- {
- m_db.length= name->m_db.length;
- if (name->m_db.length == 0)
- m_db.str= NULL;
- else
- m_db.str= strmake_root(root, name->m_db.str, name->m_db.length);
- m_name.length= name->m_name.length;
- m_name.str= strmake_root(root, name->m_name.str, name->m_name.length);
-
- if (name->m_qname.length == 0)
- name->init_qname(thd);
- m_qname.length= name->m_qname.length;
- m_qname.str= strmake_root(root, name->m_qname.str, m_qname.length);
- }
- else if (thd->db)
- {
- m_db.length= thd->db_length;
- m_db.str= strmake_root(root, thd->db, m_db.length);
- }
+ m_db.length= name->m_db.length;
+ m_db.str= strmake_root(root, name->m_db.str, name->m_db.length);
+ m_name.length= name->m_name.length;
+ m_name.str= strmake_root(root, name->m_name.str, name->m_name.length);
+
+ if (name->m_qname.length == 0)
+ name->init_qname(thd);
+ m_qname.length= name->m_qname.length;
+ m_qname.str= strmake_root(root, name->m_qname.str, m_qname.length);
if (m_param_begin && m_param_end)
{
@@ -565,7 +540,7 @@ create_typelib(MEM_ROOT *mem_root, create_field *field_def, List<String> *src)
result->name= "";
if (!(result->type_names=(const char **)
alloc_root(mem_root,(sizeof(char *)+sizeof(int))*(result->count+1))))
- return 0;
+ DBUG_RETURN(0);
result->type_lengths= (unsigned int *)(result->type_names + result->count+1);
List_iterator<String> it(*src);
String conv;
@@ -599,7 +574,7 @@ create_typelib(MEM_ROOT *mem_root, create_field *field_def, List<String> *src)
result->type_names[result->count]= 0;
result->type_lengths[result->count]= 0;
}
- return result;
+ DBUG_RETURN(result);
}
@@ -933,7 +908,8 @@ bool
sp_head::execute(THD *thd)
{
DBUG_ENTER("sp_head::execute");
- char olddb[128];
+ char old_db_buf[NAME_LEN+1];
+ LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) };
bool dbchanged;
sp_rcontext *ctx;
bool err_status= FALSE;
@@ -980,10 +956,8 @@ sp_head::execute(THD *thd)
m_first_instance->m_last_cached_sp == this) ||
(m_recursion_level + 1 == m_next_cached_sp->m_recursion_level));
- dbchanged= FALSE;
if (m_db.length &&
- (err_status= sp_use_new_db(thd, m_db.str, olddb, sizeof(olddb), 0,
- &dbchanged)))
+ (err_status= sp_use_new_db(thd, m_db, &old_db, 0, &dbchanged)))
goto done;
if ((ctx= thd->spcont))
@@ -1075,7 +1049,6 @@ sp_head::execute(THD *thd)
thd->net.no_send_error= 0;
if (i->free_list)
cleanup_items(i->free_list);
- i->state= Query_arena::EXECUTED;
/*
If we've set thd->user_var_events_alloc to mem_root of this SP
@@ -1155,10 +1128,10 @@ sp_head::execute(THD *thd)
{
/*
No access check when changing back to where we came from.
- (It would generate an error from mysql_change_db() when olddb=="")
+ (It would generate an error from mysql_change_db() when old_db=="")
*/
if (! thd->killed)
- err_status|= mysql_change_db(thd, olddb, 1);
+ err_status|= mysql_change_db(thd, old_db.str, 1);
}
m_flags&= ~IS_INVOKED;
DBUG_PRINT("info",
@@ -1816,9 +1789,6 @@ sp_head::reset_thd_mem_root(THD *thd)
(ulong) &mem_root, (ulong) &thd->mem_root));
free_list= thd->free_list; // Keep the old list
thd->free_list= NULL; // Start a new one
- /* Copy the db, since substatements will point to it */
- m_thd_db= thd->db;
- thd->db= thd->strmake(thd->db, thd->db_length);
m_thd= thd;
DBUG_VOID_RETURN;
}
@@ -1834,7 +1804,6 @@ sp_head::restore_thd_mem_root(THD *thd)
DBUG_PRINT("info", ("mem_root 0x%lx returned from thd mem root 0x%lx",
(ulong) &mem_root, (ulong) &thd->mem_root));
thd->free_list= flist; // Restore the old one
- thd->db= m_thd_db; // Restore the original db pointer
thd->mem_root= m_thd_root;
m_thd= NULL;
DBUG_VOID_RETURN;
@@ -2210,6 +2179,9 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp,
m_lex->mark_as_requiring_prelocking(NULL);
}
thd->rollback_item_tree_changes();
+ /* Update the state of the active arena. */
+ thd->stmt_arena->state= Query_arena::EXECUTED;
+
/*
Unlike for PS we should not call Item's destructors for newly created
diff --git a/sql/sp_head.h b/sql/sp_head.h
index d5f49d8a964..073cca2cd12 100644
--- a/sql/sp_head.h
+++ b/sql/sp_head.h
@@ -61,13 +61,6 @@ public:
*/
LEX_STRING m_sroutines_key;
- sp_name(LEX_STRING name)
- : m_name(name)
- {
- m_db.str= m_qname.str= m_sroutines_key.str= 0;
- m_db.length= m_qname.length= m_sroutines_key.length= 0;
- }
-
sp_name(LEX_STRING db, LEX_STRING name)
: m_db(db), m_name(name)
{
@@ -101,8 +94,6 @@ public:
{}
};
-sp_name *
-sp_name_current_db_new(THD *thd, LEX_STRING name);
bool
check_routine_name(LEX_STRING name);
@@ -355,7 +346,6 @@ private:
MEM_ROOT *m_thd_root; // Temp. store for thd's mem_root
THD *m_thd; // Set if we have reset mem_root
- char *m_thd_db; // Original thd->db pointer
sp_pcontext *m_pcont; // Parse context
List<LEX> m_lex; // Temp. store for the other lex
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index 8b235d26d37..124d3566b19 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -5601,25 +5601,30 @@ bool sp_grant_privileges(THD *thd, const char *sp_db, const char *sp_name,
TABLE_LIST tables[1];
List<LEX_USER> user_list;
bool result;
+ ACL_USER *au;
+ char passwd_buff[SCRAMBLED_PASSWORD_CHAR_LENGTH+1];
DBUG_ENTER("sp_grant_privileges");
if (!(combo=(LEX_USER*) thd->alloc(sizeof(st_lex_user))))
DBUG_RETURN(TRUE);
combo->user.str= sctx->user;
-
+
VOID(pthread_mutex_lock(&acl_cache->lock));
- if (!find_acl_user(combo->host.str=(char*)sctx->host_or_ip, combo->user.str,
- FALSE) &&
- !find_acl_user(combo->host.str=(char*)sctx->host, combo->user.str,
- FALSE) &&
- !find_acl_user(combo->host.str=(char*)sctx->ip, combo->user.str,
- FALSE) &&
- !find_acl_user(combo->host.str=(char*)"%", combo->user.str, FALSE))
- {
- VOID(pthread_mutex_unlock(&acl_cache->lock));
- DBUG_RETURN(TRUE);
- }
+
+ if ((au= find_acl_user(combo->host.str=(char*)sctx->host_or_ip,combo->user.str,FALSE)))
+ goto found_acl;
+ if ((au= find_acl_user(combo->host.str=(char*)sctx->host, combo->user.str,FALSE)))
+ goto found_acl;
+ if ((au= find_acl_user(combo->host.str=(char*)sctx->ip, combo->user.str,FALSE)))
+ goto found_acl;
+ if((au= find_acl_user(combo->host.str=(char*)"%", combo->user.str, FALSE)))
+ goto found_acl;
+
+ VOID(pthread_mutex_unlock(&acl_cache->lock));
+ DBUG_RETURN(TRUE);
+
+ found_acl:
VOID(pthread_mutex_unlock(&acl_cache->lock));
bzero((char*)tables, sizeof(TABLE_LIST));
@@ -5627,13 +5632,37 @@ bool sp_grant_privileges(THD *thd, const char *sp_db, const char *sp_name,
tables->db= (char*)sp_db;
tables->table_name= tables->alias= (char*)sp_name;
-
+
combo->host.length= strlen(combo->host.str);
combo->user.length= strlen(combo->user.str);
combo->host.str= thd->strmake(combo->host.str,combo->host.length);
combo->user.str= thd->strmake(combo->user.str,combo->user.length);
- combo->password.str= (char*)"";
- combo->password.length= 0;
+
+
+ if(au && au->salt_len)
+ {
+ if (au->salt_len == SCRAMBLE_LENGTH)
+ {
+ make_password_from_salt(passwd_buff, au->salt);
+ combo->password.length= SCRAMBLED_PASSWORD_CHAR_LENGTH;
+ }
+ else if (au->salt_len == SCRAMBLE_LENGTH_323)
+ {
+ make_password_from_salt_323(passwd_buff, (ulong *) au->salt);
+ combo->password.length= SCRAMBLED_PASSWORD_CHAR_LENGTH_323;
+ }
+ else
+ {
+ my_error(ER_PASSWD_LENGTH, MYF(0), SCRAMBLED_PASSWORD_CHAR_LENGTH);
+ return -1;
+ }
+ combo->password.str= passwd_buff;
+ }
+ else
+ {
+ combo->password.str= (char*)"";
+ combo->password.length= 0;
+ }
if (user_list.push_back(combo))
DBUG_RETURN(TRUE);
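Editorial note: the sql_acl.cc hunk above grants the routine's privileges under the definer's existing credentials instead of an empty password, by rebuilding the textual password hash from the binary salt kept in the ACL cache. A trimmed-down sketch of that dispatch (only calls already visible in this hunk; au and combo come from the surrounding function; the patch additionally rejects unexpected salt lengths with ER_PASSWD_LENGTH):

    /* Sketch: rebuild the scrambled password string from the stored salt. */
    char passwd_buff[SCRAMBLED_PASSWORD_CHAR_LENGTH+1];

    if (au && au->salt_len == SCRAMBLE_LENGTH)           /* 4.1-style hash */
    {
      make_password_from_salt(passwd_buff, au->salt);
      combo->password.length= SCRAMBLED_PASSWORD_CHAR_LENGTH;
      combo->password.str= passwd_buff;
    }
    else if (au && au->salt_len == SCRAMBLE_LENGTH_323)  /* pre-4.1 hash */
    {
      make_password_from_salt_323(passwd_buff, (ulong *) au->salt);
      combo->password.length= SCRAMBLED_PASSWORD_CHAR_LENGTH_323;
      combo->password.str= passwd_buff;
    }
    else
    {
      combo->password.str= (char*) "";                   /* no password stored */
      combo->password.length= 0;
    }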
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 7d14e99fb77..5904e13d710 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -530,7 +530,7 @@ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived)
if (found_old_table)
{
/* Tell threads waiting for refresh that something has happened */
- VOID(pthread_cond_broadcast(&COND_refresh));
+ broadcast_refresh();
}
if (!lock_in_use)
VOID(pthread_mutex_unlock(&LOCK_open));
@@ -1035,7 +1035,7 @@ TABLE *unlink_open_table(THD *thd, TABLE *list, TABLE *find)
}
*prev=0;
// Notify any 'refresh' threads
- pthread_cond_broadcast(&COND_refresh);
+ broadcast_refresh();
return start;
}
@@ -1577,7 +1577,7 @@ bool reopen_table(TABLE *table,bool locked)
if (table->triggers)
table->triggers->set_table(table);
- VOID(pthread_cond_broadcast(&COND_refresh));
+ broadcast_refresh();
error=0;
end:
@@ -1678,7 +1678,7 @@ bool reopen_tables(THD *thd,bool get_locks,bool in_refresh)
{
my_afree((gptr) tables);
}
- VOID(pthread_cond_broadcast(&COND_refresh)); // Signal to refresh
+ broadcast_refresh();
*prev=0;
DBUG_RETURN(error);
}
@@ -1715,7 +1715,7 @@ void close_old_data_files(THD *thd, TABLE *table, bool abort_locks,
}
}
if (found)
- VOID(pthread_cond_broadcast(&COND_refresh)); // Signal to refresh
+ broadcast_refresh();
DBUG_VOID_RETURN;
}
@@ -1807,7 +1807,7 @@ bool drop_locked_tables(THD *thd,const char *db, const char *table_name)
}
*prev=0;
if (found)
- VOID(pthread_cond_broadcast(&COND_refresh)); // Signal to refresh
+ broadcast_refresh();
if (thd->locked_tables && thd->locked_tables->table_count == 0)
{
my_free((gptr) thd->locked_tables,MYF(0));
@@ -5249,7 +5249,7 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table_name,
Signal any thread waiting for tables to be freed to
reopen their tables
*/
- (void) pthread_cond_broadcast(&COND_refresh);
+ broadcast_refresh();
DBUG_PRINT("info", ("Waiting for refresh signal"));
if (!(flags & RTFC_CHECK_KILLED_FLAG) || !thd->killed)
{
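Editorial note: throughout sql_base.cc the direct pthread_cond_broadcast(&COND_refresh) calls are funnelled through a broadcast_refresh() helper whose definition is outside this excerpt. As a hedged sketch of what such a wrapper is generally expected to do, namely wake both the table-refresh waiters and the threads waiting for the global read lock, it could look like the following; the body actually committed may differ:

    /* Sketch only; the real helper is defined elsewhere in the tree. */
    void broadcast_refresh(void)
    {
      VOID(pthread_cond_broadcast(&COND_refresh));          /* table refresh waiters */
      VOID(pthread_cond_broadcast(&COND_global_read_lock)); /* global read lock waiters */
    }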
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 2cfc9142453..eb075dd54bb 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -1300,7 +1300,7 @@ public:
pthread_t real_id;
uint tmp_table, global_read_lock;
uint server_status,open_options,system_thread;
- uint32 db_length;
+ uint db_length;
uint select_number; //number of select (used for EXPLAIN)
/* variables.transaction_isolation is reset to this after each commit */
enum_tx_isolation session_tx_isolation;
@@ -1571,6 +1571,49 @@ public:
void restore_sub_statement_state(Sub_statement_state *backup);
void set_n_backup_active_arena(Query_arena *set, Query_arena *backup);
void restore_active_arena(Query_arena *set, Query_arena *backup);
+
+ /*
+    Initialize the current database from a NULL-terminated string and its
+    length. If we run out of memory, we free the current database and
+    return TRUE.
+ This way the user will notice the error as there will be no current
+ database selected (in addition to the error message set by malloc).
+ */
+ bool set_db(const char *new_db, uint new_db_len)
+ {
+ /* Do not reallocate memory if current chunk is big enough. */
+ if (db && new_db && db_length >= new_db_len)
+ memcpy(db, new_db, new_db_len+1);
+ else
+ {
+ x_free(db);
+ db= new_db ? my_strdup_with_length(new_db, new_db_len, MYF(MY_WME)) :
+ NULL;
+ }
+ db_length= db ? new_db_len : 0;
+ return new_db && !db;
+ }
+ void reset_db(char *new_db, uint new_db_len)
+ {
+ db= new_db;
+ db_length= new_db_len;
+ }
+ /*
+ Copy the current database to the argument. Use the current arena to
+ allocate memory for a deep copy: current database may be freed after
+ a statement is parsed but before it's executed.
+ */
+ bool copy_db_to(char **p_db, uint *p_db_length)
+ {
+ if (db == NULL)
+ {
+ my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0));
+ return TRUE;
+ }
+ *p_db= strmake(db, db_length);
+ if (p_db_length)
+ *p_db_length= db_length;
+ return FALSE;
+ }
};
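Editorial note: the three new THD helpers split the ownership rules for thd->db. A usage sketch follows (server context assumed; the names event_db and event_db_len are hypothetical, and the fragment is illustrative rather than compilable):

    /* set_db(): THD makes and owns its own copy of the name. */
    if (thd->set_db("test", 4))
      DBUG_RETURN(TRUE);              /* out of memory; thd->db is now NULL */

    /* reset_db(): only swap pointers, no allocation -- used where the
       buffer is owned elsewhere (e.g. by a replication event). */
    thd->reset_db(event_db, event_db_len);

    /* copy_db_to(): hand out a copy allocated on the current arena, so the
       caller never keeps an alias of thd->db that a later USE or DROP
       DATABASE could invalidate. Reports ER_NO_DB_ERROR itself when no
       database is selected. */
    char *db;
    uint db_length;
    if (thd->copy_db_to(&db, &db_length))
      DBUG_RETURN(TRUE);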
@@ -1916,7 +1959,7 @@ typedef struct st_sort_buffer {
class Table_ident :public Sql_alloc
{
- public:
+public:
LEX_STRING db;
LEX_STRING table;
SELECT_LEX_UNIT *sel;
diff --git a/sql/sql_db.cc b/sql/sql_db.cc
index a52972753a7..902539dfdec 100644
--- a/sql/sql_db.cc
+++ b/sql/sql_db.cc
@@ -781,33 +781,13 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent)
exit:
(void)sp_drop_db_routines(thd, db); /* QQ Ignore errors for now */
/*
- If this database was the client's selected database, we silently change the
- client's selected database to nothing (to have an empty SELECT DATABASE()
- in the future). For this we free() thd->db and set it to 0. But we don't do
- free() for the slave thread. Indeed, doing a x_free() on it leads to nasty
- problems (i.e. long painful debugging) because in this thread, thd->db is
- the same as data_buf and db of the Query_log_event which is dropping the
- database. So if you free() thd->db, you're freeing data_buf. You set
- thd->db to 0 but not data_buf (thd->db and data_buf are two distinct
- pointers which point to the same place). Then in ~Query_log_event(), we
- have 'if (data_buf) free(data_buf)' data_buf is !=0 so this makes a
- DOUBLE free().
- Side effects of this double free() are, randomly (depends on the machine),
- when the slave is replicating a DROP DATABASE:
- - garbage characters in the error message:
- "Error 'Can't drop database 'test2'; database doesn't exist' on query
- 'h4zI©'"
- - segfault
- - hang in "free(vio)" (yes!) in the I/O or SQL slave threads (so slave
- server hangs at shutdown etc).
+ If this database was the client's selected database, we silently
+ change the client's selected database to nothing (to have an empty
+ SELECT DATABASE() in the future). For this we free() thd->db and set
+ it to 0.
*/
if (thd->db && !strcmp(thd->db, db))
- {
- if (!(thd->slave_thread)) /* a slave thread will free it itself */
- x_free(thd->db);
- thd->db= 0;
- thd->db_length= 0;
- }
+ thd->set_db(NULL, 0);
VOID(pthread_mutex_unlock(&LOCK_mysql_create_db));
start_waiting_global_read_lock(thd);
exit2:
@@ -1100,38 +1080,52 @@ err:
/*
- Change default database.
+ Change the current database.
SYNOPSIS
mysql_change_db()
- thd Thread handler
- name Databasename
- no_access_check True don't do access check. In this case name may be ""
+ thd thread handle
+ name database name
+ no_access_check if TRUE, don't do access check. In this
+ case name may be ""
DESCRIPTION
- Becasue the database name may have been given directly from the
- communication packet (in case of 'connect' or 'COM_INIT_DB')
- we have to do end space removal in this function.
+ Check that the database name corresponds to a valid and
+ existent database, check access rights (unless called with
+ no_access_check), and set the current database. This function
+ is called to change the current database upon user request
+ (COM_CHANGE_DB command) or temporarily, to execute a stored
+ routine.
NOTES
- Do as little as possible in this function, as it is not called for the
- replication slave SQL thread (for that thread, setting of thd->db is done
- in ::exec_event() methods of log_event.cc).
-
- This function does not send anything, including error messages to the
- client, if that should be sent to the client, call net_send_error after
- this function.
+ This function is not the only way to switch the database that
+ is currently employed. When the replication slave thread
+ switches the database before executing a query, it calls
+ thd->set_db directly. However, if the query, in turn, uses
+ a stored routine, the stored routine will use this function,
+ even if it's run on the slave.
+
+ This function allocates the name of the database on the system
+ heap: this is necessary to be able to uniformly change the
+    database from any module of the server. Up to 5.0, different
+ modules were using different memory to store the name of the
+ database, and this led to memory corruption: a stack pointer
+ set by Stored Procedures was used by replication after the
+ stack address was long gone.
+
+ This function does not send anything, including error
+ messages, to the client. If that should be sent to the client,
+ call net_send_error after this function.
RETURN VALUES
- 0 ok
+ 0 OK
1 error
*/
bool mysql_change_db(THD *thd, const char *name, bool no_access_check)
{
- int length, db_length;
- char *dbname= thd->slave_thread ? (char *) name :
- my_strdup((char *) name, MYF(MY_WME));
+ int path_length, db_length;
+ char *db_name;
char path[FN_REFLEN];
HA_CREATE_INFO create;
bool system_db= 0;
@@ -1143,32 +1137,35 @@ bool mysql_change_db(THD *thd, const char *name, bool no_access_check)
DBUG_ENTER("mysql_change_db");
DBUG_PRINT("enter",("name: '%s'",name));
- LINT_INIT(db_length);
-
- /* dbname can only be NULL if malloc failed */
- if (!dbname || !(db_length= strlen(dbname)))
+ if (name == NULL || name[0] == '\0' && no_access_check == FALSE)
{
- if (no_access_check && dbname)
- {
- /* Called from SP when orignal database was not set */
- system_db= 1;
- goto end;
- }
- if (!(thd->slave_thread))
- x_free(dbname); /* purecov: inspected */
- my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR),
- MYF(0)); /* purecov: inspected */
+ my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0));
DBUG_RETURN(1); /* purecov: inspected */
}
- if (check_db_name(dbname))
+ else if (name[0] == '\0')
{
- my_error(ER_WRONG_DB_NAME, MYF(0), dbname);
- if (!(thd->slave_thread))
- my_free(dbname, MYF(0));
+ /* Called from SP to restore the original database, which was NULL */
+ DBUG_ASSERT(no_access_check);
+ system_db= 1;
+ db_name= NULL;
+ db_length= 0;
+ goto end;
+ }
+ /*
+ Now we need to make a copy because check_db_name requires a
+ non-constant argument. TODO: fix check_db_name.
+ */
+ if ((db_name= my_strdup(name, MYF(MY_WME))) == NULL)
+ DBUG_RETURN(1); /* the error is set */
+ db_length= strlen(db_name);
+ if (check_db_name(db_name))
+ {
+ my_error(ER_WRONG_DB_NAME, MYF(0), db_name);
+ my_free(db_name, MYF(0));
DBUG_RETURN(1);
}
- DBUG_PRINT("info",("Use database: %s", dbname));
- if (!my_strcasecmp(system_charset_info, dbname, information_schema_name.str))
+ DBUG_PRINT("info",("Use database: %s", db_name));
+ if (!my_strcasecmp(system_charset_info, db_name, information_schema_name.str))
{
system_db= 1;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
@@ -1183,49 +1180,36 @@ bool mysql_change_db(THD *thd, const char *name, bool no_access_check)
if (test_all_bits(sctx->master_access, DB_ACLS))
db_access=DB_ACLS;
else
- db_access= (acl_get(sctx->host, sctx->ip, sctx->priv_user, dbname, 0) |
+ db_access= (acl_get(sctx->host, sctx->ip, sctx->priv_user, db_name, 0) |
sctx->master_access);
if (!(db_access & DB_ACLS) && (!grant_option ||
- check_grant_db(thd,dbname)))
+ check_grant_db(thd,db_name)))
{
my_error(ER_DBACCESS_DENIED_ERROR, MYF(0),
sctx->priv_user,
sctx->priv_host,
- dbname);
+ db_name);
mysql_log.write(thd, COM_INIT_DB, ER(ER_DBACCESS_DENIED_ERROR),
- sctx->priv_user, sctx->priv_host, dbname);
- if (!(thd->slave_thread))
- my_free(dbname,MYF(0));
+ sctx->priv_user, sctx->priv_host, db_name);
+ my_free(db_name, MYF(0));
DBUG_RETURN(1);
}
}
#endif
- (void) sprintf(path,"%s/%s",mysql_data_home,dbname);
- length=unpack_dirname(path,path); // Convert if not unix
- if (length && path[length-1] == FN_LIBCHAR)
- path[length-1]=0; // remove ending '\'
+ (void) sprintf(path,"%s/%s", mysql_data_home, db_name);
+ path_length= unpack_dirname(path, path); // Convert if not UNIX
+ if (path_length && path[path_length-1] == FN_LIBCHAR)
+ path[path_length-1]= '\0'; // remove ending '\'
if (my_access(path,F_OK))
{
- my_error(ER_BAD_DB_ERROR, MYF(0), dbname);
- if (!(thd->slave_thread))
- my_free(dbname,MYF(0));
+ my_error(ER_BAD_DB_ERROR, MYF(0), db_name);
+ my_free(db_name, MYF(0));
DBUG_RETURN(1);
}
end:
- if (!(thd->slave_thread))
- x_free(thd->db);
- if (dbname && dbname[0] == 0)
- {
- if (!(thd->slave_thread))
- my_free(dbname, MYF(0));
- thd->db= NULL;
- thd->db_length= 0;
- }
- else
- {
- thd->db= dbname; // THD::~THD will free this
- thd->db_length= db_length;
- }
+ x_free(thd->db);
+ DBUG_ASSERT(db_name == NULL || db_name[0] != '\0');
+ thd->reset_db(db_name, db_length); // THD::~THD will free this
#ifndef NO_EMBEDDED_ACCESS_CHECKS
if (!no_access_check)
sctx->db_access= db_access;
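Editorial note: since mysql_change_db() now never reports to the client itself, a caller that handles a client command has to forward the error explicitly. A hypothetical COM_INIT_DB-style call site (a sketch under that assumption, not code from the patch; db_name_from_client is an invented name):

    /* db_name_from_client is a hypothetical, NUL-terminated name. */
    if (mysql_change_db(thd, db_name_from_client, FALSE))
    {
      /* Wrong name, unknown database or access denied: the error is
         already set in THD, it only has to be sent to the client. */
      net_send_error(thd);
    }
    else
      send_ok(thd);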
diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc
index 1cd7778a053..0193d4d5355 100644
--- a/sql/sql_handler.cc
+++ b/sql/sql_handler.cc
@@ -254,7 +254,8 @@ err:
DESCRIPTION
Though this function takes a list of tables, only the first list entry
- will be closed. Broadcasts a COND_refresh condition.
+ will be closed.
+ Broadcasts refresh if it closed the table.
RETURN
FALSE ok
@@ -291,7 +292,7 @@ bool mysql_ha_close(THD *thd, TABLE_LIST *tables)
if (close_thread_table(thd, table_ptr))
{
/* Tell threads waiting for refresh that something has happened */
- VOID(pthread_cond_broadcast(&COND_refresh));
+ broadcast_refresh();
}
VOID(pthread_mutex_unlock(&LOCK_open));
}
@@ -608,7 +609,7 @@ err0:
tables are closed (if MYSQL_HA_FLUSH_ALL) is set.
If 'tables' is NULL and MYSQL_HA_FLUSH_ALL is not set,
all HANDLER tables marked for flush are closed.
- Broadcasts a COND_refresh condition, for every table closed.
+ Broadcasts refresh for every table closed.
NOTE
Since mysql_ha_flush() is called when the base table has to be closed,
@@ -704,7 +705,7 @@ int mysql_ha_flush(THD *thd, TABLE_LIST *tables, uint mode_flags,
MYSQL_HA_REOPEN_ON_USAGE mark for reopen.
DESCRIPTION
- Broadcasts a COND_refresh condition, for every table closed.
+ Broadcasts refresh if it closed the table.
The caller must lock LOCK_open.
RETURN
@@ -742,7 +743,7 @@ static int mysql_ha_flush_table(THD *thd, TABLE **table_ptr, uint mode_flags)
if (close_thread_table(thd, table_ptr))
{
/* Tell threads waiting for refresh that something has happened */
- VOID(pthread_cond_broadcast(&COND_refresh));
+ broadcast_refresh();
}
DBUG_RETURN(0);
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 8ffc6f53a43..ba0d2d00f2c 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -298,9 +298,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
{
if (thd->locked_tables)
{
- if (find_locked_table(thd,
- table_list->db ? table_list->db : thd->db,
- table_list->table_name))
+ DBUG_ASSERT(table_list->db); /* Must be set in the parser */
+ if (find_locked_table(thd, table_list->db, table_list->table_name))
{
my_error(ER_DELAYED_INSERT_TABLE_LOCKED, MYF(0),
table_list->table_name);
@@ -1329,8 +1328,8 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list)
TABLE *table;
DBUG_ENTER("delayed_get_table");
- if (!table_list->db)
- table_list->db=thd->db;
+ /* Must be set in the parser */
+ DBUG_ASSERT(table_list->db);
/* Find the thread which handles this table. */
if (!(tmp=find_handler(thd,table_list)))
@@ -1349,18 +1348,6 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list)
*/
if (! (tmp= find_handler(thd, table_list)))
{
- /*
- Avoid that a global read lock steps in while we are creating the
- new thread. It would block trying to open the table. Hence, the
- DI thread and this thread would wait until after the global
- readlock is gone. Since the insert thread needs to wait for a
- global read lock anyway, we do it right now. Note that
- wait_if_global_read_lock() sets a protection against a new
- global read lock when it succeeds. This needs to be released by
- start_waiting_global_read_lock().
- */
- if (wait_if_global_read_lock(thd, 0, 1))
- goto err;
if (!(tmp=new delayed_insert()))
{
my_error(ER_OUTOFMEMORY,MYF(0),sizeof(delayed_insert));
@@ -1369,15 +1356,15 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list)
pthread_mutex_lock(&LOCK_thread_count);
thread_count++;
pthread_mutex_unlock(&LOCK_thread_count);
- if (!(tmp->thd.db=my_strdup(table_list->db,MYF(MY_WME))) ||
- !(tmp->thd.query=my_strdup(table_list->table_name,MYF(MY_WME))))
+ tmp->thd.set_db(table_list->db, strlen(table_list->db));
+ tmp->thd.query= my_strdup(table_list->table_name,MYF(MY_WME));
+ if (tmp->thd.db == NULL || tmp->thd.query == NULL)
{
delete tmp;
my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0));
goto err1;
}
tmp->table_list= *table_list; // Needed to open table
- tmp->table_list.db= tmp->thd.db;
tmp->table_list.alias= tmp->table_list.table_name= tmp->thd.query;
tmp->lock();
pthread_mutex_lock(&tmp->mutex);
@@ -1401,11 +1388,6 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list)
pthread_cond_wait(&tmp->cond_client,&tmp->mutex);
}
pthread_mutex_unlock(&tmp->mutex);
- /*
- Release the protection against the global read lock and wake
- everyone, who might want to set a global read lock.
- */
- start_waiting_global_read_lock(thd);
thd->proc_info="got old table";
if (tmp->thd.killed)
{
@@ -1441,11 +1423,6 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list)
err1:
thd->fatal_error();
- /*
- Release the protection against the global read lock and wake
- everyone, who might want to set a global read lock.
- */
- start_waiting_global_read_lock(thd);
err:
pthread_mutex_unlock(&LOCK_delayed_create);
DBUG_RETURN(0); // Continue with normal insert
@@ -2676,7 +2653,7 @@ bool select_create::send_eof()
hash_delete(&open_cache,(byte*) table);
/* Tell threads waiting for refresh that something has happened */
if (version != refresh_version)
- VOID(pthread_cond_broadcast(&COND_refresh));
+ broadcast_refresh();
}
lock=0;
table=0;
@@ -2705,7 +2682,7 @@ void select_create::abort()
quick_rm_table(table_type, create_table->db, create_table->table_name);
/* Tell threads waiting for refresh that something has happened */
if (version != refresh_version)
- VOID(pthread_cond_broadcast(&COND_refresh));
+ broadcast_refresh();
}
else if (!create_info->table_existed)
close_temporary_table(thd, create_table->db, create_table->table_name);
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 0bbfc64e272..47af816f41d 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -140,6 +140,7 @@ void lex_start(THD *thd, uchar *buf,uint length)
lex->select_lex.link_next= lex->select_lex.slave= lex->select_lex.next= 0;
lex->select_lex.link_prev= (st_select_lex_node**)&(lex->all_selects_list);
lex->select_lex.options= 0;
+ lex->select_lex.sql_cache= SELECT_LEX::SQL_CACHE_UNSPECIFIED;
lex->select_lex.init_order();
lex->select_lex.group_list.empty();
lex->describe= 0;
@@ -1063,6 +1064,7 @@ int MYSQLlex(void *arg, void *yythd)
void st_select_lex_node::init_query()
{
options= 0;
+ sql_cache= SQL_CACHE_UNSPECIFIED;
linkage= UNSPECIFIED_TYPE;
no_error= no_table_names_allowed= 0;
uncacheable= 0;
@@ -1139,6 +1141,7 @@ void st_select_lex::init_select()
table_join_options= 0;
in_sum_expr= with_wild= 0;
options= 0;
+ sql_cache= SQL_CACHE_UNSPECIFIED;
braces= 0;
when_list.empty();
expr_list.empty();
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index c75aa8f31b9..285e1d6d5a6 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -311,6 +311,14 @@ protected:
public:
ulonglong options;
+
+ /*
+    In sql_cache we store the SQL_CACHE flag as specified by the user, to
+    be able to restore the SELECT statement from internal structures.
+ */
+ enum e_sql_cache { SQL_CACHE_UNSPECIFIED, SQL_NO_CACHE, SQL_CACHE };
+ e_sql_cache sql_cache;
+
/*
result of this query can't be cached, bit field, can be :
UNCACHEABLE_DEPENDENT
@@ -758,6 +766,11 @@ public:
*this= *state;
}
+ /*
+ Direct addition to the list of query tables.
+ If you are using this function, you must ensure that the table
+    object, in particular the table->db member, is initialized.
+ */
void add_to_query_tables(TABLE_LIST *table)
{
*(table->prev_global= query_tables_last)= table;
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index a78d55af0ce..7c07b4f8847 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -93,8 +93,6 @@ const char *xa_state_names[]={
"NON-EXISTING", "ACTIVE", "IDLE", "PREPARED"
};
-static char empty_c_string[1]= {0}; // Used for not defined 'db'
-
#ifdef __WIN__
static void test_signal(int sig_ptr)
{
@@ -300,8 +298,7 @@ int check_user(THD *thd, enum enum_server_command command,
thd->db is saved in caller and needs to be freed by caller if this
function returns 0
*/
- thd->db= 0;
- thd->db_length= 0;
+ thd->reset_db(NULL, 0);
if (mysql_change_db(thd, db, FALSE))
{
/* Send the error to the client */
@@ -341,9 +338,8 @@ int check_user(THD *thd, enum enum_server_command command,
if connect failed. Also in case of 'CHANGE USER' failure, current
database will be switched to 'no database selected'.
*/
- thd->db= 0;
- thd->db_length= 0;
-
+ thd->reset_db(NULL, 0);
+
USER_RESOURCES ur;
int res= acl_getroot(thd, &ur, passwd, passwd_len);
#ifndef EMBEDDED_LIBRARY
@@ -775,6 +771,37 @@ static void reset_mqh(LEX_USER *lu, bool get_them= 0)
#endif /* NO_EMBEDDED_ACCESS_CHECKS */
}
+void thd_init_client_charset(THD *thd, uint cs_number)
+{
+ /*
+ Use server character set and collation if
+ - opt_character_set_client_handshake is not set
+ - client has not specified a character set
+    - client character set is the same as the server's
+    - client character set doesn't exist in the server
+ */
+ if (!opt_character_set_client_handshake ||
+ !(thd->variables.character_set_client= get_charset(cs_number, MYF(0))) ||
+ !my_strcasecmp(&my_charset_latin1,
+ global_system_variables.character_set_client->name,
+ thd->variables.character_set_client->name))
+ {
+ thd->variables.character_set_client=
+ global_system_variables.character_set_client;
+ thd->variables.collation_connection=
+ global_system_variables.collation_connection;
+ thd->variables.character_set_results=
+ global_system_variables.character_set_results;
+ }
+ else
+ {
+ thd->variables.character_set_results=
+ thd->variables.collation_connection=
+ thd->variables.character_set_client;
+ }
+}
+
+
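Editorial note: the factored-out helper above is presumably shared with the embedded server (libmysqld/lib_sql.cc is touched by this changeset), which is why the charset negotiation now lives in one place. The call site, shown further down in this hunk, boils down to:

    /* Sketch of the call site (mirrors the handshake code in this hunk). */
    thd_init_client_charset(thd, (uint) net->read_pos[8]); /* charset number from client */
    thd->update_charset();                                 /* refresh derived charset state */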
/*
Perform handshake, authorize client and update thd ACL variables.
SYNOPSIS
@@ -910,33 +937,7 @@ static int check_connection(THD *thd)
thd->client_capabilities|= ((ulong) uint2korr(net->read_pos+2)) << 16;
thd->max_client_packet_length= uint4korr(net->read_pos+4);
DBUG_PRINT("info", ("client_character_set: %d", (uint) net->read_pos[8]));
- /*
- Use server character set and collation if
- - opt_character_set_client_handshake is not set
- - client has not specified a character set
- - client character set is the same as the servers
- - client character set doesn't exists in server
- */
- if (!opt_character_set_client_handshake ||
- !(thd->variables.character_set_client=
- get_charset((uint) net->read_pos[8], MYF(0))) ||
- !my_strcasecmp(&my_charset_latin1,
- global_system_variables.character_set_client->name,
- thd->variables.character_set_client->name))
- {
- thd->variables.character_set_client=
- global_system_variables.character_set_client;
- thd->variables.collation_connection=
- global_system_variables.collation_connection;
- thd->variables.character_set_results=
- global_system_variables.character_set_results;
- }
- else
- {
- thd->variables.character_set_results=
- thd->variables.collation_connection=
- thd->variables.character_set_client;
- }
+ thd_init_client_charset(thd, (uint) net->read_pos[8]);
thd->update_charset();
end= (char*) net->read_pos+32;
}
@@ -1316,19 +1317,6 @@ end:
DBUG_RETURN(0);
}
- /* This works because items are allocated with sql_alloc() */
-
-void free_items(Item *item)
-{
- Item *next;
- DBUG_ENTER("free_items");
- for (; item ; item=next)
- {
- next=item->next;
- item->delete_self();
- }
- DBUG_VOID_RETURN;
-}
/* This works because items are allocated with sql_alloc() */
@@ -1340,7 +1328,26 @@ void cleanup_items(Item *item)
DBUG_VOID_RETURN;
}
-int mysql_table_dump(THD* thd, char* db, char* tbl_name, int fd)
+/*
+ Handle COM_TABLE_DUMP command
+
+ SYNOPSIS
+ mysql_table_dump
+ thd thread handle
+ db database name or an empty string. If empty,
+ the current database of the connection is used
+ tbl_name name of the table to dump
+
+ NOTES
+ This function is written to handle one specific command only.
+
+ RETURN VALUE
+ 0 success
+ 1 error, the error message is set in THD
+*/
+
+static
+int mysql_table_dump(THD* thd, char* db, char* tbl_name)
{
TABLE* table;
TABLE_LIST* table_list;
@@ -1377,7 +1384,7 @@ int mysql_table_dump(THD* thd, char* db, char* tbl_name, int fd)
goto err;
}
net_flush(&thd->net);
- if ((error= table->file->dump(thd,fd)))
+ if ((error= table->file->dump(thd,-1)))
my_error(ER_GET_ERRNO, MYF(0), error);
err:
@@ -1627,7 +1634,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
}
tbl_name= strmake(db, packet + 1, db_len)+1;
strmake(tbl_name, packet + db_len + 2, tbl_len);
- mysql_table_dump(thd, db, tbl_name, -1);
+ mysql_table_dump(thd, db, tbl_name);
break;
}
case COM_CHANGE_USER:
@@ -1801,11 +1808,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
statistic_increment(thd->status_var.com_stat[SQLCOM_SHOW_FIELDS],
&LOCK_status);
bzero((char*) &table_list,sizeof(table_list));
- if (!(table_list.db=thd->db))
- {
- my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0));
+ if (thd->copy_db_to(&table_list.db, 0))
break;
- }
pend= strend(packet);
thd->convert_string(&conv_name, system_charset_info,
packet, (uint) (pend-packet), thd->charset());
@@ -2152,6 +2156,34 @@ void log_slow_statement(THD *thd)
}
+/*
+ Create a TABLE_LIST object for an INFORMATION_SCHEMA table.
+
+ SYNOPSIS
+ prepare_schema_table()
+ thd thread handle
+ lex current lex
+ table_ident table alias if it's used
+ schema_table_idx the type of the INFORMATION_SCHEMA table to be
+ created
+
+ DESCRIPTION
+ This function is used in the parser to convert a SHOW or DESCRIBE
+ table_name command to a SELECT from INFORMATION_SCHEMA.
+ It prepares a SELECT_LEX and a TABLE_LIST object to represent the
+ given command as a SELECT parse tree.
+
+ NOTES
+ Due to the way this function works with memory and LEX it cannot
+ be used outside the parser (parse tree transformations outside
+ the parser break PS and SP).
+
+ RETURN VALUE
+ 0 success
+ 1 out of memory or SHOW commands are not allowed
+ in this version of the server.
+*/
+
int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident,
enum enum_schema_tables schema_table_idx)
{
@@ -2179,13 +2211,13 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident,
DBUG_RETURN(1);
#else
{
- char *db= lex->select_lex.db ? lex->select_lex.db : thd->db;
- if (!db)
+ char *db;
+ if (lex->select_lex.db == NULL &&
+ thd->copy_db_to(&lex->select_lex.db, 0))
{
- my_message(ER_NO_DB_ERROR,
- ER(ER_NO_DB_ERROR), MYF(0)); /* purecov: inspected */
- DBUG_RETURN(1); /* purecov: inspected */
+ DBUG_RETURN(1);
}
+ db= lex->select_lex.db;
remove_escape(db); // Fix escaped '_'
if (check_db_name(db))
{
@@ -2202,11 +2234,6 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident,
db);
DBUG_RETURN(1);
}
- /*
- We need to do a copy to make this prepared statement safe if this
- was thd->db
- */
- lex->select_lex.db= thd->strdup(db);
break;
}
#endif
@@ -2337,17 +2364,37 @@ static void reset_one_shot_variables(THD *thd)
}
-/****************************************************************************
-** mysql_execute_command
-** Execute command saved in thd and current_lex->sql_command
-****************************************************************************/
+/*
+ Execute command saved in thd and current_lex->sql_command
+
+ SYNOPSIS
+ mysql_execute_command()
+ thd Thread handle
+
+ IMPLEMENTATION
+
+ Before every operation that can request a write lock for a table
+  wait if a global read lock exists. However, do not wait if this
+ thread has locked tables already. No new locks can be requested
+ until the other locks are released. The thread that requests the
+ global read lock waits for write locked tables to become unlocked.
+
+ Note that wait_if_global_read_lock() sets a protection against a new
+ global read lock when it succeeds. This needs to be released by
+ start_waiting_global_read_lock() after the operation.
+
+ RETURN
+ FALSE OK
+ TRUE Error
+*/
bool
mysql_execute_command(THD *thd)
{
- bool res= FALSE;
- int result= 0;
- LEX *lex= thd->lex;
+ bool res= FALSE;
+ bool need_start_waiting= FALSE; // have protection against global read lock
+ int result= 0;
+ LEX *lex= thd->lex;
/* first SELECT_LEX (have special meaning for many of non-SELECTcommands) */
SELECT_LEX *select_lex= &lex->select_lex;
/* first table of first SELECT_LEX */
@@ -2739,8 +2786,8 @@ mysql_execute_command(THD *thd)
case SQLCOM_LOAD_MASTER_TABLE:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (!first_table->db)
- first_table->db= thd->db;
+ DBUG_ASSERT(first_table->db); /* Must be set in the parser */
+
if (check_access(thd, CREATE_ACL, first_table->db,
&first_table->grant.privilege, 0, 0,
test(first_table->schema_table)))
@@ -2832,7 +2879,8 @@ mysql_execute_command(THD *thd)
TABLE in the same way. That way we avoid that a new table is
created during a gobal read lock.
*/
- if (wait_if_global_read_lock(thd, 0, 1))
+ if (!thd->locked_tables &&
+ !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
{
res= 1;
goto end_with_restore_list;
@@ -2857,7 +2905,7 @@ mysql_execute_command(THD *thd)
{
update_non_unique_table_error(create_table, "CREATE", duplicate);
res= 1;
- goto end_with_restart_wait;
+ goto end_with_restore_list;
}
}
/* If we create merge table, we have to test tables in merge, too */
@@ -2873,7 +2921,7 @@ mysql_execute_command(THD *thd)
{
update_non_unique_table_error(tab, "CREATE", duplicate);
res= 1;
- goto end_with_restart_wait;
+ goto end_with_restore_list;
}
}
}
@@ -2915,13 +2963,6 @@ mysql_execute_command(THD *thd)
send_ok(thd);
}
-end_with_restart_wait:
- /*
- Release the protection against the global read lock and wake
- everyone, who might want to set a global read lock.
- */
- start_waiting_global_read_lock(thd);
-
/* put tables back for PS rexecuting */
end_with_restore_list:
lex->link_first_table_back(create_table, link_to_local);
@@ -2988,25 +3029,8 @@ end_with_restore_list:
my_error(ER_WRONG_TABLE_NAME, MYF(0), lex->name);
goto error;
}
- if (!select_lex->db)
- {
- /*
- In the case of ALTER TABLE ... RENAME we should supply the
- default database if the new name is not explicitly qualified
- by a database. (Bug #11493)
- */
- if (lex->alter_info.flags & ALTER_RENAME)
- {
- if (! thd->db)
- {
- my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0));
- goto error;
- }
- select_lex->db= thd->db;
- }
- else
- select_lex->db= first_table->db;
- }
+ /* Must be set in the parser */
+ DBUG_ASSERT(select_lex->db);
if (check_access(thd, ALTER_ACL, first_table->db,
&first_table->grant.privilege, 0, 0,
test(first_table->schema_table)) ||
@@ -3033,12 +3057,25 @@ end_with_restore_list:
}
}
/* Don't yet allow changing of symlinks with ALTER TABLE */
+ if (lex->create_info.data_file_name)
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, 0,
+ "DATA DIRECTORY option ignored");
+ if (lex->create_info.index_file_name)
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, 0,
+ "INDEX DIRECTORY option ignored");
lex->create_info.data_file_name=lex->create_info.index_file_name=0;
/* ALTER TABLE ends previous transaction */
if (end_active_trans(thd))
goto error;
else
{
+ if (!thd->locked_tables &&
+ !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
+ {
+ res= 1;
+ break;
+ }
+
thd->enable_slow_log= opt_log_slow_admin_statements;
res= mysql_alter_table(thd, select_lex->db, lex->name,
&lex->create_info,
@@ -3296,6 +3333,14 @@ end_with_restore_list:
break;
/* Skip first table, which is the table we are inserting in */
select_lex->context.table_list= first_table->next_local;
+
+ if (!thd->locked_tables &&
+ !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
+ {
+ res= 1;
+ break;
+ }
+
res= mysql_insert(thd, all_tables, lex->field_list, lex->many_values,
lex->update_list, lex->value_list,
lex->duplicates, lex->ignore);
@@ -3319,6 +3364,14 @@ end_with_restore_list:
select_lex->options|= SELECT_NO_UNLOCK;
unit->set_limit(select_lex);
+
+ if (! thd->locked_tables &&
+ ! (need_start_waiting= ! wait_if_global_read_lock(thd, 0, 1)))
+ {
+ res= 1;
+ break;
+ }
+
if (!(res= open_and_lock_tables(thd, all_tables)))
{
/* Skip first table, which is the table we are inserting in */
@@ -3386,6 +3439,14 @@ end_with_restore_list:
break;
DBUG_ASSERT(select_lex->offset_limit == 0);
unit->set_limit(select_lex);
+
+ if (!thd->locked_tables &&
+ !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
+ {
+ res= 1;
+ break;
+ }
+
res = mysql_delete(thd, all_tables, select_lex->where,
&select_lex->order_list,
unit->select_limit_cnt, select_lex->options,
@@ -3399,6 +3460,13 @@ end_with_restore_list:
(TABLE_LIST *)thd->lex->auxilliary_table_list.first;
multi_delete *result;
+ if (!thd->locked_tables &&
+ !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
+ {
+ res= 1;
+ break;
+ }
+
if ((res= multi_delete_precheck(thd, all_tables)))
break;
@@ -3676,12 +3744,8 @@ end_with_restore_list:
}
case SQLCOM_ALTER_DB:
{
- char *db= lex->name ? lex->name : thd->db;
- if (!db)
- {
- my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0));
- break;
- }
+ char *db= lex->name;
+ DBUG_ASSERT(db); /* Must be set in the parser */
if (!strip_sp(db) || check_db_name(db))
{
my_error(ER_WRONG_DB_NAME, MYF(0), lex->name);
@@ -4130,23 +4194,11 @@ end_with_restore_list:
case SQLCOM_CREATE_SPFUNCTION:
{
uint namelen;
- char *name, *db;
+ char *name;
int result;
DBUG_ASSERT(lex->sphead != 0);
-
- if (!lex->sphead->m_db.str || !lex->sphead->m_db.str[0])
- {
- if (!thd->db)
- {
- my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0));
- delete lex->sphead;
- lex->sphead= 0;
- goto error;
- }
- lex->sphead->m_db.length= strlen(thd->db);
- lex->sphead->m_db.str= thd->db;
- }
+ DBUG_ASSERT(lex->sphead->m_db.str); /* Must be initialized in the parser */
if (check_access(thd, CREATE_PROC_ACL, lex->sphead->m_db.str, 0, 0, 0,
is_schema_db(lex->sphead->m_db.str)))
@@ -4263,41 +4315,27 @@ end_with_restore_list:
}
#endif /* NO_EMBEDDED_ACCESS_CHECKS */
- /*
- We need to copy name and db in order to use them for
- check_routine_access which is called after lex->sphead has
- been deleted.
- */
- name= thd->strdup(name);
- lex->sphead->m_db.str= db= thd->strmake(lex->sphead->m_db.str,
- lex->sphead->m_db.length);
res= (result= lex->sphead->create(thd));
if (result == SP_OK)
{
- /*
- We must cleanup the unit and the lex here because
- sp_grant_privileges calls (indirectly) db_find_routine,
- which in turn may call MYSQLparse with THD::lex.
- TODO: fix db_find_routine to use a temporary lex.
- */
- lex->unit.cleanup();
- delete lex->sphead;
- lex->sphead= 0;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
/* only add privileges if really neccessary */
if (sp_automatic_privileges && !opt_noacl &&
check_routine_access(thd, DEFAULT_CREATE_PROC_ACLS,
- db, name,
+ lex->sphead->m_db.str, name,
lex->sql_command == SQLCOM_CREATE_PROCEDURE, 1))
{
close_thread_tables(thd);
- if (sp_grant_privileges(thd, db, name,
+ if (sp_grant_privileges(thd, lex->sphead->m_db.str, name,
lex->sql_command == SQLCOM_CREATE_PROCEDURE))
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_PROC_AUTO_GRANT_FAIL,
ER(ER_PROC_AUTO_GRANT_FAIL));
}
#endif
+ lex->unit.cleanup();
+ delete lex->sphead;
+ lex->sphead= 0;
send_ok(thd);
}
else
@@ -4712,7 +4750,8 @@ end_with_restore_list:
view_store_options(thd, first_table, &buff);
buff.append(STRING_WITH_LEN("VIEW "));
/* Test if user supplied a db (ie: we did not use thd->db) */
- if (first_table->db != thd->db && first_table->db[0])
+ if (first_table->db && first_table->db[0] &&
+ (thd->db == NULL || strcmp(first_table->db, thd->db)))
{
append_identifier(thd, &buff, first_table->db,
first_table->db_length);
@@ -4965,10 +5004,22 @@ end_with_restore_list:
if (lex->sql_command != SQLCOM_CALL && lex->sql_command != SQLCOM_EXECUTE &&
uc_update_queries[lex->sql_command]<2)
thd->row_count_func= -1;
- DBUG_RETURN(res || thd->net.report_error);
+
+ goto end;
error:
- DBUG_RETURN(1);
+ res= TRUE;
+
+end:
+ if (need_start_waiting)
+ {
+ /*
+ Release the protection against the global read lock and wake
+ everyone, who might want to set a global read lock.
+ */
+ start_waiting_global_read_lock(thd);
+ }
+ DBUG_RETURN(res || thd->net.report_error);
}
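Editorial note: the global-read-lock handling described in the IMPLEMENTATION comment above is worth seeing in isolation. The protection is taken at most once, only for statements that will write and only when no tables are locked, and it is released at the single exit point. A condensed sketch (names as in the patch; not compilable on its own):

    bool need_start_waiting= FALSE;    /* do we hold the GRL protection? */

    /* Before each writing statement: let a pending FLUSH TABLES WITH READ
       LOCK go first, unless this thread already holds table locks. */
    if (!thd->locked_tables &&
        !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
      return TRUE;                     /* killed or error while waiting */

    /* ... run the statement ... */

    /* Single exit point: release the protection exactly once. */
    if (need_start_waiting)
      start_waiting_global_read_lock(thd);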
@@ -5215,8 +5266,26 @@ bool check_global_access(THD *thd, ulong want_access)
/*
- Check the privilege for all used tables. Table privileges are cached
- in the table list for GRANT checking
+ Check the privilege for all used tables.
+
+  SYNOPSIS
+ check_table_access()
+ thd Thread context
+ want_access Privileges requested
+ tables List of tables to be checked
+ no_errors FALSE/TRUE - report/don't report error to
+ the client (using my_error() call).
+
+ NOTES
+ Table privileges are cached in the table list for GRANT checking.
+    This function assumes that the table list used and the
+    thd->lex->query_tables_own_last value correspond to each other
+    (the latter should be either 0 or point to the next_global member
+    of one of the elements of this table list).
+
+ RETURN VALUE
+ FALSE - OK
+ TRUE - Access denied
*/
bool
@@ -5264,7 +5333,7 @@ check_table_access(THD *thd, ulong want_access,TABLE_LIST *tables,
(want_access & ~EXTRA_ACL) &&
thd->db)
tables->grant.privilege= want_access;
- else if (tables->db && tables->db == thd->db)
+ else if (tables->db && thd->db && strcmp(tables->db, thd->db) == 0)
{
if (found && !grant_option) // db already checked
tables->grant.privilege=found_access;
@@ -5412,22 +5481,25 @@ bool check_merge_table_access(THD *thd, char *db,
static bool check_db_used(THD *thd,TABLE_LIST *tables)
{
+ char *current_db= NULL;
for (; tables; tables= tables->next_global)
{
- if (!tables->db)
+ if (tables->db == NULL)
{
- if (!(tables->db=thd->db))
- {
- my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR),
- MYF(0)); /* purecov: tested */
- return TRUE; /* purecov: tested */
- }
+ /*
+ This code never works and should be removed in 5.1. All tables
+        that are added to the list of tables should already have their
+ database field initialized properly (see st_lex::add_table_to_list).
+ */
+ DBUG_ASSERT(0);
+ if (thd->copy_db_to(&current_db, 0))
+ return TRUE;
+ tables->db= current_db;
}
}
return FALSE;
}
-
/****************************************************************************
Check stack size; Send error if there isn't enough stack to continue
****************************************************************************/
@@ -6047,19 +6119,8 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
ptr->db= table->db.str;
ptr->db_length= table->db.length;
}
- else if (thd->db)
- {
- ptr->db= thd->db;
- ptr->db_length= thd->db_length;
- }
- else
- {
- /* The following can't be "" as we may do 'casedn_str()' on it */
- ptr->db= empty_c_string;
- ptr->db_length= 0;
- }
- if (thd->stmt_arena->is_stmt_prepare_or_first_sp_execute())
- ptr->db= thd->strdup(ptr->db);
+ else if (thd->copy_db_to(&ptr->db, &ptr->db_length))
+ DBUG_RETURN(0);
ptr->alias= alias_str;
if (lower_case_table_names && table->table.length)
@@ -7081,14 +7142,28 @@ bool multi_delete_precheck(THD *thd, TABLE_LIST *tables)
SELECT_LEX *select_lex= &thd->lex->select_lex;
TABLE_LIST *aux_tables=
(TABLE_LIST *)thd->lex->auxilliary_table_list.first;
+ TABLE_LIST **save_query_tables_own_last= thd->lex->query_tables_own_last;
DBUG_ENTER("multi_delete_precheck");
/* sql_yacc guarantees that tables and aux_tables are not zero */
DBUG_ASSERT(aux_tables != 0);
if (check_db_used(thd, tables) || check_db_used(thd,aux_tables) ||
- check_table_access(thd,SELECT_ACL, tables,0) ||
- check_table_access(thd,DELETE_ACL, aux_tables,0))
+ check_table_access(thd, SELECT_ACL, tables, 0))
+ DBUG_RETURN(TRUE);
+
+ /*
+    Since the aux_tables list is not part of the LEX::query_tables list, we
+    have to juggle with the LEX::query_tables_own_last value to be able
+    to call check_table_access() safely.
+ */
+ thd->lex->query_tables_own_last= 0;
+ if (check_table_access(thd, DELETE_ACL, aux_tables, 0))
+ {
+ thd->lex->query_tables_own_last= save_query_tables_own_last;
DBUG_RETURN(TRUE);
+ }
+ thd->lex->query_tables_own_last= save_query_tables_own_last;
+
if ((thd->options & OPTION_SAFE_UPDATES) && !select_lex->where)
{
my_message(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE,
@@ -7236,6 +7311,8 @@ bool insert_precheck(THD *thd, TABLE_LIST *tables)
my_message(ER_WRONG_VALUE_COUNT, ER(ER_WRONG_VALUE_COUNT), MYF(0));
DBUG_RETURN(TRUE);
}
+ if (check_db_used(thd, tables))
+ DBUG_RETURN(TRUE);
DBUG_RETURN(FALSE);
}
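Editorial note: the multi_delete_precheck() hunk above shows the save/clear/restore dance around LEX::query_tables_own_last that the NOTES section of check_table_access() asks for. Isolated, the pattern is simply (a sketch, mirroring the patch; not compilable on its own):

    /* aux_tables is not linked into LEX::query_tables, so temporarily
       pretend there are no "own" tables while checking it. */
    TABLE_LIST **save_query_tables_own_last= thd->lex->query_tables_own_last;

    thd->lex->query_tables_own_last= 0;
    bool failed= check_table_access(thd, DELETE_ACL, aux_tables, 0);
    thd->lex->query_tables_own_last= save_query_tables_own_last;

    if (failed)
      DBUG_RETURN(TRUE);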
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 9f317842d98..41ff9185cfb 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -163,6 +163,10 @@ static Item* part_of_refkey(TABLE *form,Field *field);
uint find_shortest_key(TABLE *table, const key_map *usable_keys);
static bool test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,
ha_rows select_limit, bool no_changes);
+static bool list_contains_unique_index(TABLE *table,
+ bool (*find_func) (Field *, void *), void *data);
+static bool find_field_in_item_list (Field *field, void *data);
+static bool find_field_in_order_list (Field *field, void *data);
static int create_sort_index(THD *thd, JOIN *join, ORDER *order,
ha_rows filesort_limit, ha_rows select_limit);
static int remove_duplicates(JOIN *join,TABLE *entry,List<Item> &fields,
@@ -858,6 +862,40 @@ JOIN::optimize()
if (old_group_list && !group_list)
select_distinct= 0;
}
+ /*
+ Check if we can optimize away GROUP BY/DISTINCT.
+ We can do that if there are no aggregate functions and the
+ fields in DISTINCT clause (if present) and/or columns in GROUP BY
+ (if present) contain direct references to all key parts of
+    a unique index (in whatever order).
+    Note that the unique keys for DISTINCT and GROUP BY do not have to
+    be the same (as long as they are unique).
+
+ The FROM clause must contain a single non-constant table.
+ */
+ if (tables - const_tables == 1 && (group_list || select_distinct) &&
+ !tmp_table_param.sum_func_count &&
+ (!join_tab[const_tables].select ||
+ !join_tab[const_tables].select->quick ||
+ join_tab[const_tables].select->quick->get_type() !=
+ QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX))
+ {
+ if (group_list &&
+ list_contains_unique_index(join_tab[const_tables].table,
+ find_field_in_order_list,
+ (void *) group_list))
+ {
+ group_list= 0;
+ group= 0;
+ }
+ if (select_distinct &&
+ list_contains_unique_index(join_tab[const_tables].table,
+ find_field_in_item_list,
+ (void *) &fields_list))
+ {
+ select_distinct= 0;
+ }
+ }
if (!group_list && group)
{
order=0; // The output has only one row
@@ -11209,6 +11247,140 @@ test_if_subkey(ORDER *order, TABLE *table, uint ref, uint ref_key_parts,
return best;
}
+
+/*
+ Check if GROUP BY/DISTINCT can be optimized away because the set is
+ already known to be distinct.
+
+ SYNOPSIS
+ list_contains_unique_index ()
+ table The table to operate on.
+ find_func function to iterate over the list and search
+ for a field
+
+ DESCRIPTION
+ Used in removing the GROUP BY/DISTINCT of the following types of
+ statements:
+ SELECT [DISTINCT] <unique_key_cols>... FROM <single_table_ref>
+ [GROUP BY <unique_key_cols>,...]
+
+    If (a,b,c) is distinct,
+    then <any combination of a,b,c>,{whatever} is also distinct
+
+ This function checks if all the key parts of any of the unique keys
+    of the table are referenced by a list: either the select list
+    through find_field_in_item_list or the GROUP BY list through
+    find_field_in_order_list.
+    If the above holds, then we can safely remove the GROUP BY/DISTINCT,
+    as no result set can be more distinct than a unique key.
+
+ RETURN VALUE
+ 1 found
+ 0 not found.
+*/
+
+static bool
+list_contains_unique_index(TABLE *table,
+ bool (*find_func) (Field *, void *), void *data)
+{
+ for (uint keynr= 0; keynr < table->s->keys; keynr++)
+ {
+ if (keynr == table->s->primary_key ||
+ (table->key_info[keynr].flags & HA_NOSAME))
+ {
+ KEY *keyinfo= table->key_info + keynr;
+ KEY_PART_INFO *key_part, *key_part_end;
+
+ for (key_part=keyinfo->key_part,
+ key_part_end=key_part+ keyinfo->key_parts;
+ key_part < key_part_end;
+ key_part++)
+ {
+ if (!find_func(key_part->field, data))
+ break;
+ }
+ if (key_part == key_part_end)
+ return 1;
+ }
+ }
+ return 0;
+}
+
+
+/*
+ Helper function for list_contains_unique_index.
+ Find a field reference in a list of ORDER structures.
+
+ SYNOPSIS
+ find_field_in_order_list ()
+ field The field to search for.
+      data                  ORDER *. The list to search in
+
+ DESCRIPTION
+ Finds a direct reference of the Field in the list.
+
+ RETURN VALUE
+ 1 found
+ 0 not found.
+*/
+
+static bool
+find_field_in_order_list (Field *field, void *data)
+{
+ ORDER *group= (ORDER *) data;
+ bool part_found= 0;
+ for (ORDER *tmp_group= group; tmp_group; tmp_group=tmp_group->next)
+ {
+ Item *item= (*tmp_group->item)->real_item();
+ if (item->type() == Item::FIELD_ITEM &&
+ ((Item_field*) item)->field->eq(field))
+ {
+ part_found= 1;
+ break;
+ }
+ }
+ return part_found;
+}
+
+
+/*
+ Helper function for list_contains_unique_index.
+ Find a field reference in a dynamic list of Items.
+
+ SYNOPSIS
+ find_field_in_item_list ()
+ field in The field to search for.
+      data              in        List<Item> *. The list to search in
+
+ DESCRIPTION
+ Finds a direct reference of the Field in the list.
+
+ RETURN VALUE
+ 1 found
+ 0 not found.
+*/
+
+static bool
+find_field_in_item_list (Field *field, void *data)
+{
+ List<Item> *fields= (List<Item> *) data;
+ bool part_found= 0;
+ List_iterator<Item> li(*fields);
+ Item *item;
+
+ while ((item= li++))
+ {
+ if (item->type() == Item::FIELD_ITEM &&
+ ((Item_field*) item)->field->eq(field))
+ {
+ part_found= 1;
+ break;
+ }
+ }
+ return part_found;
+}
+
+
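Editorial note: the optimization above drops DISTINCT/GROUP BY when the listed columns cover every key part of some unique index; for instance, with UNIQUE KEY (a,b) on the only table, SELECT DISTINCT a, b, c needs no duplicate elimination. The core check is a plain "does the list cover all parts of any unique key" predicate; a self-contained toy version in standard C++, independent of the server classes and purely illustrative, is:

    #include <algorithm>
    #include <iostream>
    #include <string>
    #include <vector>

    /* Toy model: a unique key is just the set of column names it covers. */
    typedef std::vector<std::string> Columns;

    /* True if some unique key has all of its parts referenced by 'listed'. */
    static bool list_covers_unique_key(const std::vector<Columns> &unique_keys,
                                       const Columns &listed)
    {
      for (size_t k= 0; k < unique_keys.size(); k++)
      {
        const Columns &key= unique_keys[k];
        bool all_found= true;
        for (size_t p= 0; p < key.size(); p++)
          if (std::find(listed.begin(), listed.end(), key[p]) == listed.end())
          {
            all_found= false;
            break;
          }
        if (all_found)
          return true;                 /* result rows are already distinct */
      }
      return false;
    }

    int main()
    {
      std::vector<Columns> keys;
      Columns ab;
      ab.push_back("a");
      ab.push_back("b");
      keys.push_back(ab);              /* UNIQUE KEY (a,b) */

      Columns select_list;             /* SELECT DISTINCT a, b, c ... */
      select_list.push_back("a");
      select_list.push_back("b");
      select_list.push_back("c");

      std::cout << (list_covers_unique_key(keys, select_list)
                    ? "DISTINCT can be removed\n"
                    : "DISTINCT must stay\n");
      return 0;
    }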
/*
Test if we can skip the ORDER BY by using an index.
@@ -14277,10 +14449,19 @@ void st_select_lex::print(THD *thd, String *str)
str->append(STRING_WITH_LEN("sql_buffer_result "));
if (options & OPTION_FOUND_ROWS)
str->append(STRING_WITH_LEN("sql_calc_found_rows "));
- if (!thd->lex->safe_to_cache_query)
- str->append(STRING_WITH_LEN("sql_no_cache "));
- if (options & OPTION_TO_QUERY_CACHE)
- str->append(STRING_WITH_LEN("sql_cache "));
+ switch (sql_cache)
+ {
+ case SQL_NO_CACHE:
+ str->append(STRING_WITH_LEN("sql_no_cache "));
+ break;
+ case SQL_CACHE:
+ str->append(STRING_WITH_LEN("sql_cache "));
+ break;
+ case SQL_CACHE_UNSPECIFIED:
+ break;
+ default:
+ DBUG_ASSERT(0);
+ }
//Item List
bool first= 1;
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index ca6a8ddfb6b..60d50c415d5 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -41,6 +41,8 @@ static TYPELIB grant_types = { sizeof(grant_names)/sizeof(char **),
static int
store_create_info(THD *thd, TABLE_LIST *table_list, String *packet);
+static void
+append_algorithm(TABLE_LIST *table, String *buff);
static int
view_store_create_info(THD *thd, TABLE_LIST *table, String *buff);
static bool schema_table_store_record(THD *thd, TABLE *table);
@@ -1099,6 +1101,28 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet)
void
view_store_options(THD *thd, TABLE_LIST *table, String *buff)
{
+ append_algorithm(table, buff);
+ append_definer(thd, buff, &table->definer.user, &table->definer.host);
+ if (table->view_suid)
+ buff->append(STRING_WITH_LEN("SQL SECURITY DEFINER "));
+ else
+ buff->append(STRING_WITH_LEN("SQL SECURITY INVOKER "));
+}
+
+
+/*
+  Append the ALGORITHM clause of a view to the given buffer.
+
+  SYNOPSIS
+    append_algorithm()
+      table         [in] view description
+      buff          [inout] buffer to hold the ALGORITHM clause
+*/
+
+static void append_algorithm(TABLE_LIST *table, String *buff)
+{
buff->append(STRING_WITH_LEN("ALGORITHM="));
switch ((int8)table->algorithm) {
case VIEW_ALGORITHM_UNDEFINED:
@@ -1113,11 +1137,6 @@ view_store_options(THD *thd, TABLE_LIST *table, String *buff)
default:
DBUG_ASSERT(0); // never should happen
}
- append_definer(thd, buff, &table->definer.user, &table->definer.host);
- if (table->view_suid)
- buff->append(STRING_WITH_LEN("SQL SECURITY DEFINER "));
- else
- buff->append(STRING_WITH_LEN("SQL SECURITY INVOKER "));
}
@@ -3105,7 +3124,16 @@ static int get_schema_views_record(THD *thd, struct st_table_list *tables,
table->field[1]->store(tables->view_db.str, tables->view_db.length, cs);
table->field[2]->store(tables->view_name.str, tables->view_name.length, cs);
if (grant & SHOW_VIEW_ACL)
- table->field[3]->store(tables->query.str, tables->query.length, cs);
+ {
+ char buff[2048];
+ String qwe_str(buff, sizeof(buff), cs);
+ qwe_str.length(0);
+ qwe_str.append(STRING_WITH_LEN("/* "));
+ append_algorithm(tables, &qwe_str);
+ qwe_str.append(STRING_WITH_LEN("*/ "));
+ qwe_str.append(tables->query.str, tables->query.length);
+ table->field[3]->store(qwe_str.ptr(), qwe_str.length(), cs);
+ }
if (tables->with_check != VIEW_CHECK_NONE)
{
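Note: the hunk above reuses the newly split-out append_algorithm() to prefix the view definition returned for INFORMATION_SCHEMA.VIEWS with the view's algorithm, wrapped in a comment, presumably so that tools reading the definition back can still recover it. A rough sketch of the composition, using std::string in place of the server's String class and a hypothetical algorithm_clause() helper:

#include <string>

/* Hypothetical stand-in for append_algorithm(): returns e.g. "ALGORITHM=MERGE ". */
static std::string algorithm_clause(const std::string &algorithm)
{
  return "ALGORITHM=" + algorithm + " ";
}

/* Wrap the clause in a comment and put it in front of the stored SELECT. */
static std::string annotated_view_definition(const std::string &algorithm,
                                             const std::string &query)
{
  return "/* " + algorithm_clause(algorithm) + "*/ " + query;
}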
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 275cfbaa088..91c71193df2 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -1656,8 +1656,23 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
my_casedn_str(files_charset_info, path);
create_info->table_options|=HA_CREATE_DELAY_KEY_WRITE;
}
- else
+ else
+ {
+ #ifdef FN_DEVCHAR
+ /* check if the table name contains FN_DEVCHAR when defined */
+ const char *start= alias;
+ while (*start != '\0')
+ {
+ if (*start == FN_DEVCHAR)
+ {
+ my_error(ER_WRONG_TABLE_NAME, MYF(0), alias);
+ DBUG_RETURN(TRUE);
+ }
+ start++;
+ }
+ #endif
build_table_path(path, sizeof(path), db, alias, reg_ext);
+ }
/* Check if table already exists */
if ((create_info->options & HA_LEX_CREATE_TMP_TABLE)
@@ -1674,8 +1689,6 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
my_error(ER_TABLE_EXISTS_ERROR, MYF(0), alias);
DBUG_RETURN(TRUE);
}
- if (wait_if_global_read_lock(thd, 0, 1))
- DBUG_RETURN(TRUE);
VOID(pthread_mutex_lock(&LOCK_open));
if (!internal_tmp_table && !(create_info->options & HA_LEX_CREATE_TMP_TABLE))
{
@@ -1743,7 +1756,6 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
end:
VOID(pthread_mutex_unlock(&LOCK_open));
- start_waiting_global_read_lock(thd);
thd->proc_info="After create";
DBUG_RETURN(error);
@@ -1923,7 +1935,7 @@ void close_cached_table(THD *thd, TABLE *table)
thd->open_tables=unlink_open_table(thd,thd->open_tables,table);
/* When lock on LOCK_open is freed other threads can continue */
- pthread_cond_broadcast(&COND_refresh);
+ broadcast_refresh();
DBUG_VOID_RETURN;
}
@@ -2672,7 +2684,8 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table,
TABLE_LIST src_tables_list;
DBUG_ENTER("mysql_create_like_table");
- src_db= table_ident->db.str ? table_ident->db.str : thd->db;
+ DBUG_ASSERT(table_ident->db.str); /* Must be set in the parser */
+ src_db= table_ident->db.str;
/*
Validate the source table
@@ -3894,7 +3907,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
if (error)
{
VOID(pthread_mutex_unlock(&LOCK_open));
- VOID(pthread_cond_broadcast(&COND_refresh));
+ broadcast_refresh();
goto err;
}
thd->proc_info="end";
@@ -3904,7 +3917,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
Query_log_event qinfo(thd, thd->query, thd->query_length, FALSE, FALSE);
mysql_bin_log.write(&qinfo);
}
- VOID(pthread_cond_broadcast(&COND_refresh));
+ broadcast_refresh();
VOID(pthread_mutex_unlock(&LOCK_open));
#ifdef HAVE_BERKELEY_DB
if (old_db_type == DB_TYPE_BERKELEY_DB)
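Note: the #ifdef FN_DEVCHAR block added to mysql_create_table() above takes over the device-character check that the sql/table.cc hunk further down removes from check_db_name() and check_table_name(), so the restriction is applied where a real file is about to be created. A minimal sketch of the scan, with a hypothetical helper name (FN_DEVCHAR is a platform-reserved character, e.g. ':' on systems that reserve it for devices):

#include <cstring>

/* Hypothetical helper: true when 'name' contains the platform's device character. */
static bool contains_dev_char(const char *name, char dev_char)
{
  return std::strchr(name, dev_char) != NULL;
}

The loop in the diff stops at the first match and raises ER_WRONG_TABLE_NAME.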
diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc
index f943b014118..28d7dc0bb9d 100644
--- a/sql/sql_trigger.cc
+++ b/sql/sql_trigger.cc
@@ -183,6 +183,15 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create)
!(tables= add_table_for_trigger(thd, thd->lex->spname)))
DBUG_RETURN(TRUE);
+ /*
+ We don't allow creating triggers on tables in the 'mysql' schema
+ */
+ if (create && !my_strcasecmp(system_charset_info, "mysql", tables->db))
+ {
+ my_error(ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+
/* We should have only one table in table list. */
DBUG_ASSERT(tables->next_global == 0);
@@ -366,7 +375,9 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables,
/* We don't allow creation of several triggers of the same type yet */
if (bodies[lex->trg_chistics.event][lex->trg_chistics.action_time])
{
- my_message(ER_TRG_ALREADY_EXISTS, ER(ER_TRG_ALREADY_EXISTS), MYF(0));
+ my_error(ER_NOT_SUPPORTED_YET, MYF(0),
+ "multiple triggers with the same action time"
+ " and event for one table");
return 1;
}
@@ -932,8 +943,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
save_db.str= thd->db;
save_db.length= thd->db_length;
- thd->db_length= strlen(db);
- thd->db= (char *) db;
+ thd->reset_db((char*) db, strlen(db));
while ((trg_create_str= it++))
{
trg_sql_mode= itm++;
@@ -1035,8 +1045,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
lex_end(&lex);
}
- thd->db= save_db.str;
- thd->db_length= save_db.length;
+ thd->reset_db(save_db.str, save_db.length);
thd->lex= old_lex;
thd->spcont= save_spcont;
thd->variables.sql_mode= save_sql_mode;
@@ -1049,8 +1058,7 @@ err_with_lex_cleanup:
thd->lex= old_lex;
thd->spcont= save_spcont;
thd->variables.sql_mode= save_sql_mode;
- thd->db= save_db.str;
- thd->db_length= save_db.length;
+ thd->reset_db(save_db.str, save_db.length);
DBUG_RETURN(1);
}
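Note: check_n_load() above now switches the connection's current database through thd->reset_db() instead of poking thd->db and thd->db_length directly, and restores the saved value on both the normal and the error path. A stripped-down sketch of that save/switch/restore pattern, with hypothetical ConnectionState and parse_one_trigger stand-ins:

#include <cstring>
#include <string>

struct ConnectionState
{
  std::string db;                        /* current database of the session */
  void reset_db(const char *name, size_t len) { db.assign(name, len); }
};

/* Hypothetical worker: parse triggers of 'db', always restoring the old db. */
static bool load_triggers_for(ConnectionState *conn, const char *db,
                              bool (*parse_one_trigger)(ConnectionState*))
{
  std::string saved_db= conn->db;                        /* save */
  conn->reset_db(db, std::strlen(db));                   /* switch */
  bool error= parse_one_trigger(conn);
  conn->reset_db(saved_db.c_str(), saved_db.length());   /* restore on every path */
  return error;
}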
diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc
index 6269c0a2eb3..95589a58b37 100644
--- a/sql/sql_udf.cc
+++ b/sql/sql_udf.cc
@@ -140,6 +140,7 @@ void udf_init()
READ_RECORD read_record_info;
TABLE *table;
int error;
+  char db[]= "mysql"; /* May be case-folded (my_casedn_str), so it can't be a string constant */
DBUG_ENTER("ufd_init");
if (initialized)
@@ -161,13 +162,12 @@ void udf_init()
initialized = 1;
new_thd->thread_stack= (char*) &new_thd;
new_thd->store_globals();
- new_thd->db= my_strdup("mysql", MYF(0));
- new_thd->db_length=5;
+ new_thd->set_db(db, sizeof(db)-1);
bzero((gptr) &tables,sizeof(tables));
tables.alias= tables.table_name= (char*) "func";
tables.lock_type = TL_READ;
- tables.db=new_thd->db;
+ tables.db= db;
if (simple_open_n_lock_tables(new_thd, &tables))
{
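Note: udf_init() above stops my_strdup()-ing "mysql" and instead points the bootstrap THD at a stack buffer via set_db(); the same change is made in sql/tztime.cc below. The buffer is a writable char array rather than a string literal because the name may be case-folded in place. A small sketch of the idea with a hypothetical SessionStub::set_db():

#include <cstring>

struct SessionStub
{
  const char *db;
  size_t db_length;
  /* Hypothetical set_db(): records the pointer and length, no copy. */
  void set_db(const char *name, size_t len) { db= name; db_length= len; }
};

static void run_bootstrap_read()
{
  SessionStub session;
  char db[]= "mysql";                  /* writable: may be case-folded in place */
  session.set_db(db, sizeof(db) - 1);  /* sizeof(db) - 1 == strlen("mysql") */
  /* ... use 'session' to read the system table, then discard it; 'session'
     and 'db' go out of scope together, as the THD and buffer do in udf_init(). */
}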
diff --git a/sql/sql_udf.h b/sql/sql_udf.h
index d588572a762..d0729deecaa 100644
--- a/sql/sql_udf.h
+++ b/sql/sql_udf.h
@@ -70,6 +70,7 @@ class udf_handler :public Sql_alloc
void cleanup();
double val(my_bool *null_value)
{
+ is_null= 0;
if (get_arguments())
{
*null_value=1;
@@ -88,6 +89,7 @@ class udf_handler :public Sql_alloc
}
longlong val_int(my_bool *null_value)
{
+ is_null= 0;
if (get_arguments())
{
*null_value=1;
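Note: the two hunks above reset is_null at the start of udf_handler::val() and val_int(); without that, a NULL produced for one row could appear to stick for later rows, since the flag is a member of the handler rather than per-call state. A tiny sketch of the per-call reset, with a hypothetical HandlerStub:

struct HandlerStub
{
  bool is_null;                        /* set by the UDF body when it returns SQL NULL */

  double val(bool *null_value, double (*udf_body)(HandlerStub*))
  {
    is_null= false;                    /* per-call reset: forget the previous row */
    double result= udf_body(this);
    *null_value= is_null;
    return result;
  }
};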
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index 0f836bd58ff..1561ade78af 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -452,15 +452,15 @@ bool mysql_create_view(THD *thd,
*/
for (sl= select_lex; sl; sl= sl->next_select())
{
- char *db= view->db ? view->db : thd->db;
+ DBUG_ASSERT(view->db); /* Must be set in the parser */
List_iterator_fast<Item> it(sl->item_list);
Item *item;
- fill_effective_table_privileges(thd, &view->grant, db,
+ fill_effective_table_privileges(thd, &view->grant, view->db,
view->table_name);
while ((item= it++))
{
Item_field *fld;
- uint priv= (get_column_grant(thd, &view->grant, db,
+ uint priv= (get_column_grant(thd, &view->grant, view->db,
view->table_name, item->name) &
VIEW_ANY_ACL);
if ((fld= item->filed_for_view_update()))
@@ -641,8 +641,7 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view,
if (!parser->ok() || !is_equal(&view_type, parser->type()))
{
- my_error(ER_WRONG_OBJECT, MYF(0),
- (view->db ? view->db : thd->db), view->table_name, "VIEW");
+ my_error(ER_WRONG_OBJECT, MYF(0), view->db, view->table_name, "VIEW");
DBUG_RETURN(-1);
}
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 4f3cf4d8554..952a8eb44ea 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -1237,12 +1237,18 @@ sp_name:
}
| ident
{
+ THD *thd= YYTHD;
+ LEX_STRING db;
if (check_routine_name($1))
{
my_error(ER_SP_WRONG_NAME, MYF(0), $1.str);
YYABORT;
}
- $$= sp_name_current_db_new(YYTHD, $1);
+ if (thd->copy_db_to(&db.str, &db.length))
+ YYABORT;
+ $$= new sp_name(db, $1);
+ if ($$)
+ $$->init_qname(YYTHD);
}
;
@@ -2405,14 +2411,26 @@ create2:
| LIKE table_ident
{
LEX *lex=Lex;
+ THD *thd= lex->thd;
if (!(lex->name= (char *)$2))
YYABORT;
+ if ($2->db.str == NULL &&
+ thd->copy_db_to(&($2->db.str), &($2->db.length)))
+ {
+ YYABORT;
+ }
}
| '(' LIKE table_ident ')'
{
LEX *lex=Lex;
+ THD *thd= lex->thd;
if (!(lex->name= (char *)$3))
YYABORT;
+ if ($3->db.str == NULL &&
+ thd->copy_db_to(&($3->db.str), &($3->db.length)))
+ {
+ YYABORT;
+ }
}
;
@@ -3240,7 +3258,9 @@ alter:
lex->key_list.empty();
lex->col_list.empty();
lex->select_lex.init_order();
- lex->select_lex.db=lex->name=0;
+ lex->select_lex.db=
+ ((TABLE_LIST*) lex->select_lex.table_list.first)->db;
+ lex->name=0;
bzero((char*) &lex->create_info,sizeof(lex->create_info));
lex->create_info.db_type= DB_TYPE_DEFAULT;
lex->create_info.default_table_charset= NULL;
@@ -3258,8 +3278,11 @@ alter:
opt_create_database_options
{
LEX *lex=Lex;
+ THD *thd= Lex->thd;
lex->sql_command=SQLCOM_ALTER_DB;
lex->name= $3;
+ if (lex->name == NULL && thd->copy_db_to(&lex->name, NULL))
+ YYABORT;
}
| ALTER PROCEDURE sp_name
{
@@ -3421,14 +3444,20 @@ alter_list_item:
| RENAME opt_to table_ident
{
LEX *lex=Lex;
+ THD *thd= lex->thd;
lex->select_lex.db=$3->db.str;
- lex->name= $3->table.str;
+ if (lex->select_lex.db == NULL &&
+ thd->copy_db_to(&lex->select_lex.db, NULL))
+ {
+ YYABORT;
+ }
if (check_table_name($3->table.str,$3->table.length) ||
$3->db.str && check_db_name($3->db.str))
{
my_error(ER_WRONG_TABLE_NAME, MYF(0), $3->table.str);
YYABORT;
}
+ lex->name= $3->table.str;
lex->alter_info.flags|= ALTER_RENAME;
}
| CONVERT_SYM TO_SYM charset charset_name_or_default opt_collate
@@ -3973,10 +4002,21 @@ select_option:
YYABORT;
Select->options|= OPTION_FOUND_ROWS;
}
- | SQL_NO_CACHE_SYM { Lex->safe_to_cache_query=0; }
+ | SQL_NO_CACHE_SYM
+ {
+ Lex->safe_to_cache_query=0;
+ Lex->select_lex.options&= ~OPTION_TO_QUERY_CACHE;
+ Lex->select_lex.sql_cache= SELECT_LEX::SQL_NO_CACHE;
+ }
| SQL_CACHE_SYM
{
- Lex->select_lex.options|= OPTION_TO_QUERY_CACHE;
+ /* Honor this flag only if SQL_NO_CACHE wasn't specified. */
+ if (Lex->select_lex.sql_cache != SELECT_LEX::SQL_NO_CACHE)
+ {
+ Lex->safe_to_cache_query=1;
+ Lex->select_lex.options|= OPTION_TO_QUERY_CACHE;
+ Lex->select_lex.sql_cache= SELECT_LEX::SQL_CACHE;
+ }
}
| ALL { Select->options|= SELECT_ALL; }
;
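Note: together with the tri-state switch added to st_select_lex::print() near the top of this section, the grammar above records which cache hint the user actually wrote and lets SQL_NO_CACHE win when both appear. A compact sketch of that precedence rule with hypothetical names:

enum CacheHint { CACHE_UNSPECIFIED, CACHE_SQL_CACHE, CACHE_SQL_NO_CACHE };

/* Apply one hint token; SQL_NO_CACHE is sticky, SQL_CACHE only fills a gap. */
static CacheHint apply_hint(CacheHint current, CacheHint token)
{
  if (token == CACHE_SQL_NO_CACHE)
    return CACHE_SQL_NO_CACHE;                 /* always wins */
  if (token == CACHE_SQL_CACHE && current != CACHE_SQL_NO_CACHE)
    return CACHE_SQL_CACHE;                    /* honored only without SQL_NO_CACHE */
  return current;                              /* otherwise keep what we had */
}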
@@ -4742,7 +4782,13 @@ simple_expr:
#endif /* HAVE_DLOPEN */
{
LEX *lex= Lex;
- sp_name *name= sp_name_current_db_new(YYTHD, $1);
+ THD *thd= lex->thd;
+ LEX_STRING db;
+ if (thd->copy_db_to(&db.str, &db.length))
+ YYABORT;
+ sp_name *name= new sp_name(db, $1);
+ if (name)
+ name->init_qname(thd);
sp_add_used_routine(lex, YYTHD, name, TYPE_ENUM_FUNCTION);
if ($4)
@@ -6284,6 +6330,7 @@ truncate:
LEX* lex= Lex;
lex->sql_command= SQLCOM_TRUNCATE;
lex->select_lex.options= 0;
+ lex->select_lex.sql_cache= SELECT_LEX::SQL_CACHE_UNSPECIFIED;
lex->select_lex.init_order();
}
;
@@ -8460,7 +8507,9 @@ grant_ident:
'*'
{
LEX *lex= Lex;
- lex->current_select->db= lex->thd->db;
+ THD *thd= lex->thd;
+ if (thd->copy_db_to(&lex->current_select->db, NULL))
+ YYABORT;
if (lex->grant == GLOBAL_ACLS)
lex->grant = DB_ACLS & ~GRANT_ACL;
else if (lex->columns.elements)
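Note: several grammar actions above (sp_name, CREATE ... LIKE, ALTER DATABASE, RENAME, grant_ident) now call thd->copy_db_to() instead of keeping a raw pointer to thd->db, so the statement owns its own copy of the default database name and fails cleanly when no database is selected; this is also why the sql/sql_view.cc hunk earlier can assert that view->db is always set. A hedged sketch of such a helper, with hypothetical names and plain malloc standing in for the statement memory root:

#include <cstdlib>
#include <cstring>

/* Hypothetical stand-in for THD::copy_db_to(): duplicate the current default
   database into statement-owned memory; return true (error) when none is set. */
static bool copy_db_to(const char *current_db, char **dst, size_t *dst_len)
{
  if (current_db == NULL)
    return true;                       /* "no database selected" style error */
  size_t len= std::strlen(current_db);
  char *copy= (char*) std::malloc(len + 1);
  if (copy == NULL)
    return true;
  std::memcpy(copy, current_db, len + 1);
  *dst= copy;
  if (dst_len != NULL)                 /* length is optional, as in the grammar */
    *dst_len= len;
  return false;
}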
diff --git a/sql/table.cc b/sql/table.cc
index 711f250c271..9ec9463c33c 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -678,27 +678,6 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
if (outparam->key_info[key].flags & HA_FULLTEXT)
outparam->key_info[key].algorithm= HA_KEY_ALG_FULLTEXT;
- if (primary_key >= MAX_KEY && (keyinfo->flags & HA_NOSAME))
- {
- /*
- If the UNIQUE key doesn't have NULL columns and is not a part key
- declare this as a primary key.
- */
- primary_key=key;
- for (i=0 ; i < keyinfo->key_parts ;i++)
- {
- uint fieldnr= key_part[i].fieldnr;
- if (!fieldnr ||
- outparam->field[fieldnr-1]->null_ptr ||
- outparam->field[fieldnr-1]->key_length() !=
- key_part[i].length)
- {
- primary_key=MAX_KEY; // Can't be used
- break;
- }
- }
- }
-
for (i=0 ; i < keyinfo->key_parts ; key_part++,i++)
{
if (new_field_pack_flag <= 1)
@@ -1614,10 +1593,6 @@ bool check_db_name(char *name)
if (*name == '/' || *name == '\\' || *name == FN_LIBCHAR ||
*name == FN_EXTCHAR)
return 1;
-#ifdef FN_DEVCHAR
- if (*name == FN_DEVCHAR)
- return 1;
-#endif
name++;
}
return last_char_is_space || (uint) (name - start) > NAME_LEN;
@@ -1660,10 +1635,6 @@ bool check_table_name(const char *name, uint length)
#endif
if (*name == '/' || *name == '\\' || *name == FN_EXTCHAR)
return 1;
-#ifdef FN_DEVCHAR
- if (*name == FN_DEVCHAR)
- return 1;
-#endif
name++;
}
#if defined(USE_MB) && defined(USE_MB_IDENT)
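Note: the block removed from openfrm() above used to silently promote the first NULL-free, full-length UNIQUE key to the table's primary key when none was declared; unique-key reasoning is now done explicitly, e.g. by list_contains_unique_index() in sql/sql_select.cc. For reference, a sketch of the eligibility test that removed code performed, with hypothetical stub types:

#include <cstddef>

struct FieldStub
{
  bool maybe_null;                     /* field has a NULL bit */
  size_t key_length;                   /* full key length of the field */
};

struct KeyPartStub
{
  FieldStub *field;
  size_t length;                       /* length stored in the key (prefix if smaller) */
};

/* Eligible as an implicit primary key: every part exists, is NOT NULL, and is not a prefix. */
static bool usable_as_primary_key(const KeyPartStub *parts, size_t n_parts)
{
  for (size_t i= 0; i < n_parts; i++)
    if (parts[i].field == NULL ||
        parts[i].field->maybe_null ||
        parts[i].field->key_length != parts[i].length)
      return false;
  return true;
}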
diff --git a/sql/table.h b/sql/table.h
index 106421d7a17..ebb4481ef3a 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -599,7 +599,8 @@ typedef struct st_table_list
thr_lock_type lock_type;
uint outer_join; /* Which join type */
uint shared; /* Used in multi-upd */
- uint32 db_length, table_name_length;
+ uint db_length;
+ uint32 table_name_length;
bool updatable; /* VIEW/TABLE can be updated now */
bool straight; /* optimize with prev table */
bool updating; /* for replicate-do/ignore table */
diff --git a/sql/tztime.cc b/sql/tztime.cc
index 079abfc9299..d12aef47b40 100644
--- a/sql/tztime.cc
+++ b/sql/tztime.cc
@@ -1548,6 +1548,7 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
TABLE *table;
Tz_names_entry *tmp_tzname;
my_bool return_val= 1;
+ char db[]= "mysql";
int res;
DBUG_ENTER("my_tz_init");
@@ -1604,13 +1605,12 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
leap seconds shared by all time zones.
*/
- thd->db= my_strdup("mysql",MYF(0));
- thd->db_length= 5; // Safety
+ thd->set_db(db, sizeof(db)-1);
bzero((char*) &tables_buff, sizeof(TABLE_LIST));
tables_buff[0].alias= tables_buff[0].table_name=
(char*)"time_zone_leap_second";
tables_buff[0].lock_type= TL_READ;
- tables_buff[0].db= thd->db;
+ tables_buff[0].db= db;
/*
Fill TABLE_LIST for the rest of the time zone describing tables
and link it to first one.
diff --git a/strings/Makefile.am b/strings/Makefile.am
index c43cf0f290a..7ee115c09e5 100644
--- a/strings/Makefile.am
+++ b/strings/Makefile.am
@@ -66,12 +66,6 @@ conf_to_src_LDFLAGS= @NOINST_LDFLAGS@
#strtoull.o: @CHARSET_OBJS@
-if ASSEMBLER
-# On Linux gcc can compile the assembly files
-%.o : %.s
- $(AS) $(ASFLAGS) -o $@ $<
-endif
-
FLAGS=$(DEFS) $(INCLUDES) $(CPPFLAGS) $(CFLAGS) @NOINST_LDFLAGS@
str_test: str_test.c $(pkglib_LIBRARIES)
diff --git a/strings/ctype-mb.c b/strings/ctype-mb.c
index 36b52826486..0f95a688d85 100644
--- a/strings/ctype-mb.c
+++ b/strings/ctype-mb.c
@@ -24,12 +24,12 @@
void my_caseup_str_mb(CHARSET_INFO * cs, char *str)
{
register uint32 l;
- register char *end=str+strlen(str); /* BAR TODO: remove strlen() call */
register uchar *map=cs->to_upper;
while (*str)
{
- if ((l=my_ismbchar(cs, str,end)))
+ /* Pointing after the '\0' is safe here. */
+ if ((l=my_ismbchar(cs, str, str + cs->mbmaxlen)))
str+=l;
else
{
@@ -42,12 +42,12 @@ void my_caseup_str_mb(CHARSET_INFO * cs, char *str)
void my_casedn_str_mb(CHARSET_INFO * cs, char *str)
{
register uint32 l;
- register char *end=str+strlen(str);
register uchar *map=cs->to_lower;
while (*str)
{
- if ((l=my_ismbchar(cs, str,end)))
+ /* Pointing after the '\0' is safe here. */
+ if ((l=my_ismbchar(cs, str, str + cs->mbmaxlen)))
str+=l;
else
{
@@ -101,15 +101,18 @@ uint my_casedn_mb(CHARSET_INFO * cs, char *src, uint srclen,
return srclen;
}
+/*
+ my_strcasecmp_mb() returns 0 if strings are equal, non-zero otherwise.
+ */
int my_strcasecmp_mb(CHARSET_INFO * cs,const char *s, const char *t)
{
register uint32 l;
- register const char *end=s+strlen(s);
register uchar *map=cs->to_upper;
- while (s<end)
+ while (*s && *t)
{
- if ((l=my_ismbchar(cs, s,end)))
+ /* Pointing after the '\0' is safe here. */
+ if ((l=my_ismbchar(cs, s, s + cs->mbmaxlen)))
{
while (l--)
if (*s++ != *t++)
@@ -120,7 +123,8 @@ int my_strcasecmp_mb(CHARSET_INFO * cs,const char *s, const char *t)
else if (map[(uchar) *s++] != map[(uchar) *t++])
return 1;
}
- return *t;
+ /* At least one of '*s' and '*t' is zero here. */
+ return (*t != *s);
}
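Note: my_strcasecmp_mb() above no longer needs an up-front strlen(): it walks both strings while both still have characters, passing str + cs->mbmaxlen as the scan limit (safe even past the terminating '\0', per the comments), and the final (*t != *s) is 0 exactly when both strings ended together. A single-byte sketch of the same loop shape, case folding via tolower():

#include <cctype>

/* Single-byte analogue of the loop above: 0 when equal (case-insensitively),
   non-zero otherwise; the final comparison is non-zero iff only one string ended. */
static int strcasecmp_sketch(const char *s, const char *t)
{
  while (*s && *t)
  {
    if (std::tolower((unsigned char) *s) != std::tolower((unsigned char) *t))
      return 1;
    s++;
    t++;
  }
  return *s != *t;                     /* both at '\0' => equal => 0 */
}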
diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh
index abd29b6014a..fd92c2bd25e 100644
--- a/support-files/mysql.spec.sh
+++ b/support-files/mysql.spec.sh
@@ -155,6 +155,7 @@ Summary: MySQL - Benchmarks and test system
Group: Applications/Databases
Provides: mysql-bench
Obsoletes: mysql-bench
+AutoReqProv: no
%description bench
This package contains MySQL benchmark scripts and data.
@@ -484,17 +485,7 @@ chown -R %{mysqld_user}:%{mysqld_group} $mysql_datadir
# Initiate databases if needed
%{_bindir}/mysql_install_db --rpm --user=%{mysqld_user}
-# Upgrade databases if needed
-# This must be done as database user "root", who should be password-protected,
-# but this password is not available here.
-# So ensure the server is isolated as much as possible, and start it so that
-# passwords are not checked.
-# See the related change in the start script "/etc/init.d/mysql".
-chmod 700 $mysql_datadir
-%{_sysconfdir}/init.d/mysql start --skip-networking --skip-grant-tables
-%{_bindir}/mysql_upgrade
-%{_sysconfdir}/init.d/mysql stop --skip-networking --skip-grant-tables
-chmod 755 $mysql_datadir
+# Upgrading the databases, if needed, would go here - but it cannot be automated yet
# Change permissions again to fix any new files.
chown -R %{mysqld_user}:%{mysqld_group} $mysql_datadir
@@ -597,6 +588,7 @@ fi
%attr(755, root, root) %{_bindir}/mysqlbug
%attr(755, root, root) %{_bindir}/mysqld_multi
%attr(755, root, root) %{_bindir}/mysqld_safe
+%attr(755, root, root) %{_bindir}/mysqldumpslow
%attr(755, root, root) %{_bindir}/mysqlhotcopy
%attr(755, root, root) %{_bindir}/mysqltest
%attr(755, root, root) %{_bindir}/perror
@@ -627,7 +619,6 @@ fi
%attr(755, root, root) %{_bindir}/mysqlbinlog
%attr(755, root, root) %{_bindir}/mysqlcheck
%attr(755, root, root) %{_bindir}/mysqldump
-%attr(755, root, root) %{_bindir}/mysqldumpslow
%attr(755, root, root) %{_bindir}/mysqlimport
%attr(755, root, root) %{_bindir}/mysqlshow
@@ -732,6 +723,20 @@ fi
# itself - note that they must be ordered by date (important when
# merging BK trees)
%changelog
+* Tue Jun 27 2006 Joerg Bruehe <joerg@mysql.com>
+
+- Move "mysqldumpslow" from the client RPM to the server RPM (bug#20216)
+
+- Revert all previous attempts to call "mysql_upgrade" during RPM upgrade;
+  several aspects still need to be solved before this is possible.
+ For now, just ensure the binary "mysql_upgrade" is delivered and installed.
+
+* Thu Jun 22 2006 Joerg Bruehe <joerg@mysql.com>
+
+- Close a gap in the previous version by explicitly using
+ a newly created temporary directory for the socket to be used
+ in the "mysql_upgrade" operation, overriding any local setting.
+
* Tue Jun 20 2006 Joerg Bruehe <joerg@mysql.com>
- To run "mysql_upgrade", we need a running server;