author     Sergei Golubchik <sergii@pisem.net>    2013-02-28 09:58:39 +0100
committer  Sergei Golubchik <sergii@pisem.net>    2013-02-28 09:58:39 +0100
commit     08ba257846e75641304e530b7a4a4ab21b1714d7 (patch)
tree       5152c8e758f3b566dda4bf07bf5aad1607910eda
parent     65831bca6f516db383c1bc04884b65b9fa7b6926 (diff)
parent     40bbd1862376fa634e42b4e0197e22f5633f0c21 (diff)
download   mariadb-git-08ba257846e75641304e530b7a4a4ab21b1714d7.tar.gz
mysql-5.1 merge
mysys/errors.c:
  revert upstream's fix. use a much simpler one
mysys/my_write.c:
  revert upstream's fix. use a simpler one
sql/item_xmlfunc.cc:
  useless, but ok
sql/mysqld.cc:
  simplify upstream's fix
storage/heap/hp_delete.c:
  remove upstream's fix. we'll use a much less expensive approach.
-rw-r--r--  config/ac-macros/ssl.m4 | 2
-rw-r--r--  configure.in | 29
-rw-r--r--  extra/yassl/README | 11
-rw-r--r--  extra/yassl/include/lock.hpp | 9
-rw-r--r--  extra/yassl/include/openssl/ssl.h | 2
-rw-r--r--  extra/yassl/include/yassl_error.hpp | 2
-rw-r--r--  extra/yassl/src/cert_wrapper.cpp | 3
-rw-r--r--  extra/yassl/src/handshake.cpp | 8
-rw-r--r--  extra/yassl/src/lock.cpp | 4
-rw-r--r--  extra/yassl/src/ssl.cpp | 3
-rw-r--r--  extra/yassl/src/yassl_error.cpp | 7
-rw-r--r--  extra/yassl/taocrypt/include/aes.hpp | 1
-rw-r--r--  extra/yassl/taocrypt/include/pwdbased.hpp | 4
-rw-r--r--  extra/yassl/taocrypt/src/asn.cpp | 4
-rw-r--r--  extra/yassl/taocrypt/src/coding.cpp | 21
-rw-r--r--  extra/yassl/taocrypt/taocrypt.dsw | 17
-rw-r--r--  extra/yassl/taocrypt/test/memory.cpp | 2
-rw-r--r--  extra/yassl/taocrypt/test/test.dsp (renamed from extra/yassl/taocrypt/test.dsp) | 14
-rw-r--r--  extra/yassl/yassl.dsw | 5
-rw-r--r--  include/welcome_copyright_notice.h | 4
-rw-r--r--  libmysqld/Makefile.am | 2
-rw-r--r--  mysql-test/Makefile.am | 3
-rw-r--r--  mysql-test/include/get_binlog_dump_thread_id.inc | 22
-rw-r--r--  mysql-test/lib/My/SafeProcess/safe_process.cc | 15
-rw-r--r--  mysql-test/lib/My/SafeProcess/safe_process.pl | 166
-rwxr-xr-x  mysql-test/mysql-test-run.pl | 4
-rw-r--r--  mysql-test/suite/engines/funcs/r/rpl_row_until.result | 242
-rw-r--r--  mysql-test/suite/engines/funcs/t/disabled.def | 1
-rw-r--r--  mysql-test/suite/engines/funcs/t/rpl_row_until.test | 170
-rw-r--r--  mysql-test/suite/innodb/r/innodb_bug14676111.result | 53
-rw-r--r--  mysql-test/suite/innodb/t/innodb_bug14676111.test | 128
-rw-r--r--  mysql-test/suite/innodb_plugin/r/innodb-index.result | 2
-rw-r--r--  mysql-test/suite/innodb_plugin/r/innodb_bug14676111.result | 53
-rw-r--r--  mysql-test/suite/innodb_plugin/r/innodb_mysql.result | 2
-rw-r--r--  mysql-test/suite/innodb_plugin/t/disabled.def | 2
-rw-r--r--  mysql-test/suite/innodb_plugin/t/innodb-index.test | 4
-rw-r--r--  mysql-test/suite/innodb_plugin/t/innodb_bug14676111.test | 128
-rw-r--r--  mysql-test/suite/parts/r/partition_alter4_innodb.result | 48
-rw-r--r--  mysql-test/suite/rpl/r/rpl_row_until.result | 1
-rw-r--r--  mysql-test/suite/rpl/t/disabled.def | 2
-rw-r--r--  mysql-test/suite/rpl/t/rpl_row_until.test | 4
-rw-r--r--  mysys/errors.c | 4
-rw-r--r--  mysys/mf_pack.c | 10
-rw-r--r--  mysys/my_lib.c | 10
-rw-r--r--  mysys/my_write.c | 2
-rw-r--r--  scripts/mysqld_safe.sh | 4
-rw-r--r--  sql/.cvsignore | 1
-rw-r--r--  sql/Makefile.am | 2
-rw-r--r--  sql/field.cc | 8
-rw-r--r--  sql/ha_partition.cc | 21
-rw-r--r--  sql/item_xmlfunc.cc | 6
-rw-r--r--  sql/log_event.cc | 18
-rw-r--r--  sql/log_event.h | 18
-rw-r--r--  sql/mysql_priv.h | 27
-rw-r--r--  sql/mysqld.cc | 38
-rw-r--r--  sql/sql_base.cc | 50
-rw-r--r--  sql/sql_cache.cc | 22
-rw-r--r--  sql/sql_lex.h | 6
-rw-r--r--  sql/sql_repl.cc | 4
-rw-r--r--  sql/sql_table.cc | 255
-rw-r--r--  sql/sql_trigger.cc | 4
-rw-r--r--  sql/tztime.cc | 21
-rw-r--r--  storage/heap/hp_delete.c | 8
-rw-r--r--  storage/innobase/btr/btr0btr.c | 24
-rw-r--r--  storage/innobase/btr/btr0cur.c | 11
-rw-r--r--  storage/innobase/handler/ha_innodb.cc | 20
-rw-r--r--  storage/innobase/include/btr0cur.h | 5
-rw-r--r--  storage/innobase/include/btr0cur.ic | 13
-rw-r--r--  storage/innobase/include/lock0lock.h | 7
-rw-r--r--  storage/innobase/include/srv0srv.h | 8
-rw-r--r--  storage/innobase/include/trx0purge.h | 4
-rw-r--r--  storage/innobase/lock/lock0lock.c | 85
-rw-r--r--  storage/innobase/row/row0ins.c | 5
-rw-r--r--  storage/innobase/srv/srv0srv.c | 50
-rw-r--r--  storage/innobase/trx/trx0purge.c | 13
-rw-r--r--  storage/innodb_plugin/ChangeLog | 19
-rw-r--r--  storage/innodb_plugin/btr/btr0btr.c | 38
-rw-r--r--  storage/innodb_plugin/btr/btr0cur.c | 8
-rw-r--r--  storage/innodb_plugin/buf/buf0buf.c | 101
-rw-r--r--  storage/innodb_plugin/fil/fil0fil.c | 27
-rw-r--r--  storage/innodb_plugin/handler/ha_innodb.cc | 37
-rw-r--r--  storage/innodb_plugin/handler/handler0alter.cc | 1
-rw-r--r--  storage/innodb_plugin/include/btr0cur.h | 5
-rw-r--r--  storage/innodb_plugin/include/btr0cur.ic | 13
-rw-r--r--  storage/innodb_plugin/include/buf0buf.h | 10
-rw-r--r--  storage/innodb_plugin/include/buf0buf.ic | 8
-rw-r--r--  storage/innodb_plugin/include/data0type.ic | 28
-rw-r--r--  storage/innodb_plugin/include/fil0fil.h | 15
-rw-r--r--  storage/innodb_plugin/include/lock0lock.h | 12
-rw-r--r--  storage/innodb_plugin/include/rem0rec.h | 80
-rw-r--r--  storage/innodb_plugin/include/srv0srv.h | 9
-rw-r--r--  storage/innodb_plugin/include/trx0purge.h | 4
-rw-r--r--  storage/innodb_plugin/include/univ.i | 18
-rw-r--r--  storage/innodb_plugin/lock/lock0lock.c | 87
-rw-r--r--  storage/innodb_plugin/log/log0recv.c | 15
-rw-r--r--  storage/innodb_plugin/rem/rem0rec.c | 237
-rw-r--r--  storage/innodb_plugin/row/row0ins.c | 5
-rw-r--r--  storage/innodb_plugin/row/row0merge.c | 104
-rw-r--r--  storage/innodb_plugin/row/row0mysql.c | 7
-rw-r--r--  storage/innodb_plugin/row/row0umod.c | 3
-rw-r--r--  storage/innodb_plugin/row/row0undo.c | 2
-rw-r--r--  storage/innodb_plugin/srv/srv0srv.c | 56
-rw-r--r--  storage/innodb_plugin/trx/trx0purge.c | 13
-rw-r--r--  storage/innodb_plugin/trx/trx0rec.c | 20
104 files changed, 1961 insertions(+), 1009 deletions(-)
diff --git a/config/ac-macros/ssl.m4 b/config/ac-macros/ssl.m4
index fc55f93d8d6..4e785ba0a6f 100644
--- a/config/ac-macros/ssl.m4
+++ b/config/ac-macros/ssl.m4
@@ -42,7 +42,7 @@ AC_DEFUN([MYSQL_USE_BUNDLED_YASSL], [
yassl_thread_cxxflags=""
yassl_thread_safe=""
if test "$with_server" != "no" -o "$THREAD_SAFE_CLIENT" != "no"; then
- yassl_thread_cxxflags="-DYASSL_THREAD_SAFE"
+ yassl_thread_cxxflags="-DMULTI_THREADED"
yassl_thread_safe="(thread-safe)"
fi
AC_SUBST([yassl_thread_cxxflags])
diff --git a/configure.in b/configure.in
index 6ff35483898..5e73dce78bb 100644
--- a/configure.in
+++ b/configure.in
@@ -12,7 +12,7 @@ dnl
dnl When changing the major version number please also check the switch
dnl statement in mysqlbinlog::check_master_version(). You may also need
dnl to update version.c in ndb.
-AC_INIT([MariaDB Server], [5.1.67-MariaDB], [], [mysql])
+AC_INIT([MariaDB Server], [5.1.68-MariaDB], [], [mysql])
AC_CONFIG_SRCDIR([sql/mysqld.cc])
AC_CANONICAL_SYSTEM
@@ -22,7 +22,7 @@ AC_CANONICAL_SYSTEM
AM_INIT_AUTOMAKE([1.9 tar-ustar])
AC_PROG_LIBTOOL
-AM_CONFIG_HEADER([include/config.h])
+AC_CONFIG_HEADERS([include/config.h])
# Request support for automake silent-rules if available.
# Default to verbose output. One can use the configure-time
@@ -274,6 +274,31 @@ AC_SUBST(LIBTOOL)dnl
AC_SUBST(NM)dnl
+##############################################################################
+# In automake 1.12, the extension on generated yacc/bison header files changed
+##############################################################################
+
+YACC_HEXT="h"
+MAKEFILE_1ST=`head -1 "$srcdir/Makefile.in"`
+AMAKE_MAJOR=`expr "$MAKEFILE_1ST" : '.*generated by automake \([[0-9]]*\).*'`
+if test $? -eq "0" ; then
+ if test "$AMAKE_MAJOR" -gt "1" ; then
+ YACC_HEXT="hh"
+ CXXFLAGS="$CXXFLAGS -DYACC_HEXT_HH"
+ elif test "$AMAKE_MAJOR" -eq "1" ; then
+ AMAKE_MINOR=`expr "$MAKEFILE_1ST" : '.*generated by automake 1.\([[0-9]]*\).*'`
+ if test $? -eq "0" ; then
+ if test "$AMAKE_MINOR" -ge "12" ; then
+ YACC_HEXT="hh"
+ CXXFLAGS="$CXXFLAGS -DYACC_HEXT_HH"
+ fi
+ fi
+ fi
+fi
+AC_SUBST(YACC_HEXT)
+
+##############################################################################
+
# NM= "$NM -X64"
#archive_expsym_cmds= `echo "$archive_expsym_cmds" | sed -e '/"$(CC)"//'`
#archive_expsym_cmds= "$CC -q64 $archive_expsym_cmds"
diff --git a/extra/yassl/README b/extra/yassl/README
index 7720a9453dd..24bdf32f989 100644
--- a/extra/yassl/README
+++ b/extra/yassl/README
@@ -12,7 +12,16 @@ before calling SSL_new();
*** end Note ***
-yaSSL Release notes, version 2.1.2 (9/2/2011)
+yaSSL Release notes, version 2.2.2 (7/5/2012)
+
+ This release of yaSSL contains bug fixes and more security checks around
+ malicious certificates.
+
+See normal build instructions below under 1.0.6.
+See libcurl build instructions below under 1.3.0 and note in 1.5.8.
+
+
+*****************yaSSL Release notes, version 2.1.2 (9/2/2011)
This release of yaSSL contains bug fixes, better non-blocking support with
SSL_write, and OpenSSL RSA public key format support.
diff --git a/extra/yassl/include/lock.hpp b/extra/yassl/include/lock.hpp
index 99829b0b6de..9485c09b214 100644
--- a/extra/yassl/include/lock.hpp
+++ b/extra/yassl/include/lock.hpp
@@ -27,7 +27,7 @@
Visual Studio Source Annotations header (sourceannotations.h) fails
to compile if outside of the global namespace.
*/
-#ifdef YASSL_THREAD_SAFE
+#ifdef MULTI_THREADED
#ifdef _WIN32
#include <windows.h>
#endif
@@ -36,8 +36,9 @@
namespace yaSSL {
-#ifdef YASSL_THREAD_SAFE
+#ifdef MULTI_THREADED
#ifdef _WIN32
+ #include <windows.h>
class Mutex {
CRITICAL_SECTION cs_;
@@ -77,7 +78,7 @@ namespace yaSSL {
};
#endif // _WIN32
-#else // YASSL_THREAD_SAFE (WE'RE SINGLE)
+#else // MULTI_THREADED (WE'RE SINGLE)
class Mutex {
public:
@@ -87,7 +88,7 @@ namespace yaSSL {
};
};
-#endif // YASSL_THREAD_SAFE
+#endif // MULTI_THREADED
diff --git a/extra/yassl/include/openssl/ssl.h b/extra/yassl/include/openssl/ssl.h
index 0d99888da88..2fcba67cfdd 100644
--- a/extra/yassl/include/openssl/ssl.h
+++ b/extra/yassl/include/openssl/ssl.h
@@ -35,7 +35,7 @@
#include "rsa.h"
-#define YASSL_VERSION "2.2.0"
+#define YASSL_VERSION "2.2.2"
#if defined(__cplusplus)
diff --git a/extra/yassl/include/yassl_error.hpp b/extra/yassl/include/yassl_error.hpp
index 87bb4c55e96..8efc7f72e87 100644
--- a/extra/yassl/include/yassl_error.hpp
+++ b/extra/yassl/include/yassl_error.hpp
@@ -65,7 +65,7 @@ enum YasslError {
enum Library { yaSSL_Lib = 0, CryptoLib, SocketLib };
enum { MAX_ERROR_SZ = 80 };
-void SetErrorString(unsigned long, char*);
+void SetErrorString(YasslError, char*);
/* remove for now, if go back to exceptions use this wrapper
// Base class for all yaSSL exceptions
diff --git a/extra/yassl/src/cert_wrapper.cpp b/extra/yassl/src/cert_wrapper.cpp
index 7e73464001a..917cfa1a8fb 100644
--- a/extra/yassl/src/cert_wrapper.cpp
+++ b/extra/yassl/src/cert_wrapper.cpp
@@ -250,8 +250,7 @@ int CertManager::Validate()
TaoCrypt::Source source((*last)->get_buffer(), (*last)->get_length());
TaoCrypt::CertDecoder cert(source, true, &signers_, verifyNone_);
- int err = cert.GetError().What();
- if ( err )
+ if (int err = cert.GetError().What())
return err;
const TaoCrypt::PublicKey& key = cert.GetPublicKey();
diff --git a/extra/yassl/src/handshake.cpp b/extra/yassl/src/handshake.cpp
index c1ee61d043e..c7dbaf86071 100644
--- a/extra/yassl/src/handshake.cpp
+++ b/extra/yassl/src/handshake.cpp
@@ -767,8 +767,14 @@ int DoProcessReply(SSL& ssl)
while (buffer.get_current() < hdr.length_ + RECORD_HEADER + offset) {
// each message in record, can be more than 1 if not encrypted
- if (ssl.getSecurity().get_parms().pending_ == false) // cipher on
+ if (ssl.getSecurity().get_parms().pending_ == false) { // cipher on
+ // sanity check for malicious/corrupted/illegal input
+ if (buffer.get_remaining() < hdr.length_) {
+ ssl.SetError(bad_input);
+ return 0;
+ }
decrypt_message(ssl, buffer, hdr.length_);
+ }
mySTL::auto_ptr<Message> msg(mf.CreateObject(hdr.type_));
if (!msg.get()) {
diff --git a/extra/yassl/src/lock.cpp b/extra/yassl/src/lock.cpp
index 6e85fefa14d..6d8e9c17477 100644
--- a/extra/yassl/src/lock.cpp
+++ b/extra/yassl/src/lock.cpp
@@ -26,7 +26,7 @@
namespace yaSSL {
-#ifdef YASSL_THREAD_SAFE
+#ifdef MULTI_THREADED
#ifdef _WIN32
Mutex::Mutex()
@@ -79,7 +79,7 @@ namespace yaSSL {
#endif // _WIN32
-#endif // YASSL_THREAD_SAFE
+#endif // MULTI_THREADED
diff --git a/extra/yassl/src/ssl.cpp b/extra/yassl/src/ssl.cpp
index 3b1fc43bc94..7c264e5a939 100644
--- a/extra/yassl/src/ssl.cpp
+++ b/extra/yassl/src/ssl.cpp
@@ -27,7 +27,6 @@
-
/* see man pages for function descriptions */
#include "runtime.hpp"
@@ -1014,7 +1013,7 @@ char* ERR_error_string(unsigned long errNumber, char* buffer)
static char* msg = (char*)"Please supply a buffer for error string";
if (buffer) {
- SetErrorString(errNumber, buffer);
+ SetErrorString(YasslError(errNumber), buffer);
return buffer;
}
diff --git a/extra/yassl/src/yassl_error.cpp b/extra/yassl/src/yassl_error.cpp
index e55c10c68c0..f48fbdc925e 100644
--- a/extra/yassl/src/yassl_error.cpp
+++ b/extra/yassl/src/yassl_error.cpp
@@ -31,11 +31,6 @@
#pragma warning(disable: 4996)
#endif
-#ifdef _MSC_VER
- // 4996 warning to use MS extensions e.g., strcpy_s instead of strncpy
- #pragma warning(disable: 4996)
-#endif
-
namespace yaSSL {
@@ -60,7 +55,7 @@ Library Error::get_lib() const
*/
-void SetErrorString(unsigned long error, char* buffer)
+void SetErrorString(YasslError error, char* buffer)
{
using namespace TaoCrypt;
const int max = MAX_ERROR_SZ; // shorthand
diff --git a/extra/yassl/taocrypt/include/aes.hpp b/extra/yassl/taocrypt/include/aes.hpp
index dc19c98a83a..e2041fc9350 100644
--- a/extra/yassl/taocrypt/include/aes.hpp
+++ b/extra/yassl/taocrypt/include/aes.hpp
@@ -92,7 +92,6 @@ typedef BlockCipher<ENCRYPTION, AES, CBC> AES_CBC_Encryption;
typedef BlockCipher<DECRYPTION, AES, CBC> AES_CBC_Decryption;
-
} // naemspace
#endif // TAO_CRYPT_AES_HPP
diff --git a/extra/yassl/taocrypt/include/pwdbased.hpp b/extra/yassl/taocrypt/include/pwdbased.hpp
index f40a336e2c3..d050fd8988b 100644
--- a/extra/yassl/taocrypt/include/pwdbased.hpp
+++ b/extra/yassl/taocrypt/include/pwdbased.hpp
@@ -48,9 +48,11 @@ word32 PBKDF2_HMAC<T>::DeriveKey(byte* derived, word32 dLen, const byte* pwd,
word32 pLen, const byte* salt, word32 sLen,
word32 iterations) const
{
- if (dLen > MaxDerivedKeyLength())
+ if (dLen > MaxDerivedKeyLength())
return 0;
+ if (iterations < 0)
+ return 0;
ByteBlock buffer(T::DIGEST_SIZE);
HMAC<T> hmac;
diff --git a/extra/yassl/taocrypt/src/asn.cpp b/extra/yassl/taocrypt/src/asn.cpp
index 5ec4cac1c44..ad054809879 100644
--- a/extra/yassl/taocrypt/src/asn.cpp
+++ b/extra/yassl/taocrypt/src/asn.cpp
@@ -154,6 +154,8 @@ word32 GetLength(Source& source)
else
length = b;
+ if (source.IsLeft(length) == false) return 0;
+
return length;
}
@@ -832,7 +834,7 @@ void CertDecoder::GetName(NameType nt)
if (email) {
if (!(ptr = AddTag(ptr, buf_end, "/emailAddress=", 14, length))) {
source_.SetError(CONTENT_E);
- return;
+ return;
}
}
diff --git a/extra/yassl/taocrypt/src/coding.cpp b/extra/yassl/taocrypt/src/coding.cpp
index 97c62ea12a7..0512ea9c889 100644
--- a/extra/yassl/taocrypt/src/coding.cpp
+++ b/extra/yassl/taocrypt/src/coding.cpp
@@ -103,6 +103,16 @@ void HexDecoder::Decode()
byte b = coded_.next() - 0x30; // 0 starts at 0x30
byte b2 = coded_.next() - 0x30;
+ // sanity checks
+ if (b >= sizeof(hexDecode)/sizeof(hexDecode[0])) {
+ coded_.SetError(PEM_E);
+ return;
+ }
+ if (b2 >= sizeof(hexDecode)/sizeof(hexDecode[0])) {
+ coded_.SetError(PEM_E);
+ return;
+ }
+
b = hexDecode[b];
b2 = hexDecode[b2];
@@ -178,6 +188,7 @@ void Base64Decoder::Decode()
{
word32 bytes = coded_.size();
word32 plainSz = bytes - ((bytes + (pemLineSz - 1)) / pemLineSz);
+ const byte maxIdx = (byte)sizeof(base64Decode) + 0x2B - 1;
plainSz = ((plainSz * 3) / 4) + 3;
decoded_.New(plainSz);
@@ -200,6 +211,16 @@ void Base64Decoder::Decode()
if (e4 == pad)
pad4 = true;
+ if (e1 < 0x2B || e2 < 0x2B || e3 < 0x2B || e4 < 0x2B) {
+ coded_.SetError(PEM_E);
+ return;
+ }
+
+ if (e1 > maxIdx || e2 > maxIdx || e3 > maxIdx || e4 > maxIdx) {
+ coded_.SetError(PEM_E);
+ return;
+ }
+
e1 = base64Decode[e1 - 0x2B];
e2 = base64Decode[e2 - 0x2B];
e3 = (e3 == pad) ? 0 : base64Decode[e3 - 0x2B];
diff --git a/extra/yassl/taocrypt/taocrypt.dsw b/extra/yassl/taocrypt/taocrypt.dsw
index d10d7534c3d..43115069160 100644
--- a/extra/yassl/taocrypt/taocrypt.dsw
+++ b/extra/yassl/taocrypt/taocrypt.dsw
@@ -3,6 +3,21 @@ Microsoft Developer Studio Workspace File, Format Version 6.00
###############################################################################
+Project: "benchmark"=.\benchmark\benchmark.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name taocrypt
+ End Project Dependency
+}}}
+
+###############################################################################
+
Project: "taocrypt"=.\taocrypt.dsp - Package Owner=<4>
Package=<5>
@@ -15,7 +30,7 @@ Package=<4>
###############################################################################
-Project: "test"=.\test.dsp - Package Owner=<4>
+Project: "test"=.\test\test.dsp - Package Owner=<4>
Package=<5>
{{{
diff --git a/extra/yassl/taocrypt/test/memory.cpp b/extra/yassl/taocrypt/test/memory.cpp
index 33bf523565e..935733c225f 100644
--- a/extra/yassl/taocrypt/test/memory.cpp
+++ b/extra/yassl/taocrypt/test/memory.cpp
@@ -13,7 +13,7 @@
To use MemoryTracker merely add this file to your project
No need to instantiate anything
-If your app is multi threaded define YASSL_THREAD_SAFE
+If your app is multi threaded define MULTI_THREADED
*********************************************************************/
diff --git a/extra/yassl/taocrypt/test.dsp b/extra/yassl/taocrypt/test/test.dsp
index 1084f8e06e3..93b369de3d9 100644
--- a/extra/yassl/taocrypt/test.dsp
+++ b/extra/yassl/taocrypt/test/test.dsp
@@ -37,12 +37,12 @@ RSC=rc.exe
# PROP BASE Target_Dir ""
# PROP Use_MFC 0
# PROP Use_Debug_Libraries 0
-# PROP Output_Dir "test\Release"
-# PROP Intermediate_Dir "test\Release"
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
# PROP Ignore_Export_Lib 0
# PROP Target_Dir ""
# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
-# ADD CPP /nologo /MT /W3 /O2 /I "include" /I "mySTL" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /FR /YX /FD /c
+# ADD CPP /nologo /MD /W3 /O2 /I "../include" /I "../mySTL" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /FR /YX /FD /c
# ADD BASE RSC /l 0x409 /d "NDEBUG"
# ADD RSC /l 0x409 /d "NDEBUG"
BSC32=bscmake.exe
@@ -61,12 +61,12 @@ LINK32=link.exe
# PROP BASE Target_Dir ""
# PROP Use_MFC 0
# PROP Use_Debug_Libraries 1
-# PROP Output_Dir "test\Debug"
-# PROP Intermediate_Dir "test\Debug"
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
# PROP Ignore_Export_Lib 0
# PROP Target_Dir ""
# ADD BASE CPP /nologo /W3 /Gm /GX /ZI /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /GZ /c
-# ADD CPP /nologo /MTd /W3 /Gm /ZI /Od /I "include" /I "mySTL" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /FR /YX /FD /GZ /c
+# ADD CPP /nologo /MDd /W3 /Gm /ZI /Od /I "../include" /I "../mySTL" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /FR /YX /FD /GZ /c
# ADD BASE RSC /l 0x409 /d "_DEBUG"
# ADD RSC /l 0x409 /d "_DEBUG"
BSC32=bscmake.exe
@@ -87,7 +87,7 @@ LINK32=link.exe
# PROP Default_Filter "cpp;c;cxx;rc;def;r;odl;idl;hpj;bat"
# Begin Source File
-SOURCE=.\test\test.cpp
+SOURCE=.\test.cpp
# End Source File
# End Group
# Begin Group "Header Files"
diff --git a/extra/yassl/yassl.dsw b/extra/yassl/yassl.dsw
index 288c88dfd5b..8da089fc1fa 100644
--- a/extra/yassl/yassl.dsw
+++ b/extra/yassl/yassl.dsw
@@ -90,7 +90,7 @@ Package=<4>
###############################################################################
-Project: "test"=.\taocrypt\test.dsp - Package Owner=<4>
+Project: "test"=.\taocrypt\test\test.dsp - Package Owner=<4>
Package=<5>
{{{
@@ -114,9 +114,6 @@ Package=<5>
Package=<4>
{{{
Begin Project Dependency
- Project_Dep_Name taocrypt
- End Project Dependency
- Begin Project Dependency
Project_Dep_Name yassl
End Project Dependency
}}}
diff --git a/include/welcome_copyright_notice.h b/include/welcome_copyright_notice.h
index 8a9db3ad3dd..c141c62f403 100644
--- a/include/welcome_copyright_notice.h
+++ b/include/welcome_copyright_notice.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, 2012, Oracle and/or its affiliates.
+/* Copyright (c) 2011, 2013, Oracle and/or its affiliates.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -16,7 +16,7 @@
#ifndef _welcome_copyright_notice_h_
#define _welcome_copyright_notice_h_
-#define COPYRIGHT_NOTICE_CURRENT_YEAR "2012"
+#define COPYRIGHT_NOTICE_CURRENT_YEAR "2013"
/*
This define specifies copyright notice which is displayed by every MySQL
diff --git a/libmysqld/Makefile.am b/libmysqld/Makefile.am
index 7a2c92ed030..530710073cb 100644
--- a/libmysqld/Makefile.am
+++ b/libmysqld/Makefile.am
@@ -81,7 +81,7 @@ sqlsources = derror.cc field.cc field_conv.cc strfunc.cc filesort.cc \
sql_servers.cc event_parse_data.cc opt_table_elimination.cc
# automake misses these
-sql_yacc.cc sql_yacc.h: $(top_srcdir)/sql/sql_yacc.yy
+sql_yacc.cc sql_yacc.$(YACC_HEXT): $(top_srcdir)/sql/sql_yacc.yy
# The following libraries should be included in libmysqld.a
INC_LIB= $(top_builddir)/regex/libregex.la \
diff --git a/mysql-test/Makefile.am b/mysql-test/Makefile.am
index 5afcc380520..516416916d1 100644
--- a/mysql-test/Makefile.am
+++ b/mysql-test/Makefile.am
@@ -65,8 +65,7 @@ nobase_test_DATA = \
lib/My/SysInfo.pm \
lib/My/Suite.pm \
lib/My/CoreDump.pm \
- lib/My/SafeProcess/Base.pm \
- lib/My/SafeProcess/safe_process.pl
+ lib/My/SafeProcess/Base.pm
SUBDIRS = lib/My/SafeProcess
diff --git a/mysql-test/include/get_binlog_dump_thread_id.inc b/mysql-test/include/get_binlog_dump_thread_id.inc
deleted file mode 100644
index bfc8506b39e..00000000000
--- a/mysql-test/include/get_binlog_dump_thread_id.inc
+++ /dev/null
@@ -1,22 +0,0 @@
---exec $MYSQL test -e "show processlist" > $MYSQLTEST_VARDIR/tmp/bl_dump_thread_id
---disable_warnings
-drop table if exists t999;
---enable_warnings
-# Create a table to hold the process list
-create temporary table t999(
- id int,
- user char(255),
- host char(255),
- db char(255),
- Command char(255),
- time int,
- State char(255),
- info char(255)
-);
-# Load processlist into table, headers will create seom warnings
---disable_warnings
---replace_result $MYSQLTEST_VARDIR "."
-eval LOAD DATA INFILE "$MYSQLTEST_VARDIR/tmp/bl_dump_thread_id" into table t999;
---enable_warnings
-let $id = `select Id from t999 where Command="Binlog Dump"`;
-drop table t999;
diff --git a/mysql-test/lib/My/SafeProcess/safe_process.cc b/mysql-test/lib/My/SafeProcess/safe_process.cc
index 4aedba3f3c1..69756cd94c6 100644
--- a/mysql-test/lib/My/SafeProcess/safe_process.cc
+++ b/mysql-test/lib/My/SafeProcess/safe_process.cc
@@ -154,12 +154,19 @@ int main(int argc, char* const argv[] )
pid_t own_pid= getpid();
pid_t parent_pid= getppid();
bool nocore = false;
+ struct sigaction sa,sa_abort;
+ sa.sa_handler= handle_signal;
+ sa.sa_flags= SA_NOCLDSTOP;
+ sigemptyset(&sa.sa_mask);
+
+ sa_abort.sa_handler= handle_abort;
+ sigemptyset(&sa_abort.sa_mask);
/* Install signal handlers */
- signal(SIGTERM, handle_signal);
- signal(SIGINT, handle_signal);
- signal(SIGCHLD, handle_signal);
- signal(SIGABRT, handle_abort);
+ sigaction(SIGTERM, &sa,NULL);
+ sigaction(SIGINT, &sa,NULL);
+ sigaction(SIGCHLD, &sa,NULL);
+ sigaction(SIGABRT, &sa_abort,NULL);
sprintf(safe_process_name, "safe_process[%ld]", (long) own_pid);
diff --git a/mysql-test/lib/My/SafeProcess/safe_process.pl b/mysql-test/lib/My/SafeProcess/safe_process.pl
deleted file mode 100644
index de844e010a1..00000000000
--- a/mysql-test/lib/My/SafeProcess/safe_process.pl
+++ /dev/null
@@ -1,166 +0,0 @@
-#!/usr/bin/perl
-# -*- cperl -*-
-
-# Copyright (c) 2007, 2011, Oracle and/or its affiliates
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-use strict;
-use warnings;
-
-use lib 'lib';
-use My::SafeProcess::Base;
-use POSIX qw(WNOHANG);
-
-###########################################################################
-# Util functions
-###########################################################################
-
-#
-#Print message to stderr
-#
-my $verbose= 0;
-sub message {
- if ($verbose > 0){
- use Time::localtime;
- my $tm= localtime();
- my $timestamp= sprintf("%02d%02d%02d %2d:%02d:%02d",
- $tm->year % 100, $tm->mon+1, $tm->mday,
- $tm->hour, $tm->min, $tm->sec);
- print STDERR $timestamp, " monitor[$$]: ", @_, "\n";
- }
-}
-
-
-###########################################################################
-# Main program
-###########################################################################
-
-my $terminated= 0;
-
-# Protect against being killed in the middle
-# of child creation, just set the terminated flag
-# to make sure the child will be killed off
-# when program is ready to do that
-$SIG{TERM}= sub { message("!Got signal @_"); $terminated= 1; };
-$SIG{INT}= sub { message("!Got signal @_"); $terminated= 1; };
-
-my $parent_pid= getppid();
-
-my $found_double_dash= 0;
-while (my $arg= shift(@ARGV)){
-
- if ($arg =~ /^--$/){
- $found_double_dash= 1;
- last;
- }
- elsif ($arg =~ /^--verbose$/){
- $verbose= 1;
- }
- else {
- die "Unknown option: $arg";
- }
-}
-
-my $path= shift(@ARGV); # Executable
-
-die "usage:\n" .
- " safe_process.pl [opts] -- <path> [<args> [...<args_n>]]"
- unless defined $path || $found_double_dash;
-
-
-message("started");
-#message("path: '$path'");
-message("parent: $parent_pid");
-
-# Start process to monitor
-my $child_pid=
- create_process(
- path => $path,
- args => \@ARGV,
- setpgrp => 1,
- );
-message("Started child $child_pid");
-
-eval {
- sub handle_signal {
- $terminated= 1;
- message("Got signal @_");
-
- # Ignore all signals
- foreach my $name (keys %SIG){
- $SIG{$name}= 'IGNORE';
- }
-
- die "signaled\n";
- };
- local $SIG{TERM}= \&handle_signal;
- local $SIG{INT}= \&handle_signal;
- local $SIG{CHLD}= sub {
- message("Got signal @_");
- kill('KILL', -$child_pid);
- my $ret= waitpid($child_pid, 0);
- if ($? & 127){
- exit(65); # Killed by signal
- }
- exit($? >> 8);
- };
-
- # Monitoring loop
- while(!$terminated) {
-
- # Check if parent is still alive
- if (kill(0, $parent_pid) < 1){
- message("Parent is not alive anymore");
- last;
- }
-
- # Wait for child to terminate but wakeup every
- # second to also check that parent is still alive
- my $ret_pid;
- $ret_pid= waitpid($child_pid, &WNOHANG);
- if ($ret_pid == $child_pid) {
- # Process has exited, collect return status
- my $ret_code= $? >> 8;
- message("Child exit: $ret_code");
- # Exit with exit status of the child
- exit ($ret_code);
- }
- sleep(1);
- }
-};
-if ( $@ ) {
- # The monitoring loop should have been
- # broken by handle_signal
- warn "Unexpected: $@" unless ( $@ =~ /signaled/ );
-}
-
-# Use negative pid in order to kill the whole
-# process group
-#
-my $ret= kill('KILL', -$child_pid);
-message("Killed child: $child_pid, ret: $ret");
-if ($ret > 0) {
- message("Killed child: $child_pid");
- # Wait blocking for the child to return
- my $ret_pid= waitpid($child_pid, 0);
- if ($ret_pid != $child_pid){
- message("unexpected pid $ret_pid returned from waitpid($child_pid)");
- }
-}
-
-message("DONE!");
-exit (1);
-
-
diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl
index 9cd6c6b2b81..e0dd01a9266 100755
--- a/mysql-test/mysql-test-run.pl
+++ b/mysql-test/mysql-test-run.pl
@@ -663,6 +663,10 @@ sub run_test_server ($$$) {
else {
mtr_report("\nRetrying test $tname, ".
"attempt($retries/$opt_retry)...\n");
+ #saving the log file as filename.failed in case of retry
+ my $worker_logdir= $result->{savedir};
+ my $log_file_name=dirname($worker_logdir)."/".$result->{shortname}.".log";
+ rename $log_file_name,$log_file_name.".failed";
delete($result->{result});
$result->{retries}= $retries+1;
$result->write_test($sock, 'TESTCASE');
diff --git a/mysql-test/suite/engines/funcs/r/rpl_row_until.result b/mysql-test/suite/engines/funcs/r/rpl_row_until.result
index 5091a9f6468..5629f5c8cdd 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_row_until.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_row_until.result
@@ -1,204 +1,60 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
-stop slave;
-create table t1(n int not null auto_increment primary key);
-insert into t1 values (1),(2),(3),(4);
-drop table t1;
-create table t2(n int not null auto_increment primary key);
-insert into t2 values (1),(2);
-insert into t2 values (3),(4);
-drop table t2;
-start slave until master_log_file='master-bin.000001', master_log_pos=311;
-select * from t1;
+include/master-slave.inc
+[connection master]
+CREATE TABLE t1(n INT NOT NULL AUTO_INCREMENT PRIMARY KEY);
+INSERT INTO t1 VALUES (1),(2),(3),(4);
+DROP TABLE t1;
+CREATE TABLE t2(n INT NOT NULL AUTO_INCREMENT PRIMARY KEY);
+INSERT INTO t2 VALUES (1),(2);
+INSERT INTO t2 VALUES (3),(4);
+DROP TABLE t2;
+include/stop_slave.inc
+RESET SLAVE;
+START SLAVE UNTIL MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=master_pos_drop_t1
+include/wait_for_slave_sql_to_stop.inc
+SELECT * FROM t1;
n
1
2
3
4
-show slave status;
-Slave_IO_State #
-Master_Host 127.0.0.1
-Master_User root
-Master_Port MASTER_MYPORT
-Connect_Retry 1
-Master_Log_File master-bin.000001
-Read_Master_Log_Pos #
-Relay_Log_File slave-relay-bin.000004
-Relay_Log_Pos #
-Relay_Master_Log_File master-bin.000001
-Slave_IO_Running #
-Slave_SQL_Running No
-Replicate_Do_DB
-Replicate_Ignore_DB
-Replicate_Do_Table
-Replicate_Ignore_Table
-Replicate_Wild_Do_Table
-Replicate_Wild_Ignore_Table
-Last_Errno 0
-Last_Error
-Skip_Counter 0
-Exec_Master_Log_Pos #
-Relay_Log_Space #
-Until_Condition Master
-Until_Log_File master-bin.000001
-Until_Log_Pos 311
-Master_SSL_Allowed No
-Master_SSL_CA_File
-Master_SSL_CA_Path
-Master_SSL_Cert
-Master_SSL_Cipher
-Master_SSL_Key
-Seconds_Behind_Master #
-Master_SSL_Verify_Server_Cert No
-Last_IO_Errno 0
-Last_IO_Error
-Last_SQL_Errno 0
-Last_SQL_Error
-start slave until master_log_file='master-no-such-bin.000001', master_log_pos=291;
-select * from t1;
-n 1
-n 2
-n 3
-n 4
-show slave status;
-Slave_IO_State #
-Master_Host 127.0.0.1
-Master_User root
-Master_Port MASTER_MYPORT
-Connect_Retry 1
-Master_Log_File master-bin.000001
-Read_Master_Log_Pos #
-Relay_Log_File slave-relay-bin.000004
-Relay_Log_Pos #
-Relay_Master_Log_File master-bin.000001
-Slave_IO_Running #
-Slave_SQL_Running No
-Replicate_Do_DB
-Replicate_Ignore_DB
-Replicate_Do_Table
-Replicate_Ignore_Table
-Replicate_Wild_Do_Table
-Replicate_Wild_Ignore_Table
-Last_Errno 0
-Last_Error
-Skip_Counter 0
-Exec_Master_Log_Pos #
-Relay_Log_Space #
-Until_Condition Master
-Until_Log_File master-no-such-bin.000001
-Until_Log_Pos 291
-Master_SSL_Allowed No
-Master_SSL_CA_File
-Master_SSL_CA_Path
-Master_SSL_Cert
-Master_SSL_Cipher
-Master_SSL_Key
-Seconds_Behind_Master #
-Master_SSL_Verify_Server_Cert No
-Last_IO_Errno 0
-Last_IO_Error
-Last_SQL_Errno 0
-Last_SQL_Error
-start slave until relay_log_file='slave-relay-bin.000004', relay_log_pos=728;
-select * from t2;
-show slave status;
-Slave_IO_State #
-Master_Host 127.0.0.1
-Master_User root
-Master_Port MASTER_MYPORT
-Connect_Retry 1
-Master_Log_File master-bin.000001
-Read_Master_Log_Pos #
-Relay_Log_File slave-relay-bin.000004
-Relay_Log_Pos #
-Relay_Master_Log_File master-bin.000001
-Slave_IO_Running #
-Slave_SQL_Running No
-Replicate_Do_DB
-Replicate_Ignore_DB
-Replicate_Do_Table
-Replicate_Ignore_Table
-Replicate_Wild_Do_Table
-Replicate_Wild_Ignore_Table
-Last_Errno 0
-Last_Error
-Skip_Counter 0
-Exec_Master_Log_Pos #
-Relay_Log_Space #
-Until_Condition Relay
-Until_Log_File slave-relay-bin.000004
-Until_Log_Pos 728
-Master_SSL_Allowed No
-Master_SSL_CA_File
-Master_SSL_CA_Path
-Master_SSL_Cert
-Master_SSL_Cipher
-Master_SSL_Key
-Seconds_Behind_Master #
-Master_SSL_Verify_Server_Cert No
-Last_IO_Errno 0
-Last_IO_Error
-Last_SQL_Errno 0
-Last_SQL_Error
-start slave;
-stop slave;
-start slave until master_log_file='master-bin.000001', master_log_pos=740;
-show slave status;
-Slave_IO_State #
-Master_Host 127.0.0.1
-Master_User root
-Master_Port MASTER_MYPORT
-Connect_Retry 1
-Master_Log_File master-bin.000001
-Read_Master_Log_Pos #
-Relay_Log_File slave-relay-bin.000004
-Relay_Log_Pos #
-Relay_Master_Log_File master-bin.000001
-Slave_IO_Running Yes
-Slave_SQL_Running No
-Replicate_Do_DB
-Replicate_Ignore_DB
-Replicate_Do_Table
-Replicate_Ignore_Table
-Replicate_Wild_Do_Table
-Replicate_Wild_Ignore_Table
-Last_Errno 0
-Last_Error
-Skip_Counter 0
-Exec_Master_Log_Pos #
-Relay_Log_Space #
-Until_Condition Master
-Until_Log_File master-bin.000001
-Until_Log_Pos 740
-Master_SSL_Allowed No
-Master_SSL_CA_File
-Master_SSL_CA_Path
-Master_SSL_Cert
-Master_SSL_Cipher
-Master_SSL_Key
-Seconds_Behind_Master #
-Master_SSL_Verify_Server_Cert No
-Last_IO_Errno 0
-Last_IO_Error
-Last_SQL_Errno 0
-Last_SQL_Error
-start slave until master_log_file='master-bin', master_log_pos=561;
+include/check_slave_param.inc [Exec_Master_Log_Pos]
+START SLAVE UNTIL MASTER_LOG_FILE='master-no-such-bin.000001', MASTER_LOG_POS=MASTER_LOG_POS;
+include/wait_for_slave_sql_to_stop.inc
+SELECT * FROM t1;
+n
+1
+2
+3
+4
+include/check_slave_param.inc [Exec_Master_Log_Pos]
+START SLAVE UNTIL RELAY_LOG_FILE='slave-relay-bin.000002', RELAY_LOG_POS=relay_pos_insert1_t2
+include/wait_for_slave_sql_to_stop.inc
+SELECT * FROM t2;
+n
+1
+2
+include/check_slave_param.inc [Exec_Master_Log_Pos]
+START SLAVE;
+include/wait_for_slave_to_start.inc
+include/stop_slave.inc
+START SLAVE SQL_THREAD UNTIL MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=master_pos_create_t2
+include/wait_for_slave_param.inc [Until_Log_Pos]
+include/wait_for_slave_sql_to_stop.inc
+include/check_slave_param.inc [Exec_Master_Log_Pos]
+START SLAVE UNTIL MASTER_LOG_FILE='master-bin', MASTER_LOG_POS=MASTER_LOG_POS;
ERROR HY000: Incorrect parameter or combination of parameters for START SLAVE UNTIL
-start slave until master_log_file='master-bin.000001', master_log_pos=561, relay_log_pos=12;
+START SLAVE UNTIL MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=MASTER_LOG_POS, RELAY_LOG_POS=RELAY_LOG_POS;
ERROR HY000: Incorrect parameter or combination of parameters for START SLAVE UNTIL
-start slave until master_log_file='master-bin.000001';
+START SLAVE UNTIL MASTER_LOG_FILE='master-bin.000001';
ERROR HY000: Incorrect parameter or combination of parameters for START SLAVE UNTIL
-start slave until relay_log_file='slave-relay-bin.000002';
+START SLAVE UNTIL RELAY_LOG_FILE='slave-relay-bin.000009';
ERROR HY000: Incorrect parameter or combination of parameters for START SLAVE UNTIL
-start slave until relay_log_file='slave-relay-bin.000002', master_log_pos=561;
+START SLAVE UNTIL RELAY_LOG_FILE='slave-relay-bin.000002', MASTER_LOG_POS=MASTER_LOG_POS;
ERROR HY000: Incorrect parameter or combination of parameters for START SLAVE UNTIL
-start slave sql_thread;
-start slave until master_log_file='master-bin.000001', master_log_pos=740;
+START SLAVE;
+START SLAVE UNTIL MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=MASTER_LOG_POS;
Warnings:
-Level Note
-Code 1254
-Message Slave is already running
+Note 1254 Slave is already running
+include/stop_slave.inc
+RESET SLAVE;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/t/disabled.def b/mysql-test/suite/engines/funcs/t/disabled.def
index 5b6e3f6a281..586abe7171f 100644
--- a/mysql-test/suite/engines/funcs/t/disabled.def
+++ b/mysql-test/suite/engines/funcs/t/disabled.def
@@ -72,7 +72,6 @@ rpl000017 : Result Difference Due to Change in .inc file
rpl_skip_error : Result Difference Due to Change in .inc file
rpl_sp : Result Difference Due to Change in .inc file
-rpl_row_until : Test Present in rpl suite as well . Test Fails with table t2 not found.
rpl_loaddata_s : Test Present in rpl suite as well . Test Fails due to bin log truncation.
rpl_log_pos : Test Present in rpl suite as well . Test Fails due to bin log truncation.
rpl_row_NOW : Result Difference Due to Change in .inc file
diff --git a/mysql-test/suite/engines/funcs/t/rpl_row_until.test b/mysql-test/suite/engines/funcs/t/rpl_row_until.test
index ccd9ce11637..bf38bd487ea 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_row_until.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_row_until.test
@@ -2,90 +2,126 @@
-- source include/have_binlog_format_row.inc
-- source include/master-slave.inc
-# Test is dependent on binlog positions
+# Note: The test is dependent on binlog positions
-# prepare version for substitutions
-let $VERSION=`select version()`;
+# Create some events on master
+connection master;
+CREATE TABLE t1(n INT NOT NULL AUTO_INCREMENT PRIMARY KEY);
+INSERT INTO t1 VALUES (1),(2),(3),(4);
+DROP TABLE t1;
+# Save master log position for query DROP TABLE t1
+save_master_pos;
+let $master_pos_drop_t1= query_get_value(SHOW BINLOG EVENTS, Pos, 7);
+let $master_log_file= query_get_value(SHOW BINLOG EVENTS, Log_name, 7);
-# stop slave before he will start replication also sync with master
-# for avoiding undetermenistic behaviour
+CREATE TABLE t2(n INT NOT NULL AUTO_INCREMENT PRIMARY KEY);
+# Save master log position for query CREATE TABLE t2
save_master_pos;
-connection slave;
-sync_with_master;
-stop slave;
+let $master_pos_create_t2= query_get_value(SHOW BINLOG EVENTS, Pos, 8);
+
+INSERT INTO t2 VALUES (1),(2);
+save_master_pos;
+# Save master log position for query INSERT INTO t2 VALUES (1),(2);
+let $master_pos_insert1_t2= query_get_value(SHOW BINLOG EVENTS, End_log_pos, 12);
+sync_slave_with_master;
+
+# Save relay log position for query INSERT INTO t2 VALUES (1),(2);
+let $relay_pos_insert1_t2= query_get_value(show slave status, Relay_Log_Pos, 1);
connection master;
-# create some events on master
-create table t1(n int not null auto_increment primary key);
-insert into t1 values (1),(2),(3),(4);
-drop table t1;
-create table t2(n int not null auto_increment primary key);
-insert into t2 values (1),(2);
-insert into t2 values (3),(4);
-drop table t2;
-
-# try to replicate all queries until drop of t1
+INSERT INTO t2 VALUES (3),(4);
+DROP TABLE t2;
+# Save master log position for query INSERT INTO t2 VALUES (1),(2);
+let $master_pos_drop_t2= query_get_value(SHOW BINLOG EVENTS, End_log_pos, 17);
+sync_slave_with_master;
+
+--source include/stop_slave.inc
+# Reset slave.
+RESET SLAVE;
+--disable_query_log
+eval CHANGE MASTER TO MASTER_USER='root', MASTER_CONNECT_RETRY=1, MASTER_HOST='127.0.0.1', MASTER_PORT=$MASTER_MYPORT;
+--enable_query_log
+
+# Try to replicate all queries until drop of t1
connection slave;
-start slave until master_log_file='master-bin.000001', master_log_pos=311;
-sleep 2;
-wait_for_slave_to_stop;
-# here table should be still not deleted
-select * from t1;
---vertical_results
---replace_result $MASTER_MYPORT MASTER_MYPORT
---replace_column 1 # 7 # 9 # 11 # 22 # 23 # 33 #
-show slave status;
-
-# this should fail right after start
-start slave until master_log_file='master-no-such-bin.000001', master_log_pos=291;
+echo START SLAVE UNTIL MASTER_LOG_FILE='$master_log_file', MASTER_LOG_POS=master_pos_drop_t1;
+--disable_query_log
+eval START SLAVE UNTIL MASTER_LOG_FILE='$master_log_file', MASTER_LOG_POS=$master_pos_drop_t1;
+--enable_query_log
+--source include/wait_for_slave_sql_to_stop.inc
+
+# Here table should be still not deleted
+SELECT * FROM t1;
+--let $slave_param= Exec_Master_Log_Pos
+--let $slave_param_value= $master_pos_drop_t1
+--source include/check_slave_param.inc
+
+# This should fail right after start
+--replace_result 291 MASTER_LOG_POS
+START SLAVE UNTIL MASTER_LOG_FILE='master-no-such-bin.000001', MASTER_LOG_POS=291;
+--source include/wait_for_slave_sql_to_stop.inc
# again this table should be still not deleted
-select * from t1;
-sleep 2;
-wait_for_slave_to_stop;
---vertical_results
---replace_result $MASTER_MYPORT MASTER_MYPORT
---replace_column 1 # 7 # 9 # 11 # 22 # 23 # 33 #
-show slave status;
-
-# try replicate all up to and not including the second insert to t2;
-start slave until relay_log_file='slave-relay-bin.000004', relay_log_pos=728;
-sleep 2;
-wait_for_slave_to_stop;
-select * from t2;
---vertical_results
---replace_result $MASTER_MYPORT MASTER_MYPORT
---replace_column 1 # 7 # 9 # 11 # 22 # 23 # 33 #
-show slave status;
+SELECT * FROM t1;
+
+--let $slave_param= Exec_Master_Log_Pos
+--let $slave_param_value= $master_pos_drop_t1
+--source include/check_slave_param.inc
+
+# Try replicate all up to and not including the second insert to t2;
+echo START SLAVE UNTIL RELAY_LOG_FILE='slave-relay-bin.000002', RELAY_LOG_POS=relay_pos_insert1_t2;
+--disable_query_log
+eval START SLAVE UNTIL RELAY_LOG_FILE='slave-relay-bin.000002', RELAY_LOG_POS=$relay_pos_insert1_t2;
+--enable_query_log
+--source include/wait_for_slave_sql_to_stop.inc
+SELECT * FROM t2;
+
+--let $slave_param= Exec_Master_Log_Pos
+--let $slave_param_value= $master_pos_insert1_t2
+--source include/check_slave_param.inc
# clean up
-start slave;
+START SLAVE;
+--source include/wait_for_slave_to_start.inc
connection master;
-save_master_pos;
-connection slave;
-sync_with_master;
-stop slave;
+sync_slave_with_master;
+--source include/stop_slave.inc
-# this should stop immediately as we are already there
-start slave until master_log_file='master-bin.000001', master_log_pos=740;
-sleep 2;
-wait_for_slave_to_stop;
+# This should stop immediately as we are already there
+echo START SLAVE SQL_THREAD UNTIL MASTER_LOG_FILE='$master_log_file', MASTER_LOG_POS=master_pos_create_t2;
+--disable_query_log
+eval START SLAVE SQL_THREAD UNTIL MASTER_LOG_FILE='$master_log_file', MASTER_LOG_POS=$master_pos_create_t2;
+--enable_query_log
+let $slave_param= Until_Log_Pos;
+let $slave_param_value= $master_pos_create_t2;
+--source include/wait_for_slave_param.inc
+--source include/wait_for_slave_sql_to_stop.inc
# here the sql slave thread should be stopped
---vertical_results
---replace_result $MASTER_MYPORT MASTER_MYPORT bin.000005 bin.000004 bin.000006 bin.000004 bin.000007 bin.000004
---replace_column 1 # 7 # 9 # 22 # 23 # 33 #
-show slave status;
+--let $slave_param= Exec_Master_Log_Pos
+--let $slave_param_value= $master_pos_drop_t2
+--source include/check_slave_param.inc
#testing various error conditions
+--replace_result 561 MASTER_LOG_POS
--error 1277
-start slave until master_log_file='master-bin', master_log_pos=561;
+START SLAVE UNTIL MASTER_LOG_FILE='master-bin', MASTER_LOG_POS=561;
+--replace_result 561 MASTER_LOG_POS 12 RELAY_LOG_POS
--error 1277
-start slave until master_log_file='master-bin.000001', master_log_pos=561, relay_log_pos=12;
+START SLAVE UNTIL MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=561, RELAY_LOG_POS=12;
--error 1277
-start slave until master_log_file='master-bin.000001';
+START SLAVE UNTIL MASTER_LOG_FILE='master-bin.000001';
--error 1277
-start slave until relay_log_file='slave-relay-bin.000002';
+START SLAVE UNTIL RELAY_LOG_FILE='slave-relay-bin.000009';
+--replace_result 561 MASTER_LOG_POS
--error 1277
-start slave until relay_log_file='slave-relay-bin.000002', master_log_pos=561;
+START SLAVE UNTIL RELAY_LOG_FILE='slave-relay-bin.000002', MASTER_LOG_POS=561;
# Warning should be given for second command
-start slave sql_thread;
-start slave until master_log_file='master-bin.000001', master_log_pos=740;
+START SLAVE;
+--replace_result 740 MASTER_LOG_POS
+START SLAVE UNTIL MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=740;
+
+--source include/stop_slave.inc
+# Clear slave IO error.
+RESET SLAVE;
+
+--let $rpl_only_running_threads= 1
+--source include/rpl_end.inc
diff --git a/mysql-test/suite/innodb/r/innodb_bug14676111.result b/mysql-test/suite/innodb/r/innodb_bug14676111.result
new file mode 100644
index 00000000000..ebecd1d00cb
--- /dev/null
+++ b/mysql-test/suite/innodb/r/innodb_bug14676111.result
@@ -0,0 +1,53 @@
+drop table if exists t1;
+CREATE TABLE t1 (a int not null primary key) engine=InnoDB;
+set global innodb_limit_optimistic_insert_debug = 2;
+insert into t1 values (1);
+insert into t1 values (5);
+insert into t1 values (4);
+insert into t1 values (3);
+insert into t1 values (2);
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
+DATA_LENGTH / 16384
+10.0000
+delete from t1 where a=4;
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
+DATA_LENGTH / 16384
+8.0000
+delete from t1 where a=5;
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
+DATA_LENGTH / 16384
+5.0000
+set global innodb_limit_optimistic_insert_debug = 10000;
+delete from t1 where a=2;
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
+DATA_LENGTH / 16384
+3.0000
+insert into t1 values (2);
+delete from t1 where a=2;
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
+DATA_LENGTH / 16384
+2.0000
+insert into t1 values (2);
+delete from t1 where a=2;
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
+DATA_LENGTH / 16384
+1.0000
+drop table t1;
diff --git a/mysql-test/suite/innodb/t/innodb_bug14676111.test b/mysql-test/suite/innodb/t/innodb_bug14676111.test
new file mode 100644
index 00000000000..fadd111fdc9
--- /dev/null
+++ b/mysql-test/suite/innodb/t/innodb_bug14676111.test
@@ -0,0 +1,128 @@
+# Test for bug #14676111: WRONG PAGE_LEVEL WRITTEN FOR UPPER THAN FATHER PAGE AT BTR_LIFT_PAGE_UP()
+
+-- source include/have_innodb.inc
+-- source include/have_debug.inc
+
+if (`select count(*)=0 from information_schema.global_variables where variable_name = 'INNODB_LIMIT_OPTIMISTIC_INSERT_DEBUG'`)
+{
+ --skip Test requires InnoDB built with UNIV_DEBUG definition.
+}
+
+--disable_query_log
+set @old_innodb_limit_optimistic_insert_debug = @@innodb_limit_optimistic_insert_debug;
+--enable_query_log
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+CREATE TABLE t1 (a int not null primary key) engine=InnoDB;
+
+let $wait_condition=
+ SELECT VARIABLE_VALUE < 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS
+ WHERE VARIABLE_NAME = 'INNODB_PURGE_TRX_ID_AGE';
+
+#
+# make 4 leveled straight tree
+#
+set global innodb_limit_optimistic_insert_debug = 2;
+insert into t1 values (1);
+insert into t1 values (5);
+#current tree form
+# (1, 5)
+
+insert into t1 values (4);
+#records in a page is limited to 2 artificially. root rise occurs
+#current tree form
+# (1, 5)
+#(1, 4) (5)
+
+insert into t1 values (3);
+#current tree form
+# (1, 5)
+# (1, 4) (5)
+#(1, 3) (4) (5)
+
+insert into t1 values (2);
+#current tree form
+# (1, 5)
+# (1, 4) (5)
+# (1, 3) (4) (5)
+#(1, 2) (3) (4) (5)
+
+analyze table t1;
+select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
+
+delete from t1 where a=4;
+--source include/wait_condition.inc
+#deleting 1 record of 2 records don't cause merge artificially.
+#current tree form
+# (1, 5)
+# (1) (5)
+# (1, 3) (5)
+#(1, 2) (3) (5)
+
+analyze table t1;
+select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
+
+delete from t1 where a=5;
+--source include/wait_condition.inc
+#deleting 1 record of 2 records don't cause merge artificially.
+#current tree form
+# (1)
+# (1)
+# (1, 3) <- lift up this level next, when deleting node ptr
+#(1, 2) (3) <- merged next
+
+analyze table t1;
+select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
+
+#
+# cause merge at level 0
+#
+
+#disable the artificial limitation of records in a page
+set global innodb_limit_optimistic_insert_debug = 10000;
+delete from t1 where a=2;
+--source include/wait_condition.inc
+#merge page occurs. and lift up occurs.
+#current tree form
+# (1)
+# (1)
+# (1, 3)
+
+analyze table t1;
+select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
+
+insert into t1 values (2);
+#current tree form
+# (1)
+# (1) <- lift up this level next, because it is not root
+# (1, 2, 3)
+
+delete from t1 where a=2;
+--source include/wait_condition.inc
+#current tree form
+# (1)
+# (1, 3)
+
+analyze table t1;
+select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
+
+insert into t1 values (2);
+#current tree form
+# (1)
+# (1, 2, 3) <- lift up this level next, because the father is root
+
+delete from t1 where a=2;
+--source include/wait_condition.inc
+#current tree form
+# (1, 3)
+
+analyze table t1;
+select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
+
+drop table t1;
+
+--disable_query_log
+set global innodb_limit_optimistic_insert_debug = @old_innodb_limit_optimistic_insert_debug;
+--enable_query_log
diff --git a/mysql-test/suite/innodb_plugin/r/innodb-index.result b/mysql-test/suite/innodb_plugin/r/innodb-index.result
index bf7c382327b..1200215fca4 100644
--- a/mysql-test/suite/innodb_plugin/r/innodb-index.result
+++ b/mysql-test/suite/innodb_plugin/r/innodb-index.result
@@ -963,7 +963,7 @@ Table Op Msg_type Msg_text
test.t1 check status OK
explain select * from t1 where b like 'adfd%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL b NULL NULL NULL 15 Using where
+1 SIMPLE t1 range b b 769 NULL # Using where
create table t2(a int, b varchar(255), primary key(a,b)) engine=innodb;
insert into t2 select a,left(b,255) from t1;
drop table t1;
diff --git a/mysql-test/suite/innodb_plugin/r/innodb_bug14676111.result b/mysql-test/suite/innodb_plugin/r/innodb_bug14676111.result
new file mode 100644
index 00000000000..ebecd1d00cb
--- /dev/null
+++ b/mysql-test/suite/innodb_plugin/r/innodb_bug14676111.result
@@ -0,0 +1,53 @@
+drop table if exists t1;
+CREATE TABLE t1 (a int not null primary key) engine=InnoDB;
+set global innodb_limit_optimistic_insert_debug = 2;
+insert into t1 values (1);
+insert into t1 values (5);
+insert into t1 values (4);
+insert into t1 values (3);
+insert into t1 values (2);
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
+DATA_LENGTH / 16384
+10.0000
+delete from t1 where a=4;
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
+DATA_LENGTH / 16384
+8.0000
+delete from t1 where a=5;
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
+DATA_LENGTH / 16384
+5.0000
+set global innodb_limit_optimistic_insert_debug = 10000;
+delete from t1 where a=2;
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
+DATA_LENGTH / 16384
+3.0000
+insert into t1 values (2);
+delete from t1 where a=2;
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
+DATA_LENGTH / 16384
+2.0000
+insert into t1 values (2);
+delete from t1 where a=2;
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
+DATA_LENGTH / 16384
+1.0000
+drop table t1;
diff --git a/mysql-test/suite/innodb_plugin/r/innodb_mysql.result b/mysql-test/suite/innodb_plugin/r/innodb_mysql.result
index 86d83f82b76..01809e23511 100644
--- a/mysql-test/suite/innodb_plugin/r/innodb_mysql.result
+++ b/mysql-test/suite/innodb_plugin/r/innodb_mysql.result
@@ -343,7 +343,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL PRIMARY 5 NULL 4 Using index; Using temporary
explain select distinct f1, f2 from t1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL PRIMARY 5 NULL 3 Using index for group-by; Using temporary
+1 SIMPLE t1 index NULL PRIMARY 5 NULL 4 Using index
drop table t1;
CREATE TABLE t1 (id int(11) NOT NULL PRIMARY KEY, name varchar(20),
INDEX (name));
diff --git a/mysql-test/suite/innodb_plugin/t/disabled.def b/mysql-test/suite/innodb_plugin/t/disabled.def
index d692eccac74..f1e4c5ccd15 100644
--- a/mysql-test/suite/innodb_plugin/t/disabled.def
+++ b/mysql-test/suite/innodb_plugin/t/disabled.def
@@ -12,3 +12,5 @@
innodb_bug52745: Disabled as this has valgrind failures (also in MySQL 5.1.50)
innodb_bug14007649: Disabled until merging with XtraDB 5.1.65
+innodb_mysql: disabled until merge with XtraDB 5.1.68
+innodb-index: disabled until merge with XtraDB 5.1.68
diff --git a/mysql-test/suite/innodb_plugin/t/innodb-index.test b/mysql-test/suite/innodb_plugin/t/innodb-index.test
index d4310093bfd..fa47087903e 100644
--- a/mysql-test/suite/innodb_plugin/t/innodb-index.test
+++ b/mysql-test/suite/innodb_plugin/t/innodb-index.test
@@ -420,6 +420,10 @@ select a,
length(b),b=left(repeat(d,100*a),65535),length(c),c=repeat(d,20*a),d from t1;
show create table t1;
check table t1;
+
+# In my local machine and in pb2 machine only the key_len field is differing.
+# So masking this problematic output.
+--replace_column 9 #
explain select * from t1 where b like 'adfd%';
#
diff --git a/mysql-test/suite/innodb_plugin/t/innodb_bug14676111.test b/mysql-test/suite/innodb_plugin/t/innodb_bug14676111.test
new file mode 100644
index 00000000000..ae871e3b63e
--- /dev/null
+++ b/mysql-test/suite/innodb_plugin/t/innodb_bug14676111.test
@@ -0,0 +1,128 @@
+# Test for bug #14676111: WRONG PAGE_LEVEL WRITTEN FOR UPPER THAN FATHER PAGE AT BTR_LIFT_PAGE_UP()
+
+-- source include/have_innodb_plugin.inc
+-- source include/have_debug.inc
+
+if (`select count(*)=0 from information_schema.global_variables where variable_name = 'INNODB_LIMIT_OPTIMISTIC_INSERT_DEBUG'`)
+{
+ --skip Test requires InnoDB built with UNIV_DEBUG definition.
+}
+
+--disable_query_log
+set @old_innodb_limit_optimistic_insert_debug = @@innodb_limit_optimistic_insert_debug;
+--enable_query_log
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+CREATE TABLE t1 (a int not null primary key) engine=InnoDB;
+
+let $wait_condition=
+ SELECT VARIABLE_VALUE < 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS
+ WHERE VARIABLE_NAME = 'INNODB_PURGE_TRX_ID_AGE';
+
+#
+# make 4 leveled straight tree
+#
+set global innodb_limit_optimistic_insert_debug = 2;
+insert into t1 values (1);
+insert into t1 values (5);
+#current tree form
+# (1, 5)
+
+insert into t1 values (4);
+#records in a page is limited to 2 artificially. root rise occurs
+#current tree form
+# (1, 5)
+#(1, 4) (5)
+
+insert into t1 values (3);
+#current tree form
+# (1, 5)
+# (1, 4) (5)
+#(1, 3) (4) (5)
+
+insert into t1 values (2);
+#current tree form
+# (1, 5)
+# (1, 4) (5)
+# (1, 3) (4) (5)
+#(1, 2) (3) (4) (5)
+
+analyze table t1;
+select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
+
+delete from t1 where a=4;
+--source include/wait_condition.inc
+#deleting 1 record of 2 records don't cause merge artificially.
+#current tree form
+# (1, 5)
+# (1) (5)
+# (1, 3) (5)
+#(1, 2) (3) (5)
+
+analyze table t1;
+select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
+
+delete from t1 where a=5;
+--source include/wait_condition.inc
+#deleting 1 record of 2 records don't cause merge artificially.
+#current tree form
+# (1)
+# (1)
+# (1, 3) <- lift up this level next, when deleting node ptr
+#(1, 2) (3) <- merged next
+
+analyze table t1;
+select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
+
+#
+# cause merge at level 0
+#
+
+#disable the artificial limitation of records in a page
+set global innodb_limit_optimistic_insert_debug = 10000;
+delete from t1 where a=2;
+--source include/wait_condition.inc
+#a page merge occurs, and a lift up occurs.
+#current tree form
+# (1)
+# (1)
+# (1, 3)
+
+analyze table t1;
+select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
+
+insert into t1 values (2);
+#current tree form
+# (1)
+# (1) <- lift up this level next, because it is not root
+# (1, 2, 3)
+
+delete from t1 where a=2;
+--source include/wait_condition.inc
+#current tree form
+# (1)
+# (1, 3)
+
+analyze table t1;
+select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
+
+insert into t1 values (2);
+#current tree form
+# (1)
+# (1, 2, 3) <- lift up this level next, because the father is root
+
+delete from t1 where a=2;
+--source include/wait_condition.inc
+#current tree form
+# (1, 3)
+
+analyze table t1;
+select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
+
+drop table t1;
+
+--disable_query_log
+set global innodb_limit_optimistic_insert_debug = @old_innodb_limit_optimistic_insert_debug;
+--enable_query_log
diff --git a/mysql-test/suite/parts/r/partition_alter4_innodb.result b/mysql-test/suite/parts/r/partition_alter4_innodb.result
index 5d3143e35bb..b0f340664b7 100644
--- a/mysql-test/suite/parts/r/partition_alter4_innodb.result
+++ b/mysql-test/suite/parts/r/partition_alter4_innodb.result
@@ -37566,7 +37566,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1;
Table Op Msg_type Msg_text
-test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
test.t1 optimize status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -38026,7 +38026,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1;
Table Op Msg_type Msg_text
-test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
test.t1 optimize status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -38497,7 +38497,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1;
Table Op Msg_type Msg_text
-test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
test.t1 optimize status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -38969,7 +38969,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1;
Table Op Msg_type Msg_text
-test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
test.t1 optimize status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -39435,7 +39435,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1;
Table Op Msg_type Msg_text
-test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
test.t1 optimize status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -39907,7 +39907,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1;
Table Op Msg_type Msg_text
-test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
test.t1 optimize status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -40384,7 +40384,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1;
Table Op Msg_type Msg_text
-test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
test.t1 optimize status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -40859,7 +40859,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1;
Table Op Msg_type Msg_text
-test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
test.t1 optimize status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -41324,7 +41324,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
-test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
test.t1 optimize status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -41784,7 +41784,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
-test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
test.t1 optimize status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -42255,7 +42255,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
-test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
test.t1 optimize status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -42727,7 +42727,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
-test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
test.t1 optimize status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -43193,7 +43193,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
-test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
test.t1 optimize status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -43665,7 +43665,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
-test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
test.t1 optimize status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -44142,7 +44142,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
-test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
test.t1 optimize status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -44617,7 +44617,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
-test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
test.t1 optimize status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -52582,7 +52582,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION ALL;
Table Op Msg_type Msg_text
-test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
test.t1 optimize status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -53042,7 +53042,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION ALL;
Table Op Msg_type Msg_text
-test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
test.t1 optimize status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -53513,7 +53513,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION ALL;
Table Op Msg_type Msg_text
-test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
test.t1 optimize status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -53985,7 +53985,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION ALL;
Table Op Msg_type Msg_text
-test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
test.t1 optimize status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -54451,7 +54451,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION ALL;
Table Op Msg_type Msg_text
-test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
test.t1 optimize status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -54923,7 +54923,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION ALL;
Table Op Msg_type Msg_text
-test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
test.t1 optimize status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -55400,7 +55400,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION ALL;
Table Op Msg_type Msg_text
-test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
test.t1 optimize status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -55875,7 +55875,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION ALL;
Table Op Msg_type Msg_text
-test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
test.t1 optimize status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
diff --git a/mysql-test/suite/rpl/r/rpl_row_until.result b/mysql-test/suite/rpl/r/rpl_row_until.result
index 5629f5c8cdd..be8f17c6f01 100644
--- a/mysql-test/suite/rpl/r/rpl_row_until.result
+++ b/mysql-test/suite/rpl/r/rpl_row_until.result
@@ -27,7 +27,6 @@ n
3
4
include/check_slave_param.inc [Exec_Master_Log_Pos]
-START SLAVE UNTIL RELAY_LOG_FILE='slave-relay-bin.000002', RELAY_LOG_POS=relay_pos_insert1_t2
include/wait_for_slave_sql_to_stop.inc
SELECT * FROM t2;
n
diff --git a/mysql-test/suite/rpl/t/disabled.def b/mysql-test/suite/rpl/t/disabled.def
index 0a8f3cb2a50..d33f85091ff 100644
--- a/mysql-test/suite/rpl/t/disabled.def
+++ b/mysql-test/suite/rpl/t/disabled.def
@@ -12,5 +12,5 @@
rpl_row_create_table : Bug#11759274 Feb 27 2010 andrei failed different way than earlier with bug#45576
rpl_get_master_version_and_clock : Bug#11766137 Jan 05 2011 joro Valgrind warnings rpl_get_master_version_and_clock
-rpl_row_until : BUG#59543 Jan 26 2011 alfranio Replication test from eits suite rpl_row_until times out
rpl_stm_until : BUG#59543 Jan 26 2011 alfranio Replication test from eits suite rpl_row_until times out
+rpl_row_until @macosx : BUG#15965353 RPL.RPL_ROW_UNTIL FAILS ON PB2 , PLATFORM= MACOSX10.6 X86_64 MAX
diff --git a/mysql-test/suite/rpl/t/rpl_row_until.test b/mysql-test/suite/rpl/t/rpl_row_until.test
index bf38bd487ea..eaa99c29694 100644
--- a/mysql-test/suite/rpl/t/rpl_row_until.test
+++ b/mysql-test/suite/rpl/t/rpl_row_until.test
@@ -26,6 +26,7 @@ let $master_pos_insert1_t2= query_get_value(SHOW BINLOG EVENTS, End_log_pos, 12)
sync_slave_with_master;
# Save relay log position for query INSERT INTO t2 VALUES (1),(2);
+let $relay_log_file= query_get_value(show slave status, Relay_Log_File,1);
let $relay_pos_insert1_t2= query_get_value(show slave status, Relay_Log_Pos, 1);
connection master;
@@ -68,9 +69,8 @@ SELECT * FROM t1;
--source include/check_slave_param.inc
# Try replicate all up to and not including the second insert to t2;
-echo START SLAVE UNTIL RELAY_LOG_FILE='slave-relay-bin.000002', RELAY_LOG_POS=relay_pos_insert1_t2;
--disable_query_log
-eval START SLAVE UNTIL RELAY_LOG_FILE='slave-relay-bin.000002', RELAY_LOG_POS=$relay_pos_insert1_t2;
+eval START SLAVE UNTIL RELAY_LOG_FILE='$relay_log_file', RELAY_LOG_POS=$relay_pos_insert1_t2;
--enable_query_log
--source include/wait_for_slave_sql_to_stop.inc
SELECT * FROM t2;
diff --git a/mysys/errors.c b/mysys/errors.c
index 6ff118fd81f..a2f4ce298f1 100644
--- a/mysys/errors.c
+++ b/mysys/errors.c
@@ -108,12 +108,12 @@ void init_glob_errs()
void wait_for_free_space(const char *filename, int errors)
{
if (errors == 0)
- my_error(EE_DISK_FULL,MYF(ME_BELL | ME_NOREFRESH),
+ my_error(EE_DISK_FULL,MYF(ME_BELL | ME_NOREFRESH | ME_JUST_WARNING),
filename,my_errno,MY_WAIT_FOR_USER_TO_FIX_PANIC);
if (!(errors % MY_WAIT_GIVE_USER_A_MESSAGE))
my_printf_error(EE_DISK_FULL,
"Retry in %d secs. Message reprinted in %d secs",
- MYF(ME_BELL | ME_NOREFRESH),
+ MYF(ME_BELL | ME_NOREFRESH | ME_JUST_WARNING),
MY_WAIT_FOR_USER_TO_FIX_PANIC,
MY_WAIT_GIVE_USER_A_MESSAGE * MY_WAIT_FOR_USER_TO_FIX_PANIC );
VOID(sleep(MY_WAIT_FOR_USER_TO_FIX_PANIC));
diff --git a/mysys/mf_pack.c b/mysys/mf_pack.c
index e6b576b6d96..4521216cdc7 100644
--- a/mysys/mf_pack.c
+++ b/mysys/mf_pack.c
@@ -1,4 +1,5 @@
-/* Copyright (C) 2000 MySQL AB
+/* Copyright (c) 2000, 2013, Oracle and/or its affiliates.
+ Copyright (c) 2012, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -11,7 +12,8 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+*/
#include "mysys_priv.h"
#include <m_string.h>
@@ -519,10 +521,10 @@ char *intern_filename(char *to, const char *from)
char buff[FN_REFLEN + 1];
if (from == to)
{ /* Dirname may destroy from */
- strmov(buff,from);
+ (void) strnmov(buff, from, FN_REFLEN);
from=buff;
}
length= dirname_part(to, from, &to_length); /* Copy dirname & fix chars */
- (void) strmov(to + to_length,from+length);
+ (void) strnmov(to + to_length, from + length, FN_REFLEN - to_length);
return (to);
} /* intern_filename */
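
The strnmov() calls introduced above bound each copy by FN_REFLEN (or by the space remaining after the dirname), so an over-long path is truncated instead of overrunning the on-stack buffer. A minimal standalone sketch of the bounded-copy idea; bounded_copy() and the FN_REFLEN value here are illustrative stand-ins, not the real mysys strnmov(), whose termination semantics differ slightly:

    #include <stdio.h>
    #include <string.h>

    #define FN_REFLEN 512                     /* illustrative limit, not the mysys value */

    /* Copy at most n-1 bytes and always NUL-terminate; returns the end of dst. */
    static char *bounded_copy(char *dst, const char *src, size_t n)
    {
      size_t len= 0;
      while (len < n - 1 && src[len] != '\0')
        len++;
      memcpy(dst, src, len);
      dst[len]= '\0';
      return dst + len;
    }

    int main(void)
    {
      char buff[FN_REFLEN + 1];
      char huge[2 * FN_REFLEN];

      memset(huge, 'a', sizeof(huge) - 1);    /* a path twice as long as the buffer */
      huge[sizeof(huge) - 1]= '\0';

      /* An unbounded strcpy(buff, huge) would overflow; the bounded copy truncates. */
      bounded_copy(buff, huge, sizeof(buff));
      printf("copied %zu of %zu bytes\n", strlen(buff), strlen(huge));
      return 0;
    }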
diff --git a/mysys/my_lib.c b/mysys/my_lib.c
index c18d14fb549..8bd9a289176 100644
--- a/mysys/my_lib.c
+++ b/mysys/my_lib.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2000 MySQL AB
+/* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -11,7 +11,8 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+*/
/* TODO: check for overun of memory for names. */
/* Convert MSDOS-TIME to standar time_t (still needed?) */
@@ -103,7 +104,7 @@ MY_DIR *my_dir(const char *path, myf MyFlags)
MEM_ROOT *names_storage;
DIR *dirp;
struct dirent *dp;
- char tmp_path[FN_REFLEN+1],*tmp_file;
+ char tmp_path[FN_REFLEN + 2], *tmp_file;
#ifdef THREAD
char dirent_tmp[sizeof(struct dirent)+_POSIX_PATH_MAX+1];
#endif
@@ -215,10 +216,11 @@ char * directory_file_name (char * dst, const char *src)
/* Process as Unix format: just remove test the final slash. */
char * end;
+ DBUG_ASSERT(strlen(src) < (FN_REFLEN + 1));
if (src[0] == 0)
src= (char*) "."; /* Use empty as current */
- end=strmov(dst, src);
+ end= strnmov(dst, src, FN_REFLEN + 1);
if (end[-1] != FN_LIBCHAR)
{
end[0]=FN_LIBCHAR; /* Add last '/' */
diff --git a/mysys/my_write.c b/mysys/my_write.c
index 11708d8f10e..14d16955a0a 100644
--- a/mysys/my_write.c
+++ b/mysys/my_write.c
@@ -44,7 +44,7 @@ size_t my_write(int Filedes, const uchar *Buffer, size_t Count, myf MyFlags)
and the number pf written bytes to -1.
*/
DBUG_EXECUTE_IF ("simulate_file_write_error",
- {
+ if (!errors) {
errno= ENOSPC;
writenbytes= (size_t) -1;
});
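
The added if (!errors) guard makes the injected ENOSPC fire only on the first write attempt, so the retry path is exercised without every subsequent attempt failing as well. A standalone sketch of that fault-injection pattern; fake_write() and the retry loop are illustrative, not the mysys code:

    #include <errno.h>
    #include <stdio.h>

    /* Pretend to write; fail with ENOSPC only while no error has been seen yet. */
    static int fake_write(int errors)
    {
      if (!errors)              /* mirrors the "if (!errors)" guard in the patch */
      {
        errno= ENOSPC;
        return -1;              /* simulated disk-full failure */
      }
      return 0;                 /* later retries succeed */
    }

    int main(void)
    {
      int errors= 0;
      while (fake_write(errors) == -1)
      {
        errors++;
        fprintf(stderr, "retry %d after ENOSPC\n", errors);
      }
      printf("write succeeded after %d retries\n", errors);
      return 0;
    }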
diff --git a/scripts/mysqld_safe.sh b/scripts/mysqld_safe.sh
index 75589a2d004..48c98f2fde3 100644
--- a/scripts/mysqld_safe.sh
+++ b/scripts/mysqld_safe.sh
@@ -393,7 +393,7 @@ then
log_notice "Logging to '$err_log'."
logging=file
- if [ ! -e "$err_log" ]; then # if error log already exists,
+ if [ ! -f "$err_log" ]; then # if error log already exists,
touch "$err_log" # we just append. otherwise,
chmod "$fmode" "$err_log" # fix the permissions here!
fi
@@ -599,7 +599,7 @@ do
eval_log_error "$cmd"
- if [ $want_syslog -eq 0 -a ! -e "$err_log" ]; then
+ if [ $want_syslog -eq 0 -a ! -f "$err_log" ]; then
touch "$err_log" # hypothetical: log was renamed but not
chown $user "$err_log" # flushed yet. we'd recreate it with
chmod "$fmode" "$err_log" # wrong owner next time we log, so set
diff --git a/sql/.cvsignore b/sql/.cvsignore
index 3e2f44f5a10..7fcd82ab952 100644
--- a/sql/.cvsignore
+++ b/sql/.cvsignore
@@ -9,4 +9,5 @@ mysqlbinlog
mysqlbinlog
mysqld
sql_yacc.cc
+sql_yacc.hh
sql_yacc.h
diff --git a/sql/Makefile.am b/sql/Makefile.am
index 92fcc4cdb6b..b323a7a191e 100644
--- a/sql/Makefile.am
+++ b/sql/Makefile.am
@@ -149,7 +149,7 @@ DEFS = -DMYSQL_SERVER \
-DHAVE_EVENT_SCHEDULER \
@DEFS@
-BUILT_MAINT_SRC = sql_yacc.cc sql_yacc.h
+BUILT_MAINT_SRC = sql_yacc.cc sql_yacc.$(YACC_HEXT)
BUILT_SOURCES = $(BUILT_MAINT_SRC) lex_hash.h link_sources
EXTRA_DIST = udf_example.c udf_example.def $(BUILT_MAINT_SRC) \
nt_servc.cc nt_servc.h \
diff --git a/sql/field.cc b/sql/field.cc
index 278ba83cc81..7e2db40ccb0 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -185,7 +185,7 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]=
//MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP
MYSQL_TYPE_LONG, MYSQL_TYPE_VARCHAR,
//MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24
- MYSQL_TYPE_LONGLONG, MYSQL_TYPE_INT24,
+ MYSQL_TYPE_LONGLONG, MYSQL_TYPE_LONG,
//MYSQL_TYPE_DATE MYSQL_TYPE_TIME
MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR,
//MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR
@@ -216,7 +216,7 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]=
//MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP
MYSQL_TYPE_FLOAT, MYSQL_TYPE_VARCHAR,
//MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24
- MYSQL_TYPE_FLOAT, MYSQL_TYPE_INT24,
+ MYSQL_TYPE_FLOAT, MYSQL_TYPE_FLOAT,
//MYSQL_TYPE_DATE MYSQL_TYPE_TIME
MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR,
//MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR
@@ -247,7 +247,7 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]=
//MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP
MYSQL_TYPE_DOUBLE, MYSQL_TYPE_VARCHAR,
//MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24
- MYSQL_TYPE_DOUBLE, MYSQL_TYPE_INT24,
+ MYSQL_TYPE_DOUBLE, MYSQL_TYPE_DOUBLE,
//MYSQL_TYPE_DATE MYSQL_TYPE_TIME
MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR,
//MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR
@@ -278,7 +278,7 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]=
//MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP
MYSQL_TYPE_NULL, MYSQL_TYPE_TIMESTAMP,
//MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24
- MYSQL_TYPE_LONGLONG, MYSQL_TYPE_INT24,
+ MYSQL_TYPE_LONGLONG, MYSQL_TYPE_LONGLONG,
//MYSQL_TYPE_DATE MYSQL_TYPE_TIME
MYSQL_TYPE_NEWDATE, MYSQL_TYPE_TIME,
//MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index b40025f2eb5..fb028d502c9 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -4148,6 +4148,7 @@ bool ha_partition::init_record_priority_queue()
{
if (bitmap_is_set(&m_part_info->used_partitions, i))
{
+ DBUG_PRINT("info", ("init rec-buf for part %u", i));
int2store(ptr, i);
ptr+= m_rec_length + PARTITION_BYTES_IN_POS;
}
@@ -5052,11 +5053,27 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
m_top_entry= NO_CURRENT_PART_ID;
queue_remove_all(&m_queue);
- DBUG_PRINT("info", ("m_part_spec.start_part %d", m_part_spec.start_part));
- for (i= m_part_spec.start_part; i <= m_part_spec.end_part; i++)
+ /*
+    Position part_rec_buf_ptr to point to the first used partition >=
+    start_part. There may be partitions marked in used_partitions that
+    come before start_part. These partitions have allocated record buffers
+    but are dynamically pruned, so their buffers must be skipped.
+ */
+ uint first_used_part= bitmap_get_first_set(&m_part_info->used_partitions);
+ for (; first_used_part < m_part_spec.start_part; first_used_part++)
+ {
+ if (bitmap_is_set(&(m_part_info->used_partitions), first_used_part))
+ part_rec_buf_ptr+= m_rec_length + PARTITION_BYTES_IN_POS;
+ }
+ DBUG_PRINT("info", ("m_part_spec.start_part %u first_used_part %u",
+ m_part_spec.start_part, first_used_part));
+ for (i= first_used_part; i <= m_part_spec.end_part; i++)
{
if (!(bitmap_is_set(&(m_part_info->used_partitions), i)))
continue;
+ DBUG_PRINT("info", ("reading from part %u (scan_type: %u)",
+ i, m_index_scan_type));
+ DBUG_ASSERT(i == uint2korr(part_rec_buf_ptr));
uchar *rec_buf_ptr= part_rec_buf_ptr + PARTITION_BYTES_IN_POS;
int error;
handler *file= m_file[i];
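
The loop above advances part_rec_buf_ptr past record buffers belonging to partitions that are marked used but lie before start_part, since each used partition owns exactly one slot of PARTITION_BYTES_IN_POS + m_rec_length bytes. A standalone sketch of that slot arithmetic, with plain arrays standing in for the partition bitmap and the buffer; all constants are illustrative:

    #include <stdio.h>

    #define N_PARTS    6
    #define POS_BYTES  2                      /* stands in for PARTITION_BYTES_IN_POS */
    #define REC_LENGTH 8                      /* stands in for m_rec_length */
    #define SLOT_SIZE  (POS_BYTES + REC_LENGTH)

    int main(void)
    {
      /* Partitions 1, 3 and 4 are "used"; record buffers exist only for those. */
      int used[N_PARTS]= {0, 1, 0, 1, 1, 0};
      unsigned char buf[3 * SLOT_SIZE];       /* one slot per used partition */
      unsigned start_part= 3;                 /* the ordered scan starts here */
      unsigned char *ptr= buf;
      unsigned i;

      for (i= 0; i < start_part; i++)
        if (used[i])
          ptr+= SLOT_SIZE;                    /* skip the slot of a pruned-but-used partition */

      printf("first slot used by the scan is at offset %ld\n", (long) (ptr - buf));
      return 0;
    }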
diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc
index 7464fbb440e..c066f1b9744 100644
--- a/sql/item_xmlfunc.cc
+++ b/sql/item_xmlfunc.cc
@@ -2669,8 +2669,12 @@ int xml_enter(MY_XML_PARSER *st,const char *attr, size_t len)
node.parent= data->parent; // Set parent for the new node to old parent
data->parent= numnodes; // Remember current node as new parent
+ DBUG_ASSERT(data->level <= MAX_LEVEL);
data->pos[data->level]= numnodes;
- node.level= data->level++;
+ if (data->level < MAX_LEVEL)
+ node.level= data->level++;
+ else
+ return MY_XML_ERROR;
node.type= st->current_node_type; // TAG or ATTR
node.beg= attr;
node.end= attr + len;
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 27448864cee..b93c4fa564b 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -4191,7 +4191,6 @@ Format_description_log_event::do_shall_skip(Relay_log_info *rli)
into 'server_version_split':
X.Y.Zabc (X,Y,Z numbers, a not a digit) -> {X,Y,Z}
X.Yabc -> {X,Y,0}
- Xabc -> {X,0,0}
'server_version_split' is then used for lookups to find if the server which
created this event has some known bug.
*/
@@ -4202,10 +4201,21 @@ void Format_description_log_event::calc_server_version_split()
for (uint i= 0; i<=2; i++)
{
number= strtoul(p, &r, 10);
- server_version_split[i]= (uchar)number;
- DBUG_ASSERT(number < 256); // fit in uchar
+ /*
+    The version is invalid if any version number is greater than 255 or
+    the first number is not followed by '.'.
+ */
+ if (number < 256 && (*r == '.' || i != 0))
+ server_version_split[i]= (uchar)number;
+ else
+ {
+ server_version_split[0]= 0;
+ server_version_split[1]= 0;
+ server_version_split[2]= 0;
+ break;
+ }
+
p= r;
- DBUG_ASSERT(!((i == 0) && (*r != '.'))); // should be true in practice
if (*r == '.')
p++; // skip the dot
}
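
The reworked loop above stores {X,Y,Z} from an 'X.Y.Z-suffix' server version string and zeroes all three parts when a component exceeds 255 or the first component is not followed by a dot, so the event can later be flagged invalid. A standalone sketch of the same parsing rule; split_version() is an illustrative helper, not the server function:

    #include <stdio.h>
    #include <stdlib.h>

    /* Parse "X.Y.Z..." into out[3]; leave {0,0,0} for an invalid version. */
    static void split_version(const char *version, unsigned char out[3])
    {
      const char *p= version;
      char *r;
      int i;

      out[0]= out[1]= out[2]= 0;
      for (i= 0; i <= 2; i++)
      {
        unsigned long number= strtoul(p, &r, 10);
        /* invalid if a component exceeds 255 or the first one lacks a '.' */
        if (number >= 256 || (i == 0 && *r != '.'))
        {
          out[0]= out[1]= out[2]= 0;
          return;
        }
        out[i]= (unsigned char) number;
        p= (*r == '.') ? r + 1 : r;           /* skip the dot, if any */
      }
    }

    int main(void)
    {
      unsigned char v[3];
      split_version("5.1.68-MariaDB", v);
      printf("%u.%u.%u\n", v[0], v[1], v[2]); /* 5.1.68 */
      split_version("garbage", v);
      printf("%u.%u.%u\n", v[0], v[1], v[2]); /* 0.0.0, i.e. invalid */
      return 0;
    }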
diff --git a/sql/log_event.h b/sql/log_event.h
index bab2ab2a43c..741f53c34eb 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -970,7 +970,7 @@ public:
return thd ? thd->db : 0;
}
#else
- Log_event() : temp_buf(0) {}
+ Log_event() : temp_buf(0), flags(0) {}
/* avoid having to link mysqlbinlog against libpthread */
static Log_event* read_log_event(IO_CACHE* file,
const Format_description_log_event
@@ -2247,12 +2247,26 @@ public:
#ifndef MYSQL_CLIENT
bool write(IO_CACHE* file);
#endif
- bool is_valid() const
+ bool header_is_valid() const
{
return ((common_header_len >= ((binlog_version==1) ? OLD_HEADER_LEN :
LOG_EVENT_MINIMAL_HEADER_LEN)) &&
(post_header_len != NULL));
}
+
+ bool version_is_valid() const
+ {
+ /* It is invalid only when all version numbers are 0 */
+ return !(server_version_split[0] == 0 &&
+ server_version_split[1] == 0 &&
+ server_version_split[2] == 0);
+ }
+
+ bool is_valid() const
+ {
+ return header_is_valid() && version_is_valid();
+ }
+
int get_data_size()
{
/*
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 6a7a4a0476b..5dc1bdc66e3 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -1321,6 +1321,31 @@ bool mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok);
bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create);
uint create_table_def_key(THD *thd, char *key, TABLE_LIST *table_list,
bool tmp_table);
+
+/**
+  Create a table cache key for a non-temporary table.
+
+ @param key Buffer for key (must be at least NAME_LEN*2+2 bytes).
+ @param db Database name.
+ @param table_name Table name.
+
+ @return Length of key.
+
+ @sa create_table_def_key(thd, char *, table_list, bool)
+*/
+
+inline uint
+create_table_def_key(char *key, const char *db, const char *table_name)
+{
+ /*
+    In theory the caller should ensure that both db and table_name are
+    no longer than NAME_LEN bytes. In practice we play it safe to avoid
+    buffer overruns.
+ */
+ return (uint)(strmake(strmake(key, db, NAME_LEN) + 1, table_name,
+ NAME_LEN) - key + 1);
+}
+
TABLE_SHARE *get_table_share(THD *thd, TABLE_LIST *table_list, char *key,
uint key_length, uint db_flags, int *error);
void release_table_share(TABLE_SHARE *share, enum release_type type);
@@ -1619,7 +1644,7 @@ int lock_tables(THD *thd, TABLE_LIST *tables, uint counter, bool *need_reopen);
int decide_logging_format(THD *thd, TABLE_LIST *tables);
TABLE *open_temporary_table(THD *thd, const char *path, const char *db,
const char *table_name, bool link_in_list);
-bool rm_temporary_table(handlerton *base, char *path);
+bool rm_temporary_table(handlerton *base, const char *path);
void free_io_cache(TABLE *entry);
void intern_close_table(TABLE *entry);
bool close_thread_table(THD *thd, TABLE **table_ptr);
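
The new inline create_table_def_key() lays the table-cache key out as the database name, a NUL, the table name and a trailing NUL, truncating each part at NAME_LEN, and returns the length including the final terminator. A standalone sketch of that layout; make_key() and the NAME_LEN value are illustrative stand-ins, not the server definitions:

    #include <stdio.h>
    #include <string.h>

    #define NAME_LEN 64                       /* illustrative; the real limit is larger */

    static size_t capped_len(const char *s, size_t cap)
    {
      size_t len= strlen(s);
      return len < cap ? len : cap;
    }

    /* Build "db\0table\0" into key and return its total length. */
    static unsigned make_key(char *key, const char *db, const char *table)
    {
      size_t db_len=  capped_len(db, NAME_LEN);
      size_t tbl_len= capped_len(table, NAME_LEN);

      memcpy(key, db, db_len);
      key[db_len]= '\0';
      memcpy(key + db_len + 1, table, tbl_len);
      key[db_len + 1 + tbl_len]= '\0';

      return (unsigned) (db_len + 1 + tbl_len + 1);  /* both terminators counted */
    }

    int main(void)
    {
      char key[NAME_LEN * 2 + 2];
      unsigned len= make_key(key, "test", "t1");
      printf("key length = %u (4 + 1 + 2 + 1)\n", len);
      return 0;
    }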
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 3a25371e3dc..f322cd1bb01 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -881,6 +881,7 @@ static int test_if_case_insensitive(const char *dir_name);
static void register_mutex_order();
#ifndef EMBEDDED_LIBRARY
+static bool pid_file_created= false;
static void usage(void);
static void start_signal_handler(void);
static void close_server_sock();
@@ -889,6 +890,7 @@ static void wait_for_signal_thread_to_end(void);
static void create_pid_file();
static void end_ssl();
#endif
+static void delete_pid_file(myf flags);
#ifndef EMBEDDED_LIBRARY
@@ -1375,7 +1377,6 @@ void clean_up(bool print_message)
lex_free(); /* Free some memory */
item_create_cleanup();
set_var_free();
- free_charsets();
if (!opt_noacl)
{
#ifdef HAVE_DLOPEN
@@ -1428,15 +1429,13 @@ void clean_up(bool print_message)
#ifdef USE_REGEX
my_regex_end();
#endif
+ free_charsets();
#if defined(ENABLED_DEBUG_SYNC)
/* End the debug sync facility. See debug_sync.cc. */
debug_sync_end();
#endif /* defined(ENABLED_DEBUG_SYNC) */
-#if !defined(EMBEDDED_LIBRARY)
- if (!opt_bootstrap)
- (void) my_delete(pidfile_name,MYF(0)); // This may not always exist
-#endif
+ delete_pid_file(MYF(0));
if (print_message && errmesg && server_start_time)
sql_print_information(ER(ER_SHUTDOWN_COMPLETE),my_progname);
thread_scheduler.end();
@@ -2052,7 +2051,7 @@ static bool cache_thread()
this thread for handling of new THD object/connection.
*/
thd->mysys_var->abort= 0;
- thd->thr_create_utime= my_micro_time();
+ thd->thr_create_utime= thd->start_utime= my_micro_time();
threads.append(thd);
return(1);
}
@@ -4515,9 +4514,7 @@ we force server id to 2, but this MySQL server will not act as a slave.");
(void) pthread_kill(signal_thread, MYSQL_KILL_SIGNAL);
#endif /* __NETWARE__ */
- if (!opt_bootstrap)
- (void) my_delete(pidfile_name,MYF(MY_WME)); // Not needed anymore
-
+ delete_pid_file(MYF(MY_WME));
if (unix_sock != INVALID_SOCKET)
unlink(mysqld_unix_port);
exit(1);
@@ -9462,12 +9459,13 @@ static void create_pid_file()
if ((file = my_create(pidfile_name,0664,
O_WRONLY | O_TRUNC, MYF(MY_WME))) >= 0)
{
- char buff[21], *end;
+ char buff[MAX_BIGINT_WIDTH + 1], *end;
end= int10_to_str((long) getpid(), buff, 10);
*end++= '\n';
if (!my_write(file, (uchar*) buff, (uint) (end-buff), MYF(MY_WME | MY_NABP)))
{
(void) my_close(file, MYF(0));
+ pid_file_created= true;
return;
}
(void) my_close(file, MYF(0));
@@ -9477,6 +9475,26 @@ static void create_pid_file()
}
#endif /* EMBEDDED_LIBRARY */
+
+/**
+ Remove the process' pid file.
+
+ @param flags file operation flags
+*/
+
+static void delete_pid_file(myf flags)
+{
+#ifndef EMBEDDED_LIBRARY
+ if (opt_bootstrap || !pid_file_created)
+ return;
+
+ my_delete(pidfile_name, flags);
+ pid_file_created= false;
+#endif /* EMBEDDED_LIBRARY */
+ return;
+}
+
+
/** Clear most status variables. */
void refresh_status(THD *thd)
{
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 1971911fb88..283d56f6d48 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -242,12 +242,9 @@ static void check_unused(void)
uint create_table_def_key(THD *thd, char *key, TABLE_LIST *table_list,
bool tmp_table)
{
- char *db_end= strnmov(key, table_list->db, MAX_DBKEY_LENGTH - 2);
- *db_end++= '\0';
- char *table_end= strnmov(db_end, table_list->table_name,
- key + MAX_DBKEY_LENGTH - 1 - db_end);
- *table_end++= '\0';
- uint key_length= (uint) (table_end-key);
+ uint key_length= create_table_def_key(key, table_list->db,
+ table_list->table_name);
+
if (tmp_table)
{
int4store(key + key_length, thd->server_id);
@@ -623,13 +620,10 @@ void release_table_share(TABLE_SHARE *share, enum release_type type)
TABLE_SHARE *get_cached_table_share(const char *db, const char *table_name)
{
char key[SAFE_NAME_LEN*2+2];
- TABLE_LIST table_list;
uint key_length;
safe_mutex_assert_owner(&LOCK_open);
- table_list.db= (char*) db;
- table_list.table_name= (char*) table_name;
- key_length= create_table_def_key((THD*) 0, key, &table_list, 0);
+ key_length= create_table_def_key(key, db, table_name);
return (TABLE_SHARE*) hash_search(&table_def_cache,(uchar*) key, key_length);
}
@@ -2428,7 +2422,7 @@ bool lock_table_name_if_not_cached(THD *thd, const char *db,
uint key_length;
DBUG_ENTER("lock_table_name_if_not_cached");
- key_length= (uint)(strmov(strmov(key, db) + 1, table_name) - key) + 1;
+ key_length= create_table_def_key(key, db, table_name);
VOID(pthread_mutex_lock(&LOCK_open));
if (hash_search(&open_cache, (uchar *)key, key_length))
@@ -3041,7 +3035,7 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
TABLE *find_locked_table(THD *thd, const char *db,const char *table_name)
{
char key[MAX_DBKEY_LENGTH];
- uint key_length=(uint) (strmov(strmov(key,db)+1,table_name)-key)+1;
+ uint key_length= create_table_def_key(key, db, table_name);
for (TABLE *table=thd->open_tables; table ; table=table->next)
{
@@ -5770,17 +5764,27 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db,
}
-bool rm_temporary_table(handlerton *base, char *path)
+/**
+ Delete a temporary table.
+
+ @param base Handlerton for table to be deleted.
+ @param path Path to the table to be deleted (i.e. path
+ to its .frm without an extension).
+
+ @retval false - success.
+ @retval true - failure.
+*/
+
+bool rm_temporary_table(handlerton *base, const char *path)
{
bool error=0;
handler *file;
- char *ext;
+ char frm_path[FN_REFLEN + 1];
DBUG_ENTER("rm_temporary_table");
- strmov(ext= strend(path), reg_ext);
- if (my_delete(path,MYF(0)))
+ strxnmov(frm_path, sizeof(frm_path) - 1, path, reg_ext, NullS);
+ if (my_delete(frm_path, MYF(0)))
error=1; /* purecov: inspected */
- *ext= 0; // remove extension
file= get_new_handler((TABLE_SHARE*) 0, current_thd->mem_root, base);
if (file && file->ha_delete_table(path))
{
@@ -8676,7 +8680,7 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table_name,
DBUG_ENTER("remove_table_from_cache");
DBUG_PRINT("enter", ("table: '%s'.'%s' flags: %u", db, table_name, flags));
- key_length=(uint) (strmov(strmov(key,db)+1,table_name)-key)+1;
+ key_length= create_table_def_key(key, db, table_name);
for (;;)
{
HASH_SEARCH_STATE state;
@@ -8884,12 +8888,14 @@ open_new_frm(THD *thd, TABLE_SHARE *share, const char *alias,
{
LEX_STRING pathstr;
File_parser *parser;
- char path[FN_REFLEN];
+ char path[FN_REFLEN+1];
DBUG_ENTER("open_new_frm");
/* Create path with extension */
- pathstr.length= (uint) (strxmov(path, share->normalized_path.str, reg_ext,
- NullS)- path);
+ pathstr.length= (uint) (strxnmov(path, sizeof(path) - 1,
+ share->normalized_path.str,
+ reg_ext,
+ NullS) - path);
pathstr.str= path;
if ((parser= sql_parse_prepare(&pathstr, mem_root, 1)))
@@ -9030,7 +9036,7 @@ void mysql_wait_completed_table(ALTER_PARTITION_PARAM_TYPE *lpt, TABLE *my_table
TABLE *table;
DBUG_ENTER("mysql_wait_completed_table");
- key_length=(uint) (strmov(strmov(key,lpt->db)+1,lpt->table_name)-key)+1;
+ key_length= create_table_def_key(key, lpt->db, lpt->table_name);
VOID(pthread_mutex_lock(&LOCK_open));
HASH_SEARCH_STATE state;
for (table= (TABLE*) hash_first(&open_cache,(uchar*) key,key_length,
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index 654d178152d..ba874f9cc99 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -2738,8 +2738,8 @@ void Query_cache::invalidate_table(THD *thd, TABLE_LIST *table_list)
char key[MAX_DBKEY_LENGTH];
uint key_length;
- key_length=(uint) (strmov(strmov(key,table_list->db)+1,
- table_list->table_name) -key)+ 1;
+ key_length= create_table_def_key(key, table_list->db,
+ table_list->table_name);
// We don't store temporary tables => no key_length+=4 ...
invalidate_table(thd, (uchar *)key, key_length);
@@ -2860,8 +2860,8 @@ Query_cache::register_tables_from_list(TABLE_LIST *tables_used,
DBUG_PRINT("qcache", ("view: %s db: %s",
tables_used->view_name.str,
tables_used->view_db.str));
- key_length= (uint) (strmov(strmov(key, tables_used->view_db.str) + 1,
- tables_used->view_name.str) - key) + 1;
+ key_length= create_table_def_key(key, tables_used->view_db.str,
+ tables_used->view_name.str);
/*
        There are no callback functions for VIEWs
*/
@@ -3923,14 +3923,13 @@ my_bool Query_cache::move_by_type(uchar **border,
case Query_cache_block::RESULT:
{
DBUG_PRINT("qcache", ("block 0x%lx RES* (%d)", (ulong) block,
- (int) block->type));
+ (int) block->type));
if (*border == 0)
break;
- Query_cache_block *query_block = block->result()->parent(),
- *next = block->next,
- *prev = block->prev;
- Query_cache_block::block_type type = block->type;
+ Query_cache_block *query_block= block->result()->parent();
BLOCK_LOCK_WR(query_block);
+ Query_cache_block *next= block->next, *prev= block->prev;
+ Query_cache_block::block_type type= block->type;
ulong len = block->length, used = block->used;
Query_cache_block *pprev = block->pprev,
*pnext = block->pnext,
@@ -4092,8 +4091,9 @@ uint Query_cache::filename_2_table_key (char *key, const char *path,
*db_length= (filename - dbname) - 1;
DBUG_PRINT("qcache", ("table '%-.*s.%s'", *db_length, dbname, filename));
- DBUG_RETURN((uint) (strmov(strmake(key, dbname, *db_length) + 1,
- filename) -key) + 1);
+ DBUG_RETURN((uint) (strmake(strmake(key, dbname,
+ min(*db_length, NAME_LEN)) + 1,
+ filename, NAME_LEN) - key) + 1);
}
/****************************************************************************
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 819cf41ec21..692129dd200 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -47,7 +47,11 @@ class Event_parse_data;
#else
#include "lex_symbol.h"
#if MYSQL_LEX
-#include "sql_yacc.h"
+# if YACC_HEXT_HH
+# include "sql_yacc.hh"
+# else
+# include "sql_yacc.h"
+# endif
#define LEX_YYSTYPE YYSTYPE *
#else
#define LEX_YYSTYPE void *
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index 5108170d2c6..f852bf0377e 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -875,6 +875,8 @@ int start_slave(THD* thd , Master_info* mi, bool net_report)
if (thd->lex->mi.pos)
{
+ if (thd->lex->mi.relay_log_pos)
+ slave_errno=ER_BAD_SLAVE_UNTIL_COND;
mi->rli.until_condition= Relay_log_info::UNTIL_MASTER_POS;
mi->rli.until_log_pos= thd->lex->mi.pos;
/*
@@ -886,6 +888,8 @@ int start_slave(THD* thd , Master_info* mi, bool net_report)
}
else if (thd->lex->mi.relay_log_pos)
{
+ if (thd->lex->mi.pos)
+ slave_errno=ER_BAD_SLAVE_UNTIL_COND;
mi->rli.until_condition= Relay_log_info::UNTIL_RELAY_POS;
mi->rli.until_log_pos= thd->lex->mi.relay_log_pos;
strmake(mi->rli.until_log_name, thd->lex->mi.relay_log_name,
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 541a7909f46..ce9d91bec72 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2000, 2011, Oracle and/or its affiliates.
+ Copyright (c) 2000, 2012, Oracle and/or its affiliates.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -618,13 +618,6 @@ uint build_tmptable_filename(THD* thd, char *buff, size_t bufflen)
struct st_global_ddl_log
{
- /*
- We need to adjust buffer size to be able to handle downgrades/upgrades
- where IO_SIZE has changed. We'll set the buffer size such that we can
- handle that the buffer size was upto 4 times bigger in the version
- that wrote the DDL log.
- */
- char file_entry_buf[4*IO_SIZE];
char file_name_str[FN_REFLEN];
char *file_name;
DDL_LOG_MEMORY_ENTRY *first_free;
@@ -652,51 +645,60 @@ pthread_mutex_t LOCK_gdl;
#define DDL_LOG_NUM_ENTRY_POS 0
#define DDL_LOG_NAME_LEN_POS 4
#define DDL_LOG_IO_SIZE_POS 8
+#define DDL_LOG_HEADER_SIZE 12
-/*
- Read one entry from ddl log file
- SYNOPSIS
- read_ddl_log_file_entry()
- entry_no Entry number to read
- RETURN VALUES
- TRUE Error
- FALSE Success
+/**
+ Read one entry from ddl log file.
+ @param[out] file_entry_buf Buffer to read into
+ @param entry_no Entry number to read
+ @param size Number of bytes of the entry to read
+
+ @return Operation status
+ @retval true Error
+ @retval false Success
*/
-static bool read_ddl_log_file_entry(uint entry_no)
+static bool read_ddl_log_file_entry(uchar *file_entry_buf,
+ uint entry_no,
+ uint size)
{
bool error= FALSE;
File file_id= global_ddl_log.file_id;
- uchar *file_entry_buf= (uchar*)global_ddl_log.file_entry_buf;
uint io_size= global_ddl_log.io_size;
DBUG_ENTER("read_ddl_log_file_entry");
+ DBUG_ASSERT(io_size >= size);
- if (my_pread(file_id, file_entry_buf, io_size, io_size * entry_no,
- MYF(MY_WME)) != io_size)
+ if (my_pread(file_id, file_entry_buf, size, io_size * entry_no,
+ MYF(MY_WME)) != size)
error= TRUE;
DBUG_RETURN(error);
}
-/*
- Write one entry from ddl log file
- SYNOPSIS
- write_ddl_log_file_entry()
- entry_no Entry number to write
- RETURN VALUES
- TRUE Error
- FALSE Success
+/**
+ Write one entry to ddl log file.
+
+ @param file_entry_buf Buffer to write
+ @param entry_no Entry number to write
+ @param size Number of bytes of the entry to write
+
+ @return Operation status
+ @retval true Error
+ @retval false Success
*/
-static bool write_ddl_log_file_entry(uint entry_no)
+static bool write_ddl_log_file_entry(uchar *file_entry_buf,
+ uint entry_no,
+ uint size)
{
bool error= FALSE;
File file_id= global_ddl_log.file_id;
- char *file_entry_buf= (char*)global_ddl_log.file_entry_buf;
+ uint io_size= global_ddl_log.io_size;
DBUG_ENTER("write_ddl_log_file_entry");
+ DBUG_ASSERT(io_size >= size);
- if (my_pwrite(file_id, (uchar*)file_entry_buf,
- IO_SIZE, IO_SIZE * entry_no, MYF(MY_WME)) != IO_SIZE)
+ if (my_pwrite(file_id, file_entry_buf, size,
+ io_size * entry_no, MYF(MY_WME)) != size)
error= TRUE;
DBUG_RETURN(error);
}
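
With the refactoring above each ddl log entry occupies one io_size-sized slot in the log file, and the read/write helpers take an explicit buffer plus the number of bytes that are actually meaningful to the caller. A standalone sketch of slot-addressed I/O with pread()/pwrite(); IO_SLOT, read_slot() and the demo file name are illustrative, not the server's definitions:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    #define IO_SLOT 4096                      /* stands in for global_ddl_log.io_size */

    /* Read `size` bytes of slot `entry_no`; every slot starts at entry_no * IO_SLOT. */
    static int read_slot(int fd, unsigned char *buf, unsigned entry_no, size_t size)
    {
      if (size > IO_SLOT)
        return 1;                             /* never read past the slot */
      return pread(fd, buf, size, (off_t) entry_no * IO_SLOT) != (ssize_t) size;
    }

    int main(void)
    {
      unsigned char slot[IO_SLOT], head[16];
      int fd= open("ddl_log_demo.bin", O_RDWR | O_CREAT | O_TRUNC, 0600);

      if (fd < 0)
        return 1;
      memset(slot, 0, sizeof(slot));
      memcpy(slot, "HEADER", 6);
      if (pwrite(fd, slot, sizeof(slot), 0) != (ssize_t) sizeof(slot))
      {
        close(fd);
        return 1;
      }
      if (!read_slot(fd, head, 0, sizeof(head)))  /* read only the leading bytes */
        printf("entry 0 starts with: %.6s\n", (const char *) head);
      close(fd);
      unlink("ddl_log_demo.bin");
      return 0;
    }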
@@ -715,17 +717,20 @@ static bool write_ddl_log_header()
{
uint16 const_var;
bool error= FALSE;
+ uchar file_entry_buf[DDL_LOG_HEADER_SIZE];
DBUG_ENTER("write_ddl_log_header");
+ DBUG_ASSERT((DDL_LOG_NAME_POS + 3 * global_ddl_log.name_len)
+ <= global_ddl_log.io_size);
- int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NUM_ENTRY_POS],
+ int4store(&file_entry_buf[DDL_LOG_NUM_ENTRY_POS],
global_ddl_log.num_entries);
- const_var= FN_LEN;
- int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_LEN_POS],
+ const_var= global_ddl_log.name_len;
+ int4store(&file_entry_buf[DDL_LOG_NAME_LEN_POS],
(ulong) const_var);
- const_var= IO_SIZE;
- int4store(&global_ddl_log.file_entry_buf[DDL_LOG_IO_SIZE_POS],
+ const_var= global_ddl_log.io_size;
+ int4store(&file_entry_buf[DDL_LOG_IO_SIZE_POS],
(ulong) const_var);
- if (write_ddl_log_file_entry(0UL))
+ if (write_ddl_log_file_entry(file_entry_buf, 0UL, DDL_LOG_HEADER_SIZE))
{
sql_print_error("Error writing ddl log header");
DBUG_RETURN(TRUE);
@@ -765,17 +770,19 @@ static inline void create_ddl_log_file_name(char *file_name)
static uint read_ddl_log_header()
{
- char *file_entry_buf= (char*)global_ddl_log.file_entry_buf;
+ char file_entry_buf[DDL_LOG_HEADER_SIZE];
char file_name[FN_REFLEN];
uint entry_no;
bool successful_open= FALSE;
DBUG_ENTER("read_ddl_log_header");
+ DBUG_ASSERT(global_ddl_log.io_size <= IO_SIZE);
create_ddl_log_file_name(file_name);
if ((global_ddl_log.file_id= my_open(file_name,
O_RDWR | O_BINARY, MYF(0))) >= 0)
{
- if (read_ddl_log_file_entry(0UL))
+ if (read_ddl_log_file_entry((uchar *) file_entry_buf, 0UL,
+ DDL_LOG_HEADER_SIZE))
{
/* Write message into error log */
sql_print_error("Failed to read ddl log file in recovery");
@@ -788,8 +795,6 @@ static uint read_ddl_log_header()
entry_no= uint4korr(&file_entry_buf[DDL_LOG_NUM_ENTRY_POS]);
global_ddl_log.name_len= uint4korr(&file_entry_buf[DDL_LOG_NAME_LEN_POS]);
global_ddl_log.io_size= uint4korr(&file_entry_buf[DDL_LOG_IO_SIZE_POS]);
- DBUG_ASSERT(global_ddl_log.io_size <=
- sizeof(global_ddl_log.file_entry_buf));
}
else
{
@@ -804,30 +809,22 @@ static uint read_ddl_log_header()
}
-/*
- Read a ddl log entry
- SYNOPSIS
- read_ddl_log_entry()
- read_entry Number of entry to read
- out:entry_info Information from entry
- RETURN VALUES
- TRUE Error
- FALSE Success
- DESCRIPTION
- Read a specified entry in the ddl log
+/**
+ Set ddl log entry struct from buffer
+ @param read_entry Entry number
+ @param file_entry_buf Buffer to use
+ @param ddl_log_entry Entry to be set
+
+ @note Pointers in ddl_log_entry will point into file_entry_buf!
*/
-bool read_ddl_log_entry(uint read_entry, DDL_LOG_ENTRY *ddl_log_entry)
+static void set_ddl_log_entry_from_buf(uint read_entry,
+ uchar *file_entry_buf,
+ DDL_LOG_ENTRY *ddl_log_entry)
{
- char *file_entry_buf= (char*)&global_ddl_log.file_entry_buf;
uint inx;
uchar single_char;
- DBUG_ENTER("read_ddl_log_entry");
-
- if (read_ddl_log_file_entry(read_entry))
- {
- DBUG_RETURN(TRUE);
- }
+ DBUG_ENTER("set_ddl_log_entry_from_buf");
ddl_log_entry->entry_pos= read_entry;
single_char= file_entry_buf[DDL_LOG_ENTRY_TYPE_POS];
ddl_log_entry->entry_type= (enum ddl_log_entry_code)single_char;
@@ -835,14 +832,14 @@ bool read_ddl_log_entry(uint read_entry, DDL_LOG_ENTRY *ddl_log_entry)
ddl_log_entry->action_type= (enum ddl_log_action_code)single_char;
ddl_log_entry->phase= file_entry_buf[DDL_LOG_PHASE_POS];
ddl_log_entry->next_entry= uint4korr(&file_entry_buf[DDL_LOG_NEXT_ENTRY_POS]);
- ddl_log_entry->name= &file_entry_buf[DDL_LOG_NAME_POS];
+ ddl_log_entry->name= (char*) &file_entry_buf[DDL_LOG_NAME_POS];
inx= DDL_LOG_NAME_POS + global_ddl_log.name_len;
- ddl_log_entry->from_name= &file_entry_buf[inx];
+ ddl_log_entry->from_name= (char*) &file_entry_buf[inx];
inx+= global_ddl_log.name_len;
- ddl_log_entry->handler_name= &file_entry_buf[inx];
- DBUG_RETURN(FALSE);
+ ddl_log_entry->handler_name= (char*) &file_entry_buf[inx];
+ DBUG_VOID_RETURN;
}
-
+
/*
Initialise ddl log
@@ -1045,6 +1042,7 @@ static bool get_free_ddl_log_entry(DDL_LOG_MEMORY_ENTRY **active_entry,
DDL_LOG_MEMORY_ENTRY *first_used= global_ddl_log.first_used;
DBUG_ENTER("get_free_ddl_log_entry");
+ safe_mutex_assert_owner(&LOCK_gdl);
if (global_ddl_log.first_free == NULL)
{
if (!(used_entry= (DDL_LOG_MEMORY_ENTRY*)my_malloc(
@@ -1101,34 +1099,36 @@ bool write_ddl_log_entry(DDL_LOG_ENTRY *ddl_log_entry,
DDL_LOG_MEMORY_ENTRY **active_entry)
{
bool error, write_header;
+ char file_entry_buf[IO_SIZE];
DBUG_ENTER("write_ddl_log_entry");
if (init_ddl_log())
{
DBUG_RETURN(TRUE);
}
- global_ddl_log.file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]=
+ memset(file_entry_buf, 0, sizeof(file_entry_buf));
+ file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]=
(char)DDL_LOG_ENTRY_CODE;
- global_ddl_log.file_entry_buf[DDL_LOG_ACTION_TYPE_POS]=
+ file_entry_buf[DDL_LOG_ACTION_TYPE_POS]=
(char)ddl_log_entry->action_type;
- global_ddl_log.file_entry_buf[DDL_LOG_PHASE_POS]= 0;
- int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NEXT_ENTRY_POS],
+ file_entry_buf[DDL_LOG_PHASE_POS]= 0;
+ int4store(&file_entry_buf[DDL_LOG_NEXT_ENTRY_POS],
ddl_log_entry->next_entry);
- DBUG_ASSERT(strlen(ddl_log_entry->name) < FN_LEN);
- strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS],
- ddl_log_entry->name, FN_LEN - 1);
+ DBUG_ASSERT(strlen(ddl_log_entry->name) < global_ddl_log.name_len);
+ strmake(&file_entry_buf[DDL_LOG_NAME_POS], ddl_log_entry->name,
+ global_ddl_log.name_len - 1);
if (ddl_log_entry->action_type == DDL_LOG_RENAME_ACTION ||
ddl_log_entry->action_type == DDL_LOG_REPLACE_ACTION)
{
- DBUG_ASSERT(strlen(ddl_log_entry->from_name) < FN_LEN);
- strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + FN_LEN],
- ddl_log_entry->from_name, FN_LEN - 1);
+ DBUG_ASSERT(strlen(ddl_log_entry->from_name) < global_ddl_log.name_len);
+ strmake(&file_entry_buf[DDL_LOG_NAME_POS + global_ddl_log.name_len],
+ ddl_log_entry->from_name, global_ddl_log.name_len - 1);
}
else
- global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + FN_LEN]= 0;
- DBUG_ASSERT(strlen(ddl_log_entry->handler_name) < FN_LEN);
- strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + (2*FN_LEN)],
- ddl_log_entry->handler_name, FN_LEN - 1);
+ file_entry_buf[DDL_LOG_NAME_POS + global_ddl_log.name_len]= 0;
+ DBUG_ASSERT(strlen(ddl_log_entry->handler_name) < global_ddl_log.name_len);
+ strmake(&file_entry_buf[DDL_LOG_NAME_POS + (2*global_ddl_log.name_len)],
+ ddl_log_entry->handler_name, global_ddl_log.name_len - 1);
if (get_free_ddl_log_entry(active_entry, &write_header))
{
DBUG_RETURN(TRUE);
@@ -1136,14 +1136,15 @@ bool write_ddl_log_entry(DDL_LOG_ENTRY *ddl_log_entry,
error= FALSE;
DBUG_PRINT("ddl_log",
("write type %c next %u name '%s' from_name '%s' handler '%s'",
- (char) global_ddl_log.file_entry_buf[DDL_LOG_ACTION_TYPE_POS],
+ (char) file_entry_buf[DDL_LOG_ACTION_TYPE_POS],
ddl_log_entry->next_entry,
- (char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS],
- (char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS
- + FN_LEN],
- (char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS
- + (2*FN_LEN)]));
- if (write_ddl_log_file_entry((*active_entry)->entry_pos))
+ (char*) &file_entry_buf[DDL_LOG_NAME_POS],
+ (char*) &file_entry_buf[DDL_LOG_NAME_POS +
+ global_ddl_log.name_len],
+ (char*) &file_entry_buf[DDL_LOG_NAME_POS +
+ (2*global_ddl_log.name_len)]));
+ if (write_ddl_log_file_entry((uchar*) file_entry_buf,
+ (*active_entry)->entry_pos, IO_SIZE))
{
error= TRUE;
sql_print_error("Failed to write entry_no = %u",
@@ -1193,13 +1194,14 @@ bool write_execute_ddl_log_entry(uint first_entry,
DDL_LOG_MEMORY_ENTRY **active_entry)
{
bool write_header= FALSE;
- char *file_entry_buf= (char*)global_ddl_log.file_entry_buf;
+ char file_entry_buf[IO_SIZE];
DBUG_ENTER("write_execute_ddl_log_entry");
if (init_ddl_log())
{
DBUG_RETURN(TRUE);
}
+ memset(file_entry_buf, 0, sizeof(file_entry_buf));
if (!complete)
{
/*
@@ -1213,12 +1215,7 @@ bool write_execute_ddl_log_entry(uint first_entry,
}
else
file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= (char)DDL_IGNORE_LOG_ENTRY_CODE;
- file_entry_buf[DDL_LOG_ACTION_TYPE_POS]= 0; /* Ignored for execute entries */
- file_entry_buf[DDL_LOG_PHASE_POS]= 0;
int4store(&file_entry_buf[DDL_LOG_NEXT_ENTRY_POS], first_entry);
- file_entry_buf[DDL_LOG_NAME_POS]= 0;
- file_entry_buf[DDL_LOG_NAME_POS + FN_LEN]= 0;
- file_entry_buf[DDL_LOG_NAME_POS + 2*FN_LEN]= 0;
if (!(*active_entry))
{
if (get_free_ddl_log_entry(active_entry, &write_header))
@@ -1226,7 +1223,9 @@ bool write_execute_ddl_log_entry(uint first_entry,
DBUG_RETURN(TRUE);
}
}
- if (write_ddl_log_file_entry((*active_entry)->entry_pos))
+ if (write_ddl_log_file_entry((uchar*) file_entry_buf,
+ (*active_entry)->entry_pos,
+ IO_SIZE))
{
sql_print_error("Error writing execute entry in ddl log");
release_ddl_log_memory_entry(*active_entry);
@@ -1271,10 +1270,16 @@ bool write_execute_ddl_log_entry(uint first_entry,
bool deactivate_ddl_log_entry(uint entry_no)
{
- char *file_entry_buf= (char*)global_ddl_log.file_entry_buf;
+ uchar file_entry_buf[DDL_LOG_NAME_POS];
DBUG_ENTER("deactivate_ddl_log_entry");
- if (!read_ddl_log_file_entry(entry_no))
+
+ /*
+    We only need to read and write the first bytes of the entry, where
+    ENTRY_TYPE, ACTION_TYPE and PHASE reside. Reading or writing
+    DDL_LOG_NAME_POS bytes covers everything in the entry except the names.
+ */
+ if (!read_ddl_log_file_entry(file_entry_buf, entry_no, DDL_LOG_NAME_POS))
{
if (file_entry_buf[DDL_LOG_ENTRY_TYPE_POS] == DDL_LOG_ENTRY_CODE)
{
@@ -1292,7 +1297,7 @@ bool deactivate_ddl_log_entry(uint entry_no)
{
DBUG_ASSERT(0);
}
- if (write_ddl_log_file_entry(entry_no))
+ if (write_ddl_log_file_entry(file_entry_buf, entry_no, DDL_LOG_NAME_POS))
{
sql_print_error("Error in deactivating log entry. Position = %u",
entry_no);
@@ -1353,6 +1358,7 @@ void release_ddl_log_memory_entry(DDL_LOG_MEMORY_ENTRY *log_entry)
DDL_LOG_MEMORY_ENTRY *next_log_entry= log_entry->next_log_entry;
DDL_LOG_MEMORY_ENTRY *prev_log_entry= log_entry->prev_log_entry;
DBUG_ENTER("release_ddl_log_memory_entry");
+ safe_mutex_assert_owner(&LOCK_gdl);
global_ddl_log.first_free= log_entry;
log_entry->next_log_entry= first_free;
@@ -1382,24 +1388,26 @@ bool execute_ddl_log_entry(THD *thd, uint first_entry)
{
DDL_LOG_ENTRY ddl_log_entry;
uint read_entry= first_entry;
+ uchar file_entry_buf[IO_SIZE];
DBUG_ENTER("execute_ddl_log_entry");
pthread_mutex_lock(&LOCK_gdl);
do
{
- if (read_ddl_log_entry(read_entry, &ddl_log_entry))
+ if (read_ddl_log_file_entry(file_entry_buf, read_entry, IO_SIZE))
{
- /* Write to error log and continue with next log entry */
+ /* Print the error to the log and continue with next log entry */
sql_print_error("Failed to read entry = %u from ddl log",
read_entry);
break;
}
+ set_ddl_log_entry_from_buf(read_entry, file_entry_buf, &ddl_log_entry);
DBUG_ASSERT(ddl_log_entry.entry_type == DDL_LOG_ENTRY_CODE ||
ddl_log_entry.entry_type == DDL_IGNORE_LOG_ENTRY_CODE);
if (execute_ddl_log_action(thd, &ddl_log_entry))
{
- /* Write to error log and continue with next log entry */
+ /* Print the error to the log and continue with next log entry */
sql_print_error("Failed to execute action for entry = %u from ddl log",
read_entry);
break;
@@ -1444,13 +1452,14 @@ void execute_ddl_log_recovery()
uint num_entries, i;
THD *thd;
DDL_LOG_ENTRY ddl_log_entry;
+ uchar *file_entry_buf;
+ uint io_size;
char file_name[FN_REFLEN];
DBUG_ENTER("execute_ddl_log_recovery");
/*
Initialise global_ddl_log struct
*/
- bzero(global_ddl_log.file_entry_buf, sizeof(global_ddl_log.file_entry_buf));
global_ddl_log.inited= FALSE;
global_ddl_log.recovery_phase= TRUE;
global_ddl_log.io_size= IO_SIZE;
@@ -1465,14 +1474,23 @@ void execute_ddl_log_recovery()
thd->store_globals();
num_entries= read_ddl_log_header();
+ io_size= global_ddl_log.io_size;
+ file_entry_buf= (uchar*) my_malloc(io_size, MYF(0));
+ if (!file_entry_buf)
+ {
+ sql_print_error("Failed to allocate buffer for recover ddl log");
+ DBUG_VOID_RETURN;
+ }
for (i= 1; i < num_entries + 1; i++)
{
- if (read_ddl_log_entry(i, &ddl_log_entry))
+ if (read_ddl_log_file_entry(file_entry_buf, i, io_size))
{
sql_print_error("Failed to read entry no = %u from ddl log",
i);
continue;
}
+
+ set_ddl_log_entry_from_buf(i, file_entry_buf, &ddl_log_entry);
if (ddl_log_entry.entry_type == DDL_LOG_EXECUTE_CODE)
{
if (execute_ddl_log_entry(thd, ddl_log_entry.next_entry))
@@ -1487,6 +1505,7 @@ void execute_ddl_log_recovery()
VOID(my_delete(file_name, MYF(0)));
global_ddl_log.recovery_phase= FALSE;
delete thd;
+ my_free(file_entry_buf, MYF(0));
/* Remember that we don't have a THD */
my_pthread_setspecific_ptr(THR_THD, 0);
DBUG_VOID_RETURN;
@@ -1503,14 +1522,16 @@ void execute_ddl_log_recovery()
void release_ddl_log()
{
- DDL_LOG_MEMORY_ENTRY *free_list= global_ddl_log.first_free;
- DDL_LOG_MEMORY_ENTRY *used_list= global_ddl_log.first_used;
+ DDL_LOG_MEMORY_ENTRY *free_list;
+ DDL_LOG_MEMORY_ENTRY *used_list;
DBUG_ENTER("release_ddl_log");
if (!global_ddl_log.do_release)
DBUG_VOID_RETURN;
pthread_mutex_lock(&LOCK_gdl);
+ free_list= global_ddl_log.first_free;
+ used_list= global_ddl_log.first_used;
while (used_list)
{
DDL_LOG_MEMORY_ENTRY *tmp= used_list->next_log_entry;
@@ -4991,6 +5012,11 @@ send_result_message:
case HA_ADMIN_TRY_ALTER:
{
+ uint save_flags;
+ Alter_info *alter_info= &lex->alter_info;
+
+ /* Store the original value of alter_info->flags */
+ save_flags= alter_info->flags;
/*
This is currently used only by InnoDB. ha_innobase::optimize() answers
"try with alter", so here we close the table, do an ALTER TABLE,
@@ -4998,9 +5024,18 @@ send_result_message:
We have to end the row, so analyze could return more rows.
*/
protocol->store(STRING_WITH_LEN("note"), system_charset_info);
- protocol->store(STRING_WITH_LEN(
- "Table does not support optimize, doing recreate + analyze instead"),
- system_charset_info);
+ if(alter_info->flags & ALTER_ADMIN_PARTITION)
+ {
+ protocol->store(STRING_WITH_LEN(
+ "Table does not support optimize on partitions. All partitions "
+ "will be rebuilt and analyzed."),system_charset_info);
+ }
+ else
+ {
+ protocol->store(STRING_WITH_LEN(
+ "Table does not support optimize, doing recreate + analyze instead"),
+ system_charset_info);
+ }
if (protocol->write())
goto err;
DBUG_PRINT("info", ("HA_ADMIN_TRY_ALTER, trying analyze..."));
@@ -5014,9 +5049,15 @@ send_result_message:
close_thread_tables(thd);
if (!result_code) // recreation went ok
{
+ /*
+ Reset the ALTER_ADMIN_PARTITION bit in alter_info->flags
+ to force analyze on all partitions.
+ */
+ alter_info->flags &= ~(ALTER_ADMIN_PARTITION);
if ((table->table= open_ltable(thd, table, lock_type, 0)) &&
((result_code= table->table->file->ha_analyze(thd, check_opt)) > 0))
result_code= 0; // analyze went ok
+ alter_info->flags= save_flags;
}
/* Start a new row for the final status row */
protocol->prepare_for_resend();
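
The sql_table.cc hunks above move the DDL log I/O from the shared
global_ddl_log buffer to buffers owned by each caller. A minimal sketch of
the resulting read-then-decode pattern (the wrapper name load_ddl_log_entry
is hypothetical; read_ddl_log_file_entry(), set_ddl_log_entry_from_buf(),
DDL_LOG_ENTRY and IO_SIZE are taken from the patch):

    static bool load_ddl_log_entry(uint entry_no, DDL_LOG_ENTRY *out)
    {
      uchar file_entry_buf[IO_SIZE];               /* caller-owned buffer */
      if (read_ddl_log_file_entry(file_entry_buf, entry_no, IO_SIZE))
        return true;                               /* I/O error; caller logs it */
      /* decode entry_type, action_type, phase and the names from the raw bytes */
      set_ddl_log_entry_from_buf(entry_no, file_entry_buf, out);
      return false;
    }
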
diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc
index 3de7aded51e..6e3a13c8e5f 100644
--- a/sql/sql_trigger.cc
+++ b/sql/sql_trigger.cc
@@ -1985,9 +1985,7 @@ bool Table_triggers_list::change_table_name(THD *thd, const char *db,
*/
#ifndef DBUG_OFF
uchar key[MAX_DBKEY_LENGTH];
- uint key_length= (uint) (strmov(strmov((char*)&key[0], db)+1,
- old_table)-(char*)&key[0])+1;
-
+ uint key_length= create_table_def_key((char *)key, db, old_table);
if (!is_table_name_exclusively_locked_by_this_thread(thd, key, key_length))
safe_mutex_assert_owner(&LOCK_open);
#endif
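
The create_table_def_key() call above replaces the hand-rolled strmov()
chain that built a "<db>\0<table>\0" key. A minimal sketch of that layout
(the wrapper name is hypothetical; strmov() returns a pointer to the
terminating NUL it writes):

    static uint table_def_key_sketch(char *key, const char *db, const char *table)
    {
      char *end= strmov(key, db);           /* "<db>\0"                        */
      end= strmov(end + 1, table);          /* "<db>\0<table>\0"               */
      return (uint) (end - key) + 1;        /* strlen(db) + strlen(table) + 2  */
    }
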
diff --git a/sql/tztime.cc b/sql/tztime.cc
index 3cdc1853c14..c6ecdacb020 100644
--- a/sql/tztime.cc
+++ b/sql/tztime.cc
@@ -1808,7 +1808,7 @@ static Time_zone*
tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
{
TABLE *table= 0;
- TIME_ZONE_INFO *tz_info;
+ TIME_ZONE_INFO *tz_info= NULL;
Tz_names_entry *tmp_tzname;
Time_zone *return_val= 0;
int res;
@@ -1816,7 +1816,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
my_time_t ttime;
char buff[MAX_FIELD_WIDTH];
String abbr(buff, sizeof(buff), &my_charset_latin1);
- char *alloc_buff, *tz_name_buff;
+ char *alloc_buff= NULL;
+ char *tz_name_buff= NULL;
/*
Temporary arrays that are used for loading of data for filling
TIME_ZONE_INFO structure
@@ -1836,22 +1837,6 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
DBUG_ENTER("tz_load_from_open_tables");
- /* Prepare tz_info for loading also let us make copy of time zone name */
- if (!(alloc_buff= (char*) alloc_root(&tz_storage, sizeof(TIME_ZONE_INFO) +
- tz_name->length() + 1)))
- {
- sql_print_error("Out of memory while loading time zone description");
- return 0;
- }
- tz_info= (TIME_ZONE_INFO *)alloc_buff;
- bzero(tz_info, sizeof(TIME_ZONE_INFO));
- tz_name_buff= alloc_buff + sizeof(TIME_ZONE_INFO);
- /*
- By writing zero to the end we guarantee that we can call ptr()
- instead of c_ptr() for time zone name.
- */
- strmake(tz_name_buff, tz_name->ptr(), tz_name->length());
-
/*
Let us find out time zone id by its name (there is only one index
and it is specifically for this purpose).
diff --git a/storage/heap/hp_delete.c b/storage/heap/hp_delete.c
index ceba0fcf12e..455996e32ef 100644
--- a/storage/heap/hp_delete.c
+++ b/storage/heap/hp_delete.c
@@ -104,7 +104,7 @@ int hp_rb_delete_key(HP_INFO *info, register HP_KEYDEF *keyinfo,
int hp_delete_key(HP_INFO *info, register HP_KEYDEF *keyinfo,
const uchar *record, uchar *recpos, int flag)
{
- ulong blength,pos2,pos_hashnr,lastpos_hashnr;
+ ulong blength, pos2, pos_hashnr, lastpos_hashnr, key_pos;
HASH_INFO *lastpos,*gpos,*pos,*pos3,*empty,*last_ptr;
HP_SHARE *share=info->s;
DBUG_ENTER("hp_delete_key");
@@ -116,9 +116,9 @@ int hp_delete_key(HP_INFO *info, register HP_KEYDEF *keyinfo,
last_ptr=0;
/* Search after record with key */
- pos= hp_find_hash(&keyinfo->block,
- hp_mask(hp_rec_hashnr(keyinfo, record), blength,
- share->records + 1));
+ key_pos= hp_mask(hp_rec_hashnr(keyinfo, record), blength, share->records + 1);
+ pos= hp_find_hash(&keyinfo->block, key_pos);
+
gpos = pos3 = 0;
while (pos->ptr_to_rec != recpos)
diff --git a/storage/innobase/btr/btr0btr.c b/storage/innobase/btr/btr0btr.c
index 5079757272a..fc4e07d7169 100644
--- a/storage/innobase/btr/btr0btr.c
+++ b/storage/innobase/btr/btr0btr.c
@@ -1973,6 +1973,7 @@ btr_lift_page_up(
ulint root_page_no;
ulint ancestors;
ulint i;
+ ibool lift_father_up = FALSE;
ut_ad(btr_page_get_prev(page, mtr) == FIL_NULL);
ut_ad(btr_page_get_next(page, mtr) == FIL_NULL);
@@ -2007,6 +2008,27 @@ btr_lift_page_up(
pages[ancestors++] = iter_page;
}
+ if (ancestors > 1 && page_level == 0) {
+ /* The father page must also be the only page on its level
+ (and not the root). In that case we lift up the father page
+ first, because a leaf page may only be lifted directly into
+ the root page. The freeing of a page chooses its segment
+ based on page_level (== 0 or != 0); if page_level changed
+ from != 0 to == 0, the later freeing of the page would not
+ find the page allocation to be freed. */
+
+ lift_father_up = TRUE;
+ page = father_page;
+ page_level = btr_page_get_level(page, mtr);
+
+ ut_ad(btr_page_get_prev(page, mtr) == FIL_NULL);
+ ut_ad(btr_page_get_next(page, mtr) == FIL_NULL);
+ ut_ad(mtr_memo_contains(mtr, buf_block_align(page),
+ MTR_MEMO_PAGE_X_FIX));
+
+ father_page = pages[1];
+ }
+
btr_search_drop_page_hash_index(page);
/* Make the father empty */
@@ -2018,7 +2040,7 @@ btr_lift_page_up(
lock_update_copy_and_discard(father_page, page);
/* Go upward to root page, decreasing levels by one. */
- for (i = 0; i < ancestors; i++) {
+ for (i = lift_father_up ? 1 : 0; i < ancestors; i++) {
iter_page = pages[i];
ut_ad(btr_page_get_level(iter_page, mtr) == (page_level + 1));
diff --git a/storage/innobase/btr/btr0cur.c b/storage/innobase/btr/btr0cur.c
index 389e95bcb0a..8dc0a92408b 100644
--- a/storage/innobase/btr/btr0cur.c
+++ b/storage/innobase/btr/btr0cur.c
@@ -49,6 +49,10 @@ ulint btr_cur_n_sea = 0;
ulint btr_cur_n_non_sea_old = 0;
ulint btr_cur_n_sea_old = 0;
+#ifdef UNIV_DEBUG
+uint btr_cur_limit_optimistic_insert_debug = 0;
+#endif /* UNIV_DEBUG */
+
/* In the optimistic insert, if the insert does not fit, but this much space
can be released by page reorganize, then it is reorganized */
@@ -1022,6 +1026,9 @@ calculate_sizes_again:
goto calculate_sizes_again;
}
+ LIMIT_OPTIMISTIC_INSERT_DEBUG(page_get_n_recs(page),
+ goto fail);
+
/* If there have been many consecutive inserts, and we are on the leaf
level, check if we have to split the page to reserve enough free space
for future updates of records. */
@@ -1034,7 +1041,9 @@ calculate_sizes_again:
&& (0 == level)
&& (btr_page_get_split_rec_to_right(cursor, &dummy_rec)
|| btr_page_get_split_rec_to_left(cursor, &dummy_rec))) {
-
+#ifdef UNIV_DEBUG
+fail:
+#endif /* UNIV_DEBUG */
if (big_rec_vec) {
dtuple_convert_back_big_rec(index, entry, big_rec_vec);
}
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index bcb903d22bf..194a7eea795 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -495,6 +495,12 @@ static SHOW_VAR innodb_status_variables[]= {
(char*) &export_vars.innodb_rows_read, SHOW_LONG},
{"rows_updated",
(char*) &export_vars.innodb_rows_updated, SHOW_LONG},
+#ifdef UNIV_DEBUG
+ {"purge_trx_id_age",
+ (char*) &export_vars.innodb_purge_trx_id_age, SHOW_LONG},
+ {"purge_view_trx_id_age",
+ (char*) &export_vars.innodb_purge_view_trx_id_age, SHOW_LONG},
+#endif /* UNIV_DEBUG */
{NullS, NullS, SHOW_LONG}
};
@@ -9274,6 +9280,18 @@ static MYSQL_SYSVAR_UINT(trx_rseg_n_slots_debug, trx_rseg_n_slots_debug,
PLUGIN_VAR_RQCMDARG,
"Debug flags for InnoDB to limit TRX_RSEG_N_SLOTS for trx_rsegf_undo_find_free()",
NULL, NULL, 0, 0, 1024, 0);
+
+static MYSQL_SYSVAR_UINT(limit_optimistic_insert_debug,
+ btr_cur_limit_optimistic_insert_debug, PLUGIN_VAR_RQCMDARG,
+ "Artificially limit the number of records per B-tree page (0=unlimited).",
+ NULL, NULL, 0, 0, UINT_MAX32, 0);
+
+static MYSQL_SYSVAR_BOOL(trx_purge_view_update_only_debug,
+ srv_purge_view_update_only_debug, PLUGIN_VAR_NOCMDARG,
+ "Pause actual purging any delete-marked records, but merely update the purge view. "
+ "It is to create artificially the situation the purge view have been updated "
+ "but the each purges were not done yet.",
+ NULL, NULL, FALSE);
#endif /* UNIV_DEBUG */
static struct st_mysql_sys_var* innobase_system_variables[]= {
@@ -9323,6 +9341,8 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
#ifdef UNIV_DEBUG
MYSQL_SYSVAR(trx_rseg_n_slots_debug),
+ MYSQL_SYSVAR(limit_optimistic_insert_debug),
+ MYSQL_SYSVAR(trx_purge_view_update_only_debug),
#endif /* UNIV_DEBUG */
NULL
};
diff --git a/storage/innobase/include/btr0cur.h b/storage/innobase/include/btr0cur.h
index 341d628c6dc..c2b81d0ae91 100644
--- a/storage/innobase/include/btr0cur.h
+++ b/storage/innobase/include/btr0cur.h
@@ -703,6 +703,11 @@ extern ulint btr_cur_n_sea;
extern ulint btr_cur_n_non_sea_old;
extern ulint btr_cur_n_sea_old;
+#ifdef UNIV_DEBUG
+/* Flag to limit optimistic insert records */
+extern uint btr_cur_limit_optimistic_insert_debug;
+#endif /* UNIV_DEBUG */
+
#ifndef UNIV_NONINL
#include "btr0cur.ic"
#endif
diff --git a/storage/innobase/include/btr0cur.ic b/storage/innobase/include/btr0cur.ic
index bd2c46eb734..d894f0546d9 100644
--- a/storage/innobase/include/btr0cur.ic
+++ b/storage/innobase/include/btr0cur.ic
@@ -8,6 +8,16 @@ Created 10/16/1994 Heikki Tuuri
#include "btr0btr.h"
+#ifdef UNIV_DEBUG
+# define LIMIT_OPTIMISTIC_INSERT_DEBUG(NREC, CODE)\
+if (btr_cur_limit_optimistic_insert_debug\
+ && (NREC) >= (ulint)btr_cur_limit_optimistic_insert_debug) {\
+ CODE;\
+}
+#else
+# define LIMIT_OPTIMISTIC_INSERT_DEBUG(NREC, CODE)
+#endif /* UNIV_DEBUG */
+
/*************************************************************
Returns the page cursor component of a tree cursor. */
UNIV_INLINE
@@ -100,6 +110,9 @@ btr_cur_compress_recommendation(
page = btr_cur_get_page(cursor);
+ LIMIT_OPTIMISTIC_INSERT_DEBUG(page_get_n_recs(page) * 2,
+ return(FALSE));
+
if ((page_get_data_size(page) < BTR_CUR_PAGE_COMPRESS_LIMIT)
|| ((btr_page_get_next(page, mtr) == FIL_NULL)
&& (btr_page_get_prev(page, mtr) == FIL_NULL))) {
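
LIMIT_OPTIMISTIC_INSERT_DEBUG() above is a UNIV_DEBUG-only guard used in
btr_cur_optimistic_insert() and btr_cur_compress_recommendation(). A minimal
sketch of the predicate it expands to (the wrapper function is hypothetical;
the variable name comes from the patch):

    static ibool limit_would_trigger(ulint n_recs)
    {
      /* mirrors: if (limit && (NREC) >= (ulint) limit) { CODE; } */
      return(btr_cur_limit_optimistic_insert_debug
             && n_recs >= (ulint) btr_cur_limit_optimistic_insert_debug);
    }

With the limit set to 3, for example, an optimistic insert into a page that
already holds three records takes the "goto fail" branch and falls back to
the pessimistic, page-splitting path, which lets tests build deep B-trees
from very small tables.
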
diff --git a/storage/innobase/include/lock0lock.h b/storage/innobase/include/lock0lock.h
index 70b141eafeb..476a2c8accb 100644
--- a/storage/innobase/include/lock0lock.h
+++ b/storage/innobase/include/lock0lock.h
@@ -685,6 +685,13 @@ extern lock_sys_t* lock_sys;
remains set when the waiting lock is granted,
or if the lock is inherited to a neighboring
record */
+#define LOCK_CONV_BY_OTHER 4096 /* this bit is set when the lock is created
+ by other transaction */
+/* Checks if this is a waiting lock created by lock->trx itself.
+@param type_mode lock->type_mode
+@return whether it is a waiting lock belonging to lock->trx */
+#define lock_is_wait_not_by_other(type_mode) \
+ ((type_mode & (LOCK_CONV_BY_OTHER | LOCK_WAIT)) == LOCK_WAIT)
/* When lock bits are reset, the following flags are available: */
#define LOCK_RELEASE_WAIT 1
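
A minimal sketch of what the new lock_is_wait_not_by_other() test
distinguishes (the local variable names are illustrative only):

    ulint own_wait  = LOCK_WAIT;                       /* wait created by lock->trx    */
    ulint conv_wait = LOCK_WAIT | LOCK_CONV_BY_OTHER;  /* wait reserved by another trx */
    ut_ad(lock_is_wait_not_by_other(own_wait));        /* holds                        */
    ut_ad(!lock_is_wait_not_by_other(conv_wait));      /* holds                        */
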
diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h
index aa6c88e0538..e1dbc4d11fa 100644
--- a/storage/innobase/include/srv0srv.h
+++ b/storage/innobase/include/srv0srv.h
@@ -176,6 +176,10 @@ extern ulint srv_fatal_semaphore_wait_threshold;
#define SRV_SEMAPHORE_WAIT_EXTENSION 7200
extern ulint srv_dml_needed_delay;
+#ifdef UNIV_DEBUG
+extern my_bool srv_purge_view_update_only_debug;
+#endif /* UNIV_DEBUG */
+
extern mutex_t* kernel_mutex_temp;/* mutex protecting the server, trx structs,
query threads, and lock table: we allocate
it from dynamic memory to get it to the
@@ -569,6 +573,10 @@ struct export_var_struct{
ulint innodb_rows_inserted;
ulint innodb_rows_updated;
ulint innodb_rows_deleted;
+#ifdef UNIV_DEBUG
+ ulint innodb_purge_trx_id_age;
+ ulint innodb_purge_view_trx_id_age;
+#endif /* UNIV_DEBUG */
};
/* The server system struct */
diff --git a/storage/innobase/include/trx0purge.h b/storage/innobase/include/trx0purge.h
index fbae7eb9a6d..fc9ff021920 100644
--- a/storage/innobase/include/trx0purge.h
+++ b/storage/innobase/include/trx0purge.h
@@ -133,6 +133,10 @@ struct trx_purge_struct{
than this */
dulint purge_undo_no; /* Purge has advanced past all records
whose undo number is less than this */
+#ifdef UNIV_DEBUG
+ dulint done_trx_no; /* The 'purge pointer': purging has
+ actually completed up to this trx no. */
+#endif /* UNIV_DEBUG */
/*-----------------------------*/
ibool next_stored; /* TRUE if the info of the next record
to purge is stored below: if yes, then
diff --git a/storage/innobase/lock/lock0lock.c b/storage/innobase/lock/lock0lock.c
index 57df99fb401..d36e38df4af 100644
--- a/storage/innobase/lock/lock0lock.c
+++ b/storage/innobase/lock/lock0lock.c
@@ -725,12 +725,16 @@ lock_reset_lock_and_trx_wait(
/*=========================*/
lock_t* lock) /* in: record lock */
{
- ut_ad((lock->trx)->wait_lock == lock);
ut_ad(lock_get_wait(lock));
/* Reset the back pointer in trx to this waiting lock request */
- (lock->trx)->wait_lock = NULL;
+ if (!(lock->type_mode & LOCK_CONV_BY_OTHER)) {
+ ut_ad((lock->trx)->wait_lock == lock);
+ (lock->trx)->wait_lock = NULL;
+ } else {
+ ut_ad(lock_get_type(lock) == LOCK_REC);
+ }
lock->type_mode = lock->type_mode & ~LOCK_WAIT;
}
@@ -1437,9 +1441,9 @@ lock_rec_has_expl(
while (lock) {
if (lock->trx == trx
+ && !lock_is_wait_not_by_other(lock->type_mode)
&& lock_mode_stronger_or_eq(lock_get_mode(lock),
precise_mode & LOCK_MODE_MASK)
- && !lock_get_wait(lock)
&& (!lock_rec_get_rec_not_gap(lock)
|| (precise_mode & LOCK_REC_NOT_GAP)
|| page_rec_is_supremum(rec))
@@ -1723,7 +1727,7 @@ lock_rec_create(
HASH_INSERT(lock_t, hash, lock_sys->rec_hash,
lock_rec_fold(space, page_no), lock);
- if (type_mode & LOCK_WAIT) {
+ if (lock_is_wait_not_by_other(type_mode)) {
lock_set_lock_and_trx_wait(lock, trx);
}
@@ -1752,10 +1756,11 @@ lock_rec_enqueue_waiting(
lock request is set when performing an
insert of an index record */
rec_t* rec, /* in: record */
+ lock_t* lock, /* in: lock object; NULL if a new
+ one should be created. */
dict_index_t* index, /* in: index of record */
que_thr_t* thr) /* in: query thread */
{
- lock_t* lock;
trx_t* trx;
ut_ad(mutex_own(&kernel_mutex));
@@ -1785,8 +1790,16 @@ lock_rec_enqueue_waiting(
stderr);
}
- /* Enqueue the lock request that will wait to be granted */
- lock = lock_rec_create(type_mode | LOCK_WAIT, rec, index, trx);
+ if (lock == NULL) {
+ /* Enqueue the lock request that will wait to be granted */
+ lock = lock_rec_create(type_mode | LOCK_WAIT, rec, index, trx);
+ } else {
+ ut_ad(lock->type_mode & LOCK_WAIT);
+ ut_ad(lock->type_mode & LOCK_CONV_BY_OTHER);
+
+ lock->type_mode &= ~LOCK_CONV_BY_OTHER;
+ lock_set_lock_and_trx_wait(lock, trx);
+ }
/* Check if a deadlock occurs: if yes, remove the lock request and
return an error code */
@@ -2011,6 +2024,7 @@ lock_rec_lock_slow(
que_thr_t* thr) /* in: query thread */
{
trx_t* trx;
+ lock_t* lock;
ut_ad(mutex_own(&kernel_mutex));
ut_ad((LOCK_MODE_MASK & mode) != LOCK_S
@@ -2025,7 +2039,27 @@ lock_rec_lock_slow(
trx = thr_get_trx(thr);
- if (lock_rec_has_expl(mode, rec, trx)) {
+ lock = lock_rec_has_expl(mode, rec, trx);
+ if (lock) {
+ if (lock->type_mode & LOCK_CONV_BY_OTHER) {
+ /* This lock, or lock wait, was created by another
+ transaction, not by the transaction (trx) itself.
+ The transaction (trx) must therefore handle it
+ correctly depending on whether it has been granted. */
+
+ if (lock->type_mode & LOCK_WAIT) {
+ /* This lock request has not been granted yet.
+ Wait until it is granted. */
+
+ goto enqueue_waiting;
+ } else {
+ /* This lock request was already granted.
+ Just clear the flag. */
+
+ lock->type_mode &= ~LOCK_CONV_BY_OTHER;
+ }
+ }
+
/* The trx already has a strong enough lock on rec: do
nothing */
@@ -2035,7 +2069,9 @@ lock_rec_lock_slow(
the queue, as this transaction does not have a lock strong
enough already granted on the record, we have to wait. */
- return(lock_rec_enqueue_waiting(mode, rec, index, thr));
+ ut_ad(lock == NULL);
+enqueue_waiting:
+ return(lock_rec_enqueue_waiting(mode, rec, lock, index, thr));
} else if (!impl) {
/* Set the requested lock on the record */
@@ -2171,7 +2207,8 @@ lock_grant(
TRX_QUE_LOCK_WAIT state, and there is no need to end the lock wait
for it */
- if (lock->trx->que_state == TRX_QUE_LOCK_WAIT) {
+ if (!(lock->type_mode & LOCK_CONV_BY_OTHER)
+ && lock->trx->que_state == TRX_QUE_LOCK_WAIT) {
trx_end_lock_wait(lock->trx);
}
}
@@ -2188,6 +2225,7 @@ lock_rec_cancel(
{
ut_ad(mutex_own(&kernel_mutex));
ut_ad(lock_get_type(lock) == LOCK_REC);
+ ut_ad(!(lock->type_mode & LOCK_CONV_BY_OTHER));
/* Reset the bit (there can be only one set bit) in the lock bitmap */
lock_rec_reset_nth_bit(lock, lock_rec_find_set_bit(lock));
@@ -2331,8 +2369,12 @@ lock_rec_reset_and_release_wait(
lock = lock_rec_get_first(rec);
while (lock != NULL) {
- if (lock_get_wait(lock)) {
+ if (lock_is_wait_not_by_other(lock->type_mode)) {
lock_rec_cancel(lock);
+ } else if (lock_get_wait(lock)) {
+ /* just reset LOCK_WAIT */
+ lock_rec_reset_nth_bit(lock, heap_no);
+ lock_reset_lock_and_trx_wait(lock);
} else {
lock_rec_reset_nth_bit(lock, heap_no);
}
@@ -3383,6 +3425,7 @@ lock_table_create(
ut_ad(table && trx);
ut_ad(mutex_own(&kernel_mutex));
+ ut_ad(!(type_mode & LOCK_CONV_BY_OTHER));
if ((type_mode & LOCK_MODE_MASK) == LOCK_AUTO_INC) {
++table->n_waiting_or_granted_auto_inc_locks;
@@ -3900,6 +3943,7 @@ lock_cancel_waiting_and_release(
lock_t* lock) /* in: waiting lock request */
{
ut_ad(mutex_own(&kernel_mutex));
+ ut_ad(!(lock->type_mode & LOCK_CONV_BY_OTHER));
if (lock_get_type(lock) == LOCK_REC) {
@@ -4871,7 +4915,7 @@ lock_rec_insert_check_and_lock(
/* Note that we may get DB_SUCCESS also here! */
err = lock_rec_enqueue_waiting(LOCK_X | LOCK_GAP
| LOCK_INSERT_INTENTION,
- next_rec, index, thr);
+ next_rec, NULL, index, thr);
} else {
err = DB_SUCCESS;
}
@@ -4941,10 +4985,23 @@ lock_rec_convert_impl_to_expl(
if (!lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP, rec,
impl_trx)) {
+ ulint type_mode = (LOCK_REC | LOCK_X
+ | LOCK_REC_NOT_GAP);
+
+ /* If the delete-marked record is already locked by
+ another transaction, impl_trx cannot be granted the
+ lock right now; reserve a waiting lock for it instead. */
+
+ if (rec_get_deleted_flag(rec, rec_offs_comp(offsets))
+ && lock_rec_other_has_conflicting(
+ LOCK_X | LOCK_REC_NOT_GAP,
+ rec, impl_trx)) {
+
+ type_mode |= (LOCK_WAIT | LOCK_CONV_BY_OTHER);
+ }
lock_rec_add_to_queue(
- LOCK_REC | LOCK_X | LOCK_REC_NOT_GAP,
- rec, index, impl_trx);
+ type_mode, rec, index, impl_trx);
}
}
}
diff --git a/storage/innobase/row/row0ins.c b/storage/innobase/row/row0ins.c
index 785a0426195..678293b492b 100644
--- a/storage/innobase/row/row0ins.c
+++ b/storage/innobase/row/row0ins.c
@@ -2195,7 +2195,10 @@ row_ins_index_entry(
err = row_ins_index_entry_low(BTR_MODIFY_LEAF, index, entry,
ext_vec, n_ext_vec, thr);
if (err != DB_FAIL) {
-
+ if (index == dict_table_get_first_index(index->table)
+ && thr_get_trx(thr)->mysql_thd != 0) {
+ DEBUG_SYNC_C("row_ins_clust_index_entry_leaf_after");
+ }
return(err);
}
diff --git a/storage/innobase/srv/srv0srv.c b/storage/innobase/srv/srv0srv.c
index 3f6f1982992..3240b7515f8 100644
--- a/storage/innobase/srv/srv0srv.c
+++ b/storage/innobase/srv/srv0srv.c
@@ -48,6 +48,10 @@ Created 10/8/1995 Heikki Tuuri
#include "srv0start.h"
#include "row0mysql.h"
#include "ha_prototypes.h"
+#include "read0read.h"
+
+#include "m_string.h" /* for my_sys.h */
+#include "my_sys.h" /* DEBUG_SYNC_C */
/* This is set to TRUE if the MySQL user has set it in MySQL; currently
affects only FOREIGN KEY definition parsing */
@@ -1435,6 +1439,10 @@ srv_suspend_mysql_thread(
trx = thr_get_trx(thr);
+ if (trx->mysql_thd != 0) {
+ DEBUG_SYNC_C("srv_suspend_mysql_thread_enter");
+ }
+
os_event_set(srv_lock_timeout_thread_event);
mutex_enter(&kernel_mutex);
@@ -1913,6 +1921,25 @@ srv_export_innodb_status(void)
export_vars.innodb_rows_updated = srv_n_rows_updated;
export_vars.innodb_rows_deleted = srv_n_rows_deleted;
+#ifdef UNIV_DEBUG
+ if (ut_dulint_cmp(trx_sys->max_trx_id, purge_sys->done_trx_no) < 0) {
+ export_vars.innodb_purge_trx_id_age = 0;
+ } else {
+ export_vars.innodb_purge_trx_id_age =
+ ut_dulint_minus(trx_sys->max_trx_id, purge_sys->done_trx_no);
+ }
+
+ if (!purge_sys->view
+ || ut_dulint_cmp(trx_sys->max_trx_id,
+ purge_sys->view->up_limit_id) < 0) {
+ export_vars.innodb_purge_view_trx_id_age = 0;
+ } else {
+ export_vars.innodb_purge_view_trx_id_age =
+ ut_dulint_minus(trx_sys->max_trx_id,
+ purge_sys->view->up_limit_id);
+ }
+#endif /* UNIV_DEBUG */
+
mutex_exit(&srv_innodb_monitor_mutex);
}
@@ -2387,6 +2414,29 @@ loop:
+ buf_pool->n_pages_written;
srv_main_thread_op_info = "sleeping";
+#ifdef UNIV_DEBUG
+ if (btr_cur_limit_optimistic_insert_debug) {
+ /* If btr_cur_limit_optimistic_insert_debug is enabled
+ and there are no purge threads, purge runs 100 times
+ more often (one purge per 100 msec), to speed up debug
+ scripts that wait for records to be purged. */
+
+ if (!skip_sleep) {
+ os_thread_sleep(100000);
+ }
+
+ do {
+ if (srv_fast_shutdown
+ && srv_shutdown_state > 0) {
+ goto background_loop;
+ }
+
+ srv_main_thread_op_info = "purging";
+ n_pages_purged = trx_purge();
+
+ } while (n_pages_purged);
+ } else
+#endif /* UNIV_DEBUG */
if (!skip_sleep) {
os_thread_sleep(1000000);
diff --git a/storage/innobase/trx/trx0purge.c b/storage/innobase/trx/trx0purge.c
index 6fe5855ebfa..1e540c96536 100644
--- a/storage/innobase/trx/trx0purge.c
+++ b/storage/innobase/trx/trx0purge.c
@@ -34,6 +34,10 @@ trx_purge_t* purge_sys = NULL;
which needs no purge */
trx_undo_rec_t trx_purge_dummy_rec;
+#ifdef UNIV_DEBUG
+my_bool srv_purge_view_update_only_debug;
+#endif /* UNIV_DEBUG */
+
/*********************************************************************
Checks if trx_id is >= purge_view: then it is guaranteed that its update
undo log still exists in the system. */
@@ -209,6 +213,7 @@ trx_purge_sys_create(void)
purge_sys->purge_trx_no = ut_dulint_zero;
purge_sys->purge_undo_no = ut_dulint_zero;
purge_sys->next_stored = FALSE;
+ ut_d(purge_sys->done_trx_no = ut_dulint_zero);
rw_lock_create(&purge_sys->latch, SYNC_PURGE_LATCH);
@@ -576,6 +581,7 @@ trx_purge_truncate_if_arr_empty(void)
ut_ad(mutex_own(&(purge_sys->mutex)));
if (purge_sys->arr->n_used == 0) {
+ ut_d(purge_sys->done_trx_no = purge_sys->purge_trx_no);
trx_purge_truncate_history();
@@ -1077,6 +1083,13 @@ trx_purge(void)
rw_lock_x_unlock(&(purge_sys->latch));
+#ifdef UNIV_DEBUG
+ if (srv_purge_view_update_only_debug) {
+ mutex_exit(&(purge_sys->mutex));
+ return(0);
+ }
+#endif
+
purge_sys->state = TRX_PURGE_ON;
/* Handle at most 20 undo log pages in one purge batch */
diff --git a/storage/innodb_plugin/ChangeLog b/storage/innodb_plugin/ChangeLog
index f69f9e16904..e9c86a85d29 100644
--- a/storage/innodb_plugin/ChangeLog
+++ b/storage/innodb_plugin/ChangeLog
@@ -1,3 +1,22 @@
+2012-12-18 The InnoDB Team
+
+ * include/univ.i:
+ Fix Bug#13463493 INNODB PLUGIN WERE CHANGED, BUT STILL USE THE
+ SAME VERSION NUMBER 1.0.17
+
+2012-12-13 The InnoDB Team
+
+ * buf/buf0buf.c:
+ Fix Bug#14329288 IS THE CALL TO IBUF_MERGE_OR_DELETE_FOR_PAGE FROM
+ BUF_PAGE_GET_GEN REDUNDANT?
+
+2012-11-15 The InnoDB Team
+
+ * include/data0type.ic, include/rem0rec.h,
+ rem/rem0rec.c, row/row0merge.c:
+ Fix Bug#15874001 CREATE INDEX ON A UTF8 CHAR COLUMN FAILS WITH
+ ROW_FORMAT=REDUNDANT
+
2012-10-18 The InnoDB Team
* row/row0sel.c:
diff --git a/storage/innodb_plugin/btr/btr0btr.c b/storage/innodb_plugin/btr/btr0btr.c
index 9fef7843f9a..04f3a79866e 100644
--- a/storage/innodb_plugin/btr/btr0btr.c
+++ b/storage/innodb_plugin/btr/btr0btr.c
@@ -3072,6 +3072,8 @@ btr_lift_page_up(
buf_block_t* blocks[BTR_MAX_LEVELS];
ulint n_blocks; /*!< last used index in blocks[] */
ulint i;
+ ibool lift_father_up = FALSE;
+ buf_block_t* block_orig = block;
ut_ad(btr_page_get_prev(page, mtr) == FIL_NULL);
ut_ad(btr_page_get_next(page, mtr) == FIL_NULL);
@@ -3082,11 +3084,13 @@ btr_lift_page_up(
{
btr_cur_t cursor;
- mem_heap_t* heap = mem_heap_create(100);
- ulint* offsets;
+ ulint* offsets = NULL;
+ mem_heap_t* heap = mem_heap_create(
+ sizeof(*offsets)
+ * (REC_OFFS_HEADER_SIZE + 1 + 1 + index->n_fields));
buf_block_t* b;
- offsets = btr_page_get_father_block(NULL, heap, index,
+ offsets = btr_page_get_father_block(offsets, heap, index,
block, mtr, &cursor);
father_block = btr_cur_get_block(&cursor);
father_page_zip = buf_block_get_page_zip(father_block);
@@ -3110,6 +3114,29 @@ btr_lift_page_up(
blocks[n_blocks++] = b = btr_cur_get_block(&cursor);
}
+ if (n_blocks && page_level == 0) {
+ /* The father page must also be the only page on its level
+ (and not the root). In that case we lift up the father page
+ first, because a leaf page may only be lifted directly into
+ the root page. The freeing of a page chooses its segment
+ based on page_level (== 0 or != 0); if page_level changed
+ from != 0 to == 0, the later freeing of the page would not
+ find the page allocation to be freed. */
+
+ lift_father_up = TRUE;
+ block = father_block;
+ page = buf_block_get_frame(block);
+ page_level = btr_page_get_level(page, mtr);
+
+ ut_ad(btr_page_get_prev(page, mtr) == FIL_NULL);
+ ut_ad(btr_page_get_next(page, mtr) == FIL_NULL);
+ ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
+
+ father_block = blocks[0];
+ father_page_zip = buf_block_get_page_zip(father_block);
+ father_page = buf_block_get_frame(father_block);
+ }
+
mem_heap_free(heap);
}
@@ -3117,6 +3144,7 @@ btr_lift_page_up(
/* Make the father empty */
btr_page_empty(father_block, father_page_zip, index, page_level, mtr);
+ page_level++;
/* Copy the records to the father page one by one. */
if (0
@@ -3149,7 +3177,7 @@ btr_lift_page_up(
lock_update_copy_and_discard(father_block, block);
/* Go upward to root page, decrementing levels by one. */
- for (i = 0; i < n_blocks; i++, page_level++) {
+ for (i = lift_father_up ? 1 : 0; i < n_blocks; i++, page_level++) {
page_t* page = buf_block_get_frame(blocks[i]);
page_zip_des_t* page_zip= buf_block_get_page_zip(blocks[i]);
@@ -3171,7 +3199,7 @@ btr_lift_page_up(
ut_ad(page_validate(father_page, index));
ut_ad(btr_check_node_ptr(index, father_block, mtr));
- return(father_block);
+ return(lift_father_up ? block_orig : father_block);
}
/*************************************************************//**
diff --git a/storage/innodb_plugin/btr/btr0cur.c b/storage/innodb_plugin/btr/btr0cur.c
index 6c67d27ffec..e38b8a9bf53 100644
--- a/storage/innodb_plugin/btr/btr0cur.c
+++ b/storage/innodb_plugin/btr/btr0cur.c
@@ -86,6 +86,11 @@ srv_refresh_innodb_monitor_stats(). Referenced by
srv_printf_innodb_monitor(). */
UNIV_INTERN ulint btr_cur_n_sea_old = 0;
+#ifdef UNIV_DEBUG
+/* Flag to limit optimistic insert records */
+UNIV_INTERN uint btr_cur_limit_optimistic_insert_debug = 0;
+#endif /* UNIV_DEBUG */
+
/** In the optimistic insert, if the insert does not fit, but this much space
can be released by page reorganize, then it is reorganized */
#define BTR_CUR_PAGE_REORGANIZE_LIMIT (UNIV_PAGE_SIZE / 32)
@@ -1171,6 +1176,9 @@ btr_cur_optimistic_insert(
}
}
+ LIMIT_OPTIMISTIC_INSERT_DEBUG(page_get_n_recs(page),
+ goto fail);
+
/* If there have been many consecutive inserts, and we are on the leaf
level, check if we have to split the page to reserve enough free space
for future updates of records. */
diff --git a/storage/innodb_plugin/buf/buf0buf.c b/storage/innodb_plugin/buf/buf0buf.c
index b49f5288681..e7e60fb2b2a 100644
--- a/storage/innodb_plugin/buf/buf0buf.c
+++ b/storage/innodb_plugin/buf/buf0buf.c
@@ -1108,32 +1108,21 @@ buf_page_make_young(
}
/********************************************************************//**
-Sets the time of the first access of a page and moves a page to the
-start of the buffer pool LRU list if it is too old. This high-level
-function can be used to prevent an important page from slipping
-out of the buffer pool. */
+Moves a page to the start of the buffer pool LRU list if it is too old.
+This high-level function can be used to prevent an important page from
+slipping out of the buffer pool. */
static
void
-buf_page_set_accessed_make_young(
-/*=============================*/
- buf_page_t* bpage, /*!< in/out: buffer block of a
+buf_page_make_young_if_needed(
+/*==========================*/
+ buf_page_t* bpage) /*!< in/out: buffer block of a
file page */
- unsigned access_time) /*!< in: bpage->access_time
- read under mutex protection,
- or 0 if unknown */
{
ut_ad(!buf_pool_mutex_own());
ut_a(buf_page_in_file(bpage));
if (buf_page_peek_if_too_old(bpage)) {
- buf_pool_mutex_enter();
- buf_LRU_make_block_young(bpage);
- buf_pool_mutex_exit();
- } else if (!access_time) {
- ulint time_ms = ut_time_ms();
- buf_pool_mutex_enter();
- buf_page_set_accessed(bpage, time_ms);
- buf_pool_mutex_exit();
+ buf_page_make_young(bpage);
}
}
@@ -1217,7 +1206,6 @@ buf_page_get_zip(
buf_page_t* bpage;
mutex_t* block_mutex;
ibool must_read;
- unsigned access_time;
#ifndef UNIV_LOG_DEBUG
ut_ad(!ibuf_inside());
@@ -1284,13 +1272,14 @@ err_exit:
got_block:
must_read = buf_page_get_io_fix(bpage) == BUF_IO_READ;
- access_time = buf_page_is_accessed(bpage);
buf_pool_mutex_exit();
+ buf_page_set_accessed(bpage);
+
mutex_exit(block_mutex);
- buf_page_set_accessed_make_young(bpage, access_time);
+ buf_page_make_young_if_needed(bpage);
#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG
ut_a(!bpage->file_page_was_freed);
@@ -1789,22 +1778,29 @@ wait_until_unfixed:
UNIV_MEM_INVALID(bpage, sizeof *bpage);
- mutex_exit(&block->mutex);
- mutex_exit(&buf_pool_zip_mutex);
buf_pool->n_pend_unzip++;
-
buf_pool_mutex_exit();
+ access_time = buf_page_is_accessed(&block->page);
+ mutex_exit(&block->mutex);
+ mutex_exit(&buf_pool_zip_mutex);
+
buf_page_free_descriptor(bpage);
- /* Decompress the page and apply buffered operations
- while not holding buf_pool_mutex or block->mutex. */
+ /* Decompress the page while not holding
+ buf_pool_mutex or block->mutex. */
success = buf_zip_decompress(block, srv_use_checksums);
ut_a(success);
if (UNIV_LIKELY(!recv_no_ibuf_operations)) {
- ibuf_merge_or_delete_for_page(block, space, offset,
- zip_size, TRUE);
+ if (access_time) {
+#ifdef UNIV_IBUF_COUNT_DEBUG
+ ut_a(ibuf_count_get(space, offset) == 0);
+#endif /* UNIV_IBUF_COUNT_DEBUG */
+ } else {
+ ibuf_merge_or_delete_for_page(
+ block, space, offset, zip_size, TRUE);
+ }
}
/* Unfix and unlatch the block. */
@@ -1861,16 +1857,16 @@ wait_until_unfixed:
buf_block_buf_fix_inc(block, file, line);
- mutex_exit(&block->mutex);
-
- /* Check if this is the first access to the page */
+ buf_pool_mutex_exit();
access_time = buf_page_is_accessed(&block->page);
- buf_pool_mutex_exit();
+ buf_page_set_accessed(&block->page);
+
+ mutex_exit(&block->mutex);
if (UNIV_LIKELY(mode != BUF_PEEK_IF_IN_POOL)) {
- buf_page_set_accessed_make_young(&block->page, access_time);
+ buf_page_make_young_if_needed(&block->page);
}
#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG
@@ -1925,7 +1921,7 @@ wait_until_unfixed:
mtr_memo_push(mtr, block, fix_type);
- if (UNIV_LIKELY(mode != BUF_PEEK_IF_IN_POOL) && !access_time) {
+ if (mode != BUF_PEEK_IF_IN_POOL && !access_time) {
/* In the case of a first access, try to apply linear
read-ahead */
@@ -1975,15 +1971,13 @@ buf_page_optimistic_get(
buf_block_buf_fix_inc(block, file, line);
- mutex_exit(&block->mutex);
+ access_time = buf_page_is_accessed(&block->page);
- /* Check if this is the first access to the page.
- We do a dirty read on purpose, to avoid mutex contention.
- This field is only used for heuristic purposes; it does not
- affect correctness. */
+ buf_page_set_accessed(&block->page);
- access_time = buf_page_is_accessed(&block->page);
- buf_page_set_accessed_make_young(&block->page, access_time);
+ mutex_exit(&block->mutex);
+
+ buf_page_make_young_if_needed(&block->page);
ut_ad(!ibuf_inside()
|| ibuf_page(buf_block_get_space(block),
@@ -2035,7 +2029,7 @@ buf_page_optimistic_get(
#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG
ut_a(block->page.file_page_was_freed == FALSE);
#endif
- if (UNIV_UNLIKELY(!access_time)) {
+ if (!access_time) {
/* In the case of a first access, try to apply linear
read-ahead */
@@ -2095,22 +2089,12 @@ buf_page_get_known_nowait(
buf_block_buf_fix_inc(block, file, line);
- mutex_exit(&block->mutex);
+ buf_page_set_accessed(&block->page);
- if (mode == BUF_MAKE_YOUNG && buf_page_peek_if_too_old(&block->page)) {
- buf_pool_mutex_enter();
- buf_LRU_make_block_young(&block->page);
- buf_pool_mutex_exit();
- } else if (!buf_page_is_accessed(&block->page)) {
- /* Above, we do a dirty read on purpose, to avoid
- mutex contention. The field buf_page_t::access_time
- is only used for heuristic purposes. Writes to the
- field must be protected by mutex, however. */
- ulint time_ms = ut_time_ms();
+ mutex_exit(&block->mutex);
- buf_pool_mutex_enter();
- buf_page_set_accessed(&block->page, time_ms);
- buf_pool_mutex_exit();
+ if (mode == BUF_MAKE_YOUNG) {
+ buf_page_make_young_if_needed(&block->page);
}
ut_ad(!ibuf_inside() || (mode == BUF_KEEP_OLD));
@@ -2542,7 +2526,6 @@ buf_page_create(
buf_frame_t* frame;
buf_block_t* block;
buf_block_t* free_block = NULL;
- ulint time_ms = ut_time_ms();
ut_ad(mtr);
ut_ad(mtr->state == MTR_ACTIVE);
@@ -2627,12 +2610,12 @@ buf_page_create(
rw_lock_x_unlock(&block->lock);
}
- buf_page_set_accessed(&block->page, time_ms);
-
buf_pool_mutex_exit();
mtr_memo_push(mtr, block, MTR_MEMO_BUF_FIX);
+ buf_page_set_accessed(&block->page);
+
mutex_exit(&block->mutex);
/* Delete possible entries for the page from the insert buffer:
diff --git a/storage/innodb_plugin/fil/fil0fil.c b/storage/innodb_plugin/fil/fil0fil.c
index 6f2ab938042..d5770baafea 100644
--- a/storage/innodb_plugin/fil/fil0fil.c
+++ b/storage/innodb_plugin/fil/fil0fil.c
@@ -2619,7 +2619,7 @@ retry:
mutex_exit(&fil_system->mutex);
#ifndef UNIV_HOTBACKUP
- if (success) {
+ if (success && !recv_recovery_on) {
mtr_t mtr;
mtr_start(&mtr);
@@ -4853,3 +4853,28 @@ fil_close(void)
fil_system = NULL;
}
+
+/****************************************************************//**
+Generate redo logs for swapping two .ibd files */
+UNIV_INTERN
+void
+fil_mtr_rename_log(
+/*===============*/
+ ulint old_space_id, /*!< in: tablespace id of the old
+ table. */
+ const char* old_name, /*!< in: old table name */
+ ulint new_space_id, /*!< in: tablespace id of the new
+ table */
+ const char* new_name, /*!< in: new table name */
+ const char* tmp_name) /*!< in: temp table name used while
+ swapping */
+{
+ mtr_t mtr;
+ mtr_start(&mtr);
+ fil_op_write_log(MLOG_FILE_RENAME, old_space_id,
+ 0, 0, old_name, tmp_name, &mtr);
+ fil_op_write_log(MLOG_FILE_RENAME, new_space_id,
+ 0, 0, new_name, old_name, &mtr);
+ mtr_commit(&mtr);
+}
+
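
A hedged usage sketch of the new fil_mtr_rename_log() helper; the space ids
and names below are made up for illustration, only the function and its
parameter order come from the patch. Writing both MLOG_FILE_RENAME records
inside one mini-transaction means crash recovery sees the two renames of a
tablespace swap as a single unit:

    /* hypothetical swap of db/t1 (old_space_id) and db/t1#new (new_space_id) */
    fil_mtr_rename_log(old_space_id, "db/t1",
                       new_space_id, "db/t1#new",
                       "db/t1#tmp");   /* temporary name used during the swap */
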
diff --git a/storage/innodb_plugin/handler/ha_innodb.cc b/storage/innodb_plugin/handler/ha_innodb.cc
index ff5542c4b2c..2b2252d72e0 100644
--- a/storage/innodb_plugin/handler/ha_innodb.cc
+++ b/storage/innodb_plugin/handler/ha_innodb.cc
@@ -577,6 +577,12 @@ static SHOW_VAR innodb_status_variables[]= {
(char*) &export_vars.innodb_rows_read, SHOW_LONG},
{"rows_updated",
(char*) &export_vars.innodb_rows_updated, SHOW_LONG},
+#ifdef UNIV_DEBUG
+ {"purge_trx_id_age",
+ (char*) &export_vars.innodb_purge_trx_id_age, SHOW_LONG},
+ {"purge_view_trx_id_age",
+ (char*) &export_vars.innodb_purge_view_trx_id_age, SHOW_LONG},
+#endif /* UNIV_DEBUG */
{NullS, NullS, SHOW_LONG}
};
@@ -923,6 +929,8 @@ convert_error_code_to_mysql(
#endif /* HA_ERR_TOO_MANY_CONCURRENT_TRXS */
case DB_UNSUPPORTED:
return(HA_ERR_UNSUPPORTED);
+ case DB_OUT_OF_MEMORY:
+ return(HA_ERR_OUT_OF_MEM);
}
}
@@ -1132,6 +1140,11 @@ innobase_mysql_tmpfile(void)
DBUG_ENTER("innobase_mysql_tmpfile");
+ DBUG_EXECUTE_IF(
+ "innobase_tmpfile_creation_failure",
+ DBUG_RETURN(-1);
+ );
+
tmpdir = my_tmpdir(&mysql_tmpdir_list);
/* The tmpdir parameter can not be NULL for GetTempFileName. */
@@ -1193,7 +1206,15 @@ innobase_mysql_tmpfile(void)
/*========================*/
{
int fd2 = -1;
- File fd = mysql_tmpfile("ib");
+ File fd;
+
+ DBUG_EXECUTE_IF(
+ "innobase_tmpfile_creation_failure",
+ return(-1);
+ );
+
+ fd = mysql_tmpfile("ib");
+
if (fd >= 0) {
/* Copy the file descriptor, so that the additional resources
allocated by create_temp_file() can be freed by invoking
@@ -11263,6 +11284,18 @@ static MYSQL_SYSVAR_UINT(trx_rseg_n_slots_debug, trx_rseg_n_slots_debug,
PLUGIN_VAR_RQCMDARG,
"Debug flags for InnoDB to limit TRX_RSEG_N_SLOTS for trx_rsegf_undo_find_free()",
NULL, NULL, 0, 0, 1024, 0);
+
+static MYSQL_SYSVAR_UINT(limit_optimistic_insert_debug,
+ btr_cur_limit_optimistic_insert_debug, PLUGIN_VAR_RQCMDARG,
+ "Artificially limit the number of records per B-tree page (0=unlimited).",
+ NULL, NULL, 0, 0, UINT_MAX32, 0);
+
+static MYSQL_SYSVAR_BOOL(trx_purge_view_update_only_debug,
+ srv_purge_view_update_only_debug, PLUGIN_VAR_NOCMDARG,
+ "Pause actual purging any delete-marked records, but merely update the purge view. "
+ "It is to create artificially the situation the purge view have been updated "
+ "but the each purges were not done yet.",
+ NULL, NULL, FALSE);
#endif /* UNIV_DEBUG */
static struct st_mysql_sys_var* innobase_system_variables[]= {
@@ -11328,6 +11361,8 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(io_capacity),
#ifdef UNIV_DEBUG
MYSQL_SYSVAR(trx_rseg_n_slots_debug),
+ MYSQL_SYSVAR(limit_optimistic_insert_debug),
+ MYSQL_SYSVAR(trx_purge_view_update_only_debug),
#endif /* UNIV_DEBUG */
NULL
};
diff --git a/storage/innodb_plugin/handler/handler0alter.cc b/storage/innodb_plugin/handler/handler0alter.cc
index 0422abb0021..b3ecd02f575 100644
--- a/storage/innodb_plugin/handler/handler0alter.cc
+++ b/storage/innodb_plugin/handler/handler0alter.cc
@@ -98,7 +98,6 @@ innobase_col_to_mysql(
case DATA_MYSQL:
ut_ad(flen >= len);
ut_ad(col->mbmaxlen >= col->mbminlen);
- ut_ad(col->mbmaxlen > col->mbminlen || flen == len);
memcpy(dest, data, len);
break;
diff --git a/storage/innodb_plugin/include/btr0cur.h b/storage/innodb_plugin/include/btr0cur.h
index 1c421167828..7744d2d1ee2 100644
--- a/storage/innodb_plugin/include/btr0cur.h
+++ b/storage/innodb_plugin/include/btr0cur.h
@@ -795,6 +795,11 @@ srv_printf_innodb_monitor(). */
extern ulint btr_cur_n_sea_old;
#endif /* !UNIV_HOTBACKUP */
+#ifdef UNIV_DEBUG
+/* Flag to limit optimistic insert records */
+extern uint btr_cur_limit_optimistic_insert_debug;
+#endif /* UNIV_DEBUG */
+
#ifndef UNIV_NONINL
#include "btr0cur.ic"
#endif
diff --git a/storage/innodb_plugin/include/btr0cur.ic b/storage/innodb_plugin/include/btr0cur.ic
index e31f77c77eb..5fc4651ca13 100644
--- a/storage/innodb_plugin/include/btr0cur.ic
+++ b/storage/innodb_plugin/include/btr0cur.ic
@@ -27,6 +27,16 @@ Created 10/16/1994 Heikki Tuuri
#include "btr0btr.h"
#ifdef UNIV_DEBUG
+# define LIMIT_OPTIMISTIC_INSERT_DEBUG(NREC, CODE)\
+if (btr_cur_limit_optimistic_insert_debug\
+ && (NREC) >= (ulint)btr_cur_limit_optimistic_insert_debug) {\
+ CODE;\
+}
+#else
+# define LIMIT_OPTIMISTIC_INSERT_DEBUG(NREC, CODE)
+#endif /* UNIV_DEBUG */
+
+#ifdef UNIV_DEBUG
/*********************************************************//**
Returns the page cursor component of a tree cursor.
@return pointer to page cursor component */
@@ -146,6 +156,9 @@ btr_cur_compress_recommendation(
page = btr_cur_get_page(cursor);
+ LIMIT_OPTIMISTIC_INSERT_DEBUG(page_get_n_recs(page) * 2,
+ return(FALSE));
+
if ((page_get_data_size(page) < BTR_CUR_PAGE_COMPRESS_LIMIT)
|| ((btr_page_get_next(page, mtr) == FIL_NULL)
&& (btr_page_get_prev(page, mtr) == FIL_NULL))) {
diff --git a/storage/innodb_plugin/include/buf0buf.h b/storage/innodb_plugin/include/buf0buf.h
index fd286f6c26c..b3fcff14f0d 100644
--- a/storage/innodb_plugin/include/buf0buf.h
+++ b/storage/innodb_plugin/include/buf0buf.h
@@ -927,8 +927,7 @@ UNIV_INLINE
void
buf_page_set_accessed(
/*==================*/
- buf_page_t* bpage, /*!< in/out: control block */
- ulint time_ms) /*!< in: ut_time_ms() */
+ buf_page_t* bpage) /*!< in/out: control block */
__attribute__((nonnull));
/*********************************************************************//**
Gets the buf_block_t handle of a buffered file block if an uncompressed
@@ -1260,10 +1259,11 @@ struct buf_page_struct{
to read this for heuristic
purposes without holding any
mutex or latch */
- unsigned access_time:32; /*!< time of first access, or
- 0 if the block was never accessed
- in the buffer pool */
/* @} */
+ unsigned access_time; /*!< time of first access, or
+ 0 if the block was never accessed
+ in the buffer pool. Protected by
+ block mutex */
# if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG
ibool file_page_was_freed;
/*!< this is set to TRUE when fsp
diff --git a/storage/innodb_plugin/include/buf0buf.ic b/storage/innodb_plugin/include/buf0buf.ic
index 39135a2ece1..098ec379761 100644
--- a/storage/innodb_plugin/include/buf0buf.ic
+++ b/storage/innodb_plugin/include/buf0buf.ic
@@ -531,15 +531,15 @@ UNIV_INLINE
void
buf_page_set_accessed(
/*==================*/
- buf_page_t* bpage, /*!< in/out: control block */
- ulint time_ms) /*!< in: ut_time_ms() */
+ buf_page_t* bpage) /*!< in/out: control block */
{
ut_a(buf_page_in_file(bpage));
- ut_ad(buf_pool_mutex_own());
+ ut_ad(!buf_pool_mutex_own());
+ ut_ad(mutex_own(buf_page_get_mutex(bpage)));
if (!bpage->access_time) {
/* Make this the time of the first access. */
- bpage->access_time = time_ms;
+ bpage->access_time = ut_time_ms();
}
}
diff --git a/storage/innodb_plugin/include/data0type.ic b/storage/innodb_plugin/include/data0type.ic
index 2bf67a941bd..8a838e32689 100644
--- a/storage/innodb_plugin/include/data0type.ic
+++ b/storage/innodb_plugin/include/data0type.ic
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2010, Innobase Oy. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -439,34 +439,16 @@ dtype_get_fixed_size_low(
} else if (!comp) {
return(len);
} else {
- /* We play it safe here and ask MySQL for
- mbminlen and mbmaxlen. Although
- mbminlen and mbmaxlen are
- initialized if and only if prtype
- is (in one of the 3 functions in this file),
- it could be that none of these functions
- has been called. */
-
+#ifdef UNIV_DEBUG
ulint i_mbminlen, i_mbmaxlen;
innobase_get_cset_width(
dtype_get_charset_coll(prtype),
&i_mbminlen, &i_mbmaxlen);
- if (UNIV_UNLIKELY(mbminlen != i_mbminlen)
- || UNIV_UNLIKELY(mbmaxlen != i_mbmaxlen)) {
-
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: "
- "mbminlen=%lu, "
- "mbmaxlen=%lu, "
- "type->mbminlen=%lu, "
- "type->mbmaxlen=%lu\n",
- (ulong) i_mbminlen,
- (ulong) i_mbmaxlen,
- (ulong) mbminlen,
- (ulong) mbmaxlen);
- }
+ ut_ad(mbminlen == i_mbminlen);
+ ut_ad(mbmaxlen == i_mbmaxlen);
+#endif /* UNIV_DEBUG */
if (mbminlen == mbmaxlen) {
return(len);
}
diff --git a/storage/innodb_plugin/include/fil0fil.h b/storage/innodb_plugin/include/fil0fil.h
index 05217168764..09e6e37b223 100644
--- a/storage/innodb_plugin/include/fil0fil.h
+++ b/storage/innodb_plugin/include/fil0fil.h
@@ -726,6 +726,21 @@ fil_tablespace_is_being_deleted(
/*============================*/
ulint id); /*!< in: space id */
+/****************************************************************//**
+Generate redo logs for swapping two .ibd files */
+UNIV_INTERN
+void
+fil_mtr_rename_log(
+/*===============*/
+ ulint old_space_id, /*!< in: tablespace id of the old
+ table. */
+ const char* old_name, /*!< in: old table name */
+ ulint new_space_id, /*!< in: tablespace id of the new
+ table */
+ const char* new_name, /*!< in: new table name */
+ const char* tmp_name); /*!< in: temp table name used while
+ swapping */
+
typedef struct fil_space_struct fil_space_t;
#endif
diff --git a/storage/innodb_plugin/include/lock0lock.h b/storage/innodb_plugin/include/lock0lock.h
index b3e1e5c4537..1b472e7b059 100644
--- a/storage/innodb_plugin/include/lock0lock.h
+++ b/storage/innodb_plugin/include/lock0lock.h
@@ -796,14 +796,22 @@ lock_rec_get_page_no(
remains set when the waiting lock is granted,
or if the lock is inherited to a neighboring
record */
-#if (LOCK_WAIT|LOCK_GAP|LOCK_REC_NOT_GAP|LOCK_INSERT_INTENTION)&LOCK_MODE_MASK
+#define LOCK_CONV_BY_OTHER 4096 /*!< this bit is set when the lock is created
+ by other transaction */
+#if (LOCK_WAIT|LOCK_GAP|LOCK_REC_NOT_GAP|LOCK_INSERT_INTENTION|LOCK_CONV_BY_OTHER)&LOCK_MODE_MASK
# error
#endif
-#if (LOCK_WAIT|LOCK_GAP|LOCK_REC_NOT_GAP|LOCK_INSERT_INTENTION)&LOCK_TYPE_MASK
+#if (LOCK_WAIT|LOCK_GAP|LOCK_REC_NOT_GAP|LOCK_INSERT_INTENTION|LOCK_CONV_BY_OTHER)&LOCK_TYPE_MASK
# error
#endif
/* @} */
+/** Checks if this is a waiting lock created by lock->trx itself.
+@param type_mode lock->type_mode
+@return whether it is a waiting lock belonging to lock->trx */
+#define lock_is_wait_not_by_other(type_mode) \
+ ((type_mode & (LOCK_CONV_BY_OTHER | LOCK_WAIT)) == LOCK_WAIT)
+
/** Lock operation struct */
typedef struct lock_op_struct lock_op_t;
/** Lock operation struct */
diff --git a/storage/innodb_plugin/include/rem0rec.h b/storage/innodb_plugin/include/rem0rec.h
index 06de23be757..0666c43a026 100644
--- a/storage/innodb_plugin/include/rem0rec.h
+++ b/storage/innodb_plugin/include/rem0rec.h
@@ -362,24 +362,6 @@ rec_get_offsets_func(
rec_get_offsets_func(rec,index,offsets,n,heap,__FILE__,__LINE__)
/******************************************************//**
-Determine the offset to each field in a leaf-page record
-in ROW_FORMAT=COMPACT. This is a special case of
-rec_init_offsets() and rec_get_offsets_func(). */
-UNIV_INTERN
-void
-rec_init_offsets_comp_ordinary(
-/*===========================*/
- const rec_t* rec, /*!< in: physical record in
- ROW_FORMAT=COMPACT */
- ulint extra, /*!< in: number of bytes to reserve
- between the record header and
- the data payload
- (usually REC_N_NEW_EXTRA_BYTES) */
- const dict_index_t* index, /*!< in: record descriptor */
- ulint* offsets);/*!< in/out: array of offsets;
- in: n=rec_offs_n_fields(offsets) */
-
-/******************************************************//**
The following function determines the offsets to each field
in the record. It can reuse a previously allocated array. */
UNIV_INTERN
@@ -639,8 +621,48 @@ rec_copy(
/*=====*/
void* buf, /*!< in: buffer */
const rec_t* rec, /*!< in: physical record */
- const ulint* offsets);/*!< in: array returned by rec_get_offsets() */
+ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
+ __attribute__((nonnull));
#ifndef UNIV_HOTBACKUP
+/**********************************************************//**
+Determines the size of a data tuple prefix in a temporary file.
+@return total size */
+UNIV_INTERN
+ulint
+rec_get_converted_size_temp(
+/*========================*/
+ const dict_index_t* index, /*!< in: record descriptor */
+ const dfield_t* fields, /*!< in: array of data fields */
+ ulint n_fields,/*!< in: number of data fields */
+ ulint* extra) /*!< out: extra size */
+ __attribute__((warn_unused_result, nonnull));
+
+/******************************************************//**
+Determine the offset to each field in temporary file.
+@see rec_convert_dtuple_to_temp() */
+UNIV_INTERN
+void
+rec_init_offsets_temp(
+/*==================*/
+ const rec_t* rec, /*!< in: temporary file record */
+ const dict_index_t* index, /*!< in: record descriptor */
+ ulint* offsets)/*!< in/out: array of offsets;
+ in: n=rec_offs_n_fields(offsets) */
+ __attribute__((nonnull));
+
+/*********************************************************//**
+Builds a temporary file record out of a data tuple.
+@see rec_init_offsets_temp() */
+UNIV_INTERN
+void
+rec_convert_dtuple_to_temp(
+/*=======================*/
+ rec_t* rec, /*!< out: record */
+ const dict_index_t* index, /*!< in: record descriptor */
+ const dfield_t* fields, /*!< in: array of data fields */
+ ulint n_fields) /*!< in: number of fields */
+ __attribute__((nonnull));
+
/**************************************************************//**
Copies the first n fields of a physical record to a new physical record in
a buffer.
@@ -675,21 +697,6 @@ rec_fold(
__attribute__((pure));
#endif /* !UNIV_HOTBACKUP */
/*********************************************************//**
-Builds a ROW_FORMAT=COMPACT record out of a data tuple. */
-UNIV_INTERN
-void
-rec_convert_dtuple_to_rec_comp(
-/*===========================*/
- rec_t* rec, /*!< in: origin of record */
- ulint extra, /*!< in: number of bytes to
- reserve between the record
- header and the data payload
- (normally REC_N_NEW_EXTRA_BYTES) */
- const dict_index_t* index, /*!< in: record descriptor */
- ulint status, /*!< in: status bits of the record */
- const dfield_t* fields, /*!< in: array of data fields */
- ulint n_fields);/*!< in: number of data fields */
-/*********************************************************//**
Builds a physical record out of a data tuple and
stores it into the given buffer.
@return pointer to the origin of physical record */
@@ -722,10 +729,7 @@ UNIV_INTERN
ulint
rec_get_converted_size_comp_prefix(
/*===============================*/
- const dict_index_t* index, /*!< in: record descriptor;
- dict_table_is_comp() is
- assumed to hold, even if
- it does not */
+ const dict_index_t* index, /*!< in: record descriptor */
const dfield_t* fields, /*!< in: array of data fields */
ulint n_fields,/*!< in: number of data fields */
ulint* extra); /*!< out: extra size */
diff --git a/storage/innodb_plugin/include/srv0srv.h b/storage/innodb_plugin/include/srv0srv.h
index 1a9f54882c5..322c15ffe45 100644
--- a/storage/innodb_plugin/include/srv0srv.h
+++ b/storage/innodb_plugin/include/srv0srv.h
@@ -247,6 +247,10 @@ extern ulint srv_fatal_semaphore_wait_threshold;
#define SRV_SEMAPHORE_WAIT_EXTENSION 7200
extern ulint srv_dml_needed_delay;
+#ifdef UNIV_DEBUG
+extern my_bool srv_purge_view_update_only_debug;
+#endif /* UNIV_DEBUG */
+
extern mutex_t* kernel_mutex_temp;/* mutex protecting the server, trx structs,
query threads, and lock table: we allocate
it from dynamic memory to get it to the
@@ -650,6 +654,11 @@ struct export_var_struct{
ulint innodb_rows_inserted; /*!< srv_n_rows_inserted */
ulint innodb_rows_updated; /*!< srv_n_rows_updated */
ulint innodb_rows_deleted; /*!< srv_n_rows_deleted */
+#ifdef UNIV_DEBUG
+ ulint innodb_purge_trx_id_age; /*!< max_trx_id - purged trx_id */
+ ulint innodb_purge_view_trx_id_age; /*!< rw_max_trx_id
+ - purged view's min trx_id */
+#endif /* UNIV_DEBUG */
};
/** The server system struct */
diff --git a/storage/innodb_plugin/include/trx0purge.h b/storage/innodb_plugin/include/trx0purge.h
index 37d3795efb0..f221a65d076 100644
--- a/storage/innodb_plugin/include/trx0purge.h
+++ b/storage/innodb_plugin/include/trx0purge.h
@@ -153,6 +153,10 @@ struct trx_purge_struct{
than this */
undo_no_t purge_undo_no; /*!< Purge has advanced past all records
whose undo number is less than this */
+#ifdef UNIV_DEBUG
+ trx_id_t done_trx_no; /* The 'purge pointer': purging has
+ actually completed up to this trx no. */
+#endif /* UNIV_DEBUG */
/*-----------------------------*/
ibool next_stored; /*!< TRUE if the info of the next record
to purge is stored below: if yes, then
diff --git a/storage/innodb_plugin/include/univ.i b/storage/innodb_plugin/include/univ.i
index 6cede60a6e0..f57227370aa 100644
--- a/storage/innodb_plugin/include/univ.i
+++ b/storage/innodb_plugin/include/univ.i
@@ -43,9 +43,12 @@ Created 1/20/1994 Heikki Tuuri
#include "hb_univ.i"
#endif /* UNIV_HOTBACKUP */
-#define INNODB_VERSION_MAJOR 1
-#define INNODB_VERSION_MINOR 0
-#define INNODB_VERSION_BUGFIX 17
+#include <mysql_version.h>
+
+/* Set InnoDB Plugin version to be the same as the MySQL server version.
+MYSQL_VERSION_ID is something like 50168. */
+#define INNODB_VERSION_MAJOR (MYSQL_VERSION_ID / 10000)
+#define INNODB_VERSION_MINOR (MYSQL_VERSION_ID / 100 % 100)
/* The following is the InnoDB version as shown in
SELECT plugin_version FROM information_schema.plugins;
@@ -56,14 +59,7 @@ component, i.e. we show M.N.P as M.N */
#define INNODB_VERSION_SHORT \
(INNODB_VERSION_MAJOR << 8 | INNODB_VERSION_MINOR)
-/* auxiliary macros to help creating the version as string */
-#define __INNODB_VERSION(a, b, c) (#a "." #b "." #c)
-#define _INNODB_VERSION(a, b, c) __INNODB_VERSION(a, b, c)
-
-#define INNODB_VERSION_STR \
- _INNODB_VERSION(INNODB_VERSION_MAJOR, \
- INNODB_VERSION_MINOR, \
- INNODB_VERSION_BUGFIX)
+#define INNODB_VERSION_STR MYSQL_SERVER_VERSION
#define REFMAN "http://dev.mysql.com/doc/refman/5.1/en/"
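
A worked example of the new version macros, assuming MYSQL_VERSION_ID is
50168 (i.e. server 5.1.68) as in the comment above:

    /* INNODB_VERSION_MAJOR = 50168 / 10000     = 5               */
    /* INNODB_VERSION_MINOR = 50168 / 100 % 100 = 1               */
    /* INNODB_VERSION_SHORT = (5 << 8) | 1      = 0x0501          */
    /* INNODB_VERSION_STR   = MYSQL_SERVER_VERSION, e.g. "5.1.68" */
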
diff --git a/storage/innodb_plugin/lock/lock0lock.c b/storage/innodb_plugin/lock/lock0lock.c
index 49f732da175..f302e9b1011 100644
--- a/storage/innodb_plugin/lock/lock0lock.c
+++ b/storage/innodb_plugin/lock/lock0lock.c
@@ -791,12 +791,16 @@ lock_reset_lock_and_trx_wait(
/*=========================*/
lock_t* lock) /*!< in: record lock */
{
- ut_ad((lock->trx)->wait_lock == lock);
ut_ad(lock_get_wait(lock));
/* Reset the back pointer in trx to this waiting lock request */
- (lock->trx)->wait_lock = NULL;
+ if (!(lock->type_mode & LOCK_CONV_BY_OTHER)) {
+ ut_ad((lock->trx)->wait_lock == lock);
+ (lock->trx)->wait_lock = NULL;
+ } else {
+ ut_ad(lock_get_type_low(lock) == LOCK_REC);
+ }
lock->type_mode &= ~LOCK_WAIT;
}
@@ -1420,9 +1424,9 @@ lock_rec_has_expl(
while (lock) {
if (lock->trx == trx
+ && !lock_is_wait_not_by_other(lock->type_mode)
&& lock_mode_stronger_or_eq(lock_get_mode(lock),
precise_mode & LOCK_MODE_MASK)
- && !lock_get_wait(lock)
&& (!lock_rec_get_rec_not_gap(lock)
|| (precise_mode & LOCK_REC_NOT_GAP)
|| heap_no == PAGE_HEAP_NO_SUPREMUM)
@@ -1721,7 +1725,7 @@ lock_rec_create(
HASH_INSERT(lock_t, hash, lock_sys->rec_hash,
lock_rec_fold(space, page_no), lock);
- if (UNIV_UNLIKELY(type_mode & LOCK_WAIT)) {
+ if (lock_is_wait_not_by_other(type_mode)) {
lock_set_lock_and_trx_wait(lock, trx);
}
@@ -1752,10 +1756,11 @@ lock_rec_enqueue_waiting(
const buf_block_t* block, /*!< in: buffer block containing
the record */
ulint heap_no,/*!< in: heap number of the record */
+ lock_t* lock, /*!< in: lock object; NULL if a new
+ one should be created. */
dict_index_t* index, /*!< in: index of record */
que_thr_t* thr) /*!< in: query thread */
{
- lock_t* lock;
trx_t* trx;
ut_ad(mutex_own(&kernel_mutex));
@@ -1789,9 +1794,17 @@ lock_rec_enqueue_waiting(
stderr);
}
- /* Enqueue the lock request that will wait to be granted */
- lock = lock_rec_create(type_mode | LOCK_WAIT,
- block, heap_no, index, trx);
+ if (lock == NULL) {
+ /* Enqueue the lock request that will wait to be granted */
+ lock = lock_rec_create(type_mode | LOCK_WAIT,
+ block, heap_no, index, trx);
+ } else {
+ ut_ad(lock->type_mode & LOCK_WAIT);
+ ut_ad(lock->type_mode & LOCK_CONV_BY_OTHER);
+
+ lock->type_mode &= ~LOCK_CONV_BY_OTHER;
+ lock_set_lock_and_trx_wait(lock, trx);
+ }
/* Check if a deadlock occurs: if yes, remove the lock request and
return an error code */
@@ -2036,6 +2049,7 @@ lock_rec_lock_slow(
que_thr_t* thr) /*!< in: query thread */
{
trx_t* trx;
+ lock_t* lock;
ut_ad(mutex_own(&kernel_mutex));
ut_ad((LOCK_MODE_MASK & mode) != LOCK_S
@@ -2050,7 +2064,27 @@ lock_rec_lock_slow(
trx = thr_get_trx(thr);
- if (lock_rec_has_expl(mode, block, heap_no, trx)) {
+ lock = lock_rec_has_expl(mode, block, heap_no, trx);
+ if (lock) {
+ if (lock->type_mode & LOCK_CONV_BY_OTHER) {
+ /* This lock, or lock wait, was created by another
+ transaction, not by the transaction (trx) itself.
+ The transaction (trx) must therefore handle it
+ correctly depending on whether it has been granted. */
+
+ if (lock->type_mode & LOCK_WAIT) {
+ /* This lock request has not been granted yet.
+ Wait until it is granted. */
+
+ goto enqueue_waiting;
+ } else {
+ /* This lock request was already granted.
+ Just clear the flag. */
+
+ lock->type_mode &= ~LOCK_CONV_BY_OTHER;
+ }
+ }
+
/* The trx already has a strong enough lock on rec: do
nothing */
@@ -2060,8 +2094,10 @@ lock_rec_lock_slow(
the queue, as this transaction does not have a lock strong
enough already granted on the record, we have to wait. */
+ ut_ad(lock == NULL);
+enqueue_waiting:
return(lock_rec_enqueue_waiting(mode, block, heap_no,
- index, thr));
+ lock, index, thr));
} else if (!impl) {
/* Set the requested lock on the record */
@@ -2203,7 +2239,8 @@ lock_grant(
TRX_QUE_LOCK_WAIT state, and there is no need to end the lock wait
for it */
- if (lock->trx->que_state == TRX_QUE_LOCK_WAIT) {
+ if (!(lock->type_mode & LOCK_CONV_BY_OTHER)
+ && lock->trx->que_state == TRX_QUE_LOCK_WAIT) {
trx_end_lock_wait(lock->trx);
}
}
@@ -2220,6 +2257,7 @@ lock_rec_cancel(
{
ut_ad(mutex_own(&kernel_mutex));
ut_ad(lock_get_type_low(lock) == LOCK_REC);
+ ut_ad(!(lock->type_mode & LOCK_CONV_BY_OTHER));
/* Reset the bit (there can be only one set bit) in the lock bitmap */
lock_rec_reset_nth_bit(lock, lock_rec_find_set_bit(lock));
@@ -2362,8 +2400,12 @@ lock_rec_reset_and_release_wait(
lock = lock_rec_get_first(block, heap_no);
while (lock != NULL) {
- if (lock_get_wait(lock)) {
+ if (lock_is_wait_not_by_other(lock->type_mode)) {
lock_rec_cancel(lock);
+ } else if (lock_get_wait(lock)) {
+ /* just reset LOCK_WAIT */
+ lock_rec_reset_nth_bit(lock, heap_no);
+ lock_reset_lock_and_trx_wait(lock);
} else {
lock_rec_reset_nth_bit(lock, heap_no);
}
@@ -3588,6 +3630,7 @@ lock_table_create(
ut_ad(table && trx);
ut_ad(mutex_own(&kernel_mutex));
+ ut_ad(!(type_mode & LOCK_CONV_BY_OTHER));
if ((type_mode & LOCK_MODE_MASK) == LOCK_AUTO_INC) {
++table->n_waiting_or_granted_auto_inc_locks;
@@ -4139,6 +4182,7 @@ lock_cancel_waiting_and_release(
lock_t* lock) /*!< in: waiting lock request */
{
ut_ad(mutex_own(&kernel_mutex));
+ ut_ad(!(lock->type_mode & LOCK_CONV_BY_OTHER));
if (lock_get_type_low(lock) == LOCK_REC) {
@@ -5153,7 +5197,7 @@ lock_rec_insert_check_and_lock(
err = lock_rec_enqueue_waiting(LOCK_X | LOCK_GAP
| LOCK_INSERT_INTENTION,
block, next_rec_heap_no,
- index, thr);
+ NULL, index, thr);
} else {
err = DB_SUCCESS;
}
@@ -5229,10 +5273,23 @@ lock_rec_convert_impl_to_expl(
if (!lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP, block,
heap_no, impl_trx)) {
+ ulint type_mode = (LOCK_REC | LOCK_X
+ | LOCK_REC_NOT_GAP);
+
+ /* If the delete-marked record was already locked by another
+ transaction, reserve a waiting lock for impl_trx in place of
+ its implicit lock, because the lock cannot be granted now. */
+
+ if (rec_get_deleted_flag(rec, rec_offs_comp(offsets))
+ && lock_rec_other_has_conflicting(
+ LOCK_X | LOCK_REC_NOT_GAP, block,
+ heap_no, impl_trx)) {
+
+ type_mode |= (LOCK_WAIT | LOCK_CONV_BY_OTHER);
+ }
lock_rec_add_to_queue(
- LOCK_REC | LOCK_X | LOCK_REC_NOT_GAP,
- block, heap_no, index, impl_trx);
+ type_mode, block, heap_no, index, impl_trx);
}
}
}
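
The helper lock_is_wait_not_by_other() and the LOCK_CONV_BY_OTHER flag are
used but not defined in the hunks above. A minimal sketch of the predicate
that the call sites imply, assuming LOCK_CONV_BY_OTHER is a type_mode bit set
on a waiting record lock created on behalf of the transaction by some other
transaction (inferred from usage, not necessarily the patch's own definition):

	/* Sketch only: TRUE for a lock request that is waiting and was not
	created for the transaction by another transaction. */
	UNIV_INLINE
	ibool
	lock_is_wait_not_by_other(
	/*======================*/
		ulint	type_mode)	/*!< in: lock->type_mode */
	{
		return((type_mode & LOCK_WAIT)
		       && !(type_mode & LOCK_CONV_BY_OTHER));
	}
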
diff --git a/storage/innodb_plugin/log/log0recv.c b/storage/innodb_plugin/log/log0recv.c
index 677ada9fbde..96d63144366 100644
--- a/storage/innodb_plugin/log/log0recv.c
+++ b/storage/innodb_plugin/log/log0recv.c
@@ -955,8 +955,11 @@ recv_parse_or_apply_log_rec_body(
not NULL, then the log record is
applied to the page, and the log
record should be complete then */
- mtr_t* mtr) /*!< in: mtr or NULL; should be non-NULL
+ mtr_t* mtr, /*!< in: mtr or NULL; should be non-NULL
if and only if block is non-NULL */
+ ulint space_id)
+ /*!< in: tablespace id obtained by
+ parsing initial log record */
{
dict_index_t* index = NULL;
page_t* page;
@@ -1228,8 +1231,11 @@ recv_parse_or_apply_log_rec_body(
ut_ad(!page || page_type != FIL_PAGE_TYPE_ALLOCATED);
ptr = mlog_parse_string(ptr, end_ptr, page, page_zip);
break;
- case MLOG_FILE_CREATE:
case MLOG_FILE_RENAME:
+ ptr = fil_op_log_parse_or_replay(ptr, end_ptr, type,
+ space_id, 0);
+ break;
+ case MLOG_FILE_CREATE:
case MLOG_FILE_DELETE:
case MLOG_FILE_CREATE2:
ptr = fil_op_log_parse_or_replay(ptr, end_ptr, type, 0, 0);
@@ -1601,7 +1607,8 @@ recv_recover_page_func(
recv_parse_or_apply_log_rec_body(recv->type, buf,
buf + recv->len,
- block, &mtr);
+ block, &mtr,
+ recv_addr->space);
end_lsn = recv->start_lsn + recv->len;
mach_write_ull(FIL_PAGE_LSN + page, end_lsn);
@@ -2067,7 +2074,7 @@ recv_parse_log_rec(
#endif /* UNIV_LOG_LSN_DEBUG */
new_ptr = recv_parse_or_apply_log_rec_body(*type, new_ptr, end_ptr,
- NULL, NULL);
+ NULL, NULL, *space);
if (UNIV_UNLIKELY(new_ptr == NULL)) {
return(0);
diff --git a/storage/innodb_plugin/rem/rem0rec.c b/storage/innodb_plugin/rem/rem0rec.c
index 9f90d2940dd..70d38262bce 100644
--- a/storage/innodb_plugin/rem/rem0rec.c
+++ b/storage/innodb_plugin/rem/rem0rec.c
@@ -167,7 +167,6 @@ rec_get_n_extern_new(
{
const byte* nulls;
const byte* lens;
- dict_field_t* field;
ulint null_mask;
ulint n_extern;
ulint i;
@@ -188,10 +187,13 @@ rec_get_n_extern_new(
/* read the lengths of fields 0..n */
do {
- ulint len;
+ const dict_field_t* field
+ = dict_index_get_nth_field(index, i);
+ const dict_col_t* col
+ = dict_field_get_col(field);
+ ulint len;
- field = dict_index_get_nth_field(index, i);
- if (!(dict_field_get_col(field)->prtype & DATA_NOT_NULL)) {
+ if (!(col->prtype & DATA_NOT_NULL)) {
/* nullable field => read the null flag */
if (UNIV_UNLIKELY(!(byte) null_mask)) {
@@ -209,8 +211,6 @@ rec_get_n_extern_new(
if (UNIV_UNLIKELY(!field->fixed_len)) {
/* Variable-length field: read the length */
- const dict_col_t* col
- = dict_field_get_col(field);
len = *lens--;
/* If the maximum length of the field is up
to 255 bytes, the actual length is always
@@ -239,16 +239,15 @@ rec_get_n_extern_new(
Determine the offset to each field in a leaf-page record
in ROW_FORMAT=COMPACT. This is a special case of
rec_init_offsets() and rec_get_offsets_func(). */
-UNIV_INTERN
+UNIV_INLINE __attribute__((nonnull))
void
rec_init_offsets_comp_ordinary(
/*===========================*/
const rec_t* rec, /*!< in: physical record in
ROW_FORMAT=COMPACT */
- ulint extra, /*!< in: number of bytes to reserve
- between the record header and
- the data payload
- (usually REC_N_NEW_EXTRA_BYTES) */
+ ibool temp, /*!< in: whether to use the
+ format for temporary files in
+ index creation */
const dict_index_t* index, /*!< in: record descriptor */
ulint* offsets)/*!< in/out: array of offsets;
in: n=rec_offs_n_fields(offsets) */
@@ -256,27 +255,38 @@ rec_init_offsets_comp_ordinary(
ulint i = 0;
ulint offs = 0;
ulint any_ext = 0;
- const byte* nulls = rec - (extra + 1);
+ const byte* nulls = temp
+ ? rec - 1
+ : rec - (1 + REC_N_NEW_EXTRA_BYTES);
const byte* lens = nulls
- UT_BITS_IN_BYTES(index->n_nullable);
- dict_field_t* field;
ulint null_mask = 1;
#ifdef UNIV_DEBUG
- /* We cannot invoke rec_offs_make_valid() here, because it can hold
- that extra != REC_N_NEW_EXTRA_BYTES. Similarly, rec_offs_validate()
- will fail in that case, because it invokes rec_get_status(). */
+ /* We cannot invoke rec_offs_make_valid() here if temp=TRUE.
+ Similarly, rec_offs_validate() will fail in that case, because
+ it invokes rec_get_status(). */
offsets[2] = (ulint) rec;
offsets[3] = (ulint) index;
#endif /* UNIV_DEBUG */
+ ut_ad(temp || dict_table_is_comp(index->table));
+
+ if (temp && dict_table_is_comp(index->table)) {
+ /* No need to adjust fixed_len=0. We only need to
+ adjust it for ROW_FORMAT=REDUNDANT. */
+ temp = FALSE;
+ }
+
/* read the lengths of fields 0..n */
do {
- ulint len;
+ const dict_field_t* field
+ = dict_index_get_nth_field(index, i);
+ const dict_col_t* col
+ = dict_field_get_col(field);
+ ulint len;
- field = dict_index_get_nth_field(index, i);
- if (!(dict_field_get_col(field)->prtype
- & DATA_NOT_NULL)) {
+ if (!(col->prtype & DATA_NOT_NULL)) {
/* nullable field => read the null flag */
if (UNIV_UNLIKELY(!(byte) null_mask)) {
@@ -296,10 +306,9 @@ rec_init_offsets_comp_ordinary(
null_mask <<= 1;
}
- if (UNIV_UNLIKELY(!field->fixed_len)) {
+ if (!field->fixed_len
+ || (temp && !dict_col_get_fixed_size(col, temp))) {
/* Variable-length field: read the length */
- const dict_col_t* col
- = dict_field_get_col(field);
len = *lens--;
/* If the maximum length of the field is up
to 255 bytes, the actual length is always
@@ -393,9 +402,8 @@ rec_init_offsets(
= dict_index_get_n_unique_in_tree(index);
break;
case REC_STATUS_ORDINARY:
- rec_init_offsets_comp_ordinary(rec,
- REC_N_NEW_EXTRA_BYTES,
- index, offsets);
+ rec_init_offsets_comp_ordinary(
+ rec, FALSE, index, offsets);
return;
}
@@ -766,17 +774,19 @@ rec_get_nth_field_offs_old(
/**********************************************************//**
Determines the size of a data tuple prefix in ROW_FORMAT=COMPACT.
@return total size */
-UNIV_INTERN
+UNIV_INLINE __attribute__((warn_unused_result, nonnull(1,2)))
ulint
-rec_get_converted_size_comp_prefix(
-/*===============================*/
+rec_get_converted_size_comp_prefix_low(
+/*===================================*/
const dict_index_t* index, /*!< in: record descriptor;
dict_table_is_comp() is
assumed to hold, even if
it does not */
const dfield_t* fields, /*!< in: array of data fields */
ulint n_fields,/*!< in: number of data fields */
- ulint* extra) /*!< out: extra size */
+ ulint* extra, /*!< out: extra size */
+ ibool temp) /*!< in: whether this is a
+ temporary file record */
{
ulint extra_size;
ulint data_size;
@@ -785,15 +795,25 @@ rec_get_converted_size_comp_prefix(
ut_ad(fields);
ut_ad(n_fields > 0);
ut_ad(n_fields <= dict_index_get_n_fields(index));
+ ut_ad(!temp || extra);
- extra_size = REC_N_NEW_EXTRA_BYTES
+ extra_size = temp
+ ? UT_BITS_IN_BYTES(index->n_nullable)
+ : REC_N_NEW_EXTRA_BYTES
+ UT_BITS_IN_BYTES(index->n_nullable);
data_size = 0;
+ if (temp && dict_table_is_comp(index->table)) {
+ /* No need to adjust fixed_len=0. We only need to
+ adjust it for ROW_FORMAT=REDUNDANT. */
+ temp = FALSE;
+ }
+
/* read the lengths of fields 0..n */
for (i = 0; i < n_fields; i++) {
const dict_field_t* field;
ulint len;
+ ulint fixed_len;
const dict_col_t* col;
field = dict_index_get_nth_field(index, i);
@@ -809,8 +829,14 @@ rec_get_converted_size_comp_prefix(
continue;
}
- ut_ad(len <= col->len || col->mtype == DATA_BLOB);
+ ut_ad(len <= col->len || col->mtype == DATA_BLOB
+ || (col->len == 0 && col->mtype == DATA_VARCHAR));
+ fixed_len = field->fixed_len;
+ if (temp && fixed_len
+ && !dict_col_get_fixed_size(col, temp)) {
+ fixed_len = 0;
+ }
/* If the maximum length of a variable-length field
is up to 255 bytes, the actual length is always stored
in one byte. If the maximum length is more than 255
@@ -818,11 +844,11 @@ rec_get_converted_size_comp_prefix(
0..127. The length will be encoded in two bytes when
it is 128 or more, or when the field is stored externally. */
- if (field->fixed_len) {
- ut_ad(len == field->fixed_len);
+ if (fixed_len) {
+ ut_ad(len == fixed_len);
/* dict_index_add_col() should guarantee this */
ut_ad(!field->prefix_len
- || field->fixed_len == field->prefix_len);
+ || fixed_len == field->prefix_len);
} else if (dfield_is_ext(&fields[i])) {
ut_ad(col->len >= 256 || col->mtype == DATA_BLOB);
extra_size += 2;
@@ -839,7 +865,7 @@ rec_get_converted_size_comp_prefix(
data_size += len;
}
- if (UNIV_LIKELY_NULL(extra)) {
+ if (extra) {
*extra = extra_size;
}
@@ -847,6 +873,23 @@ rec_get_converted_size_comp_prefix(
}
/**********************************************************//**
+Determines the size of a data tuple prefix in ROW_FORMAT=COMPACT.
+@return total size */
+UNIV_INTERN
+ulint
+rec_get_converted_size_comp_prefix(
+/*===============================*/
+ const dict_index_t* index, /*!< in: record descriptor */
+ const dfield_t* fields, /*!< in: array of data fields */
+ ulint n_fields,/*!< in: number of data fields */
+ ulint* extra) /*!< out: extra size */
+{
+ ut_ad(dict_table_is_comp(index->table));
+ return(rec_get_converted_size_comp_prefix_low(
+ index, fields, n_fields, extra, FALSE));
+}
+
+/**********************************************************//**
Determines the size of a data tuple in ROW_FORMAT=COMPACT.
@return total size */
UNIV_INTERN
@@ -890,8 +933,8 @@ rec_get_converted_size_comp(
return(ULINT_UNDEFINED);
}
- return(size + rec_get_converted_size_comp_prefix(index, fields,
- n_fields, extra));
+ return(size + rec_get_converted_size_comp_prefix_low(
+ index, fields, n_fields, extra, FALSE));
}
/***********************************************************//**
@@ -1068,19 +1111,18 @@ rec_convert_dtuple_to_rec_old(
/*********************************************************//**
Builds a ROW_FORMAT=COMPACT record out of a data tuple. */
-UNIV_INTERN
+UNIV_INLINE __attribute__((nonnull))
void
rec_convert_dtuple_to_rec_comp(
/*===========================*/
rec_t* rec, /*!< in: origin of record */
- ulint extra, /*!< in: number of bytes to
- reserve between the record
- header and the data payload
- (normally REC_N_NEW_EXTRA_BYTES) */
const dict_index_t* index, /*!< in: record descriptor */
- ulint status, /*!< in: status bits of the record */
const dfield_t* fields, /*!< in: array of data fields */
- ulint n_fields)/*!< in: number of data fields */
+ ulint n_fields,/*!< in: number of data fields */
+ ulint status, /*!< in: status bits of the record */
+ ibool temp) /*!< in: whether to use the
+ format for temporary files in
+ index creation */
{
const dfield_t* field;
const dtype_t* type;
@@ -1092,31 +1134,44 @@ rec_convert_dtuple_to_rec_comp(
ulint n_node_ptr_field;
ulint fixed_len;
ulint null_mask = 1;
- ut_ad(extra == 0 || dict_table_is_comp(index->table));
- ut_ad(extra == 0 || extra == REC_N_NEW_EXTRA_BYTES);
+ ut_ad(temp || dict_table_is_comp(index->table));
ut_ad(n_fields > 0);
- switch (UNIV_EXPECT(status, REC_STATUS_ORDINARY)) {
- case REC_STATUS_ORDINARY:
+ if (temp) {
+ ut_ad(status == REC_STATUS_ORDINARY);
ut_ad(n_fields <= dict_index_get_n_fields(index));
n_node_ptr_field = ULINT_UNDEFINED;
- break;
- case REC_STATUS_NODE_PTR:
- ut_ad(n_fields == dict_index_get_n_unique_in_tree(index) + 1);
- n_node_ptr_field = n_fields - 1;
- break;
- case REC_STATUS_INFIMUM:
- case REC_STATUS_SUPREMUM:
- ut_ad(n_fields == 1);
- n_node_ptr_field = ULINT_UNDEFINED;
- break;
- default:
- ut_error;
- return;
+ nulls = rec - 1;
+ if (dict_table_is_comp(index->table)) {
+ /* No need to adjust fixed_len=0. We only
+ need to adjust it for ROW_FORMAT=REDUNDANT. */
+ temp = FALSE;
+ }
+ } else {
+ nulls = rec - (REC_N_NEW_EXTRA_BYTES + 1);
+
+ switch (UNIV_EXPECT(status, REC_STATUS_ORDINARY)) {
+ case REC_STATUS_ORDINARY:
+ ut_ad(n_fields <= dict_index_get_n_fields(index));
+ n_node_ptr_field = ULINT_UNDEFINED;
+ break;
+ case REC_STATUS_NODE_PTR:
+ ut_ad(n_fields
+ == dict_index_get_n_unique_in_tree(index) + 1);
+ n_node_ptr_field = n_fields - 1;
+ break;
+ case REC_STATUS_INFIMUM:
+ case REC_STATUS_SUPREMUM:
+ ut_ad(n_fields == 1);
+ n_node_ptr_field = ULINT_UNDEFINED;
+ break;
+ default:
+ ut_error;
+ return;
+ }
}
end = rec;
- nulls = rec - (extra + 1);
lens = nulls - UT_BITS_IN_BYTES(index->n_nullable);
/* clear the SQL-null flags */
memset(lens + 1, 0, nulls - lens);
@@ -1162,6 +1217,10 @@ rec_convert_dtuple_to_rec_comp(
ifield = dict_index_get_nth_field(index, i);
fixed_len = ifield->fixed_len;
+ if (temp && fixed_len
+ && !dict_col_get_fixed_size(ifield->col, temp)) {
+ fixed_len = 0;
+ }
/* If the maximum length of a variable-length field
is up to 255 bytes, the actual length is always stored
in one byte. If the maximum length is more than 255
@@ -1222,8 +1281,7 @@ rec_convert_dtuple_to_rec_new(
rec = buf + extra_size;
rec_convert_dtuple_to_rec_comp(
- rec, REC_N_NEW_EXTRA_BYTES, index, status,
- dtuple->fields, dtuple->n_fields);
+ rec, index, dtuple->fields, dtuple->n_fields, status, FALSE);
/* Set the info bits of the record */
rec_set_info_and_status_bits(rec, dtuple_get_info_bits(dtuple));
@@ -1285,6 +1343,54 @@ rec_convert_dtuple_to_rec(
return(rec);
}
+#ifndef UNIV_HOTBACKUP
+/**********************************************************//**
+Determines the size of a data tuple prefix in ROW_FORMAT=COMPACT.
+@return total size */
+UNIV_INTERN
+ulint
+rec_get_converted_size_temp(
+/*========================*/
+ const dict_index_t* index, /*!< in: record descriptor */
+ const dfield_t* fields, /*!< in: array of data fields */
+ ulint n_fields,/*!< in: number of data fields */
+ ulint* extra) /*!< out: extra size */
+{
+ return(rec_get_converted_size_comp_prefix_low(
+ index, fields, n_fields, extra, TRUE));
+}
+
+/******************************************************//**
+Determine the offset to each field in temporary file.
+@see rec_convert_dtuple_to_temp() */
+UNIV_INTERN
+void
+rec_init_offsets_temp(
+/*==================*/
+ const rec_t* rec, /*!< in: temporary file record */
+ const dict_index_t* index, /*!< in: record descriptor */
+ ulint* offsets)/*!< in/out: array of offsets;
+ in: n=rec_offs_n_fields(offsets) */
+{
+ rec_init_offsets_comp_ordinary(rec, TRUE, index, offsets);
+}
+
+/*********************************************************//**
+Builds a temporary file record out of a data tuple.
+@see rec_init_offsets_temp() */
+UNIV_INTERN
+void
+rec_convert_dtuple_to_temp(
+/*=======================*/
+ rec_t* rec, /*!< out: record */
+ const dict_index_t* index, /*!< in: record descriptor */
+ const dfield_t* fields, /*!< in: array of data fields */
+ ulint n_fields) /*!< in: number of fields */
+{
+ rec_convert_dtuple_to_rec_comp(rec, index, fields, n_fields,
+ REC_STATUS_ORDINARY, TRUE);
+}
+
/**************************************************************//**
Copies the first n fields of a physical record to a data tuple. The fields
are copied to the memory heap. */
@@ -1495,6 +1601,7 @@ rec_copy_prefix_to_buf(
return(*buf + (rec - (lens + 1)));
}
+#endif /* UNIV_HOTBACKUP */
/***************************************************************//**
Validates the consistency of an old-style physical record.
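
The three temporary-file helpers added above are meant to be used together:
size the record, convert the tuple, then parse the offsets back. A hedged
usage sketch with a hypothetical wrapper name; heap, index, fields, n_fields
and a pre-sized offsets[] array are assumed to be prepared by the caller, as
in row0merge.c:

	/* Sketch only: build a temporary-file record from a data tuple and
	compute the offsets of its fields. */
	static const rec_t*
	tuple_to_temp_rec(
		mem_heap_t*		heap,	/* in: memory heap for the record */
		const dict_index_t*	index,	/* in: record descriptor */
		const dfield_t*		fields,	/* in: array of data fields */
		ulint			n_fields,/* in: number of data fields */
		ulint*			offsets)/* in/out: pre-sized offsets array */
	{
		ulint	extra;
		ulint	size	= rec_get_converted_size_temp(
			index, fields, n_fields, &extra);
		byte*	buf	= mem_heap_alloc(heap, size);
		rec_t*	rec	= buf + extra;

		rec_convert_dtuple_to_temp(rec, index, fields, n_fields);
		rec_init_offsets_temp(rec, index, offsets);

		return(rec);
	}
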
diff --git a/storage/innodb_plugin/row/row0ins.c b/storage/innodb_plugin/row/row0ins.c
index 92ce04774ea..7e81cf6ab2c 100644
--- a/storage/innodb_plugin/row/row0ins.c
+++ b/storage/innodb_plugin/row/row0ins.c
@@ -2265,7 +2265,10 @@ row_ins_index_entry(
err = row_ins_index_entry_low(BTR_MODIFY_LEAF, index, entry,
n_ext, thr);
if (err != DB_FAIL) {
-
+ if (index == dict_table_get_first_index(index->table)
+ && thr_get_trx(thr)->mysql_thd != 0) {
+ DEBUG_SYNC_C("row_ins_clust_index_entry_leaf_after");
+ }
return(err);
}
diff --git a/storage/innodb_plugin/row/row0merge.c b/storage/innodb_plugin/row/row0merge.c
index 5da2a4b8534..63e14e6568f 100644
--- a/storage/innodb_plugin/row/row0merge.c
+++ b/storage/innodb_plugin/row/row0merge.c
@@ -291,6 +291,7 @@ row_merge_buf_add(
const dict_field_t* ifield;
const dict_col_t* col;
ulint col_no;
+ ulint fixed_len;
const dfield_t* row_field;
ulint len;
@@ -340,9 +341,21 @@ row_merge_buf_add(
ut_ad(len <= col->len || col->mtype == DATA_BLOB);
- if (ifield->fixed_len) {
- ut_ad(len == ifield->fixed_len);
+ fixed_len = ifield->fixed_len;
+ if (fixed_len && !dict_table_is_comp(index->table)
+ && col->mbminlen != col->mbmaxlen) {
+ /* CHAR in ROW_FORMAT=REDUNDANT is always
+ fixed-length, but in the temporary file it is
+ variable-length for variable-length character
+ sets. */
+ fixed_len = 0;
+ }
+
+ if (fixed_len) {
+ ut_ad(len == fixed_len);
ut_ad(!dfield_is_ext(field));
+ ut_ad(!col->mbmaxlen || len >= col->mbminlen
+ * (fixed_len / col->mbmaxlen));
} else if (dfield_is_ext(field)) {
extra_size += 2;
} else if (len < 128
@@ -363,12 +376,11 @@ row_merge_buf_add(
ulint size;
ulint extra;
- size = rec_get_converted_size_comp(index,
- REC_STATUS_ORDINARY,
- entry, n_fields, &extra);
+ size = rec_get_converted_size_temp(
+ index, entry, n_fields, &extra);
- ut_ad(data_size + extra_size + REC_N_NEW_EXTRA_BYTES == size);
- ut_ad(extra_size + REC_N_NEW_EXTRA_BYTES == extra);
+ ut_ad(data_size + extra_size == size);
+ ut_ad(extra_size == extra);
}
#endif /* UNIV_DEBUG */
@@ -572,14 +584,9 @@ row_merge_buf_write(
ulint extra_size;
const dfield_t* entry = buf->tuples[i];
- size = rec_get_converted_size_comp(index,
- REC_STATUS_ORDINARY,
- entry, n_fields,
- &extra_size);
+ size = rec_get_converted_size_temp(
+ index, entry, n_fields, &extra_size);
ut_ad(size >= extra_size);
- ut_ad(extra_size >= REC_N_NEW_EXTRA_BYTES);
- extra_size -= REC_N_NEW_EXTRA_BYTES;
- size -= REC_N_NEW_EXTRA_BYTES;
/* Encode extra_size + 1 */
if (extra_size + 1 < 0x80) {
@@ -592,9 +599,8 @@ row_merge_buf_write(
ut_ad(b + size < block[1]);
- rec_convert_dtuple_to_rec_comp(b + extra_size, 0, index,
- REC_STATUS_ORDINARY,
- entry, n_fields);
+ rec_convert_dtuple_to_temp(b + extra_size, index,
+ entry, n_fields);
b += size;
@@ -696,6 +702,8 @@ row_merge_read(
ib_uint64_t ofs = ((ib_uint64_t) offset) * sizeof *buf;
ibool success;
+ DBUG_EXECUTE_IF("row_merge_read_failure", return(FALSE););
+
#ifdef UNIV_DEBUG
if (row_merge_print_block_read) {
fprintf(stderr, "row_merge_read fd=%d ofs=%lu\n",
@@ -732,6 +740,8 @@ row_merge_write(
ib_uint64_t ofs = ((ib_uint64_t) offset)
* sizeof(row_merge_block_t);
+ DBUG_EXECUTE_IF("row_merge_write_failure", return(FALSE););
+
#ifdef UNIV_DEBUG
if (row_merge_print_block_write) {
fprintf(stderr, "row_merge_write fd=%d ofs=%lu\n",
@@ -841,7 +851,7 @@ err_exit:
*mrec = *buf + extra_size;
- rec_init_offsets_comp_ordinary(*mrec, 0, index, offsets);
+ rec_init_offsets_temp(*mrec, index, offsets);
data_size = rec_offs_data_size(offsets);
@@ -860,7 +870,7 @@ err_exit:
*mrec = b + extra_size;
- rec_init_offsets_comp_ordinary(*mrec, 0, index, offsets);
+ rec_init_offsets_temp(*mrec, index, offsets);
data_size = rec_offs_data_size(offsets);
ut_ad(extra_size + data_size < sizeof *buf);
@@ -2183,9 +2193,10 @@ row_merge_drop_temp_indexes(void)
}
/*********************************************************************//**
-Create a merge file. */
-static
-void
+Create a merge file.
+@return file descriptor, or -1 on failure */
+static __attribute__((nonnull, warn_unused_result))
+int
row_merge_file_create(
/*==================*/
merge_file_t* merge_file) /*!< out: merge file structure */
@@ -2193,6 +2204,7 @@ row_merge_file_create(
merge_file->fd = innobase_mysql_tmpfile();
merge_file->offset = 0;
merge_file->n_rec = 0;
+ return(merge_file->fd);
}
/*********************************************************************//**
@@ -2418,6 +2430,28 @@ row_merge_rename_tables(
goto err_exit;
}
+ /* Generate the redo logs for file operations */
+ fil_mtr_rename_log(old_table->space, old_name,
+ new_table->space, new_table->name, tmp_name);
+
+ /* What if the redo logs are flushed to disk here? This is
+ tested with the following crash point. */
+ DBUG_EXECUTE_IF("bug14669848_precommit", log_buffer_flush_to_disk();
+ DBUG_SUICIDE(););
+
+ /* File operations cannot be rolled back. So, before proceeding
+ with file operations, commit the dictionary changes. */
+ trx_commit_for_mysql(trx);
+
+ /* If the server crashes here, the dictionaries in InnoDB and
+ MySQL will differ. The .ibd files and the .frm files must be swapped
+ manually by the administrator. No loss of data. */
+ DBUG_EXECUTE_IF("bug14669848", DBUG_SUICIDE(););
+
+ /* Ensure that the redo logs are flushed to disk. The config
+ innodb_flush_log_at_trx_commit must not affect this. */
+ log_buffer_flush_to_disk();
+
/* The following calls will also rename the .ibd data files if
the tables are stored in a single-table tablespace */
@@ -2591,7 +2625,7 @@ row_merge_build_indexes(
ulint block_size;
ulint i;
ulint error;
- int tmpfd;
+ int tmpfd = -1;
ut_ad(trx);
ut_ad(old_table);
@@ -2608,13 +2642,31 @@ row_merge_build_indexes(
block_size = 3 * sizeof *block;
block = os_mem_alloc_large(&block_size);
+ /* Initialize all the merge file descriptors, so that we
+ don't call row_merge_file_destroy() on an uninitialized
+ merge file descriptor. */
+
+ for (i = 0; i < n_indexes; i++) {
+ merge_files[i].fd = -1;
+ }
+
for (i = 0; i < n_indexes; i++) {
- row_merge_file_create(&merge_files[i]);
+ if (row_merge_file_create(&merge_files[i]) < 0)
+ {
+ error = DB_OUT_OF_MEMORY;
+ goto func_exit;
+ }
}
tmpfd = innobase_mysql_tmpfile();
+ if (tmpfd < 0)
+ {
+ error = DB_OUT_OF_MEMORY;
+ goto func_exit;
+ }
+
/* Reset the MySQL row buffer that is used when reporting
duplicate keys. */
innobase_rec_reset(table);
@@ -2655,7 +2707,9 @@ row_merge_build_indexes(
}
func_exit:
- close(tmpfd);
+ if (tmpfd >= 0) {
+ close(tmpfd);
+ }
for (i = 0; i < n_indexes; i++) {
row_merge_file_destroy(&merge_files[i]);
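
For context on the "Encode extra_size + 1" step in row_merge_buf_write()
above: the temporary-file record is prefixed by extra_size + 1, stored in one
byte when it fits in seven bits and otherwise in two bytes with the high bit
of the first byte set. A sketch of that encoding, assuming b points into the
output block:

	/* Sketch only: variable-length encoding of the record header size. */
	if (extra_size + 1 < 0x80) {
		*b++ = (byte) (extra_size + 1);
	} else {
		ut_ad(extra_size + 1 < 0x8000);
		*b++ = (byte) (0x80 | ((extra_size + 1) >> 8));
		*b++ = (byte) (extra_size + 1);
	}
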
diff --git a/storage/innodb_plugin/row/row0mysql.c b/storage/innodb_plugin/row/row0mysql.c
index 137a164c4cd..9379912a218 100644
--- a/storage/innodb_plugin/row/row0mysql.c
+++ b/storage/innodb_plugin/row/row0mysql.c
@@ -4083,6 +4083,13 @@ end:
trx->error_state = DB_SUCCESS;
trx_general_rollback_for_mysql(trx, NULL);
trx->error_state = DB_SUCCESS;
+ } else {
+ if (old_is_tmp && !new_is_tmp) {
+ /* After ALTER TABLE the table statistics
+ need to be rebuilt. They will be rebuilt
+ when the table is loaded again. */
+ table->stat_initialized = FALSE;
+ }
}
}
diff --git a/storage/innodb_plugin/row/row0umod.c b/storage/innodb_plugin/row/row0umod.c
index 31f7c9f4888..a1c86424625 100644
--- a/storage/innodb_plugin/row/row0umod.c
+++ b/storage/innodb_plugin/row/row0umod.c
@@ -493,6 +493,7 @@ row_undo_mod_upd_del_sec(
ulint err = DB_SUCCESS;
ut_ad(node->rec_type == TRX_UNDO_UPD_DEL_REC);
+ ut_ad(!node->undo_row);
heap = mem_heap_create(1024);
while (node->index != NULL) {
@@ -546,6 +547,8 @@ row_undo_mod_del_mark_sec(
dict_index_t* index;
ulint err;
+ ut_ad(!node->undo_row);
+
heap = mem_heap_create(1024);
while (node->index != NULL) {
diff --git a/storage/innodb_plugin/row/row0undo.c b/storage/innodb_plugin/row/row0undo.c
index b1606bda5ef..f07d8013919 100644
--- a/storage/innodb_plugin/row/row0undo.c
+++ b/storage/innodb_plugin/row/row0undo.c
@@ -217,7 +217,7 @@ row_undo_search_clust_to_pcur(
node->row = row_build(ROW_COPY_DATA, clust_index, rec,
offsets, NULL, ext, node->heap);
- if (node->update) {
+ if (node->rec_type == TRX_UNDO_UPD_EXIST_REC) {
node->undo_row = dtuple_copy(node->row, node->heap);
row_upd_replace(node->undo_row, &node->undo_ext,
clust_index, node->update, node->heap);
diff --git a/storage/innodb_plugin/srv/srv0srv.c b/storage/innodb_plugin/srv/srv0srv.c
index 8ad4c02e322..4ddd0a4603d 100644
--- a/storage/innodb_plugin/srv/srv0srv.c
+++ b/storage/innodb_plugin/srv/srv0srv.c
@@ -85,6 +85,15 @@ Created 10/8/1995 Heikki Tuuri
#include "ha_prototypes.h"
#include "trx0i_s.h"
#include "os0sync.h" /* for HAVE_ATOMIC_BUILTINS */
+#include "read0read.h"
+
+#ifdef __WIN__
+/* error LNK2001: unresolved external symbol _debug_sync_C_callback_ptr */
+# define DEBUG_SYNC_C(dummy) ((void) 0)
+#else
+# include "m_string.h" /* for my_sys.h */
+# include "my_sys.h" /* DEBUG_SYNC_C */
+#endif
/* This is set to TRUE if the MySQL user has set it in MySQL; currently
affects only FOREIGN KEY definition parsing */
@@ -1474,6 +1483,10 @@ srv_suspend_mysql_thread(
trx = thr_get_trx(thr);
+ if (trx->mysql_thd != 0) {
+ DEBUG_SYNC_C("srv_suspend_mysql_thread_enter");
+ }
+
os_event_set(srv_lock_timeout_thread_event);
mutex_enter(&kernel_mutex);
@@ -1964,6 +1977,25 @@ srv_export_innodb_status(void)
export_vars.innodb_rows_updated = srv_n_rows_updated;
export_vars.innodb_rows_deleted = srv_n_rows_deleted;
+#ifdef UNIV_DEBUG
+ if (ut_dulint_cmp(trx_sys->max_trx_id, purge_sys->done_trx_no) < 0) {
+ export_vars.innodb_purge_trx_id_age = 0;
+ } else {
+ export_vars.innodb_purge_trx_id_age =
+ ut_dulint_minus(trx_sys->max_trx_id, purge_sys->done_trx_no);
+ }
+
+ if (!purge_sys->view
+ || ut_dulint_cmp(trx_sys->max_trx_id,
+ purge_sys->view->up_limit_id) < 0) {
+ export_vars.innodb_purge_view_trx_id_age = 0;
+ } else {
+ export_vars.innodb_purge_view_trx_id_age =
+ ut_dulint_minus(trx_sys->max_trx_id,
+ purge_sys->view->up_limit_id);
+ }
+#endif /* UNIV_DEBUG */
+
mutex_exit(&srv_innodb_monitor_mutex);
}
@@ -2468,6 +2500,30 @@ loop:
srv_main_thread_op_info = "sleeping";
srv_main_1_second_loops++;
+#ifdef UNIV_DEBUG
+ if (btr_cur_limit_optimistic_insert_debug) {
+ /* If btr_cur_limit_optimistic_insert_debug is enabled
+ and there are no purge threads, the purge is attempted 100
+ times more often (one purge per 100 ms) to speed up debug
+ scripts that wait for records to be purged. */
+
+ if (!skip_sleep) {
+ os_thread_sleep(100000);
+ srv_main_sleeps++;
+ }
+
+ do {
+ if (srv_fast_shutdown
+ && srv_shutdown_state > 0) {
+ goto background_loop;
+ }
+
+ srv_main_thread_op_info = "purging";
+ n_pages_purged = trx_purge();
+
+ } while (n_pages_purged);
+ } else
+#endif /* UNIV_DEBUG */
if (!skip_sleep) {
os_thread_sleep(1000000);
diff --git a/storage/innodb_plugin/trx/trx0purge.c b/storage/innodb_plugin/trx/trx0purge.c
index 56607c9ff93..4e79187b5c5 100644
--- a/storage/innodb_plugin/trx/trx0purge.c
+++ b/storage/innodb_plugin/trx/trx0purge.c
@@ -51,6 +51,10 @@ UNIV_INTERN trx_purge_t* purge_sys = NULL;
which needs no purge */
UNIV_INTERN trx_undo_rec_t trx_purge_dummy_rec;
+#ifdef UNIV_DEBUG
+UNIV_INTERN my_bool srv_purge_view_update_only_debug;
+#endif /* UNIV_DEBUG */
+
/*****************************************************************//**
Checks if trx_id is >= purge_view: then it is guaranteed that its update
undo log still exists in the system.
@@ -226,6 +230,7 @@ trx_purge_sys_create(void)
purge_sys->purge_trx_no = ut_dulint_zero;
purge_sys->purge_undo_no = ut_dulint_zero;
purge_sys->next_stored = FALSE;
+ ut_d(purge_sys->done_trx_no = ut_dulint_zero);
rw_lock_create(&purge_sys->latch, SYNC_PURGE_LATCH);
@@ -637,6 +642,7 @@ trx_purge_truncate_if_arr_empty(void)
ut_ad(mutex_own(&(purge_sys->mutex)));
if (purge_sys->arr->n_used == 0) {
+ ut_d(purge_sys->done_trx_no = purge_sys->purge_trx_no);
trx_purge_truncate_history();
@@ -1140,6 +1146,13 @@ trx_purge(void)
rw_lock_x_unlock(&(purge_sys->latch));
+#ifdef UNIV_DEBUG
+ if (srv_purge_view_update_only_debug) {
+ mutex_exit(&(purge_sys->mutex));
+ return(0);
+ }
+#endif
+
purge_sys->state = TRX_PURGE_ON;
/* Handle at most 20 undo log pages in one purge batch */
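
srv_purge_view_update_only_debug is only declared above; presumably it is
wired up as a debug-only InnoDB system variable elsewhere in this patch. A
hedged sketch of what such a binding in ha_innodb.cc could look like (the
variable name and help text here are assumptions):

	#ifdef UNIV_DEBUG
	static MYSQL_SYSVAR_BOOL(purge_view_update_only_debug,
	  srv_purge_view_update_only_debug, PLUGIN_VAR_NOCMDARG,
	  "Pause actual purging of records while only the purge view is updated. "
	  "Intended for debugging only.",
	  NULL, NULL, FALSE);
	#endif /* UNIV_DEBUG */
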
diff --git a/storage/innodb_plugin/trx/trx0rec.c b/storage/innodb_plugin/trx/trx0rec.c
index dc55690c9c3..4de0ed8f9b8 100644
--- a/storage/innodb_plugin/trx/trx0rec.c
+++ b/storage/innodb_plugin/trx/trx0rec.c
@@ -36,6 +36,7 @@ Created 3/26/1996 Heikki Tuuri
#ifndef UNIV_HOTBACKUP
#include "dict0dict.h"
#include "ut0mem.h"
+#include "read0read.h"
#include "row0ext.h"
#include "row0upd.h"
#include "que0que.h"
@@ -1617,6 +1618,25 @@ trx_undo_prev_version_build(
if (row_upd_changes_field_size_or_external(index, offsets, update)) {
ulint n_ext;
+ /* If the previous version of the record is delete-marked, we
+ must check whether its disowned external data still exists.
+ If the trx_id of the previous version is visible to the purge
+ view, we treat it as missing history, because the disowned
+ external data may already have been purged.
+
+ The inherited external data (BLOBs) can be freed (purged)
+ after trx_id was committed, provided that no view was started
+ before trx_id. If the purge view can see the committed
+ delete-marked record by trx_id, no transactions need to access
+ the BLOB. */
+
+ if ((update->info_bits & REC_INFO_DELETED_FLAG)
+ && read_view_sees_trx_id(purge_sys->view, trx_id)) {
+ /* Treat it as a fresh insert, so as not to
+ trigger an assertion failure in the caller. */
+ return(DB_SUCCESS);
+ }
+
/* We have to set the appropriate extern storage bits in the
old version of the record: the extern bits in rec for those
fields that update does NOT update, as well as the bits for