451 files changed, 15663 insertions, 9347 deletions
diff --git a/.bzrignore b/.bzrignore index c91df60c99f..c04ce91b355 100644 --- a/.bzrignore +++ b/.bzrignore @@ -1763,7 +1763,9 @@ netware/.deps/my_manage.Po netware/.deps/mysql_install_db.Po netware/.deps/mysql_test_run.Po netware/.deps/mysqld_safe.Po +netware/init_db.sql netware/libmysql.imp +netware/test_db.sql pack_isam/*.ds? perror/*.ds? perror/*.vcproj diff --git a/BUILD/check-cpu b/BUILD/check-cpu index 24b76c7198d..2852aa98ef3 100755 --- a/BUILD/check-cpu +++ b/BUILD/check-cpu @@ -64,10 +64,11 @@ check_cpu () { ;; # Intel ia32 - *X[eE][oO][nN]*) + *Intel*Core*|*X[eE][oO][nN]*) # a Xeon is just another pentium4 ... # ... unless it has the "lm" (long-mode) flag set, - # in that case it's a Xeon with EM64T support + # in that case it's a Xeon with EM64T support + # So is Intel Core. if [ -z "$cpu_flag_lm" ]; then cpu_arg="pentium4"; else @@ -164,26 +165,34 @@ check_cpu () { cc_ver=`$cc --version | sed 1q` cc_verno=`echo $cc_ver | sed -e 's/^.*gcc/gcc/g; s/[^0-9. ]//g; s/^ *//g; s/ .*//g'` + set -- `echo $cc_verno | tr '.' ' '` + cc_major=$1 + cc_minor=$2 + cc_patch=$3 + cc_comp=`expr $cc_major '*' 100 '+' $cc_minor` case "$cc_ver--$cc_verno" in *GCC*) # different gcc backends (and versions) have different CPU flags case `gcc -dumpmachine` in i?86-*) - case "$cc_verno" in - 3.4*|3.5*|4.*) - check_cpu_args='-mtune=$cpu_arg -march=$cpu_arg' - ;; - *) - check_cpu_args='-mcpu=$cpu_arg -march=$cpu_arg' - ;; - esac + if test "$cc_comp" -lt 304 + then + check_cpu_args='-mcpu=$cpu_arg' + else + check_cpu_args='-mtune=$cpu_arg' + fi ;; ppc-*) check_cpu_args='-mcpu=$cpu_arg -mtune=$cpu_arg' ;; x86_64-*) - check_cpu_args='-mtune=$cpu_arg' + if test "$cc_comp" -lt 304 + then + check_cpu_args='-mcpu=$cpu_arg' + else + check_cpu_args='-mtune=$cpu_arg' + fi ;; *) check_cpu_cflags="" diff --git a/BUILD/compile-pentium-debug-max b/BUILD/compile-pentium-debug-max index adb9b7899a5..b2c8b4d9331 100755 --- a/BUILD/compile-pentium-debug-max +++ b/BUILD/compile-pentium-debug-max @@ -4,6 +4,6 @@ path=`dirname $0` . "$path/SETUP.sh" "$@" --with-debug=full extra_flags="$pentium_cflags $debug_cflags" -extra_configs="$pentium_configs $debug_configs $max_configs $error_inject" +extra_configs="$pentium_configs $debug_configs $max_configs $error_inject --with-experimental-collations" . 
"$path/FINISH.sh" diff --git a/CMakeLists.txt b/CMakeLists.txt index eb4d77db73f..48992cf574b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -131,10 +131,10 @@ ADD_SUBDIRECTORY(dbug) ADD_SUBDIRECTORY(strings) ADD_SUBDIRECTORY(regex) ADD_SUBDIRECTORY(mysys) +ADD_SUBDIRECTORY(zlib) ADD_SUBDIRECTORY(extra/yassl) ADD_SUBDIRECTORY(extra/yassl/taocrypt) ADD_SUBDIRECTORY(extra) -ADD_SUBDIRECTORY(zlib) ADD_SUBDIRECTORY(storage/heap) ADD_SUBDIRECTORY(storage/myisam) ADD_SUBDIRECTORY(storage/myisammrg) diff --git a/Docs/.cvsignore b/Docs/.cvsignore index 777efb1eb7b..6a00212535b 100644 --- a/Docs/.cvsignore +++ b/Docs/.cvsignore @@ -1,40 +1,5 @@ -COPYING -COPYING.LIB +INSTALL-SOURCE INSTALL-BINARY Makefile Makefile.in -Manual-updates -before-gpl-changes-manual.texi -include.texi -manual-before-gpl.texi -manual-tmp.aux -manual-tmp.cp -manual-tmp.fn -manual-tmp.ky -manual-tmp.log -manual-tmp.pdf -manual-tmp.pg -manual-tmp.texi -manual-tmp.toc -manual-tmp.tp -manual-tmp.vr -manual.aux -manual.cp -manual.cps -manual.fn -manual.fns -manual.html -manual.ky -manual.log -manual.pdf -manual.pg -manual.toc -manual.tp -manual.txt -manual.vr -manual_a4.ps -manual_a4.ps.gz -manual_letter.ps -manual_letter.ps.gz -manual_toc.html mysql.info diff --git a/Docs/internals.texi b/Docs/internals.texi deleted file mode 100644 index e1462531270..00000000000 --- a/Docs/internals.texi +++ /dev/null @@ -1,101 +0,0 @@ -\input texinfo @c -*-texinfo-*- -@c -@c ********************************************************* -@c -@c This is a dummy placeholder file for internals.texi in the -@c MySQL source trees. -@c -@c Note, that the internals documentation has been moved into a separate -@c BitKeeper source tree named "mysqldoc" - do not attempt to edit this -@c file! All changes to internals.texi should be done in the mysqldoc tree. -@c -@c See http://www.mysql.com/doc/en/Installing_source_tree.html -@c for information about how to work with BitKeeper source trees. -@c -@c This dummy file is being replaced with the actual file from the -@c mysqldoc tree when building the official source distribution. -@c -@c Please e-mail docs@mysql.com for more information or if -@c you are interested in doing a translation. -@c -@c ********************************************************* -@c -@c %**start of header - -@setfilename internals.info - -@c We want the types in the same index -@syncodeindex tp fn - -@ifclear tex-debug -@c This removes the black squares in the right margin -@finalout -@end ifclear - -@c Set background for HTML -@set _body_tags BGCOLOR=silver TEXT=#000000 LINK=#101090 VLINK=#7030B0 -@c Set some style elements for the manual in HTML form. 'suggested' -@c natural language colors: aqua, black, blue, fuchsia, gray, green, -@c lime, maroon, navy, olive, purple, red, silver, teal, white, and -@c yellow. From Steeve Buehler <ahr@YogElements.com> -@set _extra_head <style> code {color:purple} tt {color:green} samp {color:navy} pre {color:maroon} </style> - -@settitle Dummy MySQL internals documentation for version @value{mysql_version}. - -@c We want single-sided heading format, with chapters on new pages. To -@c get double-sided format change 'on' below to 'odd' -@setchapternewpage on - -@paragraphindent 0 - -@c %**end of header - -@ifinfo -@format -START-INFO-DIR-ENTRY -* mysql: (mysql). MySQL documentation. 
-END-INFO-DIR-ENTRY -@end format -@end ifinfo - -@titlepage -@sp 10 -@center @titlefont{Empty placeholder for the MySQL Internals Documentation} -@sp 10 -@center Copyright @copyright{} 1995-2003 MySQL AB -@c blank page after title page makes page 1 be a page front. -@c also makes the back of the title page blank. -@page -@end titlepage - -@c This should be added. The HTML conversion also needs a MySQL version -@c number somewhere. - -@iftex -@c change this to double if you want formatting for double-sided -@c printing -@headings single - -@oddheading @thischapter @| @| @thispage -@evenheading @thispage @| @| MySQL Internal Reference for Version @value{mysql_version} - -@end iftex - -@node Top, (dir), (dir), (dir) - -@ifinfo -This is an empty placeholder file for the MySQL internals documentation. - -The real version of this file is now maintained in a separate BitKeeper -source tree! Please see -@url{http://www.mysql.com/doc/en/Installing_source_tree.html} for more info -on how to work with BitKeeper. - -Please do not attempt to edit this file directly - use the one in the -@code{mysqldoc} BK tree instead. - -This file will be replaced with the current @code{internals.texi} when -building the official source distribution. -@end ifinfo - -@bye diff --git a/Docs/linuxthreads.txt b/Docs/linuxthreads.txt index 30270125c0d..552415fe794 100644 --- a/Docs/linuxthreads.txt +++ b/Docs/linuxthreads.txt @@ -1,3 +1,5 @@ +[Note this information is obsolete] + Notes on compiling glibc for the standard MySQL binary: - make sure you have gcc 2.95 and gmake 3.79 or newer diff --git a/Docs/my_sys.txt b/Docs/my_sys.txt deleted file mode 100644 index 85ffc13ecb4..00000000000 --- a/Docs/my_sys.txt +++ /dev/null @@ -1,140 +0,0 @@ -Functions i mysys: (For flags se my_sys.h) - - int my_copy _A((const char *from,const char *to,myf MyFlags)); - - Copy file - - int my_delete _A((const char *name,myf MyFlags)); - - Delete file - - int my_getwd _A((string buf,uint size,myf MyFlags)); - int my_setwd _A((const char *dir,myf MyFlags)); - - Get and set working directory - - string my_tempnam _A((const char *pfx,myf MyFlags)); - - Make a uniq temp file name by using dir and adding something after - pfx to make name uniq. Name is made by adding a uniq 6 length-string - and TMP_EXT after pfx. - Returns pointer to malloced area for filename. Should be freed by - free(). - - File my_open _A((const char *FileName,int Flags,myf MyFlags)); - File my_create _A((const char *FileName,int CreateFlags, - int AccsesFlags, myf MyFlags)); - int my_close _A((File Filedes,myf MyFlags)); - uint my_read _A((File Filedes,byte *Buffer,uint Count,myf MyFlags)); - uint my_write _A((File Filedes,const byte *Buffer,uint Count, - myf MyFlags)); - ulong my_seek _A((File fd,ulong pos,int whence,myf MyFlags)); - ulong my_tell _A((File fd,myf MyFlags)); - - Use instead of open,open-with-create-flag, close read and write - to get automatic error-messages (flag: MYF_WME) and only have - to test for != 0 if error (flag: MY_NABP). 
- - int my_rename _A((const char *from,const char *to,myf MyFlags)); - - Rename file - - FILE *my_fopen _A((const char *FileName,int Flags,myf MyFlags)); - FILE *my_fdopen _A((File Filedes,int Flags,myf MyFlags)); - int my_fclose _A((FILE *fd,myf MyFlags)); - uint my_fread _A((FILE *stream,byte *Buffer,uint Count,myf MyFlags)); - uint my_fwrite _A((FILE *stream,const byte *Buffer,uint Count, - myf MyFlags)); - ulong my_fseek _A((FILE *stream,ulong pos,int whence,myf MyFlags)); - ulong my_ftell _A((FILE *stream,myf MyFlags)); - - Same read-interface for streams as for files - - gptr _mymalloc _A((uint uSize,const char *sFile, - uint uLine, myf MyFlag)); - gptr _myrealloc _A((string pPtr,uint uSize,const char *sFile, - uint uLine, myf MyFlag)); - void _myfree _A((gptr pPtr,const char *sFile,uint uLine)); - int _sanity _A((const char *sFile,unsigned int uLine)); - gptr _myget_copy_of_memory _A((const byte *from,uint length, - const char *sFile, uint uLine, - myf MyFlag)); - - malloc(size,myflag) is mapped to this functions if not compiled - with -DSAFEMALLOC - - void TERMINATE _A((void)); - - Writes malloc-info on stdout if compiled with -DSAFEMALLOC. - - int my_chsize _A((File fd,ulong newlength,myf MyFlags)); - - Change size of file - - void my_error _D((int nr,myf MyFlags, ...)); - - Writes message using error number (se mysys/errors.h) on - stdout or curses if MYSYS_PROGRAM_USES_CURSES() is called. - - void my_message _A((const char *str,myf MyFlags)); - - Writes message-string on - stdout or curses if MYSYS_PROGRAM_USES_CURSES() is called. - - void my_init _A((void )); - - Start each program (in main) with this. - void my_end _A((int infoflag)); - - Gives info about program. - - If infoflag & MY_CHECK_ERROR prints if some files are left open - - If infoflag & MY_GIVE_INFO prints timing info and malloc info - about prog. - - int my_redel _A((const char *from, const char *to, int MyFlags)); - - Delete from before rename of to to from. Copyes state from old - file to new file. If MY_COPY_TIME is set sets old time. - - int my_copystat _A((const char *from, const char *to, int MyFlags)); - - Copye state from old file to new file. - If MY_COPY_TIME is set sets copy also time. - - string my_filename _A((File fd)); - - Give filename of open file. - - int dirname _A((string to,const char *name)); - - Copy name of directory from filename. - - int test_if_hard_path _A((const char *dir_name)); - - Test if dirname is a hard path (Starts from root) - - void convert_dirname _A((string name)); - - Convert dirname acording to system. - - In MSDOS changes all caracters to capitals and changes '/' to - '\' - string fn_ext _A((const char *name)); - - Returns pointer to extension in filename - string fn_format _A((string to,const char *name,const char *dsk, - const char *form,int flag)); - format a filename with replace of library and extension and - converts between different systems. - params to and name may be identicall - function dosn't change name if name != to - Flag may be: 1 force replace filnames library with 'dsk' - 2 force replace extension with 'form' */ - 4 force Unpack filename (replace ~ with home) - 8 Pack filename as short as possibly for output to - user. - All open requests should allways use at least: - "open(fn_format(temp_buffe,name,"","",4),...)" to unpack home and - convert filename to system-form. - - string fn_same _A((string toname,const char *name,int flag)); - - Copys directory and extension from name to toname if neaded. - copy can be forced by same flags that in fn_format. 
- - int wild_compare _A((const char *str,const char *wildstr)); - - Compare if str matches wildstr. Wildstr can contain "*" and "?" - as match-characters. - Returns 0 if match. - - void get_date _A((string to,int timeflag)); - - Get current date in a form ready for printing. - - void soundex _A((string out_pntr, string in_pntr)) - - Makes in_pntr to a 5 chars long string. All words that sounds - alike have the same string. - - int init_key_cache _A((ulong use_mem,ulong leave_this_much_mem)); - - Use cacheing of keys in MISAM, PISAM, and ISAM. - KEY_CACHE_SIZE is a good size. - - Remember to lock databases for optimal cacheing - - void end_key_cache _A((void)); - - End key-cacheing. diff --git a/Docs/net_doc.txt b/Docs/net_doc.txt deleted file mode 100644 index 4f21383c06d..00000000000 --- a/Docs/net_doc.txt +++ /dev/null @@ -1,943 +0,0 @@ - MySQL Client/Server Protocol Documentation - - - -Introduction ------------- - - -This paper has the objective of presenting a through description -of the client/server protocol that is embodied in MySQL. Particularly, -this paper aims to document and describe: - -- manner in which MySQL server detects client connection requests and - creates connection -- manner in which MySQL client C API call connects to server - the - entire protocol of sending/receiving data by MySQL server and C API - code -- manner in which queries are sent by client C API calls to server -- manner in which query results are sent by server -- manner in which query results are resolved by server -- sending and receiving of error messages - - -This paper does not have the goal or describing nor documenting other -related MySQL issues, like usage of thread libraries, MySQL standard -library set, MySQL strings library and other MySQL specific libraries, -type definitions and utilities. - -Issues that are covered by this paper are contained in the following -source code files: - -- libmysql/net.c and sql/net_serv.cc, the two being identical -- client/libmysql.c (not entire file is covered) -- include/mysql_com.h -- include/mysql.h -- sql/mysqld.cc (not entire file is covered) -- sql/net_pkg.cc -- sql/sql_base.cc (not entire file is covered) -- sql/sql_select.cc (not entire file is covered) -- sql/sql_parse.cc (not entire file is covered) - -Note: libmysql/net.c was client/net.c prior to MySQL 3.23.11. -sql/net_serv.cc was sql/net_serv.c prior to MySQL 3.23.16. - -Beside this introduction this paper presents basic definitions, -constants, structures and global variables, all related functions in -server and in C API. Textual description of the entire protocol -functioning is described in the last chapter of this paper. - - -Constants, structures and global variables ------------------------------------------- - -This chapter will describe all constants, structures and -global variables relevant to client/server protocol. - -Constants - -They are important as they contain default values, the ones -that are valid if options are not set in any other way. Beside that -MySQL source code does not contain a single non-defined constant in -its code. This description of constants does not include -configuration and conditional compilation #defines. 
- -NAME_LEN - field and table name length, current value 64 -HOSTNAME_LENGTH - length of the hostname, current value 64 -USERNAME_LENGTH - username length, current value 16 -MYSQL_PORT - default TCP/IP port number, current value 3306 -MYSQL_UNIX_ADDR - full path of the default Unix socket file, current value - "/tmp/mysql.sock" -MYSQL_NAMEDPIPE - full path of the default NT pipe file, current value - "MySQL" -MYSQL_SERVICENAME - name of the MySQL Service on NT, current value "MySQL" -NET_HEADER_SIZE - size of the network header, when no - compression is used, current value 4 -COMP_HEADER_SIZE - additional size of network header when - compression is used, current value 3 - -What follows are set of constants, defined in source only, which -define capabilities of the client built with that version of C -API. Simply, when some new feature is added in client, that client -feature is defined, so that server can detect what capabilities a -client program has. - -CLIENT_LONG_PASSWORD - client supports new more secure passwords -CLIENT_LONG_FLAG - client uses longer flags -CLIENT_CONNECT_WITH_DB - client can specify db on connect -CLIENT_COMPRESS - client can use compression protocol -CLIENT_ODBC - ODBC client -CLIENT_LOCAL_FILES - client can use LOAD DATA INFILE LOCAL -CLIENT_IGNORE_SPACE - client can ignore spaces before '(' -CLIENT_CHANGE_USER - client supports the mysql_change_user() - -What follows are other constants, pertaining to timeouts and sizes - -MYSQL_ERRMSG_SIZE - maximum size of error message string, current value 200 -NET_READ_TIMEOUT - read timeout, current value 30 seconds -NET_WRITE_TIMEOUT - write timeout, current value 60 seconds -NET_WAIT_TIMEOUT - wait for new query timeout, current value 8*60*60 - seconds, that is, 8 hours -packet_error - value returned in case of socket errors, current - value -1 -TES_BLOCKING - used in debug mode for setting up blocking testing -RETRY COUNT - number of times network read and write will be - retried, current value 1 - -There are also error messages for last_errno, which depict system -errors, and are used on the server only. - -ER_NET_PACKAGE_TOO_LARGE - packet is larger than max_allowed_packet -ER_OUT_OF_RESOURCES - practically no more memory -ER_NET_ERROR_ON_WRITE - error in writing to NT Named Pipe -ER_NET_WRITE_INTERRUPTED - some signal or interrupt happened - during write -ER_NET_READ_ERROR_FROM_PIPE - error in reading from NT Named Pipe -ER_NET_FCNTL_ERROR - error in trying to set fcntl on socket - descriptor -ER_NET_PACKETS_OUT_OF_ORDER - packet numbers on client and - server side differ -ER_NET_UNCOMPRESS_ERROR - error in uncompress of compressed packet - - - Structs and enums - - -struct NET - -This is MySQL's network handle structure, used in all client/server -read/write functions. On the server, it is initialized and preserved -in each thread. On the client, it is a part of the MYSQL struct, -which is the MySQL handle used in all C API functions. This structure -uniquely identifies a connection, either on the server or client -side. It consists of the following fields: - - Vio* vio - explained above - HANDLE hPipe - Handle for NT Named Pipe file - my_socket fd - file descriptor used for both TCP/IP socket and - Unix socket file - int fcntl - contains info on fcntl options used on fd. 
Mostly - used for saving info if blocking is used or not - unsigned char *buff - network buffer used for storing data for - reading from/writing to socket - unsigned char,*buff_end - points to the end of buff - unsigned char *write_pos - present writing position in buff - unsigned char *read_pos - present reading position in buff. This - pointer is used for reading data after - calling my_net_read function and function - that are just its wrappers - char last_error[MYSQL_ERRMSG_SIZE] - holds last error message - unsigned int last_errno - holds last error code of the network - protocol. Its possible values are listed - in above constants. It is used only on - the server side - unsigned int max_packet - holds current value of buff size - unsigned int timeout - stores read timeout value for that connection - unsigned int pkt_nr - stores the value of the current packet number in - a batch of packets. Used primarily for - detection of protocol errors resulting in a - mismatch - my_bool error - holds either 1 or 0 depending on the error condition - my_bool return_errno - if its value != 0 then there is an error in - protocol mismatch between client and server - my_bool compress - if true compression is used in the protocol - unsigned long remain_in_buf - used only in reading compressed packets. - Explained in my_net_read - unsigned long length - used only for storing the length of the read - packet. Explained in my_net_read - unsigned long buf_length - used only in reading compressed packets. - Explained in my_net_read - unsigned long where_b - used only in reading compressed packets. - Explained in my_net_read - short int more - used for reporting in mysql_list_processes - char save_char - used in reading compressed packets for saving chars - in order to make zero-delimited strings. Explained - in my_net_read - -A few typedefs will be defined for easier understanding of the text that -follows. - -typedef char **MYSQL_ROW - data containing one row of values - -typedef unsigned int MYSQL_FIELD_OFFSET - offset in bytes of the current field - -typedef MYSQL_ROWS *MYSQL_ROW_OFFSET - offset in bytes of the current row - -struct MYSQL_FIELD - contains all info on the attributes of a -specific column in a result set, plus info on lengths of the column in -a result set. This struct is tagged as st_mysql_field. This structure -consists of the following fields: - - char *name - name of column - char *table - table of column if column was a field and not - an expression or constant - char *def - default value (set by mysql_list_fields) - enum enum_field_types type - see above - unsigned int length - width of column in the current row - unsigned int max_length - maximum width of that column in entire - result set - unsigned int flags - corresponding to Extra in DESCRIBE - unsigned int decimals - number of decimals in field - - -struct MYSQL_ROWS - a node for each row in the single linked -list forming entire result set. This struct is tagged as -st_mysql_rows, and has two fields: - - struct st_mysql_rows *next - pointer to the next one - MYSQL_ROW data - see above - - -struct MYSQL_DATA - contains all rows from result set. It is -tagged as st_mysql_data and has following fields: - - my_ulonglong rows - how many rows - unsigned int fields - how many columns - MYSQL_ROWS *data - see above. This is the first node of the linked list - MEM_ROOT alloc - MEM_ROOT is MySQL memory allocation structure, and - this field is used to store all fields and rows. 
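
For orientation, a buffered result set as described above - MYSQL_DATA holding a singly linked list of MYSQL_ROWS nodes - would be walked roughly as sketched below. The typedefs are minimal local stand-ins that only mirror the fields named in the text; they are not the real definitions from include/mysql.h.

  #include <stdio.h>

  /* Local stand-ins mirroring the documented fields only. */
  typedef char **MYSQL_ROW;

  typedef struct st_mysql_rows
  {
    struct st_mysql_rows *next;     /* next node in the row list */
    MYSQL_ROW data;                 /* column values of this row */
  } MYSQL_ROWS;

  typedef struct st_mysql_data
  {
    unsigned long rows;             /* number of rows */
    unsigned int fields;            /* number of columns */
    MYSQL_ROWS *data;               /* head of the linked list of rows */
  } MYSQL_DATA;

  /* Walk the buffered result set row by row, column by column. */
  void dump_result(const MYSQL_DATA *res)
  {
    const MYSQL_ROWS *row;
    unsigned int i;

    for (row = res->data; row != NULL; row = row->next)
      for (i = 0; i < res->fields; i++)
        printf("%s%s", row->data[i] ? row->data[i] : "NULL",
               i + 1 < res->fields ? "\t" : "\n");
  }
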
- - -struct st_mysql_options - holds various client options, and -contains following fields: - - unsigned int connect_timeout - time in seconds for connection - unsigned int client_flag - used to hold client capabilities - my_bool compress - boolean for compression - my_bool named_pipe - is Named Pipe used? (on NT) - unsigned int port - what TCP port is used - char *host - host to connect to - char *init_command - command to be executed upon connection - char *user - account name on MySQL server - char *password - password for the above - char *unix_socket - full path for Unix socket file - char *db - default database - char *my_cnf_file - optional configuration file - char *my_cnf_group - optional header for options - - -struct MYSQL - MySQL client's handle. Required for any -operation issued from client to server. Tagged as st_mysql and having -following fields: - - NET net - see above - char *host - host on which MySQL server is running - char *user - MySQL username - char *passwd - password for above - char *unix_socket- full path of Unix socket file - char *server_version - version of the server - char *host_info - contains info on how has connection been - established, TCP port, socket or Named Pipe - char *info - used to store information on the query results, - like number of rows affected etc. - char *db - current database - unsigned int port - TCP port in use - unsigned int client_flag - client capabilities - unsigned int server_capabilities - server capabilities - unsigned int protocol_version - version of the protocol - unsigned int field_count - used for storing number of fields - immediately upon execution of a query, - but before fetching rows - unsigned long thread_id - server thread to which this connection - is attached - my_ulonglong affected_rows - used for storing number of rows - immediately upon execution of a query, - but before fetching rows - my_ulonglong insert_id - fetching LAST_INSERT_ID() through client C API - my_ulonglong extra_info - used by mysqlshow -unsigned long packet_length - saving size of the first packet upon - execution of a query - enum mysql_status status - see above - MYSQL_FIELD *fields - see above - MEM_ROOT field_alloc - memory used for storing previous field (fields) - my_bool free_me - boolean that flags if MYSQL was allocated in mysql_init - my_bool reconnect - used to automatically reconnect - struct st_mysql_options options - see above - char scramble_buff[9] - key for scrambling password before sending it - to server - - -struct MYSQL_RES - tagged as st_mysql_res and used to store -entire result set from a single query. Contains following fields: - - my_ulonglong row_count - number of rows - unsigned int field_count - number of columns - unsigned int current_field - cursor for fetching fields - MYSQL_FIELD *fields - see above - MYSQL_DATA *data - see above, and used in buffered reads, that is, - mysql_store_result only - MYSQL_ROWS *data_cursor - pointing to the field of above "data" - MEM_ROOT field_alloc - memory allocation for above "fields" - MYSQL_ROW row - used for storing row by row in unbuffered reads, - that is, in mysql_use_result - MYSQL_ROW current_row - cursor to the current row for buffered reads - unsigned long *lengths - column lengths of current row - MYSQL *handle - see above, used in unbuffered reads, that is, in - mysql_use_result - my_bool eof - used by mysql_fetch_row as a marker for end of data - - - - Global variables - - -unsigned long max_allowed_packet - maximum allowable value of network - buffer. 
Default value - 1MB - -unsigned long net_buffer_length - default, starting value of network - buffer - 8KB - -unsigned long bytes_sent - total number of bytes written since startup - of the server - -unsigned long bytes_received - total number of bytes read since startup - of the server - - -Synopsis of the basic client/server protocol --------------------------------------------- - -Purpose of this chapter is to provide a complete picture of -the basic client/server protocol implemented in MySQL. It was felt -it is necessary after writing descriptions for all of the functions -involved in basic protocol. There are at present 11 functions -involved, with several structures, many constants etc, which are all -described in detail. But as a forest could not be seen from the trees, -so the concept of the protocol could not be deciphered easily from a -thorough documentation of minutiae. - -Although the concept of the protocol was not changed with the -introduction of vio system, embodied in violate.cc source file and VIO -system, the introduction of these has changed the code substantially. Before -VIO was introduced, functions for reading from/writing to network -connection had to deal with various network standards. So, these functions -depended on whether TCP port or Unix socket file or NT Named Pipe file is -used. This is all changed now and single vio_ functions are called, while -all this diversity is covered by vio_ functions. - -In MySQL a specific buffered network input/output transport model -has been implemented. Although each operating system may have its -own buffering for network connections, MySQL has added its own -buffering model. This same for each of the three transport protocol -types that are used in MySQL client/server communications, which -are TCP/IP sockets (on all systems), Unix socket files on Unix and -Unix-like operating systems and Named Pipe files on NT. Although -TCP/IP sockets are omnipresent, the latter two types have been added -for local connections. Those two connection types can be used in -local mode only, that is, when both client and server reside on the -same host, and are introduced because they enable better speeds for -local connections. This is especially useful for WWW type of -applications. Startup options of MySQL server allow that either -TCP/IP sockets or local connection (OS dependent) can be disallowed. - -In order to implement buffered input/output, MySQL allocates a -buffer. The starting size of this buffer is determined by the value -of the global variable net_buffer_length, which can be changed at -MySQL server startup. This is, as explained, only the startup length -of MySQL network buffer. Because a single item that has to be read -or written can be larger than that value, MySQL will increase buffer -size as long as that size reaches value of the global variable -max_allowed_packet, which is also settable at server startup. Maximum -value of this variable is limited by the way MySQL stores/reads -sizes of packets to be sent/read, which means by the way MySQL -formats packages. - -Basically each packet consists of two parts, a header and data. In -the case when compression is not used, header consists of 4 bytes -of which 3 contain the length of the packet to be sent and one holds -the packet number. When compression is used there are onother 3 -bytes which store the size of uncompressed data. 
Because of the way -MySQL packs length into 3 bytes, plus due to the usage of some -special values in the most significant byte, maximum size of -max_allowed_packet is limited to 24MB at present. So, if compression -is not used, at first 4 bytes are written to the buffer and then -data itself. As MySQL buffers I/O logical packets are packet together -until packets fill up entire size of the buffer. That size no less -than net_buffer_size, but no greater than max_allowed_packet. So, -actual writing to the network is done when this buffer is filled -up. As frequently sequence of buffers make a logical unit, like a -result set, then at the end of sending data, even if buffer is not -full, data is written (flushed to the connection) with a call of -the net_flush function. So that no single packet can be larger than -this value, checks are made throughout the code to make sure that -no single field or command could exceed that value. - -In order to maintain coherency in consecutive packets, each packet -is numbered and their number stored as a part of a header, as -explained above. Packets start with 0, so whenever a logical packet -is written, that number is incremented. On the other side when -packets are read, value that is fetched is compared with the value -stored and if there is no mismatch that value is incremented, too. -Packet number is reset on the client side when unwanted connections -are removed from the connection and on the server side when a new -command has been started. - - -So, before writing, the buffer contains a sequence of logical -packets, consisting of header plus data consecutively. If compression -is used, packet numbers are not stored in each header of the logical -packets, but a whole buffer, or a part of it if flushing is done, -containing one or more logical packets are compressed. In that case -a new larger header, is formed, and all logical packets contained -in the buffer are compressed together. This way only one packet is -formed which makes several logical packets, which improves both -speed and compression ratio. On the other side, when this large -compressed packet is read, it is first uncompressed, and then logical -packets are sent, one by one, to the calling functions. - - -All this functionality is described in detail in the following -chapter. It does not contain functions that form logical packets, or -that read and write to connections but also functions that are used -for initialization, clearing of connections. There are functions at -higher level dealing with sending fields, rows, establishing -connections, sending commands, but those are not explained in the -following chapter. - - -Functions utilized in client/server protocol --------------------------------------------- - -First of all, functions are described that are involved in preparing, -reading, or writing data over TCP port, Unix socket file, or named -pipe, and functions directly related to those. All of these functions -are used both in server and client. Server and client specific code -segments are documented in each function description. - -Each MySQL function checks for errors in memory allocation and -freeing, as well as in every OS call, like the one dealing with -files and sockets, and for errors in indigenous MySQL function -calls. This is expected, but has to be said here so as not to repeat -it in every function description. - -Older versions of MySQL have utilized the following macros for -reading from or writing to a socket. 
- -raw_net_read - calls OS function recv function that reads N bytes -from a socket into a buffer. Number of bytes read is returned. - -raw_net_write - calls OS function send to write N bytes from a -buffer to socket. Number of bytes written is returned. - -These macros are replaced with VIO (Virtual I/O) functions. - - -Function name: my_net_init - -Parameters: struct NET *, enum_net_type, struct Vio - -Return value: 1 for error, 0 for success - -Function purpose: To initialize properly all NET fields, - allocate memory and set socket options - -Function description - -First of all, buff field of NET struct is allocated to the size of -net_buffer_length, and on failure function exits with 0. All fields -in NET are set to their default or starting values. As net_buffer_length -and max_allowed_packet are configurable, max_allowed_packet is set -equal to net_buffer_length if the latter one is greater. max_packet -is set for that NET to net_buffer_length, and buff_end points to -buff end. vio field is set to the second parameter. If it is a -real connection, which is the case when second parameter is not -null, then fd field is set by calling vio_fd function. read_pos and -write_pos to buff, while remaining integers are set to 0. If function -is run on the MySQL server on Unix and server is started in a test -mode that would require testing of blocking, then vio_blocking -function is called. Last, fast throughput mode is set by a call to -vio_fastsend function. - - -Function name: net_end - -Parameters: struct NET * - -Return value: void - -Function purpose: To release memory allocated to buff - - - -Function name: net_realloc (private, static function) - -Parameters: struct NET, ulong (unsigned long) - -Return value: 1 for error, 0 for success - -Function purpose: To change memory allocated to buff - -Function description - -New length of buff field of NET struct is passed as second parameter. -It is first checked versus max_allowed_packet and if greater, an -error is returned. New length is aligned to 4096-byte boundary. Then, -buff is reallocated, buff_end, max_packet, and write_pas reset to -the same values as in my_net_init. - - - -Function name: net_clear (used on client side only) - -Parameters: struct NET * - -Return value: void - -Function purpose: To read unread packets - -Function description - -This function is used on client side only, and is executed -only if a program is not started in test mode. This function reads -unread packets without processing them. First, non-blocking mode is -set on systems that do not have non-blocking mode defined. This is -performed by checking the mode with vio_is_blocking function. and -setting non-blocking mode by vio_blocking function. If this operation -was successful, then packets are read by vio_read function, to which -vio field of NET is passed together with buff and max_packet field -values. field of the same struct at a length of max_packet. If -blocking was active before reading is performed, blocking is set with -vio_blocking function. After reading has been performed, pkt_nr is -reset to 0 and write_pos reset to buff. In order to clarify some -matters non-blocking mode enables executing program to dissociate from -a connection, so that error in connection would not hang entire -program or its thread. 
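
To make the buffer-growth rule from the net_realloc description concrete, here is a minimal C sketch of that policy: reject anything beyond max_allowed_packet, round the request up to a 4096-byte boundary, then reallocate. grow_net_buffer() is an illustrative stand-in, not the code from sql/net_serv.cc; the constants are the documented defaults.

  #include <stdio.h>
  #include <stdlib.h>

  /* Documented defaults: net_buffer_length 8KB, max_allowed_packet 1MB. */
  static unsigned long net_buffer_length  = 8UL * 1024;
  static unsigned long max_allowed_packet = 1024UL * 1024;

  /* Illustrative stand-in for net_realloc(): grow a NET-style buffer. */
  static int grow_net_buffer(unsigned char **buff, unsigned long *max_packet,
                             unsigned long wanted)
  {
    unsigned char *new_buff;

    if (wanted > max_allowed_packet)
      return 1;                              /* ER_NET_PACKAGE_TOO_LARGE */

    wanted = (wanted + 4095UL) & ~4095UL;    /* align to 4096-byte boundary */
    if (!(new_buff = realloc(*buff, wanted)))
      return 1;                              /* ER_OUT_OF_RESOURCES */

    *buff = new_buff;                        /* buff_end and write_pos would */
    *max_packet = wanted;                    /* be re-derived from these     */
    return 0;
  }

  int main(void)
  {
    unsigned long max_packet = net_buffer_length;
    unsigned char *buff = malloc(max_packet);

    if (buff && grow_net_buffer(&buff, &max_packet, 20000UL) == 0)
      printf("buffer grown to %lu bytes\n", max_packet);
    free(buff);
    return 0;
  }
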
- -Function name: net_flush - -Parameters: struct NET * - -Return value: 1 for error, 0 for success - -Function purpose: To write remaining bytes in buff to socket - -Function description - -net_real_write (described below) is performed is write_pos -differs from buff, both being fields of the only parameter. write_pos -is reset to buff. This function has to be used, as MySQL uses buffered -writes (as will be explained more in the function net_write_buff). - - -Function name: my_net_write - -Parameters: struct NET *, const char *, ulong - -Return value: 1 for error, 0 for success - -Function purpose: Write a logical packet in the second parameter - of third parameter length - -Function description - -The purpose of this function is to prepare a logical packet such -that entire content of data, pointed to by second parameter and in -length of third parameter is sent to the other side. In case of -server, it is used for sending result sets, and in case of client -it is used for sending local data. This function foremost prepares -a header for the packet. Normally, the header consists of 4 bytes, -of which the first 3 bytes contain the length of the packet, thereby -limiting a maximum allowable length of a packet to 16MB, while the -fourth byte contains the packet number, which is used when one large -packet has to be divided into sequence of packets. This way each -sub-packet gets its number which should be matched on the other -side. When compression is used another three bytes are added to -packet header, thus packet header is in that case increased to 7 -bytes. Additional three bytes are used to save the length of -compressed data. As in connection that uses compression option, -code packs packets together,, a header prepared by this function -is later not used in writing to / reading from network, but only -to distinguish logical packets within a buffered read operation. - - -This function, first stores the value of the third parameter into the -first 3 bytes of local char variable of NET_HEADER_SIZE size by usage -of function int3store. Then, at this point, if compression is not -used, pkt_nr is increased, and its value stored in the last byte of -the said local char[] variable. If compression is used, 0 is stored in -both values. Then those four bytes are sent to other side by the usage -of the function net_write_buff(to be explained later on), and if -successful, entire packet in second parameter of the length described -in third parameter is sent by the usage of the same function. - - -Function name: net_write_command - -Parameters: struct NET *, char, const char *, ulong - -Return value: 1 for error, 0 for success - -Function purpose: Send a command with a packet as in previous function - -Function description - -This function is very similar to the previous one. The only -difference is that first packet is enlarged by one byte, so that the -command precedes the packet to be sent. This is implemented by -increasing first packet by one byte, which contains a command code. As -command codes do not use the range of values that are used by character -sets, so when the other side receives a packet, first byte after -header contains a command code. This function is used by client for -sending all commands and queries, and by server in connection process -and for sending errors. 
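
As a small illustration of the 4-byte header that my_net_write() is described as building with int3store - three length bytes (hence the size ceiling mentioned above) followed by the packet sequence number - here is a self-contained sketch. store_header() and parse_header() are made-up names used only for this example, not MySQL functions.

  #include <stdio.h>

  #define NET_HEADER_SIZE 4      /* 3 bytes length + 1 byte packet number */

  static void store_header(unsigned char *h, unsigned long len,
                           unsigned char pkt_nr)
  {
    h[0] = (unsigned char) (len & 0xFF);          /* least significant byte  */
    h[1] = (unsigned char) ((len >> 8) & 0xFF);
    h[2] = (unsigned char) ((len >> 16) & 0xFF);  /* 3 bytes => size ceiling */
    h[3] = pkt_nr;                                /* packet sequence number  */
  }

  static unsigned long parse_header(const unsigned char *h,
                                    unsigned char *pkt_nr)
  {
    *pkt_nr = h[3];
    return (unsigned long) h[0] |
           ((unsigned long) h[1] << 8) |
           ((unsigned long) h[2] << 16);
  }

  int main(void)
  {
    unsigned char header[NET_HEADER_SIZE];
    unsigned char nr;
    unsigned long len;

    store_header(header, 0x123456UL, 2);
    len = parse_header(header, &nr);
    printf("length=%lu pkt_nr=%u\n", len, (unsigned) nr);
    return 0;
  }
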
- - -Function name: net_write_buff (private, static function) - -Parameters: struct NET *, const char *, uint - -Return value: 1 for error, 0 for success - -Function purpose: To write a packet of any size by cutting it -and using next function for writing it - -Function description - -This function was created after compression feature has been -added to MySQL. This function supposes that packets have already been -properly formatted, regarding packet header etc. The principal reason for -this function to exist is because a packet that is sent by client or -server does not have to be less than max_packet. So this function -first calculates how much data has been left in a buff, by getting a -difference between buff_end and write_pos and storing it to local -variable left_length. Then a loop is run as long as the length to be -sent is greater than length of left bytes (left_length). In a loop -data from second parameter is copied to buff at write_pos, as much as -it can be, that is, by left_length. Then net_real_write function is called -(see below) with NET, buff, and max_packet parameters. This function -is the lowest level function that writes data over established -connection. In the loop, write_pos is reset to buff, the pointer to data -(second parameter) is moved by the amount of data sent (left_length), -length of data to be sent (third parameter) is decreased by the amount -sent (left_length) and left_length is reset to max_packet value, which -ends the loop. This logic was necessary, as there could have been some -data yet unsent (write_pos != buf), while data to be sent could be as -large as necessary, thus requiring many loops. At the end of function, -remaining data in second parameter are copied to buff at write_pos, by -the remaining length of data to be sent (third parameter). So, in the -next call of this function remaining data will be sent, as buff is -used in the call to net_real_write. It is very important to note that if -a packet to be sent is less than the number of bytes that are still -available in buff, then there will be no writing over network, but -only logical packets will be added one after another. This will -accelerate network traffic, plus if compression is used, the -expected compression rate would be higher. That is why server or -client functions that sends data uses at the end of data net_flush -function described above. - - -Function name: net_real_write - -Parameters: struct NET *, const char *, ulong - -Return value: 1 for error, 0 for success - -Function purpose: To write data to a socket or pipe, with -compression if used - -Function description - -First, more field is set to 2, to enable reporting in -mysql_list_processes. Then if compression is enabled on that -connection, a new local buffer (variable b) is initialized to the -length of total header (normal header + compression header) and if no -memory is available, an error is returned. This buffer (b) is used for -holding the final, compressed packet to be written over the -connection. Furthermore in compression initialization, second -parameter at length of third parameter is copied to the local buffer -b, and MySQL's wrapped zlib's compression function is run at total -header offset of the local buffer. Please, do note that this function -does not test effectiveness of compression. If compression is turned -on in some connection, it is used all of the time. 
Also, it is very -important to be cognizant of the fact that this algorithm makes -possible that a single compressed packet contains several logical -packets. In this way compression rate is increased and network -throughput is increased as well. However, this algorithm has -consequences on the other side, that reads compressed packet, which -is covered in my_net_read function. After compression is done, the full -compression header is properly formed with the packet number, -compressed and uncompressed lengths. At the end of compression code, -third parameter is increased by total header length, as the original -header is not used (see above), and second parameter, pointer to data, -is set to point to local buffer b, in order that the further flow of -function is independent of compression. If a function is executed -on server side, a thread alarm initialized and if non-blocking is -active set at NET_WRITE_TIMEOUT. Two local (char *) pointers are -initialized, pos at beginning of second parameter, and end at end of -data. Then the loop is run as long as all data is written, which means -as long as pos != end. First vio_write function is called, with -parameters of vio field, pos and size of data (end - pos). Number of -bytes written over connection is saved in local variable (length). If -error is returned local bool variable (interrupted) is set according -to the return value of the vio_should_retry called with vio field as -parameter. This bool variable indicates whether writing was -interrupted in some way or not. - -Further, error from vio_write is treated differently on Unix versus -other OS's (Win32 or OS/2). On Unix an alarm is set if one is not -in use, no bytes have been written and there has been no interruption. -Also, in that case, if connection is not in blocking mode, a sub-loop -is run as long as blocking is not set with vio_blocking function. -Within the loop another run of above vio_write is run based on -return value of vio_is_retry function, provided number of repeated -writes is less than RETRY_COUNT. If that is not the case, error -field of struct NET is set to 1 and function exits. At the exit -of sub-loop number of reruns already executed is reset to zero and -another run of above vio_write function is attempted. If the function -is run on Win32 and OS/2, and in the case that function flow was -not interrupted and thread alarm is not in use, again the main loop -is continued until pos != end. In the case that this function is -executed on thread safe client program, a communication flow is -tested on EINTR, caused by context switching, by use of vio_errno -function, in which case the loop is continued. At the end of -processing of the error from vio_write, error field of struct NET -is set, and if on server last_errno field is set to -ER_NET_WRITE_INTERRUPTED in the case that local bool variable -(interrupted) is true or to ER_NET_ERROR_ON_WRITE. Before the end -of loop, in order to make possible evaluation of the loop condition, -pos is increased by the value written in last iteration (length). -Also global variable bytes_sent is increased by the same value, for -status purposes. At the end of the functions more fields is reset, -in case of compression, compression buffer (b) memory is released -and if thread is still in use, it is ended and blocking state is -reset to its original state, and function returns error is all bytes -are not written. 
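
A rough sketch of the compressed framing net_real_write() is described as producing: the payload is compressed with zlib's compress() at an offset of the 7-byte combined header, and that header then records the compressed length, the packet number and the uncompressed length. build_compressed_packet() is a hypothetical helper, not the server code, which goes through MySQL's zlib wrapper and the vio layer.

  #include <stdlib.h>
  #include <zlib.h>                    /* link with -lz */

  #define NET_HEADER_SIZE  4
  #define COMP_HEADER_SIZE 3

  /* Hypothetical helper: frame one compressed packet as described above. */
  unsigned char *build_compressed_packet(const unsigned char *data,
                                         unsigned long len,
                                         unsigned char pkt_nr,
                                         unsigned long *out_len)
  {
    uLongf comp_len = compressBound(len);
    unsigned char *b = malloc(NET_HEADER_SIZE + COMP_HEADER_SIZE + comp_len);

    if (!b || compress(b + NET_HEADER_SIZE + COMP_HEADER_SIZE, &comp_len,
                       data, len) != Z_OK)
    {
      free(b);
      return NULL;
    }
    b[0] = (unsigned char) (comp_len & 0xFF);         /* compressed length, */
    b[1] = (unsigned char) ((comp_len >> 8) & 0xFF);  /* 3 bytes            */
    b[2] = (unsigned char) ((comp_len >> 16) & 0xFF);
    b[3] = pkt_nr;                                    /* packet number      */
    b[4] = (unsigned char) (len & 0xFF);              /* uncompressed       */
    b[5] = (unsigned char) ((len >> 8) & 0xFF);       /* length, 3 bytes    */
    b[6] = (unsigned char) ((len >> 16) & 0xFF);

    *out_len = NET_HEADER_SIZE + COMP_HEADER_SIZE + comp_len;
    return b;
  }

The reading side, covered by my_real_read and my_net_read below, reverses this: it uncompresses the payload first and then slices it back into logical packets.
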
- - - -Function name: my_real_read (private, static function) - -Parameters: struct NET *, ulong * - -Return value: length of bytes read - -Function purpose: low level network connection read function - -Function description - -This function has made as a separate one when compression was -introduced in MySQL client/server protocol . It contains basic, low -level network reading functionality, while all dealings with -compressed packets are handled in next function. Compression in this -function is only handled in as much to unfold the length of uncompressed -data. First blocking state of connection is saved in local bool -variable net_blocking, and field more is set 1 for detailed reporting -in mysqld_list_processes. A new thread alarm is initialized, in order -to enable read timeout handling, and if on server and a connection can -block a program, the alarm is set at a value of timeout field. Local -pointer is set to the position of the next logical packet, with its -header skipped, which is at field where_b offset from buff. Next, a -two time run code is entered. A loop is run exactly two times because -first time number of bytes to be fetched (remain) are set to the -header size, which is different when compression is used or not used -on the connection. After first fetch has been done, number of packets -that will be received in second iteration is well known, as fetched -header contains the size of packet, packet number, and in the case of -compression, the size of the uncompressed packet. Then, as long as there are -bytes to read the loop is entered with first reading data from network -connection with vio_read function, called with parameters of field -vio, current position and remaining number of bytes, which value is -hold by local variable (remain) initialized at the value of header size, -which differs if compression is used. Number of bytes read are -returned in local length variable. If error is returned local bool -variable (interrupted) is set according to the return value of the -vio_should_retry called with vio field as parameter. This bool -variable indicates whether reading was interrupted in some way or not. - -Further, error from vio_read is treated differently on Unix versus -other OS's (Win32 or OS/2). On Unix an alarm is set if one is not -in use, no bytes have been read and there has been no interruption. -Also, in that case, if connection is not in blocking mode, a sub-loop -is run as long as blocking is not set with vio_blocking function. -Within the loop another run of above vio_read is run based on return -value of vio_is_retry function, provided number of repeated writes -is less than RETRY_COUNT. If that is not the case, error field of -struct NET is set to 1 and function exits. At the exit of sub-loop -number of reruns already executed is reset to zero and another run -of above vio_read function is attempted. If the function is run on -Win32 and OS/2, and in the case that function flow was not interrupted -and thread alarm is not in use, again the main loop is continued -as long as there are bytes remaining. In the case that this function -is executed on thread safe client program, then if another run -should be made, which is decided by the output of vio_should_retry -function, in which case the loop is continued. At the end of -processing of the error from vio_read, error field of struct NET -is set, and if on server last_errno field is set to ER_NET_READ_INTERRUPTED -in the case that local bool variable (interrupted) is true or to -ER_NET_ERROR_ON_READ. 
In case of such an error this function exits -and returns error. In the case when there is no error, number of -remaining bytes (remain) is decreased by the number of bytes read, -which should be zero, but in case it is not the entire code is still -in while (remain > 0) loop, which will be exited immediately if it -is. This has been done to accommodate errors in the traffic level -and for the very slow connections. Current position in field buff -is also moved by the amount of bytes read by vio_read function, and -global variable bytes_received is increased by the same value in a -thread safe manner. When the loop that is run until necessary bytes -are read (remain) is finished, then if external loop is in its first -run, of the two, packet sequencing is tested for consistency by -comparing the number contained at 4th byte in header with pkt_nr -field. Header location is found at where_b offset to field_b. Usage -of where_b is obligatory due to the possible compression usage. If -there is no compression on a connection, then where_b is always 0. -If there is a discrepancy, then first byte of the header is checked -whether it is equal to 255, because when error is sent by the server, -or by a client if it is sending data (like in LOAD DATA INFILE -LOCAL...), then first byte in header is set to 255. If it is not -255, then an error on packets being out of order is printed. In any -case, on server, last_errno field is set to ER_NET_PACKETS_OUT_OF_ORDER -and the function returns with an error, that is, the value returned is -packet_error. If a check on serial number of packet is successful, -pkt_nr field is incremented in order to enable checking packet order -with next packet and if compression is used, uncompressed length -is extracted from a proper position in header and returned in the -second parameter of this function. Length of the packet is saved, -for the purpose of a proper return value from this function. Still -in the first iteration of the main loop, a check must be made if -field buff could accommodate entire package that comes, in its -compressed or uncompressed form. This is done in such a way, because -zlib's compress and uncompress functions use the same memory area -for compression and uncompression. Necessary field buff length is -equal to current offset where data are (where_b which is zero for -non-compression), plus the larger value of compressed or uncompressed -package to be read in a second run. If this value is larger than -the current length of field buff, which is read from field max_packet, -then field buff has to be reallocated. If reallocation with net_realloc -function fails, the function returns an error. Before a second -loop is started, length to be read is set to the length of expected -data and current position (pos) is set at where_b offset from field -buff. At the end of function, if alarm is set, which is the case -if it is run on server or on a client if a function is interrupted -and another run of vio_read is attempted, alarm is ended and blocking -state is restored from the saved local bool variable net_blocking. -Function returns number of bytes read or the error (packet_error). 
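
The two-pass structure of my_real_read - read exactly the header first to learn the payload length and packet number, then read exactly that many payload bytes, retrying on interruption - can be sketched with plain POSIX sockets as below. read_fully() and read_packet() are illustrative helpers standing in for the vio_read()-based loops; the real code also handles the thread alarm, blocking-mode switching and buffer reallocation described above.

  #include <errno.h>
  #include <sys/types.h>
  #include <sys/socket.h>              /* POSIX recv(); illustrative only */

  /* Read exactly 'remain' bytes, retrying on EINTR, as the text describes. */
  static int read_fully(int fd, unsigned char *buf, size_t remain)
  {
    while (remain > 0)
    {
      ssize_t n = recv(fd, buf, remain, 0);
      if (n <= 0)
      {
        if (n < 0 && errno == EINTR)
          continue;                    /* interrupted: retry */
        return -1;                     /* error, or peer closed connection */
      }
      buf += n;
      remain -= (size_t) n;
    }
    return 0;
  }

  /* First pass: the 4-byte header; second pass: the announced payload. */
  long read_packet(int fd, unsigned char *buf, size_t bufsize,
                   unsigned char expected_pkt_nr)
  {
    unsigned char header[4];
    unsigned long len;

    if (read_fully(fd, header, sizeof header))
      return -1;
    if (header[3] != expected_pkt_nr)
      return -1;                       /* ER_NET_PACKETS_OUT_OF_ORDER */

    len = (unsigned long) header[0] |
          ((unsigned long) header[1] << 8) |
          ((unsigned long) header[2] << 16);
    if (len > bufsize)
      return -1;                       /* the real code grows buff instead */

    return read_fully(fd, buf, len) ? -1 : (long) len;
  }
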
- - -Function name: my_net_read - -Parameters: struct NET * - -Return value: length of bytes read - -Function purpose: Highest level general purpose reading function - -Function description - -First, if compression is not used, my_real_read is called, with -struct NET * a first parameter, and pointer to local ulong complen -as a second parameter, but its value is not used here. Number of -bytes read is returned in local ulong variable len. read_pos field -is set to an offset of value of where_b field from field buff. -where_b field actually denotes where in field buff is the current -packet. If returned number of bytes read (local variable len) does -not signal that an error in packet transmission occurred (that is, -it is not set to packet_error), then the string contained in read_pos -is zero terminated. Simply, the end of the string starting at -read_pos, and ending at read_pos + len, is set to zero. This is -done in that way, because mysql_use_result expects a zero terminated -string, and function returns with a value local variable len. This -ends this function in the case that compression is not used and the -remaining code is executed only if compression is enabled on the -connection. - -In order to explain how a compressed packet logically is cut into -meningful packets, the full meaning of several NET fields should -be explained. First of all, fields in NET are used and not local -variables, as all values should be saved between consecutive calls -of this function. Simply, this function is called in order to return -logical packets, but this function does not need to call my_real_read -function everytime, because when a large packet is uncompressed, -it may, but not necessarily so, contain several logical packets. -Therefore, in order to preserve data on logical packets local -variables are not used. Instead fields in NET struct are used. Field -remain_in_buf denotes how many bytes of entire uncompressed packets -is still contained within buff. field buf_length saves the value -of the length of entire uncompressed packet. field save_char is -used to save the character at the position where the packet ends, -which character has to be replaced with a zero, '\0', in order to -make a logical packet zero delimited, for mysql_use_result. Field -length stores the value of the length of compressed packet. Field -read_pos as usual, points to the current reading position. This -char * pointer is used by all functions that call this function in -order to fetch their data. Field buff is not used for that purpose, -but read_pos is used instead. This change was introduced with -compression, when algorithm accommodated grouping of several packets -together. - -Now that meanings of all relevant NET fields are explained, -we can proceed with the flow of this function for the case when -compression is active. First, if there are remaining portions of -compressed packet in a field buff, saved character value is set at -the position where zero char '\0' was inserted to enable the string -to be zero delimited for mysql_use_result. Then a loop is started. -In the first part of the loop, if there are remaining bytes, local -uchar *pos variable is set at the current position in field buff -where a new packet starts. This position is an (buf_length - -remain_in_buf) offset in field buff. As it is possible that next -logical packet is not read to the full length in the remaining of -the field buf, several things had to be inspected. 
It should be -noted that data that is read from net_real_read contains only logical -packets containing 4 byte headers only, being 4 byte headers prepared -by my_net_write or net_write_command. But, when written, logical -packet could be so divided that only a part of header is read in. -Therefore after pointer to the start of the next packet has been -saved, a check is made whether number of remaining bytes in buffer -is less than 4, being 3 bytes for length and one byte for packet -number. If it is greater, then the length of the logical packet is -extracted and saved a length field. Then a check is made whether -entire packet is contained within a buf, that is, a check is made -that the logical packet is fully contained in the buffer. In that -case, number of bytes remaining in buffer is decreased by the full -length of logical packet (4 + length field), read_pos is moved -forward by 4 bytes to skip header and be set at a beginning of data -in logical packet, length field is saved for the value to be returned -in function and the loop is exited. In the case that the entire -logical packet is not contained within the buffer, then if length of -the entire buffer differs from remaining length of logical packet, -it (logical packet) is moved to the beginning of the field buff. -If length of the entire buffer equals the remaining length of logical -packet, where_b and buf_length fields are set to 0. This is done -so that in both cases buffer is ready to accept next part of packet. - -In order to get a next part of a packet, still within a loop, -my_real_read function is called and length of compressed packet is -returned to a local len variable, and length of compressed data is -returned in complen variable. In the case of non-compression value -of complen is zero. If packet_error is from my_real_read function, -this function returns also with packet_error. If it is not a -packet_error, my_uncompress function is called to uncompress data. -It is called with offset of where_b data from field buff, as it is -the position where compressed packet starts, and with len and complen -values, being lengths of compressed and uncompressed data. If there -is no compression, 0 is returned for uncompressed size from -my_real_read function, and my_uncompress wrapper function is made -to skip zlib uncompress in that case. If error is returned from -my_uncompress, error field is set to 1, if on server last_errno is -set to ER_NET_UNCOMPRESS_ERROR and loop is exited and function -returns with packet_error. If not, buf_length and remain_in_buf -fields are set to the uncompressed size of buffer and the loop is -continued. When the loop is exited save_char field is used to save -the char at end of a logical packet, which is an offset of field -len from position in field buff pointed by field read_pos, in order -that zero char is set at the same position, for mysql_use_result. -Function returns the length of the logical packet without its header. 
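
To summarize how an uncompressed buffer is cut back into logical packets, here is a self-contained sketch: each logical packet is a 4-byte header (3-byte length plus packet number) followed by its payload, and the reader simply walks the buffer, much as the remain_in_buf/buf_length bookkeeping above does. next_logical_packet() is an illustrative helper; it deliberately ignores save_char, the '\0' termination needed for mysql_use_result, and packets split across reads.

  #include <stdio.h>

  /* Return the payload of the next complete logical packet, or NULL. */
  static const unsigned char *next_logical_packet(const unsigned char *buf,
                                                  unsigned long *remain,
                                                  unsigned long *len)
  {
    if (*remain < 4)
      return NULL;                           /* header itself is incomplete */

    *len = (unsigned long) buf[0] |
           ((unsigned long) buf[1] << 8) |
           ((unsigned long) buf[2] << 16);
    if (4 + *len > *remain)
      return NULL;                           /* packet continues in next read */

    *remain -= 4 + *len;
    return buf + 4;                          /* caller advances by 4 + *len */
  }

  int main(void)
  {
    /* two logical packets, "abc" (pkt 0) and "de" (pkt 1), in one buffer */
    unsigned char buf[] = { 3,0,0,0,'a','b','c', 2,0,0,1,'d','e' };
    unsigned long remain = sizeof buf, len;
    const unsigned char *p = buf;
    const unsigned char *payload;

    while ((payload = next_logical_packet(p, &remain, &len)) != NULL)
    {
      printf("packet of %lu bytes: %.*s\n", len, (int) len,
             (const char *) payload);
      p = payload + len;
    }
    return 0;
  }
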
diff --git a/Makefile.am b/Makefile.am index 6d435bff85a..c4a4a040a40 100644 --- a/Makefile.am +++ b/Makefile.am @@ -97,7 +97,10 @@ tags: .PHONY: init-db bin-dist \ test test-force test-full test-force-full test-force-mem \ test-pl test-force-pl test-full-pl test-force-full-pl test-force-pl-mem \ - test-unit test-ps test-nr test-pr test-ns test-binlog-statement + test-unit test-ps test-nr test-pr test-ns test-binlog-statement \ + test-ext-funcs test-ext-rpl test-ext-partitions test-ext \ + test-fast test-fast-cursor test-fast-view test-fast-prepare \ + test-full-qa # Target 'test' will run the regression test suite using the built server. # @@ -120,11 +123,11 @@ test-nr: test-pr: cd mysql-test ; \ - @PERL@ ./mysql-test-run.pl $(force) --ps-protocol --mysqld=--binlog-format=row + @PERL@ ./mysql-test-run.pl $(force) $(mem) --ps-protocol --mysqld=--binlog-format=row test-ns: cd mysql-test ; \ - @PERL@ ./mysql-test-run.pl $(force) --mysqld=--binlog-format=mixed + @PERL@ ./mysql-test-run.pl $(force) $(mem) --mysqld=--binlog-format=mixed test-binlog-statement: cd mysql-test ; \ @@ -142,7 +145,29 @@ test-force-full: #used by autopush.pl to run memory based tests test-force-mem: - $(MAKE) 'force=--force --mem' test + $(MAKE) force=--force mem=--mem test + +test-bt: + -cd mysql-test ; MTR_BUILD_THREAD=auto \ + @PERL@ ./mysql-test-run.pl --comment=normal --force --timer \ + --skip-ndbcluster --report-features + -cd mysql-test ; MTR_BUILD_THREAD=auto \ + @PERL@ ./mysql-test-run.pl --comment=ps --force --timer \ + --skip-ndbcluster --ps-protocol + -cd mysql-test ; MTR_BUILD_THREAD=auto \ + @PERL@ ./mysql-test-run.pl --comment=normal+rowrepl --force --timer \ + --skip-ndbcluster --mysqld=--binlog-format=row + -cd mysql-test ; MTR_BUILD_THREAD=auto \ + @PERL@ ./mysql-test-run.pl --comment=ps+rowrepl+NDB --force --timer \ + --ps-protocol --mysqld=--binlog-format=row + -cd mysql-test ; MTR_BUILD_THREAD=auto \ + @PERL@ ./mysql-test-run.pl --comment=NDB --force --timer \ + --with-ndbcluster-only + +test-bt-debug: + -cd mysql-test ; MTR_BUILD_THREAD=auto \ + @PERL@ ./mysql-test-run.pl --comment=debug --force --timer \ + --skip-ndbcluster --skip-rpl --report-features # Keep these for a while test-pl: test @@ -151,5 +176,43 @@ test-force-pl: test-force test-force-pl-mem: test-force-mem test-force-full-pl: test-force-full +test-ext-funcs: + cd mysql-test ; \ + @PERL@ ./mysql-test-run.pl --force --suite=funcs_1 ; \ + @PERL@ ./mysql-test-run.pl --force --suite=funcs_2 + +test-ext-rpl: + cd mysql-test ; \ + @PERL@ ./mysql-test-run.pl --force --suite=rpl + +test-ext-partitions: + cd mysql-test ; \ + @PERL@ ./mysql-test-run.pl --force --suite=partitions + +test-ext-jp: + cd mysql-test ; \ + @PERL@ ./mysql-test-run.pl --force --suite=jp + +test-ext: test-ext-funcs test-ext-rpl test-ext-partitions test-ext-jp + +test-fast: + cd mysql-test ; \ + @PERL@ ./mysql-test-run.pl $(subset) --force --skip-ndb --skip-innodb --skip-im --skip-rpl ; \ + @PERL@ ./mysql-test-run.pl $(subset) --force --suite=funcs_1 --do-test=myisam + +test-fast-view: + $(MAKE) subset=--view-protocol test-fast + +test-fast-cursor: + $(MAKE) subset=--cursor-protocol test-fast + +test-fast-prepare: + $(MAKE) subset=--ps-protocol test-fast + +test-full-qa: + $(MAKE) force=--force test-pr \ + test-binlog-statement test-ext test-fast-view \ + test-fast-cursor test-unit + # Don't update the files from bitkeeper %::SCCS/s.% diff --git a/client/CMakeLists.txt b/client/CMakeLists.txt index d492c49a6b2..ea9e7547354 100644 --- a/client/CMakeLists.txt +++ 
b/client/CMakeLists.txt @@ -95,6 +95,7 @@ ADD_EXECUTABLE(mysqladmin mysqladmin.cc) TARGET_LINK_LIBRARIES(mysqladmin mysqlclient mysys dbug yassl taocrypt zlib wsock32) ADD_EXECUTABLE(mysqlslap mysqlslap.c) +SET_SOURCE_FILES_PROPERTIES(mysqlslap.c PROPERTIES COMPILE_FLAGS "-DTHREADS") TARGET_LINK_LIBRARIES(mysqlslap mysqlclient mysys yassl taocrypt zlib wsock32 dbug) ADD_EXECUTABLE(echo echo.c) diff --git a/client/Makefile.am b/client/Makefile.am index 1c908ea715e..e22080e3dd8 100644 --- a/client/Makefile.am +++ b/client/Makefile.am @@ -34,7 +34,7 @@ LDADD= @CLIENT_EXTRA_LDFLAGS@ $(CLIENT_THREAD_LIBS) \ noinst_HEADERS = sql_string.h completion_hash.h my_readline.h \ client_priv.h -EXTRA_DIST = get_password.c CMakeLists.txt +EXTRA_DIST = get_password.c CMakeLists.txt echo.c bin_PROGRAMS = mysql \ mysqladmin \ @@ -76,6 +76,7 @@ mysqlimport_LDADD = $(CXXLDFLAGS) $(CLIENT_THREAD_LIBS) \ mysqlshow_SOURCES= mysqlshow.c mysqlslap_SOURCES= mysqlslap.c +mysqlslap_CFLAGS= -DTHREAD -UUNDEF_THREADS_HACK mysqlslap_LDADD = $(CXXLDFLAGS) $(CLIENT_THREAD_LIBS) \ @CLIENT_EXTRA_LDFLAGS@ \ $(LIBMYSQLCLIENT_LA) \ @@ -94,9 +95,9 @@ DEFS = -DUNDEF_THREADS_HACK \ -DDEFAULT_MYSQL_HOME="\"$(prefix)\"" \ -DDATADIR="\"$(localstatedir)\"" -sql_src=log_event.h mysql_priv.h log_event.cc my_decimal.h my_decimal.cc +sql_src=log_event.h mysql_priv.h rpl_constants.h \ + log_event.cc my_decimal.h my_decimal.cc strings_src=decimal.c -EXTRA_DIST = get_password.c CMakeLists.txt echo.c link_sources: for f in $(sql_src) ; do \ diff --git a/client/client_priv.h b/client/client_priv.h index 646619f62b1..4dbf3f7895b 100644 --- a/client/client_priv.h +++ b/client/client_priv.h @@ -49,7 +49,6 @@ enum options_client OPT_TRIGGERS, OPT_MYSQL_ONLY_PRINT, OPT_MYSQL_LOCK_DIRECTORY, - OPT_MYSQL_SLAP_SLAVE, OPT_USE_THREADS, OPT_IMPORT_USE_THREADS, OPT_MYSQL_NUMBER_OF_QUERY, @@ -58,6 +57,14 @@ enum options_client OPT_TZ_UTC, OPT_AUTO_CLOSE, OPT_CREATE_SLAP_SCHEMA, OPT_SLAP_CSV, OPT_SLAP_CREATE_STRING, OPT_SLAP_AUTO_GENERATE_SQL_LOAD_TYPE, OPT_SLAP_AUTO_GENERATE_WRITE_NUM, + OPT_SLAP_AUTO_GENERATE_ADD_AUTO, + OPT_SLAP_AUTO_GENERATE_GUID_PRIMARY, + OPT_SLAP_AUTO_GENERATE_EXECUTE_QUERIES, + OPT_SLAP_AUTO_GENERATE_SECONDARY_INDEXES, + OPT_SLAP_AUTO_GENERATE_UNIQUE_WRITE_NUM, + OPT_SLAP_AUTO_GENERATE_UNIQUE_QUERY_NUM, + OPT_SLAP_PRE_QUERY, + OPT_SLAP_POST_QUERY, OPT_MYSQL_REPLACE_INTO, OPT_BASE64_OUTPUT, OPT_SERVER_ID, OPT_FIX_TABLE_NAMES, OPT_FIX_DB_NAMES, OPT_SSL_VERIFY_SERVER_CERT, OPT_DEBUG_INFO, OPT_COLUMN_TYPES diff --git a/client/echo.c b/client/echo.c index 4483eaad293..e3d22edb3ae 100644 --- a/client/echo.c +++ b/client/echo.c @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/client/mysql.cc b/client/mysql.cc index 48172d97f56..d2c7bd8812c 100644 --- a/client/mysql.cc +++ b/client/mysql.cc @@ -2517,7 +2517,8 @@ print_table_data_xml(MYSQL_RES *result) tee_fputs("<?xml version=\"1.0\"?>\n\n<resultset statement=\"", PAGER); xmlencode_print(glob_buffer.ptr(), (int)strlen(glob_buffer.ptr())); - tee_fputs("\">", PAGER); + tee_fputs("\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">", + PAGER); fields = mysql_fetch_fields(result); while ((cur = mysql_fetch_row(result))) diff --git a/client/mysqladmin.cc b/client/mysqladmin.cc index ccfff4d7a03..cb704d716cc 100644 --- a/client/mysqladmin.cc +++ b/client/mysqladmin.cc @@ -40,7 +40,7 @@ ulonglong last_values[MAX_MYSQL_VAR]; static int interval=0; static my_bool option_force=0,interrupted=0,new_line=0, opt_compress=0, opt_relative=0, opt_verbose=0, opt_vertical=0, - tty_password= 0, info_flag= 0; + tty_password= 0, info_flag= 0, opt_nobeep; static uint tcp_port = 0, option_wait = 0, option_silent=0, nr_iterations, opt_count_iterations= 0; static ulong opt_connect_timeout, opt_shutdown_timeout; @@ -54,6 +54,7 @@ static char *opt_ndb_connectstring=0; static char *shared_memory_base_name=0; #endif static uint opt_protocol=0; +static myf error_flags; /* flags to pass to my_printf_error, like ME_BELL */ /* When using extended-status relatively, ex_val_max_len is the estimated @@ -154,6 +155,8 @@ static struct my_option my_long_options[] = NO_ARG, 0, 0, 0, 0, 0, 0}, {"host", 'h', "Connect to host.", (gptr*) &host, (gptr*) &host, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"no-beep", 'b', "Turn off beep on error.", (gptr*) &opt_nobeep, + (gptr*) &opt_nobeep, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"password", 'p', "Password to use when connecting to server. If password is not given it's asked from the tty.", 0, 0, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, @@ -352,6 +355,8 @@ int main(int argc,char *argv[]) #endif if (default_charset) mysql_options(&mysql, MYSQL_SET_CHARSET_NAME, default_charset); + error_flags= (myf)(opt_nobeep ? 
0 : ME_BELL); + if (sql_connect(&mysql, option_wait)) { unsigned int err= mysql_errno(&mysql); @@ -450,7 +455,7 @@ static my_bool sql_connect(MYSQL *mysql, uint wait) if (!host) host= (char*) LOCAL_HOST; my_printf_error(0,"connect to server at '%s' failed\nerror: '%s'", - MYF(ME_BELL), host, mysql_error(mysql)); + error_flags, host, mysql_error(mysql)); if (mysql_errno(mysql) == CR_CONNECTION_ERROR) { fprintf(stderr, @@ -525,14 +530,14 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) char buff[FN_REFLEN+20]; if (argc < 2) { - my_printf_error(0,"Too few arguments to create",MYF(ME_BELL)); + my_printf_error(0, "Too few arguments to create", error_flags); return 1; } sprintf(buff,"create database `%.*s`",FN_REFLEN,argv[1]); if (mysql_query(mysql,buff)) { my_printf_error(0,"CREATE DATABASE failed; error: '%-.200s'", - MYF(ME_BELL), mysql_error(mysql)); + error_flags, mysql_error(mysql)); return -1; } argc--; argv++; @@ -542,7 +547,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) { if (argc < 2) { - my_printf_error(0,"Too few arguments to drop",MYF(ME_BELL)); + my_printf_error(0, "Too few arguments to drop", error_flags); return 1; } if (drop_db(mysql,argv[1])) @@ -567,7 +572,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) if (mysql_shutdown(mysql, SHUTDOWN_DEFAULT)) { - my_printf_error(0,"shutdown failed; error: '%s'",MYF(ME_BELL), + my_printf_error(0, "shutdown failed; error: '%s'", error_flags, mysql_error(mysql)); return -1; } @@ -588,7 +593,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) case ADMIN_RELOAD: if (mysql_query(mysql,"flush privileges")) { - my_printf_error(0,"reload failed; error: '%s'",MYF(ME_BELL), + my_printf_error(0, "reload failed; error: '%s'", error_flags, mysql_error(mysql)); return -1; } @@ -599,7 +604,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) REFRESH_READ_LOCK | REFRESH_SLAVE | REFRESH_MASTER))) { - my_printf_error(0,"refresh failed; error: '%s'",MYF(ME_BELL), + my_printf_error(0, "refresh failed; error: '%s'", error_flags, mysql_error(mysql)); return -1; } @@ -607,7 +612,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) case ADMIN_FLUSH_THREADS: if (mysql_refresh(mysql,(uint) REFRESH_THREADS)) { - my_printf_error(0,"refresh failed; error: '%s'",MYF(ME_BELL), + my_printf_error(0, "refresh failed; error: '%s'", error_flags, mysql_error(mysql)); return -1; } @@ -651,7 +656,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) "show processlist")) || !(result = mysql_store_result(mysql))) { - my_printf_error(0,"process list failed; error: '%s'",MYF(ME_BELL), + my_printf_error(0, "process list failed; error: '%s'", error_flags, mysql_error(mysql)); return -1; } @@ -674,7 +679,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) char *pos; if (argc < 2) { - my_printf_error(0,"Too few arguments to 'kill'",MYF(ME_BELL)); + my_printf_error(0, "Too few arguments to 'kill'", error_flags); return 1; } pos=argv[1]; @@ -682,7 +687,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) { if (mysql_kill(mysql,(ulong) atol(pos))) { - my_printf_error(0,"kill failed on %ld; error: '%s'",MYF(ME_BELL), + my_printf_error(0, "kill failed on %ld; error: '%s'", error_flags, atol(pos), mysql_error(mysql)); error=1; } @@ -698,7 +703,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) case ADMIN_DEBUG: if (mysql_dump_debug_info(mysql)) { - my_printf_error(0,"debug failed; error: '%s'",MYF(ME_BELL), + 
my_printf_error(0, "debug failed; error: '%s'", error_flags, mysql_error(mysql)); return -1; } @@ -712,7 +717,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) if (mysql_query(mysql,"show /*!40003 GLOBAL */ variables") || !(res=mysql_store_result(mysql))) { - my_printf_error(0,"unable to show variables; error: '%s'",MYF(ME_BELL), + my_printf_error(0, "unable to show variables; error: '%s'", error_flags, mysql_error(mysql)); return -1; } @@ -734,7 +739,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) if (mysql_query(mysql, "show /*!50002 GLOBAL */ status") || !(res = mysql_store_result(mysql))) { - my_printf_error(0, "unable to show status; error: '%s'", MYF(ME_BELL), + my_printf_error(0, "unable to show status; error: '%s'", error_flags, mysql_error(mysql)); return -1; } @@ -784,7 +789,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) { if (mysql_refresh(mysql,REFRESH_LOG)) { - my_printf_error(0,"refresh failed; error: '%s'",MYF(ME_BELL), + my_printf_error(0, "refresh failed; error: '%s'", error_flags, mysql_error(mysql)); return -1; } @@ -794,7 +799,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) { if (mysql_query(mysql,"flush hosts")) { - my_printf_error(0,"refresh failed; error: '%s'",MYF(ME_BELL), + my_printf_error(0, "refresh failed; error: '%s'", error_flags, mysql_error(mysql)); return -1; } @@ -804,7 +809,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) { if (mysql_query(mysql,"flush tables")) { - my_printf_error(0,"refresh failed; error: '%s'",MYF(ME_BELL), + my_printf_error(0, "refresh failed; error: '%s'", error_flags, mysql_error(mysql)); return -1; } @@ -814,7 +819,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) { if (mysql_query(mysql,"flush status")) { - my_printf_error(0,"refresh failed; error: '%s'",MYF(ME_BELL), + my_printf_error(0, "refresh failed; error: '%s'", error_flags, mysql_error(mysql)); return -1; } @@ -831,7 +836,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) if (argc < 2) { - my_printf_error(0,"Too few arguments to change password",MYF(ME_BELL)); + my_printf_error(0, "Too few arguments to change password", error_flags); return 1; } if (argv[1][0]) @@ -854,7 +859,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) if (mysql_query(mysql, "SHOW VARIABLES LIKE 'old_passwords'")) { my_printf_error(0, "Could not determine old_passwords setting from server; error: '%s'", - MYF(ME_BELL),mysql_error(mysql)); + error_flags, mysql_error(mysql)); return -1; } else @@ -865,7 +870,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) my_printf_error(0, "Could not get old_passwords setting from " "server; error: '%s'", - MYF(ME_BELL),mysql_error(mysql)); + error_flags, mysql_error(mysql)); return -1; } if (!mysql_num_rows(res)) @@ -890,7 +895,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) if (mysql_query(mysql,"set sql_log_off=1")) { my_printf_error(0, "Can't turn off logging; error: '%s'", - MYF(ME_BELL),mysql_error(mysql)); + error_flags, mysql_error(mysql)); return -1; } if (mysql_query(mysql,buff)) @@ -898,7 +903,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) if (mysql_errno(mysql)!=1290) { my_printf_error(0,"unable to change password; error: '%s'", - MYF(ME_BELL),mysql_error(mysql)); + error_flags, mysql_error(mysql)); return -1; } else @@ -912,7 +917,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) " with grant tables disabled (was 
started with" " --skip-grant-tables).\n" "Use: \"mysqladmin flush-privileges password '*'\"" - " instead", MYF(ME_BELL)); + " instead", error_flags); return -1; } } @@ -923,7 +928,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) case ADMIN_START_SLAVE: if (mysql_query(mysql, "START SLAVE")) { - my_printf_error(0, "Error starting slave: %s", MYF(ME_BELL), + my_printf_error(0, "Error starting slave: %s", error_flags, mysql_error(mysql)); return -1; } @@ -933,7 +938,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) case ADMIN_STOP_SLAVE: if (mysql_query(mysql, "STOP SLAVE")) { - my_printf_error(0, "Error stopping slave: %s", MYF(ME_BELL), + my_printf_error(0, "Error stopping slave: %s", error_flags, mysql_error(mysql)); return -1; } @@ -959,7 +964,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) else { my_printf_error(0,"mysqld doesn't answer to ping, error: '%s'", - MYF(ME_BELL),mysql_error(mysql)); + error_flags, mysql_error(mysql)); return -1; } } @@ -970,7 +975,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) { if (argc < 2) { - my_printf_error(0,"Too few arguments to ndb-mgm",MYF(ME_BELL)); + my_printf_error(0, "Too few arguments to ndb-mgm", error_flags); return 1; } { @@ -984,7 +989,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) break; #endif default: - my_printf_error(0,"Unknown command: '%-.60s'",MYF(ME_BELL),argv[0]); + my_printf_error(0, "Unknown command: '%-.60s'", error_flags, argv[0]); return 1; } } @@ -1062,7 +1067,7 @@ static int drop_db(MYSQL *mysql, const char *db) sprintf(name_buff,"drop database `%.*s`",FN_REFLEN,db); if (mysql_query(mysql,name_buff)) { - my_printf_error(0,"DROP DATABASE %s failed;\nerror: '%s'",MYF(ME_BELL), + my_printf_error(0, "DROP DATABASE %s failed;\nerror: '%s'", error_flags, db,mysql_error(mysql)); return 1; } @@ -1287,7 +1292,7 @@ static my_bool get_pidfile(MYSQL *mysql, char *pidfile) if (mysql_query(mysql, "SHOW VARIABLES LIKE 'pid_file'")) { - my_printf_error(0,"query failed; error: '%s'",MYF(ME_BELL), + my_printf_error(0, "query failed; error: '%s'", error_flags, mysql_error(mysql)); } result = mysql_store_result(mysql); diff --git a/client/mysqlbinlog.cc b/client/mysqlbinlog.cc index 940fac6da38..dec750d7313 100644 --- a/client/mysqlbinlog.cc +++ b/client/mysqlbinlog.cc @@ -158,11 +158,7 @@ class Load_log_processor public: Load_log_processor() {} - ~Load_log_processor() - { - destroy(); - delete_dynamic(&file_names); - } + ~Load_log_processor() {} int init() { @@ -182,20 +178,22 @@ public: target_dir_name_len= strlen(target_dir_name); } void destroy() + { + File_name_record *ptr= (File_name_record *)file_names.buffer; + File_name_record *end= ptr + file_names.elements; + for (; ptr < end; ptr++) { - File_name_record *ptr= (File_name_record *)file_names.buffer; - File_name_record *end= ptr + file_names.elements; - for (; ptr<end; ptr++) + if (ptr->fname) { - if (ptr->fname) - { - my_free(ptr->fname, MYF(MY_WME)); - delete ptr->event; - bzero((char *)ptr, sizeof(File_name_record)); - } + my_free(ptr->fname, MYF(MY_WME)); + delete ptr->event; + bzero((char *)ptr, sizeof(File_name_record)); } } + delete_dynamic(&file_names); + } + /* Obtain Create_file event for LOAD DATA statement by its file_id. 
@@ -483,19 +481,17 @@ static int write_event_header_and_base64(Log_event *ev, FILE *result_file, PRINT_EVENT_INFO *print_event_info) { + IO_CACHE *head= &print_event_info->head_cache; + IO_CACHE *body= &print_event_info->body_cache; DBUG_ENTER("write_event_header_and_base64"); - /* Write header and base64 output to cache */ - IO_CACHE result_cache; - if (open_cached_file(&result_cache, NULL, NULL, 0, MYF(MY_WME | MY_NABP))) - return 1; - ev->print_header(&result_cache, print_event_info, FALSE); - ev->print_base64(&result_cache, print_event_info, FALSE); + /* Write header and base64 output to cache */ + ev->print_header(head, print_event_info, FALSE); + ev->print_base64(body, print_event_info, FALSE); /* Read data from cache and write to result file */ - my_b_copy_to_file(&result_cache, result_file); - close_cached_file(&result_cache); - DBUG_RETURN(0); + DBUG_RETURN(copy_event_cache_to_file_and_reinit(head, result_file) || + copy_event_cache_to_file_and_reinit(body, result_file)); } @@ -1577,6 +1573,7 @@ int main(int argc, char** argv) cleanup(); free_defaults(defaults_argv); my_free_open_file_info(); + load_processor.destroy(); /* We cannot free DBUG, it is used in global destructors after exit(). */ my_end((info_flag ? MY_CHECK_ERROR : 0) | MY_DONT_FREE_DBUG); diff --git a/client/mysqlslap.c b/client/mysqlslap.c index ad2c8685ba1..ef11f4bab5b 100644 --- a/client/mysqlslap.c +++ b/client/mysqlslap.c @@ -62,7 +62,6 @@ TODO: Add language for better tests String length for files and those put on the command line are not setup to handle binary data. - Report results of each thread into the lock file we use. More stats Break up tests and run them on multiple hosts at once. Allow output to be fed into a database directly. @@ -71,16 +70,18 @@ TODO: #define SHOW_VERSION "0.9" -#define HUGE_STRING_LENGTH 8096 +#define HUGE_STRING_LENGTH 8196 #define RAND_STRING_SIZE 126 +/* Types */ +#define SELECT_TYPE 0 +#define UPDATE_TYPE 1 +#define INSERT_TYPE 2 +#define UPDATE_TYPE_REQUIRES_PREFIX 3 +#define CREATE_TABLE_TYPE 4 +#define SELECT_TYPE_REQUIRES_PREFIX 5 + #include "client_priv.h" -#ifdef HAVE_LIBPTHREAD -#include <my_pthread.h> -#endif -#include <my_sys.h> -#include <m_string.h> -#include <mysql.h> #include <mysqld_error.h> #include <my_dir.h> #include <signal.h> @@ -92,9 +93,6 @@ TODO: #endif #include <ctype.h> -#define MYSLAPLOCK "/myslaplock.lck" -#define MYSLAPLOCK_DIR "/tmp" - #ifdef __WIN__ #define srandom srand #define random rand @@ -105,10 +103,23 @@ TODO: static char *shared_memory_base_name=0; #endif +/* Global Thread counter */ +uint thread_counter; +pthread_mutex_t counter_mutex; +pthread_cond_t count_threshhold; +uint master_wakeup; +pthread_mutex_t sleeper_mutex; +pthread_cond_t sleep_threshhold; + static char **defaults_argv; +char **primary_keys; +unsigned long long primary_keys_number_of; + static char *host= NULL, *opt_password= NULL, *user= NULL, *user_supplied_query= NULL, + *user_supplied_pre_statements= NULL, + *user_supplied_post_statements= NULL, *default_engine= NULL, *opt_mysql_unix_port= NULL; @@ -116,26 +127,35 @@ const char *delimiter= "\n"; const char *create_schema_string= "mysqlslap"; -const char *lock_directory; -char lock_file_str[FN_REFLEN]; - static my_bool opt_preserve; static my_bool opt_only_print= FALSE; -static my_bool opt_slave; - static my_bool opt_compress= FALSE, tty_password= FALSE, opt_silent= FALSE, + auto_generate_sql_autoincrement= FALSE, + auto_generate_sql_guid_primary= FALSE, auto_generate_sql= FALSE; const char *auto_generate_sql_type= 
"mixed"; static unsigned long connect_flags= CLIENT_MULTI_RESULTS; -static int verbose, num_int_cols, num_char_cols, delimiter_length; -static int iterations; +static int verbose, delimiter_length; +const char *num_int_cols_opt; +const char *num_char_cols_opt; +/* Yes, we do set defaults here */ +static unsigned int num_int_cols= 1; +static unsigned int num_char_cols= 1; +static unsigned int num_int_cols_index= 0; +static unsigned int num_char_cols_index= 0; + +static unsigned int iterations; static char *default_charset= (char*) MYSQL_DEFAULT_CHARSET_NAME; static ulonglong actual_queries= 0; +static ulonglong auto_actual_queries; +static ulonglong auto_generate_sql_unique_write_number; +static ulonglong auto_generate_sql_unique_query_number; +static unsigned int auto_generate_sql_secondary_indexes; static ulonglong num_of_query; static ulonglong auto_generate_sql_number; const char *concurrency_str= NULL; @@ -150,7 +170,6 @@ static uint opt_protocol= 0; static int get_options(int *argc,char ***argv); static uint opt_mysql_port= 0; -static uint opt_use_threads; static const char *load_default_groups[]= { "mysqlslap","client",0 }; @@ -159,9 +178,22 @@ typedef struct statement statement; struct statement { char *string; size_t length; + unsigned char type; + char *option; + size_t option_length; statement *next; }; +typedef struct option_string option_string; + +struct option_string { + char *string; + size_t length; + char *option; + size_t option_length; + option_string *next; +}; + typedef struct stats stats; struct stats { @@ -175,7 +207,6 @@ typedef struct thread_context thread_context; struct thread_context { statement *stmt; ulonglong limit; - bool thread; }; typedef struct conclusions conclusions; @@ -192,27 +223,36 @@ struct conclusions { unsigned long long min_rows; }; +static option_string *engine_options= NULL; +static statement *pre_statements= NULL; +static statement *post_statements= NULL; static statement *create_statements= NULL, - *engine_statements= NULL, *query_statements= NULL; /* Prototypes */ void print_conclusions(conclusions *con); void print_conclusions_csv(conclusions *con); -void generate_stats(conclusions *con, statement *eng, stats *sptr); +void generate_stats(conclusions *con, option_string *eng, stats *sptr); uint parse_comma(const char *string, uint **range); uint parse_delimiter(const char *script, statement **stmt, char delm); +uint parse_option(const char *origin, option_string **stmt, char delm); static int drop_schema(MYSQL *mysql, const char *db); uint get_random_string(char *buf); static statement *build_table_string(void); static statement *build_insert_string(void); -static statement *build_query_string(void); +static statement *build_update_string(void); +static statement * build_select_string(my_bool key); +static int generate_primary_key_list(MYSQL *mysql, option_string *engine_stmt); +static int drop_primary_key_list(void); static int create_schema(MYSQL *mysql, const char *db, statement *stmt, - statement *engine_stmt); + option_string *engine_stmt); static int run_scheduler(stats *sptr, statement *stmts, uint concur, ulonglong limit); -int run_task(thread_context *con); +pthread_handler_t run_task(void *p); void statement_cleanup(statement *stmt); +void option_cleanup(option_string *stmt); +void concurrency_loop(MYSQL *mysql, uint current, option_string *eptr); +static int run_statements(MYSQL *mysql, statement *stmt); static const char ALPHANUMERICS[]= "0123456789ABCDEFGHIJKLMNOPQRSTWXYZabcdefghijklmnopqrstuvwxyz"; @@ -246,13 +286,7 @@ static 
int gettimeofday(struct timeval *tp, void *tzp) int main(int argc, char **argv) { MYSQL mysql; - int x; - unsigned long long client_limit; - statement *eptr; - -#ifdef __WIN__ - opt_use_threads= 1; -#endif + option_string *eptr; MY_INIT(argv[0]); @@ -309,78 +343,63 @@ int main(int argc, char **argv) } } + VOID(pthread_mutex_init(&counter_mutex, NULL)); + VOID(pthread_cond_init(&count_threshhold, NULL)); + VOID(pthread_mutex_init(&sleeper_mutex, NULL)); + VOID(pthread_cond_init(&sleep_threshhold, NULL)); + /* Main iterations loop */ - eptr= engine_statements; + eptr= engine_options; do { /* For the final stage we run whatever queries we were asked to run */ uint *current; - conclusions conclusion; - - for (current= concurrency; current && *current; current++) - { - stats *head_sptr; - stats *sptr; - - head_sptr= (stats *)my_malloc(sizeof(stats) * iterations, MYF(MY_ZEROFILL)); - - bzero(&conclusion, sizeof(conclusions)); - if (num_of_query) - client_limit= num_of_query / *current; - else - client_limit= actual_queries; - - for (x= 0, sptr= head_sptr; x < iterations; x++, sptr++) - { - /* - We might not want to load any data, such as when we are calling - a stored_procedure that doesn't use data, or we know we already have - data in the table. - */ - if (!opt_preserve) - drop_schema(&mysql, create_schema_string); - /* First we create */ - if (create_statements) - create_schema(&mysql, create_schema_string, create_statements, eptr); + if (verbose >= 2) + printf("Starting Concurrency Test\n"); - run_scheduler(sptr, query_statements, *current, client_limit); + if (*concurrency) + { + for (current= concurrency; current && *current; current++) + concurrency_loop(&mysql, *current, eptr); + } + else + { + uint infinite= 1; + do { + concurrency_loop(&mysql, infinite, eptr); } - - generate_stats(&conclusion, eptr, head_sptr); - - if (!opt_silent) - print_conclusions(&conclusion); - if (opt_csv_str) - print_conclusions_csv(&conclusion); - - my_free((byte *)head_sptr, MYF(0)); + while (infinite++); } if (!opt_preserve) drop_schema(&mysql, create_schema_string); + } while (eptr ? 
(eptr= eptr->next) : 0); + VOID(pthread_mutex_destroy(&counter_mutex)); + VOID(pthread_cond_destroy(&count_threshhold)); + VOID(pthread_mutex_destroy(&sleeper_mutex)); + VOID(pthread_cond_destroy(&sleep_threshhold)); + if (!opt_only_print) mysql_close(&mysql); /* Close & free connection */ - - /* Remove lock file */ - my_delete(lock_file_str, MYF(0)); - /* now free all the strings we created */ if (opt_password) - my_free(opt_password, MYF(0)); + my_free((gptr)opt_password, MYF(0)); - my_free((byte *)concurrency, MYF(0)); + my_free((gptr)concurrency, MYF(0)); statement_cleanup(create_statements); - statement_cleanup(engine_statements); statement_cleanup(query_statements); + statement_cleanup(pre_statements); + statement_cleanup(post_statements); + option_cleanup(engine_options); #ifdef HAVE_SMEM if (shared_memory_base_name) - my_free(shared_memory_base_name,MYF(MY_ALLOW_ZERO_PTR)); + my_free((gptr)shared_memory_base_name, MYF(MY_ALLOW_ZERO_PTR)); #endif free_defaults(defaults_argv); my_end(0); @@ -388,6 +407,76 @@ int main(int argc, char **argv) return 0; } +void concurrency_loop(MYSQL *mysql, uint current, option_string *eptr) +{ + unsigned int x; + stats *head_sptr; + stats *sptr; + conclusions conclusion; + unsigned long long client_limit; + + head_sptr= (stats *)my_malloc(sizeof(stats) * iterations, + MYF(MY_ZEROFILL|MY_FAE|MY_WME)); + + bzero(&conclusion, sizeof(conclusions)); + + if (auto_actual_queries) + client_limit= auto_actual_queries; + else if (num_of_query) + client_limit= num_of_query / current; + else + client_limit= actual_queries; + + for (x= 0, sptr= head_sptr; x < iterations; x++, sptr++) + { + /* + We might not want to load any data, such as when we are calling + a stored_procedure that doesn't use data, or we know we already have + data in the table. + */ + if (!opt_preserve) + drop_schema(mysql, create_schema_string); + + /* First we create */ + if (create_statements) + create_schema(mysql, create_schema_string, create_statements, eptr); + + /* + If we generated GUID we need to build a list of them from creation that + we can later use. 
+ */ + if (verbose >= 2) + printf("Generating primary key list\n"); + if (auto_generate_sql_autoincrement || auto_generate_sql_guid_primary) + generate_primary_key_list(mysql, eptr); + + if (pre_statements) + run_statements(mysql, pre_statements); + + run_scheduler(sptr, query_statements, current, client_limit); + + if (post_statements) + run_statements(mysql, post_statements); + + /* We are finished with this run */ + if (auto_generate_sql_autoincrement || auto_generate_sql_guid_primary) + drop_primary_key_list(); + } + + if (verbose >= 2) + printf("Generating stats\n"); + + generate_stats(&conclusion, eptr, head_sptr); + + if (!opt_silent) + print_conclusions(&conclusion); + if (opt_csv_str) + print_conclusions_csv(&conclusion); + + my_free((gptr)head_sptr, MYF(0)); + +} + static struct my_option my_long_options[] = { @@ -397,10 +486,42 @@ static struct my_option my_long_options[] = "Generate SQL where not supplied by file or command line.", (gptr*) &auto_generate_sql, (gptr*) &auto_generate_sql, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"auto-generate-sql-add-autoincrement", OPT_SLAP_AUTO_GENERATE_ADD_AUTO, + "Add autoincrement to auto-generated tables.", + (gptr*) &auto_generate_sql_autoincrement, + (gptr*) &auto_generate_sql_autoincrement, + 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"auto-generate-sql-execute-number", OPT_SLAP_AUTO_GENERATE_EXECUTE_QUERIES, + "Set this number to generate a set number of queries to run.\n", + (gptr*) &auto_actual_queries, (gptr*) &auto_actual_queries, + 0, GET_ULL, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"auto-generate-sql-guid-primary", OPT_SLAP_AUTO_GENERATE_GUID_PRIMARY, + "Add GUID based primary keys to auto-generated tables.", + (gptr*) &auto_generate_sql_guid_primary, + (gptr*) &auto_generate_sql_guid_primary, + 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"auto-generate-sql-load-type", OPT_SLAP_AUTO_GENERATE_SQL_LOAD_TYPE, - "Load types are mixed, write, or read. Default is mixed\n", + "Load types are mixed, update, write, key, or read. 
Default is mixed\n", (gptr*) &auto_generate_sql_type, (gptr*) &auto_generate_sql_type, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"auto-generate-sql-secondary-indexes", + OPT_SLAP_AUTO_GENERATE_SECONDARY_INDEXES, + "Number of secondary indexes to add auto-generated tables.", + (gptr*) &auto_generate_sql_secondary_indexes, + (gptr*) &auto_generate_sql_secondary_indexes, 0, + GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"auto-generate-sql-unique-query-number", + OPT_SLAP_AUTO_GENERATE_UNIQUE_QUERY_NUM, + "Number of unique queries auto tests", + (gptr*) &auto_generate_sql_unique_query_number, + (gptr*) &auto_generate_sql_unique_query_number, + 0, GET_ULL, REQUIRED_ARG, 10, 0, 0, 0, 0, 0}, + {"auto-generate-sql-unique-write-number", + OPT_SLAP_AUTO_GENERATE_UNIQUE_WRITE_NUM, + "Number of unique queries for auto-generate-sql-write-number", + (gptr*) &auto_generate_sql_unique_write_number, + (gptr*) &auto_generate_sql_unique_write_number, + 0, GET_ULL, REQUIRED_ARG, 10, 0, 0, 0, 0, 0}, {"auto-generate-sql-write-number", OPT_SLAP_AUTO_GENERATE_WRITE_NUM, "Number of rows to insert to used in read and write loads (default is 100).\n", (gptr*) &auto_generate_sql_number, (gptr*) &auto_generate_sql_number, @@ -435,17 +556,14 @@ static struct my_option my_long_options[] = REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"iterations", 'i', "Number of times too run the tests.", (gptr*) &iterations, (gptr*) &iterations, 0, GET_UINT, REQUIRED_ARG, 1, 0, 0, 0, 0, 0}, - {"lock-directory", OPT_MYSQL_LOCK_DIRECTORY, "Directory to use to keep locks.", - (gptr*) &lock_directory, (gptr*) &lock_directory, 0, GET_STR, - REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"number-char-cols", 'x', "Number of VARCHAR columns to create table with if specifying --auto-generate-sql ", - (gptr*) &num_char_cols, (gptr*) &num_char_cols, 0, GET_UINT, REQUIRED_ARG, - 1, 0, 0, 0, 0, 0}, + (gptr*) &num_char_cols_opt, (gptr*) &num_char_cols_opt, 0, GET_STR, REQUIRED_ARG, + 0, 0, 0, 0, 0, 0}, {"number-int-cols", 'y', "Number of INT columns to create table with if specifying --auto-generate-sql.", - (gptr*) &num_int_cols, (gptr*) &num_int_cols, 0, GET_UINT, REQUIRED_ARG, - 1, 0, 0, 0, 0, 0}, + (gptr*) &num_int_cols_opt, (gptr*) &num_int_cols_opt, 0, GET_STR, REQUIRED_ARG, + 0, 0, 0, 0, 0, 0}, {"number-of-queries", OPT_MYSQL_NUMBER_OF_QUERY, "Limit each client to this number of queries (this is not exact).", (gptr*) &num_of_query, (gptr*) &num_of_query, 0, @@ -465,6 +583,16 @@ static struct my_option my_long_options[] = {"port", 'P', "Port number to use for connection.", (gptr*) &opt_mysql_port, (gptr*) &opt_mysql_port, 0, GET_UINT, REQUIRED_ARG, MYSQL_PORT, 0, 0, 0, 0, 0}, + {"post-query", OPT_SLAP_POST_QUERY, + "Query to run or file containing query to run after executing.", + (gptr*) &user_supplied_post_statements, + (gptr*) &user_supplied_post_statements, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"pre-query", OPT_SLAP_PRE_QUERY, + "Query to run or file containing query to run before executing.", + (gptr*) &user_supplied_pre_statements, + (gptr*) &user_supplied_pre_statements, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"preserve-schema", OPT_MYSQL_PRESERVE_SCHEMA, "Preserve the schema from the mysqlslap run, this happens unless " "--auto-generate-sql or --create are used.", @@ -485,17 +613,10 @@ static struct my_option my_long_options[] = {"silent", 's', "Run program in silent mode - no output.", (gptr*) &opt_silent, (gptr*) &opt_silent, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"slave", OPT_MYSQL_SLAP_SLAVE, "Follow master locks for other 
slap clients", - (gptr*) &opt_slave, (gptr*) &opt_slave, 0, GET_BOOL, NO_ARG, - 0, 0, 0, 0, 0, 0}, {"socket", 'S', "Socket file to use for connection.", (gptr*) &opt_mysql_unix_port, (gptr*) &opt_mysql_unix_port, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #include <sslopt-longopts.h> - {"use-threads", OPT_USE_THREADS, - "Use pthread calls instead of fork() calls (default on Windows)", - (gptr*) &opt_use_threads, (gptr*) &opt_use_threads, 0, - GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, #ifndef DONT_ALLOW_USER_CHANGE {"user", 'u', "User for login if not current user.", (gptr*) &user, (gptr*) &user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, @@ -552,7 +673,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), if (argument) { char *start= argument; - my_free(opt_password,MYF(MY_ALLOW_ZERO_PTR)); + my_free((gptr)opt_password, MYF(MY_ALLOW_ZERO_PTR)); opt_password= my_strdup(argument,MYF(MY_FAE)); while (*argument) *argument++= 'x'; /* Destroy argument */ if (*start) @@ -614,43 +735,196 @@ get_random_string(char *buf) static statement * build_table_string(void) { - char buf[512]; - int col_count; + char buf[HUGE_STRING_LENGTH]; + unsigned int col_count; statement *ptr; DYNAMIC_STRING table_string; DBUG_ENTER("build_table_string"); - DBUG_PRINT("info", ("num int cols %d num char cols %d", + DBUG_PRINT("info", ("num int cols %u num char cols %u", num_int_cols, num_char_cols)); init_dynamic_string(&table_string, "", 1024, 1024); dynstr_append(&table_string, "CREATE TABLE `t1` ("); - for (col_count= 1; col_count <= num_int_cols; col_count++) + + if (auto_generate_sql_autoincrement) { - sprintf(buf, "intcol%d INT(32)", col_count); - dynstr_append(&table_string, buf); + dynstr_append(&table_string, "id serial"); - if (col_count < num_int_cols || num_char_cols > 0) + if (num_int_cols || num_char_cols) dynstr_append(&table_string, ","); } - for (col_count= 1; col_count <= num_char_cols; col_count++) + + if (auto_generate_sql_guid_primary) { - sprintf(buf, "charcol%d VARCHAR(128)", col_count); - dynstr_append(&table_string, buf); + dynstr_append(&table_string, "id varchar(32) primary key"); - if (col_count < num_char_cols) + if (num_int_cols || num_char_cols || auto_generate_sql_guid_primary) + dynstr_append(&table_string, ","); + } + + if (auto_generate_sql_secondary_indexes) + { + unsigned int count; + + for (count= 0; count < auto_generate_sql_secondary_indexes; count++) + { + if (count) /* Except for the first pass we add a comma */ + dynstr_append(&table_string, ","); + + if (snprintf(buf, HUGE_STRING_LENGTH, "id%d varchar(32) unique key", count) + > HUGE_STRING_LENGTH) + { + fprintf(stderr, "Memory Allocation error in create table\n"); + exit(1); + } + dynstr_append(&table_string, buf); + } + + if (num_int_cols || num_char_cols) dynstr_append(&table_string, ","); } + + if (num_int_cols) + for (col_count= 1; col_count <= num_int_cols; col_count++) + { + if (num_int_cols_index) + { + if (snprintf(buf, HUGE_STRING_LENGTH, "intcol%d INT(32), INDEX(intcol%d)", + col_count, col_count) > HUGE_STRING_LENGTH) + { + fprintf(stderr, "Memory Allocation error in create table\n"); + exit(1); + } + } + else + { + if (snprintf(buf, HUGE_STRING_LENGTH, "intcol%d INT(32) ", col_count) + > HUGE_STRING_LENGTH) + { + fprintf(stderr, "Memory Allocation error in create table\n"); + exit(1); + } + } + dynstr_append(&table_string, buf); + + if (col_count < num_int_cols || num_char_cols > 0) + dynstr_append(&table_string, ","); + } + + if (num_char_cols) + for (col_count= 1; col_count <= 
num_char_cols; col_count++) + { + if (num_char_cols_index) + { + if (snprintf(buf, HUGE_STRING_LENGTH, + "charcol%d VARCHAR(128), INDEX(charcol%d) ", + col_count, col_count) > HUGE_STRING_LENGTH) + { + fprintf(stderr, "Memory Allocation error in creating table\n"); + exit(1); + } + } + else + { + if (snprintf(buf, HUGE_STRING_LENGTH, "charcol%d VARCHAR(128)", + col_count) > HUGE_STRING_LENGTH) + { + fprintf(stderr, "Memory Allocation error in creating table\n"); + exit(1); + } + } + dynstr_append(&table_string, buf); + + if (col_count < num_char_cols) + dynstr_append(&table_string, ","); + } + dynstr_append(&table_string, ")"); - ptr= (statement *)my_malloc(sizeof(statement), MYF(MY_ZEROFILL)); - ptr->string = (char *)my_malloc(table_string.length+1, MYF(MY_WME)); + ptr= (statement *)my_malloc(sizeof(statement), + MYF(MY_ZEROFILL|MY_FAE|MY_WME)); + ptr->string = (char *)my_malloc(table_string.length+1, + MYF(MY_ZEROFILL|MY_FAE|MY_WME)); ptr->length= table_string.length+1; + ptr->type= CREATE_TABLE_TYPE; strmov(ptr->string, table_string.str); dynstr_free(&table_string); DBUG_RETURN(ptr); } +/* + build_update_string() + + This function builds insert statements when the user opts to not supply + an insert file or string containing insert data +*/ +static statement * +build_update_string(void) +{ + char buf[HUGE_STRING_LENGTH]; + unsigned int col_count; + statement *ptr; + DYNAMIC_STRING update_string; + DBUG_ENTER("build_update_string"); + + init_dynamic_string(&update_string, "", 1024, 1024); + + dynstr_append(&update_string, "UPDATE t1 SET "); + + if (num_int_cols) + for (col_count= 1; col_count <= num_int_cols; col_count++) + { + if (snprintf(buf, HUGE_STRING_LENGTH, "intcol%d = %ld", col_count, + random()) > HUGE_STRING_LENGTH) + { + fprintf(stderr, "Memory Allocation error in creating update\n"); + exit(1); + } + dynstr_append(&update_string, buf); + + if (col_count < num_int_cols || num_char_cols > 0) + dynstr_append_mem(&update_string, ",", 1); + } + + if (num_char_cols) + for (col_count= 1; col_count <= num_char_cols; col_count++) + { + char rand_buffer[RAND_STRING_SIZE]; + int buf_len= get_random_string(rand_buffer); + + if (snprintf(buf, HUGE_STRING_LENGTH, "charcol%d = '%.*s'", col_count, + buf_len, rand_buffer) + > HUGE_STRING_LENGTH) + { + fprintf(stderr, "Memory Allocation error in creating update\n"); + exit(1); + } + dynstr_append(&update_string, buf); + + if (col_count < num_char_cols) + dynstr_append_mem(&update_string, ",", 1); + } + + if (auto_generate_sql_autoincrement || auto_generate_sql_guid_primary) + dynstr_append(&update_string, " WHERE id = "); + + + ptr= (statement *)my_malloc(sizeof(statement), + MYF(MY_ZEROFILL|MY_FAE|MY_WME)); + + ptr->string= (char *)my_malloc(update_string.length + 1, + MYF(MY_ZEROFILL|MY_FAE|MY_WME)); + ptr->length= update_string.length+1; + if (auto_generate_sql_autoincrement || auto_generate_sql_guid_primary) + ptr->type= UPDATE_TYPE_REQUIRES_PREFIX ; + else + ptr->type= UPDATE_TYPE; + strmov(ptr->string, update_string.str); + dynstr_free(&update_string); + DBUG_RETURN(ptr); +} + /* build_insert_string() @@ -661,38 +935,82 @@ build_table_string(void) static statement * build_insert_string(void) { - char buf[RAND_STRING_SIZE]; - int col_count; + char buf[HUGE_STRING_LENGTH]; + unsigned int col_count; statement *ptr; DYNAMIC_STRING insert_string; DBUG_ENTER("build_insert_string"); init_dynamic_string(&insert_string, "", 1024, 1024); - dynstr_append_mem(&insert_string, "INSERT INTO t1 VALUES (", 23); - for (col_count= 1; col_count <= 
num_int_cols; col_count++) + dynstr_append(&insert_string, "INSERT INTO t1 VALUES ("); + + if (auto_generate_sql_autoincrement) { - sprintf(buf, "%ld", random()); - dynstr_append(&insert_string, buf); + dynstr_append(&insert_string, "NULL"); - if (col_count < num_int_cols || num_char_cols > 0) - dynstr_append_mem(&insert_string, ",", 1); + if (num_int_cols || num_char_cols) + dynstr_append(&insert_string, ","); } - for (col_count= 1; col_count <= num_char_cols; col_count++) + + if (auto_generate_sql_guid_primary) { - int buf_len= get_random_string(buf); - dynstr_append_mem(&insert_string, "'", 1); - dynstr_append_mem(&insert_string, buf, buf_len); - dynstr_append_mem(&insert_string, "'", 1); + dynstr_append(&insert_string, "uuid()"); - if (col_count < num_char_cols) - dynstr_append_mem(&insert_string, ",", 1); + if (num_int_cols || num_char_cols) + dynstr_append(&insert_string, ","); + } + + if (auto_generate_sql_secondary_indexes) + { + unsigned int count; + + for (count= 0; count < auto_generate_sql_secondary_indexes; count++) + { + if (count) /* Except for the first pass we add a comma */ + dynstr_append(&insert_string, ","); + + dynstr_append(&insert_string, "uuid()"); + } + + if (num_int_cols || num_char_cols) + dynstr_append(&insert_string, ","); } + + if (num_int_cols) + for (col_count= 1; col_count <= num_int_cols; col_count++) + { + if (snprintf(buf, HUGE_STRING_LENGTH, "%ld", random()) > HUGE_STRING_LENGTH) + { + fprintf(stderr, "Memory Allocation error in creating insert\n"); + exit(1); + } + dynstr_append(&insert_string, buf); + + if (col_count < num_int_cols || num_char_cols > 0) + dynstr_append_mem(&insert_string, ",", 1); + } + + if (num_char_cols) + for (col_count= 1; col_count <= num_char_cols; col_count++) + { + int buf_len= get_random_string(buf); + dynstr_append_mem(&insert_string, "'", 1); + dynstr_append_mem(&insert_string, buf, buf_len); + dynstr_append_mem(&insert_string, "'", 1); + + if (col_count < num_char_cols) + dynstr_append_mem(&insert_string, ",", 1); + } + dynstr_append_mem(&insert_string, ")", 1); - ptr= (statement *)my_malloc(sizeof(statement), MYF(MY_ZEROFILL)); - ptr->string= (char *)my_malloc(insert_string.length+1, MYF(MY_WME)); + ptr= (statement *)my_malloc(sizeof(statement), + MYF(MY_ZEROFILL|MY_FAE|MY_WME)); + ptr->string= (char *)my_malloc(insert_string.length + 1, + MYF(MY_ZEROFILL|MY_FAE|MY_WME)); ptr->length= insert_string.length+1; + ptr->type= INSERT_TYPE; strmov(ptr->string, insert_string.str); dynstr_free(&insert_string); DBUG_RETURN(ptr); @@ -700,26 +1018,31 @@ build_insert_string(void) /* - build_query_string() + build_select_string() This function builds a query if the user opts to not supply a query statement or file containing a query statement */ static statement * -build_query_string(void) +build_select_string(my_bool key) { - char buf[512]; - int col_count; + char buf[HUGE_STRING_LENGTH]; + unsigned int col_count; statement *ptr; static DYNAMIC_STRING query_string; - DBUG_ENTER("build_query_string"); + DBUG_ENTER("build_select_string"); init_dynamic_string(&query_string, "", 1024, 1024); dynstr_append_mem(&query_string, "SELECT ", 7); for (col_count= 1; col_count <= num_int_cols; col_count++) { - sprintf(buf, "intcol%d", col_count); + if (snprintf(buf, HUGE_STRING_LENGTH, "intcol%d", col_count) + > HUGE_STRING_LENGTH) + { + fprintf(stderr, "Memory Allocation error in creating select\n"); + exit(1); + } dynstr_append(&query_string, buf); if (col_count < num_int_cols || num_char_cols > 0) @@ -728,17 +1051,34 @@ 
build_query_string(void) } for (col_count= 1; col_count <= num_char_cols; col_count++) { - sprintf(buf, "charcol%d", col_count); + if (snprintf(buf, HUGE_STRING_LENGTH, "charcol%d", col_count) + > HUGE_STRING_LENGTH) + { + fprintf(stderr, "Memory Allocation error in creating select\n"); + exit(1); + } dynstr_append(&query_string, buf); if (col_count < num_char_cols) dynstr_append_mem(&query_string, ",", 1); } - dynstr_append_mem(&query_string, " FROM t1", 8); - ptr= (statement *)my_malloc(sizeof(statement), MYF(MY_ZEROFILL)); - ptr->string= (char *)my_malloc(query_string.length+1, MYF(MY_WME)); + dynstr_append(&query_string, " FROM t1"); + + if ((key) && + (auto_generate_sql_autoincrement || auto_generate_sql_guid_primary)) + dynstr_append(&query_string, " WHERE id = "); + + ptr= (statement *)my_malloc(sizeof(statement), + MYF(MY_ZEROFILL|MY_FAE|MY_WME)); + ptr->string= (char *)my_malloc(query_string.length + 1, + MYF(MY_ZEROFILL|MY_FAE|MY_WME)); ptr->length= query_string.length+1; + if ((key) && + (auto_generate_sql_autoincrement || auto_generate_sql_guid_primary)) + ptr->type= SELECT_TYPE_REQUIRES_PREFIX; + else + ptr->type= SELECT_TYPE; strmov(ptr->string, query_string.str); dynstr_free(&query_string); DBUG_RETURN(ptr); @@ -773,12 +1113,40 @@ get_options(int *argc,char ***argv) exit(1); } - parse_comma(concurrency_str ? concurrency_str : "1", &concurrency); + if (auto_generate_sql && auto_generate_sql_guid_primary && + auto_generate_sql_autoincrement) + { + fprintf(stderr, + "%s: Either auto-generate-sql-guid-primary or auto-generate-sql-add-autoincrement can be used!\n", + my_progname); + exit(1); + } - if (lock_directory) - snprintf(lock_file_str, FN_REFLEN, "%s/%s", lock_directory, MYSLAPLOCK); - else - snprintf(lock_file_str, FN_REFLEN, "%s/%s", MYSLAPLOCK_DIR, MYSLAPLOCK); + /* + We are testing to make sure that if someone specified a key search + that we actually added a key! + */ + if (auto_generate_sql && auto_generate_sql_type[0] == 'k') + if ( auto_generate_sql_autoincrement == FALSE && + auto_generate_sql_guid_primary == FALSE) + { + fprintf(stderr, + "%s: Can't perform key test without a primary key!\n", + my_progname); + exit(1); + } + + + + if (auto_generate_sql && num_of_query && auto_actual_queries) + { + fprintf(stderr, + "%s: Either auto-generate-sql-execute-number or number-of-queries can be used!\n", + my_progname); + exit(1); + } + + parse_comma(concurrency_str ? 
concurrency_str : "1", &concurrency); if (opt_csv_str) { @@ -803,23 +1171,76 @@ get_options(int *argc,char ***argv) if (opt_only_print) opt_silent= TRUE; + if (num_int_cols_opt) + { + option_string *str; + parse_option(num_int_cols_opt, &str, ','); + num_int_cols= atoi(str->string); + if (str->option) + num_int_cols_index= atoi(str->option); + option_cleanup(str); + } + + if (num_char_cols_opt) + { + option_string *str; + parse_option(num_char_cols_opt, &str, ','); + num_char_cols= atoi(str->string); + if (str->option) + num_char_cols_index= atoi(str->option); + else + num_char_cols_index= 0; + option_cleanup(str); + } + + if (auto_generate_sql) { unsigned long long x= 0; statement *ptr_statement; + if (verbose >= 2) + printf("Building Create Statements for Auto\n"); + create_statements= build_table_string(); + /* + Pre-populate table + */ + for (ptr_statement= create_statements, x= 0; + x < auto_generate_sql_unique_write_number; + x++, ptr_statement= ptr_statement->next) + { + ptr_statement->next= build_insert_string(); + } + + if (verbose >= 2) + printf("Building Query Statements for Auto\n"); if (auto_generate_sql_type[0] == 'r') { - for (ptr_statement= create_statements, x= 0; - x < auto_generate_sql_number; + if (verbose >= 2) + printf("Generating SELECT Statements for Auto\n"); + + query_statements= build_select_string(FALSE); + for (ptr_statement= query_statements, x= 0; + x < auto_generate_sql_unique_query_number; x++, ptr_statement= ptr_statement->next) { - ptr_statement->next= build_insert_string(); + ptr_statement->next= build_select_string(FALSE); } + } + else if (auto_generate_sql_type[0] == 'k') + { + if (verbose >= 2) + printf("Generating SELECT for keys Statements for Auto\n"); - query_statements= build_query_string(); + query_statements= build_select_string(TRUE); + for (ptr_statement= query_statements, x= 0; + x < auto_generate_sql_unique_query_number; + x++, ptr_statement= ptr_statement->next) + { + ptr_statement->next= build_select_string(TRUE); + } } else if (auto_generate_sql_type[0] == 'w') { @@ -828,14 +1249,26 @@ get_options(int *argc,char ***argv) Archive (since strings which were identical one after another would be too easily optimized). */ + if (verbose >= 2) + printf("Generating INSERT Statements for Auto\n"); query_statements= build_insert_string(); for (ptr_statement= query_statements, x= 0; - x < auto_generate_sql_number; + x < auto_generate_sql_unique_query_number; x++, ptr_statement= ptr_statement->next) { ptr_statement->next= build_insert_string(); } } + else if (auto_generate_sql_type[0] == 'u') + { + query_statements= build_update_string(); + for (ptr_statement= query_statements, x= 0; + x < auto_generate_sql_unique_query_number; + x++, ptr_statement= ptr_statement->next) + { + ptr_statement->next= build_update_string(); + } + } else /* Mixed mode is default */ { int coin= 0; @@ -846,7 +1279,7 @@ get_options(int *argc,char ***argv) at the moment it results in "every other". 
*/ for (ptr_statement= query_statements, x= 0; - x < 4; + x < auto_generate_sql_unique_query_number; x++, ptr_statement= ptr_statement->next) { if (coin) @@ -856,7 +1289,7 @@ get_options(int *argc,char ***argv) } else { - ptr_statement->next= build_query_string(); + ptr_statement->next= build_select_string(TRUE); coin= 1; } } @@ -878,12 +1311,13 @@ get_options(int *argc,char ***argv) fprintf(stderr,"%s: Could not open create file\n", my_progname); exit(1); } - tmp_string= (char *)my_malloc(sbuf.st_size+1, MYF(MY_WME)); + tmp_string= (char *)my_malloc(sbuf.st_size + 1, + MYF(MY_ZEROFILL|MY_FAE|MY_WME)); my_read(data_file, tmp_string, sbuf.st_size, MYF(0)); tmp_string[sbuf.st_size]= '\0'; my_close(data_file,MYF(0)); parse_delimiter(tmp_string, &create_statements, delimiter[0]); - my_free(tmp_string, MYF(0)); + my_free((gptr)tmp_string, MYF(0)); } else if (create_string) { @@ -904,14 +1338,15 @@ get_options(int *argc,char ***argv) fprintf(stderr,"%s: Could not open query supplied file\n", my_progname); exit(1); } - tmp_string= (char *)my_malloc(sbuf.st_size+1, MYF(MY_WME)); + tmp_string= (char *)my_malloc(sbuf.st_size + 1, + MYF(MY_ZEROFILL|MY_FAE|MY_WME)); my_read(data_file, tmp_string, sbuf.st_size, MYF(0)); tmp_string[sbuf.st_size]= '\0'; my_close(data_file,MYF(0)); if (user_supplied_query) actual_queries= parse_delimiter(tmp_string, &query_statements, delimiter[0]); - my_free(tmp_string, MYF(0)); + my_free((gptr)tmp_string, MYF(0)); } else if (user_supplied_query) { @@ -920,8 +1355,71 @@ get_options(int *argc,char ***argv) } } + if (user_supplied_pre_statements && my_stat(user_supplied_pre_statements, &sbuf, MYF(0))) + { + File data_file; + if (!MY_S_ISREG(sbuf.st_mode)) + { + fprintf(stderr,"%s: User query supplied file was not a regular file\n", + my_progname); + exit(1); + } + if ((data_file= my_open(user_supplied_pre_statements, O_RDWR, MYF(0))) == -1) + { + fprintf(stderr,"%s: Could not open query supplied file\n", my_progname); + exit(1); + } + tmp_string= (char *)my_malloc(sbuf.st_size + 1, + MYF(MY_ZEROFILL|MY_FAE|MY_WME)); + my_read(data_file, tmp_string, sbuf.st_size, MYF(0)); + tmp_string[sbuf.st_size]= '\0'; + my_close(data_file,MYF(0)); + if (user_supplied_pre_statements) + actual_queries= parse_delimiter(tmp_string, &pre_statements, + delimiter[0]); + my_free((gptr)tmp_string, MYF(0)); + } + else if (user_supplied_pre_statements) + { + actual_queries= parse_delimiter(user_supplied_pre_statements, &pre_statements, + delimiter[0]); + } + + if (user_supplied_post_statements && my_stat(user_supplied_post_statements, &sbuf, MYF(0))) + { + File data_file; + if (!MY_S_ISREG(sbuf.st_mode)) + { + fprintf(stderr,"%s: User query supplied file was not a regular file\n", + my_progname); + exit(1); + } + if ((data_file= my_open(user_supplied_post_statements, O_RDWR, MYF(0))) == -1) + { + fprintf(stderr,"%s: Could not open query supplied file\n", my_progname); + exit(1); + } + tmp_string= (char *)my_malloc(sbuf.st_size + 1, + MYF(MY_ZEROFILL|MY_FAE|MY_WME)); + my_read(data_file, tmp_string, sbuf.st_size, MYF(0)); + tmp_string[sbuf.st_size]= '\0'; + my_close(data_file,MYF(0)); + if (user_supplied_post_statements) + parse_delimiter(tmp_string, &post_statements, + delimiter[0]); + my_free((gptr)tmp_string, MYF(0)); + } + else if (user_supplied_post_statements) + { + parse_delimiter(user_supplied_post_statements, &post_statements, + delimiter[0]); + } + + if (verbose >= 2) + printf("Parsing engines to use.\n"); + if (default_engine) - parse_delimiter(default_engine, &engine_statements, ','); + 
parse_option(default_engine, &engine_options, ','); if (tty_password) opt_password= get_tty_password(NullS); @@ -937,24 +1435,99 @@ static int run_query(MYSQL *mysql, const char *query, int len) return 0; } - if (verbose >= 2) + if (verbose >= 3) printf("%.*s;\n", len, query); return mysql_real_query(mysql, query, len); } +static int +generate_primary_key_list(MYSQL *mysql, option_string *engine_stmt) +{ + MYSQL_RES *result; + MYSQL_ROW row; + unsigned long long counter; + DBUG_ENTER("generate_primary_key_list"); + + /* + Blackhole is a special case, this allows us to test the upper end + of the server during load runs. + */ + if (opt_only_print || (engine_stmt && + strstr(engine_stmt->string, "blackhole"))) + { + primary_keys_number_of= 1; + primary_keys= (char **)my_malloc((uint)(sizeof(char *) * + primary_keys_number_of), + MYF(MY_ZEROFILL|MY_FAE|MY_WME)); + /* Yes, we strdup a const string to simplify the interface */ + primary_keys[0]= my_strdup("796c4422-1d94-102a-9d6d-00e0812d", MYF(0)); + } + else + { + if (run_query(mysql, "SELECT id from t1", strlen("SELECT id from t1"))) + { + fprintf(stderr,"%s: Cannot select GUID primary keys. (%s)\n", my_progname, + mysql_error(mysql)); + exit(1); + } + + result= mysql_store_result(mysql); + primary_keys_number_of= mysql_num_rows(result); + + /* So why check this? Blackhole :) */ + if (primary_keys_number_of) + { + /* + We create the structure and loop and create the items. + */ + primary_keys= (char **)my_malloc((uint)(sizeof(char *) * + primary_keys_number_of), + MYF(MY_ZEROFILL|MY_FAE|MY_WME)); + row= mysql_fetch_row(result); + for (counter= 0; counter < primary_keys_number_of; + counter++, row= mysql_fetch_row(result)) + primary_keys[counter]= my_strdup(row[0], MYF(0)); + } + + mysql_free_result(result); + } + + DBUG_RETURN(0); +} + +static int +drop_primary_key_list(void) +{ + unsigned long long counter; + + if (primary_keys_number_of) + { + for (counter= 0; counter < primary_keys_number_of; counter++) + my_free((gptr)primary_keys[counter], MYF(0)); + + my_free((gptr)primary_keys, MYF(0)); + } + + return 0; +} static int create_schema(MYSQL *mysql, const char *db, statement *stmt, - statement *engine_stmt) + option_string *engine_stmt) { char query[HUGE_STRING_LENGTH]; statement *ptr; + statement *after_create; int len; + ulonglong count; DBUG_ENTER("create_schema"); len= snprintf(query, HUGE_STRING_LENGTH, "CREATE SCHEMA `%s`", db); + if (verbose >= 2) + printf("Loading Pre-data\n"); + if (run_query(mysql, query, len)) { fprintf(stderr,"%s: Cannot create schema %s : %s\n", my_progname, db, @@ -968,8 +1541,9 @@ create_schema(MYSQL *mysql, const char *db, statement *stmt, } else { - if (verbose >= 2) + if (verbose >= 3) printf("%s;\n", query); + if (mysql_select_db(mysql, db)) { fprintf(stderr,"%s: Cannot select schema '%s': %s\n",my_progname, db, @@ -990,16 +1564,46 @@ create_schema(MYSQL *mysql, const char *db, statement *stmt, } } - for (ptr= stmt; ptr && ptr->length; ptr= ptr->next) + count= 0; + after_create= stmt; + +limit_not_met: + for (ptr= after_create; ptr && ptr->length; ptr= ptr->next, count++) { - if (run_query(mysql, ptr->string, ptr->length)) + if (auto_generate_sql && ( auto_generate_sql_number == count)) + break; + + if (engine_stmt && engine_stmt->option && ptr->type == CREATE_TABLE_TYPE) { - fprintf(stderr,"%s: Cannot run query %.*s ERROR : %s\n", - my_progname, (uint)ptr->length, ptr->string, mysql_error(mysql)); - exit(1); + char buffer[HUGE_STRING_LENGTH]; + + snprintf(buffer, HUGE_STRING_LENGTH, "%s %s", 
ptr->string, + engine_stmt->option); + if (run_query(mysql, buffer, strlen(buffer))) + { + fprintf(stderr,"%s: Cannot run query %.*s ERROR : %s\n", + my_progname, (uint)ptr->length, ptr->string, mysql_error(mysql)); + exit(1); + } + } + else + { + if (run_query(mysql, ptr->string, ptr->length)) + { + fprintf(stderr,"%s: Cannot run query %.*s ERROR : %s\n", + my_progname, (uint)ptr->length, ptr->string, mysql_error(mysql)); + exit(1); + } } } + if (auto_generate_sql && (auto_generate_sql_number > count )) + { + /* Special case for auto create, we don't want to create tables twice */ + after_create= stmt->next; + goto limit_not_met; + } + DBUG_RETURN(0); } @@ -1024,136 +1628,84 @@ drop_schema(MYSQL *mysql, const char *db) } static int +run_statements(MYSQL *mysql, statement *stmt) +{ + statement *ptr; + DBUG_ENTER("run_statements"); + + for (ptr= stmt; ptr && ptr->length; ptr= ptr->next) + { + if (run_query(mysql, ptr->string, ptr->length)) + { + fprintf(stderr,"%s: Cannot run query %.*s ERROR : %s\n", + my_progname, (uint)ptr->length, ptr->string, mysql_error(mysql)); + exit(1); + } + } + + DBUG_RETURN(0); +} + +static int run_scheduler(stats *sptr, statement *stmts, uint concur, ulonglong limit) { -#ifndef __WIN__ uint x; -#endif - File lock_file; struct timeval start_time, end_time; thread_context con; + pthread_t mainthread; /* Thread descriptor */ + pthread_attr_t attr; /* Thread attributes */ DBUG_ENTER("run_scheduler"); con.stmt= stmts; con.limit= limit; - con.thread= opt_use_threads ? 1 :0; - - lock_file= my_open(lock_file_str, O_CREAT|O_WRONLY|O_TRUNC, MYF(0)); - if (!opt_slave) - if (my_lock(lock_file, F_WRLCK, 0, F_TO_EOF, MYF(0))) - { - fprintf(stderr,"%s: Could not get lockfile\n", - my_progname); - exit(0); - } + pthread_attr_init(&attr); + pthread_attr_setdetachstate(&attr, + PTHREAD_CREATE_DETACHED); -#ifdef HAVE_LIBPTHREAD - if (opt_use_threads) - { - pthread_t mainthread; /* Thread descriptor */ - pthread_attr_t attr; /* Thread attributes */ - - for (x= 0; x < concur; x++) - { - pthread_attr_init(&attr); - pthread_attr_setdetachstate(&attr, - PTHREAD_CREATE_DETACHED); + pthread_mutex_lock(&counter_mutex); + thread_counter= 0; - /* now create the thread */ - if (pthread_create(&mainthread, &attr, (void *)run_task, - (void *)&con) != 0) - { - fprintf(stderr,"%s: Could not create thread\n", - my_progname); - exit(0); - } - } - } -#endif -#if !(defined(__WIN__) || defined(__NETWARE__)) -#ifdef HAVE_LIBPTHREAD - else -#endif + pthread_mutex_lock(&sleeper_mutex); + master_wakeup= 1; + pthread_mutex_unlock(&sleeper_mutex); + for (x= 0; x < concur; x++) { - fflush(NULL); - for (x= 0; x < concur; x++) + /* now you create the thread */ + if (pthread_create(&mainthread, &attr, run_task, + (void *)&con) != 0) { - int pid; - DBUG_PRINT("info", ("x: %d concurrency: %u", x, *concurrency)); - pid= fork(); - switch(pid) - { - case 0: - /* child */ - DBUG_PRINT("info", ("fork returned 0, calling task(\"%s\"), pid %d gid %d", - stmts ? 
stmts->string : "", pid, getgid())); - if (verbose >= 2) - fprintf(stderr, - "%s: fork returned 0, calling task pid %d gid %d\n", - my_progname, pid, getgid()); - run_task(&con); - exit(0); - break; - case -1: - /* error */ - DBUG_PRINT("info", - ("fork returned -1, failing pid %d gid %d", pid, getgid())); - fprintf(stderr, - "%s: Failed on fork: -1, max procs per parent exceeded.\n", - my_progname); - /*exit(1);*/ - goto WAIT; - default: - /* parent, forked */ - DBUG_PRINT("info", ("default, break: pid %d gid %d", pid, getgid())); - if (verbose >= 2) - fprintf(stderr,"%s: fork returned %d, gid %d\n", - my_progname, pid, getgid()); - break; - } + fprintf(stderr,"%s: Could not create thread\n", + my_progname); + exit(0); } + thread_counter++; } -#endif + pthread_mutex_unlock(&counter_mutex); + pthread_attr_destroy(&attr); - /* Lets release use some clients! */ - if (!opt_slave) - my_lock(lock_file, F_UNLCK, 0, F_TO_EOF, MYF(0)); + pthread_mutex_lock(&sleeper_mutex); + master_wakeup= 0; + pthread_mutex_unlock(&sleeper_mutex); + pthread_cond_broadcast(&sleep_threshhold); gettimeofday(&start_time, NULL); /* - We look to grab a write lock at this point. Once we get it we know that - all clients have completed their work. + We loop until we know that all children have cleaned up. */ - if (opt_use_threads) + pthread_mutex_lock(&counter_mutex); + while (thread_counter) { - if (my_lock(lock_file, F_WRLCK, 0, F_TO_EOF, MYF(0))) - { - fprintf(stderr,"%s: Could not get lockfile\n", - my_progname); - exit(0); - } - my_lock(lock_file, F_UNLCK, 0, F_TO_EOF, MYF(0)); - } -#ifndef __WIN__ - else - { -WAIT: - while (x--) - { - int status, pid; - pid= wait(&status); - DBUG_PRINT("info", ("Parent: child %d status %d", pid, status)); - if (status != 0) - printf("%s: Child %d died with the status %d\n", - my_progname, pid, status); - } + struct timespec abstime; + + set_timespec(abstime, 3); + pthread_cond_timedwait(&count_threshhold, &counter_mutex, &abstime); } -#endif + pthread_mutex_unlock(&counter_mutex); + gettimeofday(&end_time, NULL); - my_close(lock_file, MYF(0)); sptr->timing= timedif(end_time, start_time); sptr->users= concur; @@ -1163,28 +1715,41 @@ WAIT: } -int -run_task(thread_context *con) +pthread_handler_t run_task(void *p) { ulonglong counter= 0, queries; - File lock_file= -1; MYSQL *mysql; MYSQL_RES *result; MYSQL_ROW row; statement *ptr; + thread_context *con= (thread_context *)p; DBUG_ENTER("run_task"); DBUG_PRINT("info", ("task script \"%s\"", con->stmt ? 
con->stmt->string : "")); + pthread_mutex_lock(&sleeper_mutex); + while (master_wakeup) + { + pthread_cond_wait(&sleep_threshhold, &sleeper_mutex); + } + pthread_mutex_unlock(&sleeper_mutex); + if (!(mysql= mysql_init(NULL))) - goto end; + { + fprintf(stderr,"%s: mysql_init() failed ERROR : %s\n", + my_progname, mysql_error(mysql)); + exit(0); + } - if (con->thread && mysql_thread_init()) - goto end; + if (mysql_thread_init()) + { + fprintf(stderr,"%s: mysql_thread_init() failed ERROR : %s\n", + my_progname, mysql_error(mysql)); + exit(0); + } DBUG_PRINT("info", ("trying to connect to host %s as user %s", host, user)); - lock_file= my_open(lock_file_str, O_RDWR, MYF(0)); - my_lock(lock_file, F_RDLCK, 0, F_TO_EOF, MYF(0)); + if (!opt_only_print) { /* Connect to server */ @@ -1213,18 +1778,59 @@ run_task(thread_context *con) } DBUG_PRINT("info", ("connected.")); if (verbose >= 3) - fprintf(stderr, "connected!\n"); + printf("connected!\n"); queries= 0; limit_not_met: for (ptr= con->stmt; ptr && ptr->length; ptr= ptr->next) { - if (run_query(mysql, ptr->string, ptr->length)) + /* + We have to execute differently based on query type. This should become a function. + */ + if ((ptr->type == UPDATE_TYPE_REQUIRES_PREFIX) || + (ptr->type == SELECT_TYPE_REQUIRES_PREFIX)) { - fprintf(stderr,"%s: Cannot run query %.*s ERROR : %s\n", - my_progname, (uint)ptr->length, ptr->string, mysql_error(mysql)); - goto end; + int length; + unsigned int key_val; + char *key; + char buffer[HUGE_STRING_LENGTH]; + + /* + This should only happen if some sort of new engine was + implemented that didn't properly handle UPDATEs. + + Just in case someone runs this under an experimental engine we don't + want a crash so the if() is placed here. + */ + DBUG_ASSERT(primary_keys_number_of); + if (primary_keys_number_of) + { + key_val= (unsigned int)(random() % primary_keys_number_of); + key= primary_keys[key_val]; + + DBUG_ASSERT(key); + + length= snprintf(buffer, HUGE_STRING_LENGTH, "%.*s '%s'", + (int)ptr->length, ptr->string, key); + + if (run_query(mysql, buffer, length)) + { + fprintf(stderr,"%s: Cannot run query %.*s ERROR : %s\n", + my_progname, (uint)length, buffer, mysql_error(mysql)); + exit(0); + } + } } + else + { + if (run_query(mysql, ptr->string, ptr->length)) + { + fprintf(stderr,"%s: Cannot run query %.*s ERROR : %s\n", + my_progname, (uint)ptr->length, ptr->string, mysql_error(mysql)); + exit(0); + } + } + if (mysql_field_count(mysql)) { result= mysql_store_result(mysql); @@ -1243,20 +1849,97 @@ limit_not_met: end: - if (lock_file != -1) - { - my_lock(lock_file, F_UNLCK, 0, F_TO_EOF, MYF(0)); - my_close(lock_file, MYF(0)); - } - if (!opt_only_print) mysql_close(mysql); - if (con->thread) - my_thread_end(); + my_thread_end(); + + pthread_mutex_lock(&counter_mutex); + thread_counter--; + pthread_cond_signal(&count_threshhold); + pthread_mutex_unlock(&counter_mutex); + DBUG_RETURN(0); } +uint +parse_option(const char *origin, option_string **stmt, char delm) +{ + char *retstr; + char *ptr= (char *)origin; + option_string **sptr= stmt; + option_string *tmp; + uint length= strlen(origin); + uint count= 0; /* We know that there is always one */ + + for (tmp= *sptr= (option_string *)my_malloc(sizeof(option_string), + MYF(MY_ZEROFILL|MY_FAE|MY_WME)); + (retstr= strchr(ptr, delm)); + tmp->next= (option_string *)my_malloc(sizeof(option_string), + MYF(MY_ZEROFILL|MY_FAE|MY_WME)), + tmp= tmp->next) + { + char buffer[HUGE_STRING_LENGTH]; + char *buffer_ptr; + + count++; + strncpy(buffer, ptr, (size_t)(retstr - ptr)); + if 
((buffer_ptr= strchr(buffer, ':'))) + { + char *option_ptr; + + tmp->length= (size_t)(buffer_ptr - buffer); + tmp->string= my_strndup(ptr, (uint)tmp->length, MYF(MY_FAE)); + + option_ptr= ptr + 1 + tmp->length; + + /* Move past the : and the first string */ + tmp->option_length= (size_t)(retstr - option_ptr); + tmp->option= my_strndup(option_ptr, (uint)tmp->option_length, + MYF(MY_FAE)); + } + else + { + tmp->string= my_strndup(ptr, (size_t)(retstr - ptr), MYF(MY_FAE)); + tmp->length= (size_t)(retstr - ptr); + } + + ptr+= retstr - ptr + 1; + if (isspace(*ptr)) + ptr++; + count++; + } + + if (ptr != origin+length) + { + char *origin_ptr; + + if ((origin_ptr= strchr(ptr, ':'))) + { + char *option_ptr; + + tmp->length= (size_t)(origin_ptr - ptr); + tmp->string= my_strndup(origin, tmp->length, MYF(MY_FAE)); + + option_ptr= (char *)ptr + 1 + tmp->length; + + /* Move past the : and the first string */ + tmp->option_length= (size_t)((ptr + length) - option_ptr); + tmp->option= my_strndup(option_ptr, tmp->option_length, + MYF(MY_FAE)); + } + else + { + tmp->length= (size_t)((ptr + length) - ptr); + tmp->string= my_strndup(ptr, tmp->length, MYF(MY_FAE)); + } + + count++; + } + + return count; +} + uint parse_delimiter(const char *script, statement **stmt, char delm) @@ -1268,13 +1951,15 @@ parse_delimiter(const char *script, statement **stmt, char delm) uint length= strlen(script); uint count= 0; /* We know that there is always one */ - for (tmp= *sptr= (statement *)my_malloc(sizeof(statement), MYF(MY_ZEROFILL)); + for (tmp= *sptr= (statement *)my_malloc(sizeof(statement), + MYF(MY_ZEROFILL|MY_FAE|MY_WME)); (retstr= strchr(ptr, delm)); - tmp->next= (statement *)my_malloc(sizeof(statement), MYF(MY_ZEROFILL)), + tmp->next= (statement *)my_malloc(sizeof(statement), + MYF(MY_ZEROFILL|MY_FAE|MY_WME)), tmp= tmp->next) { count++; - tmp->string= my_strndup(ptr, (size_t)(retstr - ptr), MYF(MY_FAE)); + tmp->string= my_strndup(ptr, (uint)(retstr - ptr), MYF(MY_FAE)); tmp->length= (size_t)(retstr - ptr); ptr+= retstr - ptr + 1; if (isspace(*ptr)) @@ -1284,7 +1969,7 @@ parse_delimiter(const char *script, statement **stmt, char delm) if (ptr != script+length) { - tmp->string= my_strndup(ptr, (size_t)((script + length) - ptr), + tmp->string= my_strndup(ptr, (uint)((script + length) - ptr), MYF(MY_FAE)); tmp->length= (size_t)((script + length) - ptr); count++; @@ -1306,7 +1991,8 @@ parse_comma(const char *string, uint **range) if (*ptr == ',') count++; /* One extra spot for the NULL */ - nptr= *range= (uint *)my_malloc(sizeof(uint) * (count + 1), MYF(MY_ZEROFILL)); + nptr= *range= (uint *)my_malloc(sizeof(uint) * (count + 1), + MYF(MY_ZEROFILL|MY_FAE|MY_WME)); ptr= (char *)string; x= 0; @@ -1341,23 +2027,25 @@ void print_conclusions_csv(conclusions *con) { char buffer[HUGE_STRING_LENGTH]; + const char *ptr= auto_generate_sql_type ? auto_generate_sql_type : "query"; snprintf(buffer, HUGE_STRING_LENGTH, - "%s,query,%ld.%03ld,%ld.%03ld,%ld.%03ld,%d,%llu\n", + "%s,%s,%ld.%03ld,%ld.%03ld,%ld.%03ld,%d,%llu\n", con->engine ? 
con->engine : "", /* Storage engine we ran against */ + ptr, /* Load type */ con->avg_timing / 1000, con->avg_timing % 1000, /* Time to load */ con->min_timing / 1000, con->min_timing % 1000, /* Min time */ con->max_timing / 1000, con->max_timing % 1000, /* Max time */ con->users, /* Children used */ con->avg_rows /* Queries run */ ); - my_write(csv_file, buffer, strlen(buffer), MYF(0)); + my_write(csv_file, buffer, (uint)strlen(buffer), MYF(0)); } void -generate_stats(conclusions *con, statement *eng, stats *sptr) +generate_stats(conclusions *con, option_string *eng, stats *sptr) { stats *ptr; - int x; + unsigned int x; con->min_timing= sptr->timing; con->max_timing= sptr->timing; @@ -1387,6 +2075,24 @@ generate_stats(conclusions *con, statement *eng, stats *sptr) } void +option_cleanup(option_string *stmt) +{ + option_string *ptr, *nptr; + if (!stmt) + return; + + for (ptr= stmt; ptr; ptr= nptr) + { + nptr= ptr->next; + if (ptr->string) + my_free((gptr)ptr->string, MYF(0)); + if (ptr->option) + my_free((gptr)ptr->option, MYF(0)); + my_free((gptr)(byte *)ptr, MYF(0)); + } +} + +void statement_cleanup(statement *stmt) { statement *ptr, *nptr; @@ -1397,7 +2103,7 @@ statement_cleanup(statement *stmt) { nptr= ptr->next; if (ptr->string) - my_free(ptr->string, MYF(0)); - my_free((byte *)ptr, MYF(0)); + my_free((gptr)ptr->string, MYF(0)); + my_free((gptr)(byte *)ptr, MYF(0)); } } diff --git a/client/mysqltest.c b/client/mysqltest.c index 528e169f471..bb6cc037b84 100644 --- a/client/mysqltest.c +++ b/client/mysqltest.c @@ -106,7 +106,7 @@ static my_bool parsing_disabled= 0; static my_bool info_flag; static my_bool display_result_vertically= FALSE, display_metadata= FALSE; static my_bool disable_query_log= 0, disable_result_log= 0; -static my_bool disable_warnings= 0, disable_ps_warnings= 0; +static my_bool disable_warnings= 0; static my_bool disable_info= 1; static my_bool abort_on_error= 1; static my_bool server_initialized= 0; @@ -266,7 +266,6 @@ enum enum_commands { Q_ENABLE_RESULT_LOG, Q_DISABLE_RESULT_LOG, Q_WAIT_FOR_SLAVE_TO_STOP, Q_ENABLE_WARNINGS, Q_DISABLE_WARNINGS, - Q_ENABLE_PS_WARNINGS, Q_DISABLE_PS_WARNINGS, Q_ENABLE_INFO, Q_DISABLE_INFO, Q_ENABLE_METADATA, Q_DISABLE_METADATA, Q_EXEC, Q_DELIMITER, @@ -330,8 +329,6 @@ const char *command_names[]= "wait_for_slave_to_stop", "enable_warnings", "disable_warnings", - "enable_ps_warnings", - "disable_ps_warnings", "enable_info", "disable_info", "enable_metadata", @@ -6088,8 +6085,6 @@ int main(int argc, char **argv) case Q_DISABLE_RESULT_LOG: disable_result_log=1; break; case Q_ENABLE_WARNINGS: disable_warnings=0; break; case Q_DISABLE_WARNINGS: disable_warnings=1; break; - case Q_ENABLE_PS_WARNINGS: disable_ps_warnings=0; break; - case Q_DISABLE_PS_WARNINGS: disable_ps_warnings=1; break; case Q_ENABLE_INFO: disable_info=0; break; case Q_DISABLE_INFO: disable_info=1; break; case Q_ENABLE_METADATA: display_metadata=1; break; diff --git a/config/ac-macros/character_sets.m4 b/config/ac-macros/character_sets.m4 index 1ab6e7dd780..8c3e8ca73b7 100644 --- a/config/ac-macros/character_sets.m4 +++ b/config/ac-macros/character_sets.m4 @@ -429,3 +429,16 @@ then else AC_MSG_RESULT(no) fi + + +# Shall we build experimental collations +AC_ARG_WITH(experimental-collations, + [], + [with_exp_coll=$withval], + [with_exp_coll=no] +) + +if test "$with_exp_coll" = "yes" +then + AC_DEFINE([HAVE_UTF8_GENERAL_CS], [1], [certain Japanese customer]) +fi diff --git a/configure.in b/configure.in index 99f05be35a5..50ca08ab1ed 100644 --- a/configure.in +++ 
b/configure.in @@ -7,7 +7,7 @@ AC_INIT(sql/mysqld.cc) AC_CANONICAL_SYSTEM # The Docs Makefile.am parses this line! # remember to also update version.c in ndb -AM_INIT_AUTOMAKE(mysql, 5.1.17-beta) +AM_INIT_AUTOMAKE(mysql, 5.1.18-beta) AM_CONFIG_HEADER(config.h) PROTOCOL_VERSION=10 @@ -2296,12 +2296,18 @@ AC_ARG_WITH(man, [with_man=yes] ) -if test "$with_man" = "yes" +if test X"$with_man" = Xyes then man_dirs="man" - man1_files=`ls -1 $srcdir/man/*.1 | sed -e 's;^.*man/;;'` + if test X"$have_ndbcluster" = Xyes + then + man1_files=`ls $srcdir/man/*.1 | sed -e 's;^.*man/;;'` + man8_files=`ls $srcdir/man/*.8 | sed -e 's;^.*man/;;'` + else + man1_files=`ls $srcdir/man/*.1 | grep -v '/ndb' | sed -e 's;^.*man/;;'` + man8_files=`ls $srcdir/man/*.8 | grep -v '/ndb' | sed -e 's;^.*man/;;'` + fi man1_files=`echo $man1_files` - man8_files=`ls -1 $srcdir/man/*.8 | sed -e 's;^.*man/;;'` man8_files=`echo $man8_files` else man_dirs="" diff --git a/extra/CMakeLists.txt b/extra/CMakeLists.txt index ed40ec3070a..a7a5e3e7b66 100644 --- a/extra/CMakeLists.txt +++ b/extra/CMakeLists.txt @@ -38,7 +38,7 @@ ADD_CUSTOM_TARGET(GenError DEPENDS ${PROJECT_SOURCE_DIR}/include/mysqld_error.h) ADD_EXECUTABLE(my_print_defaults my_print_defaults.c) -TARGET_LINK_LIBRARIES(my_print_defaults strings mysys dbug taocrypt odbc32 odbccp32 wsock32) +TARGET_LINK_LIBRARIES(my_print_defaults strings mysys dbug taocrypt wsock32) ADD_EXECUTABLE(perror perror.c) TARGET_LINK_LIBRARIES(perror strings mysys dbug wsock32) diff --git a/extra/yassl/taocrypt/benchmark/Makefile.am b/extra/yassl/taocrypt/benchmark/Makefile.am index 891dd532b98..2fe1c90c90d 100644 --- a/extra/yassl/taocrypt/benchmark/Makefile.am +++ b/extra/yassl/taocrypt/benchmark/Makefile.am @@ -1,5 +1,5 @@ INCLUDES = -I$(srcdir)/../include -I$(srcdir)/../mySTL -bin_PROGRAMS = benchmark +noinst_PROGRAMS = benchmark benchmark_SOURCES = benchmark.cpp benchmark_LDADD = $(top_builddir)/extra/yassl/taocrypt/src/libtaocrypt.la benchmark_CXXFLAGS = -DYASSL_PURE_C diff --git a/extra/yassl/taocrypt/taocrypt.vcproj b/extra/yassl/taocrypt/taocrypt.vcproj index 7ffcb664346..bcbc0f82192 100755 --- a/extra/yassl/taocrypt/taocrypt.vcproj +++ b/extra/yassl/taocrypt/taocrypt.vcproj @@ -12,8 +12,8 @@ <Configurations> <Configuration Name="Debug|Win32" - OutputDirectory=".\Debug" - IntermediateDirectory=".\Debug" + OutputDirectory=".\debug_obj" + IntermediateDirectory=".\debug_obj" ConfigurationType="4" UseOfMFC="0" ATLMinimizesCRunTimeLibraryUsage="FALSE" @@ -21,16 +21,17 @@ <Tool Name="VCCLCompilerTool" Optimization="0" + OptimizeForProcessor="2" AdditionalIncludeDirectories="include,mySTL" PreprocessorDefinitions="WIN32;_DEBUG;_LIB" ExceptionHandling="FALSE" BasicRuntimeChecks="3" RuntimeLibrary="1" UsePrecompiledHeader="2" - PrecompiledHeaderFile=".\Debug/taocrypt.pch" - AssemblerListingLocation=".\Debug/" - ObjectFile=".\Debug/" - ProgramDataBaseFileName=".\Debug/" + PrecompiledHeaderFile=".\debug_obj/taocrypt.pch" + AssemblerListingLocation=".\debug_obj/" + ObjectFile=".\debug_obj/" + ProgramDataBaseFileName=".\debug_obj/" BrowseInformation="1" WarningLevel="3" SuppressStartupBanner="TRUE" @@ -40,7 +41,7 @@ Name="VCCustomBuildTool"/> <Tool Name="VCLibrarianTool" - OutputFile=".\Debug\taocrypt.lib" + OutputFile=".\debug_obj\taocrypt.lib" SuppressStartupBanner="TRUE"/> <Tool Name="VCMIDLTool"/> @@ -65,8 +66,8 @@ </Configuration> <Configuration Name="Release|Win32" - OutputDirectory=".\Release" - IntermediateDirectory=".\Release" + OutputDirectory=".\release_obj" + 
IntermediateDirectory=".\release_obj" ConfigurationType="4" UseOfMFC="0" ATLMinimizesCRunTimeLibraryUsage="FALSE" @@ -75,6 +76,7 @@ Name="VCCLCompilerTool" Optimization="2" InlineFunctionExpansion="1" + OptimizeForProcessor="2" AdditionalIncludeDirectories="include,mySTL" PreprocessorDefinitions="WIN32;NDEBUG;_LIB" StringPooling="TRUE" @@ -82,10 +84,10 @@ RuntimeLibrary="0" EnableFunctionLevelLinking="TRUE" UsePrecompiledHeader="2" - PrecompiledHeaderFile=".\Release/taocrypt.pch" - AssemblerListingLocation=".\Release/" - ObjectFile=".\Release/" - ProgramDataBaseFileName=".\Release/" + PrecompiledHeaderFile=".\release_obj/taocrypt.pch" + AssemblerListingLocation=".\release_obj/" + ObjectFile=".\release_obj/" + ProgramDataBaseFileName=".\release_obj/" WarningLevel="3" SuppressStartupBanner="TRUE" CompileAs="0"/> @@ -93,7 +95,7 @@ Name="VCCustomBuildTool"/> <Tool Name="VCLibrarianTool" - OutputFile=".\Release\taocrypt.lib" + OutputFile=".\release_obj\taocrypt.lib" SuppressStartupBanner="TRUE"/> <Tool Name="VCMIDLTool"/> @@ -125,423 +127,63 @@ Filter="cpp;c;cxx;rc;def;r;odl;idl;hpj;bat"> <File RelativePath="src\aes.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> <File RelativePath="src\aestables.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> <File RelativePath="src\algebra.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> <File RelativePath="src\arc4.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> <File RelativePath="src\asn.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> <File RelativePath="src\coding.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - 
AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> <File RelativePath="src\des.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> <File RelativePath="src\dh.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> <File RelativePath="src\dsa.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> <File RelativePath="src\file.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> <File RelativePath="src\hash.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> <File RelativePath="src\integer.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> <File RelativePath="src\md2.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> 
</File> <File RelativePath="src\md4.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> <File RelativePath="src\md5.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> <File RelativePath="src\misc.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> <File RelativePath="src\random.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> <File RelativePath="src\ripemd.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> <File RelativePath="src\rsa.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> <File RelativePath="src\sha.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> </Filter> <Filter diff --git a/extra/yassl/yassl.vcproj b/extra/yassl/yassl.vcproj index ec7dceb5096..bc020747096 100755 --- a/extra/yassl/yassl.vcproj +++ b/extra/yassl/yassl.vcproj @@ -12,8 +12,8 @@ <Configurations> <Configuration Name="Debug|Win32" - OutputDirectory=".\Debug" - 
IntermediateDirectory=".\Debug" + OutputDirectory=".\debug_obj" + IntermediateDirectory=".\debug_obj" ConfigurationType="4" UseOfMFC="0" ATLMinimizesCRunTimeLibraryUsage="FALSE" @@ -21,16 +21,17 @@ <Tool Name="VCCLCompilerTool" Optimization="0" + OptimizeForProcessor="2" AdditionalIncludeDirectories="include,taocrypt\include,taocrypt\mySTL" PreprocessorDefinitions="WIN32;_DEBUG;_LIB;YASSL_PREFIX" ExceptionHandling="FALSE" BasicRuntimeChecks="3" RuntimeLibrary="1" UsePrecompiledHeader="2" - PrecompiledHeaderFile=".\Debug/yassl.pch" - AssemblerListingLocation=".\Debug/" - ObjectFile=".\Debug/" - ProgramDataBaseFileName=".\Debug/" + PrecompiledHeaderFile=".\debug_obj/yassl.pch" + AssemblerListingLocation=".\debug_obj/" + ObjectFile=".\debug_obj/" + ProgramDataBaseFileName=".\debug_obj/" BrowseInformation="1" WarningLevel="3" SuppressStartupBanner="TRUE" @@ -40,7 +41,7 @@ Name="VCCustomBuildTool"/> <Tool Name="VCLibrarianTool" - OutputFile=".\Debug\yassl.lib" + OutputFile=".\debug_obj\yassl.lib" SuppressStartupBanner="TRUE"/> <Tool Name="VCMIDLTool"/> @@ -65,8 +66,8 @@ </Configuration> <Configuration Name="Release|Win32" - OutputDirectory=".\Release" - IntermediateDirectory=".\Release" + OutputDirectory=".\release_obj" + IntermediateDirectory=".\release_obj" ConfigurationType="4" UseOfMFC="0" ATLMinimizesCRunTimeLibraryUsage="FALSE" @@ -75,6 +76,7 @@ Name="VCCLCompilerTool" Optimization="2" InlineFunctionExpansion="1" + OptimizeForProcessor="2" AdditionalIncludeDirectories="include,taocrypt\include,taocrypt\mySTL" PreprocessorDefinitions="WIN32;NDEBUG;_LIB;YASSL_PREFIX" StringPooling="TRUE" @@ -82,10 +84,10 @@ RuntimeLibrary="0" EnableFunctionLevelLinking="TRUE" UsePrecompiledHeader="2" - PrecompiledHeaderFile=".\Release/yassl.pch" - AssemblerListingLocation=".\Release/" - ObjectFile=".\Release/" - ProgramDataBaseFileName=".\Release/" + PrecompiledHeaderFile=".\release_obj/yassl.pch" + AssemblerListingLocation=".\release_obj/" + ObjectFile=".\release_obj/" + ProgramDataBaseFileName=".\release_obj/" WarningLevel="3" SuppressStartupBanner="TRUE" CompileAs="0"/> @@ -93,7 +95,7 @@ Name="VCCustomBuildTool"/> <Tool Name="VCLibrarianTool" - OutputFile=".\Release\yassl.lib" + OutputFile=".\release_obj\yassl.lib" SuppressStartupBanner="TRUE"/> <Tool Name="VCMIDLTool"/> @@ -125,255 +127,39 @@ Filter="cpp;c;cxx;rc;def;r;odl;idl;hpj;bat"> <File RelativePath="src\buffer.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> <File RelativePath="src\cert_wrapper.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> <File RelativePath="src\crypto_wrapper.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> 
- </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> <File RelativePath="src\handshake.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> <File RelativePath="src\lock.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> <File RelativePath="src\log.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> <File RelativePath="src\socket_wrapper.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> <File RelativePath="src\ssl.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> <File RelativePath="src\timer.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> <File RelativePath="src\yassl_error.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> <File RelativePath="src\yassl_imp.cpp"> - <FileConfiguration - Name="Debug|Win32"> 
- <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> <File RelativePath="src\yassl_int.cpp"> - <FileConfiguration - Name="Debug|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="0" - AdditionalIncludeDirectories="" - PreprocessorDefinitions="" - BasicRuntimeChecks="3" - BrowseInformation="1"/> - </FileConfiguration> - <FileConfiguration - Name="Release|Win32"> - <Tool - Name="VCCLCompilerTool" - Optimization="2" - AdditionalIncludeDirectories="" - PreprocessorDefinitions=""/> - </FileConfiguration> </File> </Filter> <Filter diff --git a/include/config-win.h b/include/config-win.h index fadf24c732f..8d6f8885626 100644 --- a/include/config-win.h +++ b/include/config-win.h @@ -425,14 +425,8 @@ inline double ulonglong2double(ulonglong value) #define shared_memory_buffer_length 16000 #define default_shared_memory_base_name "MYSQL" -#ifdef CYBOZU -#define MYSQL_DEFAULT_CHARSET_NAME "utf8" -#define MYSQL_DEFAULT_COLLATION_NAME "utf8_general_cs" -#define HAVE_UTF8_GENERAL_CS 1 -#else #define MYSQL_DEFAULT_CHARSET_NAME "latin1" #define MYSQL_DEFAULT_COLLATION_NAME "latin1_swedish_ci" -#endif #define HAVE_SPATIAL 1 #define HAVE_RTREE_KEYS 1 @@ -443,10 +437,8 @@ inline double ulonglong2double(ulonglong value) /* Define charsets you want */ /* #undef HAVE_CHARSET_armscii8 */ /* #undef HAVE_CHARSET_ascii */ -#ifndef CYBOZU #define HAVE_CHARSET_big5 1 #define HAVE_CHARSET_cp1250 1 -#endif /* #undef HAVE_CHARSET_cp1251 */ /* #undef HAVE_CHARSET_cp1256 */ /* #undef HAVE_CHARSET_cp1257 */ @@ -455,33 +447,27 @@ inline double ulonglong2double(ulonglong value) /* #undef HAVE_CHARSET_cp866 */ #define HAVE_CHARSET_cp932 1 /* #undef HAVE_CHARSET_dec8 */ -#ifndef CYBOZU #define HAVE_CHARSET_eucjpms 1 #define HAVE_CHARSET_euckr 1 #define HAVE_CHARSET_gb2312 1 #define HAVE_CHARSET_gbk 1 -#endif /* #undef HAVE_CHARSET_greek */ /* #undef HAVE_CHARSET_hebrew */ /* #undef HAVE_CHARSET_hp8 */ /* #undef HAVE_CHARSET_keybcs2 */ /* #undef HAVE_CHARSET_koi8r */ /* #undef HAVE_CHARSET_koi8u */ -#ifndef CYBOZU #define HAVE_CHARSET_latin1 1 #define HAVE_CHARSET_latin2 1 -#endif /* #undef HAVE_CHARSET_latin5 */ /* #undef HAVE_CHARSET_latin7 */ /* #undef HAVE_CHARSET_macce */ /* #undef HAVE_CHARSET_macroman */ #define HAVE_CHARSET_sjis 1 /* #undef HAVE_CHARSET_swe7 */ -#ifndef CYBOZU #define HAVE_CHARSET_tis620 1 #define HAVE_CHARSET_ucs2 1 #define HAVE_CHARSET_ujis 1 -#endif #define HAVE_CHARSET_utf8 1 #define HAVE_UCA_COLLATIONS 1 diff --git a/include/my_global.h b/include/my_global.h index b4886700b57..8b35f995a48 100644 --- a/include/my_global.h +++ b/include/my_global.h @@ -1489,12 +1489,25 @@ do { doubleget_union _tmp; \ #define dlerror() "" #endif +#ifndef __NETWARE__ /* - Include standard definitions of operator new and delete. + * Include standard definitions of operator new and delete. */ #ifdef __cplusplus #include <new> #endif +#else +/* + * Define placement versions of operator new and operator delete since + * we don't have <new> when building for Netware. 
+ */ +#ifdef __cplusplus +inline void *operator new(size_t, void *ptr) { return ptr; } +inline void *operator new[](size_t, void *ptr) { return ptr; } +inline void operator delete(void*, void*) { /* Do nothing */ } +inline void operator delete[](void*, void*) { /* Do nothing */ } +#endif +#endif /* Length of decimal number represented by INT32. */ #define MY_INT32_NUM_DECIMAL_DIGITS 11 diff --git a/include/my_pthread.h b/include/my_pthread.h index 4df105e1b20..340dc32981a 100644 --- a/include/my_pthread.h +++ b/include/my_pthread.h @@ -701,6 +701,15 @@ extern uint my_thread_end_wait_time; Keep track of shutdown,signal, and main threads so that my_end() will not report errors with them */ + +/* Which kind of thread library is in use */ + +#define THD_LIB_OTHER 1 +#define THD_LIB_NPTL 2 +#define THD_LIB_LT 4 + +extern uint thd_lib_detected; + /* statistics_xxx functions are for not essential statistic */ #ifndef thread_safe_increment diff --git a/include/my_sys.h b/include/my_sys.h index 8e831b448d7..cff8c73c9ff 100644 --- a/include/my_sys.h +++ b/include/my_sys.h @@ -312,8 +312,17 @@ struct st_my_file_info extern struct st_my_file_info *my_file_info; +typedef struct st_dynamic_array +{ + char *buffer; + uint elements,max_element; + uint alloc_increment; + uint size_of_element; +} DYNAMIC_ARRAY; + typedef struct st_my_tmpdir { + DYNAMIC_ARRAY full_list; char **list; uint cur, max; #ifdef THREAD @@ -321,14 +330,6 @@ typedef struct st_my_tmpdir #endif } MY_TMPDIR; -typedef struct st_dynamic_array -{ - char *buffer; - uint elements,max_element; - uint alloc_increment; - uint size_of_element; -} DYNAMIC_ARRAY; - typedef struct st_dynamic_string { char *str; diff --git a/include/thr_alarm.h b/include/thr_alarm.h index 14dd538c9e4..a2694ba105b 100644 --- a/include/thr_alarm.h +++ b/include/thr_alarm.h @@ -24,11 +24,6 @@ extern "C" { #ifndef USE_ALARM_THREAD #define USE_ONE_SIGNAL_HAND /* One must call process_alarm */ #endif -#ifdef HAVE_LINUXTHREADS -#define THR_CLIENT_ALARM SIGALRM -#else -#define THR_CLIENT_ALARM SIGUSR1 -#endif #ifdef HAVE_rts_threads #undef USE_ONE_SIGNAL_HAND #define USE_ALARM_THREAD @@ -90,6 +85,9 @@ typedef struct st_alarm { my_bool malloced; } ALARM; +extern uint thr_client_alarm; +extern pthread_t alarm_thread; + #define thr_alarm_init(A) (*(A))=0 #define thr_alarm_in_use(A) (*(A)!= 0) void init_thr_alarm(uint max_alarm); diff --git a/mysql-test/extra/binlog_tests/binlog.test b/mysql-test/extra/binlog_tests/binlog.test index 834edcff474..c59685a0e65 100644 --- a/mysql-test/extra/binlog_tests/binlog.test +++ b/mysql-test/extra/binlog_tests/binlog.test @@ -20,9 +20,7 @@ begin; insert t2 values (5); commit; # first COMMIT must be Query_log_event, second - Xid_log_event ---replace_column 2 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// -show binlog events from 102; +source include/show_binlog_events.inc; drop table t1,t2; # @@ -44,10 +42,10 @@ commit; drop table t1; --replace_column 2 # 5 # --replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// -show binlog events in 'master-bin.000001' from 102; +show binlog events in 'master-bin.000001' from 106; --replace_column 2 # 5 # --replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// -show binlog events in 'master-bin.000002' from 102; +show binlog events in 'master-bin.000002' from 106; # Test of a too big SET INSERT_ID: see if the truncated value goes # into binlog (right), or the too big value (wrong); we look at the @@ -80,9 +78,24 @@ DELETE 
FROM user WHERE host='localhost' AND user='@#@'; --enable_warnings use test; ---replace_column 2 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// -show binlog events from 102; +source include/show_binlog_events.inc; drop table t1,t2,t3,tt1; -- source extra/binlog_tests/binlog_insert_delayed.test + +#Bug #26079 max_binlog_size + innodb = not make new binlog and hang server +# server should not hang, binlog must rotate in the end +reset master; +--disable_warnings +drop table if exists t3; +--enable_warnings +create table t3 (a int(11) NOT NULL AUTO_INCREMENT, b text, PRIMARY KEY (a) ) engine=innodb; +show master status; +let $it=4; +while ($it) +{ +insert into t3(b) values ('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'); +dec $it; +} +show master status /* must show new binlog index after rotating */; +drop table t3; diff --git a/mysql-test/extra/binlog_tests/binlog_insert_delayed.test b/mysql-test/extra/binlog_tests/binlog_insert_delayed.test index 4da883b9e60..d073c8ef227 100644 --- a/mysql-test/extra/binlog_tests/binlog_insert_delayed.test +++ b/mysql-test/extra/binlog_tests/binlog_insert_delayed.test @@ -23,9 +23,7 @@ inc $count; # moving binlog check affront of multi-rows queries which work is indeterministic (extra table_maps) # todo: better check is to substitute SHOW BINLOG with reading from binlog, probably bug#19459 is in # the way ---replace_column 2 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ -show binlog events from 102; +source include/show_binlog_events.inc; insert delayed into t1 values (null),(null),(null),(null); inc $count; inc $count; inc $count; inc $count; diff --git a/mysql-test/extra/binlog_tests/blackhole.test b/mysql-test/extra/binlog_tests/blackhole.test index 05e59838168..80f998359ba 100644 --- a/mysql-test/extra/binlog_tests/blackhole.test +++ b/mysql-test/extra/binlog_tests/blackhole.test @@ -121,11 +121,7 @@ select * from t2; select * from t3; let $VERSION=`select version()`; ---replace_result $VERSION VERSION ---replace_column 2 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ -show binlog events; - +source include/show_binlog_events.inc; drop table t1,t2,t3; # @@ -157,10 +153,7 @@ start transaction; insert into t1 values(2); rollback; set autocommit=1; ---replace_result $VERSION VERSION ---replace_column 2 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ -show binlog events; +source include/show_binlog_events.inc; drop table if exists t1; # End of 5.0 tests diff --git a/mysql-test/extra/binlog_tests/ctype_cp932_binlog.test b/mysql-test/extra/binlog_tests/ctype_cp932_binlog.test 
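The my_sys.h hunk above moves the DYNAMIC_ARRAY typedef ahead of MY_TMPDIR because MY_TMPDIR now embeds a DYNAMIC_ARRAY (full_list) by value, and C requires the complete type to be visible at that point; a forward declaration would only suffice for a pointer member. A tiny sketch of the constraint, using simplified stand-in names rather than the real structs:

    /* Sketch only: a by-value member needs a complete type at its point of use. */
    struct array { char *buffer; unsigned elements; };   /* must come first */

    struct tmpdir
    {
      struct array full_list;   /* embedded by value, so "struct array" is complete here */
      char **list;
      unsigned cur, max;
    };

    int main(void) { struct tmpdir t = {{0}}; (void)t; return 0; }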
index 5e93d6e126e..30585ece71c 100644 --- a/mysql-test/extra/binlog_tests/ctype_cp932_binlog.test +++ b/mysql-test/extra/binlog_tests/ctype_cp932_binlog.test @@ -26,9 +26,7 @@ SET @var1= x'8300'; # code (and I have used it to test the fix) until there is some way to # exercise this code from mysql-test-run. EXECUTE stmt1 USING @var1; ---replace_column 2 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ -SHOW BINLOG EVENTS FROM 102; +source include/show_binlog_events.inc; SELECT HEX(f1) FROM t1; DROP table t1; # end test for bug#11338 diff --git a/mysql-test/extra/binlog_tests/ctype_ucs_binlog.test b/mysql-test/extra/binlog_tests/ctype_ucs_binlog.test index fcf39e38163..e1a9dba7775 100644 --- a/mysql-test/extra/binlog_tests/ctype_ucs_binlog.test +++ b/mysql-test/extra/binlog_tests/ctype_ucs_binlog.test @@ -9,8 +9,7 @@ create table t2 (c char(30)) charset=ucs2; set @v=convert('abc' using ucs2); reset master; insert into t2 values (@v); ---replace_regex /table_id: [0-9]+/table_id: #/ -show binlog events from 102; +source include/show_binlog_events.inc; # more important than SHOW BINLOG EVENTS, mysqlbinlog (where we # absolutely need variables names to be quoted and strings to be # escaped). diff --git a/mysql-test/extra/binlog_tests/drop_temp_table.test b/mysql-test/extra/binlog_tests/drop_temp_table.test index 9c8647395bf..87f94eff987 100644 --- a/mysql-test/extra/binlog_tests/drop_temp_table.test +++ b/mysql-test/extra/binlog_tests/drop_temp_table.test @@ -23,10 +23,7 @@ connection con2; # To be sure that logging has been done, we use a user lock. select get_lock("a",10); let $VERSION=`select version()`; ---replace_result $VERSION VERSION ---replace_column 2 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ -show binlog events; +source include/show_binlog_events.inc; drop database `drop-temp+table-test`; # End of 4.1 tests diff --git a/mysql-test/extra/binlog_tests/insert_select-binlog.test b/mysql-test/extra/binlog_tests/insert_select-binlog.test index 07da4a1907f..b09eebcb996 100644 --- a/mysql-test/extra/binlog_tests/insert_select-binlog.test +++ b/mysql-test/extra/binlog_tests/insert_select-binlog.test @@ -18,9 +18,7 @@ insert into t1 select * from t2; # The above should produce an error, but still be in the binlog; # verify the binlog : let $VERSION=`select version()`; ---replace_result $VERSION VERSION ---replace_regex /table_id: [0-9]+/table_id: #/ -show binlog events; +source include/show_binlog_events.inc; select * from t1; drop table t1, t2; @@ -33,9 +31,7 @@ reset master; create table t2(unique(a)) select a from t1; # The above should produce an error, *and* not appear in the binlog let $VERSION=`select version()`; ---replace_result $VERSION VERSION ---replace_regex /table_id: [0-9]+/table_id: #/ -show binlog events; +source include/show_binlog_events.inc; drop table t1; # End of 4.1 tests diff --git a/mysql-test/extra/binlog_tests/mix_innodb_myisam_binlog.test b/mysql-test/extra/binlog_tests/mix_innodb_myisam_binlog.test index bb4194bc7a7..d6ccc403ce9 100644 --- a/mysql-test/extra/binlog_tests/mix_innodb_myisam_binlog.test +++ b/mysql-test/extra/binlog_tests/mix_innodb_myisam_binlog.test @@ -29,9 +29,7 @@ insert into t1 values(1); insert into t2 select * from t1; commit; ---replace_column 2 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// -show binlog events from 102; +source include/show_binlog_events.inc; delete from t1; delete from t2; @@ -43,9 +41,7 @@ insert into t2 select * from t1; # should say some changes to non-transact1onal 
tables couldn't be rolled back rollback; ---replace_column 2 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// -show binlog events from 102; +source include/show_binlog_events.inc; delete from t1; delete from t2; @@ -59,9 +55,7 @@ insert into t2 select * from t1; rollback to savepoint my_savepoint; commit; ---replace_column 2 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// -show binlog events from 102; +source include/show_binlog_events.inc; delete from t1; delete from t2; @@ -77,9 +71,7 @@ insert into t1 values(7); commit; select a from t1 order by a; # check that savepoints work :) ---replace_column 2 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// -show binlog events from 102; +source include/show_binlog_events.inc; # and when ROLLBACK is not explicit? delete from t1; @@ -99,9 +91,7 @@ connection con2; # so SHOW BINLOG EVENTS may come before con1 does the loggin. To be sure that # logging has been done, we use a user lock. select get_lock("a",10); ---replace_column 2 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// -show binlog events from 102; +source include/show_binlog_events.inc; # and when not in a transact1on? delete from t1; @@ -111,9 +101,7 @@ reset master; insert into t1 values(9); insert into t2 select * from t1; ---replace_column 2 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// -show binlog events from 102; +source include/show_binlog_events.inc; # Check that when the query updat1ng the MyISAM table is the first in the # transaction, we log it immediately. @@ -124,16 +112,11 @@ reset master; insert into t1 values(10); # first make t1 non-empty begin; insert into t2 select * from t1; ---replace_column 2 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// -show binlog events from 102; +source include/show_binlog_events.inc; insert into t1 values(11); commit; ---replace_column 2 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// -show binlog events from 102; - +source include/show_binlog_events.inc; # Check that things work like before this BEGIN/ROLLBACK code was added, # when t2 is INNODB @@ -149,9 +132,7 @@ insert into t1 values(12); insert into t2 select * from t1; commit; ---replace_column 2 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// -show binlog events from 102; +source include/show_binlog_events.inc; delete from t1; delete from t2; @@ -162,9 +143,7 @@ insert into t1 values(13); insert into t2 select * from t1; rollback; ---replace_column 2 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// -show binlog events from 102; +source include/show_binlog_events.inc; delete from t1; delete from t2; @@ -178,9 +157,7 @@ insert into t2 select * from t1; rollback to savepoint my_savepoint; commit; ---replace_column 2 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// -show binlog events from 102; +source include/show_binlog_events.inc; delete from t1; delete from t2; @@ -196,9 +173,7 @@ insert into t1 values(18); commit; select a from t1 order by a; # check that savepoints work :) ---replace_column 2 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// -show binlog events from 102; +source include/show_binlog_events.inc; # Test for BUG#5714, where a MyISAM update in the transaction used to 
# release row-level locks in InnoDB @@ -257,9 +232,7 @@ insert into t2 values (3); disconnect con2; connection con3; select get_lock("lock1",60); ---replace_column 2 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// -show binlog events from 102; +source include/show_binlog_events.inc; do release_lock("lock1"); drop table t0,t2; @@ -324,9 +297,7 @@ CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * ROLLBACK; SELECT * from t2; DROP TABLE t1,t2; ---replace_column 2 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// -show binlog events from 102; +source include/show_binlog_events.inc; # Test for BUG#16559 (ROLLBACK should always have a zero error code in # binlog). Has to be here and not earlier, as the SELECTs influence diff --git a/mysql-test/extra/rpl_tests/rpl_ddl.test b/mysql-test/extra/rpl_tests/rpl_ddl.test index 15794e5e035..e40532f005f 100644 --- a/mysql-test/extra/rpl_tests/rpl_ddl.test +++ b/mysql-test/extra/rpl_tests/rpl_ddl.test @@ -1,31 +1,128 @@ -######################## rpl_ddl.test ######################## -# # -# DDL statements (sometimes with implicit COMMIT) executed # -# by the master and it's propagation into the slave # -# # -############################################################## - +################# extra/rpl_tests/rpl_ddl.test ######################## +# # +# DDL statements (sometimes with implicit COMMIT) and other stuff # +# executed on the master and it's propagation into the slave. # +# # +# The variables # +# $engine_type -- storage engine to be tested/used for the # +# permanent tables within the master # +# $temp_engine_type -- storage engine which supports TEMPORARY # +# tables <> $engine_type # +# $temp_engine_type must point to an all # +# time available storage engine # +# 2007-02 MySQL 5.1 MyISAM and MEMORY only # +# $show_binlog -- print binlog entries # +# 0 - no (default) + fits to the file with # +# results # +# 1 - yes (important for debugging) # +# This variable is used within # +# include/rpl_stmt_seq.inc. # +# $manipulate -- Manipulation of the binary logs # +# 0 - do nothing # +# 1 - so that the output of SHOW BINLOG # +# EVENTS IN <current log> contains only # +# commands of the current test sequence # +# This is especially useful, if the # +# $show_binlog is set to 1 and many # +# subtest are executed. # +# This variable is used within # +# include/rpl_stmt_seq.inc. # +# have to be set before sourcing this script. # +# # +# General assumption about the ideal replication behaviour: # +# Whatever on the master is executed the content of the slave must # +# be in sync with it. # +# # +# Tests of special interest: # +# a) Which DDL commands cause an implicit COMMIT ? # +# This is also of interest outside of replication. # +# b) Transactions modifying table content ending with # +# - explicit COMMIT or ROLLBACK # +# - implicit COMMIT because the connection to the master # +# executed a corresponding DDL statement or runs in # +# AUTOCOMMIT mode # +# - something similar to "implicit COMMIT" if the storage # +# engine (master) is not transactional # +# c) Command which change no data like SELECT or SHOW # +# They do not change anything within the master but # +# this must be also valid for the slave. # +# # +####################################################################### + +# Last update: +# 2007-02-12 ML: - slave needs AUTOCOMMIT = 1, because we want to check only +# the propagation of actions of the master connection. 
+# - replace comments via SQL by "--echo ..." +# - remove some bugs within the testscripts +# - remove the use of include/rpl_stmt_seq2.inc +# +# +# NOTES: +# 2006-11-15 Lars: Matthias (ML) is the "owner" of this test case. +# So, please get him to review it whenever you want to +# do changes to it. +# +# PLEASE BE CAREFUL, WHEN MODIFYING THE TESTS !! # -# NOTE, PLEASE BE CAREFUL, WHEN MODIFYING THE TESTS !! +# Typical test architecture (--> include/rpl_stmt_seq.inc) +# -------------------------------------------------------- +# 1. Master (no AUTOCOMMIT!): INSERT INTO mysqltest1.t1 without commit +# 2. Master and slave: Check the content of mysqltest1.t1 +# 3. Master (no AUTOCOMMIT!): EXECUTE the statement to be tested +# 4. Master and slave: Check the content of mysqltest1.t1 +# 5. Master (no AUTOCOMMIT!): ROLLBACK +# 6. Master and slave: Check the content of mysqltest1.t1 +# If the previous into mysqltest1.t1 inserted row is visible, +# than the statement to be tested caused an explicit COMMIT +# (statement = COMMIT) or an implicit COMMIT (example CREATE TABLE). +# If the previous into mysqltest1.t1 inserted row is not visible, +# than the statement to be tested caused either an explicit ROLLBACK +# (statement = ROLLBACK), an implicit ROLLBACK (deadlock etc. but +# not tested here) or it does not cause any transaction end. +# 7. Flush the logs +# +# Some rules: +# ----------- +# 1. Any use of mysqltest1.t1 within the statement to be tested must be +# avoided if possible. The only known exception is around LOCK TABLE. +# +# 2. The test logics needs for +# master connection: AUTOCOMMIT = 0 +# slave connection: AUTOCOMMIT = 1 +# The master connection is the actor and the slave connection is +# only an observer. I.e. the slave connection must not influence +# the activities of master connection. # -# 1. !All! objects to be dropped, renamed, altered ... must be created -# in AUTOCOMMIT= 1 mode before AUTOCOMMIT is set to 0 and the test -# sequences start. +# 3. !All! objects to be dropped, renamed, altered ... must be created +# before the tests start. +# --> less switching of AUTOCOMMIT mode on master side. # -# 2. Never use a test object, which was direct or indirect affected by a -# preceeding test sequence again. -# Except table d1.t1 where ONLY DML is allowed. +# 4. Never use a test object, which was direct or indirect affected by a +# preceeding test sequence again. +# If one preceeding test sequence hits a (sometimes not visible, +# because the sql error code of the statement might be 0) bug +# and these rules are ignored, a following test sequence might earn ugly +# effects like failing 'sync_slave_with_master', crashes of the slave or +# abort of the test case etc.. This means during analysis the first look +# points into a totally wrong area. +# Except table mysqltest1.t1 where ONLY DML is allowed. # -# If one preceeding test sequence hits a (sometimes not good visible, -# because the sql error code of the statement might be 0) bug -# and these rules are ignored, a following test sequence might earn ugly -# effects like failing 'sync_slave_with_master', crashes of the slave or -# abort of the test case etc.. +# 5. This file is used in several tests (t/rpl_ddl_<whatever>.test). +# Please be aware that every change of the current file affects +# the results of these tests. # -# 3. The assignment of the DDL command to be tested to $my_stmt can -# be a bit difficult. 
"'" must be avoided, because the test -# routine "include/rpl_stmt_seq.inc" performs a -# eval SELECT CONCAT('######## ','$my_stmt',' ########') as ""; +# ML: Some maybe banal hints: +# 1. The fact that we have here a master - slave replication does +# not cause that many general MySQL properties do not apply. +# Example: +# The connection to the slave is just a simple session and not a however +# magic working "copy" of the master session or something similar. +# - TEMPORARY TABLES and @variables are session specific +# - the slave session cannot see these things of the master. +# 2. The slave connection must not call sync_slave_with_master. +# 3. SHOW STATUS SLAVE must be run within the slave connection. +# 4. Testcase analysis becomes much more comfortable if +# $show_binlog within include/rpl_stmt_seq.inc is set to 1. # ############################################################### @@ -33,8 +130,10 @@ ############################################################### # The sync_slave_with_master is needed to make the xids deterministic. sync_slave_with_master; -connection master; +--echo +--echo -------- switch to master ------- +connection master; SET AUTOCOMMIT = 1; # # 1. DROP all objects, which probably already exist, but must be created here @@ -47,7 +146,7 @@ DROP DATABASE IF EXISTS mysqltest3; # # 2. CREATE all objects needed # working database is mysqltest1 -# working (transactional!) is mysqltest1.t1 +# working table (transactional!) is mysqltest1.t1 # CREATE DATABASE mysqltest1; CREATE DATABASE mysqltest2; @@ -73,25 +172,23 @@ eval CREATE TABLE mysqltest1.t16 (f1 BIGINT) ENGINE=$engine_type; eval CREATE TABLE mysqltest1.t17 (f1 BIGINT) ENGINE=$engine_type; eval CREATE TABLE mysqltest1.t18 (f1 BIGINT) ENGINE=$engine_type; eval CREATE TABLE mysqltest1.t19 (f1 BIGINT) ENGINE=$engine_type; -CREATE TEMPORARY TABLE mysqltest1.t23 (f1 BIGINT); +eval CREATE TEMPORARY TABLE mysqltest1.t23 (f1 BIGINT) ENGINE=$temp_engine_type; # # 3. 
master sessions: never do AUTOCOMMIT -# slave sessions: never do AUTOCOMMIT +# slave sessions: do AUTOCOMMIT # SET AUTOCOMMIT = 0; use mysqltest1; sync_slave_with_master; +--echo +--echo -------- switch to slave -------- connection slave; ---disable_query_log -SELECT '-------- switch to slave --------' as ""; ---enable_query_log -SET AUTOCOMMIT = 0; +SET AUTOCOMMIT = 1; use mysqltest1; +--echo +--echo -------- switch to master ------- connection master; ---disable_query_log -SELECT '-------- switch to master -------' as ""; ---enable_query_log # We don't want to abort the whole test if one statement sent @@ -100,6 +197,21 @@ SELECT '-------- switch to master -------' as ""; --disable_abort_on_error ############################################################### +# Banal case: commands which should never commit +# Just for checking if the test sequence is usable +############################################################### + +let $my_stmt= SELECT 1; +let $my_master_commit= false; +let $my_slave_commit= false; +--source include/rpl_stmt_seq.inc + +let $my_stmt= SELECT COUNT(*) FROM t1; +let $my_master_commit= false; +let $my_slave_commit= false; +--source include/rpl_stmt_seq.inc + +############################################################### # Banal case: (explicit) COMMIT and ROLLBACK # Just for checking if the test sequence is usable ############################################################### @@ -143,84 +255,64 @@ let $my_master_commit= true; let $my_slave_commit= true; --source include/rpl_stmt_seq.inc SHOW TABLES LIKE 't2'; +--echo +--echo -------- switch to slave -------- connection slave; ---disable_query_log -SELECT '-------- switch to slave --------' as ""; ---enable_query_log SHOW TABLES LIKE 't2'; +--echo +--echo -------- switch to master ------- connection master; ---disable_query_log -SELECT '-------- switch to master -------' as ""; ---enable_query_log - -# Note: Since this test is executed with a skip-innodb slave, the -# slave incorrectly commits the insert. One can *not* have InnoDB on -# master and MyISAM on slave and expect that a transactional rollback -# after a CREATE TEMPORARY TABLE should work correctly on the slave. -# For this to work properly the handler on the slave must be able to -# handle transactions (e.g. InnoDB or NDB). 
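For orientation: the new header of extra/rpl_tests/rpl_ddl.test above states that the caller must set $engine_type and $temp_engine_type (and may set the debug switches $show_binlog and $manipulate) before sourcing the file, and that it is used from several t/rpl_ddl_<whatever>.test wrappers. A minimal wrapper might look roughly like the sketch below; the wrapper file name, the two leading --source includes and the concrete engine values are assumptions for illustration, not taken from this patch:

# hypothetical t/rpl_ddl_innodb.test-style wrapper (names and values are assumptions)
--source include/master-slave.inc
--source include/have_innodb.inc
let $engine_type= InnoDB;
let $temp_engine_type= MEMORY;
let $show_binlog= 0;
let $manipulate= 0;
--source extra/rpl_tests/rpl_ddl.test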
+ let $my_stmt= DROP TEMPORARY TABLE mysqltest1.t23; let $my_master_commit= false; -let $my_slave_commit= true; +let $my_slave_commit= false; --source include/rpl_stmt_seq.inc SHOW TABLES LIKE 't23'; +--echo +--echo -------- switch to slave -------- connection slave; ---disable_query_log -SELECT '-------- switch to slave --------' as ""; ---enable_query_log SHOW TABLES LIKE 't23'; +--echo +--echo -------- switch to master ------- connection master; ---disable_query_log -SELECT '-------- switch to master -------' as ""; ---enable_query_log let $my_stmt= RENAME TABLE mysqltest1.t3 to mysqltest1.t20; let $my_master_commit= true; let $my_slave_commit= true; --source include/rpl_stmt_seq.inc SHOW TABLES LIKE 't20'; +--echo +--echo -------- switch to slave -------- connection slave; ---disable_query_log -SELECT '-------- switch to slave --------' as ""; ---enable_query_log SHOW TABLES LIKE 't20'; +--echo +--echo -------- switch to master ------- connection master; ---disable_query_log -SELECT '-------- switch to master -------' as ""; ---enable_query_log let $my_stmt= ALTER TABLE mysqltest1.t4 ADD column f2 BIGINT; let $my_master_commit= true; let $my_slave_commit= true; --source include/rpl_stmt_seq.inc describe mysqltest1.t4; +--echo +--echo -------- switch to slave -------- connection slave; ---disable_query_log -SELECT '-------- switch to slave --------' as ""; ---enable_query_log describe mysqltest1.t4; +--echo +--echo -------- switch to master ------- connection master; ---disable_query_log -SELECT '-------- switch to master -------' as ""; ---enable_query_log -let $my_stmt= CREATE TABLE mysqltest1.t21 (f1 BIGINT) ENGINE=; +let $my_stmt= CREATE TABLE mysqltest1.t21 (f1 BIGINT) ENGINE= $engine_type; let $my_master_commit= true; let $my_slave_commit= true; ---source include/rpl_stmt_seq2.inc - -# Note: Since this test is executed with a skip-innodb slave, the -# slave incorrectly commits the insert. One can *not* have InnoDB on -# master and MyISAM on slave and expect that a transactional rollback -# after a CREATE TEMPORARY TABLE should work correctly on the slave. -# For this to work properly the handler on the slave must be able to -# handle transactions (e.g. InnoDB or NDB). +--source include/rpl_stmt_seq.inc + let $engine=''; let $eng_type=''; -let $my_stmt= CREATE TEMPORARY TABLE mysqltest1.t22 (f1 BIGINT); +let $my_stmt= CREATE TEMPORARY TABLE mysqltest1.t22 (f1 BIGINT) ENGINE=$temp_engine_type; let $my_master_commit= false; -let $my_slave_commit= true; +let $my_slave_commit= false; --source include/rpl_stmt_seq.inc let $my_stmt= TRUNCATE TABLE mysqltest1.t7; @@ -228,9 +320,12 @@ let $my_master_commit= true; let $my_slave_commit= true; --source include/rpl_stmt_seq.inc SELECT * FROM mysqltest1.t7; ---echo -------- switch to slave -------- sync_slave_with_master; +--echo +--echo -------- switch to slave -------- +connection slave; SELECT * FROM mysqltest1.t7; +--echo --echo -------- switch to master ------- connection master; @@ -238,8 +333,13 @@ connection master; # Cases with LOCK/UNLOCK ############################################################### -# MySQL insists in locking mysqltest1.t1, because rpl_stmt_seq performs an -# INSERT into this table. +# Attention: +# We have to LOCK mysqltest1.t1 here, though it violates the testing +# philosophy. +# Mysql response in case without previous LOCK TABLES mysqltest1.t1 +# is: +# SELECT MAX(...) 
FROM mysqltest1.t1 is +# ERROR HY000: Table 't1' was not locked with LOCK TABLES let $my_stmt= LOCK TABLES mysqltest1.t1 WRITE, mysqltest1.t8 READ; let $my_master_commit= true; let $my_slave_commit= true; @@ -253,8 +353,9 @@ let $my_slave_commit= false; --source include/rpl_stmt_seq.inc # With prior read locking -# Note that this test generate an error since the rpl_stmt_seq.inc -# tries to insert into t1. +# Attention: +# This subtest generates an error since the rpl_stmt_seq.inc +# tries to insert into t1. LOCK TABLES mysqltest1.t1 READ; let $my_stmt= UNLOCK TABLES; let $my_master_commit= false; @@ -277,30 +378,26 @@ let $my_master_commit= true; let $my_slave_commit= true; --source include/rpl_stmt_seq.inc SHOW INDEX FROM mysqltest1.t6; +--echo +--echo -------- switch to slave -------- connection slave; ---disable_query_log -SELECT '-------- switch to slave --------' as ""; ---enable_query_log SHOW INDEX FROM mysqltest1.t6; +--echo +--echo -------- switch to master ------- connection master; ---disable_query_log -SELECT '-------- switch to master -------' as ""; ---enable_query_log let $my_stmt= CREATE INDEX my_idx5 ON mysqltest1.t5(f1); let $my_master_commit= true; let $my_slave_commit= true; --source include/rpl_stmt_seq.inc SHOW INDEX FROM mysqltest1.t5; +--echo +--echo -------- switch to slave -------- connection slave; ---disable_query_log -SELECT '-------- switch to slave --------' as ""; ---enable_query_log SHOW INDEX FROM mysqltest1.t5; +--echo +--echo -------- switch to master ------- connection master; ---disable_query_log -SELECT '-------- switch to master -------' as ""; ---enable_query_log ############################################################### # Cases with DATABASE @@ -311,35 +408,31 @@ let $my_master_commit= true; let $my_slave_commit= true; --source include/rpl_stmt_seq.inc SHOW DATABASES LIKE "mysqltest2"; +--echo +--echo -------- switch to slave -------- connection slave; ---disable_query_log -SELECT '-------- switch to slave --------' as ""; ---enable_query_log SHOW DATABASES LIKE "mysqltest2"; +--echo +--echo -------- switch to master ------- connection master; ---disable_query_log -SELECT '-------- switch to master -------' as ""; ---enable_query_log let $my_stmt= CREATE DATABASE mysqltest3; let $my_master_commit= true; let $my_slave_commit= true; --source include/rpl_stmt_seq.inc SHOW DATABASES LIKE "mysqltest3"; +--echo +--echo -------- switch to slave -------- connection slave; ---disable_query_log -SELECT '-------- switch to slave --------' as ""; ---enable_query_log SHOW DATABASES LIKE "mysqltest3"; +--echo +--echo -------- switch to master ------- connection master; ---disable_query_log -SELECT '-------- switch to master -------' as ""; ---enable_query_log # End of 4.1 tests ############################################################### -# Cases with stored procedures +# Cases with STORED PROCEDUREs ############################################################### let $my_stmt= CREATE PROCEDURE p1() READS SQL DATA SELECT "this is p1"; let $my_master_commit= true; @@ -348,12 +441,13 @@ let $my_slave_commit= true; --vertical_results --replace_column 5 # 6 # SHOW PROCEDURE STATUS LIKE 'p1'; ---disable_query_log -SELECT '-------- switch to slave -------' as ""; ---enable_query_log +--echo +--echo -------- switch to slave -------- connection slave; --replace_column 5 # 6 # SHOW PROCEDURE STATUS LIKE 'p1'; +--echo +--echo -------- switch to master ------- connection master; --horizontal_results @@ -364,12 +458,13 @@ let $my_slave_commit= true; 
--vertical_results --replace_column 5 # 6 # SHOW PROCEDURE STATUS LIKE 'p1'; ---disable_query_log -SELECT '-------- switch to slave -------' as ""; ---enable_query_log +--echo +--echo -------- switch to slave -------- connection slave; --replace_column 5 # 6 # SHOW PROCEDURE STATUS LIKE 'p1'; +--echo +--echo -------- switch to master ------- connection master; --horizontal_results @@ -379,11 +474,12 @@ let $my_slave_commit= true; --source include/rpl_stmt_seq.inc --vertical_results SHOW PROCEDURE STATUS LIKE 'p1'; ---disable_query_log -SELECT '-------- switch to slave -------' as ""; ---enable_query_log +--echo +--echo -------- switch to slave -------- connection slave; SHOW PROCEDURE STATUS LIKE 'p1'; +--echo +--echo -------- switch to master ------- connection master; --horizontal_results @@ -395,11 +491,12 @@ let $my_master_commit= true; let $my_slave_commit= true; --source include/rpl_stmt_seq.inc SHOW CREATE VIEW v1; ---disable_query_log -SELECT '-------- switch to slave -------' as ""; ---enable_query_log +--echo +--echo -------- switch to slave -------- connection slave; SHOW CREATE VIEW v1; +--echo +--echo -------- switch to master ------- connection master; let $my_stmt= ALTER VIEW v1 AS select f1 from t1; @@ -407,11 +504,12 @@ let $my_master_commit= true; let $my_slave_commit= true; --source include/rpl_stmt_seq.inc SHOW CREATE VIEW v1; ---disable_query_log -SELECT '-------- switch to slave -------' as ""; ---enable_query_log +--echo +--echo -------- switch to slave -------- connection slave; SHOW CREATE VIEW v1; +--echo +--echo -------- switch to master ------- connection master; let $my_stmt= DROP VIEW IF EXISTS v1; @@ -420,12 +518,13 @@ let $my_slave_commit= true; --source include/rpl_stmt_seq.inc --error 1146 SHOW CREATE VIEW v1; ---disable_query_log -SELECT '-------- switch to slave -------' as ""; ---enable_query_log +--echo +--echo -------- switch to slave -------- connection slave; --error 1146 SHOW CREATE VIEW v1; +--echo +--echo -------- switch to master ------- connection master; ############################################################### @@ -436,11 +535,12 @@ let $my_master_commit= true; let $my_slave_commit= true; --source include/rpl_stmt_seq.inc SHOW TRIGGERS; ---disable_query_log -SELECT '-------- switch to slave -------' as ""; ---enable_query_log +--echo +--echo -------- switch to slave -------- connection slave; SHOW TRIGGERS; +--echo +--echo -------- switch to master ------- connection master; let $my_stmt= DROP TRIGGER trg1; @@ -448,11 +548,12 @@ let $my_master_commit= true; let $my_slave_commit= true; --source include/rpl_stmt_seq.inc SHOW TRIGGERS; ---disable_query_log -SELECT '-------- switch to slave -------' as ""; ---enable_query_log +--echo +--echo -------- switch to slave -------- connection slave; SHOW TRIGGERS; +--echo +--echo -------- switch to master ------- connection master; ############################################################### @@ -463,11 +564,12 @@ let $my_master_commit= true; let $my_slave_commit= true; --source include/rpl_stmt_seq.inc SELECT user FROM mysql.user WHERE user = 'user1'; ---disable_query_log -SELECT '-------- switch to slave -------' as ""; ---enable_query_log +--echo +--echo -------- switch to slave -------- connection slave; SELECT user FROM mysql.user WHERE user = 'user1'; +--echo +--echo -------- switch to master ------- connection master; let $my_stmt= RENAME USER user1@localhost TO rename1@localhost; @@ -475,11 +577,12 @@ let $my_master_commit= true; let $my_slave_commit= true; --source 
include/rpl_stmt_seq.inc SELECT user FROM mysql.user WHERE user = 'rename1'; ---disable_query_log -SELECT '-------- switch to slave -------' as ""; ---enable_query_log +--echo +--echo -------- switch to slave -------- connection slave; SELECT user FROM mysql.user WHERE user = 'rename1'; +--echo +--echo -------- switch to master ------- connection master; let $my_stmt= DROP USER rename1@localhost; @@ -487,21 +590,21 @@ let $my_master_commit= true; let $my_slave_commit= true; --source include/rpl_stmt_seq.inc SELECT user FROM mysql.user WHERE user = 'rename1'; ---disable_query_log -SELECT '-------- switch to slave -------' as ""; ---enable_query_log +--echo +--echo -------- switch to slave -------- connection slave; SELECT user FROM mysql.user WHERE user = 'rename1'; -connection master; ############################################################### # Cleanup ############################################################### ---disable_warnings -DROP DATABASE IF EXISTS mysqltest1; -DROP DATABASE IF EXISTS mysqltest2; -DROP DATABASE IF EXISTS mysqltest3; +use test; +--echo +--echo -------- switch to master ------- +connection master; +DROP DATABASE mysqltest1; +# mysqltest2 was alreday DROPPED some tests before. +DROP DATABASE mysqltest3; --enable_warnings -- source include/master-slave-end.inc - diff --git a/mysql-test/extra/rpl_tests/rpl_deadlock.test b/mysql-test/extra/rpl_tests/rpl_deadlock.test index 236a5f801b0..f6e02546a0b 100644 --- a/mysql-test/extra/rpl_tests/rpl_deadlock.test +++ b/mysql-test/extra/rpl_tests/rpl_deadlock.test @@ -82,7 +82,7 @@ show slave status; stop slave; delete from t3; -change master to master_log_pos=544; # the BEGIN log event +change master to master_log_pos=548; # the BEGIN log event begin; select * from t2 for update; # hold lock start slave; @@ -107,7 +107,7 @@ set global max_relay_log_size=0; # This is really copy-paste of 2) of above stop slave; delete from t3; -change master to master_log_pos=544; +change master to master_log_pos=548; begin; select * from t2 for update; start slave; diff --git a/mysql-test/extra/rpl_tests/rpl_log.test b/mysql-test/extra/rpl_tests/rpl_log.test index 9f6f4bf7e57..932fcdf670b 100644 --- a/mysql-test/extra/rpl_tests/rpl_log.test +++ b/mysql-test/extra/rpl_tests/rpl_log.test @@ -42,13 +42,13 @@ select count(*) from t1; show binlog events; --replace_column 2 # 5 # --replace_regex /\/\* xid=.* \*\//\/* XID *\// /table_id: [0-9]+/table_id: #/ -show binlog events from 102 limit 1; +show binlog events from 106 limit 1; --replace_column 2 # 5 # --replace_regex /\/\* xid=.* \*\//\/* XID *\// /table_id: [0-9]+/table_id: #/ -show binlog events from 102 limit 2; +show binlog events from 106 limit 2; --replace_column 2 # 5 # --replace_regex /\/\* xid=.* \*\//\/* XID *\// /table_id: [0-9]+/table_id: #/ -show binlog events from 102 limit 2,1; +show binlog events from 106 limit 2,1; flush logs; # We need an extra update before doing save_master_pos. 
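The hunks around here all make the same two adjustments: the repeated --replace_column/--replace_regex masking plus "show binlog events from 102" boilerplate is folded into a single sourced file, include/show_binlog_events.inc, and the hard-coded binlog offsets are bumped by four bytes to match the new binary log layout (102 becomes 106 here, 544 becomes 548 in rpl_deadlock.test). As a rough sketch, the shared include presumably boils down to the lines below; only the $binlog_start=106, --replace_result and --replace_column lines are visible in this changeset, so the last two lines are an assumption based on the masking the callers used before:

--let $binlog_start=106
--replace_result $binlog_start <binlog_start>
--replace_column 2 # 4 # 5 #
# the two lines below are assumed, mirroring the masking previously done inline by the callers
--replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\//
--eval show binlog events from $binlog_start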
@@ -88,10 +88,7 @@ connection master; eval create table t2 (n int)ENGINE=$engine_type; insert into t2 values (1); ---replace_result $VERSION VERSION ---replace_column 2 # 5 # ---replace_regex /\/\* xid=.* \*\//\/* XID *\// /table_id: [0-9]+/table_id: #/ -show binlog events; +source include/show_binlog_events.inc; --replace_result $VERSION VERSION --replace_column 2 # 5 # --replace_regex /\/\* xid=.* \*\//\/* XID *\// /table_id: [0-9]+/table_id: #/ @@ -142,10 +139,7 @@ insert into t1 values (NULL, 1); reset master; set insert_id=5; insert into t1 values (NULL, last_insert_id()), (NULL, last_insert_id()); ---replace_result $VERSION VERSION ---replace_column 2 # 5 # ---replace_regex /\/\* xid=.* \*\//\/* XID *\// /table_id: [0-9]+/table_id: #/ -show binlog events; +source include/show_binlog_events.inc; select * from t1; drop table t1; diff --git a/mysql-test/extra/rpl_tests/rpl_multi_query.test b/mysql-test/extra/rpl_tests/rpl_multi_query.test index 30a83886a86..2438556450d 100644 --- a/mysql-test/extra/rpl_tests/rpl_multi_query.test +++ b/mysql-test/extra/rpl_tests/rpl_multi_query.test @@ -23,8 +23,6 @@ delimiter ;/ sync_slave_with_master; select * from mysqltest.t1; connection master; ---replace_column 2 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ -show binlog events from 102; +source include/show_binlog_events.inc; drop database mysqltest; sync_slave_with_master; diff --git a/mysql-test/extra/rpl_tests/rpl_ndb_ddl.test b/mysql-test/extra/rpl_tests/rpl_ndb_ddl.test deleted file mode 100644 index 26c368589ba..00000000000 --- a/mysql-test/extra/rpl_tests/rpl_ndb_ddl.test +++ /dev/null @@ -1,507 +0,0 @@ -######################## rpl_ddl.test ######################## -# # -# DDL statements (sometimes with implicit COMMIT) executed # -# by the master and it's propagation into the slave # -# # -############################################################## - -# -# NOTE, PLEASE BE CAREFUL, WHEN MODIFYING THE TESTS !! -# -# 1. !All! objects to be dropped, renamed, altered ... must be created -# in AUTOCOMMIT= 1 mode before AUTOCOMMIT is set to 0 and the test -# sequences start. -# -# 2. Never use a test object, which was direct or indirect affected by a -# preceeding test sequence again. -# Except table d1.t1 where ONLY DML is allowed. -# -# If one preceeding test sequence hits a (sometimes not good visible, -# because the sql error code of the statement might be 0) bug -# and these rules are ignored, a following test sequence might earn ugly -# effects like failing 'sync_slave_with_master', crashes of the slave or -# abort of the test case etc.. -# -# 3. The assignment of the DDL command to be tested to $my_stmt can -# be a bit difficult. "'" must be avoided, because the test -# routine "include/rpl_stmt_seq.inc" performs a -# eval SELECT CONCAT('######## ','$my_stmt',' ########') as ""; -# - -############################################################### -# Some preparations -############################################################### -# The sync_slave_with_master is needed to make the xids deterministic. -sync_slave_with_master; -connection master; - -SET AUTOCOMMIT = 1; -# -# 1. DROP all objects, which probably already exist, but must be created here -# ---disable_warnings -DROP DATABASE IF EXISTS mysqltest1; -DROP DATABASE IF EXISTS mysqltest2; -DROP DATABASE IF EXISTS mysqltest3; ---enable_warnings -# -# 2. CREATE all objects needed -# working database is mysqltest1 -# working (transactional!) 
is mysqltest1.t1 -# -CREATE DATABASE mysqltest1; -CREATE DATABASE mysqltest2; -eval CREATE TABLE mysqltest1.t1 (f1 BIGINT) ENGINE=$engine_type; -INSERT INTO mysqltest1.t1 SET f1= 0; -eval CREATE TABLE mysqltest1.t2 (f1 BIGINT) ENGINE=$engine_type; -eval CREATE TABLE mysqltest1.t3 (f1 BIGINT) ENGINE=$engine_type; -eval CREATE TABLE mysqltest1.t4 (f1 BIGINT) ENGINE=$engine_type; -eval CREATE TABLE mysqltest1.t5 (f1 BIGINT) ENGINE=$engine_type; -eval CREATE TABLE mysqltest1.t6 (f1 BIGINT) ENGINE=$engine_type; -CREATE INDEX my_idx6 ON mysqltest1.t6(f1); -eval CREATE TABLE mysqltest1.t7 (f1 BIGINT) ENGINE=$engine_type; -INSERT INTO mysqltest1.t7 SET f1= 0; -eval CREATE TABLE mysqltest1.t8 (f1 BIGINT) ENGINE=$engine_type; -eval CREATE TABLE mysqltest1.t9 (f1 BIGINT) ENGINE=$engine_type; -eval CREATE TABLE mysqltest1.t10 (f1 BIGINT) ENGINE=$engine_type; -eval CREATE TABLE mysqltest1.t11 (f1 BIGINT) ENGINE=$engine_type; -eval CREATE TABLE mysqltest1.t12 (f1 BIGINT) ENGINE=$engine_type; -eval CREATE TABLE mysqltest1.t13 (f1 BIGINT) ENGINE=$engine_type; -eval CREATE TABLE mysqltest1.t14 (f1 BIGINT) ENGINE=$engine_type; -eval CREATE TABLE mysqltest1.t15 (f1 BIGINT) ENGINE=$engine_type; -eval CREATE TABLE mysqltest1.t16 (f1 BIGINT) ENGINE=$engine_type; -eval CREATE TABLE mysqltest1.t17 (f1 BIGINT) ENGINE=$engine_type; -eval CREATE TABLE mysqltest1.t18 (f1 BIGINT) ENGINE=$engine_type; -eval CREATE TABLE mysqltest1.t19 (f1 BIGINT) ENGINE=$engine_type; -CREATE TEMPORARY TABLE mysqltest1.t23 (f1 BIGINT); - -# -# 3. master sessions: never do AUTOCOMMIT -# slave sessions: never do AUTOCOMMIT -# -SET AUTOCOMMIT = 0; -use mysqltest1; -sync_slave_with_master; -connection slave; ---disable_query_log -SELECT '-------- switch to slave --------' as ""; ---enable_query_log -SET AUTOCOMMIT = 0; -use mysqltest1; -connection master; ---disable_query_log -SELECT '-------- switch to master -------' as ""; ---enable_query_log - - -# We don't want to abort the whole test if one statement sent -# to the server gets an error, because the following test -# sequences are nearly independend of the previous statements. 
---disable_abort_on_error - -############################################################### -# Banal case: (explicit) COMMIT and ROLLBACK -# Just for checking if the test sequence is usable -############################################################### - -let $my_stmt= COMMIT; -let $my_master_commit= true; -let $my_slave_commit= true; ---source include/rpl_stmt_seq.inc - -let $my_stmt= ROLLBACK; -let $my_master_commit= false; -let $my_slave_commit= false; ---source include/rpl_stmt_seq.inc - -############################################################### -# Cases with commands very similar to COMMIT -############################################################### - -let $my_stmt= SET AUTOCOMMIT=1; -let $my_master_commit= true; -let $my_slave_commit= true; ---source include/rpl_stmt_seq.inc -SET AUTOCOMMIT=0; - -let $my_stmt= START TRANSACTION; -let $my_master_commit= true; -let $my_slave_commit= true; ---source include/rpl_stmt_seq.inc - -let $my_stmt= BEGIN; -let $my_master_commit= true; -let $my_slave_commit= true; ---source include/rpl_stmt_seq.inc - -############################################################### -# Cases with (BASE) TABLES and (UPDATABLE) VIEWs -############################################################### - -let $my_stmt= DROP TABLE mysqltest1.t2; -let $my_master_commit= true; -let $my_slave_commit= true; ---source include/rpl_stmt_seq.inc -SHOW TABLES LIKE 't2'; -connection slave; ---disable_query_log -SELECT '-------- switch to slave --------' as ""; ---enable_query_log -SHOW TABLES LIKE 't2'; -connection master; ---disable_query_log -SELECT '-------- switch to master -------' as ""; ---enable_query_log - -# Note: Since this test is executed with a skip-innodb slave, the -# slave incorrectly commits the insert. One can *not* have InnoDB on -# master and MyISAM on slave and expect that a transactional rollback -# after a CREATE TEMPORARY TABLE should work correctly on the slave. -# For this to work properly the handler on the slave must be able to -# handle transactions (e.g. InnoDB or NDB). 
-let $my_stmt= DROP TEMPORARY TABLE mysqltest1.t23; -let $my_master_commit= false; -let $my_slave_commit= false; ---source include/rpl_stmt_seq.inc -SHOW TABLES LIKE 't23'; -connection slave; ---disable_query_log -SELECT '-------- switch to slave --------' as ""; ---enable_query_log -SHOW TABLES LIKE 't23'; -connection master; ---disable_query_log -SELECT '-------- switch to master -------' as ""; ---enable_query_log - -let $my_stmt= RENAME TABLE mysqltest1.t3 to mysqltest1.t20; -let $my_master_commit= true; -let $my_slave_commit= true; ---source include/rpl_stmt_seq.inc -SHOW TABLES LIKE 't20'; -connection slave; ---disable_query_log -SELECT '-------- switch to slave --------' as ""; ---enable_query_log -SHOW TABLES LIKE 't20'; -connection master; ---disable_query_log -SELECT '-------- switch to master -------' as ""; ---enable_query_log - -let $my_stmt= ALTER TABLE mysqltest1.t4 ADD column f2 BIGINT; -let $my_master_commit= true; -let $my_slave_commit= true; ---source include/rpl_stmt_seq.inc -describe mysqltest1.t4; -connection slave; ---disable_query_log -SELECT '-------- switch to slave --------' as ""; ---enable_query_log -describe mysqltest1.t4; -connection master; ---disable_query_log -SELECT '-------- switch to master -------' as ""; ---enable_query_log - -let $my_stmt= CREATE TABLE mysqltest1.t21 (f1 BIGINT) ENGINE=; -let $my_master_commit= true; -let $my_slave_commit= true; ---source include/rpl_stmt_seq2.inc - -# Note: Since this test is executed with a skip-innodb slave, the -# slave incorrectly commits the insert. One can *not* have InnoDB on -# master and MyISAM on slave and expect that a transactional rollback -# after a CREATE TEMPORARY TABLE should work correctly on the slave. -# For this to work properly the handler on the slave must be able to -# handle transactions (e.g. InnoDB or NDB). -let $engine=''; -let $eng_type=''; - -let $my_stmt= CREATE TEMPORARY TABLE mysqltest1.t22 (f1 BIGINT); -let $my_master_commit= false; -let $my_slave_commit= false; ---source include/rpl_stmt_seq.inc - -let $my_stmt= TRUNCATE TABLE mysqltest1.t7; -let $my_master_commit= true; -let $my_slave_commit= true; ---source include/rpl_stmt_seq.inc -SELECT * FROM mysqltest1.t7; ---echo -------- switch to slave -------- -sync_slave_with_master; -SELECT * FROM mysqltest1.t7; ---echo -------- switch to master ------- -connection master; - -############################################################### -# Cases with LOCK/UNLOCK -############################################################### - -# MySQL insists in locking mysqltest1.t1, because rpl_stmt_seq performs an -# INSERT into this table. -let $my_stmt= LOCK TABLES mysqltest1.t1 WRITE, mysqltest1.t8 READ; -let $my_master_commit= true; -let $my_slave_commit= true; ---source include/rpl_stmt_seq.inc -UNLOCK TABLES; - -# No prior locking -let $my_stmt= UNLOCK TABLES; -let $my_master_commit= false; -let $my_slave_commit= false; ---source include/rpl_stmt_seq.inc - -# With prior read locking -# Note that this test generate an error since the rpl_stmt_seq.inc -# tries to insert into t1. 
-LOCK TABLES mysqltest1.t1 READ; -let $my_stmt= UNLOCK TABLES; -let $my_master_commit= false; -let $my_slave_commit= false; ---source include/rpl_stmt_seq.inc - -# With prior write locking -LOCK TABLES mysqltest1.t1 WRITE, mysqltest1.t8 READ; -let $my_stmt= UNLOCK TABLES; -let $my_master_commit= true; -let $my_slave_commit= true; ---source include/rpl_stmt_seq.inc - -############################################################### -# Cases with INDEXES -############################################################### - -let $my_stmt= DROP INDEX my_idx6 ON mysqltest1.t6; -let $my_master_commit= true; -let $my_slave_commit= true; ---source include/rpl_stmt_seq.inc -SHOW INDEX FROM mysqltest1.t6; -connection slave; ---disable_query_log -SELECT '-------- switch to slave --------' as ""; ---enable_query_log -SHOW INDEX FROM mysqltest1.t6; -connection master; ---disable_query_log -SELECT '-------- switch to master -------' as ""; ---enable_query_log - -let $my_stmt= CREATE INDEX my_idx5 ON mysqltest1.t5(f1); -let $my_master_commit= true; -let $my_slave_commit= true; ---source include/rpl_stmt_seq.inc -SHOW INDEX FROM mysqltest1.t5; -connection slave; ---disable_query_log -SELECT '-------- switch to slave --------' as ""; ---enable_query_log -SHOW INDEX FROM mysqltest1.t5; -connection master; ---disable_query_log -SELECT '-------- switch to master -------' as ""; ---enable_query_log - -############################################################### -# Cases with DATABASE -############################################################### - -let $my_stmt= DROP DATABASE mysqltest2; -let $my_master_commit= true; -let $my_slave_commit= true; ---source include/rpl_stmt_seq.inc -SHOW DATABASES LIKE "mysqltest2"; -connection slave; ---disable_query_log -SELECT '-------- switch to slave --------' as ""; ---enable_query_log -SHOW DATABASES LIKE "mysqltest2"; -connection master; ---disable_query_log -SELECT '-------- switch to master -------' as ""; ---enable_query_log - -let $my_stmt= CREATE DATABASE mysqltest3; -let $my_master_commit= true; -let $my_slave_commit= true; ---source include/rpl_stmt_seq.inc -SHOW DATABASES LIKE "mysqltest3"; -connection slave; ---disable_query_log -SELECT '-------- switch to slave --------' as ""; ---enable_query_log -SHOW DATABASES LIKE "mysqltest3"; -connection master; ---disable_query_log -SELECT '-------- switch to master -------' as ""; ---enable_query_log - -# End of 4.1 tests - -############################################################### -# Cases with stored procedures -############################################################### -let $my_stmt= CREATE PROCEDURE p1() READS SQL DATA SELECT "this is p1"; -let $my_master_commit= true; -let $my_slave_commit= true; ---source include/rpl_stmt_seq.inc ---vertical_results ---replace_column 5 # 6 # -SHOW PROCEDURE STATUS LIKE 'p1'; ---disable_query_log -SELECT '-------- switch to slave -------' as ""; ---enable_query_log -connection slave; ---replace_column 5 # 6 # -SHOW PROCEDURE STATUS LIKE 'p1'; -connection master; ---horizontal_results - -let $my_stmt= ALTER PROCEDURE p1 COMMENT "I have been altered"; -let $my_master_commit= true; -let $my_slave_commit= true; ---source include/rpl_stmt_seq.inc ---vertical_results ---replace_column 5 # 6 # -SHOW PROCEDURE STATUS LIKE 'p1'; ---disable_query_log -SELECT '-------- switch to slave -------' as ""; ---enable_query_log -connection slave; ---replace_column 5 # 6 # -SHOW PROCEDURE STATUS LIKE 'p1'; -connection master; ---horizontal_results - -let $my_stmt= DROP PROCEDURE p1; -let 
$my_master_commit= true; -let $my_slave_commit= true; ---source include/rpl_stmt_seq.inc ---vertical_results -SHOW PROCEDURE STATUS LIKE 'p1'; ---disable_query_log -SELECT '-------- switch to slave -------' as ""; ---enable_query_log -connection slave; -SHOW PROCEDURE STATUS LIKE 'p1'; -connection master; ---horizontal_results - -############################################################### -# Cases with VIEWs -############################################################### -let $my_stmt= CREATE OR REPLACE VIEW v1 as select * from t1; -let $my_master_commit= true; -let $my_slave_commit= true; ---source include/rpl_stmt_seq.inc -SHOW CREATE VIEW v1; ---disable_query_log -SELECT '-------- switch to slave -------' as ""; ---enable_query_log -connection slave; -SHOW CREATE VIEW v1; -connection master; - -let $my_stmt= ALTER VIEW v1 AS select f1 from t1; -let $my_master_commit= true; -let $my_slave_commit= true; ---source include/rpl_stmt_seq.inc -SHOW CREATE VIEW v1; ---disable_query_log -SELECT '-------- switch to slave -------' as ""; ---enable_query_log -connection slave; -SHOW CREATE VIEW v1; -connection master; - -let $my_stmt= DROP VIEW IF EXISTS v1; -let $my_master_commit= true; -let $my_slave_commit= true; ---source include/rpl_stmt_seq.inc ---error 1146 -SHOW CREATE VIEW v1; ---disable_query_log -SELECT '-------- switch to slave -------' as ""; ---enable_query_log -connection slave; ---error 1146 -SHOW CREATE VIEW v1; -connection master; - -############################################################### -# Cases with TRIGGERs -############################################################### -let $my_stmt= CREATE TRIGGER trg1 BEFORE INSERT ON t1 FOR EACH ROW SET @a:=1; -let $my_master_commit= true; -let $my_slave_commit= true; ---source include/rpl_stmt_seq.inc -SHOW TRIGGERS; ---disable_query_log -SELECT '-------- switch to slave -------' as ""; ---enable_query_log -connection slave; -SHOW TRIGGERS; -connection master; - -let $my_stmt= DROP TRIGGER trg1; -let $my_master_commit= true; -let $my_slave_commit= true; ---source include/rpl_stmt_seq.inc -SHOW TRIGGERS; ---disable_query_log -SELECT '-------- switch to slave -------' as ""; ---enable_query_log -connection slave; -SHOW TRIGGERS; -connection master; - -############################################################### -# Cases with USERs -############################################################### -let $my_stmt= CREATE USER user1@localhost; -let $my_master_commit= true; -let $my_slave_commit= true; ---source include/rpl_stmt_seq.inc -SELECT user FROM mysql.user WHERE user = 'user1'; ---disable_query_log -SELECT '-------- switch to slave -------' as ""; ---enable_query_log -connection slave; -SELECT user FROM mysql.user WHERE user = 'user1'; -connection master; - -let $my_stmt= RENAME USER user1@localhost TO rename1@localhost; -let $my_master_commit= true; -let $my_slave_commit= true; ---source include/rpl_stmt_seq.inc -SELECT user FROM mysql.user WHERE user = 'rename1'; ---disable_query_log -SELECT '-------- switch to slave -------' as ""; ---enable_query_log -connection slave; -SELECT user FROM mysql.user WHERE user = 'rename1'; -connection master; - -let $my_stmt= DROP USER rename1@localhost; -let $my_master_commit= true; -let $my_slave_commit= true; ---source include/rpl_stmt_seq.inc -SELECT user FROM mysql.user WHERE user = 'rename1'; ---disable_query_log -SELECT '-------- switch to slave -------' as ""; ---enable_query_log -connection slave; -SELECT user FROM mysql.user WHERE user = 'rename1'; -connection master; - 
-############################################################### -# Cleanup -############################################################### ---disable_warnings -DROP DATABASE IF EXISTS mysqltest1; -DROP DATABASE IF EXISTS mysqltest2; -DROP DATABASE IF EXISTS mysqltest3; ---enable_warnings - --- source include/master-slave-end.inc - diff --git a/mysql-test/extra/rpl_tests/rpl_row_charset.test b/mysql-test/extra/rpl_tests/rpl_row_charset.test index 336a038db10..4ce5245a79a 100644 --- a/mysql-test/extra/rpl_tests/rpl_row_charset.test +++ b/mysql-test/extra/rpl_tests/rpl_row_charset.test @@ -113,9 +113,7 @@ select * from mysqltest2.t1 order by a; connection master; drop database mysqltest2; drop database mysqltest3; ---replace_column 2 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ -show binlog events from 102; +source include/show_binlog_events.inc; sync_slave_with_master; # Check that we can change global.collation_server (since 5.0.3) diff --git a/mysql-test/extra/rpl_tests/rpl_row_delayed_ins.test b/mysql-test/extra/rpl_tests/rpl_row_delayed_ins.test index 0e235f8838f..c4e6dbc84c2 100644 --- a/mysql-test/extra/rpl_tests/rpl_row_delayed_ins.test +++ b/mysql-test/extra/rpl_tests/rpl_row_delayed_ins.test @@ -15,9 +15,7 @@ SELECT * FROM t1 ORDER BY a; sync_slave_with_master; connection master; ---replace_result $VERSION VERSION ---replace_regex /table_id: [0-9]+/table_id: #/ -show binlog events; +source include/show_binlog_events.inc; sync_slave_with_master; SELECT * FROM t1 ORDER BY a; connection master; diff --git a/mysql-test/extra/rpl_tests/rpl_row_sp002.test b/mysql-test/extra/rpl_tests/rpl_row_sp002.test index 9d056626cf2..47afcce875b 100644 --- a/mysql-test/extra/rpl_tests/rpl_row_sp002.test +++ b/mysql-test/extra/rpl_tests/rpl_row_sp002.test @@ -222,7 +222,7 @@ sync_with_master; select * from test.t3; connection master; -#show binlog events from 1626; +#show binlog events from 1627; # First lets cleanup diff --git a/mysql-test/extra/rpl_tests/rpl_row_sp003.test b/mysql-test/extra/rpl_tests/rpl_row_sp003.test index 0934e2d6f98..df318ee0c0b 100644 --- a/mysql-test/extra/rpl_tests/rpl_row_sp003.test +++ b/mysql-test/extra/rpl_tests/rpl_row_sp003.test @@ -65,7 +65,7 @@ sync_slave_with_master; connection slave; SELECT * FROM test.t1; connection master; -#show binlog events from 719; +#show binlog events from 720; DROP PROCEDURE IF EXISTS test.p1; DROP PROCEDURE IF EXISTS test.p2; diff --git a/mysql-test/extra/rpl_tests/rpl_stm_charset.test b/mysql-test/extra/rpl_tests/rpl_stm_charset.test index 5657b06e88f..629ccdf69f7 100644 --- a/mysql-test/extra/rpl_tests/rpl_stm_charset.test +++ b/mysql-test/extra/rpl_tests/rpl_stm_charset.test @@ -109,9 +109,7 @@ select * from mysqltest2.t1 order by a; connection master; drop database mysqltest2; drop database mysqltest3; ---replace_column 2 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ -show binlog events from 102; +source include/show_binlog_events.inc; sync_slave_with_master; # Check that we can change global.collation_server (since 5.0.3) diff --git a/mysql-test/extra/rpl_tests/rpl_truncate_helper.test b/mysql-test/extra/rpl_tests/rpl_truncate_helper.test index 7f1506c4010..64a8de7c6a0 100644 --- a/mysql-test/extra/rpl_tests/rpl_truncate_helper.test +++ b/mysql-test/extra/rpl_tests/rpl_truncate_helper.test @@ -37,6 +37,4 @@ SELECT * FROM t1; connection master; DROP TABLE t1; let $SERVER_VERSION=`select version()`; ---replace_result $SERVER_VERSION SERVER_VERSION ---replace_regex /\/\* xid=[0-9]+ \*\//\/* xid= *\// /table_id: 
[0-9]+/table_id: #/ -SHOW BINLOG EVENTS; +source include/show_binlog_events.inc; diff --git a/mysql-test/include/ndb_backup_print.inc b/mysql-test/include/ndb_backup_print.inc new file mode 100644 index 00000000000..57fb279491c --- /dev/null +++ b/mysql-test/include/ndb_backup_print.inc @@ -0,0 +1,6 @@ +--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults $ndb_restore_opts -b $the_backup_id -n 1 $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id $ndb_restore_filter > $MYSQLTEST_VARDIR/tmp/tmp.dat +--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults $ndb_restore_opts -b $the_backup_id -n 2 $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id $ndb_restore_filter >> $MYSQLTEST_VARDIR/tmp/tmp.dat +--exec sort $MYSQLTEST_VARDIR/tmp/tmp.dat +--exec rm -f $MYSQLTEST_VARDIR/tmp/tmp.dat +--let ndb_restore_opts= +--let ndb_restore_filter= diff --git a/mysql-test/include/rpl_events.inc b/mysql-test/include/rpl_events.inc new file mode 100644 index 00000000000..04885f31997 --- /dev/null +++ b/mysql-test/include/rpl_events.inc @@ -0,0 +1,119 @@ +################################################################## +# Author: Giuseppe, Chuck Bell # +# Date: 17-January-2007 # +# Purpose: To test that event effects are replicated # +# in both row based and statement based format # +################################################################## + +--disable_warnings +DROP EVENT IF EXISTS test.justonce; +drop table if exists t1,t2; +--enable_warnings + +# first, we need a table to record something from an event + +eval CREATE TABLE `t1` ( + `id` INT(10) UNSIGNED NOT NULL AUTO_INCREMENT, + `c` VARCHAR(50) NOT NULL, + `ts` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + PRIMARY KEY (`id`) +) ENGINE=$engine_type DEFAULT CHARSET=utf8; + +INSERT INTO t1 (c) VALUES ('manually'); + +# then, we create the event +CREATE EVENT test.justonce ON SCHEDULE AT NOW() + INTERVAL 2 SECOND DO INSERT INTO t1 +(c) VALUES ('from justonce'); + +SELECT db, name, status, originator FROM mysql.event WHERE db = 'test' AND name = 'justonce'; + +# wait 3 seconds, so the event can trigger +--real_sleep 3 + +# check that table t1 contains something +--echo "in the master" +--enable_info +--replace_column 3 TIMESTAMP +SELECT * FROM t1 WHERE c = 'from justonce' OR c = 'manually' ORDER BY id; +--disable_info + +sync_slave_with_master; + +--echo "in the slave" +--enable_info +--replace_column 3 TIMESTAMP +SELECT * FROM t1 WHERE c = 'from justonce' OR c = 'manually' ORDER BY id; +--disable_info + +SELECT db, name, status, originator FROM mysql.event WHERE db = 'test' AND name = 'justonce'; + +# Create an event on the slave and check to see what the originator is. 
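The new include/ndb_backup_print.inc a few hunks above is pure plumbing: it runs ndb_restore once per node id (1 and 2) against backup $the_backup_id, sorts the combined output so the result is deterministic, and finally clears the $ndb_restore_opts and $ndb_restore_filter variables it consumed. A hypothetical caller might look roughly like the sketch below; the ndb_backup.inc helper and the concrete option value are assumptions, not part of this patch:

# hypothetical caller of include/ndb_backup_print.inc (helper name and option value are assumptions)
--source include/ndb_backup.inc
# the helper above is assumed to run START BACKUP and set $the_backup_id
--let ndb_restore_opts= --print_meta
--let ndb_restore_filter=
--source include/ndb_backup_print.inc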
+--disable_warnings +DROP EVENT IF EXISTS test.slave_once; +--enable_warnings + +CREATE EVENT test.slave_once ON SCHEDULE EVERY 5 MINUTE DO +INSERT INTO t1(c) VALUES ('from slave_once'); +SELECT db, name, status, originator FROM mysql.event WHERE db = 'test' AND name = 'slave_once'; + +--disable_warnings +DROP EVENT IF EXISTS test.slave_once; +--enable_warnings + +connection master; + +# BUG#20384 - disable events on slave +--disable_warnings +DROP EVENT IF EXISTS test.justonce; +--enable_warnings + +CREATE EVENT test.er ON SCHEDULE EVERY 3 SECOND DO +INSERT INTO t1(c) VALUES ('from er'); +SELECT db, name, status, originator, body FROM mysql.event WHERE db = 'test' AND name = 'er'; + +sync_slave_with_master; + +--echo "in the slave" +SELECT db, name, status, originator, body FROM mysql.event WHERE db = 'test' AND name = 'er'; + +connection master; +--echo "in the master" +ALTER EVENT test.er ON SCHEDULE EVERY 5 SECOND DO INSERT into t1(c) VALUES ('from alter er'); +SELECT db, name, status, originator, body FROM mysql.event WHERE db = 'test' AND name = 'er'; + +sync_slave_with_master; + +--echo "in the slave" +SELECT db, name, status, originator, body FROM mysql.event WHERE db = 'test' AND name = 'er'; + +connection master; +--echo "in the master" +DROP EVENT test.er; +SELECT db, name, status, originator FROM mysql.event WHERE db = 'test'; + +--disable_info + +sync_slave_with_master; + +--echo "in the slave" +SELECT db, name, status, originator FROM mysql.event WHERE db = 'test'; + +# test the DISABLE ON SLAVE for setting event SLAVESIDE_DISABLED as status +# on CREATE EVENT + +CREATE EVENT test.slave_terminate ON SCHEDULE EVERY 3 SECOND +DO INSERT INTO t1(c) VALUES ('from slave_terminate'); +SELECT db, name, status, originator FROM mysql.event WHERE db = 'test' AND name = 'slave_terminate'; + +DROP EVENT test.slave_terminate; + +CREATE EVENT test.slave_terminate ON SCHEDULE EVERY 3 SECOND +DISABLE ON SLAVE DO INSERT INTO t1(c) VALUES ('from slave_terminate'); +SELECT db, name, status, originator FROM mysql.event WHERE db = 'test' AND name = 'slave_terminate'; + +DROP EVENT test.slave_terminate; + +--echo "in the master" +connection master; +DROP TABLE t1; + diff --git a/mysql-test/include/rpl_stmt_seq.inc b/mysql-test/include/rpl_stmt_seq.inc index 3c91505d0d6..6c944dc4729 100644 --- a/mysql-test/include/rpl_stmt_seq.inc +++ b/mysql-test/include/rpl_stmt_seq.inc @@ -1,30 +1,51 @@ -# include/rpl_stmt_seq.inc -# -# Please be very careful when editing this routine, because the handling of -# the $variables is extreme sensitive. -# +################### include/rpl_stmt_seq.inc ########################### +# # +# Check if a given SQL statement (->$my_stmt) / AUTOCOMMIT mode / # +# storage engine somehow involved causes COMMIT or ROLLBACK. # +# # +# # +# The typical test sequence # +# ------------------------- # +# 1. master connection: INSERT without commit # +# check table content of master and slave # +# 2. master connection: EXECUTE the statement # +# check table content of master and slave # +# 3. master connection: ROLLBACK # +# check table content of master and slave # +# 4. flush the logs # +# # +# The variables # +# $show_binlog -- print binlog entries # +# 0 - default + fits to the file with # +# results # +# 1 - useful for debugging # +# This variable is used within # +# include/rpl_stmt_seq.inc. 
# +# $manipulate -- Manipulation of the binary logs # +# 0 - do nothing # +# 1 - so that the output of SHOW BINLOG # +# EVENTS IN <current log> contains only # +# commands of the current test sequence # +# This is especially useful, if the # +# $show_binlog is set to 1 and many # +# subtest are executed. # +# This variable is used within # +# include/rpl_stmt_seq.inc. # +# have to be set before sourcing this script. # +# # +# Please be very careful when editing this routine, because the # +# handling of the $variables is extreme sensitive. # +# # +######################################################################## -############################################################### -# Debug options : To debug this test script -############################################################### -let $show_binlog= 0; -let $manipulate= 1; - -######## The typical test sequence -# 1. INSERT without commit -# check table content of master and slave -# 2. EXECUTE the statement -# check table content of master and slave -# 3. ROLLBACK -# check table content of master and slave -# 4. flush the logs +# Last update: +# 2007-02-12 ML Replace comments via SQL by "--echo ..." +# let $VERSION=`select version()`; ---disable_query_log -# SELECT '######## new test sequence ########' as ""; -eval SELECT CONCAT('######## ','$my_stmt',' ########') as ""; ---enable_query_log +--echo +--echo ######## $my_stmt ######## ############################################################### @@ -49,11 +70,10 @@ let $_log_num_s= `select @aux`; ############################################################### # INSERT ############################################################### +--echo +--echo -------- switch to master ------- connection master; ---disable_query_log -SELECT '-------- switch to master -------' as ""; ---enable_query_log -# Maybe it would be smarter to use a table with autoincrement column. +# Maybe it would be smarter to use a table with an autoincrement column. 
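For reference, the calling convention described in the header of include/rpl_stmt_seq.inc above is exactly the one used throughout extra/rpl_tests/rpl_ddl.test earlier in this changeset: the caller fills in the statement to test and the expected commit behaviour on master and slave, then sources the include. With the debugging switches the header documents, one test sequence looks roughly like this (the DROP TABLE statement is just one of the sequences taken from rpl_ddl.test as an example):

# optional debug aids documented in the header above
let $show_binlog= 1;
let $manipulate= 1;
# one test sequence
let $my_stmt= DROP TABLE mysqltest1.t2;
let $my_master_commit= true;
let $my_slave_commit= true;
--source include/rpl_stmt_seq.inc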
let $MAX= `SELECT MAX(f1) FROM t1` ; eval INSERT INTO t1 SET f1= $MAX + 1; # results before DDL(to be tested) @@ -66,10 +86,9 @@ eval SHOW BINLOG EVENTS IN 'master-bin.$_log_num_s'; } sync_slave_with_master; +--echo +--echo -------- switch to slave -------- connection slave; ---disable_query_log -SELECT '-------- switch to slave --------' as ""; ---enable_query_log # results before DDL(to be tested) SELECT MAX(f1) FROM t1; if ($show_binlog) @@ -82,10 +101,9 @@ eval SHOW BINLOG EVENTS IN 'slave-bin.$_log_num_s'; ############################################################### # command to be tested ############################################################### +--echo +--echo -------- switch to master ------- connection master; ---disable_query_log -SELECT '-------- switch to master -------' as ""; ---enable_query_log eval $my_stmt; # Devaluate $my_stmt, to detect script bugs let $my_stmt= ERROR: YOU FORGOT TO FILL IN THE STATEMENT; @@ -99,10 +117,9 @@ eval SHOW BINLOG EVENTS IN 'master-bin.$_log_num_s'; } sync_slave_with_master; +--echo +--echo -------- switch to slave -------- connection slave; ---disable_query_log -SELECT '-------- switch to slave --------' as ""; ---enable_query_log # results after DDL(to be tested) SELECT MAX(f1) FROM t1; if ($show_binlog) @@ -115,10 +132,9 @@ eval SHOW BINLOG EVENTS IN 'slave-bin.$_log_num_s'; ############################################################### # ROLLBACK ############################################################### +--echo +--echo -------- switch to master ------- connection master; ---disable_query_log -SELECT '-------- switch to master -------' as ""; ---enable_query_log ROLLBACK; # results after final ROLLBACK SELECT MAX(f1) FROM t1; @@ -140,10 +156,9 @@ eval SHOW BINLOG EVENTS IN 'master-bin.$_log_num_s'; } sync_slave_with_master; +--echo +--echo -------- switch to slave -------- connection slave; ---disable_query_log -SELECT '-------- switch to slave --------' as ""; ---enable_query_log # results after final ROLLBACK SELECT MAX(f1) FROM t1; --disable_query_log @@ -172,19 +187,17 @@ if ($manipulate) # - flush the master and the slave log # ---> both start to write into new logs with incremented number # - increment $_log_num_n +--echo +--echo -------- switch to master ------- connection master; ---disable_query_log -SELECT '-------- switch to master -------' as ""; ---enable_query_log flush logs; # sleep 1; # eval SHOW BINLOG EVENTS IN 'master-bin.$_log_num_s'; sync_slave_with_master; +--echo +--echo -------- switch to slave -------- connection slave; ---disable_query_log -SELECT '-------- switch to slave --------' as ""; ---enable_query_log # the final content of the binary log flush logs; # The next sleep is urgent needed. @@ -195,7 +208,6 @@ flush logs; inc $_log_num_n; } +--echo +--echo -------- switch to master ------- connection master; ---disable_query_log -SELECT '-------- switch to master -------' as ""; ---enable_query_log diff --git a/mysql-test/include/rpl_stmt_seq2.inc b/mysql-test/include/rpl_stmt_seq2.inc deleted file mode 100644 index 7671a6a857c..00000000000 --- a/mysql-test/include/rpl_stmt_seq2.inc +++ /dev/null @@ -1,201 +0,0 @@ -# include/rpl_stmt_seq.inc -# -# Please be very careful when editing this routine, because the handling of -# the $variables is extreme sensitive. 
-# - -############################################################### -# Debug options : To debug this test script -############################################################### -let $show_binlog= 0; -let $manipulate= 1; - -######## The typical test sequence -# 1. INSERT without commit -# check table content of master and slave -# 2. EXECUTE the statement -# check table content of master and slave -# 3. ROLLBACK -# check table content of master and slave -# 4. flush the logs - -let $VERSION=`select version()`; - ---disable_query_log -# SELECT '######## new test sequence ########' as ""; -eval SELECT CONCAT('######## ','$my_stmt',' $engine_type',' ########') as ""; ---enable_query_log - - -############################################################### -# Predict the number of the current log -############################################################### -# Disable the logging of the log number computation. ---disable_query_log -# $_log_num_n should contain the number of the current binlog in numeric style. -# If this routine is called for the first time, $_log_num will not initialized -# and contain the value '' instead of '1'. So we will correct it here. -# -eval set @aux= IF('$_log_num_n' = '', '1', '$_log_num_n'); -let $_log_num_n= `SELECT @aux`; -eval set @aux= LPAD('$_log_num_n',6,'0'); -# SELECT @aux AS "@aux is"; -# -# $_log_num_s should contain the number of the current binlog in string style. -let $_log_num_s= `select @aux`; -# eval SELECT '$log_num' ; ---enable_query_log - -############################################################### -# INSERT -############################################################### -connection master; ---disable_query_log -SELECT '-------- switch to master -------' as ""; ---enable_query_log -# Maybe it would be smarter to use a table with autoincrement column. 
-let $MAX= `SELECT MAX(f1) FROM t1` ; -eval INSERT INTO t1 SET f1= $MAX + 1; -# results before DDL(to be tested) -SELECT MAX(f1) FROM t1; -if ($show_binlog) -{ ---replace_result $VERSION VERSION ---replace_column 2 # 5 # -eval SHOW BINLOG EVENTS IN 'master-bin.$_log_num_s'; -} -sync_slave_with_master; - -connection slave; ---disable_query_log -SELECT '-------- switch to slave --------' as ""; ---enable_query_log -# results before DDL(to be tested) -SELECT MAX(f1) FROM t1; -if ($show_binlog) -{ ---replace_result $VERSION VERSION ---replace_column 2 # 5 # -eval SHOW BINLOG EVENTS IN 'slave-bin.$_log_num_s'; -} - -############################################################### -# command to be tested -############################################################### -connection master; ---disable_query_log -SELECT '-------- switch to master -------' as ""; ---enable_query_log -eval $my_stmt $engine_type; -# Devaluate $my_stmt, to detect script bugs -let $my_stmt= ERROR: YOU FORGOT TO FILL IN THE STATEMENT; -# results after DDL(to be tested) -SELECT MAX(f1) FROM t1; -if ($show_binlog) -{ ---replace_result $VERSION VERSION ---replace_column 2 # 5 # -eval SHOW BINLOG EVENTS IN 'master-bin.$_log_num_s'; -} -sync_slave_with_master; - -connection slave; ---disable_query_log -SELECT '-------- switch to slave --------' as ""; ---enable_query_log -# results after DDL(to be tested) -SELECT MAX(f1) FROM t1; -if ($show_binlog) -{ ---replace_result $VERSION VERSION ---replace_column 2 # 5 # -eval SHOW BINLOG EVENTS IN 'slave-bin.$_log_num_s'; -} - -############################################################### -# ROLLBACK -############################################################### -connection master; ---disable_query_log -SELECT '-------- switch to master -------' as ""; ---enable_query_log -ROLLBACK; -# results after final ROLLBACK -SELECT MAX(f1) FROM t1; -# Try to detect if the DDL command caused that the INSERT is committed -# $MAX holds the highest/last value just before the insert of MAX + 1 ---disable_query_log -eval SELECT CONCAT(CONCAT('TEST-INFO: MASTER: The INSERT is ', - IF(MAX(f1) = $MAX + 1, 'committed', 'not committed')), - IF((MAX(f1) = $MAX + 1) XOR NOT $my_master_commit, - ' (Succeeded)', - ' (Failed)')) AS "" - FROM mysqltest1.t1; ---enable_query_log -if ($show_binlog) -{ ---replace_result $VERSION VERSION ---replace_column 2 # 5 # -eval SHOW BINLOG EVENTS IN 'master-bin.$_log_num_s'; -} -sync_slave_with_master; - -connection slave; ---disable_query_log -SELECT '-------- switch to slave --------' as ""; ---enable_query_log -# results after final ROLLBACK -SELECT MAX(f1) FROM t1; ---disable_query_log -eval SELECT CONCAT(CONCAT('TEST-INFO: SLAVE: The INSERT is ', - IF(MAX(f1) = $MAX + 1, 'committed', 'not committed')), - IF((MAX(f1) = $MAX + 1) XOR NOT $my_slave_commit, - ' (Succeeded)', - ' (Failed)')) AS "" - FROM mysqltest1.t1; ---enable_query_log -if ($show_binlog) -{ ---replace_result $VERSION VERSION ---replace_column 2 # 5 # -eval SHOW BINLOG EVENTS IN 'slave-bin.$_log_num_s'; -} - -############################################################### -# Manipulate binlog -############################################################### -if ($manipulate) -{ -#### Manipulate the binary logs, -# so that the output of SHOW BINLOG EVENTS IN <current log> -# contains only commands of the current test sequence. 
-# - flush the master and the slave log -# ---> both start to write into new logs with incremented number -# - increment $_log_num_n -connection master; ---disable_query_log -SELECT '-------- switch to master -------' as ""; ---enable_query_log -flush logs; -# sleep 1; -# eval SHOW BINLOG EVENTS IN 'master-bin.$_log_num_s'; -sync_slave_with_master; - -connection slave; ---disable_query_log -SELECT '-------- switch to slave --------' as ""; ---enable_query_log -# the final content of the binary log -flush logs; -# The next sleep is urgent needed. -# Without this sleep the slaves crashes often, when the SHOW BINLOG -# is executed. :-( -# sleep 1; -# eval SHOW BINLOG EVENTS IN 'slave-bin.$_log_num_s'; -inc $_log_num_n; -} - -connection master; ---disable_query_log -SELECT '-------- switch to master -------' as ""; ---enable_query_log diff --git a/mysql-test/include/rpl_udf.inc b/mysql-test/include/rpl_udf.inc new file mode 100644 index 00000000000..d9e19cda484 --- /dev/null +++ b/mysql-test/include/rpl_udf.inc @@ -0,0 +1,189 @@ +##################################################################### +# Author: Chuck Bell # +# Date: 2006-12-21 # +# Purpose: To test that UDFs are replicated in both row based and # +# statement based format. This tests work completed in WL#3629. # +# # +# This test is designed to exercise two of the three types of UDFs: # +# 1) UDFs via loadable libraries, and 2) UDFs with a SQL body. # +##################################################################### + +--source include/have_udf.inc + +# +# To run this tests the "sql/udf_example.c" need to be compiled into +# udf_example.so and LD_LIBRARY_PATH should be setup to point out where +# the library are. +# + +connection master; +--disable_warnings +drop table if exists t1; +--enable_warnings + +# +# Test 1) Test UDFs via loadable libraries +# +--echo "*** Test 1) Test UDFs via loadable libraries *** +--echo "Running on the master" +--enable_info +--replace_result $UDF_EXAMPLE_LIB UDF_EXAMPLE_LIB +eval CREATE FUNCTION myfunc_double RETURNS REAL SONAME "$UDF_EXAMPLE_LIB"; +--replace_result $UDF_EXAMPLE_LIB UDF_EXAMPLE_LIB +eval CREATE FUNCTION myfunc_int RETURNS INTEGER SONAME "$UDF_EXAMPLE_LIB"; +--replace_result $UDF_EXAMPLE_LIB UDF_EXAMPLE_LIB +--error ER_CANT_FIND_DL_ENTRY +eval CREATE FUNCTION myfunc_nonexist RETURNS INTEGER SONAME "$UDF_EXAMPLE_LIB"; +--replace_column 3 UDF_LIB +SELECT * FROM mysql.func; +--disable_info + +save_master_pos; +connection slave; +sync_with_master; + +# Check to see that UDF CREATE statements were replicated +--echo "Running on the slave" +--enable_info +--replace_column 3 UDF_LIB +SELECT * FROM mysql.func; +--disable_info + +connection master; + +# Use the UDFs to do something +--echo "Running on the master" +--enable_info +eval CREATE TABLE t1(sum INT, price FLOAT(24)) ENGINE=$engine_type; +INSERT INTO t1 VALUES(myfunc_int(100), myfunc_double(50.00)); +INSERT INTO t1 VALUES(myfunc_int(10), myfunc_double(5.00)); +INSERT INTO t1 VALUES(myfunc_int(200), myfunc_double(25.00)); +INSERT INTO t1 VALUES(myfunc_int(1), myfunc_double(500.00)); +SELECT * FROM t1 ORDER BY sum; +--disable_info + +sync_slave_with_master; + +# Check to see if data was replicated +--echo "Running on the slave" +--enable_info +SELECT * FROM t1 ORDER BY sum; + +# Check to see that the functions are available for execution on the slave +SELECT myfunc_int(25); +SELECT myfunc_double(75.00); +--disable_info + +connection master; + +# Drop the functions +--echo "Running on the master" +--enable_info +DROP FUNCTION 
myfunc_double; +DROP FUNCTION myfunc_int; +SELECT * FROM mysql.func; +--disable_info + +sync_slave_with_master; + +# Check to see if the UDFs were dropped on the slave +--echo "Running on the slave" +--enable_info +SELECT * FROM mysql.func; +--disable_info + +connection master; + +# Cleanup +--echo "Running on the master" +--enable_info +DROP TABLE t1; +--disable_info + +# +# Test 2) Test UDFs with SQL body +# +--echo "*** Test 2) Test UDFs with SQL body *** +--echo "Running on the master" +--enable_info +CREATE FUNCTION myfuncsql_int(i INT) RETURNS INTEGER DETERMINISTIC RETURN i; +CREATE FUNCTION myfuncsql_double(d DOUBLE) RETURNS INTEGER DETERMINISTIC RETURN d * 2.00; +SELECT db, name, type, param_list, body, comment FROM mysql.proc WHERE db = 'test' AND name LIKE 'myfuncsql%'; +--disable_info + +sync_slave_with_master; + +# Check to see that UDF CREATE statements were replicated +--echo "Running on the slave" +--enable_info +SELECT db, name, type, param_list, body, comment FROM mysql.proc WHERE db = 'test' AND name LIKE 'myfuncsql%'; +--disable_info + +connection master; + +# Use the UDFs to do something +--echo "Running on the master" +--enable_info +eval CREATE TABLE t1(sum INT, price FLOAT(24)) ENGINE=$engine_type; +INSERT INTO t1 VALUES(myfuncsql_int(100), myfuncsql_double(50.00)); +INSERT INTO t1 VALUES(myfuncsql_int(10), myfuncsql_double(5.00)); +INSERT INTO t1 VALUES(myfuncsql_int(200), myfuncsql_double(25.00)); +INSERT INTO t1 VALUES(myfuncsql_int(1), myfuncsql_double(500.00)); +SELECT * FROM t1 ORDER BY sum; +--disable_info + +sync_slave_with_master; + +# Check to see if data was replicated +--echo "Running on the slave" +--enable_info +SELECT * FROM t1 ORDER BY sum; +--disable_info + +connection master; + +# Modify the UDFs to add a comment +--echo "Running on the master" +--enable_info +ALTER FUNCTION myfuncsql_int COMMENT "This was altered."; +ALTER FUNCTION myfuncsql_double COMMENT "This was altered."; +SELECT db, name, type, param_list, body, comment FROM mysql.proc WHERE db = 'test' AND name LIKE 'myfuncsql%'; +--disable_info + +sync_slave_with_master; + +# Check to see if data was replicated +--echo "Running on the slave" +--enable_info +SELECT db, name, type, param_list, body, comment FROM mysql.proc WHERE db = 'test' AND name LIKE 'myfuncsql%'; + +# Check to see that the functions are available for execution on the slave +SELECT myfuncsql_int(25); +SELECT myfuncsql_double(75.00); +--disable_info + +connection master; + +# Drop the functions +--echo "Running on the master" +--enable_info +DROP FUNCTION myfuncsql_double; +DROP FUNCTION myfuncsql_int; +SELECT db, name, type, param_list, body, comment FROM mysql.proc WHERE db = 'test' AND name LIKE 'myfuncsql%'; +--disable_info + +sync_slave_with_master; + +# Check to see if the UDFs were dropped on the slave +--echo "Running on the slave" +--enable_info +SELECT db, name, type, param_list, body, comment FROM mysql.proc WHERE db = 'test' AND name LIKE 'myfuncsql%'; +--disable_info + +connection master; + +# Cleanup +--echo "Running on the master" +--enable_info +DROP TABLE t1; +--disable_info diff --git a/mysql-test/include/show_binlog_events.inc b/mysql-test/include/show_binlog_events.inc index ae848d10687..7377b4a0fed 100644 --- a/mysql-test/include/show_binlog_events.inc +++ b/mysql-test/include/show_binlog_events.inc @@ -1,5 +1,5 @@ ---let $binlog_start=102 +--let $binlog_start=106 --replace_result $binlog_start <binlog_start> --replace_column 2 # 4 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ +--replace_regex 
/\/\* xid=.* \*\//\/* XID *\// /table_id: [0-9]+/table_id: #/ --eval show binlog events from $binlog_start diff --git a/mysql-test/install_test_db.sh b/mysql-test/install_test_db.sh index 74d95fdc521..e4df8f619cc 100644 --- a/mysql-test/install_test_db.sh +++ b/mysql-test/install_test_db.sh @@ -22,7 +22,7 @@ if [ x$1 = x"--bin" ]; then BINARY_DIST=1 bindir=../bin - scriptdir=../bin + scriptdir=bin libexecdir=../libexec # Check if it's a binary distribution or a 'make install' @@ -33,7 +33,7 @@ if [ x$1 = x"--bin" ]; then then execdir=../../sbin bindir=../../bin - scriptdir=../../bin + scriptdir=../bin libexecdir=../../libexec else execdir=../bin @@ -43,7 +43,7 @@ else execdir=../sql bindir=../client fix_bin=. - scriptdir=../scripts + scriptdir=scripts libexecdir=../libexec fi @@ -99,15 +99,14 @@ if [ x$BINARY_DIST = x1 ] ; then basedir=.. else basedir=. -EXTRA_ARG="--language=../sql/share/english/ --character-sets-dir=../sql/share/charsets/" +EXTRA_ARG="--windows" fi -mysqld_boot="${MYSQLD_BOOTSTRAP-$mysqld}" +INSTALL_CMD="$scriptdir/mysql_install_db --no-defaults $EXTRA_ARG --basedir=$basedir --datadir=mysql-test/$ldata --srcdir=." +echo "running $INSTALL_CMD" -mysqld_boot="$mysqld_boot --no-defaults --bootstrap --skip-grant-tables --basedir=$basedir --datadir=$ldata --skip-innodb --skip-ndbcluster --tmpdir=. $EXTRA_ARG" -echo "running $mysqld_boot" - -if $scriptdir/mysql_create_system_tables test $mdata $hostname | $mysqld_boot +cd .. +if $INSTALL_CMD then exit 0 else diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index 7a825af174a..43ed90534d4 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -354,7 +354,7 @@ sub do_before_start_slave ($); sub ndbd_start ($$$); sub ndb_mgmd_start ($); sub mysqld_start ($$$); -sub mysqld_arguments ($$$$$); +sub mysqld_arguments ($$$$); sub stop_all_servers (); sub run_mysqltest ($); sub usage ($); @@ -1495,9 +1495,17 @@ sub executable_setup () { if (!$opt_extern) { - # Look for SQL scripts directory - $path_sql_dir= mtr_path_exists("$glob_basedir/share", - "$glob_basedir/scripts"); + # Look for SQL scripts directory + if ( mtr_file_exists("$path_share/mysql_system_tables.sql") ne "") + { + # The SQL scripts are in path_share + $path_sql_dir= $path_share; + } + else + { + $path_sql_dir= mtr_path_exists("$glob_basedir/share", + "$glob_basedir/scripts"); + } if ( $mysql_version_id >= 50100 ) { @@ -1877,8 +1885,7 @@ sub environment_setup () { mtr_native_path($exe_mysqlslap) . " -uroot " . "--port=$master->[0]->{'port'} " . - "--socket=$master->[0]->{'path_sock'} --password= " . 
- "--lock-directory=$opt_tmpdir"; + "--socket=$master->[0]->{'path_sock'} --password= "; if ( $opt_debug ) { @@ -3628,21 +3635,20 @@ sub do_before_start_slave ($) { } -sub mysqld_arguments ($$$$$) { +sub mysqld_arguments ($$$$) { my $args= shift; - my $type= shift; - my $idx= shift; + my $mysqld= shift; my $extra_opt= shift; my $slave_master_info= shift; + my $idx= $mysqld->{'idx'}; my $sidx= ""; # Index as string, 0 is empty string - if ( $idx > 0 ) + if ( $idx> 0 ) { - $sidx= "$idx"; + $sidx= $idx; } my $prefix= ""; # If mysqltest server arg - if ( $glob_use_embedded_server ) { $prefix= "--server-arg="; @@ -3656,8 +3662,16 @@ sub mysqld_arguments ($$$$$) { if ( $mysql_version_id >= 50036) { - # Prevent the started mysqld to access files outside of vardir - mtr_add_arg($args, "%s--secure-file-priv=%s", $prefix, $opt_vardir); + # By default, prevent the started mysqld to access files outside of vardir + my $secure_file_dir= $opt_vardir; + if ( $opt_suite ne "main" ) + { + # When running a suite other than default allow the mysqld + # access to subdirs of mysql-test/ in order to make it possible + # to "load data" from the suites data/ directory. + $secure_file_dir= $glob_mysql_test_dir; + } + mtr_add_arg($args, "%s--secure-file-priv=%s", $prefix, $secure_file_dir); } if ( $mysql_version_id >= 50000 ) @@ -3679,36 +3693,53 @@ sub mysqld_arguments ($$$$$) { } } + mtr_add_arg($args, "%s--pid-file=%s", $prefix, + $mysqld->{'path_pid'}); + + mtr_add_arg($args, "%s--port=%d", $prefix, + $mysqld->{'port'}); + + mtr_add_arg($args, "%s--socket=%s", $prefix, + $mysqld->{'path_sock'}); + + mtr_add_arg($args, "%s--datadir=%s", $prefix, + $mysqld->{'path_myddir'}); + + + if ( $mysql_version_id >= 50106 ) + { + # Turn on logging to bothe tables and file + mtr_add_arg($args, "%s--log-output=table,file", $prefix); + } + + mtr_add_arg($args, "%s--log=%s", $prefix, $mysqld->{'path_mylog'}); + + # Check if "extra_opt" contains --skip-log-bin my $skip_binlog= grep(/^--skip-log-bin/, @$extra_opt); - if ( $type eq 'master' ) + if ( $mysqld->{'type'} eq 'master' ) { - my $id= $idx > 0 ? $idx + 101 : 1; - if (! ($opt_skip_master_binlog || $skip_binlog) ) { mtr_add_arg($args, "%s--log-bin=%s/log/master-bin%s", $prefix, $opt_vardir, $sidx); } - mtr_add_arg($args, "%s--pid-file=%s", $prefix, - $master->[$idx]->{'path_pid'}); - mtr_add_arg($args, "%s--port=%d", $prefix, - $master->[$idx]->{'port'}); - mtr_add_arg($args, "%s--server-id=%d", $prefix, $id); - mtr_add_arg($args, "%s--socket=%s", $prefix, - $master->[$idx]->{'path_sock'}); - mtr_add_arg($args, "%s--innodb_data_file_path=ibdata1:10M:autoextend", $prefix); + + mtr_add_arg($args, "%s--server-id=%d", $prefix, + $idx > 0 ? 
$idx + 101 : 1); + + mtr_add_arg($args, "%s--innodb_data_file_path=ibdata1:10M:autoextend", + $prefix); + mtr_add_arg($args, "%s--local-infile", $prefix); - mtr_add_arg($args, "%s--datadir=%s", $prefix, - $master->[$idx]->{'path_myddir'}); if ( $idx > 0 or !$use_innodb) { mtr_add_arg($args, "%s--skip-innodb", $prefix); } - my $cluster= $clusters->[$master->[$idx]->{'cluster'}]; + my $cluster= $clusters->[$mysqld->{'cluster'}]; if ( $opt_skip_ndbcluster || !$cluster->{'pid'}) { @@ -3725,28 +3756,14 @@ sub mysqld_arguments ($$$$$) { } } - if ( $mysql_version_id <= 50106 ) - { - # Force mysqld to use log files up until 5.1.6 - mtr_add_arg($args, "%s--log=%s", $prefix, $master->[0]->{'path_mylog'}); - } - else - { - # Turn on logging, will be sent to tables - mtr_add_arg($args, "%s--log=", $prefix); - } - mtr_add_arg($args, "%s--plugin_dir=%s", $prefix, dirname($lib_example_plugin)); } - - if ( $type eq 'slave' ) + else { - my $slave_server_id= 2 + $idx; - my $slave_rpl_rank= $slave_server_id; + mtr_error("unknown mysqld type") + unless $mysqld->{'type'} eq 'slave'; - mtr_add_arg($args, "%s--datadir=%s", $prefix, - $slave->[$idx]->{'path_myddir'}); mtr_add_arg($args, "%s--init-rpl-role=slave", $prefix); if (! ( $opt_skip_slave_binlog || $skip_binlog )) { @@ -3756,18 +3773,14 @@ sub mysqld_arguments ($$$$$) { } mtr_add_arg($args, "%s--master-retry-count=10", $prefix); - mtr_add_arg($args, "%s--pid-file=%s", $prefix, - $slave->[$idx]->{'path_pid'}); - mtr_add_arg($args, "%s--port=%d", $prefix, - $slave->[$idx]->{'port'}); + mtr_add_arg($args, "%s--relay-log=%s/log/slave%s-relay-bin", $prefix, $opt_vardir, $sidx); mtr_add_arg($args, "%s--report-host=127.0.0.1", $prefix); mtr_add_arg($args, "%s--report-port=%d", $prefix, - $slave->[$idx]->{'port'}); + $mysqld->{'port'}); mtr_add_arg($args, "%s--report-user=root", $prefix); mtr_add_arg($args, "%s--skip-innodb", $prefix); - mtr_add_arg($args, "%s--skip-ndbcluster", $prefix); mtr_add_arg($args, "%s--skip-slave-start", $prefix); # Directory where slaves find the dumps generated by "load data" @@ -3776,8 +3789,6 @@ sub mysqld_arguments ($$$$$) { my $slave_load_path= "../tmp"; mtr_add_arg($args, "%s--slave-load-tmpdir=%s", $prefix, $slave_load_path); - mtr_add_arg($args, "%s--socket=%s", $prefix, - $slave->[$idx]->{'path_sock'}); mtr_add_arg($args, "%s--set-variable=slave_net_timeout=10", $prefix); if ( @$slave_master_info ) @@ -3795,13 +3806,16 @@ sub mysqld_arguments ($$$$$) { mtr_add_arg($args, "%s--master-password=", $prefix); mtr_add_arg($args, "%s--master-port=%d", $prefix, $master->[0]->{'port'}); # First master + + my $slave_server_id= 2 + $idx; + my $slave_rpl_rank= $slave_server_id; mtr_add_arg($args, "%s--server-id=%d", $prefix, $slave_server_id); mtr_add_arg($args, "%s--rpl-recovery-rank=%d", $prefix, $slave_rpl_rank); } if ( $opt_skip_ndbcluster_slave || - $slave->[$idx]->{'cluster'} == -1 || - !$clusters->[$slave->[$idx]->{'cluster'}]->{'pid'} ) + $mysqld->{'cluster'} == -1 || + !$clusters->[$mysqld->{'cluster'}]->{'pid'} ) { mtr_add_arg($args, "%s--skip-ndbcluster", $prefix); } @@ -3809,41 +3823,21 @@ sub mysqld_arguments ($$$$$) { { mtr_add_arg($args, "%s--ndbcluster", $prefix); mtr_add_arg($args, "%s--ndb-connectstring=%s", $prefix, - $clusters->[$slave->[$idx]->{'cluster'}]->{'connect_string'}); + $clusters->[$mysqld->{'cluster'}]->{'connect_string'}); + if ( $mysql_version_id >= 50100 ) { mtr_add_arg($args, "%s--ndb-extra-logging", $prefix); } } - - if ( $mysql_version_id <= 50106 ) - { - # Force mysqld to use log files up until 5.1.6 
- mtr_add_arg($args, "%s--log=%s", $prefix, $master->[0]->{'path_mylog'}); - } - else - { - # Turn on logging, will be sent to tables - mtr_add_arg($args, "%s--log=", $prefix); - } - } # end slave if ( $opt_debug ) { - if ( $type eq 'master' ) - { - mtr_add_arg($args, "%s--debug=d:t:i:A,%s/log/master%s.trace", - $prefix, $path_vardir_trace, $sidx); - } - if ( $type eq 'slave' ) - { - mtr_add_arg($args, "%s--debug=d:t:i:A,%s/log/slave%s.trace", - $prefix, $path_vardir_trace, $sidx); - } + mtr_add_arg($args, "%s--debug=d:t:i:A,%s/log/%s%s.trace", + $prefix, $path_vardir_trace, $mysqld->{'type'}, $sidx); } - # FIXME always set nowdays??? SMALL_SERVER mtr_add_arg($args, "%s--key_buffer_size=1M", $prefix); mtr_add_arg($args, "%s--sort_buffer=256K", $prefix); mtr_add_arg($args, "%s--max_heap_table_size=1M", $prefix); @@ -3869,18 +3863,10 @@ sub mysqld_arguments ($$$$$) { mtr_add_arg($args, "%s--gdb", $prefix); } - # If we should run all tests cases, we will use a local server for that - - if ( -w "/" ) - { - # We are running as root; We need to add the --root argument - mtr_add_arg($args, "%s--user=root", $prefix); - } - my $found_skip_core= 0; foreach my $arg ( @opt_extra_mysqld_opt, @$extra_opt ) { - # Allow --skip-core-file to be set in master.opt file + # Allow --skip-core-file to be set in <testname>-[master|slave].opt file if ($arg eq "--skip-core-file") { $found_skip_core= 1; @@ -3904,7 +3890,7 @@ sub mysqld_arguments ($$$$$) { mtr_add_arg($args, "%s--rpl-recovery-rank=1", $prefix); mtr_add_arg($args, "%s--init-rpl-role=master", $prefix); } - elsif ( $type eq 'master' ) + elsif ( $mysqld->{'type'} eq 'master' ) { mtr_add_arg($args, "%s--open-files-limit=1024", $prefix); } @@ -3955,7 +3941,7 @@ sub mysqld_start ($$$) { valgrind_arguments($args, \$exe); } - mysqld_arguments($args,$type,$idx,$extra_opt,$slave_master_info); + mysqld_arguments($args,$mysqld,$extra_opt,$slave_master_info); if ( $opt_gdb || $opt_manual_gdb) { @@ -4700,7 +4686,7 @@ sub run_mysqltest ($) { if ( $glob_use_embedded_server ) { - mysqld_arguments($args,'master',0,$tinfo->{'master_opt'},[]); + mysqld_arguments($args,$master->[0],$tinfo->{'master_opt'},[]); } # ---------------------------------------------------------------------- diff --git a/mysql-test/ndb/ndbcluster.sh b/mysql-test/ndb/ndbcluster.sh index 1e25cd8047e..2d550294c84 100644 --- a/mysql-test/ndb/ndbcluster.sh +++ b/mysql-test/ndb/ndbcluster.sh @@ -35,8 +35,8 @@ if [ -d ../sql ] ; then exec_mgmtsrvr=$ndbtop/src/mgmsrv/ndb_mgmd exec_waiter=$ndbtop/tools/ndb_waiter exec_test=$ndbtop/tools/ndb_test_platform - exec_test_ndberror= exec_test_ndberror=$ndbtop/src/ndbapi/ndberror_check + exec_mgmtclient=$ndbtop/src/mgmclient/ndb_mgm else BINARY_DIST=1 if test -x "$BASEDIR/libexec/ndbd" diff --git a/mysql-test/r/binlog_row_binlog.result b/mysql-test/r/binlog_row_binlog.result index 769f23ea86c..6fcaad010d2 100644 --- a/mysql-test/r/binlog_row_binlog.result +++ b/mysql-test/r/binlog_row_binlog.result @@ -8,25 +8,25 @@ commit; begin; insert t2 values (5); commit; -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # use `test`; create table t1 (a int) engine=innodb -master-bin.000001 # Query 1 # use `test`; create table t2 (a int) engine=innodb -master-bin.000001 # Query 1 # use `test`; BEGIN -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Xid 1 # COMMIT /* xid= */ 
-master-bin.000001 # Query 1 # use `test`; BEGIN -master-bin.000001 # Table_map 1 # table_id: # (test.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query # # use `test`; create table t1 (a int) engine=innodb +master-bin.000001 # Query # # use `test`; create table t2 (a int) engine=innodb +master-bin.000001 # Query # # use `test`; BEGIN +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Query # # use `test`; BEGIN +master-bin.000001 # Table_map # # table_id: # (test.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ drop table t1,t2; reset master; create table t1 (n int) engine=innodb; begin; commit; drop table t1; -show binlog events in 'master-bin.000001' from 102; +show binlog events in 'master-bin.000001' from 106; Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Query 1 # use `test`; create table t1 (n int) engine=innodb master-bin.000001 # Query 1 # use `test`; BEGIN @@ -232,7 +232,7 @@ master-bin.000001 # Table_map 1 # table_id: # (test.t1) master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F master-bin.000001 # Xid 1 # COMMIT /* xid= */ master-bin.000001 # Rotate 1 # master-bin.000002;pos=4 -show binlog events in 'master-bin.000002' from 102; +show binlog events in 'master-bin.000002' from 106; Log_name Pos Event_type Server_id End_log_pos Info master-bin.000002 # Query 1 # use `test`; drop table t1 reset master; @@ -254,58 +254,58 @@ INSERT INTO user SET host='localhost', user='@#@', password=password('Just a tes UPDATE user SET password=password('Another password') WHERE host='localhost' AND user='@#@'; DELETE FROM user WHERE host='localhost' AND user='@#@'; use test; -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # use `test`; create table t1 (id tinyint auto_increment primary key) -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `test`; drop table t1 -master-bin.000001 # Query 1 # use `test`; create table t1 (a int) -master-bin.000001 # Query 1 # use `test`; CREATE TABLE IF NOT EXISTS `t2` ( +master-bin.000001 # Query # # use `test`; create table t1 (id tinyint auto_increment primary key) +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `test`; drop table t1 +master-bin.000001 # Query # # use `test`; create table t1 (a int) +master-bin.000001 # Query # # use `test`; CREATE TABLE IF NOT EXISTS `t2` ( `a` int(11) DEFAULT NULL ) -master-bin.000001 # Query 1 # use `test`; CREATE TABLE IF NOT EXISTS `t3` ( +master-bin.000001 # Query # # use `test`; CREATE TABLE IF NOT EXISTS `t3` ( `a` int(11) DEFAULT NULL ) -master-bin.000001 # Table_map 1 # table_id: # (mysql.user) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysql.user) -master-bin.000001 # Update_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysql.user) -master-bin.000001 # Delete_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysql.user) 
+master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysql.user) +master-bin.000001 # Update_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysql.user) +master-bin.000001 # Delete_rows # # table_id: # flags: STMT_END_F drop table t1,t2,t3,tt1; create table t1 (a int not null auto_increment, primary key (a)) engine=myisam; set @@session.auto_increment_increment=1, @@session.auto_increment_offset=1; insert delayed into t1 values (207); insert delayed into t1 values (null); insert delayed into t1 values (300); -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # use `test`; create table t1 (id tinyint auto_increment primary key) -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `test`; drop table t1 -master-bin.000001 # Query 1 # use `test`; create table t1 (a int) -master-bin.000001 # Query 1 # use `test`; CREATE TABLE IF NOT EXISTS `t2` ( +master-bin.000001 # Query # # use `test`; create table t1 (id tinyint auto_increment primary key) +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `test`; drop table t1 +master-bin.000001 # Query # # use `test`; create table t1 (a int) +master-bin.000001 # Query # # use `test`; CREATE TABLE IF NOT EXISTS `t2` ( `a` int(11) DEFAULT NULL ) -master-bin.000001 # Query 1 # use `test`; CREATE TABLE IF NOT EXISTS `t3` ( +master-bin.000001 # Query # # use `test`; CREATE TABLE IF NOT EXISTS `t3` ( `a` int(11) DEFAULT NULL ) -master-bin.000001 # Table_map 1 # table_id: # (mysql.user) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysql.user) -master-bin.000001 # Update_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysql.user) -master-bin.000001 # Delete_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `test`; DROP TABLE `t1`,`t2`,`t3` /* generated by server */ -master-bin.000001 # Query 1 # use `test`; create table t1 (a int not null auto_increment, primary key (a)) engine=myisam -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysql.user) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysql.user) +master-bin.000001 # Update_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysql.user) +master-bin.000001 # Delete_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `test`; DROP TABLE `t1`,`t2`,`t3` /* generated by server */ +master-bin.000001 # Query # # use `test`; create table t1 (a int not null auto_increment, primary key (a)) engine=myisam +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # 
Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F insert delayed into t1 values (null),(null),(null),(null); insert delayed into t1 values (null),(null),(400),(null); 11 == 11 @@ -323,3 +323,17 @@ a 400 401 drop table t1; +reset master; +drop table if exists t3; +create table t3 (a int(11) NOT NULL AUTO_INCREMENT, b text, PRIMARY KEY (a) ) engine=innodb; +show master status; +File Position Binlog_Do_DB Binlog_Ignore_DB +master-bin.000001 346 +insert into t3(b) values ('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'); +insert into t3(b) values ('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'); +insert into t3(b) values 
('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'); +insert into t3(b) values ('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'); +show master status /* must show new binlog index after rotating */; +File Position Binlog_Do_DB Binlog_Ignore_DB +master-bin.000002 106 +drop table t3; diff --git a/mysql-test/r/binlog_row_blackhole.result b/mysql-test/r/binlog_row_blackhole.result index f370232e2c3..8e90ac4f30b 100644 --- a/mysql-test/r/binlog_row_blackhole.result +++ b/mysql-test/r/binlog_row_blackhole.result @@ -104,23 +104,22 @@ select * from t2; a select * from t3; a -show binlog events; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Format_desc 1 # Server ver: VERSION, Binlog ver: 4 -master-bin.000001 # Query 1 # use `test`; drop table t1,t2 -master-bin.000001 # Query 1 # use `test`; create table t1 (a int) engine=blackhole -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `test`; create table t2 (a varchar(200)) engine=blackhole -master-bin.000001 # Table_map 1 # table_id: # (test.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `test`; alter table t1 add b int -master-bin.000001 # Query 1 # use `test`; alter table t1 drop b -master-bin.000001 # Query 1 # use `test`; create table 
t3 like t1 +master-bin.000001 # Query # # use `test`; drop table t1,t2 +master-bin.000001 # Query # # use `test`; create table t1 (a int) engine=blackhole +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `test`; create table t2 (a varchar(200)) engine=blackhole +master-bin.000001 # Table_map # # table_id: # (test.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `test`; alter table t1 add b int +master-bin.000001 # Query # # use `test`; alter table t1 drop b +master-bin.000001 # Query # # use `test`; create table t3 like t1 drop table t1,t2,t3; CREATE TABLE t1(a INT, b INT) ENGINE=BLACKHOLE; DELETE FROM t1 WHERE a=10; @@ -143,12 +142,11 @@ start transaction; insert into t1 values(2); rollback; set autocommit=1; -show binlog events; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Format_desc 1 # Server ver: VERSION, Binlog ver: 4 -master-bin.000001 # Query 1 # use `test`; create table t1 (a int) engine=blackhole -master-bin.000001 # Query 1 # use `test`; BEGIN -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `test`; COMMIT +master-bin.000001 # Query # # use `test`; create table t1 (a int) engine=blackhole +master-bin.000001 # Query # # use `test`; BEGIN +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `test`; COMMIT drop table if exists t1; diff --git a/mysql-test/r/binlog_row_ctype_ucs.result b/mysql-test/r/binlog_row_ctype_ucs.result index 4eeff79e13a..4f4e7bcedd7 100644 --- a/mysql-test/r/binlog_row_ctype_ucs.result +++ b/mysql-test/r/binlog_row_ctype_ucs.result @@ -3,10 +3,10 @@ create table t2 (c char(30)) charset=ucs2; set @v=convert('abc' using ucs2); reset master; insert into t2 values (@v); -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 102 Table_map 1 141 table_id: # (test.t2) -master-bin.000001 141 Write_rows 1 231 table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (test.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F flush logs; /*!40019 SET @@session.max_insert_delayed_threads=0*/; /*!50003 SET @OLD_COMPLETION_TYPE=@@COMPLETION_TYPE,COMPLETION_TYPE=0*/; diff --git a/mysql-test/r/binlog_row_drop_tmp_tbl.result b/mysql-test/r/binlog_row_drop_tmp_tbl.result index 2b09fe069e3..503076d66d9 100644 --- a/mysql-test/r/binlog_row_drop_tmp_tbl.result +++ b/mysql-test/r/binlog_row_drop_tmp_tbl.result @@ -11,8 +11,7 @@ get_lock("a",10) select get_lock("a",10); get_lock("a",10) 1 -show binlog events; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Format_desc 1 # Server ver: VERSION, Binlog ver: 4 -master-bin.000001 # Query 1 # create database `drop-temp+table-test` +master-bin.000001 # Query # # create database `drop-temp+table-test` drop database `drop-temp+table-test`; diff --git 
a/mysql-test/r/binlog_row_insert_select.result b/mysql-test/r/binlog_row_insert_select.result index 14cef6709b6..cd6ddafc47b 100644 --- a/mysql-test/r/binlog_row_insert_select.result +++ b/mysql-test/r/binlog_row_insert_select.result @@ -6,11 +6,10 @@ insert into t2 values(1),(2); reset master; insert into t1 select * from t2; ERROR 23000: Duplicate entry '2' for key 'a' -show binlog events; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 4 Format_desc 1 102 Server ver: VERSION, Binlog ver: 4 -master-bin.000001 102 Table_map 1 141 table_id: # (test.t1) -master-bin.000001 141 Write_rows 1 175 table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F select * from t1; a 1 @@ -21,7 +20,6 @@ insert into t1 values(1),(1); reset master; create table t2(unique(a)) select a from t1; ERROR 23000: Duplicate entry '1' for key 'a' -show binlog events; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 4 Format_desc 1 102 Server ver: VERSION, Binlog ver: 4 drop table t1; diff --git a/mysql-test/r/binlog_row_mix_innodb_myisam.result b/mysql-test/r/binlog_row_mix_innodb_myisam.result index 185ca33d4db..6ac942176c7 100644 --- a/mysql-test/r/binlog_row_mix_innodb_myisam.result +++ b/mysql-test/r/binlog_row_mix_innodb_myisam.result @@ -6,14 +6,14 @@ begin; insert into t1 values(1); insert into t2 select * from t1; commit; -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # use `test`; BEGIN -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (test.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query # # use `test`; BEGIN +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (test.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ delete from t1; delete from t2; reset master; @@ -23,14 +23,14 @@ insert into t2 select * from t1; rollback; Warnings: Warning 1196 Some non-transactional changed tables couldn't be rolled back -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # use `test`; BEGIN -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (test.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `test`; ROLLBACK +master-bin.000001 # Query # # use `test`; BEGIN +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (test.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `test`; ROLLBACK delete from t1; delete from t2; reset master; @@ -43,18 +43,18 @@ rollback to savepoint my_savepoint; Warnings: Warning 1196 Some non-transactional changed tables couldn't be rolled back commit; 
-show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # use `test`; BEGIN -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `test`; savepoint my_savepoint -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (test.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `test`; rollback to savepoint my_savepoint -master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query # # use `test`; BEGIN +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `test`; savepoint my_savepoint +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (test.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `test`; rollback to savepoint my_savepoint +master-bin.000001 # Xid # # COMMIT /* XID */ delete from t1; delete from t2; reset master; @@ -72,20 +72,20 @@ select a from t1 order by a; a 5 7 -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # use `test`; BEGIN -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `test`; savepoint my_savepoint -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (test.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `test`; rollback to savepoint my_savepoint -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query # # use `test`; BEGIN +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `test`; savepoint my_savepoint +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (test.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `test`; rollback to savepoint my_savepoint +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ delete from t1; delete from t2; reset master; @@ -98,52 +98,52 @@ insert into t2 select * from t1; select get_lock("a",10); get_lock("a",10) 1 -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # use `test`; BEGIN -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (test.t2) -master-bin.000001 # Write_rows 1 # 
table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `test`; ROLLBACK +master-bin.000001 # Query # # use `test`; BEGIN +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (test.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `test`; ROLLBACK delete from t1; delete from t2; reset master; insert into t1 values(9); insert into t2 select * from t1; -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Xid 1 # COMMIT /* xid= */ -master-bin.000001 # Table_map 1 # table_id: # (test.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Table_map # # table_id: # (test.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F delete from t1; delete from t2; reset master; insert into t1 values(10); begin; insert into t2 select * from t1; -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Xid 1 # COMMIT /* xid= */ -master-bin.000001 # Table_map 1 # table_id: # (test.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Table_map # # table_id: # (test.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F insert into t1 values(11); commit; -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Xid 1 # COMMIT /* xid= */ -master-bin.000001 # Table_map 1 # table_id: # (test.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `test`; BEGIN -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Table_map # # table_id: # (test.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `test`; BEGIN +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ alter table t2 engine=INNODB; delete from t1; delete from t2; @@ -152,14 +152,14 @@ begin; insert into t1 values(12); insert into t2 select * from t1; commit; -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # 
use `test`; BEGIN -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (test.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query # # use `test`; BEGIN +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (test.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ delete from t1; delete from t2; reset master; @@ -167,7 +167,7 @@ begin; insert into t1 values(13); insert into t2 select * from t1; rollback; -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info delete from t1; delete from t2; @@ -179,12 +179,12 @@ insert into t1 values(15); insert into t2 select * from t1; rollback to savepoint my_savepoint; commit; -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # use `test`; BEGIN -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query # # use `test`; BEGIN +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ delete from t1; delete from t2; reset master; @@ -200,14 +200,14 @@ select a from t1 order by a; a 16 18 -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # use `test`; BEGIN -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query # # use `test`; BEGIN +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ delete from t1; delete from t2; alter table t2 engine=MyISAM; @@ -252,30 +252,30 @@ insert into t2 values (3); select get_lock("lock1",60); get_lock("lock1",60) 1 -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # use `test`; BEGIN -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Xid 1 # COMMIT /* xid= */ -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Delete_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Xid 1 # COMMIT /* xid= */ -master-bin.000001 # Query 1 # use `test`; alter table t2 engine=MyISAM -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F 
-master-bin.000001 # Xid 1 # COMMIT /* xid= */ -master-bin.000001 # Table_map 1 # table_id: # (test.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `test`; drop table t1,t2 -master-bin.000001 # Query 1 # use `test`; create table t0 (n int) -master-bin.000001 # Table_map 1 # table_id: # (test.t0) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (test.t0) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `test`; create table t2 (n int) engine=innodb +master-bin.000001 # Query # # use `test`; BEGIN +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Delete_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Query # # use `test`; alter table t2 engine=MyISAM +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Table_map # # table_id: # (test.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `test`; drop table t1,t2 +master-bin.000001 # Query # # use `test`; create table t0 (n int) +master-bin.000001 # Table_map # # table_id: # (test.t0) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (test.t0) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `test`; create table t2 (n int) engine=innodb do release_lock("lock1"); drop table t0,t2; set autocommit=0; @@ -355,39 +355,39 @@ SELECT * from t2; a b 100 100 DROP TABLE t1,t2; -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `test`; DROP TABLE if exists t2 -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `test`; DROP TABLE IF EXISTS t2 -master-bin.000001 # Query 1 # use `test`; CREATE TABLE t2 (a int, b int, primary key (a)) engine=innodb -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `test`; TRUNCATE table t2 -master-bin.000001 # Xid 1 # COMMIT /* xid= */ -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `test`; BEGIN -master-bin.000001 # Table_map 1 # table_id: # (test.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Xid 1 # COMMIT /* xid= */ -master-bin.000001 # Query 1 # use `test`; DROP TABLE t2 -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # 
flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `test`; TRUNCATE table t2 -master-bin.000001 # Xid 1 # COMMIT /* xid= */ -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `test`; DROP TABLE `t1` /* generated by server */ +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `test`; DROP TABLE if exists t2 +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `test`; DROP TABLE IF EXISTS t2 +master-bin.000001 # Query # # use `test`; CREATE TABLE t2 (a int, b int, primary key (a)) engine=innodb +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `test`; TRUNCATE table t2 +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `test`; BEGIN +master-bin.000001 # Table_map # # table_id: # (test.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Query # # use `test`; DROP TABLE t2 +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `test`; TRUNCATE table t2 +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */ reset master; create table t1 (a int) engine=innodb; create table t2 (a int) engine=myisam; diff --git a/mysql-test/r/binlog_stm_binlog.result b/mysql-test/r/binlog_stm_binlog.result index fb0453b5b68..66fe3e40270 100644 --- a/mysql-test/r/binlog_stm_binlog.result +++ b/mysql-test/r/binlog_stm_binlog.result @@ -4,11 +4,11 @@ insert into t1 values (1,2); commit; show binlog events; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 4 Format_desc 1 102 Server ver: #, Binlog ver: # -master-bin.000001 102 Query 1 209 use `test`; create table t1 (a int, b int) engine=innodb -master-bin.000001 209 Query 1 277 use `test`; BEGIN -master-bin.000001 277 Query 1 90 use `test`; insert into t1 values (1,2) -master-bin.000001 367 Xid 1 394 COMMIT /* XID */ +master-bin.000001 4 Format_desc 1 106 Server ver: #, Binlog ver: # +master-bin.000001 106 Query 1 213 use `test`; create table t1 (a int, b int) engine=innodb +master-bin.000001 213 Query 1 281 use `test`; BEGIN +master-bin.000001 281 Query 1 90 use `test`; insert into t1 values 
(1,2) +master-bin.000001 371 Xid 1 398 COMMIT /* XID */ drop table t1; drop table if exists t1, t2; reset master; @@ -20,23 +20,23 @@ commit; begin; insert t2 values (5); commit; -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # use `test`; create table t1 (a int) engine=innodb -master-bin.000001 # Query 1 # use `test`; create table t2 (a int) engine=innodb -master-bin.000001 # Query 1 # use `test`; BEGIN -master-bin.000001 # Query 1 # use `test`; insert t1 values (5) -master-bin.000001 # Xid 1 # COMMIT /* xid= */ -master-bin.000001 # Query 1 # use `test`; BEGIN -master-bin.000001 # Query 1 # use `test`; insert t2 values (5) -master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query # # use `test`; create table t1 (a int) engine=innodb +master-bin.000001 # Query # # use `test`; create table t2 (a int) engine=innodb +master-bin.000001 # Query # # use `test`; BEGIN +master-bin.000001 # Query # # use `test`; insert t1 values (5) +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Query # # use `test`; BEGIN +master-bin.000001 # Query # # use `test`; insert t2 values (5) +master-bin.000001 # Xid # # COMMIT /* XID */ drop table t1,t2; reset master; create table t1 (n int) engine=innodb; begin; commit; drop table t1; -show binlog events in 'master-bin.000001' from 102; +show binlog events in 'master-bin.000001' from 106; Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Query 1 # use `test`; create table t1 (n int) engine=innodb master-bin.000001 # Query 1 # use `test`; BEGIN @@ -142,7 +142,7 @@ master-bin.000001 # Query 1 # use `test`; insert into t1 values(2 + 4) master-bin.000001 # Query 1 # use `test`; insert into t1 values(1 + 4) master-bin.000001 # Xid 1 # COMMIT /* xid= */ master-bin.000001 # Rotate 1 # master-bin.000002;pos=4 -show binlog events in 'master-bin.000002' from 102; +show binlog events in 'master-bin.000002' from 106; Log_name Pos Event_type Server_id End_log_pos Info master-bin.000002 # Query 1 # use `test`; drop table t1 reset master; @@ -164,46 +164,46 @@ INSERT INTO user SET host='localhost', user='@#@', password=password('Just a tes UPDATE user SET password=password('Another password') WHERE host='localhost' AND user='@#@'; DELETE FROM user WHERE host='localhost' AND user='@#@'; use test; -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # use `test`; create table t1 (id tinyint auto_increment primary key) -master-bin.000001 # Intvar 1 # INSERT_ID=127 -master-bin.000001 # Query 1 # use `test`; insert into t1 values(null) -master-bin.000001 # Query 1 # use `test`; drop table t1 -master-bin.000001 # Query 1 # use `test`; create table t1 (a int) -master-bin.000001 # Query 1 # use `test`; create table if not exists t2 select * from t1 -master-bin.000001 # Query 1 # use `test`; create temporary table tt1 (a int) -master-bin.000001 # Query 1 # use `test`; create table if not exists t3 like tt1 -master-bin.000001 # Query 1 # use `mysql`; INSERT INTO user SET host='localhost', user='@#@', password=password('Just a test') -master-bin.000001 # Query 1 # use `mysql`; UPDATE user SET password=password('Another password') WHERE host='localhost' AND user='@#@' -master-bin.000001 # Query 1 # use `mysql`; DELETE FROM user WHERE host='localhost' AND user='@#@' +master-bin.000001 # Query # # use `test`; create table t1 (id tinyint 
auto_increment primary key) +master-bin.000001 # Intvar # # INSERT_ID=127 +master-bin.000001 # Query # # use `test`; insert into t1 values(null) +master-bin.000001 # Query # # use `test`; drop table t1 +master-bin.000001 # Query # # use `test`; create table t1 (a int) +master-bin.000001 # Query # # use `test`; create table if not exists t2 select * from t1 +master-bin.000001 # Query # # use `test`; create temporary table tt1 (a int) +master-bin.000001 # Query # # use `test`; create table if not exists t3 like tt1 +master-bin.000001 # Query # # use `mysql`; INSERT INTO user SET host='localhost', user='@#@', password=password('Just a test') +master-bin.000001 # Query # # use `mysql`; UPDATE user SET password=password('Another password') WHERE host='localhost' AND user='@#@' +master-bin.000001 # Query # # use `mysql`; DELETE FROM user WHERE host='localhost' AND user='@#@' drop table t1,t2,t3,tt1; create table t1 (a int not null auto_increment, primary key (a)) engine=myisam; set @@session.auto_increment_increment=1, @@session.auto_increment_offset=1; insert delayed into t1 values (207); insert delayed into t1 values (null); insert delayed into t1 values (300); -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # use `test`; create table t1 (id tinyint auto_increment primary key) -master-bin.000001 # Intvar 1 # INSERT_ID=127 -master-bin.000001 # Query 1 # use `test`; insert into t1 values(null) -master-bin.000001 # Query 1 # use `test`; drop table t1 -master-bin.000001 # Query 1 # use `test`; create table t1 (a int) -master-bin.000001 # Query 1 # use `test`; create table if not exists t2 select * from t1 -master-bin.000001 # Query 1 # use `test`; create temporary table tt1 (a int) -master-bin.000001 # Query 1 # use `test`; create table if not exists t3 like tt1 -master-bin.000001 # Query 1 # use `mysql`; INSERT INTO user SET host='localhost', user='@#@', password=password('Just a test') -master-bin.000001 # Query 1 # use `mysql`; UPDATE user SET password=password('Another password') WHERE host='localhost' AND user='@#@' -master-bin.000001 # Query 1 # use `mysql`; DELETE FROM user WHERE host='localhost' AND user='@#@' -master-bin.000001 # Query 1 # use `test`; drop table t1,t2,t3,tt1 -master-bin.000001 # Query 1 # use `test`; create table t1 (a int not null auto_increment, primary key (a)) engine=myisam -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `test`; create table t1 (id tinyint auto_increment primary key) +master-bin.000001 # Intvar # # INSERT_ID=127 +master-bin.000001 # Query # # use `test`; insert into t1 values(null) +master-bin.000001 # Query # # use `test`; drop table t1 +master-bin.000001 # Query # # use `test`; create table t1 (a int) +master-bin.000001 # Query # # use `test`; create table if not exists t2 select * from t1 +master-bin.000001 # Query # # use `test`; create temporary table tt1 (a int) +master-bin.000001 # Query # # use `test`; create table if not exists t3 like tt1 +master-bin.000001 # Query # # use `mysql`; INSERT INTO user SET host='localhost', user='@#@', password=password('Just a test') +master-bin.000001 # Query 
# # use `mysql`; UPDATE user SET password=password('Another password') WHERE host='localhost' AND user='@#@' +master-bin.000001 # Query # # use `mysql`; DELETE FROM user WHERE host='localhost' AND user='@#@' +master-bin.000001 # Query # # use `test`; drop table t1,t2,t3,tt1 +master-bin.000001 # Query # # use `test`; create table t1 (a int not null auto_increment, primary key (a)) engine=myisam +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F insert delayed into t1 values (null),(null),(null),(null); insert delayed into t1 values (null),(null),(400),(null); 11 == 11 @@ -221,3 +221,17 @@ a 400 401 drop table t1; +reset master; +drop table if exists t3; +create table t3 (a int(11) NOT NULL AUTO_INCREMENT, b text, PRIMARY KEY (a) ) engine=innodb; +show master status; +File Position Binlog_Do_DB Binlog_Ignore_DB +master-bin.000001 346 +insert into t3(b) values ('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'); +insert into t3(b) values ('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'); +insert into t3(b) values 
('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'); +insert into t3(b) values ('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'); +show master status /* must show new binlog index after rotating */; +File Position Binlog_Do_DB Binlog_Ignore_DB +master-bin.000002 106 +drop table t3; diff --git a/mysql-test/r/binlog_stm_blackhole.result b/mysql-test/r/binlog_stm_blackhole.result index d382c94fba9..bf2fdfa616b 100644 --- a/mysql-test/r/binlog_stm_blackhole.result +++ b/mysql-test/r/binlog_stm_blackhole.result @@ -104,24 +104,23 @@ select * from t2; a select * from t3; a -show binlog events; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Format_desc 1 # Server ver: VERSION, Binlog ver: 4 -master-bin.000001 # Query 1 # use `test`; drop table t1,t2 -master-bin.000001 # Query 1 # use `test`; create table t1 (a int) engine=blackhole -master-bin.000001 # Query 1 # use `test`; delete from t1 where a=10 -master-bin.000001 # Query 1 # use `test`; update t1 set a=11 where a=15 -master-bin.000001 # Query 1 # use `test`; insert into t1 values(1) -master-bin.000001 # Query 1 # use `test`; insert ignore into t1 values(1) -master-bin.000001 # Query 1 # use `test`; replace into t1 values(100) -master-bin.000001 # Query 1 # use `test`; create table t2 (a varchar(200)) engine=blackhole -master-bin.000001 # Begin_load_query 1 # ;file_id=1;block_len=581 -master-bin.000001 # Execute_load_query 1 # use `test`; load data infile '../std_data_ln/words.dat' into table t2 ;file_id=1 -master-bin.000001 # Query 1 # use `test`; alter table t1 add b int -master-bin.000001 # Query 1 # use `test`; alter table t1 drop b 
-master-bin.000001 # Query 1 # use `test`; create table t3 like t1 -master-bin.000001 # Query 1 # use `test`; insert into t1 select * from t3 -master-bin.000001 # Query 1 # use `test`; replace into t1 select * from t3 +master-bin.000001 # Query # # use `test`; drop table t1,t2 +master-bin.000001 # Query # # use `test`; create table t1 (a int) engine=blackhole +master-bin.000001 # Query # # use `test`; delete from t1 where a=10 +master-bin.000001 # Query # # use `test`; update t1 set a=11 where a=15 +master-bin.000001 # Query # # use `test`; insert into t1 values(1) +master-bin.000001 # Query # # use `test`; insert ignore into t1 values(1) +master-bin.000001 # Query # # use `test`; replace into t1 values(100) +master-bin.000001 # Query # # use `test`; create table t2 (a varchar(200)) engine=blackhole +master-bin.000001 # Begin_load_query # # ;file_id=1;block_len=581 +master-bin.000001 # Execute_load_query # # use `test`; load data infile '../std_data_ln/words.dat' into table t2 ;file_id=1 +master-bin.000001 # Query # # use `test`; alter table t1 add b int +master-bin.000001 # Query # # use `test`; alter table t1 drop b +master-bin.000001 # Query # # use `test`; create table t3 like t1 +master-bin.000001 # Query # # use `test`; insert into t1 select * from t3 +master-bin.000001 # Query # # use `test`; replace into t1 select * from t3 drop table t1,t2,t3; CREATE TABLE t1(a INT, b INT) ENGINE=BLACKHOLE; DELETE FROM t1 WHERE a=10; @@ -144,11 +143,10 @@ start transaction; insert into t1 values(2); rollback; set autocommit=1; -show binlog events; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Format_desc 1 # Server ver: VERSION, Binlog ver: 4 -master-bin.000001 # Query 1 # use `test`; create table t1 (a int) engine=blackhole -master-bin.000001 # Query 1 # use `test`; BEGIN -master-bin.000001 # Query 1 # use `test`; insert into t1 values(1) -master-bin.000001 # Query 1 # use `test`; COMMIT +master-bin.000001 # Query # # use `test`; create table t1 (a int) engine=blackhole +master-bin.000001 # Query # # use `test`; BEGIN +master-bin.000001 # Query # # use `test`; insert into t1 values(1) +master-bin.000001 # Query # # use `test`; COMMIT drop table if exists t1; diff --git a/mysql-test/r/binlog_stm_ctype_ucs.result b/mysql-test/r/binlog_stm_ctype_ucs.result index 76f3a3b06b9..c789c618876 100644 --- a/mysql-test/r/binlog_stm_ctype_ucs.result +++ b/mysql-test/r/binlog_stm_ctype_ucs.result @@ -3,10 +3,10 @@ create table t2 (c char(30)) charset=ucs2; set @v=convert('abc' using ucs2); reset master; insert into t2 values (@v); -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 102 User var 1 142 @`v`=_ucs2 0x006100620063 COLLATE ucs2_general_ci -master-bin.000001 142 Query 1 231 use `test`; insert into t2 values (@v) +master-bin.000001 # User var # # @`v`=_ucs2 0x006100620063 COLLATE ucs2_general_ci +master-bin.000001 # Query # # use `test`; insert into t2 values (@v) flush logs; /*!40019 SET @@session.max_insert_delayed_threads=0*/; /*!50003 SET @OLD_COMPLETION_TYPE=@@COMPLETION_TYPE,COMPLETION_TYPE=0*/; diff --git a/mysql-test/r/binlog_stm_drop_tmp_tbl.result b/mysql-test/r/binlog_stm_drop_tmp_tbl.result index 96481341bd6..dc4349dea59 100644 --- a/mysql-test/r/binlog_stm_drop_tmp_tbl.result +++ b/mysql-test/r/binlog_stm_drop_tmp_tbl.result @@ -11,12 +11,11 @@ get_lock("a",10) select get_lock("a",10); get_lock("a",10) 1 -show binlog events; +show 
binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Format_desc 1 # Server ver: VERSION, Binlog ver: 4 -master-bin.000001 # Query 1 # create database `drop-temp+table-test` -master-bin.000001 # Query 1 # use `drop-temp+table-test`; create temporary table shortn1 (a int) -master-bin.000001 # Query 1 # use `drop-temp+table-test`; create temporary table `table:name` (a int) -master-bin.000001 # Query 1 # use `drop-temp+table-test`; create temporary table shortn2 (a int) -master-bin.000001 # Query 1 # use `drop-temp+table-test`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `drop-temp+table-test`.`shortn2`,`drop-temp+table-test`.`table:name`,`drop-temp+table-test`.`shortn1` +master-bin.000001 # Query # # create database `drop-temp+table-test` +master-bin.000001 # Query # # use `drop-temp+table-test`; create temporary table shortn1 (a int) +master-bin.000001 # Query # # use `drop-temp+table-test`; create temporary table `table:name` (a int) +master-bin.000001 # Query # # use `drop-temp+table-test`; create temporary table shortn2 (a int) +master-bin.000001 # Query # # use `drop-temp+table-test`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `drop-temp+table-test`.`shortn2`,`drop-temp+table-test`.`table:name`,`drop-temp+table-test`.`shortn1` drop database `drop-temp+table-test`; diff --git a/mysql-test/r/binlog_stm_insert_select.result b/mysql-test/r/binlog_stm_insert_select.result index 646a76b254d..a93a8edf4aa 100644 --- a/mysql-test/r/binlog_stm_insert_select.result +++ b/mysql-test/r/binlog_stm_insert_select.result @@ -6,10 +6,9 @@ insert into t2 values(1),(2); reset master; insert into t1 select * from t2; ERROR 23000: Duplicate entry '2' for key 'a' -show binlog events; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 4 Format_desc 1 102 Server ver: VERSION, Binlog ver: 4 -master-bin.000001 102 Query 1 196 use `test`; insert into t1 select * from t2 +master-bin.000001 # Query # # use `test`; insert into t1 select * from t2 select * from t1; a 1 @@ -20,7 +19,6 @@ insert into t1 values(1),(1); reset master; create table t2(unique(a)) select a from t1; ERROR 23000: Duplicate entry '1' for key 'a' -show binlog events; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 4 Format_desc 1 102 Server ver: VERSION, Binlog ver: 4 drop table t1; diff --git a/mysql-test/r/binlog_stm_mix_innodb_myisam.result b/mysql-test/r/binlog_stm_mix_innodb_myisam.result index 23f4e50826a..c74fb17d600 100644 --- a/mysql-test/r/binlog_stm_mix_innodb_myisam.result +++ b/mysql-test/r/binlog_stm_mix_innodb_myisam.result @@ -6,12 +6,12 @@ begin; insert into t1 values(1); insert into t2 select * from t1; commit; -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # use `test`; BEGIN -master-bin.000001 # Query 1 # use `test`; insert into t1 values(1) -master-bin.000001 # Query 1 # use `test`; insert into t2 select * from t1 -master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query # # use `test`; BEGIN +master-bin.000001 # Query # # use `test`; insert into t1 values(1) +master-bin.000001 # Query # # use `test`; insert into t2 select * from t1 +master-bin.000001 # Xid # # COMMIT /* XID */ delete from t1; delete from t2; reset master; @@ -21,12 +21,12 @@ insert into t2 select * from t1; rollback; Warnings: Warning 1196 Some non-transactional 
changed tables couldn't be rolled back -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # use `test`; BEGIN -master-bin.000001 # Query 1 # use `test`; insert into t1 values(2) -master-bin.000001 # Query 1 # use `test`; insert into t2 select * from t1 -master-bin.000001 # Query 1 # use `test`; ROLLBACK +master-bin.000001 # Query # # use `test`; BEGIN +master-bin.000001 # Query # # use `test`; insert into t1 values(2) +master-bin.000001 # Query # # use `test`; insert into t2 select * from t1 +master-bin.000001 # Query # # use `test`; ROLLBACK delete from t1; delete from t2; reset master; @@ -39,15 +39,15 @@ rollback to savepoint my_savepoint; Warnings: Warning 1196 Some non-transactional changed tables couldn't be rolled back commit; -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # use `test`; BEGIN -master-bin.000001 # Query 1 # use `test`; insert into t1 values(3) -master-bin.000001 # Query 1 # use `test`; savepoint my_savepoint -master-bin.000001 # Query 1 # use `test`; insert into t1 values(4) -master-bin.000001 # Query 1 # use `test`; insert into t2 select * from t1 -master-bin.000001 # Query 1 # use `test`; rollback to savepoint my_savepoint -master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query # # use `test`; BEGIN +master-bin.000001 # Query # # use `test`; insert into t1 values(3) +master-bin.000001 # Query # # use `test`; savepoint my_savepoint +master-bin.000001 # Query # # use `test`; insert into t1 values(4) +master-bin.000001 # Query # # use `test`; insert into t2 select * from t1 +master-bin.000001 # Query # # use `test`; rollback to savepoint my_savepoint +master-bin.000001 # Xid # # COMMIT /* XID */ delete from t1; delete from t2; reset master; @@ -65,16 +65,16 @@ select a from t1 order by a; a 5 7 -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # use `test`; BEGIN -master-bin.000001 # Query 1 # use `test`; insert into t1 values(5) -master-bin.000001 # Query 1 # use `test`; savepoint my_savepoint -master-bin.000001 # Query 1 # use `test`; insert into t1 values(6) -master-bin.000001 # Query 1 # use `test`; insert into t2 select * from t1 -master-bin.000001 # Query 1 # use `test`; rollback to savepoint my_savepoint -master-bin.000001 # Query 1 # use `test`; insert into t1 values(7) -master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query # # use `test`; BEGIN +master-bin.000001 # Query # # use `test`; insert into t1 values(5) +master-bin.000001 # Query # # use `test`; savepoint my_savepoint +master-bin.000001 # Query # # use `test`; insert into t1 values(6) +master-bin.000001 # Query # # use `test`; insert into t2 select * from t1 +master-bin.000001 # Query # # use `test`; rollback to savepoint my_savepoint +master-bin.000001 # Query # # use `test`; insert into t1 values(7) +master-bin.000001 # Xid # # COMMIT /* XID */ delete from t1; delete from t2; reset master; @@ -87,43 +87,43 @@ insert into t2 select * from t1; select get_lock("a",10); get_lock("a",10) 1 -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # use `test`; BEGIN -master-bin.000001 # Query 1 # use `test`; insert into t1 values(8) -master-bin.000001 # Query 1 # use `test`; 
insert into t2 select * from t1 -master-bin.000001 # Query 1 # use `test`; ROLLBACK +master-bin.000001 # Query # # use `test`; BEGIN +master-bin.000001 # Query # # use `test`; insert into t1 values(8) +master-bin.000001 # Query # # use `test`; insert into t2 select * from t1 +master-bin.000001 # Query # # use `test`; ROLLBACK delete from t1; delete from t2; reset master; insert into t1 values(9); insert into t2 select * from t1; -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # use `test`; insert into t1 values(9) -master-bin.000001 # Xid 1 # COMMIT /* xid= */ -master-bin.000001 # Query 1 # use `test`; insert into t2 select * from t1 +master-bin.000001 # Query # # use `test`; insert into t1 values(9) +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Query # # use `test`; insert into t2 select * from t1 delete from t1; delete from t2; reset master; insert into t1 values(10); begin; insert into t2 select * from t1; -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # use `test`; insert into t1 values(10) -master-bin.000001 # Xid 1 # COMMIT /* xid= */ -master-bin.000001 # Query 1 # use `test`; insert into t2 select * from t1 +master-bin.000001 # Query # # use `test`; insert into t1 values(10) +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Query # # use `test`; insert into t2 select * from t1 insert into t1 values(11); commit; -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # use `test`; insert into t1 values(10) -master-bin.000001 # Xid 1 # COMMIT /* xid= */ -master-bin.000001 # Query 1 # use `test`; insert into t2 select * from t1 -master-bin.000001 # Query 1 # use `test`; BEGIN -master-bin.000001 # Query 1 # use `test`; insert into t1 values(11) -master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query # # use `test`; insert into t1 values(10) +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Query # # use `test`; insert into t2 select * from t1 +master-bin.000001 # Query # # use `test`; BEGIN +master-bin.000001 # Query # # use `test`; insert into t1 values(11) +master-bin.000001 # Xid # # COMMIT /* XID */ alter table t2 engine=INNODB; delete from t1; delete from t2; @@ -132,12 +132,12 @@ begin; insert into t1 values(12); insert into t2 select * from t1; commit; -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # use `test`; BEGIN -master-bin.000001 # Query 1 # use `test`; insert into t1 values(12) -master-bin.000001 # Query 1 # use `test`; insert into t2 select * from t1 -master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query # # use `test`; BEGIN +master-bin.000001 # Query # # use `test`; insert into t1 values(12) +master-bin.000001 # Query # # use `test`; insert into t2 select * from t1 +master-bin.000001 # Xid # # COMMIT /* XID */ delete from t1; delete from t2; reset master; @@ -145,7 +145,7 @@ begin; insert into t1 values(13); insert into t2 select * from t1; rollback; -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info delete from t1; delete from t2; @@ -157,11 +157,11 @@ insert into t1 values(15); insert into t2 select * from 
t1; rollback to savepoint my_savepoint; commit; -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # use `test`; BEGIN -master-bin.000001 # Query 1 # use `test`; insert into t1 values(14) -master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query # # use `test`; BEGIN +master-bin.000001 # Query # # use `test`; insert into t1 values(14) +master-bin.000001 # Xid # # COMMIT /* XID */ delete from t1; delete from t2; reset master; @@ -177,12 +177,12 @@ select a from t1 order by a; a 16 18 -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # use `test`; BEGIN -master-bin.000001 # Query 1 # use `test`; insert into t1 values(16) -master-bin.000001 # Query 1 # use `test`; insert into t1 values(18) -master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query # # use `test`; BEGIN +master-bin.000001 # Query # # use `test`; insert into t1 values(16) +master-bin.000001 # Query # # use `test`; insert into t1 values(18) +master-bin.000001 # Xid # # COMMIT /* XID */ delete from t1; delete from t2; alter table t2 engine=MyISAM; @@ -227,31 +227,31 @@ insert into t2 values (3); select get_lock("lock1",60); get_lock("lock1",60) 1 -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # use `test`; BEGIN -master-bin.000001 # Query 1 # use `test`; insert into t1 values(16) -master-bin.000001 # Query 1 # use `test`; insert into t1 values(18) -master-bin.000001 # Xid 1 # COMMIT /* xid= */ -master-bin.000001 # Query 1 # use `test`; delete from t1 -master-bin.000001 # Xid 1 # COMMIT /* xid= */ -master-bin.000001 # Query 1 # use `test`; delete from t2 -master-bin.000001 # Xid 1 # COMMIT /* xid= */ -master-bin.000001 # Query 1 # use `test`; alter table t2 engine=MyISAM -master-bin.000001 # Query 1 # use `test`; insert into t1 values (1) -master-bin.000001 # Xid 1 # COMMIT /* xid= */ -master-bin.000001 # Query 1 # use `test`; insert into t2 values (20) -master-bin.000001 # Query 1 # use `test`; drop table t1,t2 -master-bin.000001 # Query 1 # use `test`; create temporary table ti (a int) engine=innodb -master-bin.000001 # Query 1 # use `test`; insert into ti values(1) -master-bin.000001 # Xid 1 # COMMIT /* xid= */ -master-bin.000001 # Query 1 # use `test`; create temporary table t1 (a int) engine=myisam -master-bin.000001 # Query 1 # use `test`; insert t1 values (1) -master-bin.000001 # Query 1 # use `test`; create table t0 (n int) -master-bin.000001 # Query 1 # use `test`; insert t0 select * from t1 -master-bin.000001 # Query 1 # use `test`; insert into t0 select GET_LOCK("lock1",null) -master-bin.000001 # Query 1 # use `test`; create table t2 (n int) engine=innodb -master-bin.000001 # Query 1 # use `test`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `test`.`t1`,`test`.`ti` +master-bin.000001 # Query # # use `test`; BEGIN +master-bin.000001 # Query # # use `test`; insert into t1 values(16) +master-bin.000001 # Query # # use `test`; insert into t1 values(18) +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Query # # use `test`; delete from t1 +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Query # # use `test`; delete from t2 +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Query # # use `test`; alter table t2 engine=MyISAM +master-bin.000001 # 
Query # # use `test`; insert into t1 values (1) +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Query # # use `test`; insert into t2 values (20) +master-bin.000001 # Query # # use `test`; drop table t1,t2 +master-bin.000001 # Query # # use `test`; create temporary table ti (a int) engine=innodb +master-bin.000001 # Query # # use `test`; insert into ti values(1) +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Query # # use `test`; create temporary table t1 (a int) engine=myisam +master-bin.000001 # Query # # use `test`; insert t1 values (1) +master-bin.000001 # Query # # use `test`; create table t0 (n int) +master-bin.000001 # Query # # use `test`; insert t0 select * from t1 +master-bin.000001 # Query # # use `test`; insert into t0 select GET_LOCK("lock1",null) +master-bin.000001 # Query # # use `test`; create table t2 (n int) engine=innodb +master-bin.000001 # Query # # use `test`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `test`.`t1`,`test`.`ti` do release_lock("lock1"); drop table t0,t2; set autocommit=0; @@ -331,30 +331,30 @@ SELECT * from t2; a b 100 100 DROP TABLE t1,t2; -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # use `test`; INSERT INTO t1 values (1,1),(1,2) -master-bin.000001 # Query 1 # use `test`; DROP TABLE if exists t2 -master-bin.000001 # Query 1 # use `test`; INSERT INTO t1 values (3,3) -master-bin.000001 # Query 1 # use `test`; DROP TABLE IF EXISTS t2 -master-bin.000001 # Query 1 # use `test`; CREATE TABLE t2 (a int, b int, primary key (a)) engine=innodb -master-bin.000001 # Query 1 # use `test`; INSERT INTO t1 VALUES (4,4) -master-bin.000001 # Query 1 # use `test`; TRUNCATE table t2 -master-bin.000001 # Xid 1 # COMMIT /* xid= */ -master-bin.000001 # Query 1 # use `test`; INSERT INTO t1 VALUES (5,5) -master-bin.000001 # Query 1 # use `test`; DROP TABLE t2 -master-bin.000001 # Query 1 # use `test`; INSERT INTO t1 values (6,6) -master-bin.000001 # Query 1 # use `test`; CREATE TEMPORARY TABLE t2 (a int, b int, primary key (a)) engine=innodb -master-bin.000001 # Query 1 # use `test`; INSERT INTO t1 values (7,7) -master-bin.000001 # Query 1 # use `test`; INSERT INTO t1 values (8,8) -master-bin.000001 # Query 1 # use `test`; INSERT INTO t1 values (9,9) -master-bin.000001 # Query 1 # use `test`; TRUNCATE table t2 -master-bin.000001 # Xid 1 # COMMIT /* xid= */ -master-bin.000001 # Query 1 # use `test`; INSERT INTO t1 values (10,10) -master-bin.000001 # Query 1 # use `test`; BEGIN -master-bin.000001 # Query 1 # use `test`; INSERT INTO t2 values (100,100) -master-bin.000001 # Xid 1 # COMMIT /* xid= */ -master-bin.000001 # Query 1 # use `test`; DROP TABLE t1,t2 +master-bin.000001 # Query # # use `test`; INSERT INTO t1 values (1,1),(1,2) +master-bin.000001 # Query # # use `test`; DROP TABLE if exists t2 +master-bin.000001 # Query # # use `test`; INSERT INTO t1 values (3,3) +master-bin.000001 # Query # # use `test`; DROP TABLE IF EXISTS t2 +master-bin.000001 # Query # # use `test`; CREATE TABLE t2 (a int, b int, primary key (a)) engine=innodb +master-bin.000001 # Query # # use `test`; INSERT INTO t1 VALUES (4,4) +master-bin.000001 # Query # # use `test`; TRUNCATE table t2 +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Query # # use `test`; INSERT INTO t1 VALUES (5,5) +master-bin.000001 # Query # # use `test`; DROP TABLE t2 +master-bin.000001 # Query # # use `test`; INSERT INTO t1 values (6,6) +master-bin.000001 # Query # # 
use `test`; CREATE TEMPORARY TABLE t2 (a int, b int, primary key (a)) engine=innodb +master-bin.000001 # Query # # use `test`; INSERT INTO t1 values (7,7) +master-bin.000001 # Query # # use `test`; INSERT INTO t1 values (8,8) +master-bin.000001 # Query # # use `test`; INSERT INTO t1 values (9,9) +master-bin.000001 # Query # # use `test`; TRUNCATE table t2 +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Query # # use `test`; INSERT INTO t1 values (10,10) +master-bin.000001 # Query # # use `test`; BEGIN +master-bin.000001 # Query # # use `test`; INSERT INTO t2 values (100,100) +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Query # # use `test`; DROP TABLE t1,t2 reset master; create table t1 (a int) engine=innodb; create table t2 (a int) engine=myisam; diff --git a/mysql-test/r/client_xml.result b/mysql-test/r/client_xml.result index 7395b2433e8..6a148954fcd 100644 --- a/mysql-test/r/client_xml.result +++ b/mysql-test/r/client_xml.result @@ -7,7 +7,7 @@ insert into t1 values (1, 2, 'a&b a<b a>b'); <?xml version="1.0"?> <resultset statement="select * from t1 -"> +" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> <row> <field name="a&b">1</field> <field name="a<b">2</field> @@ -34,7 +34,7 @@ insert into t1 values (1, 2, 'a&b a<b a>b'); <?xml version="1.0"?> <resultset statement="select count(*) from t1 -"> +" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> <row> <field name="count(*)">1</field> </row> @@ -42,7 +42,7 @@ insert into t1 values (1, 2, 'a&b a<b a>b'); <?xml version="1.0"?> <resultset statement="select 1 < 2 from dual -"> +" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> <row> <field name="1 < 2">1</field> </row> @@ -50,7 +50,7 @@ insert into t1 values (1, 2, 'a&b a<b a>b'); <?xml version="1.0"?> <resultset statement="select 1 > 2 from dual -"> +" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> <row> <field name="1 > 2">0</field> </row> @@ -58,7 +58,7 @@ insert into t1 values (1, 2, 'a&b a<b a>b'); <?xml version="1.0"?> <resultset statement="select 1 & 3 from dual -"> +" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> <row> <field name="1 & 3">1</field> </row> @@ -66,7 +66,7 @@ insert into t1 values (1, 2, 'a&b a<b a>b'); <?xml version="1.0"?> <resultset statement="select null from dual -"> +" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> <row> <field name="NULL" xsi:nil="true" /> </row> diff --git a/mysql-test/r/create.result b/mysql-test/r/create.result index ab9e6762d21..53892b219eb 100644 --- a/mysql-test/r/create.result +++ b/mysql-test/r/create.result @@ -460,8 +460,8 @@ t2 CREATE TABLE `t2` ( `ifnull(c,c)` mediumint(8) DEFAULT NULL, `ifnull(d,d)` int(11) DEFAULT NULL, `ifnull(e,e)` bigint(20) DEFAULT NULL, - `ifnull(f,f)` float(24,2) DEFAULT NULL, - `ifnull(g,g)` double(53,3) DEFAULT NULL, + `ifnull(f,f)` float(3,2) DEFAULT NULL, + `ifnull(g,g)` double(4,3) DEFAULT NULL, `ifnull(h,h)` decimal(5,4) DEFAULT NULL, `ifnull(i,i)` year(4) DEFAULT NULL, `ifnull(j,j)` date DEFAULT NULL, diff --git a/mysql-test/r/ctype_cp932_binlog_row.result b/mysql-test/r/ctype_cp932_binlog_row.result index f1a64241fbb..39d7727b58a 100644 --- a/mysql-test/r/ctype_cp932_binlog_row.result +++ b/mysql-test/r/ctype_cp932_binlog_row.result @@ -6,11 +6,11 @@ CREATE TABLE t1(f1 blob); PREPARE stmt1 FROM 'INSERT INTO t1 VALUES(?)'; SET @var1= x'8300'; EXECUTE stmt1 USING @var1; -SHOW BINLOG EVENTS FROM 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 
# use `test`; CREATE TABLE t1(f1 blob) -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `test`; CREATE TABLE t1(f1 blob) +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F SELECT HEX(f1) FROM t1; HEX(f1) 8300 diff --git a/mysql-test/r/ctype_cp932_binlog_stm.result b/mysql-test/r/ctype_cp932_binlog_stm.result index ef024e2fa20..b486d2faaa9 100644 --- a/mysql-test/r/ctype_cp932_binlog_stm.result +++ b/mysql-test/r/ctype_cp932_binlog_stm.result @@ -6,11 +6,11 @@ CREATE TABLE t1(f1 blob); PREPARE stmt1 FROM 'INSERT INTO t1 VALUES(?)'; SET @var1= x'8300'; EXECUTE stmt1 USING @var1; -SHOW BINLOG EVENTS FROM 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # use `test`; CREATE TABLE t1(f1 blob) -master-bin.000001 # User var 1 # @`var1`=_binary 0x8300 COLLATE binary -master-bin.000001 # Query 1 # use `test`; INSERT INTO t1 VALUES(@'var1') +master-bin.000001 # Query # # use `test`; CREATE TABLE t1(f1 blob) +master-bin.000001 # User var # # @`var1`=_binary 0x8300 COLLATE binary +master-bin.000001 # Query # # use `test`; INSERT INTO t1 VALUES(@'var1') SELECT HEX(f1) FROM t1; HEX(f1) 8300 @@ -30,17 +30,17 @@ HEX(s1) HEX(s2) d 466F6F2773206120426172 ED40ED41ED42 47.93 DROP PROCEDURE bug18293| DROP TABLE t4| -SHOW BINLOG EVENTS FROM 406| +SHOW BINLOG EVENTS FROM 410| Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 406 Query 1 572 use `test`; CREATE TABLE t4 (s1 CHAR(50) CHARACTER SET latin1, +master-bin.000001 410 Query 1 576 use `test`; CREATE TABLE t4 (s1 CHAR(50) CHARACTER SET latin1, s2 CHAR(50) CHARACTER SET cp932, d DECIMAL(10,2)) -master-bin.000001 572 Query 1 820 use `test`; CREATE DEFINER=`root`@`localhost` PROCEDURE bug18293 (IN ins1 CHAR(50), +master-bin.000001 576 Query 1 824 use `test`; CREATE DEFINER=`root`@`localhost` PROCEDURE bug18293 (IN ins1 CHAR(50), IN ins2 CHAR(50) CHARACTER SET cp932, IN ind DECIMAL(10,2)) BEGIN INSERT INTO t4 VALUES (ins1, ins2, ind); END -master-bin.000001 820 Query 1 1039 use `test`; INSERT INTO t4 VALUES ( NAME_CONST('ins1',_latin1 0x466F6F2773206120426172), NAME_CONST('ins2',_cp932 0xED40ED41ED42), NAME_CONST('ind',47.93)) -master-bin.000001 1039 Query 1 1128 use `test`; DROP PROCEDURE bug18293 -master-bin.000001 1128 Query 1 1207 use `test`; DROP TABLE t4 +master-bin.000001 824 Query 1 1043 use `test`; INSERT INTO t4 VALUES ( NAME_CONST('ins1',_latin1 0x466F6F2773206120426172), NAME_CONST('ins2',_cp932 0xED40ED41ED42), NAME_CONST('ind',47.93)) +master-bin.000001 1043 Query 1 1132 use `test`; DROP PROCEDURE bug18293 +master-bin.000001 1132 Query 1 1211 use `test`; DROP TABLE t4 diff --git a/mysql-test/r/ctype_uca.result b/mysql-test/r/ctype_uca.result index 3e286c77c00..1fd1493bf1e 100644 --- a/mysql-test/r/ctype_uca.result +++ b/mysql-test/r/ctype_uca.result @@ -2654,3 +2654,12 @@ ii 2 ii 2 İİ 4 İİ 4 ii 2 İİ 4 II 2 ıı 4 II 2 DROP TABLE t1; +CREATE TABLE t1 ( +c1 text character set ucs2 collate ucs2_polish_ci NOT NULL +) ENGINE=MyISAM; +insert into t1 values (''),('a'); +SELECT COUNT(*), c1 FROM t1 GROUP BY c1; +COUNT(*) c1 +1 +1 a +DROP TABLE IF EXISTS t1; diff --git a/mysql-test/r/ctype_ucs.result b/mysql-test/r/ctype_ucs.result index a65b19695ec..fcfe5d6345b 100644 --- a/mysql-test/r/ctype_ucs.result +++ b/mysql-test/r/ctype_ucs.result @@ -839,6 +839,24 @@ lily river drop table t1; 
deallocate prepare stmt; +set names latin1; +set character_set_connection=ucs2; +select soundex(''),soundex('he'),soundex('hello all folks'),soundex('#3556 in bugdb'); +soundex('') soundex('he') soundex('hello all folks') soundex('#3556 in bugdb') + H000 H4142 I51231 +select hex(soundex('')),hex(soundex('he')),hex(soundex('hello all folks')),hex(soundex('#3556 in bugdb')); +hex(soundex('')) hex(soundex('he')) hex(soundex('hello all folks')) hex(soundex('#3556 in bugdb')) + 0048003000300030 00480034003100340032 004900350031003200330031 +select 'mood' sounds like 'mud'; +'mood' sounds like 'mud' +1 +select hex(soundex(_ucs2 0x041004110412)); +hex(soundex(_ucs2 0x041004110412)) +0410003000300030 +select hex(soundex(_ucs2 0x00BF00C0)); +hex(soundex(_ucs2 0x00BF00C0)) +00C0003000300030 +set names latin1; create table t1(a blob, b text charset utf8, c text charset ucs2); select data_type, character_octet_length, character_maximum_length from information_schema.columns where table_name='t1'; diff --git a/mysql-test/r/ctype_utf8.result b/mysql-test/r/ctype_utf8.result index 37ef5e2756f..380cf2f227d 100644 --- a/mysql-test/r/ctype_utf8.result +++ b/mysql-test/r/ctype_utf8.result @@ -854,6 +854,18 @@ select * from t1 where soundex(a) = soundex('test'); id a 1 Test drop table t1; +select soundex(_utf8 0xE99885E8A788E99A8FE697B6E69BB4E696B0E79A84E696B0E997BB); +soundex(_utf8 0xE99885E8A788E99A8FE697B6E69BB4E696B0E79A84E696B0E997BB) +阅000 +select hex(soundex(_utf8 0xE99885E8A788E99A8FE697B6E69BB4E696B0E79A84E696B0E997BB)); +hex(soundex(_utf8 0xE99885E8A788E99A8FE697B6E69BB4E696B0E79A84E696B0E997BB)) +E99885303030 +select soundex(_utf8 0xD091D092D093); +soundex(_utf8 0xD091D092D093) +Б000 +select hex(soundex(_utf8 0xD091D092D093)); +hex(soundex(_utf8 0xD091D092D093)) +D091303030 SET collation_connection='utf8_general_ci'; create table t1 select repeat('a',4000) a; delete from t1; diff --git a/mysql-test/r/delayed.result b/mysql-test/r/delayed.result index a21b6f4f3a5..fb8dc9af71a 100644 --- a/mysql-test/r/delayed.result +++ b/mysql-test/r/delayed.result @@ -250,3 +250,8 @@ SELECT HEX(a) FROM t1; HEX(a) 1 DROP TABLE t1; +CREATE TABLE t1(c1 INT) ENGINE=MyISAM; +CREATE TABLE t2(c1 INT) ENGINE=MERGE UNION=(t1); +INSERT DELAYED INTO t2 VALUES(1); +ERROR HY000: Table storage engine for 't2' doesn't have this option +DROP TABLE t1, t2; diff --git a/mysql-test/r/delete.result b/mysql-test/r/delete.result index ba4e9386312..4bdf1c770d3 100644 --- a/mysql-test/r/delete.result +++ b/mysql-test/r/delete.result @@ -214,3 +214,12 @@ select count(*) from t1; count(*) 0 drop table t1; +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1); +DELETE FROM t1 ORDER BY x; +ERROR 42S22: Unknown column 'x' in 'order clause' +DELETE FROM t1 ORDER BY t2.x; +ERROR 42S22: Unknown column 't2.x' in 'order clause' +DELETE FROM t1 ORDER BY (SELECT x); +ERROR 42S22: Unknown column 'x' in 'field list' +DROP TABLE t1; diff --git a/mysql-test/r/events_bugs.result b/mysql-test/r/events_bugs.result index b7690a57207..ba82a7d7b1b 100644 --- a/mysql-test/r/events_bugs.result +++ b/mysql-test/r/events_bugs.result @@ -33,7 +33,7 @@ create event e_55 on schedule at 20000101000000 do drop table t; Warnings: Note 1584 Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. 
Event has not been created show events; -Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status +Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator create event e_55 on schedule at 20200101000000 starts 10000101000000 do drop table t; ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'starts 10000101000000 do drop table t' at line 1 create event e_55 on schedule at 20200101000000 ends 10000101000000 do drop table t; diff --git a/mysql-test/r/events_grant.result b/mysql-test/r/events_grant.result index 8bcf40b5167..278cc5956f5 100644 --- a/mysql-test/r/events_grant.result +++ b/mysql-test/r/events_grant.result @@ -2,8 +2,8 @@ CREATE DATABASE IF NOT EXISTS events_test; use events_test; CREATE EVENT one_event ON SCHEDULE EVERY 10 SECOND DO SELECT 123; SHOW EVENTS; -Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status -events_test one_event root@localhost SYSTEM RECURRING NULL 10 # # NULL ENABLED +Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator +events_test one_event root@localhost SYSTEM RECURRING NULL 10 # # NULL ENABLED 1 SELECT EVENT_CATALOG, EVENT_SCHEMA, EVENT_NAME, DEFINER, EVENT_BODY, EVENT_DEFINITION, EVENT_TYPE, EXECUTE_AT, INTERVAL_VALUE, INTERVAL_FIELD, STATUS,ON_COMPLETION, EVENT_COMMENT FROM INFORMATION_SCHEMA.EVENTS ORDER BY EVENT_SCHEMA, EVENT_NAME; EVENT_CATALOG EVENT_SCHEMA EVENT_NAME DEFINER EVENT_BODY EVENT_DEFINITION EVENT_TYPE EXECUTE_AT INTERVAL_VALUE INTERVAL_FIELD STATUS ON_COMPLETION EVENT_COMMENT NULL events_test one_event root@localhost SQL SELECT 123 RECURRING NULL 10 SECOND ENABLED NOT PRESERVE @@ -29,8 +29,8 @@ ERROR 42000: Access denied for user 'ev_test'@'localhost' to database 'events_te USE events_test; "We should see one event"; SHOW EVENTS; -Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status -events_test one_event root@localhost SYSTEM RECURRING NULL 10 # # NULL ENABLED +Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator +events_test one_event root@localhost SYSTEM RECURRING NULL 10 # # NULL ENABLED 1 SELECT CONCAT("Let's create some new events from the name of ", USER()); CONCAT("Let's create some new events from the name of ", USER()) Let's create some new events from the name of ev_test@localhost @@ -40,18 +40,18 @@ CREATE EVENT two_event ON SCHEDULE EVERY 20 SECOND ON COMPLETION NOT PRESERVE CO CREATE EVENT three_event ON SCHEDULE EVERY 20 SECOND ON COMPLETION PRESERVE COMMENT "three event" DO SELECT 123; "Now we should see 3 events:"; SHOW EVENTS; -Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status -events_test one_event root@localhost SYSTEM RECURRING NULL 10 # # NULL ENABLED -events_test three_event ev_test@localhost SYSTEM RECURRING NULL 20 # # NULL ENABLED -events_test two_event ev_test@localhost SYSTEM RECURRING NULL 20 # # NULL ENABLED +Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator +events_test one_event root@localhost SYSTEM RECURRING NULL 10 # # NULL ENABLED 1 +events_test three_event ev_test@localhost SYSTEM RECURRING NULL 20 # # NULL ENABLED 1 +events_test two_event ev_test@localhost SYSTEM RECURRING NULL 20 # # NULL ENABLED 1 "This should show us only 2 events:"; SHOW EVENTS LIKE 
't%event'; -Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status -events_test three_event ev_test@localhost SYSTEM RECURRING NULL 20 # # NULL ENABLED -events_test two_event ev_test@localhost SYSTEM RECURRING NULL 20 # # NULL ENABLED +Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator +events_test three_event ev_test@localhost SYSTEM RECURRING NULL 20 # # NULL ENABLED 1 +events_test two_event ev_test@localhost SYSTEM RECURRING NULL 20 # # NULL ENABLED 1 "This should show us no events:"; SHOW EVENTS FROM test LIKE '%'; -Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status +Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator GRANT EVENT ON events_test2.* TO ev_test@localhost; USE events_test2; CREATE EVENT four_event ON SCHEDULE EVERY 20 SECOND DO SELECT 42; diff --git a/mysql-test/r/events_scheduling.result b/mysql-test/r/events_scheduling.result index b945b83093f..33c0781d4e7 100644 --- a/mysql-test/r/events_scheduling.result +++ b/mysql-test/r/events_scheduling.result @@ -73,7 +73,7 @@ OK SELECT IF(LAST_EXECUTED-ENDS < 3, 'OK', 'ERROR') FROM INFORMATION_SCHEMA.EVENTS WHERE EVENT_SCHEMA=DATABASE() AND EVENT_NAME='start_n_end' AND ENDS IS NOT NULL; IF(LAST_EXECUTED-ENDS < 3, 'OK', 'ERROR') OK -DROP EVENT start_n_end; +DROP EVENT IF EXISTS events_test.start_n_end; "Already dropped because ended. Therefore an error." DROP EVENT only_one_time; ERROR HY000: Unknown event 'only_one_time' diff --git a/mysql-test/r/federated_server.result b/mysql-test/r/federated_server.result index 7a1a6e0970d..a3e7cd793a6 100644 --- a/mysql-test/r/federated_server.result +++ b/mysql-test/r/federated_server.result @@ -20,6 +20,14 @@ CREATE TABLE first_db.t1 ( `name` varchar(64) NOT NULL default '' ) DEFAULT CHARSET=latin1; +DROP TABLE IF EXISTS first_db.t2; +Warnings: +Note 1051 Unknown table 't2' +CREATE TABLE first_db.t2 ( +`id` int(20) NOT NULL, +`name` varchar(64) NOT NULL default '' + ) +DEFAULT CHARSET=latin1; use second_db; DROP TABLE IF EXISTS second_db.t1; Warnings: @@ -29,6 +37,14 @@ CREATE TABLE second_db.t1 ( `name` varchar(64) NOT NULL default '' ) DEFAULT CHARSET=latin1; +DROP TABLE IF EXISTS second_db.t2; +Warnings: +Note 1051 Unknown table 't2' +CREATE TABLE second_db.t2 ( +`id` int(20) NOT NULL, +`name` varchar(64) NOT NULL default '' + ) +DEFAULT CHARSET=latin1; drop server if exists 'server_one'; create server 'server_one' foreign data wrapper 'mysql' options (HOST '127.0.0.1', @@ -60,10 +76,10 @@ CREATE TABLE federated.old ( ) ENGINE="FEDERATED" DEFAULT CHARSET=latin1 CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/first_db/t1'; -INSERT INTO federated.old (id, name) values (1, 'federated.old url'); +INSERT INTO federated.old (id, name) values (1, 'federated.old-> first_db.t1, url format'); SELECT * FROM federated.old; id name -1 federated.old url +1 federated.old-> first_db.t1, url format DROP TABLE IF EXISTS federated.old2; Warnings: Note 1051 Unknown table 'old2' @@ -72,8 +88,37 @@ CREATE TABLE federated.old2 ( `name` varchar(64) NOT NULL default '' ) ENGINE="FEDERATED" DEFAULT CHARSET=latin1 +CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/first_db/t2'; +INSERT INTO federated.old2 (id, name) values (1, 'federated.old2-> first_db.t2, url format'); +SELECT * FROM federated.old2; +id name +1 federated.old2-> first_db.t2, url format +DROP TABLE IF EXISTS federated.urldb2t1; +Warnings: +Note 1051 Unknown table 'urldb2t1' 
+CREATE TABLE federated.urldb2t1 ( +`id` int(20) NOT NULL, +`name` varchar(64) NOT NULL default '' + ) +ENGINE="FEDERATED" DEFAULT CHARSET=latin1 CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/second_db/t1'; -INSERT INTO federated.old2 (id, name) values (1, 'federated.old2 url'); +INSERT INTO federated.urldb2t1 (id, name) values (1, 'federated.urldb2t1 -> second_db.t1, url format'); +SELECT * FROM federated.urldb2t1; +id name +1 federated.urldb2t1 -> second_db.t1, url format +DROP TABLE IF EXISTS federated.urldb2t2; +Warnings: +Note 1051 Unknown table 'urldb2t2' +CREATE TABLE federated.urldb2t2 ( +`id` int(20) NOT NULL, +`name` varchar(64) NOT NULL default '' + ) +ENGINE="FEDERATED" DEFAULT CHARSET=latin1 +CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/second_db/t2'; +INSERT INTO federated.urldb2t2 (id, name) values (1, 'federated.urldb2t2 -> second_db.t2, url format'); +SELECT * FROM federated.urldb2t2; +id name +1 federated.urldb2t2 -> second_db.t2, url format DROP TABLE IF EXISTS federated.t1; Warnings: Note 1051 Unknown table 't1' @@ -83,18 +128,38 @@ CREATE TABLE federated.t1 ( ) ENGINE="FEDERATED" DEFAULT CHARSET=latin1 CONNECTION='server_one'; -INSERT INTO federated.t1 (id, name) values (1, 'server_one, new scheme'); +INSERT INTO federated.t1 (id, name) values (1, 'server_one, new scheme, first_db.t1'); SELECT * FROM federated.t1; id name -1 federated.old url -1 server_one, new scheme +1 federated.old-> first_db.t1, url format +1 server_one, new scheme, first_db.t1 +DROP TABLE IF EXISTS federated.whatever; +Warnings: +Note 1051 Unknown table 'whatever' +CREATE TABLE federated.whatever ( +`id` int(20) NOT NULL, +`name` varchar(64) NOT NULL default '' + ) +ENGINE="FEDERATED" DEFAULT CHARSET=latin1 +CONNECTION='server_one/t1'; +INSERT INTO federated.whatever (id, name) values (1, 'server_one, new scheme, whatever, first_db.t1'); +SELECT * FROM federated.whatever; +id name +1 federated.old-> first_db.t1, url format +1 server_one, new scheme, first_db.t1 +1 server_one, new scheme, whatever, first_db.t1 ALTER SERVER 'server_one' options(DATABASE 'second_db'); -flush tables; -INSERT INTO federated.t1 (id, name) values (1, 'server_two, new scheme'); +INSERT INTO federated.t1 (id, name) values (1, 'server_two, new scheme, second_db.t1'); SELECT * FROM federated.t1; id name -1 federated.old2 url -1 server_two, new scheme +1 federated.urldb2t1 -> second_db.t1, url format +1 server_two, new scheme, second_db.t1 +INSERT INTO federated.whatever (id, name) values (1, 'server_two, new scheme, whatever, second_db.t1'); +SELECT * FROM federated.whatever; +id name +1 federated.urldb2t1 -> second_db.t1, url format +1 server_two, new scheme, second_db.t1 +1 server_two, new scheme, whatever, second_db.t1 drop table federated.t1; drop server 'server_one'; drop server 'server_two'; @@ -104,6 +169,116 @@ drop table first_db.t1; drop table second_db.t1; drop database first_db; drop database second_db; +create database db_legitimate; +create database db_bogus; +use db_legitimate; +CREATE TABLE db_legitimate.t1 ( +`id` int(20) NOT NULL, +`name` varchar(64) NOT NULL default '' + ); +INSERT INTO db_legitimate.t1 VALUES ('1','this is legitimate'); +use db_bogus; +CREATE TABLE db_bogus.t1 ( +`id` int(20) NOT NULL, +`name` varchar(64) NOT NULL default '' + ) +; +INSERT INTO db_bogus.t1 VALUES ('2','this is bogus'); +create server 's1' foreign data wrapper 'mysql' options +(HOST '127.0.0.1', +DATABASE 'db_legitimate', +USER 'root', +PASSWORD '', +PORT SLAVE_PORT, +SOCKET '', +OWNER 'root'); +create user 
guest_select@localhost; +grant select on federated.* to guest_select@localhost; +create user guest_super@localhost; +grant select,SUPER,RELOAD on *.* to guest_super@localhost; +create user guest_usage@localhost; +grant usage on *.* to guest_usage@localhost; +CREATE TABLE federated.t1 ( +`id` int(20) NOT NULL, +`name` varchar(64) NOT NULL default '' + ) ENGINE = FEDERATED CONNECTION = 's1'; +select * from federated.t1; +id name +1 this is legitimate +alter server s1 options (database 'db_bogus'); +ERROR 42000: Access denied; you need the SUPER privilege for this operation +flush tables; +select * from federated.t1; +id name +1 this is legitimate +alter server s1 options (database 'db_bogus'); +ERROR 42000: Access denied; you need the SUPER privilege for this operation +flush tables; +select * from federated.t1; +id name +1 this is legitimate +alter server s1 options (database 'db_bogus'); +flush tables; +select * from federated.t1; +id name +2 this is bogus +drop server if exists 's1'; +ERROR 42000: Access denied; you need the SUPER privilege for this operation +create server 's1' foreign data wrapper 'mysql' options +(HOST '127.0.0.1', +DATABASE 'db_legitimate', +USER 'root', +PASSWORD '', +PORT SLAVE_PORT, +SOCKET '', +OWNER 'root'); +ERROR 42000: Access denied; you need the SUPER privilege for this operation +drop server 's1'; +create server 's1' foreign data wrapper 'mysql' options +(HOST '127.0.0.1', +DATABASE 'db_legitimate', +USER 'root', +PASSWORD '', +PORT SLAVE_PORT, +SOCKET '', +OWNER 'root'); +flush tables; +select * from federated.t1; +id name +1 this is legitimate +drop database db_legitimate; +drop database db_bogus; +drop user guest_super@localhost; +drop user guest_usage@localhost; +drop user guest_select@localhost; +drop table federated.t1; +drop server 's1'; +# End of 5.1 tests +use test; +create procedure p1 () +begin +DECLARE v INT DEFAULT 0; +DECLARE e INT DEFAULT 0; +DECLARE CONTINUE HANDLER FOR SQLEXCEPTION SET e = e + 1; +WHILE v < 10000 do +CREATE SERVER s +FOREIGN DATA WRAPPER mysql +OPTIONS (USER 'Remote', HOST '192.168.1.106', DATABASE 'test'); +ALTER SERVER s OPTIONS (USER 'Remote'); +DROP SERVER s; +SET v = v + 1; +END WHILE; +SELECT e > 0; +END// +use test; +call p1(); +call p1(); +e > 0 +1 +e > 0 +1 +drop procedure p1; +drop server if exists s; DROP TABLE IF EXISTS federated.t1; DROP DATABASE IF EXISTS federated; DROP TABLE IF EXISTS federated.t1; diff --git a/mysql-test/r/flush_block_commit_notembedded.result b/mysql-test/r/flush_block_commit_notembedded.result index 1d045b21763..16fb143ee4c 100644 --- a/mysql-test/r/flush_block_commit_notembedded.result +++ b/mysql-test/r/flush_block_commit_notembedded.result @@ -5,11 +5,11 @@ insert t1 values (1); flush tables with read lock; show master status; File Position Binlog_Do_DB Binlog_Ignore_DB -master-bin.000001 102 +master-bin.000001 106 commit; show master status; File Position Binlog_Do_DB Binlog_Ignore_DB -master-bin.000001 102 +master-bin.000001 106 unlock tables; drop table t1; set autocommit=1; diff --git a/mysql-test/r/func_gconcat.result b/mysql-test/r/func_gconcat.result index 57b0a2aec25..4c7baac051a 100644 --- a/mysql-test/r/func_gconcat.result +++ b/mysql-test/r/func_gconcat.result @@ -734,3 +734,13 @@ f2 group_concat(f1) aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 1 
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 2 drop table t1; +CREATE TABLE t1(a TEXT, b CHAR(20)); +INSERT INTO t1 VALUES ("one.1","one.1"),("two.2","two.2"),("one.3","one.3"); +SELECT GROUP_CONCAT(DISTINCT UCASE(a)) FROM t1; +GROUP_CONCAT(DISTINCT UCASE(a)) +ONE.1,TWO.2,ONE.3 +SELECT GROUP_CONCAT(DISTINCT UCASE(b)) FROM t1; +GROUP_CONCAT(DISTINCT UCASE(b)) +ONE.1,TWO.2,ONE.3 +DROP TABLE t1; +End of 5.0 tests diff --git a/mysql-test/r/func_in.result b/mysql-test/r/func_in.result index 7a8f59c65f4..992d1cd418b 100644 --- a/mysql-test/r/func_in.result +++ b/mysql-test/r/func_in.result @@ -477,6 +477,11 @@ a Warnings: Warning 1292 Incorrect date value: '19772-07-29' for column 'a' at row 1 DROP TABLE t1,t2,t3,t4; +CREATE TABLE t1 (id int not null); +INSERT INTO t1 VALUES (1),(2); +SELECT id FROM t1 WHERE id IN(4564, (SELECT IF(1=0,1,1/0)) ); +id +DROP TABLE t1; End of 5.0 tests create table t1(f1 char(1)); insert into t1 values ('a'),('b'),('1'); diff --git a/mysql-test/r/gis.result b/mysql-test/r/gis.result index 8f81c7c4b66..63a92fa3a62 100644 --- a/mysql-test/r/gis.result +++ b/mysql-test/r/gis.result @@ -730,6 +730,12 @@ point(b, b) IS NULL linestring(b) IS NULL polygon(b) IS NULL multipoint(b) IS NU 1 1 1 1 1 1 1 0 1 1 1 1 1 1 drop table t1; +CREATE TABLE t1(a POINT) ENGINE=MyISAM; +INSERT INTO t1 VALUES (NULL); +SELECT * FROM t1; +a +NULL +DROP TABLE t1; End of 4.1 tests create table t1 (s1 geometry not null,s2 char(100)); create trigger t1_bu before update on t1 for each row set new.s1 = null; @@ -764,6 +770,17 @@ create table t1 (g geometry not null); insert into t1 values(default); ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field drop table t1; +CREATE TABLE t1 (a GEOMETRY); +CREATE VIEW v1 AS SELECT GeomFromwkb(ASBINARY(a)) FROM t1; +CREATE VIEW v2 AS SELECT a FROM t1; +DESCRIBE v1; +Field Type Null Key Default Extra +GeomFromwkb(ASBINARY(a)) geometry YES NULL +DESCRIBE v2; +Field Type Null Key Default Extra +a geometry YES NULL +DROP VIEW v1,v2; +DROP TABLE t1; create table t1 (f1 tinyint(1), f2 char(1), f3 varchar(1), f4 geometry, f5 datetime); create view v1 as select * from t1; desc v1; diff --git a/mysql-test/r/group_by.result b/mysql-test/r/group_by.result index 5cff5fec7ed..c7464bb21d2 100644 --- a/mysql-test/r/group_by.result +++ b/mysql-test/r/group_by.result @@ -1141,4 +1141,9 @@ EXPLAIN SELECT 1 FROM t2 WHERE a IN id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t2 index NULL a 5 NULL 4 Using where; Using index 2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 256 Using where +SHOW VARIABLES LIKE 'old'; +Variable_name Value +old OFF +SET @@old = off; +ERROR HY000: Variable 'old' is a read only variable DROP TABLE t1, t2; diff --git a/mysql-test/r/heap_btree.result b/mysql-test/r/heap_btree.result index fd789a39d88..ab4b892170a 100644 --- a/mysql-test/r/heap_btree.result +++ b/mysql-test/r/heap_btree.result @@ -280,6 +280,33 @@ a 1 1 drop table t1; +CREATE TABLE t1 ( +c1 CHAR(3), +c2 INTEGER, +KEY USING BTREE(c1), +KEY USING BTREE(c2) +) ENGINE= MEMORY; +INSERT INTO t1 VALUES ('ABC',0), ('A',0), ('B',0), ('C',0); +UPDATE t1 SET c2= c2 + 1 WHERE c1 = 'A'; +SELECT * FROM t1; +c1 c2 +ABC 0 +A 1 +B 0 +C 0 +DROP TABLE t1; +CREATE TABLE t1 ( +c1 ENUM('1', '2'), +UNIQUE USING BTREE(c1) +) ENGINE= MEMORY DEFAULT 
CHARSET= utf8; +INSERT INTO t1 VALUES('1'), ('2'); +DROP TABLE t1; +CREATE TABLE t1 ( +c1 SET('1', '2'), +UNIQUE USING BTREE(c1) +) ENGINE= MEMORY DEFAULT CHARSET= utf8; +INSERT INTO t1 VALUES('1'), ('2'); +DROP TABLE t1; End of 4.1 tests CREATE TABLE t1(val INT, KEY USING BTREE(val)) ENGINE=memory; INSERT INTO t1 VALUES(0); diff --git a/mysql-test/r/help.result b/mysql-test/r/help.result index 4b7e320454e..16719cc8193 100644 --- a/mysql-test/r/help.result +++ b/mysql-test/r/help.result @@ -1,63 +1,63 @@ -insert into mysql.help_category(help_category_id,name)values(1,'impossible_category_1'); +insert into mysql.help_category(help_category_id,name)values(10001,'impossible_category_1'); Warnings: Warning 1364 Field 'url' doesn't have a default value -select @category1_id:= 1; -@category1_id:= 1 -1 -insert into mysql.help_category(help_category_id,name)values(2,'impossible_category_2'); +select @category1_id:= 10001; +@category1_id:= 10001 +10001 +insert into mysql.help_category(help_category_id,name)values(10002,'impossible_category_2'); Warnings: Warning 1364 Field 'url' doesn't have a default value -select @category2_id:= 2; -@category2_id:= 2 -2 -insert into mysql.help_category(help_category_id,name,parent_category_id)values(3,'impossible_category_3',@category2_id); +select @category2_id:= 10002; +@category2_id:= 10002 +10002 +insert into mysql.help_category(help_category_id,name,parent_category_id)values(10003,'impossible_category_3',@category2_id); Warnings: Warning 1364 Field 'url' doesn't have a default value -select @category3_id:= 3; -@category3_id:= 3 -3 -insert into mysql.help_topic(help_topic_id,name,help_category_id,description,example)values(1,'impossible_function_1',@category1_id,'description of \n impossible_function1\n','example of \n impossible_function1'); +select @category3_id:= 10003; +@category3_id:= 10003 +10003 +insert into mysql.help_topic(help_topic_id,name,help_category_id,description,example)values(10101,'impossible_function_1',@category1_id,'description of \n impossible_function1\n','example of \n impossible_function1'); Warnings: Warning 1364 Field 'url' doesn't have a default value -select @topic1_id:= 1; -@topic1_id:= 1 -1 -insert into mysql.help_topic(help_topic_id,name,help_category_id,description,example)values(2,'impossible_function_2',@category1_id,'description of \n impossible_function2\n','example of \n impossible_function2'); +select @topic1_id:= 10101; +@topic1_id:= 10101 +10101 +insert into mysql.help_topic(help_topic_id,name,help_category_id,description,example)values(10102,'impossible_function_2',@category1_id,'description of \n impossible_function2\n','example of \n impossible_function2'); Warnings: Warning 1364 Field 'url' doesn't have a default value -select @topic2_id:= 2; -@topic2_id:= 2 -2 -insert into mysql.help_topic(help_topic_id,name,help_category_id,description,example)values(3,'impossible_function_3',@category2_id,'description of \n impossible_function3\n','example of \n impossible_function3'); +select @topic2_id:= 10102; +@topic2_id:= 10102 +10102 +insert into mysql.help_topic(help_topic_id,name,help_category_id,description,example)values(10103,'impossible_function_3',@category2_id,'description of \n impossible_function3\n','example of \n impossible_function3'); Warnings: Warning 1364 Field 'url' doesn't have a default value -select @topic3_id:= 3; -@topic3_id:= 3 -3 -insert into mysql.help_topic(help_topic_id,name,help_category_id,description,example)values(4,'impossible_function_4',@category2_id,'description of \n 
impossible_function4\n','example of \n impossible_function4'); +select @topic3_id:= 10103; +@topic3_id:= 10103 +10103 +insert into mysql.help_topic(help_topic_id,name,help_category_id,description,example)values(10104,'impossible_function_4',@category2_id,'description of \n impossible_function4\n','example of \n impossible_function4'); Warnings: Warning 1364 Field 'url' doesn't have a default value -select @topic4_id:= 4; -@topic4_id:= 4 -4 -insert into mysql.help_topic(help_topic_id,name,help_category_id,description,example)values(5,'impossible_function_7',@category3_id,'description of \n impossible_function5\n','example of \n impossible_function7'); +select @topic4_id:= 10104; +@topic4_id:= 10104 +10104 +insert into mysql.help_topic(help_topic_id,name,help_category_id,description,example)values(10105,'impossible_function_7',@category3_id,'description of \n impossible_function5\n','example of \n impossible_function7'); Warnings: Warning 1364 Field 'url' doesn't have a default value -select @topic5_id:= 5; -@topic5_id:= 5 -5 -insert into mysql.help_keyword(help_keyword_id,name)values(1,'impossible_function_1'); -select @keyword1_id:= 1; -@keyword1_id:= 1 -1 -insert into mysql.help_keyword(help_keyword_id,name)values(2,'impossible_function_5'); -select @keyword2_id:= 2; -@keyword2_id:= 2 -2 -insert into mysql.help_keyword(help_keyword_id,name)values(3,'impossible_function_6'); -select @keyword3_id:= 3; -@keyword3_id:= 3 -3 +select @topic5_id:= 10105; +@topic5_id:= 10105 +10105 +insert into mysql.help_keyword(help_keyword_id,name)values(10201,'impossible_function_1'); +select @keyword1_id:= 10201; +@keyword1_id:= 10201 +10201 +insert into mysql.help_keyword(help_keyword_id,name)values(10202,'impossible_function_5'); +select @keyword2_id:= 10202; +@keyword2_id:= 10202 +10202 +insert into mysql.help_keyword(help_keyword_id,name)values(10203,'impossible_function_6'); +select @keyword3_id:= 10203; +@keyword3_id:= 10203 +10203 insert into mysql.help_relation(help_keyword_id,help_topic_id)values(@keyword1_id,@topic2_id); insert into mysql.help_relation(help_keyword_id,help_topic_id)values(@keyword2_id,@topic1_id); insert into mysql.help_relation(help_keyword_id,help_topic_id)values(@keyword3_id,@topic3_id); diff --git a/mysql-test/r/information_schema.result b/mysql-test/r/information_schema.result index e5d6078e863..3453a486da9 100644 --- a/mysql-test/r/information_schema.result +++ b/mysql-test/r/information_schema.result @@ -1408,4 +1408,35 @@ select user,db from information_schema.processlist; user db user3148 test drop user user3148@localhost; +DROP TABLE IF EXISTS thread_status; +DROP TABLE IF EXISTS server_status; +DROP EVENT IF EXISTS event_status; +SET GLOBAL event_scheduler=1; +CREATE EVENT event_status +ON SCHEDULE AT NOW() +ON COMPLETION NOT PRESERVE +DO +BEGIN +CREATE TABLE thread_status +SELECT variable_name, variable_value +FROM information_schema.session_status +WHERE variable_name LIKE 'SSL_ACCEPTS' OR +variable_name LIKE 'SSL_CALLBACK_CACHE_HITS'; +CREATE TABLE server_status +SELECT variable_name +FROM information_schema.global_status +WHERE variable_name LIKE 'ABORTED_CONNECTS' OR +variable_name LIKE 'BINLOG_CACHE_DISK_USE'; +END$$ +SELECT variable_name, variable_value FROM thread_status; +variable_name variable_value +SSL_ACCEPTS 0.0000000 +SSL_CALLBACK_CACHE_HITS 0.0000000 +SELECT variable_name FROM server_status; +variable_name +ABORTED_CONNECTS +BINLOG_CACHE_DISK_USE +DROP TABLE thread_status; +DROP TABLE server_status; +SET GLOBAL event_scheduler=0; End of 5.1 tests. 
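
For reference, the SERVER-object workflow that the federated results above exercise can be reduced to the following sketch. This is an illustrative summary, not part of the recorded test output; the host, port and database names are placeholders (the test itself substitutes SLAVE_PORT at run time).

-- Define a connection once, as a named SERVER object.
CREATE SERVER server_one
  FOREIGN DATA WRAPPER mysql
  OPTIONS (HOST '127.0.0.1', PORT 3306, DATABASE 'first_db',
           USER 'root', PASSWORD '');

-- FEDERATED tables may then reference the server by name,
-- optionally appending the remote table name after a slash.
CREATE TABLE federated.t1 (
  id   INT(20)     NOT NULL,
  name VARCHAR(64) NOT NULL DEFAULT ''
) ENGINE=FEDERATED CONNECTION='server_one/t1';

-- Changing the server definition redirects every table that uses it;
-- as the guest_* checks above show, this requires the SUPER privilege.
ALTER SERVER server_one OPTIONS (DATABASE 'second_db');

DROP SERVER server_one;
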
diff --git a/mysql-test/r/insert_select.result b/mysql-test/r/insert_select.result index 18a0ed1a1cb..57339b2e29f 100644 --- a/mysql-test/r/insert_select.result +++ b/mysql-test/r/insert_select.result @@ -730,3 +730,32 @@ f1 f2 2 2 10 10 DROP TABLE t1, t2; +SET SQL_MODE='STRICT_TRANS_TABLES,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION'; +CREATE TABLE t1 (c VARCHAR(30), INDEX ix_c (c(10))); +CREATE TABLE t2 (d VARCHAR(10)); +INSERT INTO t1 (c) VALUES ('7_chars'), ('13_characters'); +EXPLAIN +SELECT (SELECT SUM(LENGTH(c)) FROM t1 WHERE c='13_characters') FROM t1; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 2 +2 SUBQUERY t1 ref ix_c ix_c 13 const 1 Using where +SELECT (SELECT SUM(LENGTH(c)) FROM t1 WHERE c='13_characters') FROM t1; +(SELECT SUM(LENGTH(c)) FROM t1 WHERE c='13_characters') +13 +13 +INSERT INTO t2 (d) +SELECT (SELECT SUM(LENGTH(c)) FROM t1 WHERE c='13_characters') FROM t1; +INSERT INTO t2 (d) +SELECT (SELECT SUM(LENGTH(c)) FROM t1 WHERE c='7_chars') FROM t1; +INSERT INTO t2 (d) +SELECT (SELECT SUM(LENGTH(c)) FROM t1 WHERE c IN (SELECT t1.c FROM t1)) +FROM t1; +SELECT * FROM t2; +d +13 +13 +7 +7 +20 +20 +DROP TABLE t1,t2; diff --git a/mysql-test/r/insert_update.result b/mysql-test/r/insert_update.result index c7351258266..fed66ed47a7 100644 --- a/mysql-test/r/insert_update.result +++ b/mysql-test/r/insert_update.result @@ -258,3 +258,81 @@ SELECT LAST_INSERT_ID(); LAST_INSERT_ID() 1 DROP TABLE t1; +SET SQL_MODE='NO_AUTO_VALUE_ON_ZERO'; +CREATE TABLE `t1` ( +`id` int(11) PRIMARY KEY auto_increment, +`f1` varchar(10) NOT NULL UNIQUE +); +INSERT IGNORE INTO t1 (f1) VALUES ("test1") +ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id); +INSERT IGNORE INTO t1 (f1) VALUES ("test1") +ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id); +SELECT LAST_INSERT_ID(); +LAST_INSERT_ID() +1 +SELECT * FROM t1; +id f1 +1 test1 +INSERT IGNORE INTO t1 (f1) VALUES ("test2") +ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id); +SELECT * FROM t1; +id f1 +1 test1 +2 test2 +INSERT IGNORE INTO t1 (f1) VALUES ("test2") +ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id); +SELECT LAST_INSERT_ID(); +LAST_INSERT_ID() +2 +SELECT * FROM t1; +id f1 +1 test1 +2 test2 +INSERT IGNORE INTO t1 (f1) VALUES ("test3") +ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id); +SELECT LAST_INSERT_ID(); +LAST_INSERT_ID() +3 +SELECT * FROM t1; +id f1 +1 test1 +2 test2 +3 test3 +DROP TABLE t1; +CREATE TABLE `t1` ( +`id` int(11) PRIMARY KEY auto_increment, +`f1` varchar(10) NOT NULL UNIQUE +); +INSERT IGNORE INTO t1 (f1) VALUES ("test1") +ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id); +SELECT LAST_INSERT_ID(); +LAST_INSERT_ID() +1 +SELECT * FROM t1; +id f1 +1 test1 +INSERT IGNORE INTO t1 (f1) VALUES ("test1"),("test4") +ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id); +SELECT LAST_INSERT_ID(); +LAST_INSERT_ID() +2 +SELECT * FROM t1; +id f1 +1 test1 +2 test4 +DROP TABLE t1; +CREATE TABLE `t1` ( +`id` int(11) PRIMARY KEY auto_increment, +`f1` varchar(10) NOT NULL UNIQUE, +tim1 timestamp default '2003-01-01 00:00:00' on update current_timestamp +); +INSERT INTO t1 (f1) VALUES ("test1"); +SELECT id, f1 FROM t1; +id f1 +1 test1 +REPLACE INTO t1 VALUES (0,"test1",null); +SELECT id, f1 FROM t1; +id f1 +0 test1 +DROP TABLE t1; +SET SQL_MODE=''; diff --git a/mysql-test/r/loaddata.result b/mysql-test/r/loaddata.result index c8fcdf14ea0..eb3418f1f18 100644 --- a/mysql-test/r/loaddata.result +++ b/mysql-test/r/loaddata.result @@ -157,12 +157,12 @@ MYSQLTEST_VARDIR/ set @@secure_file_priv= 0; ERROR HY000: 
Variable 'secure_file_priv' is a read only variable truncate table t1; -load data infile 'MYSQL_TEST_DIR/Makefile' into table t1; +load data infile 'MYSQL_TEST_DIR/t/loaddata.test' into table t1; ERROR HY000: The MySQL server is running with the --secure-file-priv option so it cannot execute this statement select * from t1; a b c -select load_file("MYSQL_TEST_DIR/Makefile"); -load_file("MYSQL_TEST_DIR/Makefile") +select load_file("MYSQL_TEST_DIR/t/loaddata.test"); +load_file("MYSQL_TEST_DIR/t/loaddata.test") NULL drop table t1, t2; CREATE TABLE t1 (a int); diff --git a/mysql-test/r/merge.result b/mysql-test/r/merge.result index 34f1b76ba05..6cbdaf6f4f8 100644 --- a/mysql-test/r/merge.result +++ b/mysql-test/r/merge.result @@ -806,11 +806,6 @@ CREATE TABLE tm1(a SMALLINT, b SMALLINT, KEY(a)) ENGINE=MERGE UNION=(t1); SELECT * FROM tm1; ERROR HY000: Unable to open underlying table which is differently defined or of non-MyISAM type or doesn't exist DROP TABLE t1, tm1; -CREATE TABLE t1(c1 INT) ENGINE=MyISAM; -CREATE TABLE t2(c1 INT) ENGINE=MERGE UNION=(t1); -INSERT DELAYED INTO t2 VALUES(1); -ERROR HY000: Table storage engine for 't2' doesn't have this option -DROP TABLE t1, t2; CREATE TABLE t1(c1 VARCHAR(1)); CREATE TABLE m1 LIKE t1; ALTER TABLE m1 ENGINE=MERGE UNION=(t1); diff --git a/mysql-test/r/myisam.result b/mysql-test/r/myisam.result index bb666b2e499..dcb471510cd 100644 --- a/mysql-test/r/myisam.result +++ b/mysql-test/r/myisam.result @@ -943,6 +943,156 @@ SHOW TABLE STATUS LIKE 't1'; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment t1 MyISAM 10 Dynamic X X X 72057594037927935 X X X X X X latin1_swedish_ci X max_rows=4100100100 avg_row_length=70100 DROP TABLE t1; +CREATE TABLE t1 (c1 TEXT NOT NULL, KEY c1 (c1(10))) ENGINE=MyISAM; +INSERT INTO t1 VALUES +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), 
(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), 
(CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), +(''), (''), (''), (''), +(' B'), (' B'), (' B'), (' B'); +SELECT DISTINCT COUNT(*) FROM t1 WHERE c1 = ''; +COUNT(*) +4 +SELECT DISTINCT length(c1), c1 FROM t1 WHERE c1 = ''; +length(c1) c1 +0 +SELECT DISTINCT COUNT(*) FROM t1 IGNORE INDEX (c1) WHERE c1 = ''; +COUNT(*) +4 +SELECT DISTINCT length(c1), c1 FROM t1 IGNORE INDEX (c1) WHERE c1 = ''; +length(c1) c1 +0 +SELECT DISTINCT length(c1), c1 FROM t1 ORDER BY c1; +length(c1) c1 +0 +2 A +2 B +DROP TABLE t1; End of 4.1 tests set storage_engine=MyISAM; drop table if exists t1,t2,t3; diff --git a/mysql-test/r/mysqldump.result b/mysql-test/r/mysqldump.result index da90ff2cf6b..895e5dd46fa 100644 --- a/mysql-test/r/mysqldump.result +++ b/mysql-test/r/mysqldump.result @@ -3440,8 +3440,8 @@ use first; set time_zone = 'UTC'; create event ee1 on schedule at '2035-12-31 20:01:23' do set @a=5; show events; -Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status -first ee1 root@localhost UTC ONE TIME 2035-12-31 20:01:23 NULL NULL NULL NULL ENABLED +Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator +first ee1 root@localhost UTC ONE TIME 2035-12-31 20:01:23 NULL NULL NULL NULL ENABLED 1 show create event ee1; Event sql_mode time_zone Create Event ee1 UTC CREATE EVENT `ee1` ON SCHEDULE AT '2035-12-31 20:01:23' ON COMPLETION NOT PRESERVE ENABLE DO set @a=5 @@ -3449,26 
+3449,26 @@ drop database first; create database second; use second; show events; -Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status -second ee1 root@localhost UTC ONE TIME 2035-12-31 20:01:23 NULL NULL NULL NULL ENABLED +Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator +second ee1 root@localhost UTC ONE TIME 2035-12-31 20:01:23 NULL NULL NULL NULL ENABLED 1 show create event ee1; Event sql_mode time_zone Create Event ee1 NO_AUTO_VALUE_ON_ZERO UTC CREATE EVENT `ee1` ON SCHEDULE AT '2035-12-31 20:01:23' ON COMPLETION NOT PRESERVE ENABLE DO set @a=5 create event ee2 on schedule at '2018-12-31 21:01:23' do set @a=5; create event ee3 on schedule at '2030-12-31 22:01:23' do set @a=5; show events; -Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status -second ee1 root@localhost UTC ONE TIME 2035-12-31 20:01:23 NULL NULL NULL NULL ENABLED -second ee2 root@localhost UTC ONE TIME 2018-12-31 21:01:23 NULL NULL NULL NULL ENABLED -second ee3 root@localhost UTC ONE TIME 2030-12-31 22:01:23 NULL NULL NULL NULL ENABLED +Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator +second ee1 root@localhost UTC ONE TIME 2035-12-31 20:01:23 NULL NULL NULL NULL ENABLED 1 +second ee2 root@localhost UTC ONE TIME 2018-12-31 21:01:23 NULL NULL NULL NULL ENABLED 1 +second ee3 root@localhost UTC ONE TIME 2030-12-31 22:01:23 NULL NULL NULL NULL ENABLED 1 drop database second; create database third; use third; show events; -Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status -third ee1 root@localhost UTC ONE TIME 2035-12-31 20:01:23 NULL NULL NULL NULL ENABLED -third ee2 root@localhost UTC ONE TIME 2018-12-31 21:01:23 NULL NULL NULL NULL ENABLED -third ee3 root@localhost UTC ONE TIME 2030-12-31 22:01:23 NULL NULL NULL NULL ENABLED +Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator +third ee1 root@localhost UTC ONE TIME 2035-12-31 20:01:23 NULL NULL NULL NULL ENABLED 1 +third ee2 root@localhost UTC ONE TIME 2018-12-31 21:01:23 NULL NULL NULL NULL ENABLED 1 +third ee3 root@localhost UTC ONE TIME 2030-12-31 22:01:23 NULL NULL NULL NULL ENABLED 1 drop database third; set time_zone = 'SYSTEM'; use test; diff --git a/mysql-test/r/mysqlslap.result b/mysql-test/r/mysqlslap.result index 1a8b77fde1c..bca7919d78c 100644 --- a/mysql-test/r/mysqlslap.result +++ b/mysql-test/r/mysqlslap.result @@ -143,3 +143,27 @@ select * from t1; select * from t2; select * from t1; DROP SCHEMA IF EXISTS `mysqlslap`; +DROP SCHEMA IF EXISTS `mysqlslap`; +CREATE SCHEMA `mysqlslap`; +use mysqlslap; +set storage_engine=`heap`; +CREATE TABLE t1 (id int, name varchar(64)); +create table t2(foo1 varchar(32), foo2 varchar(32)); +INSERT INTO t1 VALUES (1, 'This is a test'); +insert into t2 values ('test', 'test2'); +SHOW TABLES; +select * from t1; +SHOW TABLES; +DROP SCHEMA IF EXISTS `mysqlslap`; +DROP SCHEMA IF EXISTS `mysqlslap`; +CREATE SCHEMA `mysqlslap`; +use mysqlslap; +set storage_engine=`myisam`; +CREATE TABLE t1 (id int, name varchar(64)); +create table t2(foo1 varchar(32), foo2 varchar(32)); +INSERT INTO t1 VALUES (1, 'This is a test'); +insert into t2 values ('test', 'test2'); +SHOW TABLES; +select * from t1; +SHOW TABLES; +DROP SCHEMA IF EXISTS `mysqlslap`; diff --git a/mysql-test/r/ndb_binlog_multi.result b/mysql-test/r/ndb_binlog_multi.result index 3a84c89a7a4..cae1cfa8ce7 100644 
--- a/mysql-test/r/ndb_binlog_multi.result +++ b/mysql-test/r/ndb_binlog_multi.result @@ -13,6 +13,7 @@ master-bin1.000001 # Query # # BEGIN master-bin1.000001 # Table_map # # table_id: # (test.t2) master-bin1.000001 # Table_map # # table_id: # (mysql.ndb_apply_status) master-bin1.000001 # Write_rows # # table_id: # +master-bin1.000001 # Write_rows # # table_id: # master-bin1.000001 # Write_rows # # table_id: # flags: STMT_END_F master-bin1.000001 # Query # # COMMIT select * from t2 order by a; @@ -35,6 +36,7 @@ master-bin.000001 # Query # # BEGIN master-bin.000001 # Table_map # # table_id: # (test.t2) master-bin.000001 # Table_map # # table_id: # (mysql.ndb_apply_status) master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F master-bin.000001 # Query # # COMMIT master-bin.000001 # Query # # use `test`; DROP TABLE t2 @@ -53,6 +55,7 @@ master-bin1.000001 # Query # # BEGIN master-bin1.000001 # Table_map # # table_id: # (test.t1) master-bin1.000001 # Table_map # # table_id: # (mysql.ndb_apply_status) master-bin1.000001 # Write_rows # # table_id: # +master-bin1.000001 # Write_rows # # table_id: # master-bin1.000001 # Write_rows # # table_id: # flags: STMT_END_F master-bin1.000001 # Query # # COMMIT SELECT @the_epoch2:=epoch,inserts,updates,deletes,schemaops FROM @@ -71,6 +74,7 @@ master-bin1.000001 # Query # # BEGIN master-bin1.000001 # Table_map # # table_id: # (test.t1) master-bin1.000001 # Table_map # # table_id: # (mysql.ndb_apply_status) master-bin1.000001 # Write_rows # # table_id: # +master-bin1.000001 # Write_rows # # table_id: # master-bin1.000001 # Write_rows # # table_id: # flags: STMT_END_F master-bin1.000001 # Query # # COMMIT master-bin1.000001 # Query # # use `test`; drop table t1 diff --git a/mysql-test/r/ndb_blob.result b/mysql-test/r/ndb_blob.result index ad33c7994d1..34f2c5fdd66 100644 --- a/mysql-test/r/ndb_blob.result +++ b/mysql-test/r/ndb_blob.result @@ -76,6 +76,8 @@ commit; select a from t1 where d is null; a 1 +delete from t1 where a=45567; +commit; delete from t1 where a=1; delete from t1 where a=2; commit; diff --git a/mysql-test/r/ndb_partition_key.result b/mysql-test/r/ndb_partition_key.result index e294807b40d..685137b7710 100644 --- a/mysql-test/r/ndb_partition_key.result +++ b/mysql-test/r/ndb_partition_key.result @@ -57,6 +57,7 @@ Number of primary keys: 3 Length of frm data: # Row Checksum: 1 Row GCI: 1 +SingleUserMode: 0 TableStatus: Retrieved -- Attributes -- a Int PRIMARY KEY AT=FIXED ST=MEMORY diff --git a/mysql-test/r/ndb_restore_compat.result b/mysql-test/r/ndb_restore_compat.result index f580f680687..d495aa28135 100644 --- a/mysql-test/r/ndb_restore_compat.result +++ b/mysql-test/r/ndb_restore_compat.result @@ -45,8 +45,8 @@ SYSTEM_VALUES_ID VALUE 0 2039 1 3 SELECT * FROM mysql.ndb_apply_status WHERE server_id=0; -server_id epoch -0 151 +server_id epoch log_name start_pos end_pos +0 151 0 0 TRUNCATE GL; TRUNCATE ACCOUNT; TRUNCATE TRANSACTION; @@ -99,6 +99,6 @@ SYSTEM_VALUES_ID VALUE 0 2297 1 5 SELECT * FROM mysql.ndb_apply_status WHERE server_id=0; -server_id epoch -0 331 +server_id epoch log_name start_pos end_pos +0 331 0 0 DROP DATABASE BANK; diff --git a/mysql-test/r/ndb_restore_print.result b/mysql-test/r/ndb_restore_print.result new file mode 100644 index 00000000000..e05f8e43d1a --- /dev/null +++ b/mysql-test/r/ndb_restore_print.result @@ -0,0 +1,321 @@ +use test; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; +create table t1 +(pk int 
key +,a1 BIT(1), a2 BIT(5), a3 BIT(33), a4 BIT(63), a5 BIT(64) +,b1 TINYINT, b2 TINYINT UNSIGNED +,c1 SMALLINT, c2 SMALLINT UNSIGNED +,d1 INT, d2 INT UNSIGNED +,e1 BIGINT, e2 BIGINT UNSIGNED +,f1 CHAR(1) BINARY, f2 CHAR(32) BINARY, f3 CHAR(255) BINARY +,g1 VARCHAR(32) BINARY, g2 VARCHAR(255) BINARY, g3 VARCHAR(1000) BINARY +,h1 BINARY(1), h2 BINARY(8), h3 BINARY(255) +,i1 VARBINARY(32), i2 VARBINARY(255), i3 VARBINARY(1000) +) engine myisam; +insert into t1 values +(1 +,0x1, 0x17, 0x789a, 0x789abcde, 0xfedc0001 +,127, 255 +,32767, 65535 +,2147483647, 4294967295 +,9223372036854775807, 18446744073709551615 +,'1','12345678901234567890123456789012','123456789' + ,'1','12345678901234567890123456789012','123456789' + ,0x12,0x123456789abcdef0, 0x012345 +,0x12,0x123456789abcdef0, 0x00123450 +); +insert into t1 values +(2 +,0, 0, 0, 0, 0 +,-128, 0 +,-32768, 0 +,-2147483648, 0 +,-9223372036854775808, 0 +,'','','' + ,'','','' + ,0x0,0x0,0x0 +,0x0,0x0,0x0 +); +insert into t1 values +(3 +,NULL,NULL,NULL,NULL,NULL +,NULL,NULL +,NULL,NULL +,NULL,NULL +,NULL,NULL +,NULL,NULL,NULL +,NULL,NULL,NULL +,NULL,NULL,NULL +,NULL,NULL,NULL +); +select pk +,hex(a1), hex(a2), hex(a3), hex(a4), hex(a5) +,b1, b2 +,c1 , c2 +,d1 , d2 +,e1 , e2 +,f1 , f2, f3 +,g1 , g2, g3 +,hex(h1), hex(h2), hex(h3) +,hex(i1), hex(i2), hex(i3) +from t1 order by pk; +pk 1 +hex(a1) 1 +hex(a2) 17 +hex(a3) 789A +hex(a4) 789ABCDE +hex(a5) FEDC0001 +b1 127 +b2 255 +c1 32767 +c2 65535 +d1 2147483647 +d2 4294967295 +e1 9223372036854775807 +e2 18446744073709551615 +f1 1 +f2 12345678901234567890123456789012 +f3 123456789 +g1 1 +g2 12345678901234567890123456789012 +g3 123456789 +hex(h1) 12 +hex(h2) 123456789ABCDEF0 +hex(h3) 012345000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 +hex(i1) 12 +hex(i2) 123456789ABCDEF0 +hex(i3) 00123450 +pk 2 +hex(a1) 0 +hex(a2) 0 +hex(a3) 0 +hex(a4) 0 +hex(a5) 0 +b1 -128 +b2 0 +c1 -32768 +c2 0 +d1 -2147483648 +d2 0 +e1 -9223372036854775808 +e2 0 +f1 +f2 +f3 +g1 +g2 +g3 +hex(h1) 00 +hex(h2) 0000000000000000 +hex(h3) 000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 +hex(i1) 00 +hex(i2) 00 +hex(i3) 00 +pk 3 +hex(a1) NULL +hex(a2) NULL +hex(a3) NULL +hex(a4) NULL +hex(a5) NULL +b1 NULL +b2 NULL +c1 NULL +c2 NULL +d1 NULL +d2 NULL +e1 NULL +e2 NULL +f1 NULL +f2 NULL +f3 NULL +g1 NULL +g2 NULL +g3 NULL +hex(h1) NULL +hex(h2) NULL +hex(h3) NULL +hex(i1) NULL +hex(i2) NULL +hex(i3) NULL +alter table t1 engine ndb; +select pk +,hex(a1), hex(a2), hex(a3), hex(a4), hex(a5) +,b1, b2 +,c1 , c2 +,d1 , d2 +,e1 , e2 +,f1 , f2, f3 +,g1 , g2, g3 +,hex(h1), hex(h2), hex(h3) +,hex(i1), hex(i2), hex(i3) +from t1 order by pk; +pk 1 +hex(a1) 
1 +hex(a2) 17 +hex(a3) 789A +hex(a4) 789ABCDE +hex(a5) FEDC0001 +b1 127 +b2 255 +c1 32767 +c2 65535 +d1 2147483647 +d2 4294967295 +e1 9223372036854775807 +e2 18446744073709551615 +f1 1 +f2 12345678901234567890123456789012 +f3 123456789 +g1 1 +g2 12345678901234567890123456789012 +g3 123456789 +hex(h1) 12 +hex(h2) 123456789ABCDEF0 +hex(h3) 012345000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 +hex(i1) 12 +hex(i2) 123456789ABCDEF0 +hex(i3) 00123450 +pk 2 +hex(a1) 0 +hex(a2) 0 +hex(a3) 0 +hex(a4) 0 +hex(a5) 0 +b1 -128 +b2 0 +c1 -32768 +c2 0 +d1 -2147483648 +d2 0 +e1 -9223372036854775808 +e2 0 +f1 +f2 +f3 +g1 +g2 +g3 +hex(h1) 00 +hex(h2) 0000000000000000 +hex(h3) 000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 +hex(i1) 00 +hex(i2) 00 +hex(i3) 00 +pk 3 +hex(a1) NULL +hex(a2) NULL +hex(a3) NULL +hex(a4) NULL +hex(a5) NULL +b1 NULL +b2 NULL +c1 NULL +c2 NULL +d1 NULL +d2 NULL +e1 NULL +e2 NULL +f1 NULL +f2 NULL +f3 NULL +g1 NULL +g2 NULL +g3 NULL +hex(h1) NULL +hex(h2) NULL +hex(h3) NULL +hex(i1) NULL +hex(i2) NULL +hex(i3) NULL +CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP; +DELETE FROM test.backup_info; +LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ','; +SELECT @the_backup_id:=backup_id FROM test.backup_info; +@the_backup_id:=backup_id +<the_backup_id> +DROP TABLE test.backup_info; +1;0x1;0x17;0x789A;0x789ABCDE;0xFEDC0001;127;255;32767;65535;2147483647;4294967295;9223372036854775807;18446744073709551615;1;12345678901234567890123456789012;123456789;1;12345678901234567890123456789012;123456789;0x12;0x123456789ABCDEF0;0x012345;0x12;0x123456789ABCDEF0;0x00123450 +2;0x0;0x0;0x0;0x0;0x0;-128;0;-32768;0;-2147483648;0;-9223372036854775808;0;;;;;;;0x0;0x0;0x0;0x0;0x0;0x0 +3;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N +1,0x1,0x17,0x789A,0x789ABCDE,0xFEDC0001,127,255,32767,65535,2147483647,4294967295,9223372036854775807,18446744073709551615,'1','12345678901234567890123456789012','123456789','1','12345678901234567890123456789012','123456789',0x12,0x123456789ABCDEF0,0x012345,0x12,0x123456789ABCDEF0,0x00123450 +2,0x0,0x0,0x0,0x0,0x0,-128,0,-32768,0,-2147483648,0,-9223372036854775808,0,'','','','','','',0x0,0x0,0x0,0x0,0x0,0x0 +3,,,,,,,,,,,,,,,,,,,,,,,,, +drop table t1; +create table t1 +(pk int key +,f1 CHAR(1) BINARY, f2 CHAR(32) BINARY, f3 CHAR(255) BINARY +,g1 VARCHAR(32) BINARY, g2 VARCHAR(255) BINARY, g3 VARCHAR(1000) BINARY +,h1 BINARY(1), h2 BINARY(9), h3 BINARY(255) +,i1 VARBINARY(32), i2 VARBINARY(255), i3 VARBINARY(1000) +) engine ndb; +insert into t1 values +(1 
+,'1','12345678901234567890123456789012','123456789 ' + ,'1 ','12345678901234567890123456789012 ','123456789 ' + ,0x20,0x123456789abcdef020, 0x012345000020 +,0x1200000020,0x123456789abcdef000000020, 0x00123450000020 +); +create table t2 (pk int key, a int) engine ndb; +create table t3 (pk int key, a int) engine ndb; +create table t4 (pk int key, a int) engine ndb; +insert into t2 values (1,11),(2,12),(3,13),(4,14),(5,15); +insert into t3 values (1,21),(2,22),(3,23),(4,24),(5,25); +insert into t4 values (1,31),(2,32),(3,33),(4,34),(5,35); +CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP; +DELETE FROM test.backup_info; +LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ','; +SELECT @the_backup_id:=backup_id FROM test.backup_info; +@the_backup_id:=backup_id +<the_backup_id> +DROP TABLE test.backup_info; +'1' '1' '12345678901234567890123456789012' '123456789' '1' '12345678901234567890123456789012' '123456789' '0x20' '0x123456789ABCDEF020' '0x012345000020' '0x1200000020' '0x123456789ABCDEF000000020' '0x00123450000020' + +t1 +-- +1 1 12345678901234567890123456789012 123456789 1 12345678901234567890123456789012 123456789 0x20 0x123456789ABCDEF020 0x012345000020 0x1200000020 0x123456789ABCDEF000000020 0x00123450000020 + +t2 +-- +1 11 +2 12 +3 13 +4 14 +5 15 + +t3 +-- +1 21 +2 22 +3 23 +4 24 +5 25 + +t4 +-- +1 31 +2 32 +3 33 +4 34 +5 35 +drop table t1; +create table t1 +(pk int key +,a1 MEDIUMINT, a2 MEDIUMINT UNSIGNED +) engine ndb; +insert into t1 values(1, 8388607, 16777215); +insert into t1 values(2, -8388608, 0); +insert into t1 values(3, -1, 1); +CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP; +DELETE FROM test.backup_info; +LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ','; +SELECT @the_backup_id:=backup_id FROM test.backup_info; +@the_backup_id:=backup_id +<the_backup_id> +DROP TABLE test.backup_info; +1;8388607;16777215 +2;-8388608;0 +3;-1;1 +drop table t1; +drop table t2; +drop table t3; +drop table t4; diff --git a/mysql-test/r/ndb_single_user.result b/mysql-test/r/ndb_single_user.result index 771a02ecc84..732acc6f000 100644 --- a/mysql-test/r/ndb_single_user.result +++ b/mysql-test/r/ndb_single_user.result @@ -1,7 +1,7 @@ use test; drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; create table t1 (a int key, b int unique, c int) engine ndb; -ERROR HY000: Can't create table 'test.t1' (errno: 4007) +ERROR HY000: Can't create table 'test.t1' (errno: 299) create table t1 (a int key, b int unique, c int) engine ndb; insert into t1 values (1,1,0),(2,2,0),(3,3,0),(4,4,0),(5,5,0),(6,6,0),(7,7,0),(8,8,0),(9,9,0),(10,10,0); create table t2 as select * from t1; @@ -28,19 +28,32 @@ insert into t1 select * from t2; drop table t1; ERROR 42S02: Unknown table 't1' create index new_index on t1 (c); -ERROR 42S02: Table 'test.t1' doesn't exist +ERROR HY000: Got error 299 'Operation not allowed or aborted due to single user mode' from NDBCLUSTER insert into t1 values (1,1,0),(2,2,0),(3,3,0),(4,4,0),(5,5,0),(6,6,0),(7,7,0),(8,8,0),(9,9,0),(10,10,0); -ERROR 42S02: Table 'test.t1' doesn't exist +ERROR HY000: Got error 299 'Operation not allowed or aborted due to single user mode' from NDBCLUSTER select * from t1 where a = 1; -ERROR 42S02: Table 'test.t1' doesn't exist +ERROR HY000: Got error 299 'Operation not allowed or aborted due to single user mode' from NDBCLUSTER select * from t1 where b = 4; -ERROR 42S02: Table 'test.t1' doesn't exist +ERROR HY000: 
Got error 299 'Operation not allowed or aborted due to single user mode' from NDBCLUSTER update t1 set b=102 where a = 2; -ERROR 42S02: Table 'test.t1' doesn't exist +ERROR HY000: Got error 299 'Operation not allowed or aborted due to single user mode' from NDBCLUSTER update t1 set b=103 where b = 3; -ERROR 42S02: Table 'test.t1' doesn't exist +ERROR HY000: Got error 299 'Operation not allowed or aborted due to single user mode' from NDBCLUSTER update t1 set b=b+100; -ERROR 42S02: Table 'test.t1' doesn't exist +ERROR HY000: Got error 299 'Operation not allowed or aborted due to single user mode' from NDBCLUSTER update t1 set b=b+100 where a > 7; -ERROR 42S02: Table 'test.t1' doesn't exist +ERROR HY000: Got error 299 'Operation not allowed or aborted due to single user mode' from NDBCLUSTER +BEGIN; +update t1 set b=b+100 where a=1; +BEGIN; +update t1 set b=b+100 where a=2; +update t1 set b=b+100 where a=3; +COMMIT; +update t1 set b=b+100 where a=4; +ERROR HY000: Got error 299 'Operation not allowed or aborted due to single user mode' from NDBCLUSTER +COMMIT; +ERROR HY000: Got error 4350 'Transaction already aborted' from NDBCLUSTER +create table t2 (a int) engine myisam; +alter table t2 add column (b int); +drop table t2; drop table t1; diff --git a/mysql-test/r/ps.result b/mysql-test/r/ps.result index 12608c48bf5..15e1c8730f0 100644 --- a/mysql-test/r/ps.result +++ b/mysql-test/r/ps.result @@ -1968,11 +1968,11 @@ prepare abc from "show master logs"; deallocate prepare abc; create procedure proc_1() show events; call proc_1(); -Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status +Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator call proc_1(); -Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status +Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator call proc_1(); -Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status +Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator drop procedure proc_1; create function func_1() returns int begin show events; return 1; end| ERROR 0A000: Not allowed to return a result set from a function @@ -1982,11 +1982,11 @@ drop function func_1; ERROR 42000: FUNCTION test.func_1 does not exist prepare abc from "show events"; execute abc; -Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status +Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator execute abc; -Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status +Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator execute abc; -Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status +Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator deallocate prepare abc; drop procedure if exists a; create procedure a() select 42; diff --git a/mysql-test/r/range.result b/mysql-test/r/range.result index 5cd7e3ebfbd..d9efe21c5d0 100644 --- a/mysql-test/r/range.result +++ b/mysql-test/r/range.result @@ -717,6 +717,147 @@ d8c4177d225791924.30714720 d8c4177d2380fc201.39666693 d8c4177d24ccef970.14957924 DROP TABLE t1; +create table t1 ( +c1 char(10), c2 char(10), c3 char(10), c4 char(10), +c5 char(10), c6 char(10), c7 
char(10), c8 char(10), +c9 char(10), c10 char(10), c11 char(10), c12 char(10), +c13 char(10), c14 char(10), c15 char(10), c16 char(10), +index(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12,c13,c14,c15,c16) +); +insert into t1 (c1) values ('1'),('1'),('1'),('1'); +select * from t1 where +c1 in ("abcdefgh", "123456789", "qwertyuio", "asddfgh", +"abcdefg1", "123456781", "qwertyui1", "asddfg1", +"abcdefg2", "123456782", "qwertyui2", "asddfg2", +"abcdefg3", "123456783", "qwertyui3", "asddfg3", +"abcdefg4", "123456784", "qwertyui4", "asddfg4", +"abcdefg5", "123456785", "qwertyui5", "asddfg5", +"abcdefg6", "123456786", "qwertyui6", "asddfg6", +"abcdefg7", "123456787", "qwertyui7", "asddfg7", +"abcdefg8", "123456788", "qwertyui8", "asddfg8", +"abcdefg9", "123456789", "qwertyui9", "asddfg9", +"abcdefgA", "12345678A", "qwertyuiA", "asddfgA", +"abcdefgB", "12345678B", "qwertyuiB", "asddfgB", +"abcdefgC", "12345678C", "qwertyuiC", "asddfgC") +and c2 in ("abcdefgh", "123456789", "qwertyuio", "asddfgh", +"abcdefg1", "123456781", "qwertyui1", "asddfg1", +"abcdefg2", "123456782", "qwertyui2", "asddfg2", +"abcdefg3", "123456783", "qwertyui3", "asddfg3", +"abcdefg4", "123456784", "qwertyui4", "asddfg4", +"abcdefg5", "123456785", "qwertyui5", "asddfg5", +"abcdefg6", "123456786", "qwertyui6", "asddfg6", +"abcdefg7", "123456787", "qwertyui7", "asddfg7", +"abcdefg8", "123456788", "qwertyui8", "asddfg8", +"abcdefg9", "123456789", "qwertyui9", "asddfg9", +"abcdefgA", "12345678A", "qwertyuiA", "asddfgA", +"abcdefgB", "12345678B", "qwertyuiB", "asddfgB", +"abcdefgC", "12345678C", "qwertyuiC", "asddfgC") +and c3 in ("abcdefgh", "123456789", "qwertyuio", "asddfgh", +"abcdefg1", "123456781", "qwertyui1", "asddfg1", +"abcdefg2", "123456782", "qwertyui2", "asddfg2", +"abcdefg3", "123456783", "qwertyui3", "asddfg3", +"abcdefg4", "123456784", "qwertyui4", "asddfg4", +"abcdefg5", "123456785", "qwertyui5", "asddfg5", +"abcdefg6", "123456786", "qwertyui6", "asddfg6", +"abcdefg7", "123456787", "qwertyui7", "asddfg7", +"abcdefg8", "123456788", "qwertyui8", "asddfg8", +"abcdefg9", "123456789", "qwertyui9", "asddfg9", +"abcdefgA", "12345678A", "qwertyuiA", "asddfgA", +"abcdefgB", "12345678B", "qwertyuiB", "asddfgB", +"abcdefgC", "12345678C", "qwertyuiC", "asddfgC") +and c4 in ("abcdefgh", "123456789", "qwertyuio", "asddfgh", +"abcdefg1", "123456781", "qwertyui1", "asddfg1", +"abcdefg2", "123456782", "qwertyui2", "asddfg2", +"abcdefg3", "123456783", "qwertyui3", "asddfg3", +"abcdefg4", "123456784", "qwertyui4", "asddfg4", +"abcdefg5", "123456785", "qwertyui5", "asddfg5", +"abcdefg6", "123456786", "qwertyui6", "asddfg6", +"abcdefg7", "123456787", "qwertyui7", "asddfg7", +"abcdefg8", "123456788", "qwertyui8", "asddfg8", +"abcdefg9", "123456789", "qwertyui9", "asddfg9", +"abcdefgA", "12345678A", "qwertyuiA", "asddfgA", +"abcdefgB", "12345678B", "qwertyuiB", "asddfgB", +"abcdefgC", "12345678C", "qwertyuiC", "asddfgC") +and c5 in ("abcdefgh", "123456789", "qwertyuio", "asddfgh", +"abcdefg1", "123456781", "qwertyui1", "asddfg1", +"abcdefg2", "123456782", "qwertyui2", "asddfg2", +"abcdefg3", "123456783", "qwertyui3", "asddfg3", +"abcdefg4", "123456784", "qwertyui4", "asddfg4", +"abcdefg5", "123456785", "qwertyui5", "asddfg5", +"abcdefg6", "123456786", "qwertyui6", "asddfg6", +"abcdefg7", "123456787", "qwertyui7", "asddfg7", +"abcdefg8", "123456788", "qwertyui8", "asddfg8", +"abcdefg9", "123456789", "qwertyui9", "asddfg9", +"abcdefgA", "12345678A", "qwertyuiA", "asddfgA", +"abcdefgB", "12345678B", "qwertyuiB", "asddfgB", +"abcdefgC", 
"12345678C", "qwertyuiC", "asddfgC") +and c6 in ("abcdefgh", "123456789", "qwertyuio", "asddfgh", +"abcdefg1", "123456781", "qwertyui1", "asddfg1", +"abcdefg2", "123456782", "qwertyui2", "asddfg2", +"abcdefg3", "123456783", "qwertyui3", "asddfg3", +"abcdefg4", "123456784", "qwertyui4", "asddfg4", +"abcdefg5", "123456785", "qwertyui5", "asddfg5", +"abcdefg6", "123456786", "qwertyui6", "asddfg6", +"abcdefg7", "123456787", "qwertyui7", "asddfg7", +"abcdefg8", "123456788", "qwertyui8", "asddfg8", +"abcdefg9", "123456789", "qwertyui9", "asddfg9", +"abcdefgA", "12345678A", "qwertyuiA", "asddfgA", +"abcdefgB", "12345678B", "qwertyuiB", "asddfgB", +"abcdefgC", "12345678C", "qwertyuiC", "asddfgC") +and c7 in ("abcdefgh", "123456789", "qwertyuio", "asddfgh", +"abcdefg1", "123456781", "qwertyui1", "asddfg1", +"abcdefg2", "123456782", "qwertyui2", "asddfg2", +"abcdefg3", "123456783", "qwertyui3", "asddfg3", +"abcdefg4", "123456784", "qwertyui4", "asddfg4", +"abcdefg5", "123456785", "qwertyui5", "asddfg5", +"abcdefg6", "123456786", "qwertyui6", "asddfg6", +"abcdefg7", "123456787", "qwertyui7", "asddfg7", +"abcdefg8", "123456788", "qwertyui8", "asddfg8", +"abcdefg9", "123456789", "qwertyui9", "asddfg9", +"abcdefgA", "12345678A", "qwertyuiA", "asddfgA", +"abcdefgB", "12345678B", "qwertyuiB", "asddfgB", +"abcdefgC", "12345678C", "qwertyuiC", "asddfgC") +and c8 in ("abcdefgh", "123456789", "qwertyuio", "asddfgh", +"abcdefg1", "123456781", "qwertyui1", "asddfg1", +"abcdefg2", "123456782", "qwertyui2", "asddfg2", +"abcdefg3", "123456783", "qwertyui3", "asddfg3", +"abcdefg4", "123456784", "qwertyui4", "asddfg4", +"abcdefg5", "123456785", "qwertyui5", "asddfg5", +"abcdefg6", "123456786", "qwertyui6", "asddfg6", +"abcdefg7", "123456787", "qwertyui7", "asddfg7", +"abcdefg8", "123456788", "qwertyui8", "asddfg8", +"abcdefg9", "123456789", "qwertyui9", "asddfg9", +"abcdefgA", "12345678A", "qwertyuiA", "asddfgA", +"abcdefgB", "12345678B", "qwertyuiB", "asddfgB", +"abcdefgC", "12345678C", "qwertyuiC", "asddfgC") +and c9 in ("abcdefgh", "123456789", "qwertyuio", "asddfgh", +"abcdefg1", "123456781", "qwertyui1", "asddfg1", +"abcdefg2", "123456782", "qwertyui2", "asddfg2", +"abcdefg3", "123456783", "qwertyui3", "asddfg3", +"abcdefg4", "123456784", "qwertyui4", "asddfg4", +"abcdefg5", "123456785", "qwertyui5", "asddfg5", +"abcdefg6", "123456786", "qwertyui6", "asddfg6", +"abcdefg7", "123456787", "qwertyui7", "asddfg7", +"abcdefg8", "123456788", "qwertyui8", "asddfg8", +"abcdefg9", "123456789", "qwertyui9", "asddfg9", +"abcdefgA", "12345678A", "qwertyuiA", "asddfgA", +"abcdefgB", "12345678B", "qwertyuiB", "asddfgB", +"abcdefgC", "12345678C", "qwertyuiC", "asddfgC") +and c10 in ("abcdefgh", "123456789", "qwertyuio", "asddfgh", +"abcdefg1", "123456781", "qwertyui1", "asddfg1", +"abcdefg2", "123456782", "qwertyui2", "asddfg2", +"abcdefg3", "123456783", "qwertyui3", "asddfg3", +"abcdefg4", "123456784", "qwertyui4", "asddfg4", +"abcdefg5", "123456785", "qwertyui5", "asddfg5", +"abcdefg6", "123456786", "qwertyui6", "asddfg6", +"abcdefg7", "123456787", "qwertyui7", "asddfg7", +"abcdefg8", "123456788", "qwertyui8", "asddfg8", +"abcdefg9", "123456789", "qwertyui9", "asddfg9", +"abcdefgA", "12345678A", "qwertyuiA", "asddfgA", +"abcdefgB", "12345678B", "qwertyuiB", "asddfgB", +"abcdefgC", "12345678C", "qwertyuiC", "asddfgC"); +c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 c14 c15 c16 +drop table t1; End of 4.1 tests CREATE TABLE t1 ( id int(11) NOT NULL auto_increment, diff --git a/mysql-test/r/row.result b/mysql-test/r/row.result 
index 08d457f7ad7..26d616df2f3 100644 --- a/mysql-test/r/row.result +++ b/mysql-test/r/row.result @@ -306,3 +306,16 @@ a b a b c 1 1 1 2 1 1 2 1 2 1 DROP TABLE t1,t2; +CREATE TABLE t1( +a int, b int, c int, d int, e int, f int, g int, h int, +PRIMARY KEY (a,b,c,d,e,f,g) +); +INSERT INTO t1 VALUES (1,2,3,4,5,6,7,99); +SELECT h FROM t1 WHERE (a,b,c,d,e,f,g)=(1,2,3,4,5,6,7); +h +99 +SET @x:= (SELECT h FROM t1 WHERE (a,b,c,d,e,f,g)=(1,2,3,4,5,6,7)); +SELECT @x; +@x +99 +DROP TABLE t1; diff --git a/mysql-test/r/rpl_000015.result b/mysql-test/r/rpl_000015.result index a53750f82ad..13da328e522 100644 --- a/mysql-test/r/rpl_000015.result +++ b/mysql-test/r/rpl_000015.result @@ -1,7 +1,7 @@ reset master; show master status; File Position Binlog_Do_DB Binlog_Ignore_DB -master-bin.000001 102 +master-bin.000001 106 reset slave; show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master @@ -17,7 +17,7 @@ Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File start slave; show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 7 master-bin.000001 102 # # master-bin.000001 Yes Yes 0 0 102 # None 0 No # +# 127.0.0.1 root MASTER_PORT 7 master-bin.000001 106 # # master-bin.000001 Yes Yes 0 0 106 # None 0 No # drop table if exists t1; create table t1 (n int, PRIMARY KEY(n)); insert into t1 values (10),(45),(90); diff --git a/mysql-test/r/rpl_change_master.result b/mysql-test/r/rpl_change_master.result index 513de9494ba..c3909de5ba5 100644 --- a/mysql-test/r/rpl_change_master.result +++ b/mysql-test/r/rpl_change_master.result @@ -13,11 +13,11 @@ insert into t1 values(2); stop slave; show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_MYPORT 1 master-bin.000001 # # # master-bin.000001 No No 0 0 187 # None 0 No # +# 127.0.0.1 root MASTER_MYPORT 1 master-bin.000001 # # # master-bin.000001 No No 0 0 191 # None 0 No # change master to master_user='root'; show slave status; Slave_IO_State 
Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_MYPORT 1 master-bin.000001 # # # master-bin.000001 No No 0 0 187 # None 0 No # +# 127.0.0.1 root MASTER_MYPORT 1 master-bin.000001 # # # master-bin.000001 No No 0 0 191 # None 0 No # start slave; select * from t1; n diff --git a/mysql-test/r/rpl_ddl.result b/mysql-test/r/rpl_ddl.result index ace86532b12..d41462de621 100644 --- a/mysql-test/r/rpl_ddl.result +++ b/mysql-test/r/rpl_ddl.result @@ -4,45 +4,47 @@ reset master; reset slave; drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; start slave; + +-------- switch to master ------- SET AUTOCOMMIT = 1; DROP DATABASE IF EXISTS mysqltest1; DROP DATABASE IF EXISTS mysqltest2; DROP DATABASE IF EXISTS mysqltest3; CREATE DATABASE mysqltest1; CREATE DATABASE mysqltest2; -CREATE TABLE mysqltest1.t1 (f1 BIGINT) ENGINE="InnoDB"; +CREATE TABLE mysqltest1.t1 (f1 BIGINT) ENGINE=InnoDB; INSERT INTO mysqltest1.t1 SET f1= 0; -CREATE TABLE mysqltest1.t2 (f1 BIGINT) ENGINE="InnoDB"; -CREATE TABLE mysqltest1.t3 (f1 BIGINT) ENGINE="InnoDB"; -CREATE TABLE mysqltest1.t4 (f1 BIGINT) ENGINE="InnoDB"; -CREATE TABLE mysqltest1.t5 (f1 BIGINT) ENGINE="InnoDB"; -CREATE TABLE mysqltest1.t6 (f1 BIGINT) ENGINE="InnoDB"; +CREATE TABLE mysqltest1.t2 (f1 BIGINT) ENGINE=InnoDB; +CREATE TABLE mysqltest1.t3 (f1 BIGINT) ENGINE=InnoDB; +CREATE TABLE mysqltest1.t4 (f1 BIGINT) ENGINE=InnoDB; +CREATE TABLE mysqltest1.t5 (f1 BIGINT) ENGINE=InnoDB; +CREATE TABLE mysqltest1.t6 (f1 BIGINT) ENGINE=InnoDB; CREATE INDEX my_idx6 ON mysqltest1.t6(f1); -CREATE TABLE mysqltest1.t7 (f1 BIGINT) ENGINE="InnoDB"; +CREATE TABLE mysqltest1.t7 (f1 BIGINT) ENGINE=InnoDB; INSERT INTO mysqltest1.t7 SET f1= 0; -CREATE TABLE mysqltest1.t8 (f1 BIGINT) ENGINE="InnoDB"; -CREATE TABLE mysqltest1.t9 (f1 BIGINT) ENGINE="InnoDB"; -CREATE TABLE mysqltest1.t10 (f1 BIGINT) ENGINE="InnoDB"; -CREATE TABLE mysqltest1.t11 (f1 BIGINT) ENGINE="InnoDB"; -CREATE TABLE mysqltest1.t12 (f1 BIGINT) ENGINE="InnoDB"; -CREATE TABLE mysqltest1.t13 (f1 BIGINT) ENGINE="InnoDB"; -CREATE TABLE mysqltest1.t14 (f1 BIGINT) ENGINE="InnoDB"; -CREATE TABLE mysqltest1.t15 (f1 BIGINT) ENGINE="InnoDB"; -CREATE TABLE mysqltest1.t16 (f1 BIGINT) ENGINE="InnoDB"; -CREATE TABLE mysqltest1.t17 (f1 BIGINT) ENGINE="InnoDB"; -CREATE TABLE mysqltest1.t18 (f1 BIGINT) ENGINE="InnoDB"; -CREATE TABLE mysqltest1.t19 (f1 BIGINT) ENGINE="InnoDB"; -CREATE TEMPORARY TABLE mysqltest1.t23 (f1 BIGINT); +CREATE TABLE mysqltest1.t8 (f1 BIGINT) ENGINE=InnoDB; +CREATE TABLE mysqltest1.t9 (f1 BIGINT) ENGINE=InnoDB; +CREATE TABLE mysqltest1.t10 (f1 BIGINT) ENGINE=InnoDB; +CREATE TABLE mysqltest1.t11 (f1 BIGINT) ENGINE=InnoDB; +CREATE TABLE mysqltest1.t12 (f1 BIGINT) ENGINE=InnoDB; +CREATE TABLE mysqltest1.t13 (f1 BIGINT) ENGINE=InnoDB; +CREATE TABLE mysqltest1.t14 (f1 BIGINT) ENGINE=InnoDB; +CREATE TABLE mysqltest1.t15 (f1 BIGINT) ENGINE=InnoDB; +CREATE TABLE mysqltest1.t16 (f1 BIGINT) ENGINE=InnoDB; +CREATE TABLE mysqltest1.t17 (f1 BIGINT) ENGINE=InnoDB; +CREATE TABLE mysqltest1.t18 (f1 BIGINT) ENGINE=InnoDB; 
+CREATE TABLE mysqltest1.t19 (f1 BIGINT) ENGINE=InnoDB; +CREATE TEMPORARY TABLE mysqltest1.t23 (f1 BIGINT) ENGINE=MEMORY; SET AUTOCOMMIT = 0; use mysqltest1; -------- switch to slave -------- -SET AUTOCOMMIT = 0; +SET AUTOCOMMIT = 1; use mysqltest1; -------- switch to master ------- -######## COMMIT ######## +######## SELECT 1 ######## -------- switch to master ------- INSERT INTO t1 SET f1= 0 + 1; @@ -56,7 +58,9 @@ MAX(f1) 0 -------- switch to master ------- -COMMIT; +SELECT 1; +1 +1 SELECT MAX(f1) FROM t1; MAX(f1) 1 @@ -64,32 +68,110 @@ MAX(f1) -------- switch to slave -------- SELECT MAX(f1) FROM t1; MAX(f1) -1 +0 -------- switch to master ------- ROLLBACK; SELECT MAX(f1) FROM t1; MAX(f1) +0 + +TEST-INFO: MASTER: The INSERT is not committed (Succeeded) + +-------- switch to slave -------- +SELECT MAX(f1) FROM t1; +MAX(f1) +0 + +TEST-INFO: SLAVE: The INSERT is not committed (Succeeded) + +-------- switch to master ------- + +######## SELECT COUNT(*) FROM t1 ######## + +-------- switch to master ------- +INSERT INTO t1 SET f1= 0 + 1; +SELECT MAX(f1) FROM t1; +MAX(f1) 1 -TEST-INFO: MASTER: The INSERT is committed (Succeeded) +-------- switch to slave -------- +SELECT MAX(f1) FROM t1; +MAX(f1) +0 + +-------- switch to master ------- +SELECT COUNT(*) FROM t1; +COUNT(*) +2 +SELECT MAX(f1) FROM t1; +MAX(f1) +1 + +-------- switch to slave -------- +SELECT MAX(f1) FROM t1; +MAX(f1) +0 + +-------- switch to master ------- +ROLLBACK; +SELECT MAX(f1) FROM t1; +MAX(f1) +0 + +TEST-INFO: MASTER: The INSERT is not committed (Succeeded) -------- switch to slave -------- SELECT MAX(f1) FROM t1; MAX(f1) +0 + +TEST-INFO: SLAVE: The INSERT is not committed (Succeeded) + +-------- switch to master ------- + +######## COMMIT ######## + +-------- switch to master ------- +INSERT INTO t1 SET f1= 0 + 1; +SELECT MAX(f1) FROM t1; +MAX(f1) 1 -TEST-INFO: SLAVE: The INSERT is committed (Succeeded) +-------- switch to slave -------- +SELECT MAX(f1) FROM t1; +MAX(f1) +0 + +-------- switch to master ------- +COMMIT; +SELECT MAX(f1) FROM t1; +MAX(f1) +1 + +-------- switch to slave -------- +SELECT MAX(f1) FROM t1; +MAX(f1) +1 -------- switch to master ------- -flush logs; +ROLLBACK; +SELECT MAX(f1) FROM t1; +MAX(f1) +1 + +TEST-INFO: MASTER: The INSERT is committed (Succeeded) -------- switch to slave -------- -flush logs; +SELECT MAX(f1) FROM t1; +MAX(f1) +1 + +TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -######## ROLLBACK ######## +######## ROLLBACK ######## -------- switch to master ------- INSERT INTO t1 SET f1= 1 + 1; @@ -129,14 +211,8 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is not committed (Succeeded) -------- switch to master ------- -flush logs; --------- switch to slave -------- -flush logs; - --------- switch to master ------- - -######## SET AUTOCOMMIT=1 ######## +######## SET AUTOCOMMIT=1 ######## -------- switch to master ------- INSERT INTO t1 SET f1= 1 + 1; @@ -176,15 +252,9 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SET AUTOCOMMIT=0; -######## START TRANSACTION ######## +######## START TRANSACTION ######## -------- switch to master ------- INSERT INTO t1 SET f1= 2 + 1; @@ -224,14 +294,8 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; --------- switch to slave -------- -flush logs; - --------- switch to master ------- - -######## BEGIN ######## 
+######## BEGIN ######## -------- switch to master ------- INSERT INTO t1 SET f1= 3 + 1; @@ -271,14 +335,8 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- -######## DROP TABLE mysqltest1.t2 ######## +######## DROP TABLE mysqltest1.t2 ######## -------- switch to master ------- INSERT INTO t1 SET f1= 4 + 1; @@ -318,12 +376,6 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SHOW TABLES LIKE 't2'; Tables_in_mysqltest1 (t2) @@ -333,7 +385,7 @@ Tables_in_mysqltest1 (t2) -------- switch to master ------- -######## DROP TEMPORARY TABLE mysqltest1.t23 ######## +######## DROP TEMPORARY TABLE mysqltest1.t23 ######## -------- switch to master ------- INSERT INTO t1 SET f1= 5 + 1; @@ -368,15 +420,9 @@ TEST-INFO: MASTER: The INSERT is not committed (Succeeded) -------- switch to slave -------- SELECT MAX(f1) FROM t1; MAX(f1) -6 - -TEST-INFO: SLAVE: The INSERT is committed (Succeeded) - --------- switch to master ------- -flush logs; +5 --------- switch to slave -------- -flush logs; +TEST-INFO: SLAVE: The INSERT is not committed (Succeeded) -------- switch to master ------- SHOW TABLES LIKE 't23'; @@ -388,7 +434,7 @@ Tables_in_mysqltest1 (t23) -------- switch to master ------- -######## RENAME TABLE mysqltest1.t3 to mysqltest1.t20 ######## +######## RENAME TABLE mysqltest1.t3 to mysqltest1.t20 ######## -------- switch to master ------- INSERT INTO t1 SET f1= 5 + 1; @@ -399,7 +445,7 @@ MAX(f1) -------- switch to slave -------- SELECT MAX(f1) FROM t1; MAX(f1) -6 +5 -------- switch to master ------- RENAME TABLE mysqltest1.t3 to mysqltest1.t20; @@ -428,12 +474,6 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SHOW TABLES LIKE 't20'; Tables_in_mysqltest1 (t20) t20 @@ -445,7 +485,7 @@ t20 -------- switch to master ------- -######## ALTER TABLE mysqltest1.t4 ADD column f2 BIGINT ######## +######## ALTER TABLE mysqltest1.t4 ADD column f2 BIGINT ######## -------- switch to master ------- INSERT INTO t1 SET f1= 6 + 1; @@ -485,12 +525,6 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- describe mysqltest1.t4; Field Type Null Key Default Extra f1 bigint(20) YES NULL @@ -504,7 +538,7 @@ f2 bigint(20) YES NULL -------- switch to master ------- -######## CREATE TABLE mysqltest1.t21 (f1 BIGINT) ENGINE= "InnoDB" ######## +######## CREATE TABLE mysqltest1.t21 (f1 BIGINT) ENGINE= InnoDB ######## -------- switch to master ------- INSERT INTO t1 SET f1= 7 + 1; @@ -518,7 +552,7 @@ MAX(f1) 7 -------- switch to master ------- -CREATE TABLE mysqltest1.t21 (f1 BIGINT) ENGINE= "InnoDB"; +CREATE TABLE mysqltest1.t21 (f1 BIGINT) ENGINE= InnoDB; SELECT MAX(f1) FROM t1; MAX(f1) 8 @@ -544,14 +578,8 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- -######## CREATE TEMPORARY TABLE mysqltest1.t22 (f1 BIGINT) ######## +######## CREATE TEMPORARY TABLE mysqltest1.t22 (f1 BIGINT) ENGINE=MEMORY ######## 
-------- switch to master ------- INSERT INTO t1 SET f1= 8 + 1; @@ -565,7 +593,7 @@ MAX(f1) 8 -------- switch to master ------- -CREATE TEMPORARY TABLE mysqltest1.t22 (f1 BIGINT); +CREATE TEMPORARY TABLE mysqltest1.t22 (f1 BIGINT) ENGINE=MEMORY; SELECT MAX(f1) FROM t1; MAX(f1) 9 @@ -586,19 +614,13 @@ TEST-INFO: MASTER: The INSERT is not committed (Succeeded) -------- switch to slave -------- SELECT MAX(f1) FROM t1; MAX(f1) -9 - -TEST-INFO: SLAVE: The INSERT is committed (Succeeded) - --------- switch to master ------- -flush logs; +8 --------- switch to slave -------- -flush logs; +TEST-INFO: SLAVE: The INSERT is not committed (Succeeded) -------- switch to master ------- -######## TRUNCATE TABLE mysqltest1.t7 ######## +######## TRUNCATE TABLE mysqltest1.t7 ######## -------- switch to master ------- INSERT INTO t1 SET f1= 8 + 1; @@ -609,7 +631,7 @@ MAX(f1) -------- switch to slave -------- SELECT MAX(f1) FROM t1; MAX(f1) -9 +8 -------- switch to master ------- TRUNCATE TABLE mysqltest1.t7; @@ -638,20 +660,16 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SELECT * FROM mysqltest1.t7; f1 + -------- switch to slave -------- SELECT * FROM mysqltest1.t7; f1 + -------- switch to master ------- -######## LOCK TABLES mysqltest1.t1 WRITE, mysqltest1.t8 READ ######## +######## LOCK TABLES mysqltest1.t1 WRITE, mysqltest1.t8 READ ######## -------- switch to master ------- INSERT INTO t1 SET f1= 9 + 1; @@ -691,15 +709,9 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- UNLOCK TABLES; -######## UNLOCK TABLES ######## +######## UNLOCK TABLES ######## -------- switch to master ------- INSERT INTO t1 SET f1= 10 + 1; @@ -739,15 +751,9 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is not committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- LOCK TABLES mysqltest1.t1 READ; -######## UNLOCK TABLES ######## +######## UNLOCK TABLES ######## -------- switch to master ------- INSERT INTO t1 SET f1= 10 + 1; @@ -788,15 +794,9 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is not committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- LOCK TABLES mysqltest1.t1 WRITE, mysqltest1.t8 READ; -######## UNLOCK TABLES ######## +######## UNLOCK TABLES ######## -------- switch to master ------- INSERT INTO t1 SET f1= 10 + 1; @@ -836,14 +836,8 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; --------- switch to slave -------- -flush logs; - --------- switch to master ------- - -######## DROP INDEX my_idx6 ON mysqltest1.t6 ######## +######## DROP INDEX my_idx6 ON mysqltest1.t6 ######## -------- switch to master ------- INSERT INTO t1 SET f1= 11 + 1; @@ -883,12 +877,6 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SHOW INDEX FROM mysqltest1.t6; Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment @@ -898,7 +886,7 @@ Table Non_unique Key_name Seq_in_index Column_name Collation 
Cardinality Sub_par -------- switch to master ------- -######## CREATE INDEX my_idx5 ON mysqltest1.t5(f1) ######## +######## CREATE INDEX my_idx5 ON mysqltest1.t5(f1) ######## -------- switch to master ------- INSERT INTO t1 SET f1= 12 + 1; @@ -938,12 +926,6 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SHOW INDEX FROM mysqltest1.t5; Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment t5 1 my_idx5 1 f1 A 0 NULL NULL YES BTREE @@ -955,7 +937,7 @@ t5 1 my_idx5 1 f1 A NULL NULL NULL YES BTREE -------- switch to master ------- -######## DROP DATABASE mysqltest2 ######## +######## DROP DATABASE mysqltest2 ######## -------- switch to master ------- INSERT INTO t1 SET f1= 13 + 1; @@ -995,12 +977,6 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SHOW DATABASES LIKE "mysqltest2"; Database (mysqltest2) @@ -1010,7 +986,7 @@ Database (mysqltest2) -------- switch to master ------- -######## CREATE DATABASE mysqltest3 ######## +######## CREATE DATABASE mysqltest3 ######## -------- switch to master ------- INSERT INTO t1 SET f1= 14 + 1; @@ -1050,12 +1026,6 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SHOW DATABASES LIKE "mysqltest3"; Database (mysqltest3) mysqltest3 @@ -1067,7 +1037,7 @@ mysqltest3 -------- switch to master ------- -######## CREATE PROCEDURE p1() READS SQL DATA SELECT "this is p1" ######## +######## CREATE PROCEDURE p1() READS SQL DATA SELECT "this is p1" ######## -------- switch to master ------- INSERT INTO t1 SET f1= 15 + 1; @@ -1107,12 +1077,6 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SHOW PROCEDURE STATUS LIKE 'p1'; Db mysqltest1 Name p1 @@ -1122,7 +1086,8 @@ Modified # Created # Security_type DEFINER Comment - -------- switch to slave ------- + +-------- switch to slave -------- SHOW PROCEDURE STATUS LIKE 'p1'; Db mysqltest1 Name p1 @@ -1133,7 +1098,9 @@ Created # Security_type DEFINER Comment -######## ALTER PROCEDURE p1 COMMENT "I have been altered" ######## +-------- switch to master ------- + +######## ALTER PROCEDURE p1 COMMENT "I have been altered" ######## -------- switch to master ------- INSERT INTO t1 SET f1= 16 + 1; @@ -1173,12 +1140,6 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SHOW PROCEDURE STATUS LIKE 'p1'; Db mysqltest1 Name p1 @@ -1188,7 +1149,8 @@ Modified # Created # Security_type DEFINER Comment I have been altered - -------- switch to slave ------- + +-------- switch to slave -------- SHOW PROCEDURE STATUS LIKE 'p1'; Db mysqltest1 Name p1 @@ -1199,7 +1161,9 @@ Created # Security_type DEFINER Comment I have been altered -######## DROP PROCEDURE p1 ######## +-------- switch to master ------- + +######## DROP PROCEDURE p1 ######## -------- switch to master ------- INSERT INTO t1 SET f1= 17 + 1; @@ -1239,17 +1203,14 @@ MAX(f1) TEST-INFO: SLAVE: The 
INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; +SHOW PROCEDURE STATUS LIKE 'p1'; -------- switch to slave -------- -flush logs; +SHOW PROCEDURE STATUS LIKE 'p1'; -------- switch to master ------- -SHOW PROCEDURE STATUS LIKE 'p1'; - -------- switch to slave ------- -SHOW PROCEDURE STATUS LIKE 'p1'; -######## CREATE OR REPLACE VIEW v1 as select * from t1 ######## +######## CREATE OR REPLACE VIEW v1 as select * from t1 ######## -------- switch to master ------- INSERT INTO t1 SET f1= 18 + 1; @@ -1289,22 +1250,18 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SHOW CREATE VIEW v1; View Create View v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`f1` AS `f1` from `t1` --------- switch to slave ------- +-------- switch to slave -------- SHOW CREATE VIEW v1; View Create View v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`f1` AS `f1` from `t1` -######## ALTER VIEW v1 AS select f1 from t1 ######## +-------- switch to master ------- + +######## ALTER VIEW v1 AS select f1 from t1 ######## -------- switch to master ------- INSERT INTO t1 SET f1= 19 + 1; @@ -1344,22 +1301,18 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SHOW CREATE VIEW v1; View Create View v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`f1` AS `f1` from `t1` --------- switch to slave ------- +-------- switch to slave -------- SHOW CREATE VIEW v1; View Create View v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`f1` AS `f1` from `t1` -######## DROP VIEW IF EXISTS v1 ######## +-------- switch to master ------- + +######## DROP VIEW IF EXISTS v1 ######## -------- switch to master ------- INSERT INTO t1 SET f1= 20 + 1; @@ -1399,20 +1352,16 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SHOW CREATE VIEW v1; ERROR 42S02: Table 'mysqltest1.v1' doesn't exist --------- switch to slave ------- +-------- switch to slave -------- SHOW CREATE VIEW v1; ERROR 42S02: Table 'mysqltest1.v1' doesn't exist -######## CREATE TRIGGER trg1 BEFORE INSERT ON t1 FOR EACH ROW SET @a:=1 ######## +-------- switch to master ------- + +######## CREATE TRIGGER trg1 BEFORE INSERT ON t1 FOR EACH ROW SET @a:=1 ######## -------- switch to master ------- INSERT INTO t1 SET f1= 21 + 1; @@ -1452,22 +1401,18 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SHOW TRIGGERS; Trigger Event Table Statement Timing Created sql_mode Definer trg1 INSERT t1 SET @a:=1 BEFORE NULL root@localhost --------- switch to slave ------- +-------- switch to slave -------- SHOW TRIGGERS; Trigger Event Table Statement Timing Created sql_mode Definer trg1 INSERT t1 SET @a:=1 BEFORE NULL root@localhost -######## DROP TRIGGER trg1 ######## +-------- switch to master ------- + +######## DROP TRIGGER trg1 ######## -------- switch to master ------- INSERT INTO 
t1 SET f1= 22 + 1; @@ -1507,20 +1452,16 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SHOW TRIGGERS; Trigger Event Table Statement Timing Created sql_mode Definer --------- switch to slave ------- +-------- switch to slave -------- SHOW TRIGGERS; Trigger Event Table Statement Timing Created sql_mode Definer -######## CREATE USER user1@localhost ######## +-------- switch to master ------- + +######## CREATE USER user1@localhost ######## -------- switch to master ------- INSERT INTO t1 SET f1= 23 + 1; @@ -1560,22 +1501,18 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SELECT user FROM mysql.user WHERE user = 'user1'; user user1 --------- switch to slave ------- +-------- switch to slave -------- SELECT user FROM mysql.user WHERE user = 'user1'; user user1 -######## RENAME USER user1@localhost TO rename1@localhost ######## +-------- switch to master ------- + +######## RENAME USER user1@localhost TO rename1@localhost ######## -------- switch to master ------- INSERT INTO t1 SET f1= 24 + 1; @@ -1615,22 +1552,18 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SELECT user FROM mysql.user WHERE user = 'rename1'; user rename1 --------- switch to slave ------- +-------- switch to slave -------- SELECT user FROM mysql.user WHERE user = 'rename1'; user rename1 -######## DROP USER rename1@localhost ######## +-------- switch to master ------- + +######## DROP USER rename1@localhost ######## -------- switch to master ------- INSERT INTO t1 SET f1= 25 + 1; @@ -1670,18 +1603,14 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SELECT user FROM mysql.user WHERE user = 'rename1'; user --------- switch to slave ------- +-------- switch to slave -------- SELECT user FROM mysql.user WHERE user = 'rename1'; user -DROP DATABASE IF EXISTS mysqltest1; -DROP DATABASE IF EXISTS mysqltest2; -DROP DATABASE IF EXISTS mysqltest3; +use test; + +-------- switch to master ------- +DROP DATABASE mysqltest1; +DROP DATABASE mysqltest3; diff --git a/mysql-test/r/rpl_deadlock_innodb.result b/mysql-test/r/rpl_deadlock_innodb.result index caf040c0997..48fdf051d50 100644 --- a/mysql-test/r/rpl_deadlock_innodb.result +++ b/mysql-test/r/rpl_deadlock_innodb.result @@ -80,7 +80,7 @@ Master_SSL_Key Seconds_Behind_Master # stop slave; delete from t3; -change master to master_log_pos=544; +change master to master_log_pos=548; begin; select * from t2 for update; a @@ -136,7 +136,7 @@ set @my_max_relay_log_size= @@global.max_relay_log_size; set global max_relay_log_size=0; stop slave; delete from t3; -change master to master_log_pos=544; +change master to master_log_pos=548; begin; select * from t2 for update; a diff --git a/mysql-test/r/rpl_events.result b/mysql-test/r/rpl_events.result new file mode 100644 index 00000000000..bff1a814a6d --- /dev/null +++ b/mysql-test/r/rpl_events.result @@ -0,0 +1,161 @@ +set global event_scheduler=1; +stop slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +reset master; +reset slave; +drop table if 
exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +start slave; +set binlog_format=row; +DROP EVENT IF EXISTS test.justonce; +drop table if exists t1,t2; +CREATE TABLE `t1` ( +`id` INT(10) UNSIGNED NOT NULL AUTO_INCREMENT, +`c` VARCHAR(50) NOT NULL, +`ts` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, +PRIMARY KEY (`id`) +) ENGINE=MyISAM DEFAULT CHARSET=utf8; +INSERT INTO t1 (c) VALUES ('manually'); +CREATE EVENT test.justonce ON SCHEDULE AT NOW() + INTERVAL 2 SECOND DO INSERT INTO t1 +(c) VALUES ('from justonce'); +SELECT db, name, status, originator FROM mysql.event WHERE db = 'test' AND name = 'justonce'; +db name status originator +test justonce ENABLED 1 +"in the master" +SELECT * FROM t1 WHERE c = 'from justonce' OR c = 'manually' ORDER BY id; +id c ts +1 manually TIMESTAMP +2 from justonce TIMESTAMP +affected rows: 2 +"in the slave" +SELECT * FROM t1 WHERE c = 'from justonce' OR c = 'manually' ORDER BY id; +id c ts +1 manually TIMESTAMP +2 from justonce TIMESTAMP +affected rows: 2 +SELECT db, name, status, originator FROM mysql.event WHERE db = 'test' AND name = 'justonce'; +db name status originator +test justonce SLAVESIDE_DISABLED 1 +DROP EVENT IF EXISTS test.slave_once; +CREATE EVENT test.slave_once ON SCHEDULE EVERY 5 MINUTE DO +INSERT INTO t1(c) VALUES ('from slave_once'); +SELECT db, name, status, originator FROM mysql.event WHERE db = 'test' AND name = 'slave_once'; +db name status originator +test slave_once ENABLED 2 +DROP EVENT IF EXISTS test.slave_once; +DROP EVENT IF EXISTS test.justonce; +CREATE EVENT test.er ON SCHEDULE EVERY 3 SECOND DO +INSERT INTO t1(c) VALUES ('from er'); +SELECT db, name, status, originator, body FROM mysql.event WHERE db = 'test' AND name = 'er'; +db name status originator body +test er ENABLED 1 INSERT INTO t1(c) VALUES ('from er') +"in the slave" +SELECT db, name, status, originator, body FROM mysql.event WHERE db = 'test' AND name = 'er'; +db name status originator body +test er SLAVESIDE_DISABLED 1 INSERT INTO t1(c) VALUES ('from er') +"in the master" +ALTER EVENT test.er ON SCHEDULE EVERY 5 SECOND DO INSERT into t1(c) VALUES ('from alter er'); +SELECT db, name, status, originator, body FROM mysql.event WHERE db = 'test' AND name = 'er'; +db name status originator body +test er ENABLED 1 INSERT into t1(c) VALUES ('from alter er') +"in the slave" +SELECT db, name, status, originator, body FROM mysql.event WHERE db = 'test' AND name = 'er'; +db name status originator body +test er SLAVESIDE_DISABLED 1 INSERT into t1(c) VALUES ('from alter er') +"in the master" +DROP EVENT test.er; +SELECT db, name, status, originator FROM mysql.event WHERE db = 'test'; +db name status originator +"in the slave" +SELECT db, name, status, originator FROM mysql.event WHERE db = 'test'; +db name status originator +CREATE EVENT test.slave_terminate ON SCHEDULE EVERY 3 SECOND +DO INSERT INTO t1(c) VALUES ('from slave_terminate'); +SELECT db, name, status, originator FROM mysql.event WHERE db = 'test' AND name = 'slave_terminate'; +db name status originator +test slave_terminate ENABLED 2 +DROP EVENT test.slave_terminate; +CREATE EVENT test.slave_terminate ON SCHEDULE EVERY 3 SECOND +DISABLE ON SLAVE DO INSERT INTO t1(c) VALUES ('from slave_terminate'); +SELECT db, name, status, originator FROM mysql.event WHERE db = 'test' AND name = 'slave_terminate'; +db name status originator +test slave_terminate SLAVESIDE_DISABLED 2 +DROP EVENT test.slave_terminate; +"in the master" +DROP TABLE t1; +set binlog_format=statement; +DROP EVENT IF EXISTS test.justonce; 
+drop table if exists t1,t2; +CREATE TABLE `t1` ( +`id` INT(10) UNSIGNED NOT NULL AUTO_INCREMENT, +`c` VARCHAR(50) NOT NULL, +`ts` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, +PRIMARY KEY (`id`) +) ENGINE=MyISAM DEFAULT CHARSET=utf8; +INSERT INTO t1 (c) VALUES ('manually'); +CREATE EVENT test.justonce ON SCHEDULE AT NOW() + INTERVAL 2 SECOND DO INSERT INTO t1 +(c) VALUES ('from justonce'); +SELECT db, name, status, originator FROM mysql.event WHERE db = 'test' AND name = 'justonce'; +db name status originator +test justonce ENABLED 1 +"in the master" +SELECT * FROM t1 WHERE c = 'from justonce' OR c = 'manually' ORDER BY id; +id c ts +1 manually TIMESTAMP +2 from justonce TIMESTAMP +affected rows: 2 +"in the slave" +SELECT * FROM t1 WHERE c = 'from justonce' OR c = 'manually' ORDER BY id; +id c ts +1 manually TIMESTAMP +2 from justonce TIMESTAMP +affected rows: 2 +SELECT db, name, status, originator FROM mysql.event WHERE db = 'test' AND name = 'justonce'; +db name status originator +test justonce SLAVESIDE_DISABLED 1 +DROP EVENT IF EXISTS test.slave_once; +CREATE EVENT test.slave_once ON SCHEDULE EVERY 5 MINUTE DO +INSERT INTO t1(c) VALUES ('from slave_once'); +SELECT db, name, status, originator FROM mysql.event WHERE db = 'test' AND name = 'slave_once'; +db name status originator +test slave_once ENABLED 2 +DROP EVENT IF EXISTS test.slave_once; +DROP EVENT IF EXISTS test.justonce; +CREATE EVENT test.er ON SCHEDULE EVERY 3 SECOND DO +INSERT INTO t1(c) VALUES ('from er'); +SELECT db, name, status, originator, body FROM mysql.event WHERE db = 'test' AND name = 'er'; +db name status originator body +test er ENABLED 1 INSERT INTO t1(c) VALUES ('from er') +"in the slave" +SELECT db, name, status, originator, body FROM mysql.event WHERE db = 'test' AND name = 'er'; +db name status originator body +test er SLAVESIDE_DISABLED 1 INSERT INTO t1(c) VALUES ('from er') +"in the master" +ALTER EVENT test.er ON SCHEDULE EVERY 5 SECOND DO INSERT into t1(c) VALUES ('from alter er'); +SELECT db, name, status, originator, body FROM mysql.event WHERE db = 'test' AND name = 'er'; +db name status originator body +test er ENABLED 1 INSERT into t1(c) VALUES ('from alter er') +"in the slave" +SELECT db, name, status, originator, body FROM mysql.event WHERE db = 'test' AND name = 'er'; +db name status originator body +test er SLAVESIDE_DISABLED 1 INSERT into t1(c) VALUES ('from alter er') +"in the master" +DROP EVENT test.er; +SELECT db, name, status, originator FROM mysql.event WHERE db = 'test'; +db name status originator +"in the slave" +SELECT db, name, status, originator FROM mysql.event WHERE db = 'test'; +db name status originator +CREATE EVENT test.slave_terminate ON SCHEDULE EVERY 3 SECOND +DO INSERT INTO t1(c) VALUES ('from slave_terminate'); +SELECT db, name, status, originator FROM mysql.event WHERE db = 'test' AND name = 'slave_terminate'; +db name status originator +test slave_terminate ENABLED 2 +DROP EVENT test.slave_terminate; +CREATE EVENT test.slave_terminate ON SCHEDULE EVERY 3 SECOND +DISABLE ON SLAVE DO INSERT INTO t1(c) VALUES ('from slave_terminate'); +SELECT db, name, status, originator FROM mysql.event WHERE db = 'test' AND name = 'slave_terminate'; +db name status originator +test slave_terminate SLAVESIDE_DISABLED 2 +DROP EVENT test.slave_terminate; +"in the master" +DROP TABLE t1; diff --git a/mysql-test/r/rpl_flushlog_loop.result b/mysql-test/r/rpl_flushlog_loop.result index 16d8ba251f4..40eb403f6e7 100644 --- a/mysql-test/r/rpl_flushlog_loop.result +++ 
b/mysql-test/r/rpl_flushlog_loop.result @@ -24,7 +24,7 @@ Master_User root Master_Port SLAVE_PORT Connect_Retry 60 Master_Log_File slave-bin.000001 -Read_Master_Log_Pos 212 +Read_Master_Log_Pos 216 Relay_Log_File # Relay_Log_Pos # Relay_Master_Log_File slave-bin.000001 @@ -39,7 +39,7 @@ Replicate_Wild_Ignore_Table Last_Errno 0 Last_Error Skip_Counter 0 -Exec_Master_Log_Pos 212 +Exec_Master_Log_Pos 216 Relay_Log_Space # Until_Condition None Until_Log_File diff --git a/mysql-test/r/rpl_incident.result b/mysql-test/r/rpl_incident.result new file mode 100644 index 00000000000..a9547832ed9 --- /dev/null +++ b/mysql-test/r/rpl_incident.result @@ -0,0 +1,105 @@ +stop slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +reset master; +reset slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +start slave; +**** On Master **** +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1),(2),(3); +SELECT * FROM t1; +a +1 +2 +3 +REPLACE INTO t1 VALUES (4); +SELECT * FROM t1; +a +1 +2 +3 +4 +**** On Slave **** +SELECT * FROM t1; +a +1 +2 +3 +SHOW SLAVE STATUS; +Slave_IO_State # +Master_Host 127.0.0.1 +Master_User root +Master_Port MASTER_PORT +Connect_Retry 1 +Master_Log_File master-bin.000002 +Read_Master_Log_Pos # +Relay_Log_File # +Relay_Log_Pos # +Relay_Master_Log_File master-bin.000001 +Slave_IO_Running Yes +Slave_SQL_Running No +Replicate_Do_DB +Replicate_Ignore_DB +Replicate_Do_Table +Replicate_Ignore_Table +Replicate_Wild_Do_Table +Replicate_Wild_Ignore_Table +Last_Errno 1586 +Last_Error The incident LOST_EVENTS occured on the master. Message: <none> +Skip_Counter 0 +Exec_Master_Log_Pos # +Relay_Log_Space # +Until_Condition None +Until_Log_File +Until_Log_Pos 0 +Master_SSL_Allowed No +Master_SSL_CA_File +Master_SSL_CA_Path +Master_SSL_Cert +Master_SSL_Cipher +Master_SSL_Key +Seconds_Behind_Master # +SET GLOBAL SQL_SLAVE_SKIP_COUNTER=1; +START SLAVE; +SELECT * FROM t1; +a +1 +2 +3 +4 +SHOW SLAVE STATUS; +Slave_IO_State # +Master_Host 127.0.0.1 +Master_User root +Master_Port MASTER_PORT +Connect_Retry 1 +Master_Log_File master-bin.000002 +Read_Master_Log_Pos # +Relay_Log_File # +Relay_Log_Pos # +Relay_Master_Log_File master-bin.000002 +Slave_IO_Running Yes +Slave_SQL_Running Yes +Replicate_Do_DB +Replicate_Ignore_DB +Replicate_Do_Table +Replicate_Ignore_Table +Replicate_Wild_Do_Table +Replicate_Wild_Ignore_Table +Last_Errno 0 +Last_Error +Skip_Counter 0 +Exec_Master_Log_Pos # +Relay_Log_Space # +Until_Condition None +Until_Log_File +Until_Log_Pos 0 +Master_SSL_Allowed No +Master_SSL_CA_File +Master_SSL_CA_Path +Master_SSL_Cert +Master_SSL_Cipher +Master_SSL_Key +Seconds_Behind_Master # +DROP TABLE t1; +DROP TABLE t1; diff --git a/mysql-test/r/rpl_known_bugs_detection.result b/mysql-test/r/rpl_known_bugs_detection.result index eabc6185780..9ce6ac25c88 100644 --- a/mysql-test/r/rpl_known_bugs_detection.result +++ b/mysql-test/r/rpl_known_bugs_detection.result @@ -33,7 +33,7 @@ Replicate_Wild_Ignore_Table Last_Errno 1105 Last_Error Error 'master may suffer from http://bugs.mysql.com/bug.php?id=24432 so slave stops; check error log on slave for more info' on query. Default database: 'test'. 
Query: 'INSERT INTO t1(b) VALUES(1),(1),(2) ON DUPLICATE KEY UPDATE t1.b=10' Skip_Counter 0 -Exec_Master_Log_Pos 242 +Exec_Master_Log_Pos 246 Relay_Log_Space # Until_Condition None Until_Log_File @@ -115,7 +115,7 @@ FROM t2 ON DUPLICATE KEY UPDATE t1.field_3 = t2.field_c' Skip_Counter 0 -Exec_Master_Log_Pos 1274 +Exec_Master_Log_Pos 1278 Relay_Log_Space # Until_Condition None Until_Log_File diff --git a/mysql-test/r/rpl_loaddata.result b/mysql-test/r/rpl_loaddata.result index cae11e98caa..c68d2d9677e 100644 --- a/mysql-test/r/rpl_loaddata.result +++ b/mysql-test/r/rpl_loaddata.result @@ -28,7 +28,7 @@ day id category name 2003-03-22 2416 a bbbbb show master status; File Position Binlog_Do_DB Binlog_Ignore_DB -slave-bin.000001 1276 +slave-bin.000001 1280 drop table t1; drop table t2; drop table t3; @@ -39,7 +39,7 @@ set global sql_slave_skip_counter=1; start slave; show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 1793 # # master-bin.000001 Yes Yes # 0 0 1793 # None 0 No # +# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 1797 # # master-bin.000001 Yes Yes # 0 0 1797 # None 0 No # set sql_log_bin=0; delete from t1; set sql_log_bin=1; @@ -49,7 +49,7 @@ change master to master_user='test'; change master to master_user='root'; show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 1828 # # master-bin.000001 No No # 0 0 1828 # None 0 No # +# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 1832 # # master-bin.000001 No No # 0 0 1832 # None 0 No # set global sql_slave_skip_counter=1; start slave; set sql_log_bin=0; diff --git a/mysql-test/r/rpl_loaddata_s.result b/mysql-test/r/rpl_loaddata_s.result index f0c79c81c15..4fc33eee20d 100644 --- a/mysql-test/r/rpl_loaddata_s.result +++ b/mysql-test/r/rpl_loaddata_s.result @@ -10,6 +10,6 @@ load data infile '../std_data_ln/rpl_loaddata.dat' into table test.t1; select count(*) from test.t1; count(*) 2 -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info drop table test.t1; diff --git a/mysql-test/r/rpl_log_pos.result b/mysql-test/r/rpl_log_pos.result index c7484022b23..01ff89741c8 100644 --- a/mysql-test/r/rpl_log_pos.result +++ b/mysql-test/r/rpl_log_pos.result @@ -6,37 +6,37 @@ drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; start slave; show master status; File Position Binlog_Do_DB Binlog_Ignore_DB -master-bin.000001 102 <Binlog_Ignore_DB> 
+master-bin.000001 106 <Binlog_Ignore_DB> show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 102 # # master-bin.000001 Yes Yes 0 0 102 # None 0 No # +# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 106 # # master-bin.000001 Yes Yes 0 0 106 # None 0 No # stop slave; -change master to master_log_pos=74; +change master to master_log_pos=75; start slave; stop slave; -change master to master_log_pos=74; +change master to master_log_pos=75; show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 74 # # master-bin.000001 No No 0 0 74 # None 0 No # +# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 75 # # master-bin.000001 No No 0 0 75 # None 0 No # start slave; show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 74 # # master-bin.000001 No Yes 0 0 74 # None 0 No # +# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 75 # # master-bin.000001 No Yes 0 0 75 # None 0 No # stop slave; -change master to master_log_pos=177; +change master to master_log_pos=178; start slave; show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 177 # # master-bin.000001 No Yes 0 0 177 # None 0 No # +# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 178 # # master-bin.000001 No Yes 0 0 178 # None 0 No # show master status; File Position Binlog_Do_DB 
Binlog_Ignore_DB -master-bin.000001 102 <Binlog_Ignore_DB> +master-bin.000001 106 <Binlog_Ignore_DB> create table if not exists t1 (n int); drop table if exists t1; create table t1 (n int); insert into t1 values (1),(2),(3); stop slave; -change master to master_log_pos=102; +change master to master_log_pos=106; start slave; select * from t1 ORDER BY n; n diff --git a/mysql-test/r/rpl_misc_functions.result b/mysql-test/r/rpl_misc_functions.result index c11663b8ac8..526414cec9c 100644 --- a/mysql-test/r/rpl_misc_functions.result +++ b/mysql-test/r/rpl_misc_functions.result @@ -18,6 +18,29 @@ create table t2 like t1; load data local infile 'MYSQLTEST_VARDIR/master-data/test/rpl_misc_functions.outfile' into table t2; select * from t1, t2 where (t1.id=t2.id) and not(t1.i=t2.i and t1.r1=t2.r1 and t1.r2=t2.r2 and t1.p=t2.p); id i r1 r2 p id i r1 r2 p -stop slave; -drop table t1; drop table t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (col_a double default NULL); +CREATE PROCEDURE test_replication_sp1() +BEGIN +INSERT INTO t1 VALUES (rand()), (rand()); +INSERT INTO t1 VALUES (rand()); +END| +CREATE PROCEDURE test_replication_sp2() +BEGIN +CALL test_replication_sp1(); +CALL test_replication_sp1(); +END| +CREATE FUNCTION test_replication_sf() RETURNS DOUBLE DETERMINISTIC +BEGIN +RETURN (rand() + rand()); +END| +CALL test_replication_sp1(); +CALL test_replication_sp2(); +INSERT INTO t1 VALUES (test_replication_sf()); +INSERT INTO t1 VALUES (test_replication_sf()); +INSERT INTO t1 VALUES (test_replication_sf()); +DROP PROCEDURE IF EXISTS test_replication_sp1; +DROP PROCEDURE IF EXISTS test_replication_sp2; +DROP FUNCTION IF EXISTS test_replication_sf; +DROP TABLE IF EXISTS t1; diff --git a/mysql-test/r/rpl_ndb_basic.result b/mysql-test/r/rpl_ndb_basic.result index 32a1c790c99..499ce41597c 100644 --- a/mysql-test/r/rpl_ndb_basic.result +++ b/mysql-test/r/rpl_ndb_basic.result @@ -65,6 +65,41 @@ nid nom prenom 4 CCP DDD 5 EEE FFF DROP table t1; +CREATE TABLE `t1` ( +`prid` int(10) unsigned NOT NULL, +`id_type` enum('IMSI','SIP') NOT NULL, +`fkimssub` varchar(50) NOT NULL, +`user_id` varchar(20) DEFAULT NULL, +`password` varchar(20) DEFAULT NULL, +`ptg_nbr` varchar(20) DEFAULT NULL, +`old_tmsi` int(10) unsigned DEFAULT NULL, +`new_tmsi` int(10) unsigned DEFAULT NULL, +`dev_capability` int(10) unsigned DEFAULT NULL, +`dev_oid` bigint(20) unsigned DEFAULT NULL, +`lac_cell_id` bigint(20) unsigned DEFAULT NULL, +`ms_classmark1` int(10) unsigned DEFAULT NULL, +`cipher_key` int(10) unsigned DEFAULT NULL, +`priid_master` int(10) unsigned DEFAULT NULL, +PRIMARY KEY (`prid`), +UNIQUE KEY `fkimssub` (`fkimssub`,`ptg_nbr`) USING HASH +) ENGINE=ndbcluster DEFAULT CHARSET=latin1; +Warnings: +Warning 1121 Ndb does not support unique index on NULL valued attributes, index access with NULL value will become full table scan +INSERT INTO `t1` VALUES 
(183342,'IMSI','config3_sub_2Privates_3Publics_imssub_36668','user_id_73336','user_id_73336','73336',NULL,NULL,NULL,123456789,NULL,NULL,NULL,NULL),(47617,'IMSI','config3_sub_2Privates_3Publics_imssub_9523','user_id_19046','user_id_19046','19046',NULL,NULL,NULL,123456789,NULL,NULL,NULL,NULL),(200332,'IMSI','config3_sub_2Privates_3Publics_imssub_40066','user_id_80132','user_id_80132','80132',NULL,NULL,NULL,123456789,NULL,NULL,NULL,NULL),(478882,'IMSI','config3_sub_2Privates_3Publics_imssub_95776','user_id_191552','user_id_191552','191552',NULL,NULL,NULL,123456789,NULL,NULL,NULL,NULL),(490146,'IMSI','config3_sub_2Privates_3Publics_imssub_98029','user_id_196057','user_id_196057','196057',NULL,NULL,NULL,1010,NULL,NULL,NULL,NULL),(499301,'IMSI','config3_sub_2Privates_3Publics_imssub_99860','user_id_199719','user_id_199719','199719',NULL,NULL,NULL,123456789,NULL,NULL,NULL,NULL),(506101,'IMSI','config3_sub_2Privates_3Publics_imssub_101220','user_id_202439','user_id_202439','202439',NULL,NULL,NULL,1010,NULL,NULL,NULL,NULL),(510142,'IMSI','config3_sub_2Privates_3Publics_imssub_102028','user_id_204056','user_id_204056','204056',NULL,NULL,NULL,1010,NULL,NULL,NULL,NULL),(515871,'IMSI','config3_sub_2Privates_3Publics_imssub_103174','user_id_206347','user_id_206347','206347',NULL,NULL,NULL,1010,NULL,NULL,NULL,NULL),(209842,'IMSI','config3_sub_2Privates_3Publics_imssub_41968','user_id_83936','user_id_83936','83936',NULL,NULL,NULL,123456789,NULL,NULL,NULL,NULL),(365902,'IMSI','config3_sub_2Privates_3Publics_imssub_73180','user_id_146360','user_id_146360','146360',NULL,NULL,NULL,1010,NULL,NULL,NULL,NULL),(11892,'IMSI','config3_sub_2Privates_3Publics_imssub_2378','user_id_4756','user_id_4756','4756',NULL,NULL,NULL,123456789,NULL,NULL,NULL,NULL); +select count(*) from t1; +count(*) +12 +select count(*) from t1; +count(*) +12 +update t1 set dev_oid=dev_oid+1; +select count(*) from t1; +count(*) +12 +select count(*) from t1; +count(*) +12 +DROP table t1; CREATE TABLE `t1` ( `nid` int(11) NOT NULL default '0', `nom` char(4) default NULL, `prenom` char(4) default NULL, @@ -97,8 +132,8 @@ Replicate_Do_Table Replicate_Ignore_Table <Replicate_Ignore_Table> Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table -Last_Errno 146 -Last_Error Error in Write_rows event: error during transaction execution on table test.t1 +Last_Errno 1105 +Last_Error Unknown error Skip_Counter 0 Exec_Master_Log_Pos <Exec_Master_Log_Pos> Relay_Log_Space <Relay_Log_Space> diff --git a/mysql-test/r/rpl_ndb_charset.result b/mysql-test/r/rpl_ndb_charset.result index 0ce4446c8a5..ed9b3cfbfa8 100644 --- a/mysql-test/r/rpl_ndb_charset.result +++ b/mysql-test/r/rpl_ndb_charset.result @@ -109,39 +109,39 @@ a b 1 cp850_general_ci drop database mysqltest2; drop database mysqltest3; -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # drop database if exists mysqltest2 -master-bin.000001 # Query 1 # drop database if exists mysqltest3 -master-bin.000001 # Query 1 # create database mysqltest2 character set latin2 -master-bin.000001 # Query 1 # create database mysqltest3 -master-bin.000001 # Query 1 # drop database mysqltest3 -master-bin.000001 # Query 1 # create database mysqltest3 -master-bin.000001 # Query 1 # use `mysqltest2`; create table t1 (a int auto_increment primary key, b varchar(100)) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest2.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # 
table_id: # (mysqltest2.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest2.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest2.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest2.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest2`; truncate table t1 -master-bin.000001 # Table_map 1 # table_id: # (mysqltest2.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest2.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest2.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest2.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest2`; truncate table t1 -master-bin.000001 # Table_map 1 # table_id: # (mysqltest2.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # drop database mysqltest2 -master-bin.000001 # Query 1 # drop database mysqltest3 +master-bin.000001 # Query # # drop database if exists mysqltest2 +master-bin.000001 # Query # # drop database if exists mysqltest3 +master-bin.000001 # Query # # create database mysqltest2 character set latin2 +master-bin.000001 # Query # # create database mysqltest3 +master-bin.000001 # Query # # drop database mysqltest3 +master-bin.000001 # Query # # create database mysqltest3 +master-bin.000001 # Query # # use `mysqltest2`; create table t1 (a int auto_increment primary key, b varchar(100)) +master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest2`; truncate table t1 +master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest2`; truncate table t1 +master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # drop database mysqltest2 +master-bin.000001 # Query # # drop database mysqltest3 select "--- --global--" as ""; --- --global-- diff --git a/mysql-test/r/rpl_ndb_ddl.result b/mysql-test/r/rpl_ndb_ddl.result index 
f5d8073be94..aeaca1e7de0 100644 --- a/mysql-test/r/rpl_ndb_ddl.result +++ b/mysql-test/r/rpl_ndb_ddl.result @@ -4,45 +4,47 @@ reset master; reset slave; drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; start slave; + +-------- switch to master ------- SET AUTOCOMMIT = 1; DROP DATABASE IF EXISTS mysqltest1; DROP DATABASE IF EXISTS mysqltest2; DROP DATABASE IF EXISTS mysqltest3; CREATE DATABASE mysqltest1; CREATE DATABASE mysqltest2; -CREATE TABLE mysqltest1.t1 (f1 BIGINT) ENGINE="NDB"; +CREATE TABLE mysqltest1.t1 (f1 BIGINT) ENGINE=NDB; INSERT INTO mysqltest1.t1 SET f1= 0; -CREATE TABLE mysqltest1.t2 (f1 BIGINT) ENGINE="NDB"; -CREATE TABLE mysqltest1.t3 (f1 BIGINT) ENGINE="NDB"; -CREATE TABLE mysqltest1.t4 (f1 BIGINT) ENGINE="NDB"; -CREATE TABLE mysqltest1.t5 (f1 BIGINT) ENGINE="NDB"; -CREATE TABLE mysqltest1.t6 (f1 BIGINT) ENGINE="NDB"; +CREATE TABLE mysqltest1.t2 (f1 BIGINT) ENGINE=NDB; +CREATE TABLE mysqltest1.t3 (f1 BIGINT) ENGINE=NDB; +CREATE TABLE mysqltest1.t4 (f1 BIGINT) ENGINE=NDB; +CREATE TABLE mysqltest1.t5 (f1 BIGINT) ENGINE=NDB; +CREATE TABLE mysqltest1.t6 (f1 BIGINT) ENGINE=NDB; CREATE INDEX my_idx6 ON mysqltest1.t6(f1); -CREATE TABLE mysqltest1.t7 (f1 BIGINT) ENGINE="NDB"; +CREATE TABLE mysqltest1.t7 (f1 BIGINT) ENGINE=NDB; INSERT INTO mysqltest1.t7 SET f1= 0; -CREATE TABLE mysqltest1.t8 (f1 BIGINT) ENGINE="NDB"; -CREATE TABLE mysqltest1.t9 (f1 BIGINT) ENGINE="NDB"; -CREATE TABLE mysqltest1.t10 (f1 BIGINT) ENGINE="NDB"; -CREATE TABLE mysqltest1.t11 (f1 BIGINT) ENGINE="NDB"; -CREATE TABLE mysqltest1.t12 (f1 BIGINT) ENGINE="NDB"; -CREATE TABLE mysqltest1.t13 (f1 BIGINT) ENGINE="NDB"; -CREATE TABLE mysqltest1.t14 (f1 BIGINT) ENGINE="NDB"; -CREATE TABLE mysqltest1.t15 (f1 BIGINT) ENGINE="NDB"; -CREATE TABLE mysqltest1.t16 (f1 BIGINT) ENGINE="NDB"; -CREATE TABLE mysqltest1.t17 (f1 BIGINT) ENGINE="NDB"; -CREATE TABLE mysqltest1.t18 (f1 BIGINT) ENGINE="NDB"; -CREATE TABLE mysqltest1.t19 (f1 BIGINT) ENGINE="NDB"; -CREATE TEMPORARY TABLE mysqltest1.t23 (f1 BIGINT); +CREATE TABLE mysqltest1.t8 (f1 BIGINT) ENGINE=NDB; +CREATE TABLE mysqltest1.t9 (f1 BIGINT) ENGINE=NDB; +CREATE TABLE mysqltest1.t10 (f1 BIGINT) ENGINE=NDB; +CREATE TABLE mysqltest1.t11 (f1 BIGINT) ENGINE=NDB; +CREATE TABLE mysqltest1.t12 (f1 BIGINT) ENGINE=NDB; +CREATE TABLE mysqltest1.t13 (f1 BIGINT) ENGINE=NDB; +CREATE TABLE mysqltest1.t14 (f1 BIGINT) ENGINE=NDB; +CREATE TABLE mysqltest1.t15 (f1 BIGINT) ENGINE=NDB; +CREATE TABLE mysqltest1.t16 (f1 BIGINT) ENGINE=NDB; +CREATE TABLE mysqltest1.t17 (f1 BIGINT) ENGINE=NDB; +CREATE TABLE mysqltest1.t18 (f1 BIGINT) ENGINE=NDB; +CREATE TABLE mysqltest1.t19 (f1 BIGINT) ENGINE=NDB; +CREATE TEMPORARY TABLE mysqltest1.t23 (f1 BIGINT) ENGINE=MEMORY; SET AUTOCOMMIT = 0; use mysqltest1; -------- switch to slave -------- -SET AUTOCOMMIT = 0; +SET AUTOCOMMIT = 1; use mysqltest1; -------- switch to master ------- -######## COMMIT ######## +######## SELECT 1 ######## -------- switch to master ------- INSERT INTO t1 SET f1= 0 + 1; @@ -56,7 +58,9 @@ MAX(f1) 0 -------- switch to master ------- -COMMIT; +SELECT 1; +1 +1 SELECT MAX(f1) FROM t1; MAX(f1) 1 @@ -64,32 +68,110 @@ MAX(f1) -------- switch to slave -------- SELECT MAX(f1) FROM t1; MAX(f1) -1 +0 -------- switch to master ------- ROLLBACK; SELECT MAX(f1) FROM t1; MAX(f1) +0 + +TEST-INFO: MASTER: The INSERT is not committed (Succeeded) + +-------- switch to slave -------- +SELECT MAX(f1) FROM t1; +MAX(f1) +0 + +TEST-INFO: SLAVE: The INSERT is not committed (Succeeded) + +-------- switch to master ------- + +######## SELECT COUNT(*) FROM 
t1 ######## + +-------- switch to master ------- +INSERT INTO t1 SET f1= 0 + 1; +SELECT MAX(f1) FROM t1; +MAX(f1) 1 -TEST-INFO: MASTER: The INSERT is committed (Succeeded) +-------- switch to slave -------- +SELECT MAX(f1) FROM t1; +MAX(f1) +0 + +-------- switch to master ------- +SELECT COUNT(*) FROM t1; +COUNT(*) +2 +SELECT MAX(f1) FROM t1; +MAX(f1) +1 -------- switch to slave -------- SELECT MAX(f1) FROM t1; MAX(f1) +0 + +-------- switch to master ------- +ROLLBACK; +SELECT MAX(f1) FROM t1; +MAX(f1) +0 + +TEST-INFO: MASTER: The INSERT is not committed (Succeeded) + +-------- switch to slave -------- +SELECT MAX(f1) FROM t1; +MAX(f1) +0 + +TEST-INFO: SLAVE: The INSERT is not committed (Succeeded) + +-------- switch to master ------- + +######## COMMIT ######## + +-------- switch to master ------- +INSERT INTO t1 SET f1= 0 + 1; +SELECT MAX(f1) FROM t1; +MAX(f1) 1 -TEST-INFO: SLAVE: The INSERT is committed (Succeeded) +-------- switch to slave -------- +SELECT MAX(f1) FROM t1; +MAX(f1) +0 -------- switch to master ------- -flush logs; +COMMIT; +SELECT MAX(f1) FROM t1; +MAX(f1) +1 -------- switch to slave -------- -flush logs; +SELECT MAX(f1) FROM t1; +MAX(f1) +1 -------- switch to master ------- +ROLLBACK; +SELECT MAX(f1) FROM t1; +MAX(f1) +1 -######## ROLLBACK ######## +TEST-INFO: MASTER: The INSERT is committed (Succeeded) + +-------- switch to slave -------- +SELECT MAX(f1) FROM t1; +MAX(f1) +1 + +TEST-INFO: SLAVE: The INSERT is committed (Succeeded) + +-------- switch to master ------- + +######## ROLLBACK ######## -------- switch to master ------- INSERT INTO t1 SET f1= 1 + 1; @@ -129,14 +211,8 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is not committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- -######## SET AUTOCOMMIT=1 ######## +######## SET AUTOCOMMIT=1 ######## -------- switch to master ------- INSERT INTO t1 SET f1= 1 + 1; @@ -176,15 +252,9 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SET AUTOCOMMIT=0; -######## START TRANSACTION ######## +######## START TRANSACTION ######## -------- switch to master ------- INSERT INTO t1 SET f1= 2 + 1; @@ -224,14 +294,8 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- -######## BEGIN ######## +######## BEGIN ######## -------- switch to master ------- INSERT INTO t1 SET f1= 3 + 1; @@ -271,14 +335,8 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; --------- switch to slave -------- -flush logs; - --------- switch to master ------- - -######## DROP TABLE mysqltest1.t2 ######## +######## DROP TABLE mysqltest1.t2 ######## -------- switch to master ------- INSERT INTO t1 SET f1= 4 + 1; @@ -318,12 +376,6 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SHOW TABLES LIKE 't2'; Tables_in_mysqltest1 (t2) @@ -333,7 +385,7 @@ Tables_in_mysqltest1 (t2) -------- switch to master ------- -######## DROP TEMPORARY TABLE mysqltest1.t23 ######## +######## DROP TEMPORARY TABLE mysqltest1.t23 ######## -------- switch to master ------- INSERT INTO t1 SET f1= 5 
+ 1; @@ -373,12 +425,6 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is not committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SHOW TABLES LIKE 't23'; Tables_in_mysqltest1 (t23) @@ -388,7 +434,7 @@ Tables_in_mysqltest1 (t23) -------- switch to master ------- -######## RENAME TABLE mysqltest1.t3 to mysqltest1.t20 ######## +######## RENAME TABLE mysqltest1.t3 to mysqltest1.t20 ######## -------- switch to master ------- INSERT INTO t1 SET f1= 5 + 1; @@ -428,12 +474,6 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SHOW TABLES LIKE 't20'; Tables_in_mysqltest1 (t20) t20 @@ -445,7 +485,7 @@ t20 -------- switch to master ------- -######## ALTER TABLE mysqltest1.t4 ADD column f2 BIGINT ######## +######## ALTER TABLE mysqltest1.t4 ADD column f2 BIGINT ######## -------- switch to master ------- INSERT INTO t1 SET f1= 6 + 1; @@ -485,12 +525,6 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- describe mysqltest1.t4; Field Type Null Key Default Extra f1 bigint(20) YES NULL @@ -504,7 +538,7 @@ f2 bigint(20) YES NULL -------- switch to master ------- -######## CREATE TABLE mysqltest1.t21 (f1 BIGINT) ENGINE= "NDB" ######## +######## CREATE TABLE mysqltest1.t21 (f1 BIGINT) ENGINE= NDB ######## -------- switch to master ------- INSERT INTO t1 SET f1= 7 + 1; @@ -518,7 +552,7 @@ MAX(f1) 7 -------- switch to master ------- -CREATE TABLE mysqltest1.t21 (f1 BIGINT) ENGINE= "NDB"; +CREATE TABLE mysqltest1.t21 (f1 BIGINT) ENGINE= NDB; SELECT MAX(f1) FROM t1; MAX(f1) 8 @@ -544,14 +578,8 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- -######## CREATE TEMPORARY TABLE mysqltest1.t22 (f1 BIGINT) ######## +######## CREATE TEMPORARY TABLE mysqltest1.t22 (f1 BIGINT) ENGINE=MEMORY ######## -------- switch to master ------- INSERT INTO t1 SET f1= 8 + 1; @@ -565,7 +593,7 @@ MAX(f1) 8 -------- switch to master ------- -CREATE TEMPORARY TABLE mysqltest1.t22 (f1 BIGINT); +CREATE TEMPORARY TABLE mysqltest1.t22 (f1 BIGINT) ENGINE=MEMORY; SELECT MAX(f1) FROM t1; MAX(f1) 9 @@ -591,14 +619,8 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is not committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- -######## TRUNCATE TABLE mysqltest1.t7 ######## +######## TRUNCATE TABLE mysqltest1.t7 ######## -------- switch to master ------- INSERT INTO t1 SET f1= 8 + 1; @@ -638,20 +660,16 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SELECT * FROM mysqltest1.t7; f1 + -------- switch to slave -------- SELECT * FROM mysqltest1.t7; f1 + -------- switch to master ------- -######## LOCK TABLES mysqltest1.t1 WRITE, mysqltest1.t8 READ ######## +######## LOCK TABLES mysqltest1.t1 WRITE, mysqltest1.t8 READ ######## -------- switch to master ------- INSERT INTO t1 SET f1= 9 + 1; @@ -691,15 +709,9 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- 
switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- UNLOCK TABLES; -######## UNLOCK TABLES ######## +######## UNLOCK TABLES ######## -------- switch to master ------- INSERT INTO t1 SET f1= 10 + 1; @@ -739,15 +751,9 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is not committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- LOCK TABLES mysqltest1.t1 READ; -######## UNLOCK TABLES ######## +######## UNLOCK TABLES ######## -------- switch to master ------- INSERT INTO t1 SET f1= 10 + 1; @@ -788,15 +794,9 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is not committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- LOCK TABLES mysqltest1.t1 WRITE, mysqltest1.t8 READ; -######## UNLOCK TABLES ######## +######## UNLOCK TABLES ######## -------- switch to master ------- INSERT INTO t1 SET f1= 10 + 1; @@ -836,14 +836,8 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; --------- switch to slave -------- -flush logs; - --------- switch to master ------- - -######## DROP INDEX my_idx6 ON mysqltest1.t6 ######## +######## DROP INDEX my_idx6 ON mysqltest1.t6 ######## -------- switch to master ------- INSERT INTO t1 SET f1= 11 + 1; @@ -883,12 +877,6 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SHOW INDEX FROM mysqltest1.t6; Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment @@ -898,7 +886,7 @@ Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_par -------- switch to master ------- -######## CREATE INDEX my_idx5 ON mysqltest1.t5(f1) ######## +######## CREATE INDEX my_idx5 ON mysqltest1.t5(f1) ######## -------- switch to master ------- INSERT INTO t1 SET f1= 12 + 1; @@ -938,12 +926,6 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SHOW INDEX FROM mysqltest1.t5; Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment t5 1 my_idx5 1 f1 A 0 NULL NULL YES BTREE @@ -955,7 +937,7 @@ t5 1 my_idx5 1 f1 A 0 NULL NULL YES BTREE -------- switch to master ------- -######## DROP DATABASE mysqltest2 ######## +######## DROP DATABASE mysqltest2 ######## -------- switch to master ------- INSERT INTO t1 SET f1= 13 + 1; @@ -995,12 +977,6 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SHOW DATABASES LIKE "mysqltest2"; Database (mysqltest2) @@ -1010,7 +986,7 @@ Database (mysqltest2) -------- switch to master ------- -######## CREATE DATABASE mysqltest3 ######## +######## CREATE DATABASE mysqltest3 ######## -------- switch to master ------- INSERT INTO t1 SET f1= 14 + 1; @@ -1050,12 +1026,6 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SHOW DATABASES LIKE "mysqltest3"; 
Database (mysqltest3) mysqltest3 @@ -1067,7 +1037,7 @@ mysqltest3 -------- switch to master ------- -######## CREATE PROCEDURE p1() READS SQL DATA SELECT "this is p1" ######## +######## CREATE PROCEDURE p1() READS SQL DATA SELECT "this is p1" ######## -------- switch to master ------- INSERT INTO t1 SET f1= 15 + 1; @@ -1107,12 +1077,6 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SHOW PROCEDURE STATUS LIKE 'p1'; Db mysqltest1 Name p1 @@ -1122,7 +1086,8 @@ Modified # Created # Security_type DEFINER Comment - -------- switch to slave ------- + +-------- switch to slave -------- SHOW PROCEDURE STATUS LIKE 'p1'; Db mysqltest1 Name p1 @@ -1133,7 +1098,9 @@ Created # Security_type DEFINER Comment -######## ALTER PROCEDURE p1 COMMENT "I have been altered" ######## +-------- switch to master ------- + +######## ALTER PROCEDURE p1 COMMENT "I have been altered" ######## -------- switch to master ------- INSERT INTO t1 SET f1= 16 + 1; @@ -1173,12 +1140,6 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SHOW PROCEDURE STATUS LIKE 'p1'; Db mysqltest1 Name p1 @@ -1188,7 +1149,8 @@ Modified # Created # Security_type DEFINER Comment I have been altered - -------- switch to slave ------- + +-------- switch to slave -------- SHOW PROCEDURE STATUS LIKE 'p1'; Db mysqltest1 Name p1 @@ -1199,7 +1161,9 @@ Created # Security_type DEFINER Comment I have been altered -######## DROP PROCEDURE p1 ######## +-------- switch to master ------- + +######## DROP PROCEDURE p1 ######## -------- switch to master ------- INSERT INTO t1 SET f1= 17 + 1; @@ -1239,17 +1203,14 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; +SHOW PROCEDURE STATUS LIKE 'p1'; -------- switch to slave -------- -flush logs; +SHOW PROCEDURE STATUS LIKE 'p1'; -------- switch to master ------- -SHOW PROCEDURE STATUS LIKE 'p1'; - -------- switch to slave ------- -SHOW PROCEDURE STATUS LIKE 'p1'; -######## CREATE OR REPLACE VIEW v1 as select * from t1 ######## +######## CREATE OR REPLACE VIEW v1 as select * from t1 ######## -------- switch to master ------- INSERT INTO t1 SET f1= 18 + 1; @@ -1289,22 +1250,18 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SHOW CREATE VIEW v1; View Create View v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`f1` AS `f1` from `t1` --------- switch to slave ------- +-------- switch to slave -------- SHOW CREATE VIEW v1; View Create View v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`f1` AS `f1` from `t1` -######## ALTER VIEW v1 AS select f1 from t1 ######## +-------- switch to master ------- + +######## ALTER VIEW v1 AS select f1 from t1 ######## -------- switch to master ------- INSERT INTO t1 SET f1= 19 + 1; @@ -1344,22 +1301,18 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SHOW CREATE VIEW v1; View Create View v1 CREATE ALGORITHM=UNDEFINED 
DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`f1` AS `f1` from `t1` --------- switch to slave ------- +-------- switch to slave -------- SHOW CREATE VIEW v1; View Create View v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`f1` AS `f1` from `t1` -######## DROP VIEW IF EXISTS v1 ######## +-------- switch to master ------- + +######## DROP VIEW IF EXISTS v1 ######## -------- switch to master ------- INSERT INTO t1 SET f1= 20 + 1; @@ -1399,20 +1352,16 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SHOW CREATE VIEW v1; ERROR 42S02: Table 'mysqltest1.v1' doesn't exist --------- switch to slave ------- +-------- switch to slave -------- SHOW CREATE VIEW v1; ERROR 42S02: Table 'mysqltest1.v1' doesn't exist -######## CREATE TRIGGER trg1 BEFORE INSERT ON t1 FOR EACH ROW SET @a:=1 ######## +-------- switch to master ------- + +######## CREATE TRIGGER trg1 BEFORE INSERT ON t1 FOR EACH ROW SET @a:=1 ######## -------- switch to master ------- INSERT INTO t1 SET f1= 21 + 1; @@ -1452,22 +1401,18 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SHOW TRIGGERS; Trigger Event Table Statement Timing Created sql_mode Definer trg1 INSERT t1 SET @a:=1 BEFORE NULL root@localhost --------- switch to slave ------- +-------- switch to slave -------- SHOW TRIGGERS; Trigger Event Table Statement Timing Created sql_mode Definer trg1 INSERT t1 SET @a:=1 BEFORE NULL root@localhost -######## DROP TRIGGER trg1 ######## +-------- switch to master ------- + +######## DROP TRIGGER trg1 ######## -------- switch to master ------- INSERT INTO t1 SET f1= 22 + 1; @@ -1507,20 +1452,16 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SHOW TRIGGERS; Trigger Event Table Statement Timing Created sql_mode Definer --------- switch to slave ------- +-------- switch to slave -------- SHOW TRIGGERS; Trigger Event Table Statement Timing Created sql_mode Definer -######## CREATE USER user1@localhost ######## +-------- switch to master ------- + +######## CREATE USER user1@localhost ######## -------- switch to master ------- INSERT INTO t1 SET f1= 23 + 1; @@ -1560,22 +1501,18 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SELECT user FROM mysql.user WHERE user = 'user1'; user user1 --------- switch to slave ------- +-------- switch to slave -------- SELECT user FROM mysql.user WHERE user = 'user1'; user user1 -######## RENAME USER user1@localhost TO rename1@localhost ######## +-------- switch to master ------- + +######## RENAME USER user1@localhost TO rename1@localhost ######## -------- switch to master ------- INSERT INTO t1 SET f1= 24 + 1; @@ -1615,22 +1552,18 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SELECT user FROM mysql.user WHERE user = 'rename1'; user rename1 --------- switch to slave ------- +-------- 
switch to slave -------- SELECT user FROM mysql.user WHERE user = 'rename1'; user rename1 -######## DROP USER rename1@localhost ######## +-------- switch to master ------- + +######## DROP USER rename1@localhost ######## -------- switch to master ------- INSERT INTO t1 SET f1= 25 + 1; @@ -1670,19 +1603,14 @@ MAX(f1) TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- -flush logs; - --------- switch to slave -------- -flush logs; - --------- switch to master ------- SELECT user FROM mysql.user WHERE user = 'rename1'; user --------- switch to slave ------- +-------- switch to slave -------- SELECT user FROM mysql.user WHERE user = 'rename1'; user -DROP DATABASE IF EXISTS mysqltest1; -DROP DATABASE IF EXISTS mysqltest2; -DROP DATABASE IF EXISTS mysqltest3; -ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction +use test; + +-------- switch to master ------- +DROP DATABASE mysqltest1; +DROP DATABASE mysqltest3; diff --git a/mysql-test/r/rpl_ndb_do_table.result b/mysql-test/r/rpl_ndb_do_table.result index a5854985352..dda2844f6d0 100644 --- a/mysql-test/r/rpl_ndb_do_table.result +++ b/mysql-test/r/rpl_ndb_do_table.result @@ -19,4 +19,8 @@ t1 SELECT COUNT(*) FROM t1; COUNT(*) 3 +INSERT INTO t1 VALUES (3, repeat('bad',1)); +ERROR 23000: Duplicate entry '3' for key 'PRIMARY' +INSERT INTO t1 VALUES (3, repeat('bad too',1)); +ERROR 23000: Duplicate entry '3' for key 'PRIMARY' DROP TABLE IF EXISTS t1, t2; diff --git a/mysql-test/r/rpl_ndb_extraCol.result b/mysql-test/r/rpl_ndb_extraCol.result index bc40e24ecac..336a6152d36 100644 --- a/mysql-test/r/rpl_ndb_extraCol.result +++ b/mysql-test/r/rpl_ndb_extraCol.result @@ -28,9 +28,9 @@ a b c *** Select from slave *** SELECT * FROM t1 ORDER BY a; a b c d e -1 2 TEXAS 2 TEST -2 1 AUSTIN 2 TEST -3 4 QA 2 TEST +1 2 TEXAS NULL NULL +2 1 AUSTIN NULL NULL +3 4 QA NULL NULL *** Drop t1 *** DROP TABLE t1; *** Create t3 on slave *** @@ -289,9 +289,9 @@ a b c *** Select from slave *** SELECT * FROM t7 ORDER BY a; a b c d e -1 b1b1 Kyle 0000-00-00 00:00:00 Extra Column Testing -2 b1b1 JOE 0000-00-00 00:00:00 Extra Column Testing -3 b1b1 QA 0000-00-00 00:00:00 Extra Column Testing +1 b1b1 Kyle NULL NULL +2 b1b1 JOE NULL NULL +3 b1b1 QA NULL NULL *** Drop t7 *** DROP TABLE t7; *** Create t8 on slave *** @@ -447,9 +447,9 @@ a b c *** Select on Slave *** SELECT * FROM t12 ORDER BY a; a b f c e -1 b1b1b1b1b1b1b1b1 Kyle test 1 -2 b1b1b1b1b1b1b1b1 JOE test 1 -3 b1b1b1b1b1b1b1b1 QA test 1 +1 b1b1b1b1b1b1b1b1 Kyle NULL NULL +2 b1b1b1b1b1b1b1b1 JOE NULL NULL +3 b1b1b1b1b1b1b1b1 QA NULL NULL *** Drop t12 *** DROP TABLE t12; **** Extra Colums End **** @@ -479,9 +479,9 @@ a b c *** Select on Slave **** SELECT * FROM t13 ORDER BY a; a b c d e -1 b1b1b1b1b1b1b1b1 Kyle 1 CURRENT_TIMESTAMP -2 b1b1b1b1b1b1b1b1 JOE 1 CURRENT_TIMESTAMP -3 b1b1b1b1b1b1b1b1 QA 1 CURRENT_TIMESTAMP +1 b1b1b1b1b1b1b1b1 Kyle NULL CURRENT_TIMESTAMP +2 b1b1b1b1b1b1b1b1 JOE NULL CURRENT_TIMESTAMP +3 b1b1b1b1b1b1b1b1 QA NULL CURRENT_TIMESTAMP *** Drop t13 *** DROP TABLE t13; *** 22117 END *** @@ -515,9 +515,9 @@ c1 c2 c3 c4 c5 *** Select on Slave **** SELECT * FROM t14 ORDER BY c1; c1 c2 c3 c4 c5 c6 c7 -1 1.00 Replication Testing Extra Col b1b1b1b1b1b1b1b1 Kyle 1 CURRENT_TIMESTAMP -2 2.00 This Test Should work b1b1b1b1b1b1b1b1 JOE 1 CURRENT_TIMESTAMP -3 3.00 If is does not, I will open a bug b1b1b1b1b1b1b1b1 QA 1 CURRENT_TIMESTAMP +1 1.00 Replication Testing Extra Col b1b1b1b1b1b1b1b1 Kyle NULL CURRENT_TIMESTAMP +2 2.00 
This Test Should work b1b1b1b1b1b1b1b1 JOE NULL CURRENT_TIMESTAMP +3 3.00 If is does not, I will open a bug b1b1b1b1b1b1b1b1 QA NULL CURRENT_TIMESTAMP *** connect to master and drop columns *** ALTER TABLE t14 DROP COLUMN c2; ALTER TABLE t14 DROP COLUMN c4; @@ -530,9 +530,9 @@ c1 c3 c5 *** Select from Slave *** SELECT * FROM t14 ORDER BY c1; c1 c3 c5 c6 c7 -1 Replication Testing Extra Col Kyle 1 CURRENT_TIMESTAMP -2 This Test Should work JOE 1 CURRENT_TIMESTAMP -3 If is does not, I will open a bug QA 1 CURRENT_TIMESTAMP +1 Replication Testing Extra Col Kyle NULL CURRENT_TIMESTAMP +2 This Test Should work JOE NULL CURRENT_TIMESTAMP +3 If is does not, I will open a bug QA NULL CURRENT_TIMESTAMP *** Drop t14 *** DROP TABLE t14; *** Create t15 on slave *** @@ -563,9 +563,9 @@ c1 c2 c3 c4 c5 *** Select on Slave **** SELECT * FROM t15 ORDER BY c1; c1 c2 c3 c4 c5 c6 c7 -1 1.00 Replication Testing Extra Col b1b1b1b1b1b1b1b1 Kyle 1 CURRENT_TIMESTAMP -2 2.00 This Test Should work b1b1b1b1b1b1b1b1 JOE 1 CURRENT_TIMESTAMP -3 3.00 If is does not, I will open a bug b1b1b1b1b1b1b1b1 QA 1 CURRENT_TIMESTAMP +1 1.00 Replication Testing Extra Col b1b1b1b1b1b1b1b1 Kyle NULL CURRENT_TIMESTAMP +2 2.00 This Test Should work b1b1b1b1b1b1b1b1 JOE NULL CURRENT_TIMESTAMP +3 3.00 If is does not, I will open a bug b1b1b1b1b1b1b1b1 QA NULL CURRENT_TIMESTAMP *** Add column on master that is a Extra on Slave *** ALTER TABLE t15 ADD COLUMN c6 INT AFTER c5; ******************************************** @@ -618,9 +618,9 @@ c1 c2 c3 c4 c5 c6 *** Try to select from slave **** SELECT * FROM t15 ORDER BY c1; c1 c2 c3 c4 c5 c6 c7 -1 1.00 Replication Testing Extra Col b1b1b1b1b1b1b1b1 Kyle 1 CURRENT_TIMESTAMP -2 2.00 This Test Should work b1b1b1b1b1b1b1b1 JOE 1 CURRENT_TIMESTAMP -3 3.00 If is does not, I will open a bug b1b1b1b1b1b1b1b1 QA 1 CURRENT_TIMESTAMP +1 1.00 Replication Testing Extra Col b1b1b1b1b1b1b1b1 Kyle NULL CURRENT_TIMESTAMP +2 2.00 This Test Should work b1b1b1b1b1b1b1b1 JOE NULL CURRENT_TIMESTAMP +3 3.00 If is does not, I will open a bug b1b1b1b1b1b1b1b1 QA NULL CURRENT_TIMESTAMP 5 2.00 Replication Testing b1b1b1b1b1b1b1b1 Buda 2 CURRENT_TIMESTAMP *** DROP TABLE t15 *** DROP TABLE t15; @@ -652,9 +652,9 @@ c1 c2 c3 c4 c5 *** Select on Slave **** SELECT * FROM t16 ORDER BY c1; c1 c2 c3 c4 c5 c6 c7 -1 1.00 Replication Testing Extra Col b1b1b1b1b1b1b1b1 Kyle 1 CURRENT_TIMESTAMP -2 2.00 This Test Should work b1b1b1b1b1b1b1b1 JOE 1 CURRENT_TIMESTAMP -3 3.00 If is does not, I will open a bug b1b1b1b1b1b1b1b1 QA 1 CURRENT_TIMESTAMP +1 1.00 Replication Testing Extra Col b1b1b1b1b1b1b1b1 Kyle NULL CURRENT_TIMESTAMP +2 2.00 This Test Should work b1b1b1b1b1b1b1b1 JOE NULL CURRENT_TIMESTAMP +3 3.00 If is does not, I will open a bug b1b1b1b1b1b1b1b1 QA NULL CURRENT_TIMESTAMP *** Add Partition on master *** ALTER TABLE t16 PARTITION BY KEY(c1) PARTITIONS 4; INSERT INTO t16 () VALUES(4,1.00,'Replication Rocks',@b1,'Omer'); diff --git a/mysql-test/r/rpl_ndb_log.result b/mysql-test/r/rpl_ndb_log.result index 543af3b9abe..8d443780039 100644 --- a/mysql-test/r/rpl_ndb_log.result +++ b/mysql-test/r/rpl_ndb_log.result @@ -32,16 +32,17 @@ master-bin.000001 # Query 1 # BEGIN master-bin.000001 # Table_map 1 # table_id: # (test.t1) master-bin.000001 # Table_map 1 # table_id: # (mysql.ndb_apply_status) master-bin.000001 # Write_rows 1 # table_id: # +master-bin.000001 # Write_rows 1 # table_id: # master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F master-bin.000001 # Query 1 # COMMIT -show binlog events from 102 limit 1; +show 
binlog events from 106 limit 1; Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Query 1 # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=NDB -show binlog events from 102 limit 2; +show binlog events from 106 limit 2; Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Query 1 # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=NDB master-bin.000001 # Query 1 # BEGIN -show binlog events from 102 limit 2,1; +show binlog events from 106 limit 2,1; Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Table_map 1 # table_id: # (test.t1) flush logs; @@ -55,25 +56,25 @@ flush logs; stop slave; create table t2 (n int)ENGINE=NDB; insert into t2 values (1); -show binlog events; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Format_desc 1 # Server ver: VERSION, Binlog ver: 4 -master-bin.000001 # Query 1 # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=NDB -master-bin.000001 # Query 1 # BEGIN -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Table_map 1 # table_id: # (mysql.ndb_apply_status) -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # COMMIT -master-bin.000001 # Query 1 # use `test`; drop table t1 -master-bin.000001 # Query 1 # use `test`; create table t1 (word char(20) not null)ENGINE=NDB -master-bin.000001 # Query 1 # BEGIN -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Table_map 1 # table_id: # (mysql.ndb_apply_status) -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # COMMIT -master-bin.000001 # Rotate 1 # master-bin.000002;pos=4 +master-bin.000001 # Query # # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=NDB +master-bin.000001 # Query # # BEGIN +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Table_map # # table_id: # (mysql.ndb_apply_status) +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # COMMIT +master-bin.000001 # Query # # use `test`; drop table t1 +master-bin.000001 # Query # # use `test`; create table t1 (word char(20) not null)ENGINE=NDB +master-bin.000001 # Query # # BEGIN +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Table_map # # table_id: # (mysql.ndb_apply_status) +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # COMMIT +master-bin.000001 # Rotate # # master-bin.000002;pos=4 show binlog events in 'master-bin.000002'; Log_name Pos Event_type Server_id End_log_pos Info master-bin.000002 # Format_desc 1 # Server ver: VERSION, Binlog ver: 4 @@ -87,13 +88,13 @@ master-bin.000002 # Write_rows 1 # table_id: # flags: STMT_END_F master-bin.000002 # Query 1 # COMMIT show binary logs; Log_name File_size -master-bin.000001 1702 -master-bin.000002 593 +master-bin.000001 1775 +master-bin.000002 617 start slave; show binary logs; Log_name File_size -slave-bin.000001 1797 -slave-bin.000002 198 +slave-bin.000001 1870 +slave-bin.000002 202 show binlog events in 'slave-bin.000001' from 4; Log_name Pos Event_type 
Server_id End_log_pos Info slave-bin.000001 # Format_desc 2 # Server ver: VERSION, Binlog ver: 4 @@ -110,6 +111,7 @@ slave-bin.000001 # Query 2 # BEGIN slave-bin.000001 # Table_map 2 # table_id: # (test.t1) slave-bin.000001 # Table_map 2 # table_id: # (mysql.ndb_apply_status) slave-bin.000001 # Write_rows 2 # table_id: # +slave-bin.000001 # Write_rows 2 # table_id: # slave-bin.000001 # Write_rows 2 # table_id: # flags: STMT_END_F slave-bin.000001 # Query 2 # COMMIT slave-bin.000001 # Query 1 # use `test`; create table t3 (a int)ENGINE=NDB @@ -126,7 +128,7 @@ slave-bin.000002 # Write_rows 2 # table_id: # flags: STMT_END_F slave-bin.000002 # Query 2 # COMMIT show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 1 master-bin.000002 593 # # master-bin.000002 Yes Yes # 0 0 593 # None 0 No # +# 127.0.0.1 root MASTER_PORT 1 master-bin.000002 617 # # master-bin.000002 Yes Yes # 0 0 617 # None 0 No # show binlog events in 'slave-bin.000005' from 4; ERROR HY000: Error when executing command SHOW BINLOG EVENTS: Could not find target log DROP TABLE t1; @@ -137,11 +139,10 @@ insert into t1 values (NULL, 1); reset master; set insert_id=5; insert into t1 values (NULL, last_insert_id()), (NULL, last_insert_id()); -show binlog events; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Format_desc 1 # Server ver: VERSION, Binlog ver: 4 -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F select * from t1; a b 1 1 diff --git a/mysql-test/r/rpl_ndb_multi.result b/mysql-test/r/rpl_ndb_multi.result index 66819c2c9c8..760114f3639 100644 --- a/mysql-test/r/rpl_ndb_multi.result +++ b/mysql-test/r/rpl_ndb_multi.result @@ -26,11 +26,11 @@ stop slave; SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1) FROM mysql.ndb_binlog_index WHERE epoch = <the_epoch> ; @the_pos:=Position @the_file:=SUBSTRING_INDEX(FILE, '/', -1) -102 master-bin1.000001 +106 master-bin1.000001 CHANGE MASTER TO master_port=<MASTER_PORT1>, master_log_file = 'master-bin1.000001', -master_log_pos = 102 ; +master_log_pos = 106 ; start slave; INSERT INTO t1 VALUES ("row2","will go away",2),("row3","will change",3),("row4","D",4); DELETE FROM t1 WHERE c3 = 1; diff --git a/mysql-test/r/rpl_ndb_sync.result b/mysql-test/r/rpl_ndb_sync.result index 2b9ca24fca0..d05367c5fc0 100644 --- a/mysql-test/r/rpl_ndb_sync.result +++ b/mysql-test/r/rpl_ndb_sync.result @@ -60,11 +60,11 @@ hex(c2) hex(c3) c1 0 1 BCDEF 1 0 CD 0 0 DEFGHIJKL -SELECT @the_epoch:=MAX(epoch) FROM mysql.apply_status; +SELECT @the_epoch:=MAX(epoch) FROM mysql.ndb_apply_status; @the_epoch:=MAX(epoch) <the_epoch> SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1) -FROM mysql.binlog_index WHERE epoch > <the_epoch> ORDER BY epoch ASC LIMIT 1; +FROM mysql.ndb_binlog_index 
WHERE epoch > <the_epoch> ORDER BY epoch ASC LIMIT 1; @the_pos:=Position @the_file:=SUBSTRING_INDEX(FILE, '/', -1) <the_pos> master-bin.000001 CHANGE MASTER TO @@ -89,8 +89,8 @@ hex(c2) hex(c3) c1 DROP DATABASE ndbsynctest; STOP SLAVE; reset master; -select * from mysql.binlog_index; +select * from mysql.ndb_binlog_index; Position File epoch inserts updates deletes schemaops reset slave; -select * from mysql.apply_status; -server_id epoch +select * from mysql.ndb_apply_status; +server_id epoch log_name start_pos end_pos diff --git a/mysql-test/r/rpl_packet.result b/mysql-test/r/rpl_packet.result index a5c9b43cabb..894bc81b08d 100644 --- a/mysql-test/r/rpl_packet.result +++ b/mysql-test/r/rpl_packet.result @@ -15,3 +15,12 @@ select count(*) from `DB_NAME_OF_MAX_LENGTH_AKA_NAME_LEN_64_BYTES_______________ count(*) 1 drop database DB_NAME_OF_MAX_LENGTH_AKA_NAME_LEN_64_BYTES_____________________; +SET @@global.max_allowed_packet=4096; +SET @@global.net_buffer_length=4096; +STOP SLAVE; +START SLAVE; +CREATE TABLe `t1` (`f1` LONGTEXT) ENGINE=MyISAM; +INSERT INTO `t1`(`f1`) VALUES ('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa2048'); +SHOW STATUS LIKE 'Slave_running'; +Variable_name Value +Slave_running OFF diff --git a/mysql-test/r/rpl_rbr_to_sbr.result b/mysql-test/r/rpl_rbr_to_sbr.result index 4b2d129c732..2cb0eafa5b8 100644 --- a/mysql-test/r/rpl_rbr_to_sbr.result +++ b/mysql-test/r/rpl_rbr_to_sbr.result @@ -28,7 +28,7 @@ Master_User root Master_Port MASTER_PORT Connect_Retry 1 Master_Log_File master-bin.000001 -Read_Master_Log_Pos 
450 +Read_Master_Log_Pos 454 Relay_Log_File # Relay_Log_Pos # Relay_Master_Log_File master-bin.000001 @@ -43,7 +43,7 @@ Replicate_Wild_Ignore_Table Last_Errno 0 Last_Error Skip_Counter 0 -Exec_Master_Log_Pos 450 +Exec_Master_Log_Pos 454 Relay_Log_Space # Until_Condition None Until_Log_File diff --git a/mysql-test/r/rpl_rotate_logs.result b/mysql-test/r/rpl_rotate_logs.result index 264f5d224bd..cea84dba1ef 100644 --- a/mysql-test/r/rpl_rotate_logs.result +++ b/mysql-test/r/rpl_rotate_logs.result @@ -16,7 +16,7 @@ create table t1 (s text); insert into t1 values('Could not break slave'),('Tried hard'); show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 60 master-bin.000001 552 # # master-bin.000001 Yes Yes # 0 0 552 # None 0 No # +# 127.0.0.1 root MASTER_PORT 60 master-bin.000001 556 # # master-bin.000001 Yes Yes # 0 0 556 # None 0 No # select * from t1; s Could not break slave @@ -27,9 +27,9 @@ insert into t2 values (34),(67),(123); flush logs; show binary logs; Log_name File_size -master-bin.000001 596 -master-bin.000002 367 -master-bin.000003 102 +master-bin.000001 600 +master-bin.000002 371 +master-bin.000003 106 create table t3 select * from temp_table; select * from t3; a @@ -43,21 +43,21 @@ start slave; purge master logs to 'master-bin.000002'; show master logs; Log_name File_size -master-bin.000002 367 -master-bin.000003 411 +master-bin.000002 371 +master-bin.000003 415 purge binary logs to 'master-bin.000002'; show binary logs; Log_name File_size -master-bin.000002 367 -master-bin.000003 411 +master-bin.000002 371 +master-bin.000003 415 purge master logs before now(); show binary logs; Log_name File_size -master-bin.000003 411 +master-bin.000003 415 insert into t2 values (65); show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 60 master-bin.000003 500 # # master-bin.000003 Yes Yes # 0 0 500 # None 0 No # +# 127.0.0.1 root MASTER_PORT 60 master-bin.000003 504 # # master-bin.000003 Yes Yes # 0 0 504 # None 0 No # select * from t2; m 34 @@ -74,18 +74,18 @@ count(*) create table t4 select * from temp_table; show binary logs; Log_name File_size -master-bin.000003 4189 -master-bin.000004 4194 -master-bin.000005 2036 +master-bin.000003 4193 +master-bin.000004 4198 +master-bin.000005 2040 show master status; File Position Binlog_Do_DB Binlog_Ignore_DB -master-bin.000005 2036 +master-bin.000005 2040 select * from t4; a testing temporary tables part 2 show slave status; Slave_IO_State Master_Host 
Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 60 master-bin.000005 2036 # # master-bin.000005 Yes Yes # 0 0 2036 # None 0 No # +# 127.0.0.1 root MASTER_PORT 60 master-bin.000005 2040 # # master-bin.000005 Yes Yes # 0 0 2040 # None 0 No # lock tables t3 read; select count(*) from t3 where n >= 4; count(*) diff --git a/mysql-test/r/rpl_row_basic_11bugs-master.opt b/mysql-test/r/rpl_row_basic_11bugs-master.opt new file mode 100644 index 00000000000..627becdbfb5 --- /dev/null +++ b/mysql-test/r/rpl_row_basic_11bugs-master.opt @@ -0,0 +1 @@ +--innodb diff --git a/mysql-test/r/rpl_row_basic_11bugs-slave.opt b/mysql-test/r/rpl_row_basic_11bugs-slave.opt new file mode 100644 index 00000000000..627becdbfb5 --- /dev/null +++ b/mysql-test/r/rpl_row_basic_11bugs-slave.opt @@ -0,0 +1 @@ +--innodb diff --git a/mysql-test/r/rpl_row_basic_11bugs.result b/mysql-test/r/rpl_row_basic_11bugs.result index 6f7bd6e3f85..1025b965589 100644 --- a/mysql-test/r/rpl_row_basic_11bugs.result +++ b/mysql-test/r/rpl_row_basic_11bugs.result @@ -24,11 +24,11 @@ SHOW TABLES; Tables_in_test_ignore t2 INSERT INTO t2 VALUES (3,3), (4,4); -SHOW BINLOG EVENTS FROM 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 102 Query 1 195 use `test`; CREATE TABLE t1 (a INT, b INT) -master-bin.000001 195 Table_map 1 235 table_id: # (test.t1) -master-bin.000001 235 Write_rows 1 282 table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a INT, b INT) +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F **** On Slave **** SHOW DATABASES; Database @@ -54,10 +54,10 @@ DELETE FROM t1 WHERE a = 0; UPDATE t1 SET a=99 WHERE a = 0; SHOW BINLOG EVENTS; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 4 Format_desc 1 102 Server ver: SERVER_VERSION, Binlog ver: 4 -master-bin.000001 102 Query 1 188 use `test`; CREATE TABLE t1 (a INT) -master-bin.000001 188 Table_map 1 227 table_id: # (test.t1) -master-bin.000001 227 Write_rows 1 266 table_id: # flags: STMT_END_F +master-bin.000001 4 Format_desc 1 106 Server ver: SERVER_VERSION, Binlog ver: 4 +master-bin.000001 106 Query 1 192 use `test`; CREATE TABLE t1 (a INT) +master-bin.000001 192 Table_map 1 231 table_id: # (test.t1) +master-bin.000001 231 Write_rows 1 270 table_id: # flags: STMT_END_F DROP TABLE t1; ================ Test for BUG#17620 ================ drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; @@ -119,4 +119,102 @@ HEX(a) b SELECT HEX(a),b FROM t1; HEX(a) b 0 2 -DROP TABLE t1; +DROP TABLE IF EXISTS t1; +================ Test for BUG#22583 ================ +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +reset master; +reset slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +start slave; +**** On Master **** +CREATE TABLE t1_myisam (k INT, a BIT(1), b BIT(9)) ENGINE=MYISAM; +CREATE TABLE t1_innodb (k INT, a BIT(1), b BIT(9)) ENGINE=INNODB; +CREATE TABLE t2_myisam (k INT, a BIT(1) NOT 
NULL, b BIT(4) NOT NULL) ENGINE=MYISAM; +CREATE TABLE t2_innodb (k INT, a BIT(1) NOT NULL, b BIT(4) NOT NULL) ENGINE=INNODB; +**** On Slave **** +ALTER TABLE t1_myisam ENGINE=INNODB; +ALTER TABLE t1_innodb ENGINE=MYISAM; +ALTER TABLE t2_myisam ENGINE=INNODB; +ALTER TABLE t2_innodb ENGINE=MYISAM; +**** On Master **** +INSERT INTO t1_myisam VALUES(1, b'0', 257); +INSERT INTO t1_myisam VALUES(2, b'1', 256); +INSERT INTO t1_innodb VALUES(1, b'0', 257); +INSERT INTO t1_innodb VALUES(2, b'1', 256); +SELECT k, HEX(a),HEX(b) FROM t1_myisam; +k HEX(a) HEX(b) +1 0 101 +2 1 100 +SELECT k, HEX(a),HEX(b) FROM t1_innodb; +k HEX(a) HEX(b) +1 0 101 +2 1 100 +INSERT INTO t2_myisam VALUES(1, b'0', 9); +INSERT INTO t2_myisam VALUES(2, b'1', 8); +INSERT INTO t2_innodb VALUES(1, b'0', 9); +INSERT INTO t2_innodb VALUES(2, b'1', 8); +SELECT k, HEX(a),HEX(b) FROM t2_myisam; +k HEX(a) HEX(b) +1 0 9 +2 1 8 +SELECT k, HEX(a),HEX(b) FROM t2_innodb; +k HEX(a) HEX(b) +1 0 9 +2 1 8 +**** On Slave **** +SELECT k, HEX(a),HEX(b) FROM t1_myisam; +k HEX(a) HEX(b) +1 0 101 +2 1 100 +SELECT k, HEX(a),HEX(b) FROM t1_innodb; +k HEX(a) HEX(b) +1 0 101 +2 1 100 +SELECT k, HEX(a),HEX(b) FROM t2_myisam; +k HEX(a) HEX(b) +1 0 9 +2 1 8 +SELECT k, HEX(a),HEX(b) FROM t2_innodb; +k HEX(a) HEX(b) +1 0 9 +2 1 8 +**** On Master **** +UPDATE t1_myisam SET a=0 WHERE k=2; +SELECT k, HEX(a),HEX(b) FROM t1_myisam; +k HEX(a) HEX(b) +1 0 101 +2 0 100 +UPDATE t1_innodb SET a=0 WHERE k=2; +SELECT k, HEX(a),HEX(b) FROM t1_innodb; +k HEX(a) HEX(b) +1 0 101 +2 0 100 +UPDATE t2_myisam SET a=0 WHERE k=2; +SELECT k, HEX(a),HEX(b) FROM t2_myisam; +k HEX(a) HEX(b) +1 0 9 +2 0 8 +UPDATE t2_innodb SET a=0 WHERE k=2; +SELECT k, HEX(a),HEX(b) FROM t2_innodb; +k HEX(a) HEX(b) +1 0 9 +2 0 8 +**** On Slave **** +SELECT k, HEX(a),HEX(b) FROM t1_myisam; +k HEX(a) HEX(b) +1 0 101 +2 0 100 +SELECT k, HEX(a),HEX(b) FROM t1_innodb; +k HEX(a) HEX(b) +1 0 101 +2 0 100 +SELECT k, HEX(a),HEX(b) FROM t2_myisam; +k HEX(a) HEX(b) +1 0 9 +2 0 8 +SELECT k, HEX(a),HEX(b) FROM t2_innodb; +k HEX(a) HEX(b) +1 0 9 +2 0 8 +**** On Master **** +DROP TABLE IF EXISTS t1_myisam, t1_innodb, t2_myisam, t2_innodb; diff --git a/mysql-test/r/rpl_row_charset.result b/mysql-test/r/rpl_row_charset.result index 79cf75c8cc1..e51f3e57d1f 100644 --- a/mysql-test/r/rpl_row_charset.result +++ b/mysql-test/r/rpl_row_charset.result @@ -109,39 +109,39 @@ a b 1 cp850_general_ci drop database mysqltest2; drop database mysqltest3; -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # drop database if exists mysqltest2 -master-bin.000001 # Query 1 # drop database if exists mysqltest3 -master-bin.000001 # Query 1 # create database mysqltest2 character set latin2 -master-bin.000001 # Query 1 # create database mysqltest3 -master-bin.000001 # Query 1 # drop database mysqltest3 -master-bin.000001 # Query 1 # create database mysqltest3 -master-bin.000001 # Query 1 # use `mysqltest2`; create table t1 (a int auto_increment primary key, b varchar(100)) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest2.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest2.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest2.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest2.t1) -master-bin.000001 # 
Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest2.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest2`; truncate table t1 -master-bin.000001 # Table_map 1 # table_id: # (mysqltest2.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest2.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest2.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest2.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest2`; truncate table t1 -master-bin.000001 # Table_map 1 # table_id: # (mysqltest2.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # drop database mysqltest2 -master-bin.000001 # Query 1 # drop database mysqltest3 +master-bin.000001 # Query # # drop database if exists mysqltest2 +master-bin.000001 # Query # # drop database if exists mysqltest3 +master-bin.000001 # Query # # create database mysqltest2 character set latin2 +master-bin.000001 # Query # # create database mysqltest3 +master-bin.000001 # Query # # drop database mysqltest3 +master-bin.000001 # Query # # create database mysqltest3 +master-bin.000001 # Query # # use `mysqltest2`; create table t1 (a int auto_increment primary key, b varchar(100)) +master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest2`; truncate table t1 +master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest2`; truncate table t1 +master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # drop database mysqltest2 +master-bin.000001 # Query # # drop database mysqltest3 select "--- --global--" as ""; --- --global-- diff --git a/mysql-test/r/rpl_row_create_table.result b/mysql-test/r/rpl_row_create_table.result index 8f587fb5796..e76ce5b962d 100644 --- a/mysql-test/r/rpl_row_create_table.result +++ b/mysql-test/r/rpl_row_create_table.result @@ -8,30 +8,30 @@ CREATE TABLE t1 (a INT, b INT); CREATE TABLE t2 (a INT, b INT) ENGINE=Merge; CREATE TABLE t3 (a INT, b INT) CHARSET=utf8; CREATE TABLE t4 (a INT, b INT) 
ENGINE=Merge CHARSET=utf8; -SHOW BINLOG EVENTS FROM 212; +SHOW BINLOG EVENTS FROM 216; Log_name # -Pos 212 +Pos 216 Event_type Query Server_id # -End_log_pos # +End_log_pos 309 Info use `test`; CREATE TABLE t1 (a INT, b INT) Log_name # -Pos 305 +Pos 309 Event_type Query Server_id # -End_log_pos # +End_log_pos 415 Info use `test`; CREATE TABLE t2 (a INT, b INT) ENGINE=Merge Log_name # -Pos 411 +Pos 415 Event_type Query Server_id # -End_log_pos # +End_log_pos 521 Info use `test`; CREATE TABLE t3 (a INT, b INT) CHARSET=utf8 Log_name # -Pos 517 +Pos 521 Event_type Query Server_id # -End_log_pos # +End_log_pos 640 Info use `test`; CREATE TABLE t4 (a INT, b INT) ENGINE=Merge CHARSET=utf8 **** On Master **** SHOW CREATE TABLE t1; @@ -127,7 +127,7 @@ NULL 5 10 NULL 6 12 CREATE TABLE t7 (UNIQUE(b)) SELECT a,b FROM tt3; ERROR 23000: Duplicate entry '2' for key 'b' -SHOW BINLOG EVENTS FROM 1118; +SHOW BINLOG EVENTS FROM 1098; Log_name Pos Event_type Server_id End_log_pos Info CREATE TABLE t7 (a INT, b INT UNIQUE); INSERT INTO t7 SELECT a,b FROM tt3; @@ -137,11 +137,11 @@ a b 1 2 2 4 3 6 -SHOW BINLOG EVENTS FROM 1118; +SHOW BINLOG EVENTS FROM 1098; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 1118 Query 1 1218 use `test`; CREATE TABLE t7 (a INT, b INT UNIQUE) -master-bin.000001 1218 Table_map 1 1258 table_id: # (test.t7) -master-bin.000001 1258 Write_rows 1 1314 table_id: # flags: STMT_END_F +# 1098 Query # 1198 use `test`; CREATE TABLE t7 (a INT, b INT UNIQUE) +# 1198 Table_map # 1238 table_id: # (test.t7) +# 1238 Write_rows # 1294 table_id: # flags: STMT_END_F SELECT * FROM t7 ORDER BY a,b; a b 1 2 @@ -154,10 +154,10 @@ INSERT INTO t7 SELECT a,b FROM tt4; ROLLBACK; Warnings: Warning 1196 Some non-transactional changed tables couldn't be rolled back -SHOW BINLOG EVENTS FROM 1314; +SHOW BINLOG EVENTS FROM 1294; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 1314 Table_map 1 1354 table_id: # (test.t7) -master-bin.000001 1354 Write_rows 1 1410 table_id: # flags: STMT_END_F +# 1294 Table_map # 1334 table_id: # (test.t7) +# 1334 Write_rows # 1390 table_id: # flags: STMT_END_F SELECT * FROM t7 ORDER BY a,b; a b 1 2 @@ -192,10 +192,10 @@ Create Table CREATE TABLE `t9` ( `a` int(11) DEFAULT NULL, `b` int(11) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 -SHOW BINLOG EVENTS FROM 1410; +SHOW BINLOG EVENTS FROM 1390; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 1410 Query 1 1496 use `test`; CREATE TABLE t8 LIKE t4 -master-bin.000001 1496 Query 1 1635 use `test`; CREATE TABLE `t9` ( +# 1390 Query # 1476 use `test`; CREATE TABLE t8 LIKE t4 +# 1476 Query # 1615 use `test`; CREATE TABLE `t9` ( `a` int(11) DEFAULT NULL, `b` int(11) DEFAULT NULL ) @@ -274,33 +274,33 @@ a 3 SHOW BINLOG EVENTS; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 4 Format_desc 1 102 Server ver: #, Binlog ver: # -master-bin.000001 102 Query 1 188 use `test`; CREATE TABLE t1 (a INT) -master-bin.000001 188 Table_map 1 227 table_id: # (test.t1) -master-bin.000001 227 Write_rows 1 271 table_id: # flags: STMT_END_F -master-bin.000001 271 Query 1 339 use `test`; BEGIN -master-bin.000001 339 Query 1 125 use `test`; CREATE TABLE `t2` ( +# 4 Format_desc # 106 Server ver: #, Binlog ver: # +# 106 Query # 192 use `test`; CREATE TABLE t1 (a INT) +# 192 Table_map # 231 table_id: # (test.t1) +# 231 Write_rows # 275 table_id: # flags: STMT_END_F +# 275 Query # 343 use `test`; BEGIN +# 343 Query # 125 use `test`; CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL ) 
ENGINE=InnoDB -master-bin.000001 464 Table_map 1 164 table_id: # (test.t2) -master-bin.000001 503 Write_rows 1 208 table_id: # flags: STMT_END_F -master-bin.000001 547 Xid 1 574 COMMIT /* XID */ -master-bin.000001 574 Query 1 642 use `test`; BEGIN -master-bin.000001 642 Query 1 125 use `test`; CREATE TABLE `t3` ( +# 468 Table_map # 164 table_id: # (test.t2) +# 507 Write_rows # 208 table_id: # flags: STMT_END_F +# 551 Xid # 578 COMMIT /* XID */ +# 578 Query # 646 use `test`; BEGIN +# 646 Query # 125 use `test`; CREATE TABLE `t3` ( `a` int(11) DEFAULT NULL ) ENGINE=InnoDB -master-bin.000001 767 Table_map 1 164 table_id: # (test.t3) -master-bin.000001 806 Write_rows 1 208 table_id: # flags: STMT_END_F -master-bin.000001 850 Xid 1 877 COMMIT /* XID */ -master-bin.000001 877 Query 1 945 use `test`; BEGIN -master-bin.000001 945 Query 1 125 use `test`; CREATE TABLE `t4` ( +# 771 Table_map # 164 table_id: # (test.t3) +# 810 Write_rows # 208 table_id: # flags: STMT_END_F +# 854 Xid # 881 COMMIT /* XID */ +# 881 Query # 949 use `test`; BEGIN +# 949 Query # 125 use `test`; CREATE TABLE `t4` ( `a` int(11) DEFAULT NULL ) ENGINE=InnoDB -master-bin.000001 1070 Table_map 1 164 table_id: # (test.t4) -master-bin.000001 1109 Write_rows 1 208 table_id: # flags: STMT_END_F -master-bin.000001 1153 Xid 1 1180 COMMIT /* XID */ -master-bin.000001 1180 Table_map 1 1219 table_id: # (test.t1) -master-bin.000001 1219 Write_rows 1 1263 table_id: # flags: STMT_END_F +# 1074 Table_map # 164 table_id: # (test.t4) +# 1113 Write_rows # 208 table_id: # flags: STMT_END_F +# 1157 Xid # 1184 COMMIT /* XID */ +# 1184 Table_map # 1223 table_id: # (test.t1) +# 1223 Write_rows # 1267 table_id: # flags: STMT_END_F SHOW TABLES; Tables_in_test t1 @@ -365,17 +365,17 @@ a 9 SHOW BINLOG EVENTS; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 4 Format_desc 1 102 Server ver: #, Binlog ver: # -master-bin.000001 102 Query 1 188 use `test`; CREATE TABLE t1 (a INT) -master-bin.000001 188 Table_map 1 227 table_id: # (test.t1) -master-bin.000001 227 Write_rows 1 271 table_id: # flags: STMT_END_F -master-bin.000001 271 Query 1 371 use `test`; CREATE TABLE t2 (a INT) ENGINE=INNODB -master-bin.000001 371 Query 1 439 use `test`; BEGIN -master-bin.000001 439 Table_map 1 39 table_id: # (test.t2) -master-bin.000001 478 Write_rows 1 83 table_id: # flags: STMT_END_F -master-bin.000001 522 Table_map 1 122 table_id: # (test.t2) -master-bin.000001 561 Write_rows 1 161 table_id: # flags: STMT_END_F -master-bin.000001 600 Xid 1 627 COMMIT /* XID */ +# 4 Format_desc # 106 Server ver: #, Binlog ver: # +# 106 Query # 192 use `test`; CREATE TABLE t1 (a INT) +# 192 Table_map # 231 table_id: # (test.t1) +# 231 Write_rows # 275 table_id: # flags: STMT_END_F +# 275 Query # 375 use `test`; CREATE TABLE t2 (a INT) ENGINE=INNODB +# 375 Query # 443 use `test`; BEGIN +# 443 Table_map # 39 table_id: # (test.t2) +# 482 Write_rows # 83 table_id: # flags: STMT_END_F +# 526 Table_map # 122 table_id: # (test.t2) +# 565 Write_rows # 161 table_id: # flags: STMT_END_F +# 604 Xid # 631 COMMIT /* XID */ SELECT * FROM t2 ORDER BY a; a 1 @@ -394,10 +394,10 @@ INSERT INTO t2 SELECT a+2 FROM tt2; ROLLBACK; SELECT * FROM t2 ORDER BY a; a -SHOW BINLOG EVENTS FROM 627; +SHOW BINLOG EVENTS FROM 631; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 627 Query 1 80 use `test`; TRUNCATE TABLE t2 -master-bin.000001 707 Xid 1 734 COMMIT /* XID */ +# 631 Query # 80 use `test`; TRUNCATE TABLE t2 +# 711 Xid # 738 COMMIT /* XID */ SELECT * FROM t2 ORDER BY 
a; a DROP TABLE t1,t2; diff --git a/mysql-test/r/rpl_row_delayed_ins.result b/mysql-test/r/rpl_row_delayed_ins.result index 21b251db193..800a39bd567 100644 --- a/mysql-test/r/rpl_row_delayed_ins.result +++ b/mysql-test/r/rpl_row_delayed_ins.result @@ -14,17 +14,16 @@ a 1 2 3 -show binlog events; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 4 Format_desc 1 102 Server ver: VERSION, Binlog ver: 4 -master-bin.000001 102 Query 1 222 use `test`; create table t1(a int not null primary key) engine=myisam -master-bin.000001 222 Table_map 1 261 table_id: # (test.t1) -master-bin.000001 261 Write_rows 1 295 table_id: # flags: STMT_END_F -master-bin.000001 295 Table_map 1 334 table_id: # (test.t1) -master-bin.000001 334 Write_rows 1 368 table_id: # flags: STMT_END_F -master-bin.000001 368 Table_map 1 407 table_id: # (test.t1) -master-bin.000001 407 Write_rows 1 441 table_id: # flags: STMT_END_F -master-bin.000001 441 Query 1 516 use `test`; flush tables +master-bin.000001 # Query # # use `test`; create table t1(a int not null primary key) engine=myisam +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `test`; flush tables SELECT * FROM t1 ORDER BY a; a 1 diff --git a/mysql-test/r/rpl_row_drop.result b/mysql-test/r/rpl_row_drop.result index 4ef21884fda..89654ebf165 100644 --- a/mysql-test/r/rpl_row_drop.result +++ b/mysql-test/r/rpl_row_drop.result @@ -43,10 +43,10 @@ t2 DROP TABLE t1,t2; SHOW BINLOG EVENTS; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 4 Format_desc 1 102 Server ver: VERSION, Binlog ver: 4 -master-bin.000001 102 Query 1 188 use `test`; CREATE TABLE t1 (a int) -master-bin.000001 188 Query 1 274 use `test`; CREATE TABLE t2 (a int) -master-bin.000001 274 Query 1 378 use `test`; DROP TABLE `t1` /* generated by server */ +master-bin.000001 4 Format_desc 1 106 Server ver: VERSION, Binlog ver: 4 +master-bin.000001 106 Query 1 192 use `test`; CREATE TABLE t1 (a int) +master-bin.000001 192 Query 1 278 use `test`; CREATE TABLE t2 (a int) +master-bin.000001 278 Query 1 382 use `test`; DROP TABLE `t1` /* generated by server */ SHOW TABLES; Tables_in_test t2 diff --git a/mysql-test/r/rpl_row_flsh_tbls.result b/mysql-test/r/rpl_row_flsh_tbls.result index e2352b8605b..942a6b83bf6 100644 --- a/mysql-test/r/rpl_row_flsh_tbls.result +++ b/mysql-test/r/rpl_row_flsh_tbls.result @@ -12,13 +12,13 @@ create table t4 (a int); insert into t4 select * from t3; rename table t1 to t5, t2 to t1; flush no_write_to_binlog tables; -SHOW BINLOG EVENTS FROM 615 ; +SHOW BINLOG EVENTS FROM 619 ; Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Query 1 # use `test`; rename table t1 to t5, t2 to t1 select * from t3; a flush tables; -SHOW BINLOG EVENTS FROM 615 ; +SHOW BINLOG EVENTS FROM 619 ; Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Query 1 # use `test`; rename table t1 to t5, t2 to t1 master-bin.000001 # Query 1 # use `test`; flush tables diff --git a/mysql-test/r/rpl_row_inexist_tbl.result b/mysql-test/r/rpl_row_inexist_tbl.result index 5f5a4556d76..7d0d7504ee7 100644 --- a/mysql-test/r/rpl_row_inexist_tbl.result +++ 
b/mysql-test/r/rpl_row_inexist_tbl.result @@ -39,7 +39,7 @@ Replicate_Wild_Ignore_Table Last_Errno 1146 Last_Error Error 'Table 'test.t1' doesn't exist' on opening table `test`.`t1` Skip_Counter 0 -Exec_Master_Log_Pos 519 +Exec_Master_Log_Pos 524 Relay_Log_Space # Until_Condition None Until_Log_File diff --git a/mysql-test/r/rpl_row_log.result b/mysql-test/r/rpl_row_log.result index c6b85e7b329..5d252d72bd1 100644 --- a/mysql-test/r/rpl_row_log.result +++ b/mysql-test/r/rpl_row_log.result @@ -26,14 +26,14 @@ master-bin.000001 # Query 1 # use `test`; drop table t1 master-bin.000001 # Query 1 # use `test`; create table t1 (word char(20) not null)ENGINE=MyISAM master-bin.000001 # Table_map 1 # table_id: # (test.t1) master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -show binlog events from 102 limit 1; +show binlog events from 106 limit 1; Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Query 1 # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=MyISAM -show binlog events from 102 limit 2; +show binlog events from 106 limit 2; Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Query 1 # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=MyISAM master-bin.000001 # Table_map 1 # table_id: # (test.t1) -show binlog events from 102 limit 2,1; +show binlog events from 106 limit 2,1; Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F flush logs; @@ -47,17 +47,16 @@ flush logs; stop slave; create table t2 (n int)ENGINE=MyISAM; insert into t2 values (1); -show binlog events; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Format_desc 1 # Server ver: VERSION, Binlog ver: 4 -master-bin.000001 # Query 1 # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=MyISAM -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `test`; drop table t1 -master-bin.000001 # Query 1 # use `test`; create table t1 (word char(20) not null)ENGINE=MyISAM -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Rotate 1 # master-bin.000002;pos=4 +master-bin.000001 # Query # # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=MyISAM +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `test`; drop table t1 +master-bin.000001 # Query # # use `test`; create table t1 (word char(20) not null)ENGINE=MyISAM +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Rotate # # master-bin.000002;pos=4 show binlog events in 'master-bin.000002'; Log_name Pos Event_type Server_id End_log_pos Info master-bin.000002 # Format_desc 1 # Server ver: VERSION, Binlog ver: 4 @@ -67,13 +66,13 @@ master-bin.000002 # Table_map 1 # table_id: # (test.t2) master-bin.000002 # Write_rows 1 # table_id: # flags: STMT_END_F show binary logs; Log_name File_size -master-bin.000001 1256 -master-bin.000002 373 +master-bin.000001 1260 +master-bin.000002 377 start slave; show binary logs; Log_name File_size -slave-bin.000001 1354 -slave-bin.000002 274 +slave-bin.000001 1358 +slave-bin.000002 278 show 
binlog events in 'slave-bin.000001' from 4; Log_name Pos Event_type Server_id End_log_pos Info slave-bin.000001 # Format_desc 2 # Server ver: VERSION, Binlog ver: 4 @@ -94,7 +93,7 @@ slave-bin.000002 # Table_map 1 # table_id: # (test.t2) slave-bin.000002 # Write_rows 1 # table_id: # flags: STMT_END_F show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 1 master-bin.000002 373 # # master-bin.000002 Yes Yes # 0 0 373 # None 0 No # +# 127.0.0.1 root MASTER_PORT 1 master-bin.000002 377 # # master-bin.000002 Yes Yes # 0 0 377 # None 0 No # show binlog events in 'slave-bin.000005' from 4; ERROR HY000: Error when executing command SHOW BINLOG EVENTS: Could not find target log DROP TABLE t1; @@ -105,11 +104,10 @@ insert into t1 values (NULL, 1); reset master; set insert_id=5; insert into t1 values (NULL, last_insert_id()), (NULL, last_insert_id()); -show binlog events; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Format_desc 1 # Server ver: VERSION, Binlog ver: 4 -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F select * from t1; a b 1 1 diff --git a/mysql-test/r/rpl_row_log_innodb.result b/mysql-test/r/rpl_row_log_innodb.result index bc13047f973..ff1fabb35cb 100644 --- a/mysql-test/r/rpl_row_log_innodb.result +++ b/mysql-test/r/rpl_row_log_innodb.result @@ -28,14 +28,14 @@ master-bin.000001 # Query 1 # use `test`; create table t1 (word char(20) not nul master-bin.000001 # Table_map 1 # table_id: # (test.t1) master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F master-bin.000001 # Xid 1 # COMMIT /* XID */ -show binlog events from 102 limit 1; +show binlog events from 106 limit 1; Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Query 1 # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=InnoDB -show binlog events from 102 limit 2; +show binlog events from 106 limit 2; Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Query 1 # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=InnoDB master-bin.000001 # Table_map 1 # table_id: # (test.t1) -show binlog events from 102 limit 2,1; +show binlog events from 106 limit 2,1; Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F flush logs; @@ -49,19 +49,18 @@ flush logs; stop slave; create table t2 (n int)ENGINE=InnoDB; insert into t2 values (1); -show binlog events; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Format_desc 1 # Server ver: VERSION, Binlog ver: 4 -master-bin.000001 # Query 1 # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=InnoDB -master-bin.000001 # Table_map 1 # table_id: 
# (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Xid 1 # COMMIT /* XID */ -master-bin.000001 # Query 1 # use `test`; drop table t1 -master-bin.000001 # Query 1 # use `test`; create table t1 (word char(20) not null)ENGINE=InnoDB -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Xid 1 # COMMIT /* XID */ -master-bin.000001 # Rotate 1 # master-bin.000002;pos=4 +master-bin.000001 # Query # # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=InnoDB +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Query # # use `test`; drop table t1 +master-bin.000001 # Query # # use `test`; create table t1 (word char(20) not null)ENGINE=InnoDB +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Rotate # # master-bin.000002;pos=4 show binlog events in 'master-bin.000002'; Log_name Pos Event_type Server_id End_log_pos Info master-bin.000002 # Format_desc 1 # Server ver: VERSION, Binlog ver: 4 @@ -72,13 +71,13 @@ master-bin.000002 # Write_rows 1 # table_id: # flags: STMT_END_F master-bin.000002 # Xid 1 # COMMIT /* XID */ show binary logs; Log_name File_size -master-bin.000001 1310 -master-bin.000002 400 +master-bin.000001 1314 +master-bin.000002 404 start slave; show binary logs; Log_name File_size -slave-bin.000001 1408 -slave-bin.000002 301 +slave-bin.000001 1412 +slave-bin.000002 305 show binlog events in 'slave-bin.000001' from 4; Log_name Pos Event_type Server_id End_log_pos Info slave-bin.000001 # Format_desc 2 # Server ver: VERSION, Binlog ver: 4 @@ -102,7 +101,7 @@ slave-bin.000002 # Write_rows 1 # table_id: # flags: STMT_END_F slave-bin.000002 # Xid 1 # COMMIT /* XID */ show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 1 master-bin.000002 400 # # master-bin.000002 Yes Yes # 0 0 400 # None 0 No # +# 127.0.0.1 root MASTER_PORT 1 master-bin.000002 404 # # master-bin.000002 Yes Yes # 0 0 404 # None 0 No # show binlog events in 'slave-bin.000005' from 4; ERROR HY000: Error when executing command SHOW BINLOG EVENTS: Could not find target log DROP TABLE t1; @@ -113,11 +112,10 @@ insert into t1 values (NULL, 1); reset master; set insert_id=5; insert into t1 values (NULL, last_insert_id()), (NULL, last_insert_id()); -show binlog events; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Format_desc 1 # Server ver: VERSION, Binlog ver: 4 -master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # 
flags: STMT_END_F select * from t1; a b 1 1 diff --git a/mysql-test/r/rpl_row_max_relay_size.result b/mysql-test/r/rpl_row_max_relay_size.result index 8bb10ffb080..4c0f923c323 100644 --- a/mysql-test/r/rpl_row_max_relay_size.result +++ b/mysql-test/r/rpl_row_max_relay_size.result @@ -30,7 +30,7 @@ Master_User root Master_Port MASTER_PORT Connect_Retry 1 Master_Log_File master-bin.000001 -Read_Master_Log_Pos 58664 +Read_Master_Log_Pos 58668 Relay_Log_File # Relay_Log_Pos # Relay_Master_Log_File master-bin.000001 @@ -45,7 +45,7 @@ Replicate_Wild_Ignore_Table Last_Errno 0 Last_Error Skip_Counter 0 -Exec_Master_Log_Pos 58664 +Exec_Master_Log_Pos 58668 Relay_Log_Space # Until_Condition None Until_Log_File @@ -73,7 +73,7 @@ Master_User root Master_Port MASTER_PORT Connect_Retry 1 Master_Log_File master-bin.000001 -Read_Master_Log_Pos 58664 +Read_Master_Log_Pos 58668 Relay_Log_File # Relay_Log_Pos # Relay_Master_Log_File master-bin.000001 @@ -88,7 +88,7 @@ Replicate_Wild_Ignore_Table Last_Errno 0 Last_Error Skip_Counter 0 -Exec_Master_Log_Pos 58664 +Exec_Master_Log_Pos 58668 Relay_Log_Space # Until_Condition None Until_Log_File @@ -116,7 +116,7 @@ Master_User root Master_Port MASTER_PORT Connect_Retry 1 Master_Log_File master-bin.000001 -Read_Master_Log_Pos 58664 +Read_Master_Log_Pos 58668 Relay_Log_File # Relay_Log_Pos # Relay_Master_Log_File master-bin.000001 @@ -131,7 +131,7 @@ Replicate_Wild_Ignore_Table Last_Errno 0 Last_Error Skip_Counter 0 -Exec_Master_Log_Pos 58664 +Exec_Master_Log_Pos 58668 Relay_Log_Space # Until_Condition None Until_Log_File @@ -197,7 +197,7 @@ Master_User root Master_Port MASTER_PORT Connect_Retry 1 Master_Log_File master-bin.000001 -Read_Master_Log_Pos 58750 +Read_Master_Log_Pos 58754 Relay_Log_File # Relay_Log_Pos # Relay_Master_Log_File master-bin.000001 @@ -212,7 +212,7 @@ Replicate_Wild_Ignore_Table Last_Errno 0 Last_Error Skip_Counter 0 -Exec_Master_Log_Pos 58750 +Exec_Master_Log_Pos 58754 Relay_Log_Space # Until_Condition None Until_Log_File @@ -236,7 +236,7 @@ Master_User root Master_Port MASTER_PORT Connect_Retry 1 Master_Log_File master-bin.000001 -Read_Master_Log_Pos 58826 +Read_Master_Log_Pos 58830 Relay_Log_File # Relay_Log_Pos # Relay_Master_Log_File master-bin.000001 @@ -251,7 +251,7 @@ Replicate_Wild_Ignore_Table Last_Errno 0 Last_Error Skip_Counter 0 -Exec_Master_Log_Pos 58826 +Exec_Master_Log_Pos 58830 Relay_Log_Space # Until_Condition None Until_Log_File @@ -266,7 +266,7 @@ Seconds_Behind_Master # flush logs; show master status; File master-bin.000002 -Position 102 +Position 106 Binlog_Do_DB <Binlog_Ignore_DB> Binlog_Ignore_DB set global max_binlog_size= @my_max_binlog_size; diff --git a/mysql-test/r/rpl_row_reset_slave.result b/mysql-test/r/rpl_row_reset_slave.result index 57fc95708e5..657dad30266 100644 --- a/mysql-test/r/rpl_row_reset_slave.result +++ b/mysql-test/r/rpl_row_reset_slave.result @@ -6,12 +6,12 @@ drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; start slave; show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 
127.0.0.1 root MASTER_PORT 1 master-bin.000001 102 # # master-bin.000001 Yes Yes # 0 0 102 # None 0 No # +# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 106 # # master-bin.000001 Yes Yes # 0 0 106 # None 0 No # stop slave; change master to master_user='test'; show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 test MASTER_PORT 1 master-bin.000001 102 # # master-bin.000001 No No # 0 0 102 # None 0 No # +# 127.0.0.1 test MASTER_PORT 1 master-bin.000001 106 # # master-bin.000001 No No # 0 0 106 # None 0 No # reset slave; show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master @@ -19,7 +19,7 @@ Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File start slave; show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 102 # # master-bin.000001 Yes Yes # 0 0 102 # None 0 No # +# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 106 # # master-bin.000001 Yes Yes # 0 0 106 # None 0 No # stop slave; reset slave; start slave; diff --git a/mysql-test/r/rpl_row_tabledefs_2myisam.result b/mysql-test/r/rpl_row_tabledefs_2myisam.result index d62650fa142..10001c736ac 100644 --- a/mysql-test/r/rpl_row_tabledefs_2myisam.result +++ b/mysql-test/r/rpl_row_tabledefs_2myisam.result @@ -122,7 +122,7 @@ Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table -Last_Errno 1364 +Last_Errno 1105 Last_Error Error in Write_rows event: error during transaction execution on table test.t1_nodef Skip_Counter 0 Exec_Master_Log_Pos # diff --git a/mysql-test/r/rpl_row_tabledefs_3innodb.result b/mysql-test/r/rpl_row_tabledefs_3innodb.result index 0c3bb734e95..6bb98afd4c4 100644 --- a/mysql-test/r/rpl_row_tabledefs_3innodb.result +++ b/mysql-test/r/rpl_row_tabledefs_3innodb.result @@ -122,7 +122,7 @@ Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table -Last_Errno 1364 +Last_Errno 1105 Last_Error Error in Write_rows 
event: error during transaction execution on table test.t1_nodef Skip_Counter 0 Exec_Master_Log_Pos # diff --git a/mysql-test/r/rpl_row_until.result b/mysql-test/r/rpl_row_until.result index 8d4b0d6b591..cffcf12a31b 100644 --- a/mysql-test/r/rpl_row_until.result +++ b/mysql-test/r/rpl_row_until.result @@ -21,7 +21,7 @@ n 4 show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_MYPORT 1 master-bin.000001 740 slave-relay-bin.000004 # master-bin.000001 # No 0 0 311 # Master master-bin.000001 311 No # +# 127.0.0.1 root MASTER_MYPORT 1 master-bin.000001 744 slave-relay-bin.000004 # master-bin.000001 # No 0 0 315 # Master master-bin.000001 311 No # start slave until master_log_file='master-no-such-bin.000001', master_log_pos=291; select * from t1; n @@ -31,7 +31,7 @@ n 4 show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_MYPORT 1 master-bin.000001 740 slave-relay-bin.000004 # master-bin.000001 # No 0 0 311 # Master master-no-such-bin.000001 291 No # +# 127.0.0.1 root MASTER_MYPORT 1 master-bin.000001 744 slave-relay-bin.000004 # master-bin.000001 # No 0 0 315 # Master master-no-such-bin.000001 291 No # start slave until relay_log_file='slave-relay-bin.000004', relay_log_pos=728; select * from t2; n @@ -39,13 +39,13 @@ n 2 show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_MYPORT 1 master-bin.000001 740 slave-relay-bin.000004 # master-bin.000001 # No 0 0 586 # Relay slave-relay-bin.000004 728 No # +# 127.0.0.1 root MASTER_MYPORT 1 master-bin.000001 744 slave-relay-bin.000004 # master-bin.000001 # No 0 0 590 # Relay slave-relay-bin.000004 728 No # start slave; stop slave; start slave until master_log_file='master-bin.000001', master_log_pos=740; show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB 
Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_MYPORT 1 master-bin.000001 740 slave-relay-bin.000004 # master-bin.000001 Yes No 0 0 740 # Master master-bin.000001 740 No # +# 127.0.0.1 root MASTER_MYPORT 1 master-bin.000001 744 slave-relay-bin.000004 # master-bin.000001 Yes No 0 0 744 # Master master-bin.000001 740 No # start slave until master_log_file='master-bin', master_log_pos=561; ERROR HY000: Incorrect parameter or combination of parameters for START SLAVE UNTIL start slave until master_log_file='master-bin.000001', master_log_pos=561, relay_log_pos=12; diff --git a/mysql-test/r/rpl_server_id1.result b/mysql-test/r/rpl_server_id1.result index c94a7748fcd..3f0c1a79de3 100644 --- a/mysql-test/r/rpl_server_id1.result +++ b/mysql-test/r/rpl_server_id1.result @@ -10,7 +10,7 @@ stop slave; change master to master_port=SLAVE_PORT; show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master - 127.0.0.1 root SLAVE_PORT 1 4 slave-relay-bin.000001 4 No No # # 0 0 0 102 None 0 No NULL + 127.0.0.1 root SLAVE_PORT 1 4 slave-relay-bin.000001 4 No No # # 0 0 0 106 None 0 No NULL start slave; insert into t1 values (1); show status like "slave_running"; diff --git a/mysql-test/r/rpl_server_id2.result b/mysql-test/r/rpl_server_id2.result index 72db862040e..60550aba98e 100644 --- a/mysql-test/r/rpl_server_id2.result +++ b/mysql-test/r/rpl_server_id2.result @@ -10,7 +10,7 @@ stop slave; change master to master_port=SLAVE_PORT; show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master - 127.0.0.1 root SLAVE_PORT 1 4 slave-relay-bin.000001 4 No No # 0 0 0 102 None 0 No NULL + 127.0.0.1 root SLAVE_PORT 1 4 slave-relay-bin.000001 4 No No # 0 0 0 106 None 0 No NULL start slave; insert into t1 values (1); select * from t1; diff --git a/mysql-test/r/rpl_sp.result b/mysql-test/r/rpl_sp.result index ea07a86d009..6dc3be94e32 100644 --- a/mysql-test/r/rpl_sp.result +++ b/mysql-test/r/rpl_sp.result @@ -381,7 +381,7 @@ return 0; end| use mysqltest; set @a:= mysqltest2.f1(); -show binlog events in 'master-bin.000001' from 102; +show binlog events in 'master-bin.000001' from 106; Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Query 1 # drop 
database if exists mysqltest1 master-bin.000001 # Query 1 # create database mysqltest1 diff --git a/mysql-test/r/rpl_stm_charset.result b/mysql-test/r/rpl_stm_charset.result index f1691608bc7..fd9c40843d5 100644 --- a/mysql-test/r/rpl_stm_charset.result +++ b/mysql-test/r/rpl_stm_charset.result @@ -103,40 +103,40 @@ a b 1 cp850_general_ci drop database mysqltest2; drop database mysqltest3; -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # drop database if exists mysqltest2 -master-bin.000001 # Query 1 # drop database if exists mysqltest3 -master-bin.000001 # Query 1 # create database mysqltest2 character set latin2 -master-bin.000001 # Query 1 # create database mysqltest3 -master-bin.000001 # Query 1 # drop database mysqltest3 -master-bin.000001 # Query 1 # create database mysqltest3 -master-bin.000001 # Query 1 # use `mysqltest2`; create table t1 (a int auto_increment primary key, b varchar(100)) -master-bin.000001 # Intvar 1 # INSERT_ID=1 -master-bin.000001 # Query 1 # use `mysqltest2`; insert into t1 (b) values(@@character_set_server) -master-bin.000001 # Intvar 1 # INSERT_ID=2 -master-bin.000001 # Query 1 # use `mysqltest2`; insert into t1 (b) values(@@collation_server) -master-bin.000001 # Intvar 1 # INSERT_ID=3 -master-bin.000001 # Query 1 # use `mysqltest2`; insert into t1 (b) values(@@character_set_client) -master-bin.000001 # Intvar 1 # INSERT_ID=4 -master-bin.000001 # Query 1 # use `mysqltest2`; insert into t1 (b) values(@@character_set_connection) -master-bin.000001 # Intvar 1 # INSERT_ID=5 -master-bin.000001 # Query 1 # use `mysqltest2`; insert into t1 (b) values(@@collation_connection) -master-bin.000001 # Query 1 # use `mysqltest2`; truncate table t1 -master-bin.000001 # Intvar 1 # INSERT_ID=1 -master-bin.000001 # Query 1 # use `mysqltest2`; insert into t1 (b) values(@@collation_connection) -master-bin.000001 # Intvar 1 # INSERT_ID=2 -master-bin.000001 # Query 1 # use `mysqltest2`; insert into t1 (b) values(LEAST("Müller","Muffler")) -master-bin.000001 # Intvar 1 # INSERT_ID=3 -master-bin.000001 # Query 1 # use `mysqltest2`; insert into t1 (b) values(@@collation_connection) -master-bin.000001 # Intvar 1 # INSERT_ID=4 -master-bin.000001 # Query 1 # use `mysqltest2`; insert into t1 (b) values(LEAST("Müller","Muffler")) -master-bin.000001 # Query 1 # use `mysqltest2`; truncate table t1 -master-bin.000001 # Intvar 1 # INSERT_ID=1 -master-bin.000001 # User var 1 # @`a`=_cp850 0x4DFC6C6C6572 COLLATE cp850_general_ci -master-bin.000001 # Query 1 # use `mysqltest2`; insert into t1 (b) values(collation(@a)) -master-bin.000001 # Query 1 # drop database mysqltest2 -master-bin.000001 # Query 1 # drop database mysqltest3 +master-bin.000001 # Query # # drop database if exists mysqltest2 +master-bin.000001 # Query # # drop database if exists mysqltest3 +master-bin.000001 # Query # # create database mysqltest2 character set latin2 +master-bin.000001 # Query # # create database mysqltest3 +master-bin.000001 # Query # # drop database mysqltest3 +master-bin.000001 # Query # # create database mysqltest3 +master-bin.000001 # Query # # use `mysqltest2`; create table t1 (a int auto_increment primary key, b varchar(100)) +master-bin.000001 # Intvar # # INSERT_ID=1 +master-bin.000001 # Query # # use `mysqltest2`; insert into t1 (b) values(@@character_set_server) +master-bin.000001 # Intvar # # INSERT_ID=2 +master-bin.000001 # Query # # use `mysqltest2`; insert into t1 (b) values(@@collation_server) 
+master-bin.000001 # Intvar # # INSERT_ID=3
+master-bin.000001 # Query # # use `mysqltest2`; insert into t1 (b) values(@@character_set_client)
+master-bin.000001 # Intvar # # INSERT_ID=4
+master-bin.000001 # Query # # use `mysqltest2`; insert into t1 (b) values(@@character_set_connection)
+master-bin.000001 # Intvar # # INSERT_ID=5
+master-bin.000001 # Query # # use `mysqltest2`; insert into t1 (b) values(@@collation_connection)
+master-bin.000001 # Query # # use `mysqltest2`; truncate table t1
+master-bin.000001 # Intvar # # INSERT_ID=1
+master-bin.000001 # Query # # use `mysqltest2`; insert into t1 (b) values(@@collation_connection)
+master-bin.000001 # Intvar # # INSERT_ID=2
+master-bin.000001 # Query # # use `mysqltest2`; insert into t1 (b) values(LEAST("Müller","Muffler"))
+master-bin.000001 # Intvar # # INSERT_ID=3
+master-bin.000001 # Query # # use `mysqltest2`; insert into t1 (b) values(@@collation_connection)
+master-bin.000001 # Intvar # # INSERT_ID=4
+master-bin.000001 # Query # # use `mysqltest2`; insert into t1 (b) values(LEAST("Müller","Muffler"))
+master-bin.000001 # Query # # use `mysqltest2`; truncate table t1
+master-bin.000001 # Intvar # # INSERT_ID=1
+master-bin.000001 # User var # # @`a`=_cp850 0x4DFC6C6C6572 COLLATE cp850_general_ci
+master-bin.000001 # Query # # use `mysqltest2`; insert into t1 (b) values(collation(@a))
+master-bin.000001 # Query # # drop database mysqltest2
+master-bin.000001 # Query # # drop database mysqltest3
set global character_set_server=latin2;
set global character_set_server=latin1;
set global character_set_server=latin2;
diff --git a/mysql-test/r/rpl_stm_flsh_tbls.result b/mysql-test/r/rpl_stm_flsh_tbls.result
index a6123d75cb3..1c6b5615b6e 100644
--- a/mysql-test/r/rpl_stm_flsh_tbls.result
+++ b/mysql-test/r/rpl_stm_flsh_tbls.result
@@ -12,13 +12,13 @@ create table t4 (a int);
insert into t4 select * from t3;
rename table t1 to t5, t2 to t1;
flush no_write_to_binlog tables;
-SHOW BINLOG EVENTS FROM 652 ;
+SHOW BINLOG EVENTS FROM 656 ;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query 1 # use `test`; rename table t1 to t5, t2 to t1
select * from t3;
a
flush tables;
-SHOW BINLOG EVENTS FROM 652 ;
+SHOW BINLOG EVENTS FROM 656 ;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query 1 # use `test`; rename table t1 to t5, t2 to t1
master-bin.000001 # Query 1 # use `test`; flush tables
diff --git a/mysql-test/r/rpl_stm_log.result b/mysql-test/r/rpl_stm_log.result
index 496685981aa..f69ad8a57c1 100644
--- a/mysql-test/r/rpl_stm_log.result
+++ b/mysql-test/r/rpl_stm_log.result
@@ -26,14 +26,14 @@ master-bin.000001 # Query 1 # use `test`; drop table t1
master-bin.000001 # Query 1 # use `test`; create table t1 (word char(20) not null)ENGINE=MyISAM
master-bin.000001 # Begin_load_query 1 # ;file_id=1;block_len=581
master-bin.000001 # Execute_load_query 1 # use `test`; load data infile '../std_data_ln/words.dat' into table t1 ignore 1 lines ;file_id=1
-show binlog events from 102 limit 1;
+show binlog events from 106 limit 1;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query 1 # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=MyISAM
-show binlog events from 102 limit 2;
+show binlog events from 106 limit 2;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query 1 # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=MyISAM
master-bin.000001 # Intvar 1 # INSERT_ID=1
-show binlog events from 102 limit 2,1;
+show binlog events from 106 limit 2,1;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query 1 # use `test`; insert into t1 values (NULL)
flush logs;
@@ -47,17 +47,16 @@ flush logs;
stop slave;
create table t2 (n int)ENGINE=MyISAM;
insert into t2 values (1);
-show binlog events;
+show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 # Format_desc 1 # Server ver: VERSION, Binlog ver: 4
-master-bin.000001 # Query 1 # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=MyISAM
-master-bin.000001 # Intvar 1 # INSERT_ID=1
-master-bin.000001 # Query 1 # use `test`; insert into t1 values (NULL)
-master-bin.000001 # Query 1 # use `test`; drop table t1
-master-bin.000001 # Query 1 # use `test`; create table t1 (word char(20) not null)ENGINE=MyISAM
-master-bin.000001 # Begin_load_query 1 # ;file_id=1;block_len=581
-master-bin.000001 # Execute_load_query 1 # use `test`; load data infile '../std_data_ln/words.dat' into table t1 ignore 1 lines ;file_id=1
-master-bin.000001 # Rotate 1 # master-bin.000002;pos=4
+master-bin.000001 # Query # # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=MyISAM
+master-bin.000001 # Intvar # # INSERT_ID=1
+master-bin.000001 # Query # # use `test`; insert into t1 values (NULL)
+master-bin.000001 # Query # # use `test`; drop table t1
+master-bin.000001 # Query # # use `test`; create table t1 (word char(20) not null)ENGINE=MyISAM
+master-bin.000001 # Begin_load_query # # ;file_id=1;block_len=581
+master-bin.000001 # Execute_load_query # # use `test`; load data infile '../std_data_ln/words.dat' into table t1 ignore 1 lines ;file_id=1
+master-bin.000001 # Rotate # # master-bin.000002;pos=4
show binlog events in 'master-bin.000002';
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000002 # Format_desc 1 # Server ver: VERSION, Binlog ver: 4
@@ -66,13 +65,13 @@ master-bin.000002 # Query 1 # use `test`; create table t2 (n int)ENGINE=MyISAM
master-bin.000002 # Query 1 # use `test`; insert into t2 values (1)
show binary logs;
Log_name File_size
-master-bin.000001 1343
-master-bin.000002 388
+master-bin.000001 1347
+master-bin.000002 392
start slave;
show binary logs;
Log_name File_size
-slave-bin.000001 1443
-slave-bin.000002 289
+slave-bin.000001 1447
+slave-bin.000002 293
show binlog events in 'slave-bin.000001' from 4;
Log_name Pos Event_type Server_id End_log_pos Info
slave-bin.000001 # Format_desc 2 # Server ver: VERSION, Binlog ver: 4
@@ -92,7 +91,7 @@ slave-bin.000002 # Query 1 # use `test`; create table t2 (n int)ENGINE=MyISAM
slave-bin.000002 # Query 1 # use `test`; insert into t2 values (1)
show slave status;
Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master
-# 127.0.0.1 root MASTER_PORT 1 master-bin.000002 388 # # master-bin.000002 Yes Yes # 0 0 388 # None 0 No #
+# 127.0.0.1 root MASTER_PORT 1 master-bin.000002 392 # # master-bin.000002 Yes Yes # 0 0 392 # None 0 No #
show binlog events in 'slave-bin.000005' from 4;
ERROR HY000: Error when executing command SHOW BINLOG EVENTS: Could not find target log DROP TABLE t1; @@ -103,12 +102,11 @@ insert into t1 values (NULL, 1); reset master; set insert_id=5; insert into t1 values (NULL, last_insert_id()), (NULL, last_insert_id()); -show binlog events; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Format_desc 1 # Server ver: VERSION, Binlog ver: 4 -master-bin.000001 # Intvar 1 # LAST_INSERT_ID=1 -master-bin.000001 # Intvar 1 # INSERT_ID=5 -master-bin.000001 # Query 1 # use `test`; insert into t1 values (NULL, last_insert_id()), (NULL, last_insert_id()) +master-bin.000001 # Intvar # # LAST_INSERT_ID=1 +master-bin.000001 # Intvar # # INSERT_ID=5 +master-bin.000001 # Query # # use `test`; insert into t1 values (NULL, last_insert_id()), (NULL, last_insert_id()) select * from t1; a b 1 1 diff --git a/mysql-test/r/rpl_stm_max_relay_size.result b/mysql-test/r/rpl_stm_max_relay_size.result index c4a9a5bd3ff..2509cc08205 100644 --- a/mysql-test/r/rpl_stm_max_relay_size.result +++ b/mysql-test/r/rpl_stm_max_relay_size.result @@ -28,7 +28,7 @@ Master_User root Master_Port MASTER_PORT Connect_Retry 1 Master_Log_File master-bin.000001 -Read_Master_Log_Pos 72956 +Read_Master_Log_Pos 72960 Relay_Log_File # Relay_Log_Pos # Relay_Master_Log_File master-bin.000001 @@ -43,7 +43,7 @@ Replicate_Wild_Ignore_Table Last_Errno 0 Last_Error Skip_Counter 0 -Exec_Master_Log_Pos 72956 +Exec_Master_Log_Pos 72960 Relay_Log_Space # Until_Condition None Until_Log_File @@ -71,7 +71,7 @@ Master_User root Master_Port MASTER_PORT Connect_Retry 1 Master_Log_File master-bin.000001 -Read_Master_Log_Pos 72956 +Read_Master_Log_Pos 72960 Relay_Log_File # Relay_Log_Pos # Relay_Master_Log_File master-bin.000001 @@ -86,7 +86,7 @@ Replicate_Wild_Ignore_Table Last_Errno 0 Last_Error Skip_Counter 0 -Exec_Master_Log_Pos 72956 +Exec_Master_Log_Pos 72960 Relay_Log_Space # Until_Condition None Until_Log_File @@ -114,7 +114,7 @@ Master_User root Master_Port MASTER_PORT Connect_Retry 1 Master_Log_File master-bin.000001 -Read_Master_Log_Pos 72956 +Read_Master_Log_Pos 72960 Relay_Log_File # Relay_Log_Pos # Relay_Master_Log_File master-bin.000001 @@ -129,7 +129,7 @@ Replicate_Wild_Ignore_Table Last_Errno 0 Last_Error Skip_Counter 0 -Exec_Master_Log_Pos 72956 +Exec_Master_Log_Pos 72960 Relay_Log_Space # Until_Condition None Until_Log_File @@ -195,7 +195,7 @@ Master_User root Master_Port MASTER_PORT Connect_Retry 1 Master_Log_File master-bin.000001 -Read_Master_Log_Pos 73042 +Read_Master_Log_Pos 73046 Relay_Log_File # Relay_Log_Pos # Relay_Master_Log_File master-bin.000001 @@ -210,7 +210,7 @@ Replicate_Wild_Ignore_Table Last_Errno 0 Last_Error Skip_Counter 0 -Exec_Master_Log_Pos 73042 +Exec_Master_Log_Pos 73046 Relay_Log_Space # Until_Condition None Until_Log_File @@ -234,7 +234,7 @@ Master_User root Master_Port MASTER_PORT Connect_Retry 1 Master_Log_File master-bin.000001 -Read_Master_Log_Pos 73118 +Read_Master_Log_Pos 73122 Relay_Log_File # Relay_Log_Pos # Relay_Master_Log_File master-bin.000001 @@ -249,7 +249,7 @@ Replicate_Wild_Ignore_Table Last_Errno 0 Last_Error Skip_Counter 0 -Exec_Master_Log_Pos 73118 +Exec_Master_Log_Pos 73122 Relay_Log_Space # Until_Condition None Until_Log_File @@ -264,7 +264,7 @@ Seconds_Behind_Master # flush logs; show master status; File master-bin.000002 -Position 102 +Position 106 Binlog_Do_DB <Binlog_Ignore_DB> Binlog_Ignore_DB set global max_binlog_size= @my_max_binlog_size; diff --git 
a/mysql-test/r/rpl_stm_multi_query.result b/mysql-test/r/rpl_stm_multi_query.result index bf914e6ce6c..625c686f383 100644 --- a/mysql-test/r/rpl_stm_multi_query.result +++ b/mysql-test/r/rpl_stm_multi_query.result @@ -19,14 +19,14 @@ n 3 4 5 -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # drop database if exists mysqltest -master-bin.000001 # Query 1 # create database mysqltest -master-bin.000001 # Query 1 # use `test`; create table mysqltest.t1 ( n int) -master-bin.000001 # Query 1 # use `test`; insert into mysqltest.t1 values(1) -master-bin.000001 # Query 1 # use `test`; insert into mysqltest.t1 values(2) -master-bin.000001 # Query 1 # use `test`; insert into mysqltest.t1 values(3) -master-bin.000001 # Query 1 # use `test`; insert into mysqltest.t1 values(4) -master-bin.000001 # Query 1 # use `test`; insert into mysqltest.t1 values(5) +master-bin.000001 # Query # # drop database if exists mysqltest +master-bin.000001 # Query # # create database mysqltest +master-bin.000001 # Query # # use `test`; create table mysqltest.t1 ( n int) +master-bin.000001 # Query # # use `test`; insert into mysqltest.t1 values(1) +master-bin.000001 # Query # # use `test`; insert into mysqltest.t1 values(2) +master-bin.000001 # Query # # use `test`; insert into mysqltest.t1 values(3) +master-bin.000001 # Query # # use `test`; insert into mysqltest.t1 values(4) +master-bin.000001 # Query # # use `test`; insert into mysqltest.t1 values(5) drop database mysqltest; diff --git a/mysql-test/r/rpl_stm_reset_slave.result b/mysql-test/r/rpl_stm_reset_slave.result index 834b9add089..2b9fdca0911 100644 --- a/mysql-test/r/rpl_stm_reset_slave.result +++ b/mysql-test/r/rpl_stm_reset_slave.result @@ -6,12 +6,12 @@ drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; start slave; show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 102 # # master-bin.000001 Yes Yes # 0 0 102 # None 0 No # +# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 106 # # master-bin.000001 Yes Yes # 0 0 106 # None 0 No # stop slave; change master to master_user='test'; show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 test MASTER_PORT 1 master-bin.000001 102 # # master-bin.000001 No No # 0 0 102 # None 0 No # +# 127.0.0.1 test MASTER_PORT 1 master-bin.000001 106 # # master-bin.000001 No No # 0 0 106 # None 0 No # reset slave; show slave status; 
Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master
@@ -19,7 +19,7 @@ Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File
start slave;
show slave status;
Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master
-# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 102 # # master-bin.000001 Yes Yes # 0 0 102 # None 0 No #
+# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 106 # # master-bin.000001 Yes Yes # 0 0 106 # None 0 No #
stop slave;
reset slave;
start slave;
diff --git a/mysql-test/r/rpl_stm_until.result b/mysql-test/r/rpl_stm_until.result
index e8e33b66864..63544199f4c 100644
--- a/mysql-test/r/rpl_stm_until.result
+++ b/mysql-test/r/rpl_stm_until.result
@@ -26,7 +26,7 @@ Master_User root
Master_Port MASTER_MYPORT
Connect_Retry 1
Master_Log_File master-bin.000001
-Read_Master_Log_Pos 780
+Read_Master_Log_Pos 784
Relay_Log_File slave-relay-bin.000004
Relay_Log_Pos #
Relay_Master_Log_File master-bin.000001
@@ -41,7 +41,7 @@ Replicate_Wild_Ignore_Table
Last_Errno 0
Last_Error
Skip_Counter 0
-Exec_Master_Log_Pos 323
+Exec_Master_Log_Pos 327
Relay_Log_Space #
Until_Condition Master
Until_Log_File master-bin.000001
@@ -67,7 +67,7 @@ Master_User root
Master_Port MASTER_MYPORT
Connect_Retry 1
Master_Log_File master-bin.000001
-Read_Master_Log_Pos 780
+Read_Master_Log_Pos 784
Relay_Log_File slave-relay-bin.000004
Relay_Log_Pos #
Relay_Master_Log_File master-bin.000001
@@ -82,7 +82,7 @@ Replicate_Wild_Ignore_Table
Last_Errno 0
Last_Error
Skip_Counter 0
-Exec_Master_Log_Pos 323
+Exec_Master_Log_Pos 327
Relay_Log_Space #
Until_Condition Master
Until_Log_File master-no-such-bin.000001
@@ -106,7 +106,7 @@ Master_User root
Master_Port MASTER_MYPORT
Connect_Retry 1
Master_Log_File master-bin.000001
-Read_Master_Log_Pos 780
+Read_Master_Log_Pos 784
Relay_Log_File slave-relay-bin.000004
Relay_Log_Pos #
Relay_Master_Log_File master-bin.000001
@@ -121,7 +121,7 @@ Replicate_Wild_Ignore_Table
Last_Errno 0
Last_Error
Skip_Counter 0
-Exec_Master_Log_Pos 612
+Exec_Master_Log_Pos 616
Relay_Log_Space #
Until_Condition Relay
Until_Log_File slave-relay-bin.000004
@@ -143,7 +143,7 @@ Master_User root
Master_Port MASTER_MYPORT
Connect_Retry 1
Master_Log_File master-bin.000001
-Read_Master_Log_Pos 780
+Read_Master_Log_Pos 784
Relay_Log_File slave-relay-bin.000004
Relay_Log_Pos #
Relay_Master_Log_File master-bin.000001
@@ -158,7 +158,7 @@ Replicate_Wild_Ignore_Table
Last_Errno 0
Last_Error
Skip_Counter 0
-Exec_Master_Log_Pos 780
+Exec_Master_Log_Pos 784
Relay_Log_Space #
Until_Condition Master
Until_Log_File master-bin.000001 diff --git a/mysql-test/r/rpl_switch_stm_row_mixed.result b/mysql-test/r/rpl_switch_stm_row_mixed.result index fe0d986eba6..6d9f1a32980 100644 --- a/mysql-test/r/rpl_switch_stm_row_mixed.result +++ b/mysql-test/r/rpl_switch_stm_row_mixed.result @@ -405,84 +405,84 @@ CREATE TABLE t12 (data LONG); LOCK TABLES t12 WRITE; INSERT INTO t12 VALUES(UUID()); UNLOCK TABLES; -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # drop database if exists mysqltest1 -master-bin.000001 # Query 1 # create database mysqltest1 -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE t1 (a varchar(100)) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work_8_") -master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E63795F375F COLLATE latin1_swedish_ci -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select @'string' -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work_9_") -master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E63795F375F COLLATE latin1_swedish_ci -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select @'string' -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("for_10_") -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday_11_" -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work_13_") -master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E63795F31325F COLLATE latin1_swedish_ci -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select @'string' -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work_14_") -master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E63795F31325F COLLATE latin1_swedish_ci -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select @'string' -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("for_15_") -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday_16_" -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work_18_") -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E63795F31375F COLLATE latin1_swedish_ci -master-bin.000001 # Query 1 # use `mysqltest1`; insert 
into t1 select @'string' -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday_21_" -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E63795F31375F COLLATE latin1_swedish_ci -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select @'string' -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday_24_" -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE `t2` ( +master-bin.000001 # Query # # drop database if exists mysqltest1 +master-bin.000001 # Query # # create database mysqltest1 +master-bin.000001 # Query # # use `mysqltest1`; CREATE TABLE t1 (a varchar(100)) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values("work_8_") +master-bin.000001 # User var # # @`string`=_latin1 0x656D657267656E63795F375F COLLATE latin1_swedish_ci +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 select @'string' +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values("work_9_") +master-bin.000001 # User var # # @`string`=_latin1 0x656D657267656E63795F375F COLLATE latin1_swedish_ci +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 select @'string' +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values("for_10_") +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 select "yesterday_11_" +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values("work_13_") +master-bin.000001 # User var # # @`string`=_latin1 0x656D657267656E63795F31325F COLLATE latin1_swedish_ci +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 select @'string' +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values("work_14_") +master-bin.000001 # User var # # @`string`=_latin1 0x656D657267656E63795F31325F COLLATE latin1_swedish_ci +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 select @'string' +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values("for_15_") +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 select "yesterday_16_" +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values("work_18_") +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # 
table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # User var # # @`string`=_latin1 0x656D657267656E63795F31375F COLLATE latin1_swedish_ci +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 select @'string' +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 select "yesterday_21_" +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # User var # # @`string`=_latin1 0x656D657267656E63795F31375F COLLATE latin1_swedish_ci +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 select @'string' +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 select "yesterday_24_" +master-bin.000001 # Query # # use `mysqltest1`; CREATE TABLE `t2` ( `rpad(UUID(),100,' ')` varchar(100) CHARACTER SET utf8 NOT NULL DEFAULT '' ) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE `t3` ( +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; CREATE TABLE `t3` ( `1` varbinary(36) NOT NULL DEFAULT '' ) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t3) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE `t4` ( +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t3) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; CREATE TABLE `t4` ( `a` varchar(100) DEFAULT NULL ) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t4) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; create table t5 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t5) -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` procedure foo() +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t4) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; create table t5 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t5) +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` procedure foo() begin insert into t1 values("work_25_"); insert into t1 values(concat("for_26_",UUID())); insert into t1 select "yesterday_27_"; end -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` procedure foo2() +master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` 
procedure foo2() begin insert into t1 values(concat("emergency_28_",UUID())); insert into t1 values("work_29_"); @@ -491,308 +491,308 @@ set session binlog_format=row; # accepted for stored procs insert into t1 values("more work_31_"); set session binlog_format=mixed; end -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo3() returns bigint unsigned +master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo3() returns bigint unsigned begin set session binlog_format=row; # rejected for stored funcs insert into t1 values("alarm"); return 100; end -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` procedure foo4(x varchar(100)) +master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` procedure foo4(x varchar(100)) begin insert into t1 values(concat("work_250_",x)); insert into t1 select "yesterday_270_"; end -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values(concat("work_250_", NAME_CONST('x',_latin1'hello'))) -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday_270_" -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values(concat("work_250_", NAME_CONST('x',_latin1'world'))) -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday_270_" -master-bin.000001 # Query 1 # use `mysqltest1`; drop function foo3 -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo3() returns bigint unsigned +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # 
(mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values(concat("work_250_", NAME_CONST('x',_latin1'hello'))) +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 select "yesterday_270_" +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values(concat("work_250_", NAME_CONST('x',_latin1'world'))) +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 select "yesterday_270_" +master-bin.000001 # Query # # use `mysqltest1`; drop function foo3 +master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo3() returns bigint unsigned begin insert into t1 values("foo3_32_"); call foo(); return 100; end -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo4() returns bigint unsigned +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo4() returns bigint unsigned begin insert into t2 select foo3(); return 100; end -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Table_map 1 # table_id: # 
(mysqltest1.t1) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo5() returns bigint unsigned +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo5() returns bigint unsigned begin insert into t2 select UUID(); return 100; end -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo6(x varchar(100)) returns bigint unsigned +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo6(x varchar(100)) returns bigint unsigned begin insert into t2 select x; return 100; end -master-bin.000001 # Query 1 # use `mysqltest1`; SELECT `mysqltest1`.`foo6`(_latin1'foo6_1_') -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select uuid() -master-bin.000001 # Query 1 # use `mysqltest1`; create table t11 (data varchar(255)) -master-bin.000001 # Table_map 1 # 
table_id: # (mysqltest1.t11) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11') -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11') -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11') -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` trigger t11_bi before insert on t11 for each row +master-bin.000001 # Query # # use `mysqltest1`; SELECT `mysqltest1`.`foo6`(_latin1'foo6_1_') +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select uuid() +master-bin.000001 # Query # # use `mysqltest1`; create table t11 (data varchar(255)) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t11) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11') +master-bin.000001 # Query # # use `mysqltest1`; insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11') +master-bin.000001 # Query # # use `mysqltest1`; insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11') +master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` trigger t11_bi before insert on t11 for each row begin set NEW.data = concat(NEW.data,UUID()); end -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t11) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; create table t20 select * from t1 -master-bin.000001 # Query 1 # use `mysqltest1`; create table t21 select * from t2 -master-bin.000001 # Query 1 # use `mysqltest1`; create table t22 select * from t3 -master-bin.000001 # Query 1 # use `mysqltest1`; drop table t1,t2,t3 -master-bin.000001 # Query 1 # use `mysqltest1`; create table t1 (a int primary key auto_increment, b varchar(100)) -master-bin.000001 # Query 
1 # use `mysqltest1`; create table t2 (a int primary key auto_increment, b varchar(100)) -master-bin.000001 # Query 1 # use `mysqltest1`; create table t3 (b varchar(100)) -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function f (x varchar(100)) returns int deterministic +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t11) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; create table t20 select * from t1 +master-bin.000001 # Query # # use `mysqltest1`; create table t21 select * from t2 +master-bin.000001 # Query # # use `mysqltest1`; create table t22 select * from t3 +master-bin.000001 # Query # # use `mysqltest1`; drop table t1,t2,t3 +master-bin.000001 # Query # # use `mysqltest1`; create table t1 (a int primary key auto_increment, b varchar(100)) +master-bin.000001 # Query # # use `mysqltest1`; create table t2 (a int primary key auto_increment, b varchar(100)) +master-bin.000001 # Query # # use `mysqltest1`; create table t3 (b varchar(100)) +master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function f (x varchar(100)) returns int deterministic begin insert into t1 values(null,x); insert into t2 values(null,x); return 1; end -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Intvar 1 # INSERT_ID=3 -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values(null,"try_44_") -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; create table t12 select * from t1 -master-bin.000001 # Query 1 # use `mysqltest1`; drop table t1 -master-bin.000001 # Query 1 # use `mysqltest1`; create table t1 (a int, b varchar(100), key(a)) -master-bin.000001 # Intvar 1 # INSERT_ID=4 -master-bin.000001 # Query 1 # use `mysqltest1`; SELECT `mysqltest1`.`f`(_latin1'try_45_') -master-bin.000001 # Query 1 # use `mysqltest1`; create table t13 select * from t1 -master-bin.000001 # Query 1 # use `mysqltest1`; drop table t1 -master-bin.000001 # Query 1 # use `mysqltest1`; create table t1 (a int primary key auto_increment, b varchar(100)) -master-bin.000001 # Query 1 # use `mysqltest1`; drop function f -master-bin.000001 # Query 1 # use `mysqltest1`; create table 
t14 (unique (a)) select * from t2 -master-bin.000001 # Query 1 # use `mysqltest1`; truncate table t2 -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function f1 (x varchar(100)) returns int deterministic +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Intvar # # INSERT_ID=3 +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values(null,"try_44_") +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; create table t12 select * from t1 +master-bin.000001 # Query # # use `mysqltest1`; drop table t1 +master-bin.000001 # Query # # use `mysqltest1`; create table t1 (a int, b varchar(100), key(a)) +master-bin.000001 # Intvar # # INSERT_ID=4 +master-bin.000001 # Query # # use `mysqltest1`; SELECT `mysqltest1`.`f`(_latin1'try_45_') +master-bin.000001 # Query # # use `mysqltest1`; create table t13 select * from t1 +master-bin.000001 # Query # # use `mysqltest1`; drop table t1 +master-bin.000001 # Query # # use `mysqltest1`; create table t1 (a int primary key auto_increment, b varchar(100)) +master-bin.000001 # Query # # use `mysqltest1`; drop function f +master-bin.000001 # Query # # use `mysqltest1`; create table t14 (unique (a)) select * from t2 +master-bin.000001 # Query # # use `mysqltest1`; truncate table t2 +master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function f1 (x varchar(100)) returns int deterministic begin insert into t1 values(null,x); return 1; end -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function f2 (x varchar(100)) returns int deterministic +master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function f2 (x varchar(100)) returns int deterministic begin insert into t2 values(null,x); return 1; end -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t3) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; drop function f2 -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function f2 (x varchar(100)) returns int deterministic +master-bin.000001 # Table_map # # table_id: 
# (mysqltest1.t1) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t3) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; drop function f2 +master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function f2 (x varchar(100)) returns int deterministic begin declare y int; insert into t1 values(null,x); set y = (select count(*) from t2); return y; end -master-bin.000001 # Intvar 1 # INSERT_ID=4 -master-bin.000001 # Query 1 # use `mysqltest1`; SELECT `mysqltest1`.`f1`(_latin1'try_53_') -master-bin.000001 # Intvar 1 # INSERT_ID=5 -master-bin.000001 # Query 1 # use `mysqltest1`; SELECT `mysqltest1`.`f2`(_latin1'try_54_') -master-bin.000001 # Query 1 # use `mysqltest1`; drop function f2 -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` trigger t1_bi before insert on t1 for each row +master-bin.000001 # Intvar # # INSERT_ID=4 +master-bin.000001 # Query # # use `mysqltest1`; SELECT `mysqltest1`.`f1`(_latin1'try_53_') +master-bin.000001 # Intvar # # INSERT_ID=5 +master-bin.000001 # Query # # use `mysqltest1`; SELECT `mysqltest1`.`f2`(_latin1'try_54_') +master-bin.000001 # Query # # use `mysqltest1`; drop function f2 +master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` trigger t1_bi before insert on t1 for each row begin insert into t2 values(null,"try_55_"); end -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; alter table t1 modify a int, drop primary key -master-bin.000001 # Intvar 1 # INSERT_ID=5 -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values(null,"try_57_") -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE `t16` ( +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; alter table t1 modify a int, drop primary key +master-bin.000001 # Intvar # # INSERT_ID=5 +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values(null,"try_57_") +master-bin.000001 # Query # # use `mysqltest1`; CREATE TABLE `t16` ( `UUID()` varchar(36) CHARACTER SET utf8 NOT NULL DEFAULT '' ) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t16) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t16) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t16 values("try_66_") 
-master-bin.000001 # Query 1 # use `mysqltest1`; DROP TABLE IF EXISTS t11 -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE t11 (song VARCHAR(255)) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t11) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; INSERT INTO t11 VALUES('Careful With That Axe, Eugene') -master-bin.000001 # Query 1 # use `mysqltest1`; DROP TABLE IF EXISTS t12 -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE t12 (data LONG) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t12) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -show binlog events from 102; +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t16) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t16) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; insert into t16 values("try_66_") +master-bin.000001 # Query # # use `mysqltest1`; DROP TABLE IF EXISTS t11 +master-bin.000001 # Query # # use `mysqltest1`; CREATE TABLE t11 (song VARCHAR(255)) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t11) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; INSERT INTO t11 VALUES('Careful With That Axe, Eugene') +master-bin.000001 # Query # # use `mysqltest1`; DROP TABLE IF EXISTS t12 +master-bin.000001 # Query # # use `mysqltest1`; CREATE TABLE t12 (data LONG) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t12) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query 1 # drop database if exists mysqltest1 -master-bin.000001 # Query 1 # create database mysqltest1 -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE t1 (a varchar(100)) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work_8_") -master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E63795F375F COLLATE latin1_swedish_ci -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select @'string' -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work_9_") -master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E63795F375F COLLATE latin1_swedish_ci -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select @'string' -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("for_10_") 
-master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday_11_" -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work_13_") -master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E63795F31325F COLLATE latin1_swedish_ci -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select @'string' -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work_14_") -master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E63795F31325F COLLATE latin1_swedish_ci -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select @'string' -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("for_15_") -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday_16_" -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work_18_") -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E63795F31375F COLLATE latin1_swedish_ci -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select @'string' -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday_21_" -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E63795F31375F COLLATE latin1_swedish_ci -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select @'string' -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday_24_" -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE `t2` ( +master-bin.000001 # Query # # drop database if exists mysqltest1 +master-bin.000001 # Query # # create database mysqltest1 +master-bin.000001 # Query # # use `mysqltest1`; CREATE TABLE t1 (a varchar(100)) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values("work_8_") +master-bin.000001 # User var # # @`string`=_latin1 0x656D657267656E63795F375F COLLATE latin1_swedish_ci +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 select @'string' +master-bin.000001 # Query # # 
use `mysqltest1`; insert into t1 values("work_9_") +master-bin.000001 # User var # # @`string`=_latin1 0x656D657267656E63795F375F COLLATE latin1_swedish_ci +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 select @'string' +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values("for_10_") +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 select "yesterday_11_" +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values("work_13_") +master-bin.000001 # User var # # @`string`=_latin1 0x656D657267656E63795F31325F COLLATE latin1_swedish_ci +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 select @'string' +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values("work_14_") +master-bin.000001 # User var # # @`string`=_latin1 0x656D657267656E63795F31325F COLLATE latin1_swedish_ci +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 select @'string' +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values("for_15_") +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 select "yesterday_16_" +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values("work_18_") +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # User var # # @`string`=_latin1 0x656D657267656E63795F31375F COLLATE latin1_swedish_ci +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 select @'string' +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 select "yesterday_21_" +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # User var # # @`string`=_latin1 0x656D657267656E63795F31375F COLLATE latin1_swedish_ci +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 select @'string' +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 select "yesterday_24_" +master-bin.000001 # Query # # use `mysqltest1`; CREATE TABLE `t2` ( `rpad(UUID(),100,' ')` varchar(100) CHARACTER SET utf8 NOT NULL DEFAULT '' ) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE `t3` ( +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; CREATE TABLE `t3` ( `1` varbinary(36) NOT NULL DEFAULT '' ) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t3) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE `t4` ( +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t3) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; CREATE TABLE `t4` ( `a` varchar(100) DEFAULT NULL ) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t4) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # 
Query 1 # use `mysqltest1`; create table t5 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t5) -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` procedure foo() +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t4) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; create table t5 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t5) +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` procedure foo() begin insert into t1 values("work_25_"); insert into t1 values(concat("for_26_",UUID())); insert into t1 select "yesterday_27_"; end -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` procedure foo2() +master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` procedure foo2() begin insert into t1 values(concat("emergency_28_",UUID())); insert into t1 values("work_29_"); @@ -801,229 +801,229 @@ set session binlog_format=row; # accepted for stored procs insert into t1 values("more work_31_"); set session binlog_format=mixed; end -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo3() returns bigint unsigned +master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo3() returns bigint unsigned begin set session binlog_format=row; # rejected for stored funcs insert into t1 values("alarm"); return 100; end -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` procedure foo4(x varchar(100)) +master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` procedure foo4(x varchar(100)) begin insert into t1 values(concat("work_250_",x)); insert into t1 select "yesterday_270_"; end -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values(concat("work_250_", NAME_CONST('x',_latin1'hello'))) -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday_270_" -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) 
-master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values(concat("work_250_", NAME_CONST('x',_latin1'world'))) -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday_270_" -master-bin.000001 # Query 1 # use `mysqltest1`; drop function foo3 -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo3() returns bigint unsigned +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values(concat("work_250_", NAME_CONST('x',_latin1'hello'))) +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 select "yesterday_270_" +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values(concat("work_250_", NAME_CONST('x',_latin1'world'))) +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 select "yesterday_270_" +master-bin.000001 # Query # # use `mysqltest1`; drop function foo3 +master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo3() returns bigint unsigned begin insert into t1 values("foo3_32_"); call foo(); return 100; end -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo4() returns bigint unsigned +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 
# Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo4() returns bigint unsigned begin insert into t2 select foo3(); return 100; end -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo5() returns bigint unsigned +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo5() returns bigint unsigned begin insert into t2 select UUID(); return 100; end -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo6(x varchar(100)) returns bigint unsigned +master-bin.000001 # Table_map # # 
table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo6(x varchar(100)) returns bigint unsigned begin insert into t2 select x; return 100; end -master-bin.000001 # Query 1 # use `mysqltest1`; SELECT `mysqltest1`.`foo6`(_latin1'foo6_1_') -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select uuid() -master-bin.000001 # Query 1 # use `mysqltest1`; create table t11 (data varchar(255)) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t11) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11') -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11') -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11') -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` trigger t11_bi before insert on t11 for each row +master-bin.000001 # Query # # use `mysqltest1`; SELECT `mysqltest1`.`foo6`(_latin1'foo6_1_') +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select uuid() +master-bin.000001 # Query # # use `mysqltest1`; create table t11 (data varchar(255)) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t11) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11') +master-bin.000001 # Query # # use `mysqltest1`; insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11') +master-bin.000001 # Query # # use `mysqltest1`; insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11') +master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` trigger t11_bi before insert on t11 for each 
row begin set NEW.data = concat(NEW.data,UUID()); end -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t11) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; create table t20 select * from t1 -master-bin.000001 # Query 1 # use `mysqltest1`; create table t21 select * from t2 -master-bin.000001 # Query 1 # use `mysqltest1`; create table t22 select * from t3 -master-bin.000001 # Query 1 # use `mysqltest1`; drop table t1,t2,t3 -master-bin.000001 # Query 1 # use `mysqltest1`; create table t1 (a int primary key auto_increment, b varchar(100)) -master-bin.000001 # Query 1 # use `mysqltest1`; create table t2 (a int primary key auto_increment, b varchar(100)) -master-bin.000001 # Query 1 # use `mysqltest1`; create table t3 (b varchar(100)) -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function f (x varchar(100)) returns int deterministic +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t11) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; create table t20 select * from t1 +master-bin.000001 # Query # # use `mysqltest1`; create table t21 select * from t2 +master-bin.000001 # Query # # use `mysqltest1`; create table t22 select * from t3 +master-bin.000001 # Query # # use `mysqltest1`; drop table t1,t2,t3 +master-bin.000001 # Query # # use `mysqltest1`; create table t1 (a int primary key auto_increment, b varchar(100)) +master-bin.000001 # Query # # use `mysqltest1`; create table t2 (a int primary key auto_increment, b varchar(100)) +master-bin.000001 # Query # # use `mysqltest1`; create table t3 (b varchar(100)) +master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function f (x varchar(100)) returns int deterministic begin insert into t1 values(null,x); insert into t2 values(null,x); return 1; end -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # 
Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Intvar 1 # INSERT_ID=3 -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values(null,"try_44_") -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; create table t12 select * from t1 -master-bin.000001 # Query 1 # use `mysqltest1`; drop table t1 -master-bin.000001 # Query 1 # use `mysqltest1`; create table t1 (a int, b varchar(100), key(a)) -master-bin.000001 # Intvar 1 # INSERT_ID=4 -master-bin.000001 # Query 1 # use `mysqltest1`; SELECT `mysqltest1`.`f`(_latin1'try_45_') -master-bin.000001 # Query 1 # use `mysqltest1`; create table t13 select * from t1 -master-bin.000001 # Query 1 # use `mysqltest1`; drop table t1 -master-bin.000001 # Query 1 # use `mysqltest1`; create table t1 (a int primary key auto_increment, b varchar(100)) -master-bin.000001 # Query 1 # use `mysqltest1`; drop function f -master-bin.000001 # Query 1 # use `mysqltest1`; create table t14 (unique (a)) select * from t2 -master-bin.000001 # Query 1 # use `mysqltest1`; truncate table t2 -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function f1 (x varchar(100)) returns int deterministic +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Intvar # # INSERT_ID=3 +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values(null,"try_44_") +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; create table t12 select * from t1 +master-bin.000001 # Query # # use `mysqltest1`; drop table t1 +master-bin.000001 # Query # # use `mysqltest1`; create table t1 (a int, b varchar(100), key(a)) +master-bin.000001 # Intvar # # INSERT_ID=4 +master-bin.000001 # Query # # use `mysqltest1`; SELECT `mysqltest1`.`f`(_latin1'try_45_') +master-bin.000001 # Query # # use `mysqltest1`; create table t13 select * from t1 +master-bin.000001 # Query # # use `mysqltest1`; drop table t1 +master-bin.000001 # Query # # use `mysqltest1`; create table t1 (a int primary key auto_increment, b varchar(100)) +master-bin.000001 # Query # # use `mysqltest1`; drop function f +master-bin.000001 # Query # # use `mysqltest1`; create table t14 (unique (a)) select * from t2 +master-bin.000001 # Query # # use `mysqltest1`; truncate table t2 +master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function f1 (x varchar(100)) returns int deterministic begin insert into t1 values(null,x); return 1; end -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function f2 (x varchar(100)) 
returns int deterministic +master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function f2 (x varchar(100)) returns int deterministic begin insert into t2 values(null,x); return 1; end -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t3) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; drop function f2 -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function f2 (x varchar(100)) returns int deterministic +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t3) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; drop function f2 +master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function f2 (x varchar(100)) returns int deterministic begin declare y int; insert into t1 values(null,x); set y = (select count(*) from t2); return y; end -master-bin.000001 # Intvar 1 # INSERT_ID=4 -master-bin.000001 # Query 1 # use `mysqltest1`; SELECT `mysqltest1`.`f1`(_latin1'try_53_') -master-bin.000001 # Intvar 1 # INSERT_ID=5 -master-bin.000001 # Query 1 # use `mysqltest1`; SELECT `mysqltest1`.`f2`(_latin1'try_54_') -master-bin.000001 # Query 1 # use `mysqltest1`; drop function f2 -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` trigger t1_bi before insert on t1 for each row +master-bin.000001 # Intvar # # INSERT_ID=4 +master-bin.000001 # Query # # use `mysqltest1`; SELECT `mysqltest1`.`f1`(_latin1'try_53_') +master-bin.000001 # Intvar # # INSERT_ID=5 +master-bin.000001 # Query # # use `mysqltest1`; SELECT `mysqltest1`.`f2`(_latin1'try_54_') +master-bin.000001 # Query # # use `mysqltest1`; drop function f2 +master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` trigger t1_bi before insert on t1 for each row begin insert into t2 values(null,"try_55_"); end -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) -master-bin.000001 # Write_rows 1 # table_id: # 
-master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; alter table t1 modify a int, drop primary key -master-bin.000001 # Intvar 1 # INSERT_ID=5 -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values(null,"try_57_") -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE `t16` ( +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows # # table_id: # +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; alter table t1 modify a int, drop primary key +master-bin.000001 # Intvar # # INSERT_ID=5 +master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values(null,"try_57_") +master-bin.000001 # Query # # use `mysqltest1`; CREATE TABLE `t16` ( `UUID()` varchar(36) CHARACTER SET utf8 NOT NULL DEFAULT '' ) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t16) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t16) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t16 values("try_66_") -master-bin.000001 # Query 1 # use `mysqltest1`; DROP TABLE IF EXISTS t11 -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE t11 (song VARCHAR(255)) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t11) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; INSERT INTO t11 VALUES('Careful With That Axe, Eugene') -master-bin.000001 # Query 1 # use `mysqltest1`; DROP TABLE IF EXISTS t12 -master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE t12 (data LONG) -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t12) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t16) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t16) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; insert into t16 values("try_66_") +master-bin.000001 # Query # # use `mysqltest1`; DROP TABLE IF EXISTS t11 +master-bin.000001 # Query # # use `mysqltest1`; CREATE TABLE t11 (song VARCHAR(255)) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t11) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `mysqltest1`; INSERT INTO t11 VALUES('Careful With That Axe, Eugene') +master-bin.000001 # Query # # use `mysqltest1`; DROP TABLE IF EXISTS t12 +master-bin.000001 # Query # # use `mysqltest1`; CREATE TABLE t12 (data LONG) +master-bin.000001 # Table_map # # table_id: # (mysqltest1.t12) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F drop database mysqltest1; set global binlog_format =@my_binlog_format; diff --git a/mysql-test/r/rpl_truncate_2myisam.result b/mysql-test/r/rpl_truncate_2myisam.result index 41640a709b9..c7ef28ba56b 100644 --- a/mysql-test/r/rpl_truncate_2myisam.result +++ b/mysql-test/r/rpl_truncate_2myisam.result @@ -29,13 +29,12 @@ SELECT * FROM t1; a b **** On Master **** DROP TABLE t1; -SHOW BINLOG EVENTS; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 4 Format_desc 1 102 Server ver: 
SERVER_VERSION, Binlog ver: 4 -master-bin.000001 102 Query 1 210 use `test`; CREATE TABLE t1 (a INT, b LONG) ENGINE=MyISAM -master-bin.000001 210 Query 1 307 use `test`; INSERT INTO t1 VALUES (1,1), (2,2) -master-bin.000001 307 Query 1 387 use `test`; TRUNCATE TABLE t1 -master-bin.000001 387 Query 1 463 use `test`; DROP TABLE t1 +master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a INT, b LONG) ENGINE=MyISAM +master-bin.000001 # Query # # use `test`; INSERT INTO t1 VALUES (1,1), (2,2) +master-bin.000001 # Query # # use `test`; TRUNCATE TABLE t1 +master-bin.000001 # Query # # use `test`; DROP TABLE t1 **** On Master **** SET SESSION BINLOG_FORMAT=MIXED; SET GLOBAL BINLOG_FORMAT=MIXED; @@ -61,13 +60,12 @@ SELECT * FROM t1; a b **** On Master **** DROP TABLE t1; -SHOW BINLOG EVENTS; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 4 Format_desc 1 102 Server ver: SERVER_VERSION, Binlog ver: 4 -master-bin.000001 102 Query 1 210 use `test`; CREATE TABLE t1 (a INT, b LONG) ENGINE=MyISAM -master-bin.000001 210 Query 1 307 use `test`; INSERT INTO t1 VALUES (1,1), (2,2) -master-bin.000001 307 Query 1 387 use `test`; TRUNCATE TABLE t1 -master-bin.000001 387 Query 1 463 use `test`; DROP TABLE t1 +master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a INT, b LONG) ENGINE=MyISAM +master-bin.000001 # Query # # use `test`; INSERT INTO t1 VALUES (1,1), (2,2) +master-bin.000001 # Query # # use `test`; TRUNCATE TABLE t1 +master-bin.000001 # Query # # use `test`; DROP TABLE t1 **** On Master **** SET SESSION BINLOG_FORMAT=ROW; SET GLOBAL BINLOG_FORMAT=ROW; @@ -93,14 +91,13 @@ SELECT * FROM t1; a b **** On Master **** DROP TABLE t1; -SHOW BINLOG EVENTS; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 4 Format_desc 1 102 Server ver: SERVER_VERSION, Binlog ver: 4 -master-bin.000001 102 Query 1 210 use `test`; CREATE TABLE t1 (a INT, b LONG) ENGINE=MyISAM -master-bin.000001 210 Table_map 1 250 table_id: # (test.t1) -master-bin.000001 250 Write_rows 1 297 table_id: # flags: STMT_END_F -master-bin.000001 297 Query 1 377 use `test`; TRUNCATE TABLE t1 -master-bin.000001 377 Query 1 453 use `test`; DROP TABLE t1 +master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a INT, b LONG) ENGINE=MyISAM +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `test`; TRUNCATE TABLE t1 +master-bin.000001 # Query # # use `test`; DROP TABLE t1 **** On Master **** SET SESSION BINLOG_FORMAT=STATEMENT; SET GLOBAL BINLOG_FORMAT=STATEMENT; @@ -126,13 +123,12 @@ SELECT * FROM t1; a b **** On Master **** DROP TABLE t1; -SHOW BINLOG EVENTS; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 4 Format_desc 1 102 Server ver: SERVER_VERSION, Binlog ver: 4 -master-bin.000001 102 Query 1 210 use `test`; CREATE TABLE t1 (a INT, b LONG) ENGINE=MyISAM -master-bin.000001 210 Query 1 307 use `test`; INSERT INTO t1 VALUES (1,1), (2,2) -master-bin.000001 307 Query 1 384 use `test`; DELETE FROM t1 -master-bin.000001 384 Query 1 460 use `test`; DROP TABLE t1 +master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a INT, b LONG) ENGINE=MyISAM +master-bin.000001 # Query # # use `test`; INSERT INTO t1 VALUES (1,1), (2,2) +master-bin.000001 # Query # # use `test`; DELETE FROM t1 +master-bin.000001 # Query # # use `test`; DROP TABLE t1 **** On Master 
**** SET SESSION BINLOG_FORMAT=MIXED; SET GLOBAL BINLOG_FORMAT=MIXED; @@ -158,13 +154,12 @@ SELECT * FROM t1; a b **** On Master **** DROP TABLE t1; -SHOW BINLOG EVENTS; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 4 Format_desc 1 102 Server ver: SERVER_VERSION, Binlog ver: 4 -master-bin.000001 102 Query 1 210 use `test`; CREATE TABLE t1 (a INT, b LONG) ENGINE=MyISAM -master-bin.000001 210 Query 1 307 use `test`; INSERT INTO t1 VALUES (1,1), (2,2) -master-bin.000001 307 Query 1 384 use `test`; DELETE FROM t1 -master-bin.000001 384 Query 1 460 use `test`; DROP TABLE t1 +master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a INT, b LONG) ENGINE=MyISAM +master-bin.000001 # Query # # use `test`; INSERT INTO t1 VALUES (1,1), (2,2) +master-bin.000001 # Query # # use `test`; DELETE FROM t1 +master-bin.000001 # Query # # use `test`; DROP TABLE t1 **** On Master **** SET SESSION BINLOG_FORMAT=ROW; SET GLOBAL BINLOG_FORMAT=ROW; @@ -191,12 +186,11 @@ a b 3 3 **** On Master **** DROP TABLE t1; -SHOW BINLOG EVENTS; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 4 Format_desc 1 102 Server ver: SERVER_VERSION, Binlog ver: 4 -master-bin.000001 102 Query 1 210 use `test`; CREATE TABLE t1 (a INT, b LONG) ENGINE=MyISAM -master-bin.000001 210 Table_map 1 250 table_id: # (test.t1) -master-bin.000001 250 Write_rows 1 297 table_id: # flags: STMT_END_F -master-bin.000001 297 Table_map 1 337 table_id: # (test.t1) -master-bin.000001 337 Delete_rows 1 384 table_id: # flags: STMT_END_F -master-bin.000001 384 Query 1 460 use `test`; DROP TABLE t1 +master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a INT, b LONG) ENGINE=MyISAM +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Delete_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `test`; DROP TABLE t1 diff --git a/mysql-test/r/rpl_truncate_3innodb.result b/mysql-test/r/rpl_truncate_3innodb.result index 062c9704ae0..7ce48c2e983 100644 --- a/mysql-test/r/rpl_truncate_3innodb.result +++ b/mysql-test/r/rpl_truncate_3innodb.result @@ -29,15 +29,14 @@ SELECT * FROM t1; a b **** On Master **** DROP TABLE t1; -SHOW BINLOG EVENTS; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 4 Format_desc 1 102 Server ver: SERVER_VERSION, Binlog ver: 4 -master-bin.000001 102 Query 1 210 use `test`; CREATE TABLE t1 (a INT, b LONG) ENGINE=InnoDB -master-bin.000001 210 Query 1 97 use `test`; INSERT INTO t1 VALUES (1,1), (2,2) -master-bin.000001 307 Xid 1 334 COMMIT /* xid= */ -master-bin.000001 334 Query 1 80 use `test`; TRUNCATE TABLE t1 -master-bin.000001 414 Xid 1 441 COMMIT /* xid= */ -master-bin.000001 441 Query 1 517 use `test`; DROP TABLE t1 +master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a INT, b LONG) ENGINE=InnoDB +master-bin.000001 # Query # # use `test`; INSERT INTO t1 VALUES (1,1), (2,2) +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Query # # use `test`; TRUNCATE TABLE t1 +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Query # # use `test`; DROP TABLE t1 **** On Master **** SET SESSION BINLOG_FORMAT=MIXED; SET GLOBAL BINLOG_FORMAT=MIXED; @@ -63,15 +62,14 @@ SELECT * FROM t1; a b **** On Master **** DROP TABLE t1; -SHOW BINLOG EVENTS; +show binlog events 
from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 4 Format_desc 1 102 Server ver: SERVER_VERSION, Binlog ver: 4 -master-bin.000001 102 Query 1 210 use `test`; CREATE TABLE t1 (a INT, b LONG) ENGINE=InnoDB -master-bin.000001 210 Query 1 97 use `test`; INSERT INTO t1 VALUES (1,1), (2,2) -master-bin.000001 307 Xid 1 334 COMMIT /* xid= */ -master-bin.000001 334 Query 1 80 use `test`; TRUNCATE TABLE t1 -master-bin.000001 414 Xid 1 441 COMMIT /* xid= */ -master-bin.000001 441 Query 1 517 use `test`; DROP TABLE t1 +master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a INT, b LONG) ENGINE=InnoDB +master-bin.000001 # Query # # use `test`; INSERT INTO t1 VALUES (1,1), (2,2) +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Query # # use `test`; TRUNCATE TABLE t1 +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Query # # use `test`; DROP TABLE t1 **** On Master **** SET SESSION BINLOG_FORMAT=ROW; SET GLOBAL BINLOG_FORMAT=ROW; @@ -97,16 +95,15 @@ SELECT * FROM t1; a b **** On Master **** DROP TABLE t1; -SHOW BINLOG EVENTS; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 4 Format_desc 1 102 Server ver: SERVER_VERSION, Binlog ver: 4 -master-bin.000001 102 Query 1 210 use `test`; CREATE TABLE t1 (a INT, b LONG) ENGINE=InnoDB -master-bin.000001 210 Table_map 1 40 table_id: # (test.t1) -master-bin.000001 250 Write_rows 1 87 table_id: # flags: STMT_END_F -master-bin.000001 297 Xid 1 324 COMMIT /* xid= */ -master-bin.000001 324 Query 1 80 use `test`; TRUNCATE TABLE t1 -master-bin.000001 404 Xid 1 431 COMMIT /* xid= */ -master-bin.000001 431 Query 1 507 use `test`; DROP TABLE t1 +master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a INT, b LONG) ENGINE=InnoDB +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Query # # use `test`; TRUNCATE TABLE t1 +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Query # # use `test`; DROP TABLE t1 **** On Master **** SET SESSION BINLOG_FORMAT=STATEMENT; SET GLOBAL BINLOG_FORMAT=STATEMENT; @@ -132,15 +129,14 @@ SELECT * FROM t1; a b **** On Master **** DROP TABLE t1; -SHOW BINLOG EVENTS; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 4 Format_desc 1 102 Server ver: SERVER_VERSION, Binlog ver: 4 -master-bin.000001 102 Query 1 210 use `test`; CREATE TABLE t1 (a INT, b LONG) ENGINE=InnoDB -master-bin.000001 210 Query 1 97 use `test`; INSERT INTO t1 VALUES (1,1), (2,2) -master-bin.000001 307 Xid 1 334 COMMIT /* xid= */ -master-bin.000001 334 Query 1 77 use `test`; DELETE FROM t1 -master-bin.000001 411 Xid 1 438 COMMIT /* xid= */ -master-bin.000001 438 Query 1 514 use `test`; DROP TABLE t1 +master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a INT, b LONG) ENGINE=InnoDB +master-bin.000001 # Query # # use `test`; INSERT INTO t1 VALUES (1,1), (2,2) +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Query # # use `test`; DELETE FROM t1 +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Query # # use `test`; DROP TABLE t1 **** On Master **** SET SESSION BINLOG_FORMAT=MIXED; SET GLOBAL BINLOG_FORMAT=MIXED; @@ -166,15 +162,14 @@ SELECT * FROM t1; a b **** On Master **** DROP TABLE t1; -SHOW BINLOG EVENTS; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id 
End_log_pos Info -master-bin.000001 4 Format_desc 1 102 Server ver: SERVER_VERSION, Binlog ver: 4 -master-bin.000001 102 Query 1 210 use `test`; CREATE TABLE t1 (a INT, b LONG) ENGINE=InnoDB -master-bin.000001 210 Query 1 97 use `test`; INSERT INTO t1 VALUES (1,1), (2,2) -master-bin.000001 307 Xid 1 334 COMMIT /* xid= */ -master-bin.000001 334 Query 1 77 use `test`; DELETE FROM t1 -master-bin.000001 411 Xid 1 438 COMMIT /* xid= */ -master-bin.000001 438 Query 1 514 use `test`; DROP TABLE t1 +master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a INT, b LONG) ENGINE=InnoDB +master-bin.000001 # Query # # use `test`; INSERT INTO t1 VALUES (1,1), (2,2) +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Query # # use `test`; DELETE FROM t1 +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Query # # use `test`; DROP TABLE t1 **** On Master **** SET SESSION BINLOG_FORMAT=ROW; SET GLOBAL BINLOG_FORMAT=ROW; @@ -201,14 +196,13 @@ a b 3 3 **** On Master **** DROP TABLE t1; -SHOW BINLOG EVENTS; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 4 Format_desc 1 102 Server ver: SERVER_VERSION, Binlog ver: 4 -master-bin.000001 102 Query 1 210 use `test`; CREATE TABLE t1 (a INT, b LONG) ENGINE=InnoDB -master-bin.000001 210 Table_map 1 40 table_id: # (test.t1) -master-bin.000001 250 Write_rows 1 87 table_id: # flags: STMT_END_F -master-bin.000001 297 Xid 1 324 COMMIT /* xid= */ -master-bin.000001 324 Table_map 1 40 table_id: # (test.t1) -master-bin.000001 364 Delete_rows 1 87 table_id: # flags: STMT_END_F -master-bin.000001 411 Xid 1 438 COMMIT /* xid= */ -master-bin.000001 438 Query 1 514 use `test`; DROP TABLE t1 +master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a INT, b LONG) ENGINE=InnoDB +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Delete_rows # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Query # # use `test`; DROP TABLE t1 diff --git a/mysql-test/r/rpl_truncate_7ndb.result b/mysql-test/r/rpl_truncate_7ndb.result index 63d4b0f9411..62ace911e45 100644 --- a/mysql-test/r/rpl_truncate_7ndb.result +++ b/mysql-test/r/rpl_truncate_7ndb.result @@ -29,16 +29,17 @@ a b DROP TABLE t1; SHOW BINLOG EVENTS; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 4 Format_desc 1 102 Server ver: SERVER_VERSION, Binlog ver: 4 -master-bin.000001 102 Query 1 219 use `test`; CREATE TABLE t1 (a INT PRIMARY KEY, b LONG) ENGINE=NDB -master-bin.000001 219 Query 1 283 BEGIN -master-bin.000001 283 Table_map 1 40 table_id: # (test.t1) -master-bin.000001 323 Table_map 1 95 table_id: # (mysql.ndb_apply_status) -master-bin.000001 378 Write_rows 1 137 table_id: # -master-bin.000001 420 Write_rows 1 184 table_id: # flags: STMT_END_F -master-bin.000001 467 Query 1 532 COMMIT -master-bin.000001 532 Query 1 612 use `test`; TRUNCATE TABLE t1 -master-bin.000001 612 Query 1 688 use `test`; DROP TABLE t1 +master-bin.000001 4 Format_desc 1 106 Server ver: SERVER_VERSION, Binlog ver: 4 +master-bin.000001 106 Query 1 223 use `test`; CREATE TABLE t1 (a INT PRIMARY KEY, b LONG) ENGINE=NDB +master-bin.000001 223 Query 1 287 BEGIN +master-bin.000001 287 Table_map 1 40 table_id: # (test.t1) +master-bin.000001 327 Table_map 1 98 table_id: # (mysql.ndb_apply_status) 
+master-bin.000001 385 Write_rows 1 157 table_id: # +master-bin.000001 444 Write_rows 1 195 table_id: # +master-bin.000001 482 Write_rows 1 233 table_id: # flags: STMT_END_F +master-bin.000001 520 Query 1 585 COMMIT +master-bin.000001 585 Query 1 665 use `test`; TRUNCATE TABLE t1 +master-bin.000001 665 Query 1 741 use `test`; DROP TABLE t1 **** On Master **** CREATE TABLE t1 (a INT PRIMARY KEY, b LONG) ENGINE=NDB; INSERT INTO t1 VALUES (1,1), (2,2); @@ -65,27 +66,30 @@ a b DROP TABLE t1; SHOW BINLOG EVENTS; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 4 Format_desc 1 102 Server ver: SERVER_VERSION, Binlog ver: 4 -master-bin.000001 102 Query 1 219 use `test`; CREATE TABLE t1 (a INT PRIMARY KEY, b LONG) ENGINE=NDB -master-bin.000001 219 Query 1 283 BEGIN -master-bin.000001 283 Table_map 1 40 table_id: # (test.t1) -master-bin.000001 323 Table_map 1 95 table_id: # (mysql.ndb_apply_status) -master-bin.000001 378 Write_rows 1 137 table_id: # -master-bin.000001 420 Write_rows 1 184 table_id: # flags: STMT_END_F -master-bin.000001 467 Query 1 532 COMMIT -master-bin.000001 532 Query 1 612 use `test`; TRUNCATE TABLE t1 -master-bin.000001 612 Query 1 688 use `test`; DROP TABLE t1 -master-bin.000001 688 Query 1 805 use `test`; CREATE TABLE t1 (a INT PRIMARY KEY, b LONG) ENGINE=NDB -master-bin.000001 805 Query 1 869 BEGIN -master-bin.000001 869 Table_map 1 40 table_id: # (test.t1) -master-bin.000001 909 Table_map 1 95 table_id: # (mysql.ndb_apply_status) -master-bin.000001 964 Write_rows 1 137 table_id: # -master-bin.000001 1006 Write_rows 1 184 table_id: # flags: STMT_END_F -master-bin.000001 1053 Query 1 1118 COMMIT -master-bin.000001 1118 Query 1 1182 BEGIN -master-bin.000001 1182 Table_map 1 40 table_id: # (test.t1) -master-bin.000001 1222 Table_map 1 95 table_id: # (mysql.ndb_apply_status) -master-bin.000001 1277 Write_rows 1 137 table_id: # -master-bin.000001 1319 Delete_rows 1 176 table_id: # flags: STMT_END_F -master-bin.000001 1358 Query 1 1423 COMMIT -master-bin.000001 1423 Query 1 1499 use `test`; DROP TABLE t1 +master-bin.000001 4 Format_desc 1 106 Server ver: SERVER_VERSION, Binlog ver: 4 +master-bin.000001 106 Query 1 223 use `test`; CREATE TABLE t1 (a INT PRIMARY KEY, b LONG) ENGINE=NDB +master-bin.000001 223 Query 1 287 BEGIN +master-bin.000001 287 Table_map 1 40 table_id: # (test.t1) +master-bin.000001 327 Table_map 1 98 table_id: # (mysql.ndb_apply_status) +master-bin.000001 385 Write_rows 1 157 table_id: # +master-bin.000001 444 Write_rows 1 195 table_id: # +master-bin.000001 482 Write_rows 1 233 table_id: # flags: STMT_END_F +master-bin.000001 520 Query 1 585 COMMIT +master-bin.000001 585 Query 1 665 use `test`; TRUNCATE TABLE t1 +master-bin.000001 665 Query 1 741 use `test`; DROP TABLE t1 +master-bin.000001 741 Query 1 858 use `test`; CREATE TABLE t1 (a INT PRIMARY KEY, b LONG) ENGINE=NDB +master-bin.000001 858 Query 1 922 BEGIN +master-bin.000001 922 Table_map 1 40 table_id: # (test.t1) +master-bin.000001 962 Table_map 1 98 table_id: # (mysql.ndb_apply_status) +master-bin.000001 1020 Write_rows 1 157 table_id: # +master-bin.000001 1079 Write_rows 1 195 table_id: # +master-bin.000001 1117 Write_rows 1 233 table_id: # flags: STMT_END_F +master-bin.000001 1155 Query 1 1220 COMMIT +master-bin.000001 1220 Query 1 1284 BEGIN +master-bin.000001 1284 Table_map 1 40 table_id: # (test.t1) +master-bin.000001 1324 Table_map 1 98 table_id: # (mysql.ndb_apply_status) +master-bin.000001 1382 Write_rows 1 157 table_id: # +master-bin.000001 1441 Delete_rows 1 191 table_id: 
# +master-bin.000001 1475 Delete_rows 1 225 table_id: # flags: STMT_END_F +master-bin.000001 1509 Query 1 1574 COMMIT +master-bin.000001 1574 Query 1 1650 use `test`; DROP TABLE t1 diff --git a/mysql-test/r/rpl_udf.result b/mysql-test/r/rpl_udf.result new file mode 100644 index 00000000000..6587632bca0 --- /dev/null +++ b/mysql-test/r/rpl_udf.result @@ -0,0 +1,310 @@ +stop slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +reset master; +reset slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +start slave; +set binlog_format=row; +drop table if exists t1; +"*** Test 1) Test UDFs via loadable libraries *** +"Running on the master" +CREATE FUNCTION myfunc_double RETURNS REAL SONAME "UDF_EXAMPLE_LIB"; +affected rows: 0 +CREATE FUNCTION myfunc_int RETURNS INTEGER SONAME "UDF_EXAMPLE_LIB"; +affected rows: 0 +CREATE FUNCTION myfunc_nonexist RETURNS INTEGER SONAME "UDF_EXAMPLE_LIB"; +ERROR HY000: Can't find symbol 'myfunc_nonexist' in library +SELECT * FROM mysql.func; +name ret dl type +myfunc_double 1 UDF_LIB function +myfunc_int 2 UDF_LIB function +affected rows: 2 +"Running on the slave" +SELECT * FROM mysql.func; +name ret dl type +myfunc_double 1 UDF_LIB function +myfunc_int 2 UDF_LIB function +affected rows: 2 +"Running on the master" +CREATE TABLE t1(sum INT, price FLOAT(24)) ENGINE=MyISAM; +affected rows: 0 +INSERT INTO t1 VALUES(myfunc_int(100), myfunc_double(50.00)); +affected rows: 1 +INSERT INTO t1 VALUES(myfunc_int(10), myfunc_double(5.00)); +affected rows: 1 +INSERT INTO t1 VALUES(myfunc_int(200), myfunc_double(25.00)); +affected rows: 1 +INSERT INTO t1 VALUES(myfunc_int(1), myfunc_double(500.00)); +affected rows: 1 +SELECT * FROM t1 ORDER BY sum; +sum price +1 48.5 +10 48.75 +100 48.6 +200 49 +affected rows: 4 +"Running on the slave" +SELECT * FROM t1 ORDER BY sum; +sum price +1 48.5 +10 48.75 +100 48.6 +200 49 +affected rows: 4 +SELECT myfunc_int(25); +myfunc_int(25) +25 +affected rows: 1 +SELECT myfunc_double(75.00); +myfunc_double(75.00) +50.00 +affected rows: 1 +"Running on the master" +DROP FUNCTION myfunc_double; +affected rows: 0 +DROP FUNCTION myfunc_int; +affected rows: 0 +SELECT * FROM mysql.func; +name ret dl type +affected rows: 0 +"Running on the slave" +SELECT * FROM mysql.func; +name ret dl type +affected rows: 0 +"Running on the master" +DROP TABLE t1; +affected rows: 0 +"*** Test 2) Test UDFs with SQL body *** +"Running on the master" +CREATE FUNCTION myfuncsql_int(i INT) RETURNS INTEGER DETERMINISTIC RETURN i; +affected rows: 0 +CREATE FUNCTION myfuncsql_double(d DOUBLE) RETURNS INTEGER DETERMINISTIC RETURN d * 2.00; +affected rows: 0 +SELECT db, name, type, param_list, body, comment FROM mysql.proc WHERE db = 'test' AND name LIKE 'myfuncsql%'; +db name type param_list body comment +test myfuncsql_double FUNCTION d DOUBLE RETURN d * 2.00 +test myfuncsql_int FUNCTION i INT RETURN i +affected rows: 2 +"Running on the slave" +SELECT db, name, type, param_list, body, comment FROM mysql.proc WHERE db = 'test' AND name LIKE 'myfuncsql%'; +db name type param_list body comment +test myfuncsql_double FUNCTION d DOUBLE RETURN d * 2.00 +test myfuncsql_int FUNCTION i INT RETURN i +affected rows: 2 +"Running on the master" +CREATE TABLE t1(sum INT, price FLOAT(24)) ENGINE=MyISAM; +affected rows: 0 +INSERT INTO t1 VALUES(myfuncsql_int(100), myfuncsql_double(50.00)); +affected rows: 1 +INSERT INTO t1 VALUES(myfuncsql_int(10), myfuncsql_double(5.00)); +affected rows: 1 +INSERT INTO t1 VALUES(myfuncsql_int(200), myfuncsql_double(25.00)); +affected rows: 1 +INSERT 
INTO t1 VALUES(myfuncsql_int(1), myfuncsql_double(500.00)); +affected rows: 1 +SELECT * FROM t1 ORDER BY sum; +sum price +1 1000 +10 10 +100 100 +200 50 +affected rows: 4 +"Running on the slave" +SELECT * FROM t1 ORDER BY sum; +sum price +1 1000 +10 10 +100 100 +200 50 +affected rows: 4 +"Running on the master" +ALTER FUNCTION myfuncsql_int COMMENT "This was altered."; +affected rows: 0 +ALTER FUNCTION myfuncsql_double COMMENT "This was altered."; +affected rows: 0 +SELECT db, name, type, param_list, body, comment FROM mysql.proc WHERE db = 'test' AND name LIKE 'myfuncsql%'; +db name type param_list body comment +test myfuncsql_double FUNCTION d DOUBLE RETURN d * 2.00 This was altered. +test myfuncsql_int FUNCTION i INT RETURN i This was altered. +affected rows: 2 +"Running on the slave" +SELECT db, name, type, param_list, body, comment FROM mysql.proc WHERE db = 'test' AND name LIKE 'myfuncsql%'; +db name type param_list body comment +test myfuncsql_double FUNCTION d DOUBLE RETURN d * 2.00 This was altered. +test myfuncsql_int FUNCTION i INT RETURN i This was altered. +affected rows: 2 +SELECT myfuncsql_int(25); +myfuncsql_int(25) +25 +affected rows: 1 +SELECT myfuncsql_double(75.00); +myfuncsql_double(75.00) +150 +affected rows: 1 +"Running on the master" +DROP FUNCTION myfuncsql_double; +affected rows: 0 +DROP FUNCTION myfuncsql_int; +affected rows: 0 +SELECT db, name, type, param_list, body, comment FROM mysql.proc WHERE db = 'test' AND name LIKE 'myfuncsql%'; +db name type param_list body comment +affected rows: 0 +"Running on the slave" +SELECT db, name, type, param_list, body, comment FROM mysql.proc WHERE db = 'test' AND name LIKE 'myfuncsql%'; +db name type param_list body comment +affected rows: 0 +"Running on the master" +DROP TABLE t1; +affected rows: 0 +set binlog_format=statement; +drop table if exists t1; +"*** Test 1) Test UDFs via loadable libraries *** +"Running on the master" +CREATE FUNCTION myfunc_double RETURNS REAL SONAME "UDF_EXAMPLE_LIB"; +affected rows: 0 +CREATE FUNCTION myfunc_int RETURNS INTEGER SONAME "UDF_EXAMPLE_LIB"; +affected rows: 0 +CREATE FUNCTION myfunc_nonexist RETURNS INTEGER SONAME "UDF_EXAMPLE_LIB"; +ERROR HY000: Can't find symbol 'myfunc_nonexist' in library +SELECT * FROM mysql.func; +name ret dl type +myfunc_int 2 UDF_LIB function +myfunc_double 1 UDF_LIB function +affected rows: 2 +"Running on the slave" +SELECT * FROM mysql.func; +name ret dl type +myfunc_int 2 UDF_LIB function +myfunc_double 1 UDF_LIB function +affected rows: 2 +"Running on the master" +CREATE TABLE t1(sum INT, price FLOAT(24)) ENGINE=MyISAM; +affected rows: 0 +INSERT INTO t1 VALUES(myfunc_int(100), myfunc_double(50.00)); +affected rows: 1 +INSERT INTO t1 VALUES(myfunc_int(10), myfunc_double(5.00)); +affected rows: 1 +INSERT INTO t1 VALUES(myfunc_int(200), myfunc_double(25.00)); +affected rows: 1 +INSERT INTO t1 VALUES(myfunc_int(1), myfunc_double(500.00)); +affected rows: 1 +SELECT * FROM t1 ORDER BY sum; +sum price +1 48.5 +10 48.75 +100 48.6 +200 49 +affected rows: 4 +"Running on the slave" +SELECT * FROM t1 ORDER BY sum; +sum price +1 48.5 +10 48.75 +100 48.6 +200 49 +affected rows: 4 +SELECT myfunc_int(25); +myfunc_int(25) +25 +affected rows: 1 +SELECT myfunc_double(75.00); +myfunc_double(75.00) +50.00 +affected rows: 1 +"Running on the master" +DROP FUNCTION myfunc_double; +affected rows: 0 +DROP FUNCTION myfunc_int; +affected rows: 0 +SELECT * FROM mysql.func; +name ret dl type +affected rows: 0 +"Running on the slave" +SELECT * FROM mysql.func; +name ret dl type 
+affected rows: 0 +"Running on the master" +DROP TABLE t1; +affected rows: 0 +"*** Test 2) Test UDFs with SQL body *** +"Running on the master" +CREATE FUNCTION myfuncsql_int(i INT) RETURNS INTEGER DETERMINISTIC RETURN i; +affected rows: 0 +CREATE FUNCTION myfuncsql_double(d DOUBLE) RETURNS INTEGER DETERMINISTIC RETURN d * 2.00; +affected rows: 0 +SELECT db, name, type, param_list, body, comment FROM mysql.proc WHERE db = 'test' AND name LIKE 'myfuncsql%'; +db name type param_list body comment +test myfuncsql_double FUNCTION d DOUBLE RETURN d * 2.00 +test myfuncsql_int FUNCTION i INT RETURN i +affected rows: 2 +"Running on the slave" +SELECT db, name, type, param_list, body, comment FROM mysql.proc WHERE db = 'test' AND name LIKE 'myfuncsql%'; +db name type param_list body comment +test myfuncsql_double FUNCTION d DOUBLE RETURN d * 2.00 +test myfuncsql_int FUNCTION i INT RETURN i +affected rows: 2 +"Running on the master" +CREATE TABLE t1(sum INT, price FLOAT(24)) ENGINE=MyISAM; +affected rows: 0 +INSERT INTO t1 VALUES(myfuncsql_int(100), myfuncsql_double(50.00)); +affected rows: 1 +INSERT INTO t1 VALUES(myfuncsql_int(10), myfuncsql_double(5.00)); +affected rows: 1 +INSERT INTO t1 VALUES(myfuncsql_int(200), myfuncsql_double(25.00)); +affected rows: 1 +INSERT INTO t1 VALUES(myfuncsql_int(1), myfuncsql_double(500.00)); +affected rows: 1 +SELECT * FROM t1 ORDER BY sum; +sum price +1 1000 +10 10 +100 100 +200 50 +affected rows: 4 +"Running on the slave" +SELECT * FROM t1 ORDER BY sum; +sum price +1 1000 +10 10 +100 100 +200 50 +affected rows: 4 +"Running on the master" +ALTER FUNCTION myfuncsql_int COMMENT "This was altered."; +affected rows: 0 +ALTER FUNCTION myfuncsql_double COMMENT "This was altered."; +affected rows: 0 +SELECT db, name, type, param_list, body, comment FROM mysql.proc WHERE db = 'test' AND name LIKE 'myfuncsql%'; +db name type param_list body comment +test myfuncsql_double FUNCTION d DOUBLE RETURN d * 2.00 This was altered. +test myfuncsql_int FUNCTION i INT RETURN i This was altered. +affected rows: 2 +"Running on the slave" +SELECT db, name, type, param_list, body, comment FROM mysql.proc WHERE db = 'test' AND name LIKE 'myfuncsql%'; +db name type param_list body comment +test myfuncsql_double FUNCTION d DOUBLE RETURN d * 2.00 This was altered. +test myfuncsql_int FUNCTION i INT RETURN i This was altered. +affected rows: 2 +SELECT myfuncsql_int(25); +myfuncsql_int(25) +25 +affected rows: 1 +SELECT myfuncsql_double(75.00); +myfuncsql_double(75.00) +150 +affected rows: 1 +"Running on the master" +DROP FUNCTION myfuncsql_double; +affected rows: 0 +DROP FUNCTION myfuncsql_int; +affected rows: 0 +SELECT db, name, type, param_list, body, comment FROM mysql.proc WHERE db = 'test' AND name LIKE 'myfuncsql%'; +db name type param_list body comment +affected rows: 0 +"Running on the slave" +SELECT db, name, type, param_list, body, comment FROM mysql.proc WHERE db = 'test' AND name LIKE 'myfuncsql%'; +db name type param_list body comment +affected rows: 0 +"Running on the master" +DROP TABLE t1; +affected rows: 0 diff --git a/mysql-test/r/sp-code.result b/mysql-test/r/sp-code.result index 67b030f87a4..588f4329368 100644 --- a/mysql-test/r/sp-code.result +++ b/mysql-test/r/sp-code.result @@ -621,3 +621,20 @@ Pos Instruction 0 stmt 2 "CREATE INDEX idx ON t1 (c1)" DROP PROCEDURE p1; End of 5.0 tests. 
+CREATE PROCEDURE p1() +BEGIN +DECLARE dummy int default 0; +CASE 12 +WHEN 12 +THEN SET dummy = 0; +END CASE; +END// +SHOW PROCEDURE CODE p1; +Pos Instruction +0 set dummy@0 0 +1 set_case_expr (6) 0 12 +2 jump_if_not 5(6) (case_expr@0 = 12) +3 set dummy@0 0 +4 jump 6 +5 error 1339 +DROP PROCEDURE p1; diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result index 1372ac3687c..089fb10aaae 100644 --- a/mysql-test/r/subselect.result +++ b/mysql-test/r/subselect.result @@ -3712,12 +3712,6 @@ bb 2 cc 3 dd 1 DROP TABLE t1,t2,t3; -CREATE TABLE t1 (s1 char(1)); -INSERT INTO t1 VALUES ('a'); -SELECT * FROM t1 WHERE _utf8'a' = ANY (SELECT s1 FROM t1); -s1 -a -DROP TABLE t1; CREATE TABLE t1(f1 int); CREATE TABLE t2(f2 int, f21 int, f3 timestamp); INSERT INTO t1 VALUES (1),(1),(2),(2); @@ -3886,3 +3880,76 @@ this is a test. 3 this is a test. 1 this is a test. 2 DROP table t1; +CREATE TABLE t1 (a int, b int); +CREATE TABLE t2 (m int, n int); +INSERT INTO t1 VALUES (2,2), (2,2), (3,3), (3,3), (3,3), (4,4); +INSERT INTO t2 VALUES (1,11), (2,22), (3,32), (4,44), (4,44); +SELECT COUNT(*), a, +(SELECT m FROM t2 WHERE m = count(*) LIMIT 1) +FROM t1 GROUP BY a; +COUNT(*) a (SELECT m FROM t2 WHERE m = count(*) LIMIT 1) +2 2 2 +3 3 3 +1 4 1 +SELECT COUNT(*), a, +(SELECT MIN(m) FROM t2 WHERE m = count(*)) +FROM t1 GROUP BY a; +COUNT(*) a (SELECT MIN(m) FROM t2 WHERE m = count(*)) +2 2 2 +3 3 3 +1 4 1 +SELECT COUNT(*), a +FROM t1 GROUP BY a +HAVING (SELECT MIN(m) FROM t2 WHERE m = count(*)) > 1; +COUNT(*) a +2 2 +3 3 +DROP TABLE t1,t2; +CREATE TABLE t1 (a int, b int); +CREATE TABLE t2 (m int, n int); +INSERT INTO t1 VALUES (2,2), (2,2), (3,3), (3,3), (3,3), (4,4); +INSERT INTO t2 VALUES (1,11), (2,22), (3,32), (4,44), (4,44); +SELECT COUNT(*) c, a, +(SELECT GROUP_CONCAT(COUNT(a)) FROM t2 WHERE m = a) +FROM t1 GROUP BY a; +c a (SELECT GROUP_CONCAT(COUNT(a)) FROM t2 WHERE m = a) +2 2 2 +3 3 3 +1 4 1,1 +SELECT COUNT(*) c, a, +(SELECT GROUP_CONCAT(COUNT(a)+1) FROM t2 WHERE m = a) +FROM t1 GROUP BY a; +c a (SELECT GROUP_CONCAT(COUNT(a)+1) FROM t2 WHERE m = a) +2 2 3 +3 3 4 +1 4 2,2 +DROP table t1,t2; +CREATE TABLE t1 (a int, b int); +INSERT INTO t1 VALUES (2,22),(1,11),(2,22); +SELECT a FROM t1 WHERE (SELECT COUNT(b) FROM DUAL) > 0 GROUP BY a; +a +1 +2 +SELECT a FROM t1 WHERE (SELECT COUNT(b) FROM DUAL) > 1 GROUP BY a; +a +SELECT a FROM t1 t0 +WHERE (SELECT COUNT(t0.b) FROM t1 t WHERE t.b>20) GROUP BY a; +a +1 +2 +SET @@sql_mode='ansi'; +SELECT a FROM t1 WHERE (SELECT COUNT(b) FROM DUAL) > 0 GROUP BY a; +ERROR HY000: Invalid use of group function +SELECT a FROM t1 WHERE (SELECT COUNT(b) FROM DUAL) > 1 GROUP BY a; +ERROR HY000: Invalid use of group function +SELECT a FROM t1 t0 +WHERE (SELECT COUNT(t0.b) FROM t1 t WHERE t.b>20) GROUP BY a; +ERROR HY000: Invalid use of group function +SET @@sql_mode=default; +DROP TABLE t1; +CREATE TABLE t1 (s1 char(1)); +INSERT INTO t1 VALUES ('a'); +SELECT * FROM t1 WHERE _utf8'a' = ANY (SELECT s1 FROM t1); +s1 +a +DROP TABLE t1; diff --git a/mysql-test/r/subselect3.result b/mysql-test/r/subselect3.result index 03c35d51045..4b8ece1c931 100644 --- a/mysql-test/r/subselect3.result +++ b/mysql-test/r/subselect3.result @@ -645,3 +645,50 @@ a b Z 2 2 0 3 3 1 drop table t1,t2; +CREATE TABLE t1 (a int, b INT, c CHAR(10) NOT NULL, PRIMARY KEY (a, b)); +INSERT INTO t1 VALUES (1,1,'a'), (1,2,'b'), (1,3,'c'), (1,4,'d'), (1,5,'e'), +(2,1,'f'), (2,2,'g'), (2,3,'h'), (3,4,'i'),(3,3,'j'), (3,2,'k'), (3,1,'l'), +(1,9,'m'); +CREATE TABLE t2 (a int, b INT, c CHAR(10) NOT NULL, PRIMARY 
KEY (a, b)); +INSERT INTO t2 SELECT * FROM t1; +SELECT a, MAX(b), (SELECT t.c FROM t1 AS t WHERE t1.a=t.a AND t.b=MAX(t1.b)) +as test FROM t1 GROUP BY a; +a MAX(b) test +1 9 m +2 3 h +3 4 i +SELECT * FROM t1 GROUP by t1.a +HAVING (MAX(t1.b) > (SELECT MAX(t2.b) FROM t2 WHERE t2.c < t1.c +HAVING MAX(t2.b+t1.a) < 10)); +a b c +SELECT a,b,c FROM t1 WHERE b in (9,3,4) ORDER BY b,c; +a b c +1 3 c +2 3 h +3 3 j +1 4 d +3 4 i +1 9 m +SELECT a, MAX(b), +(SELECT COUNT(DISTINCT t.c) FROM t1 AS t WHERE t1.a=t.a AND t.b=MAX(t1.b) +LIMIT 1) +as cnt, +(SELECT t.b FROM t1 AS t WHERE t1.a=t.a AND t.b=MAX(t1.b) LIMIT 1) +as t_b, +(SELECT t.c FROM t1 AS t WHERE t1.a=t.a AND t.b=MAX(t1.b) LIMIT 1) +as t_b, +(SELECT t.c FROM t1 AS t WHERE t1.a=t.a AND t.b=MAX(t1.b) ORDER BY t.c LIMIT 1) +as t_b +FROM t1 GROUP BY a; +a MAX(b) cnt t_b t_b t_b +1 9 1 9 m m +2 3 1 3 h h +3 4 1 4 i i +SELECT a, MAX(b), +(SELECT t.c FROM t1 AS t WHERE t1.a=t.a AND t.b=MAX(t1.b) LIMIT 1) as test +FROM t1 GROUP BY a; +a MAX(b) test +1 9 m +2 3 h +3 4 i +DROP TABLE t1, t2; diff --git a/mysql-test/r/system_mysql_db.result b/mysql-test/r/system_mysql_db.result index 29223cd3061..5acbace480a 100644 --- a/mysql-test/r/system_mysql_db.result +++ b/mysql-test/r/system_mysql_db.result @@ -220,10 +220,11 @@ event CREATE TABLE `event` ( `last_executed` datetime DEFAULT NULL, `starts` datetime DEFAULT NULL, `ends` datetime DEFAULT NULL, - `status` enum('ENABLED','DISABLED') NOT NULL DEFAULT 'ENABLED', + `status` enum('ENABLED','DISABLED','SLAVESIDE_DISABLED') NOT NULL DEFAULT 'ENABLED', `on_completion` enum('DROP','PRESERVE') NOT NULL DEFAULT 'DROP', `sql_mode` set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','NOT_USED','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE') NOT NULL DEFAULT '', `comment` char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '', + `originator` int(10) NOT NULL, `time_zone` char(64) CHARACTER SET latin1 NOT NULL DEFAULT 'SYSTEM', PRIMARY KEY (`db`,`name`) ) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='Events' diff --git a/mysql-test/r/temp_table.result b/mysql-test/r/temp_table.result index 6c9a389c1f4..9cb4a6b2427 100644 --- a/mysql-test/r/temp_table.result +++ b/mysql-test/r/temp_table.result @@ -152,6 +152,27 @@ SELECT * FROM t1; i DROP TABLE t1; End of 4.1 tests. 
+CREATE TABLE t1 ( c FLOAT( 20, 14 ) ); +INSERT INTO t1 VALUES( 12139 ); +CREATE TABLE t2 ( c FLOAT(30,18) ); +INSERT INTO t2 VALUES( 123456 ); +SELECT AVG( c ) FROM t1 UNION SELECT 1; +AVG( c ) +12139 +1 +SELECT 1 UNION SELECT AVG( c ) FROM t1; +1 +1 +12139 +SELECT 1 UNION SELECT * FROM t2 UNION SELECT 1; +1 +1 +123456 +SELECT c/1 FROM t1 UNION SELECT 1; +c/1 +12139 +1 +DROP TABLE t1, t2; create temporary table t1 (a int); insert into t1 values (4711); select * from t1; diff --git a/mysql-test/r/type_datetime.result b/mysql-test/r/type_datetime.result index a8d5388097d..b54bd155c7d 100644 --- a/mysql-test/r/type_datetime.result +++ b/mysql-test/r/type_datetime.result @@ -168,6 +168,9 @@ dt 0000-00-00 00:00:00 0000-00-00 00:00:00 drop table t1; +select cast('2006-12-05 22:10:10' as datetime) + 0; +cast('2006-12-05 22:10:10' as datetime) + 0 +20061205221010.000000 CREATE TABLE t1(a DATETIME NOT NULL); INSERT INTO t1 VALUES ('20060606155555'); SELECT a FROM t1 WHERE a=(SELECT MAX(a) FROM t1) AND (a="20060606155555"); diff --git a/mysql-test/r/type_float.result b/mysql-test/r/type_float.result index dbe60aff3d9..ac1270d33a0 100644 --- a/mysql-test/r/type_float.result +++ b/mysql-test/r/type_float.result @@ -92,7 +92,7 @@ show create table t2; Table Create Table t2 CREATE TABLE `t2` ( `col1` double DEFAULT NULL, - `col2` double(53,5) DEFAULT NULL, + `col2` double(22,5) DEFAULT NULL, `col3` double DEFAULT NULL, `col4` double DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 @@ -232,12 +232,12 @@ insert into t2 values ("1.23456780"); create table t3 select * from t2 union select * from t1; select * from t3; d -1.234567800 -100000000.000000000 +1.2345678 +100000000 show create table t3; Table Create Table t3 CREATE TABLE `t3` ( - `d` double(22,9) DEFAULT NULL + `d` double DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1, t2, t3; create table t1 select 105213674794682365.00 + 0.0 x; diff --git a/mysql-test/r/type_newdecimal.result b/mysql-test/r/type_newdecimal.result index 901297288af..f3c7e45de39 100644 --- a/mysql-test/r/type_newdecimal.result +++ b/mysql-test/r/type_newdecimal.result @@ -1428,6 +1428,19 @@ f1 20101112000000.000014 101112.000000 drop table t1; +select cast(19999999999999999999 as unsigned); +cast(19999999999999999999 as unsigned) +18446744073709551615 +Warnings: +Error 1292 Truncated incorrect DECIMAL value: '' +create table t1(a decimal(18)); +insert into t1 values(123456789012345678); +alter table t1 modify column a decimal(19); +select * from t1; +a +123456789012345678 +drop table t1; +End of 5.0 tests select cast(143.481 as decimal(4,1)); cast(143.481 as decimal(4,1)) 143.5 @@ -1455,8 +1468,3 @@ Error 1264 Out of range value for column 'cast(-13.4 as decimal(2,1))' at row 1 select cast(98.6 as decimal(2,0)); cast(98.6 as decimal(2,0)) 99 -select cast(19999999999999999999 as unsigned); -cast(19999999999999999999 as unsigned) -18446744073709551615 -Warnings: -Error 1292 Truncated incorrect DECIMAL value: '' diff --git a/mysql-test/r/union.result b/mysql-test/r/union.result index 2c33ffc08d7..275f3357c65 100644 --- a/mysql-test/r/union.result +++ b/mysql-test/r/union.result @@ -554,7 +554,7 @@ aa show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` varbinary(20) NOT NULL DEFAULT '' + `a` varbinary(2) NOT NULL DEFAULT '' ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; create table t1 SELECT 12 as a UNION select 12.2 as a; @@ -655,7 +655,7 @@ f show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `f` varbinary(24) DEFAULT 
NULL + `f` varbinary(12) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; create table t1 SELECT y from t2 UNION select da from t2; @@ -1437,4 +1437,12 @@ a SELECT a FROM (SELECT a FROM t1 UNION SELECT a FROM t1 ORDER BY c) AS test; ERROR 42S22: Unknown column 'c' in 'order clause' DROP TABLE t1; +(select 1 into @var) union (select 1); +ERROR HY000: Incorrect usage of UNION and INTO +(select 1) union (select 1 into @var); +select @var; +@var +1 +(select 2) union (select 1 into @var); +ERROR 42000: Result consisted of more than one row End of 5.0 tests diff --git a/mysql-test/r/user_var-binlog.result b/mysql-test/r/user_var-binlog.result index 1c50289a85d..b76b399c9e2 100644 --- a/mysql-test/r/user_var-binlog.result +++ b/mysql-test/r/user_var-binlog.result @@ -6,13 +6,13 @@ INSERT INTO t1 VALUES(@`a b`); set @var1= "';aaa"; SET @var2=char(ascii('a')); insert into t1 values (@var1),(@var2); -show binlog events from 102; +show binlog events from <binlog_start>; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # User var 1 # @`a b`=_latin1 0x68656C6C6F COLLATE latin1_swedish_ci -master-bin.000001 # Query 1 # use `test`; INSERT INTO t1 VALUES(@`a b`) -master-bin.000001 # User var 1 # @`var1`=_latin1 0x273B616161 COLLATE latin1_swedish_ci -master-bin.000001 # User var 1 # @`var2`=_binary 0x61 COLLATE binary -master-bin.000001 # Query 1 # use `test`; insert into t1 values (@var1),(@var2) +master-bin.000001 # User var # # @`a b`=_latin1 0x68656C6C6F COLLATE latin1_swedish_ci +master-bin.000001 # Query # # use `test`; INSERT INTO t1 VALUES(@`a b`) +master-bin.000001 # User var # # @`var1`=_latin1 0x273B616161 COLLATE latin1_swedish_ci +master-bin.000001 # User var # # @`var2`=_binary 0x61 COLLATE binary +master-bin.000001 # Query # # use `test`; insert into t1 values (@var1),(@var2) flush logs; /*!40019 SET @@session.max_insert_delayed_threads=0*/; /*!50003 SET @OLD_COMPLETION_TYPE=@@COMPLETION_TYPE,COMPLETION_TYPE=0*/; diff --git a/mysql-test/r/view.result b/mysql-test/r/view.result index 46b09b55565..0fa7cda1187 100644 --- a/mysql-test/r/view.result +++ b/mysql-test/r/view.result @@ -3321,38 +3321,4 @@ DROP TABLE `t-2`; DROP VIEW `v-2`; DROP DATABASE `d-1`; USE test; -DROP VIEW IF EXISTS v1; -DROP TABLE IF EXISTS t1; -CREATE TABLE t1 (i INT); -CREATE VIEW v1 AS SELECT * FROM t1; -ALTER VIEW v1 AS SELECT * FROM t1; -SHOW CREATE VIEW v1; -View Create View -v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`i` AS `i` from `t1` -ALTER DEFINER=no_such@user_1 VIEW v1 AS SELECT * FROM t1; -Warnings: -Note 1449 There is no 'no_such'@'user_1' registered -SHOW CREATE VIEW v1; -View Create View -v1 CREATE ALGORITHM=UNDEFINED DEFINER=`no_such`@`user_1` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`i` AS `i` from `t1` -Warnings: -Note 1449 There is no 'no_such'@'user_1' registered -ALTER ALGORITHM=MERGE VIEW v1 AS SELECT * FROM t1; -Warnings: -Note 1449 There is no 'no_such'@'user_1' registered -SHOW CREATE VIEW v1; -View Create View -v1 CREATE ALGORITHM=MERGE DEFINER=`no_such`@`user_1` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`i` AS `i` from `t1` -Warnings: -Note 1449 There is no 'no_such'@'user_1' registered -ALTER ALGORITHM=TEMPTABLE DEFINER=no_such@user_2 VIEW v1 AS SELECT * FROM t1; -Warnings: -Note 1449 There is no 'no_such'@'user_2' registered -SHOW CREATE VIEW v1; -View Create View -v1 CREATE ALGORITHM=TEMPTABLE DEFINER=`no_such`@`user_2` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`i` AS `i` from `t1` 
-Warnings: -Note 1449 There is no 'no_such'@'user_2' registered -DROP VIEW v1; -DROP TABLE t1; End of 5.1 tests. diff --git a/mysql-test/r/view_grant.result b/mysql-test/r/view_grant.result index a95184b8576..2a0bedc1443 100644 --- a/mysql-test/r/view_grant.result +++ b/mysql-test/r/view_grant.result @@ -775,4 +775,58 @@ DROP DATABASE mysqltest_db1; DROP DATABASE mysqltest_db2; DROP USER mysqltest_u1@localhost; DROP USER mysqltest_u2@localhost; +CREATE DATABASE db26813; +USE db26813; +CREATE TABLE t1(f1 INT, f2 INT); +CREATE VIEW v1 AS SELECT f1 FROM t1; +CREATE VIEW v2 AS SELECT f1 FROM t1; +CREATE VIEW v3 AS SELECT f1 FROM t1; +CREATE USER u26813@localhost; +GRANT DROP ON db26813.v1 TO u26813@localhost; +GRANT CREATE VIEW ON db26813.v2 TO u26813@localhost; +GRANT DROP, CREATE VIEW ON db26813.v3 TO u26813@localhost; +GRANT SELECT ON db26813.t1 TO u26813@localhost; +ALTER VIEW v1 AS SELECT f2 FROM t1; +ERROR 42000: CREATE VIEW command denied to user 'u26813'@'localhost' for table 'v1' +ALTER VIEW v2 AS SELECT f2 FROM t1; +ERROR 42000: DROP command denied to user 'u26813'@'localhost' for table 'v2' +ALTER VIEW v3 AS SELECT f2 FROM t1; +SHOW CREATE VIEW v3; +View Create View +v3 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v3` AS select `t1`.`f2` AS `f2` from `t1` +DROP USER u26813@localhost; +DROP DATABASE db26813; End of 5.0 tests. +DROP VIEW IF EXISTS v1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (i INT); +CREATE VIEW v1 AS SELECT * FROM t1; +ALTER VIEW v1 AS SELECT * FROM t1; +SHOW CREATE VIEW v1; +View Create View +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`i` AS `i` from `t1` +ALTER DEFINER=no_such@user_1 VIEW v1 AS SELECT * FROM t1; +Warnings: +Note 1449 There is no 'no_such'@'user_1' registered +SHOW CREATE VIEW v1; +View Create View +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`no_such`@`user_1` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`i` AS `i` from `t1` +Warnings: +Note 1449 There is no 'no_such'@'user_1' registered +ALTER ALGORITHM=MERGE VIEW v1 AS SELECT * FROM t1; +SHOW CREATE VIEW v1; +View Create View +v1 CREATE ALGORITHM=MERGE DEFINER=`no_such`@`user_1` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`i` AS `i` from `t1` +Warnings: +Note 1449 There is no 'no_such'@'user_1' registered +ALTER ALGORITHM=TEMPTABLE DEFINER=no_such@user_2 VIEW v1 AS SELECT * FROM t1; +Warnings: +Note 1449 There is no 'no_such'@'user_2' registered +SHOW CREATE VIEW v1; +View Create View +v1 CREATE ALGORITHM=TEMPTABLE DEFINER=`no_such`@`user_2` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`i` AS `i` from `t1` +Warnings: +Note 1449 There is no 'no_such'@'user_2' registered +DROP VIEW v1; +DROP TABLE t1; +End of 5.1 tests. 
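The view_grant.result hunk above records the expected behaviour for Bug#26813: ALTER VIEW is accepted only when the account holds both the CREATE VIEW and the DROP privilege on the view being redefined (v1 and v2, each missing one of the two, are rejected; v3, which has both, succeeds). As a rough illustration of that rule only — the database, table, view and account names below are made up for this sketch and do not come from the diff:

# Hypothetical names; only the privilege pattern mirrors the recorded test.
CREATE DATABASE demo;
CREATE TABLE demo.t (f1 INT, f2 INT);
CREATE VIEW demo.v AS SELECT f1 FROM demo.t;
CREATE USER demo_user@localhost;
GRANT SELECT ON demo.t TO demo_user@localhost;
GRANT DROP ON demo.v TO demo_user@localhost;        # DROP alone is not enough
# Connected as demo_user, the next statement is expected to fail with a
# "CREATE VIEW command denied" error, as in the result file above:
#   ALTER VIEW demo.v AS SELECT f2 FROM demo.t;
GRANT CREATE VIEW ON demo.v TO demo_user@localhost; # with both privileges held ...
#   ALTER VIEW demo.v AS SELECT f2 FROM demo.t;     # ... the ALTER VIEW succeeds
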
diff --git a/mysql-test/suite/rpl/r/rpl_innodb_mixed_dml.result b/mysql-test/suite/rpl/r/rpl_innodb_mixed_dml.result index 183c57c0c04..8c625dc70a4 100644 --- a/mysql-test/suite/rpl/r/rpl_innodb_mixed_dml.result +++ b/mysql-test/suite/rpl/r/rpl_innodb_mixed_dml.result @@ -683,12 +683,12 @@ INSERT INTO t1 VALUES(1, 'test1'); CREATE EVENT e1 ON SCHEDULE EVERY '1' SECOND COMMENT 'e_second_comment' DO DELETE FROM t1; ==========MASTER========== SHOW EVENTS; -Db Name Definer Type Execute at Interval value Interval field Starts Ends Status +Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator test_rpl e1 root@localhost RECURRING NULL 1 SECOND # # ENABLED ==========SLAVE=========== USE test_rpl; SHOW EVENTS; -Db Name Definer Type Execute at Interval value Interval field Starts Ends Status +Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator ==========MASTER========== SELECT COUNT(*) FROM t1; COUNT(*) @@ -742,12 +742,12 @@ a b ALTER EVENT e1 RENAME TO e2; ==========MASTER========== SHOW EVENTS; -Db Name Definer Type Execute at Interval value Interval field Starts Ends Status +Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator test_rpl e2 root@localhost RECURRING NULL 1 SECOND # # ENABLED ==========SLAVE=========== USE test_rpl; SHOW EVENTS; -Db Name Definer Type Execute at Interval value Interval field Starts Ends Status +Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator ==========MASTER========== SELECT COUNT(*) FROM t1; COUNT(*) @@ -776,11 +776,11 @@ a b DROP EVENT e2; ==========MASTER========== SHOW EVENTS; -Db Name Definer Type Execute at Interval value Interval field Starts Ends Status +Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator ==========SLAVE=========== USE test_rpl; SHOW EVENTS; -Db Name Definer Type Execute at Interval value Interval field Starts Ends Status +Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator DELETE FROM t1; DELETE FROM t2; diff --git a/mysql-test/t/binlog_row_mix_innodb_myisam.test b/mysql-test/t/binlog_row_mix_innodb_myisam.test index b131e5350af..335a05be146 100644 --- a/mysql-test/t/binlog_row_mix_innodb_myisam.test +++ b/mysql-test/t/binlog_row_mix_innodb_myisam.test @@ -20,7 +20,7 @@ # ER_SERVER_SHUTDOWN (i.e. disconnection just rolls back transaction # and does not make slave to stop) flush logs; ---exec $MYSQL_BINLOG --start-position=516 $MYSQLTEST_VARDIR/log/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/mix_innodb_myisam_binlog.output +--exec $MYSQL_BINLOG --start-position=520 $MYSQLTEST_VARDIR/log/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/mix_innodb_myisam_binlog.output --replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR eval select (@a:=load_file("$MYSQLTEST_VARDIR/tmp/mix_innodb_myisam_binlog.output")) diff --git a/mysql-test/t/binlog_stm_mix_innodb_myisam.test b/mysql-test/t/binlog_stm_mix_innodb_myisam.test index 8d7399b918e..72651c13be7 100644 --- a/mysql-test/t/binlog_stm_mix_innodb_myisam.test +++ b/mysql-test/t/binlog_stm_mix_innodb_myisam.test @@ -12,7 +12,7 @@ # ER_SERVER_SHUTDOWN (i.e. 
disconnection just rolls back transaction # and does not make slave to stop) flush logs; ---exec $MYSQL_BINLOG --start-position=551 $MYSQLTEST_VARDIR/log/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/mix_innodb_myisam_binlog.output +--exec $MYSQL_BINLOG --start-position=555 $MYSQLTEST_VARDIR/log/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/mix_innodb_myisam_binlog.output --replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR eval select (@a:=load_file("$MYSQLTEST_VARDIR/tmp/mix_innodb_myisam_binlog.output")) diff --git a/mysql-test/t/ctype_cp932_binlog_stm.test b/mysql-test/t/ctype_cp932_binlog_stm.test index 9111c4ad369..3e20e123258 100644 --- a/mysql-test/t/ctype_cp932_binlog_stm.test +++ b/mysql-test/t/ctype_cp932_binlog_stm.test @@ -22,7 +22,7 @@ CALL bug18293("Foo's a Bar", _cp932 0xED40ED41ED42, 47.93)| SELECT HEX(s1),HEX(s2),d FROM t4| DROP PROCEDURE bug18293| DROP TABLE t4| -SHOW BINLOG EVENTS FROM 406| +SHOW BINLOG EVENTS FROM 410| delimiter ;| # End of 5.0 tests diff --git a/mysql-test/t/ctype_uca.test b/mysql-test/t/ctype_uca.test index 3e49b9de883..64349bc40a6 100644 --- a/mysql-test/t/ctype_uca.test +++ b/mysql-test/t/ctype_uca.test @@ -475,3 +475,13 @@ ALTER TABLE t1 MODIFY a VARCHAR(30) character set utf8 collate utf8_turkish_ci; SELECT a, length(a) la, @l:=lower(a) l, length(@l) ll, @u:=upper(a) u, length(@u) lu FROM t1 ORDER BY id; DROP TABLE t1; + +# +# Bug #27079 Crash while grouping empty ucs2 strings +# +CREATE TABLE t1 ( + c1 text character set ucs2 collate ucs2_polish_ci NOT NULL +) ENGINE=MyISAM; +insert into t1 values (''),('a'); +SELECT COUNT(*), c1 FROM t1 GROUP BY c1; +DROP TABLE IF EXISTS t1; diff --git a/mysql-test/t/ctype_ucs.test b/mysql-test/t/ctype_ucs.test index 5a3720dc431..c3320159c41 100644 --- a/mysql-test/t/ctype_ucs.test +++ b/mysql-test/t/ctype_ucs.test @@ -573,6 +573,20 @@ drop table t1; deallocate prepare stmt; # +# Bug#22638 SOUNDEX broken for international characters +# +set names latin1; +set character_set_connection=ucs2; +select soundex(''),soundex('he'),soundex('hello all folks'),soundex('#3556 in bugdb'); +select hex(soundex('')),hex(soundex('he')),hex(soundex('hello all folks')),hex(soundex('#3556 in bugdb')); +select 'mood' sounds like 'mud'; +# Cyrillic A, BE, VE +select hex(soundex(_ucs2 0x041004110412)); +# Make sure that "U+00BF INVERTED QUESTION MARK" is not considered as letter +select hex(soundex(_ucs2 0x00BF00C0)); +set names latin1; + +# # Bug #14290: character_maximum_length for text fields # create table t1(a blob, b text charset utf8, c text charset ucs2); diff --git a/mysql-test/t/ctype_utf8.test b/mysql-test/t/ctype_utf8.test index 47a43258be1..c0060b7b813 100644 --- a/mysql-test/t/ctype_utf8.test +++ b/mysql-test/t/ctype_utf8.test @@ -702,6 +702,14 @@ select * from t1 where soundex(a) = soundex('TEST'); select * from t1 where soundex(a) = soundex('test'); drop table t1; +# +# Bug#22638 SOUNDEX broken for international characters +# +select soundex(_utf8 0xE99885E8A788E99A8FE697B6E69BB4E696B0E79A84E696B0E997BB); +select hex(soundex(_utf8 0xE99885E8A788E99A8FE697B6E69BB4E696B0E79A84E696B0E997BB)); +select soundex(_utf8 0xD091D092D093); +select hex(soundex(_utf8 0xD091D092D093)); + SET collation_connection='utf8_general_ci'; -- source include/ctype_filesort.inc diff --git a/mysql-test/t/delayed.test b/mysql-test/t/delayed.test index 6f161581287..a44288da9ec 100644 --- a/mysql-test/t/delayed.test +++ b/mysql-test/t/delayed.test @@ -242,3 +242,13 @@ INSERT DELAYED INTO t1 VALUES(1); FLUSH TABLE t1; SELECT HEX(a) FROM t1; DROP TABLE t1; + 
+# +# Bug#26464 - insert delayed + update + merge = corruption +# +CREATE TABLE t1(c1 INT) ENGINE=MyISAM; +CREATE TABLE t2(c1 INT) ENGINE=MERGE UNION=(t1); +--error 1031 +INSERT DELAYED INTO t2 VALUES(1); +DROP TABLE t1, t2; + diff --git a/mysql-test/t/delete.test b/mysql-test/t/delete.test index 306447dbd5a..36d627209db 100644 --- a/mysql-test/t/delete.test +++ b/mysql-test/t/delete.test @@ -203,3 +203,21 @@ select * from t1 where a is null; delete from t1 where a is null; select count(*) from t1; drop table t1; + +# +# Bug #26186: delete order by, sometimes accept unknown column +# +CREATE TABLE t1 (a INT); INSERT INTO t1 VALUES (1); + +--error ER_BAD_FIELD_ERROR +DELETE FROM t1 ORDER BY x; + +# even columns from a table not used in query (and not even existing) +--error ER_BAD_FIELD_ERROR +DELETE FROM t1 ORDER BY t2.x; + +# subquery (as long as the subquery from is valid or DUAL) +--error ER_BAD_FIELD_ERROR +DELETE FROM t1 ORDER BY (SELECT x); + +DROP TABLE t1; diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def index 913fe8e62ee..fd64becbc00 100644 --- a/mysql-test/t/disabled.def +++ b/mysql-test/t/disabled.def @@ -16,18 +16,17 @@ concurrent_innodb : BUG#21579 2006-08-11 mleich innodb_concurrent random ndb_autodiscover : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog ndb_autodiscover2 : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog ndb_load : BUG#17233 2006-05-04 tomas failed load data from infile causes mysqld dbug_assert, binlog not flushed -ndb_restore_partition : Problem with cluster/def/schema table that is in std_data/ndb_backup51; Pekka will schdule this to someone -rpl_ndb_sync : Problem with cluster/def/schema table that is in std_data/ndb_backup51; Pekka will schdule this to someone partition_03ndb : BUG#16385 2006-03-24 mikael Partitions: crash when updating a range partitioned NDB table rpl_ndb_2innodb : BUG#19227 2006-04-20 pekka pk delete apparently not replicated rpl_ndb_2myisam : BUG#19227 Seems to pass currently rpl_ndb_dd_partitions : BUG#19259 2006-04-21 rpl_ndb_dd_partitions fails on s/AMD rpl_ndb_ddl : BUG#18946 result file needs update + test needs to checked +rpl_ddl : BUG#26418 2007-03-01 mleich Slave out of sync after CREATE/DROP TEMPORARY TABLE + ROLLBACK on master rpl_ndb_innodb2ndb : Bug #19710 Cluster replication to partition table fails on DELETE FROM statement rpl_ndb_myisam2ndb : Bug #19710 Cluster replication to partition table fails on DELETE FROM statement rpl_row_blob_innodb : BUG#18980 2006-04-10 kent Test fails randomly -rpl_multi_engine : BUG#22583 2006-09-23 lars +rpl_udf : BUG#27564 2007-03-31 lars New test case for rpl of UDF shows valgrind failure synchronization : Bug#24529 Test 'synchronization' fails on Mac pushbuild; Also on Linux 64 bit. 
# the below testcase have been reworked to avoid the bug, test contains comment, keep bug open @@ -37,5 +36,3 @@ synchronization : Bug#24529 Test 'synchronization' fails on Mac pushb plugin : Bug#25659 memory leak via "plugins" test rpl_ndb_dd_advance : Bug#25913 rpl_ndb_dd_advance fails randomly -ndb_alter_table : Bug##25774 ndb_alter_table.test fails in DBUG_ASSERT() on Linux x64 -ndb_single_user : Bug#27021 Error codes in mysqld in single user mode varies diff --git a/mysql-test/t/events.test b/mysql-test/t/events.test index c8054e49da4..575f9984a79 100644 --- a/mysql-test/t/events.test +++ b/mysql-test/t/events.test @@ -185,7 +185,7 @@ set names cp1251; create event ðóóò21 on schedule every '50:23:59:95' day_second COMMENT 'òîâà å 1251 êîìåíòàð' do select 1; --replace_regex /STARTS '[^']+'/STARTS '#'/ SHOW CREATE EVENT ðóóò21; -insert into mysql.event (db, name, body, definer, interval_value, interval_field) values (database(), "root22", "select 1", user(), 100, "SECOND_MICROSECOND"); +insert into mysql.event (db, name, body, definer, interval_value, interval_field, originator) values (database(), "root22", "select 1", user(), 100, "SECOND_MICROSECOND", 1); --error ER_NOT_SUPPORTED_YET show create event root22; --error ER_NOT_SUPPORTED_YET diff --git a/mysql-test/t/events_scheduling.test b/mysql-test/t/events_scheduling.test index ff9dfaad927..b53a548b6ed 100644 --- a/mysql-test/t/events_scheduling.test +++ b/mysql-test/t/events_scheduling.test @@ -55,7 +55,7 @@ SELECT IF(SUM(a) > 0, 'OK', 'ERROR') FROM table_4; DROP EVENT two_sec; SELECT IF(TIME_TO_SEC(TIMEDIFF(ENDS,STARTS))=6, 'OK', 'ERROR') FROM INFORMATION_SCHEMA.EVENTS WHERE EVENT_SCHEMA=DATABASE() AND EVENT_NAME='start_n_end' AND ENDS IS NOT NULL; SELECT IF(LAST_EXECUTED-ENDS < 3, 'OK', 'ERROR') FROM INFORMATION_SCHEMA.EVENTS WHERE EVENT_SCHEMA=DATABASE() AND EVENT_NAME='start_n_end' AND ENDS IS NOT NULL; -DROP EVENT start_n_end; +DROP EVENT IF EXISTS events_test.start_n_end; --echo "Already dropped because ended. Therefore an error." 
--error ER_EVENT_DOES_NOT_EXIST DROP EVENT only_one_time; diff --git a/mysql-test/t/federated_server.test b/mysql-test/t/federated_server.test index 3e47d9bc95d..b2075b8e262 100644 --- a/mysql-test/t/federated_server.test +++ b/mysql-test/t/federated_server.test @@ -17,6 +17,13 @@ CREATE TABLE first_db.t1 ( ) DEFAULT CHARSET=latin1; +DROP TABLE IF EXISTS first_db.t2; +CREATE TABLE first_db.t2 ( + `id` int(20) NOT NULL, + `name` varchar(64) NOT NULL default '' + ) + DEFAULT CHARSET=latin1; + use second_db; DROP TABLE IF EXISTS second_db.t1; CREATE TABLE second_db.t1 ( @@ -25,6 +32,13 @@ CREATE TABLE second_db.t1 ( ) DEFAULT CHARSET=latin1; +DROP TABLE IF EXISTS second_db.t2; +CREATE TABLE second_db.t2 ( + `id` int(20) NOT NULL, + `name` varchar(64) NOT NULL default '' + ) + DEFAULT CHARSET=latin1; + connection master; drop server if exists 'server_one'; @@ -61,7 +75,7 @@ eval CREATE TABLE federated.old ( ENGINE="FEDERATED" DEFAULT CHARSET=latin1 CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/first_db/t1'; -INSERT INTO federated.old (id, name) values (1, 'federated.old url'); +INSERT INTO federated.old (id, name) values (1, 'federated.old-> first_db.t1, url format'); SELECT * FROM federated.old; @@ -72,9 +86,32 @@ eval CREATE TABLE federated.old2 ( `name` varchar(64) NOT NULL default '' ) ENGINE="FEDERATED" DEFAULT CHARSET=latin1 + CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/first_db/t2'; + +INSERT INTO federated.old2 (id, name) values (1, 'federated.old2-> first_db.t2, url format'); +SELECT * FROM federated.old2; + +DROP TABLE IF EXISTS federated.urldb2t1; +--replace_result $SLAVE_MYPORT SLAVE_PORT +eval CREATE TABLE federated.urldb2t1 ( + `id` int(20) NOT NULL, + `name` varchar(64) NOT NULL default '' + ) + ENGINE="FEDERATED" DEFAULT CHARSET=latin1 CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/second_db/t1'; +INSERT INTO federated.urldb2t1 (id, name) values (1, 'federated.urldb2t1 -> second_db.t1, url format'); +SELECT * FROM federated.urldb2t1; -INSERT INTO federated.old2 (id, name) values (1, 'federated.old2 url'); +DROP TABLE IF EXISTS federated.urldb2t2; +--replace_result $SLAVE_MYPORT SLAVE_PORT +eval CREATE TABLE federated.urldb2t2 ( + `id` int(20) NOT NULL, + `name` varchar(64) NOT NULL default '' + ) + ENGINE="FEDERATED" DEFAULT CHARSET=latin1 + CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/second_db/t2'; +INSERT INTO federated.urldb2t2 (id, name) values (1, 'federated.urldb2t2 -> second_db.t2, url format'); +SELECT * FROM federated.urldb2t2; DROP TABLE IF EXISTS federated.t1; CREATE TABLE federated.t1 ( @@ -84,17 +121,30 @@ CREATE TABLE federated.t1 ( ENGINE="FEDERATED" DEFAULT CHARSET=latin1 CONNECTION='server_one'; -INSERT INTO federated.t1 (id, name) values (1, 'server_one, new scheme'); +INSERT INTO federated.t1 (id, name) values (1, 'server_one, new scheme, first_db.t1'); SELECT * FROM federated.t1; +DROP TABLE IF EXISTS federated.whatever; +CREATE TABLE federated.whatever ( + `id` int(20) NOT NULL, + `name` varchar(64) NOT NULL default '' + ) + ENGINE="FEDERATED" DEFAULT CHARSET=latin1 + CONNECTION='server_one/t1'; +INSERT INTO federated.whatever (id, name) values (1, 'server_one, new scheme, whatever, first_db.t1'); +SELECT * FROM federated.whatever; + ALTER SERVER 'server_one' options(DATABASE 'second_db'); -flush tables; +# FLUSH TABLES is now unneccessary -INSERT INTO federated.t1 (id, name) values (1, 'server_two, new scheme'); +INSERT INTO federated.t1 (id, name) values (1, 'server_two, new scheme, second_db.t1'); SELECT * FROM federated.t1; +INSERT INTO 
federated.whatever (id, name) values (1, 'server_two, new scheme, whatever, second_db.t1'); +SELECT * FROM federated.whatever; + drop table federated.t1; drop server 'server_one'; @@ -107,4 +157,166 @@ drop table second_db.t1; drop database first_db; drop database second_db; +# +# Bug#25671 - CREATE/DROP/ALTER SERVER should require privileges +# +# Changes to SERVER declarations should require SUPER privilege. +# Based upon test case by Giuseppe Maxia + +create database db_legitimate; +create database db_bogus; + +use db_legitimate; +CREATE TABLE db_legitimate.t1 ( + `id` int(20) NOT NULL, + `name` varchar(64) NOT NULL default '' + ); +INSERT INTO db_legitimate.t1 VALUES ('1','this is legitimate'); + +use db_bogus; +CREATE TABLE db_bogus.t1 ( + `id` int(20) NOT NULL, + `name` varchar(64) NOT NULL default '' + ) + ; +INSERT INTO db_bogus.t1 VALUES ('2','this is bogus'); + +connection master; +--replace_result $SLAVE_MYPORT SLAVE_PORT +eval create server 's1' foreign data wrapper 'mysql' options + (HOST '127.0.0.1', + DATABASE 'db_legitimate', + USER 'root', + PASSWORD '', + PORT $SLAVE_MYPORT, + SOCKET '', + OWNER 'root'); + +create user guest_select@localhost; +grant select on federated.* to guest_select@localhost; + +create user guest_super@localhost; +grant select,SUPER,RELOAD on *.* to guest_super@localhost; + +create user guest_usage@localhost; +grant usage on *.* to guest_usage@localhost; + +CREATE TABLE federated.t1 ( + `id` int(20) NOT NULL, + `name` varchar(64) NOT NULL default '' + ) ENGINE = FEDERATED CONNECTION = 's1'; + +select * from federated.t1; + +connect (conn_select,127.0.0.1,guest_select,,federated,$MASTER_MYPORT); +connect (conn_usage,127.0.0.1,guest_usage,,,$MASTER_MYPORT); +connect (conn_super,127.0.0.1,guest_super,,,$MASTER_MYPORT); + +connection conn_select; +--error ER_SPECIFIC_ACCESS_DENIED_ERROR +alter server s1 options (database 'db_bogus'); + +connection master; +flush tables; +select * from federated.t1; + +connection conn_usage; +--error ER_SPECIFIC_ACCESS_DENIED_ERROR +alter server s1 options (database 'db_bogus'); + +connection master; +flush tables; +select * from federated.t1; + +connection conn_super; +alter server s1 options (database 'db_bogus'); + +connection master; +flush tables; +select * from federated.t1; + +connection conn_select; +--error ER_SPECIFIC_ACCESS_DENIED_ERROR +drop server if exists 's1'; +--replace_result $SLAVE_MYPORT SLAVE_PORT +--error ER_SPECIFIC_ACCESS_DENIED_ERROR +eval create server 's1' foreign data wrapper 'mysql' options + (HOST '127.0.0.1', + DATABASE 'db_legitimate', + USER 'root', + PASSWORD '', + PORT $SLAVE_MYPORT, + SOCKET '', + OWNER 'root'); + +connection conn_super; +drop server 's1'; +--replace_result $SLAVE_MYPORT SLAVE_PORT +eval create server 's1' foreign data wrapper 'mysql' options + (HOST '127.0.0.1', + DATABASE 'db_legitimate', + USER 'root', + PASSWORD '', + PORT $SLAVE_MYPORT, + SOCKET '', + OWNER 'root'); + +connection master; +flush tables; +select * from federated.t1; + +# clean up test +connection slave; +drop database db_legitimate; +drop database db_bogus; + +disconnect conn_select; +disconnect conn_usage; +disconnect conn_super; + +connection master; +drop user guest_super@localhost; +drop user guest_usage@localhost; +drop user guest_select@localhost; +drop table federated.t1; +drop server 's1'; + + +--echo # End of 5.1 tests + + +# +# Bug#25721 - deadlock with ALTER/CREATE SERVER +# +connect (other,localhost,root,,); +connection master; +use test; +delimiter //; +create procedure p1 () +begin + 
DECLARE v INT DEFAULT 0; + DECLARE e INT DEFAULT 0; + DECLARE CONTINUE HANDLER FOR SQLEXCEPTION SET e = e + 1; + WHILE v < 10000 do + CREATE SERVER s + FOREIGN DATA WRAPPER mysql + OPTIONS (USER 'Remote', HOST '192.168.1.106', DATABASE 'test'); + ALTER SERVER s OPTIONS (USER 'Remote'); + DROP SERVER s; + SET v = v + 1; + END WHILE; + SELECT e > 0; +END// +delimiter ;// +connection other; +use test; +send call p1(); +connection master; +call p1(); +connection other; +reap; +drop procedure p1; +drop server if exists s; + + source include/federated_cleanup.inc; diff --git a/mysql-test/t/func_gconcat.test b/mysql-test/t/func_gconcat.test index c141fd370f8..431d582be50 100644 --- a/mysql-test/t/func_gconcat.test +++ b/mysql-test/t/func_gconcat.test @@ -497,4 +497,14 @@ select f2,group_concat(f1) from t1 group by f2; --disable_metadata drop table t1; -# End of 4.1 tests +# +# Bug #26815: Unexpected built-in function behavior: group_concat(distinct +# substring_index()) +# +CREATE TABLE t1(a TEXT, b CHAR(20)); +INSERT INTO t1 VALUES ("one.1","one.1"),("two.2","two.2"),("one.3","one.3"); +SELECT GROUP_CONCAT(DISTINCT UCASE(a)) FROM t1; +SELECT GROUP_CONCAT(DISTINCT UCASE(b)) FROM t1; +DROP TABLE t1; + +--echo End of 5.0 tests diff --git a/mysql-test/t/func_in.test b/mysql-test/t/func_in.test index 31352e4c639..e9583f982ff 100644 --- a/mysql-test/t/func_in.test +++ b/mysql-test/t/func_in.test @@ -361,6 +361,17 @@ SELECT * FROM t4 WHERE a IN ('1972-02-06','19772-07-29'); DROP TABLE t1,t2,t3,t4; +# +# BUG#27362: IN with a decimal expression that may return NULL +# + +CREATE TABLE t1 (id int not null); +INSERT INTO t1 VALUES (1),(2); + +SELECT id FROM t1 WHERE id IN(4564, (SELECT IF(1=0,1,1/0)) ); + +DROP TABLE t1; + --echo End of 5.0 tests diff --git a/mysql-test/t/gis.test b/mysql-test/t/gis.test index 7ae6e3adda7..a3952dbc3da 100644 --- a/mysql-test/t/gis.test +++ b/mysql-test/t/gis.test @@ -1,5 +1,6 @@ -- source include/have_geometry.inc + # # Spatial objects # @@ -423,6 +424,14 @@ from t1; drop table t1; +# +# Bug #27164: Crash when mixing InnoDB and MyISAM Geospatial tables +# +CREATE TABLE t1(a POINT) ENGINE=MyISAM; +INSERT INTO t1 VALUES (NULL); +SELECT * FROM t1; +DROP TABLE t1; + --echo End of 4.1 tests # @@ -472,6 +481,18 @@ insert into t1 values(default); drop table t1; # +# Bug #27300: create view with geometry functions lost columns types +# +CREATE TABLE t1 (a GEOMETRY); +CREATE VIEW v1 AS SELECT GeomFromwkb(ASBINARY(a)) FROM t1; +CREATE VIEW v2 AS SELECT a FROM t1; +DESCRIBE v1; +DESCRIBE v2; + +DROP VIEW v1,v2; +DROP TABLE t1; + +# # Bug #11335 View redefines column types # create table t1 (f1 tinyint(1), f2 char(1), f3 varchar(1), f4 geometry, f5 datetime); diff --git a/mysql-test/t/group_by.test b/mysql-test/t/group_by.test index a3318745764..fb9c09d4763 100644 --- a/mysql-test/t/group_by.test +++ b/mysql-test/t/group_by.test @@ -755,7 +755,8 @@ SET SQL_MODE = ''; # # Bug #21174: Index degrades sort performance and -# optimizer does not honor IGNORE INDEX +# optimizer does not honor IGNORE INDEX. +# a.k.a WL3527. 
# CREATE TABLE t1 (a INT, b INT, PRIMARY KEY (a), @@ -819,4 +820,8 @@ EXPLAIN SELECT a, SUM(b) FROM t2 IGNORE INDEX (a) GROUP BY a LIMIT 2; EXPLAIN SELECT 1 FROM t2 WHERE a IN (SELECT a FROM t1 USE INDEX (i2) IGNORE INDEX (i2)); +SHOW VARIABLES LIKE 'old'; +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +SET @@old = off; + DROP TABLE t1, t2; diff --git a/mysql-test/t/heap_btree.test b/mysql-test/t/heap_btree.test index 68aa79834fc..4e7102806d6 100644 --- a/mysql-test/t/heap_btree.test +++ b/mysql-test/t/heap_btree.test @@ -182,6 +182,37 @@ delete from t1 where a >= 2; select a from t1 order by a; drop table t1; +# +# Bug#26996 - Update of a Field in a Memory Table ends with wrong result +# +CREATE TABLE t1 ( + c1 CHAR(3), + c2 INTEGER, + KEY USING BTREE(c1), + KEY USING BTREE(c2) +) ENGINE= MEMORY; +INSERT INTO t1 VALUES ('ABC',0), ('A',0), ('B',0), ('C',0); +UPDATE t1 SET c2= c2 + 1 WHERE c1 = 'A'; +SELECT * FROM t1; +DROP TABLE t1; + +# +# Bug#24985 - UTF8 ENUM primary key on MEMORY using BTREE +# causes incorrect duplicate entries +# +CREATE TABLE t1 ( + c1 ENUM('1', '2'), + UNIQUE USING BTREE(c1) +) ENGINE= MEMORY DEFAULT CHARSET= utf8; +INSERT INTO t1 VALUES('1'), ('2'); +DROP TABLE t1; +CREATE TABLE t1 ( + c1 SET('1', '2'), + UNIQUE USING BTREE(c1) +) ENGINE= MEMORY DEFAULT CHARSET= utf8; +INSERT INTO t1 VALUES('1'), ('2'); +DROP TABLE t1; + --echo End of 4.1 tests # @@ -205,3 +236,4 @@ INSERT INTO t1 VALUES(NULL),(NULL); DROP TABLE t1; --echo End of 5.0 tests + diff --git a/mysql-test/t/help.test b/mysql-test/t/help.test index 5f234d7c462..71821e46771 100644 --- a/mysql-test/t/help.test +++ b/mysql-test/t/help.test @@ -13,30 +13,30 @@ # impossible_category_3 # impossible_function_7 -insert into mysql.help_category(help_category_id,name)values(1,'impossible_category_1'); -select @category1_id:= 1; -insert into mysql.help_category(help_category_id,name)values(2,'impossible_category_2'); -select @category2_id:= 2; -insert into mysql.help_category(help_category_id,name,parent_category_id)values(3,'impossible_category_3',@category2_id); -select @category3_id:= 3; - -insert into mysql.help_topic(help_topic_id,name,help_category_id,description,example)values(1,'impossible_function_1',@category1_id,'description of \n impossible_function1\n','example of \n impossible_function1'); -select @topic1_id:= 1; -insert into mysql.help_topic(help_topic_id,name,help_category_id,description,example)values(2,'impossible_function_2',@category1_id,'description of \n impossible_function2\n','example of \n impossible_function2'); -select @topic2_id:= 2; -insert into mysql.help_topic(help_topic_id,name,help_category_id,description,example)values(3,'impossible_function_3',@category2_id,'description of \n impossible_function3\n','example of \n impossible_function3'); -select @topic3_id:= 3; -insert into mysql.help_topic(help_topic_id,name,help_category_id,description,example)values(4,'impossible_function_4',@category2_id,'description of \n impossible_function4\n','example of \n impossible_function4'); -select @topic4_id:= 4; -insert into mysql.help_topic(help_topic_id,name,help_category_id,description,example)values(5,'impossible_function_7',@category3_id,'description of \n impossible_function5\n','example of \n impossible_function7'); -select @topic5_id:= 5; - -insert into mysql.help_keyword(help_keyword_id,name)values(1,'impossible_function_1'); -select @keyword1_id:= 1; -insert into mysql.help_keyword(help_keyword_id,name)values(2,'impossible_function_5'); -select @keyword2_id:= 2; -insert into 
mysql.help_keyword(help_keyword_id,name)values(3,'impossible_function_6'); -select @keyword3_id:= 3; +insert into mysql.help_category(help_category_id,name)values(10001,'impossible_category_1'); +select @category1_id:= 10001; +insert into mysql.help_category(help_category_id,name)values(10002,'impossible_category_2'); +select @category2_id:= 10002; +insert into mysql.help_category(help_category_id,name,parent_category_id)values(10003,'impossible_category_3',@category2_id); +select @category3_id:= 10003; + +insert into mysql.help_topic(help_topic_id,name,help_category_id,description,example)values(10101,'impossible_function_1',@category1_id,'description of \n impossible_function1\n','example of \n impossible_function1'); +select @topic1_id:= 10101; +insert into mysql.help_topic(help_topic_id,name,help_category_id,description,example)values(10102,'impossible_function_2',@category1_id,'description of \n impossible_function2\n','example of \n impossible_function2'); +select @topic2_id:= 10102; +insert into mysql.help_topic(help_topic_id,name,help_category_id,description,example)values(10103,'impossible_function_3',@category2_id,'description of \n impossible_function3\n','example of \n impossible_function3'); +select @topic3_id:= 10103; +insert into mysql.help_topic(help_topic_id,name,help_category_id,description,example)values(10104,'impossible_function_4',@category2_id,'description of \n impossible_function4\n','example of \n impossible_function4'); +select @topic4_id:= 10104; +insert into mysql.help_topic(help_topic_id,name,help_category_id,description,example)values(10105,'impossible_function_7',@category3_id,'description of \n impossible_function5\n','example of \n impossible_function7'); +select @topic5_id:= 10105; + +insert into mysql.help_keyword(help_keyword_id,name)values(10201,'impossible_function_1'); +select @keyword1_id:= 10201; +insert into mysql.help_keyword(help_keyword_id,name)values(10202,'impossible_function_5'); +select @keyword2_id:= 10202; +insert into mysql.help_keyword(help_keyword_id,name)values(10203,'impossible_function_6'); +select @keyword3_id:= 10203; insert into mysql.help_relation(help_keyword_id,help_topic_id)values(@keyword1_id,@topic2_id); insert into mysql.help_relation(help_keyword_id,help_topic_id)values(@keyword2_id,@topic1_id); diff --git a/mysql-test/t/information_schema.test b/mysql-test/t/information_schema.test index 09a588c9195..ae330f47bc5 100644 --- a/mysql-test/t/information_schema.test +++ b/mysql-test/t/information_schema.test @@ -1042,5 +1042,52 @@ select user,db from information_schema.processlist; connection default; drop user user3148@localhost; + + +# +# Bug #26174 Server Crash: INSERT ... SELECT ... 
FROM I_S.GLOBAL_STATUS in Event +# +--disable_warnings +DROP TABLE IF EXISTS thread_status; +DROP TABLE IF EXISTS server_status; +DROP EVENT IF EXISTS event_status; + +--enable_warnings + +SET GLOBAL event_scheduler=1; + +DELIMITER $$; + +CREATE EVENT event_status + ON SCHEDULE AT NOW() + ON COMPLETION NOT PRESERVE + DO +BEGIN + CREATE TABLE thread_status + SELECT variable_name, variable_value + FROM information_schema.session_status + WHERE variable_name LIKE 'SSL_ACCEPTS' OR + variable_name LIKE 'SSL_CALLBACK_CACHE_HITS'; + + CREATE TABLE server_status + SELECT variable_name + FROM information_schema.global_status + WHERE variable_name LIKE 'ABORTED_CONNECTS' OR + variable_name LIKE 'BINLOG_CACHE_DISK_USE'; +END$$ + +DELIMITER ;$$ + +let $wait_condition=select count(*) = 0 from information_schema.events where event_name='event_status'; +let $wait_timeout=30; +--source include/wait_condition.inc + +SELECT variable_name, variable_value FROM thread_status; +SELECT variable_name FROM server_status; + +DROP TABLE thread_status; +DROP TABLE server_status; +SET GLOBAL event_scheduler=0; + --echo End of 5.1 tests. diff --git a/mysql-test/t/init_connect.test b/mysql-test/t/init_connect.test index c9a18a4003d..0a08559279c 100644 --- a/mysql-test/t/init_connect.test +++ b/mysql-test/t/init_connect.test @@ -2,10 +2,11 @@ # Test of init_connect variable # +# should work with embedded server after mysqltest is fixed +--source include/not_embedded.inc + --source include/add_anonymous_users.inc -# should work with embedded server after mysqltest is fixed --- source include/not_embedded.inc connect (con0,localhost,root,,); connection con0; select hex(@a); diff --git a/mysql-test/t/insert_select.test b/mysql-test/t/insert_select.test index 0f9a0ca4872..95a55b732c5 100644 --- a/mysql-test/t/insert_select.test +++ b/mysql-test/t/insert_select.test @@ -293,4 +293,29 @@ INSERT INTO t2 (f1, f2) SELECT * FROM t2; DROP TABLE t1, t2; - +# +# Bug #26207: inserts don't work with shortened index +# +SET SQL_MODE='STRICT_TRANS_TABLES,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION'; + +CREATE TABLE t1 (c VARCHAR(30), INDEX ix_c (c(10))); +CREATE TABLE t2 (d VARCHAR(10)); +INSERT INTO t1 (c) VALUES ('7_chars'), ('13_characters'); + +EXPLAIN + SELECT (SELECT SUM(LENGTH(c)) FROM t1 WHERE c='13_characters') FROM t1; + +SELECT (SELECT SUM(LENGTH(c)) FROM t1 WHERE c='13_characters') FROM t1; + +INSERT INTO t2 (d) + SELECT (SELECT SUM(LENGTH(c)) FROM t1 WHERE c='13_characters') FROM t1; + +INSERT INTO t2 (d) + SELECT (SELECT SUM(LENGTH(c)) FROM t1 WHERE c='7_chars') FROM t1; + +INSERT INTO t2 (d) + SELECT (SELECT SUM(LENGTH(c)) FROM t1 WHERE c IN (SELECT t1.c FROM t1)) + FROM t1; + +SELECT * FROM t2; +DROP TABLE t1,t2; diff --git a/mysql-test/t/insert_update.test b/mysql-test/t/insert_update.test index 1ed1e8657ac..f0d87ea956d 100644 --- a/mysql-test/t/insert_update.test +++ b/mysql-test/t/insert_update.test @@ -195,3 +195,55 @@ SELECT LAST_INSERT_ID(); INSERT t1 (f2) VALUES ('test') ON DUPLICATE KEY UPDATE f1 = LAST_INSERT_ID(f1); SELECT LAST_INSERT_ID(); DROP TABLE t1; + +# +# Bug#23233: 0 as LAST_INSERT_ID() after INSERT .. ON DUPLICATE in the +# NO_AUTO_VALUE_ON_ZERO mode. 
+# +SET SQL_MODE='NO_AUTO_VALUE_ON_ZERO'; +CREATE TABLE `t1` ( + `id` int(11) PRIMARY KEY auto_increment, + `f1` varchar(10) NOT NULL UNIQUE +); +INSERT IGNORE INTO t1 (f1) VALUES ("test1") + ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id); +INSERT IGNORE INTO t1 (f1) VALUES ("test1") + ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id); +SELECT LAST_INSERT_ID(); +SELECT * FROM t1; +INSERT IGNORE INTO t1 (f1) VALUES ("test2") + ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id); +SELECT * FROM t1; +INSERT IGNORE INTO t1 (f1) VALUES ("test2") + ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id); +SELECT LAST_INSERT_ID(); +SELECT * FROM t1; +INSERT IGNORE INTO t1 (f1) VALUES ("test3") + ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id); +SELECT LAST_INSERT_ID(); +SELECT * FROM t1; +DROP TABLE t1; +CREATE TABLE `t1` ( + `id` int(11) PRIMARY KEY auto_increment, + `f1` varchar(10) NOT NULL UNIQUE +); +INSERT IGNORE INTO t1 (f1) VALUES ("test1") + ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id); +SELECT LAST_INSERT_ID(); +SELECT * FROM t1; +INSERT IGNORE INTO t1 (f1) VALUES ("test1"),("test4") + ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id); +SELECT LAST_INSERT_ID(); +SELECT * FROM t1; +DROP TABLE t1; +CREATE TABLE `t1` ( + `id` int(11) PRIMARY KEY auto_increment, + `f1` varchar(10) NOT NULL UNIQUE, + tim1 timestamp default '2003-01-01 00:00:00' on update current_timestamp +); +INSERT INTO t1 (f1) VALUES ("test1"); +SELECT id, f1 FROM t1; +REPLACE INTO t1 VALUES (0,"test1",null); +SELECT id, f1 FROM t1; +DROP TABLE t1; +SET SQL_MODE=''; diff --git a/mysql-test/t/loaddata.test b/mysql-test/t/loaddata.test index b608ab601c1..5729ff0748d 100644 --- a/mysql-test/t/loaddata.test +++ b/mysql-test/t/loaddata.test @@ -130,12 +130,12 @@ set @@secure_file_priv= 0; truncate table t1; --replace_result $MYSQL_TEST_DIR MYSQL_TEST_DIR --error 1290 -eval load data infile '$MYSQL_TEST_DIR/Makefile' into table t1; +eval load data infile '$MYSQL_TEST_DIR/t/loaddata.test' into table t1; select * from t1; # Test "load_file" returns NULL --replace_result $MYSQL_TEST_DIR MYSQL_TEST_DIR -eval select load_file("$MYSQL_TEST_DIR/Makefile"); +eval select load_file("$MYSQL_TEST_DIR/t/loaddata.test"); # cleanup drop table t1, t2; diff --git a/mysql-test/t/log_state.test b/mysql-test/t/log_state.test index a5e00cb0387..e772089ce7a 100644 --- a/mysql-test/t/log_state.test +++ b/mysql-test/t/log_state.test @@ -132,3 +132,8 @@ select * from mysql.general_log; # Cleanup (must be done last to avoid delayed 'Quit' message in general log) # disconnect con1; + +# Remove the log files that was created in the "default location" +# i.e var/run +--remove_file $MYSQLTEST_VARDIR/run/master.log +--remove_file $MYSQLTEST_VARDIR/run/master-slow.log diff --git a/mysql-test/t/merge.test b/mysql-test/t/merge.test index e132c33ab7f..e150b6b3a9a 100644 --- a/mysql-test/t/merge.test +++ b/mysql-test/t/merge.test @@ -434,16 +434,7 @@ CREATE TABLE tm1(a SMALLINT, b SMALLINT, KEY(a)) ENGINE=MERGE UNION=(t1); SELECT * FROM tm1; DROP TABLE t1, tm1; -# -# Bug#26464 - insert delayed + update + merge = corruption -# -CREATE TABLE t1(c1 INT) ENGINE=MyISAM; -CREATE TABLE t2(c1 INT) ENGINE=MERGE UNION=(t1); ---error 1031 -INSERT DELAYED INTO t2 VALUES(1); -DROP TABLE t1, t2; -# # BUG#26881 - Large MERGE tables report incorrect specification when no # differences in tables # diff --git a/mysql-test/t/myisam.test b/mysql-test/t/myisam.test index 4281030fc3c..8f7357d1c98 100644 --- a/mysql-test/t/myisam.test +++ b/mysql-test/t/myisam.test @@ -874,6 +874,150 @@ CREATE TABLE t1 (c1 
TEXT) AVG_ROW_LENGTH=70100 MAX_ROWS=4100100100; SHOW TABLE STATUS LIKE 't1'; DROP TABLE t1; +# +# Bug#26231 - select count(*) on myisam table returns wrong value +# when index is used +# +CREATE TABLE t1 (c1 TEXT NOT NULL, KEY c1 (c1(10))) ENGINE=MyISAM; +# Fill at least two key blocks. "Tab, A" must be in both blocks. +INSERT INTO t1 VALUES + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), 
(CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), 
(CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), (CHAR(9,65)), + (''), (''), (''), (''), + (' B'), (' B'), (' B'), (' B'); +SELECT DISTINCT COUNT(*) FROM t1 WHERE c1 = ''; +SELECT DISTINCT length(c1), c1 FROM t1 WHERE c1 = ''; +SELECT DISTINCT COUNT(*) FROM t1 IGNORE INDEX (c1) WHERE c1 = ''; +SELECT DISTINCT length(c1), c1 FROM t1 IGNORE INDEX (c1) WHERE c1 = ''; +SELECT DISTINCT length(c1), c1 FROM t1 ORDER BY c1; +DROP TABLE t1; + --echo End of 4.1 tests diff --git a/mysql-test/t/mysqlbinlog-cp932.test b/mysql-test/t/mysqlbinlog-cp932.test index 5d44ab63f12..5a5b7646cf4 100644 --- a/mysql-test/t/mysqlbinlog-cp932.test +++ b/mysql-test/t/mysqlbinlog-cp932.test @@ -1,3 +1,6 @@ +# disabled in embedded until tools running is fixed with embedded +--source include/not_embedded.inc + -- source include/have_binlog_format_mixed_or_statement.inc -- source include/have_cp932.inc diff --git a/mysql-test/t/mysqlbinlog.test b/mysql-test/t/mysqlbinlog.test index 9a27c4cbdb2..a73d7a40d8b 100644 --- a/mysql-test/t/mysqlbinlog.test +++ b/mysql-test/t/mysqlbinlog.test @@ -65,7 +65,7 @@ select "--- --database --" as ""; select "--- --position --" as ""; --enable_query_log --replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR ---exec $MYSQL_BINLOG --short-form --local-load=$MYSQLTEST_VARDIR/tmp/ --position=235 $MYSQLTEST_VARDIR/log/master-bin.000002 +--exec $MYSQL_BINLOG --short-form --local-load=$MYSQLTEST_VARDIR/tmp/ --position=239 $MYSQLTEST_VARDIR/log/master-bin.000002 # These are tests for remote binlog. # They should return the same as previous test. 
@@ -97,7 +97,7 @@ select "--- --database --" as ""; select "--- --position --" as ""; --enable_query_log --replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR ---exec $MYSQL_BINLOG --short-form --local-load=$MYSQLTEST_VARDIR/tmp/ --read-from-remote-server --position=235 --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000002 +--exec $MYSQL_BINLOG --short-form --local-load=$MYSQLTEST_VARDIR/tmp/ --read-from-remote-server --position=239 --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000002 # Bug#7853 (mysqlbinlog does not accept input from stdin) --disable_query_log diff --git a/mysql-test/t/mysqlbinlog2.test b/mysql-test/t/mysqlbinlog2.test index 85a678055d4..cd49d3570e9 100644 --- a/mysql-test/t/mysqlbinlog2.test +++ b/mysql-test/t/mysqlbinlog2.test @@ -52,11 +52,11 @@ select "--- offset --" as ""; --disable_query_log select "--- start-position --" as ""; --enable_query_log ---exec $MYSQL_BINLOG --short-form --start-position=604 $MYSQLTEST_VARDIR/log/master-bin.000001 +--exec $MYSQL_BINLOG --short-form --start-position=608 $MYSQLTEST_VARDIR/log/master-bin.000001 --disable_query_log select "--- stop-position --" as ""; --enable_query_log ---exec $MYSQL_BINLOG --short-form --stop-position=604 $MYSQLTEST_VARDIR/log/master-bin.000001 +--exec $MYSQL_BINLOG --short-form --stop-position=608 $MYSQLTEST_VARDIR/log/master-bin.000001 --disable_query_log select "--- start-datetime --" as ""; --enable_query_log @@ -82,11 +82,11 @@ select "--- offset --" as ""; --disable_query_log select "--- start-position --" as ""; --enable_query_log ---exec $MYSQL_BINLOG --short-form --start-position=604 $MYSQLTEST_VARDIR/log/master-bin.000001 $MYSQLTEST_VARDIR/log/master-bin.000002 +--exec $MYSQL_BINLOG --short-form --start-position=608 $MYSQLTEST_VARDIR/log/master-bin.000001 $MYSQLTEST_VARDIR/log/master-bin.000002 --disable_query_log select "--- stop-position --" as ""; --enable_query_log ---exec $MYSQL_BINLOG --short-form --stop-position=130 $MYSQLTEST_VARDIR/log/master-bin.000001 $MYSQLTEST_VARDIR/log/master-bin.000002 +--exec $MYSQL_BINLOG --short-form --stop-position=134 $MYSQLTEST_VARDIR/log/master-bin.000001 $MYSQLTEST_VARDIR/log/master-bin.000002 --disable_query_log select "--- start-datetime --" as ""; --enable_query_log @@ -109,11 +109,11 @@ select "--- offset --" as ""; --disable_query_log select "--- start-position --" as ""; --enable_query_log ---exec $MYSQL_BINLOG --short-form --start-position=604 --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001 +--exec $MYSQL_BINLOG --short-form --start-position=608 --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001 --disable_query_log select "--- stop-position --" as ""; --enable_query_log ---exec $MYSQL_BINLOG --short-form --stop-position=604 --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001 +--exec $MYSQL_BINLOG --short-form --stop-position=608 --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001 --disable_query_log select "--- start-datetime --" as ""; --enable_query_log @@ -136,11 +136,11 @@ select "--- offset --" as ""; --disable_query_log select "--- start-position --" as ""; --enable_query_log ---exec $MYSQL_BINLOG --short-form --start-position=604 --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001 master-bin.000002 +--exec $MYSQL_BINLOG --short-form --start-position=608 --read-from-remote-server --user=root 
--host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001 master-bin.000002 --disable_query_log select "--- stop-position --" as ""; --enable_query_log ---exec $MYSQL_BINLOG --short-form --stop-position=130 --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001 master-bin.000002 +--exec $MYSQL_BINLOG --short-form --stop-position=134 --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001 master-bin.000002 --disable_query_log select "--- start-datetime --" as ""; --enable_query_log diff --git a/mysql-test/t/mysqlslap.test b/mysql-test/t/mysqlslap.test index 01add0d7da8..2a7bbfed932 100644 --- a/mysql-test/t/mysqlslap.test +++ b/mysql-test/t/mysqlslap.test @@ -4,7 +4,7 @@ --exec $MYSQL_SLAP --silent --concurrency=5 --iterations=20 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql ---exec $MYSQL_SLAP --silent --concurrency=5 --iterations=20 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql --use-threads +--exec $MYSQL_SLAP --silent --concurrency=5 --iterations=20 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql --exec $MYSQL_SLAP --only-print --iterations=20 --query="select * from t1" --create="CREATE TABLE t1 (id int, name varchar(64)); INSERT INTO t1 VALUES (1, 'This is a test')" --delimiter=";" --exec $MYSQL_SLAP --silent --concurrency=5 --iterations=20 --query="select * from t1" --create="CREATE TABLE t1 (id int, name varchar(64)); INSERT INTO t1 VALUES (1, 'This is a test')" --delimiter=";" @@ -14,3 +14,25 @@ --exec $MYSQL_SLAP --silent --concurrency=5 --iterations=20 --delimiter=";" --query="select * from t1;select * from t2" --create="CREATE TABLE t1 (id int, name varchar(64)); create table t2(foo1 varchar(32), foo2 varchar(32)); INSERT INTO t1 VALUES (1, 'This is a test'); insert into t2 values ('test', 'test2')" --exec $MYSQL_SLAP --silent --concurrency=5 --iterations=20 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql --create-schema=test_env + +--exec $MYSQL_SLAP --silent --concurrency=5 --iterations=20 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql --create-schema=test_env --auto-generate-sql-add-autoincrement + +--exec $MYSQL_SLAP --silent --concurrency=5 --iterations=1 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql --auto-generate-sql-add-autoincrement + +--exec $MYSQL_SLAP --silent --concurrency=5 --iterations=1 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql --auto-generate-sql-add-autoincrement --auto-generate-sql-load-type=update + +--exec $MYSQL_SLAP --silent --concurrency=5 --iterations=1 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql --auto-generate-sql-add-autoincrement --auto-generate-sql-load-type=read + +--exec $MYSQL_SLAP --silent --concurrency=5 --iterations=1 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql --auto-generate-sql-add-autoincrement --auto-generate-sql-load-type=write + +--exec $MYSQL_SLAP --silent --concurrency=5 --iterations=1 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql --auto-generate-sql-add-autoincrement --auto-generate-sql-load-type=mixed + +--exec $MYSQL_SLAP --silent --concurrency=5 --iterations=1 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql --auto-generate-sql-guid-primary --auto-generate-sql-load-type=update + +--exec $MYSQL_SLAP --silent --concurrency=5 --iterations=1 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql --auto-generate-sql-guid-primary --auto-generate-sql-load-type=update 
--auto-generate-sql-execute-number=5 + +--exec $MYSQL_SLAP --silent --concurrency=5 --iterations=1 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql --auto-generate-sql-guid-primary --auto-generate-sql-load-type=key --auto-generate-sql-execute-number=5 + +--exec $MYSQL_SLAP --silent --concurrency=5 --iterations=1 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql --auto-generate-sql-guid-primary --auto-generate-sql-load-type=key --auto-generate-sql-execute-number=5 --auto-generate-sql-secondary-indexes=3 + +--exec $MYSQL_SLAP --only-print --delimiter=";" --query="select * from t1;select * from t2" --create="CREATE TABLE t1 (id int, name varchar(64)); create table t2(foo1 varchar(32), foo2 varchar(32)); INSERT INTO t1 VALUES (1, 'This is a test'); insert into t2 values ('test', 'test2')" --engine="heap,myisam" --post-query="SHOW TABLES" --pre-query="SHOW TABLES"; diff --git a/mysql-test/t/ndb_blob.test b/mysql-test/t/ndb_blob.test index fb0536f0d10..b9a8c7e20ee 100644 --- a/mysql-test/t/ndb_blob.test +++ b/mysql-test/t/ndb_blob.test @@ -97,6 +97,11 @@ update t1 set d=null where a=1; commit; select a from t1 where d is null; +# bug#24028 - does not occur on MySQL level +# bug#17986 - not seen by us anymore but could show as warning here +delete from t1 where a=45567; +commit; + # pk delete delete from t1 where a=1; delete from t1 where a=2; diff --git a/mysql-test/t/ndb_restore_print.test b/mysql-test/t/ndb_restore_print.test new file mode 100644 index 00000000000..6dbbfdf5933 --- /dev/null +++ b/mysql-test/t/ndb_restore_print.test @@ -0,0 +1,189 @@ +-- source include/have_ndb.inc +-- source include/ndb_default_cluster.inc +-- source include/not_embedded.inc + +--disable_warnings +use test; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; +--enable_warnings + +# basic datatypes +create table t1 + (pk int key + ,a1 BIT(1), a2 BIT(5), a3 BIT(33), a4 BIT(63), a5 BIT(64) + ,b1 TINYINT, b2 TINYINT UNSIGNED + ,c1 SMALLINT, c2 SMALLINT UNSIGNED + ,d1 INT, d2 INT UNSIGNED + ,e1 BIGINT, e2 BIGINT UNSIGNED + ,f1 CHAR(1) BINARY, f2 CHAR(32) BINARY, f3 CHAR(255) BINARY + ,g1 VARCHAR(32) BINARY, g2 VARCHAR(255) BINARY, g3 VARCHAR(1000) BINARY + ,h1 BINARY(1), h2 BINARY(8), h3 BINARY(255) + ,i1 VARBINARY(32), i2 VARBINARY(255), i3 VARBINARY(1000) + ) engine myisam; + +# max values +insert into t1 values + (1 + ,0x1, 0x17, 0x789a, 0x789abcde, 0xfedc0001 + ,127, 255 + ,32767, 65535 + ,2147483647, 4294967295 + ,9223372036854775807, 18446744073709551615 + ,'1','12345678901234567890123456789012','123456789' + ,'1','12345678901234567890123456789012','123456789' + ,0x12,0x123456789abcdef0, 0x012345 + ,0x12,0x123456789abcdef0, 0x00123450 + ); + +# min values +insert into t1 values + (2 + ,0, 0, 0, 0, 0 + ,-128, 0 + ,-32768, 0 + ,-2147483648, 0 + ,-9223372036854775808, 0 + ,'','','' + ,'','','' + ,0x0,0x0,0x0 + ,0x0,0x0,0x0 + ); + +# null values +insert into t1 values + (3 + ,NULL,NULL,NULL,NULL,NULL + ,NULL,NULL + ,NULL,NULL + ,NULL,NULL + ,NULL,NULL + ,NULL,NULL,NULL + ,NULL,NULL,NULL + ,NULL,NULL,NULL + ,NULL,NULL,NULL + ); + +--vertical_results +select pk + ,hex(a1), hex(a2), hex(a3), hex(a4), hex(a5) + ,b1, b2 + ,c1 , c2 + ,d1 , d2 + ,e1 , e2 + ,f1 , f2, f3 + ,g1 , g2, g3 + ,hex(h1), hex(h2), hex(h3) + ,hex(i1), hex(i2), hex(i3) + from t1 order by pk; + +alter table t1 engine ndb; + +select pk + ,hex(a1), hex(a2), hex(a3), hex(a4), hex(a5) + ,b1, b2 + ,c1 , c2 + ,d1 , d2 + ,e1 , e2 + ,f1 , f2, f3 + ,g1 , g2, g3 + ,hex(h1), hex(h2), hex(h3) + ,hex(i1), hex(i2), hex(i3) + from t1 order 
by pk; +--horizontal_results + +--source include/ndb_backup.inc + +--let ndb_restore_filter=test t1 +--let ndb_restore_opts=--verbose=0 --print_data --hex --fields-terminated-by=";" +--source include/ndb_backup_print.inc + +--let ndb_restore_filter=test t1 +--let ndb_restore_opts=--verbose=0 --print_data --hex --fields-terminated-by="," --fields-optionally-enclosed-by="'" +--source include/ndb_backup_print.inc + + +drop table t1; + +# some binary char tests with trailing spaces +create table t1 + (pk int key + ,f1 CHAR(1) BINARY, f2 CHAR(32) BINARY, f3 CHAR(255) BINARY + ,g1 VARCHAR(32) BINARY, g2 VARCHAR(255) BINARY, g3 VARCHAR(1000) BINARY + ,h1 BINARY(1), h2 BINARY(9), h3 BINARY(255) + ,i1 VARBINARY(32), i2 VARBINARY(255), i3 VARBINARY(1000) + ) engine ndb; + +insert into t1 values + (1 + ,'1','12345678901234567890123456789012','123456789 ' + ,'1 ','12345678901234567890123456789012 ','123456789 ' + ,0x20,0x123456789abcdef020, 0x012345000020 + ,0x1200000020,0x123456789abcdef000000020, 0x00123450000020 + ); + +create table t2 (pk int key, a int) engine ndb; +create table t3 (pk int key, a int) engine ndb; +create table t4 (pk int key, a int) engine ndb; + +insert into t2 values (1,11),(2,12),(3,13),(4,14),(5,15); +insert into t3 values (1,21),(2,22),(3,23),(4,24),(5,25); +insert into t4 values (1,31),(2,32),(3,33),(4,34),(5,35); + +--source include/ndb_backup.inc +--let ndb_restore_opts=--verbose=0 --print_data --hex --fields-enclosed-by="'" --fields-optionally-enclosed-by="X" +--let ndb_restore_filter=test t1 +--source include/ndb_backup_print.inc + +--exec rm -f $MYSQLTEST_VARDIR/tmp/t1.txt +--exec rm -f $MYSQLTEST_VARDIR/tmp/t2.txt +--exec rm -f $MYSQLTEST_VARDIR/tmp/t3.txt +--exec rm -f $MYSQLTEST_VARDIR/tmp/t4.txt + +--let ndb_restore_opts=--verbose=0 --print_data --hex --tab $MYSQLTEST_VARDIR/tmp --append +--let ndb_restore_filter=test +--source include/ndb_backup_print.inc + +--let $message= t1 +--source include/show_msg.inc +--exec sort $MYSQLTEST_VARDIR/tmp/t1.txt +--let $message= t2 +--source include/show_msg.inc +--exec sort $MYSQLTEST_VARDIR/tmp/t2.txt +--let $message= t3 +--source include/show_msg.inc +--exec sort $MYSQLTEST_VARDIR/tmp/t3.txt +--let $message= t4 +--source include/show_msg.inc +--exec sort $MYSQLTEST_VARDIR/tmp/t4.txt + +--exec rm -f $MYSQLTEST_VARDIR/tmp/t1.txt +--exec rm -f $MYSQLTEST_VARDIR/tmp/t2.txt +--exec rm -f $MYSQLTEST_VARDIR/tmp/t3.txt +--exec rm -f $MYSQLTEST_VARDIR/tmp/t4.txt + +# now test some other datatypes +drop table t1; +create table t1 + (pk int key + ,a1 MEDIUMINT, a2 MEDIUMINT UNSIGNED + ) engine ndb; + +# max values +insert into t1 values(1, 8388607, 16777215); +# min values +insert into t1 values(2, -8388608, 0); +# small values +insert into t1 values(3, -1, 1); + +# backup and print +--source include/ndb_backup.inc + +--let ndb_restore_filter=test t1 +--let ndb_restore_opts=--verbose=0 --print_data --hex --fields-terminated-by=";" +--source include/ndb_backup_print.inc + +# clean up +drop table t1; +drop table t2; +drop table t3; +drop table t4; diff --git a/mysql-test/t/ndb_single_user.test b/mysql-test/t/ndb_single_user.test index c655124f79f..f2f47becb0c 100644 --- a/mysql-test/t/ndb_single_user.test +++ b/mysql-test/t/ndb_single_user.test @@ -51,34 +51,67 @@ insert into t1 select * from t2; --connection server2 --error 1051 drop table t1; ---error 1146 -#--error 1296 +--error 1296 create index new_index on t1 (c); ---error 1146 -#--error 1296 +--error 1296 insert into t1 values 
(1,1,0),(2,2,0),(3,3,0),(4,4,0),(5,5,0),(6,6,0),(7,7,0),(8,8,0),(9,9,0),(10,10,0); ---error 1146 -#--error 1296 +--error 1296 select * from t1 where a = 1; ---error 1146 -#--error 1296 +--error 1296 select * from t1 where b = 4; ---error 1146 -#--error 1296 +--error 1296 update t1 set b=102 where a = 2; ---error 1146 -#--error 1296 +--error 1296 update t1 set b=103 where b = 3; ---error 1146 -#--error 1296 +--error 1296 update t1 set b=b+100; ---error 1146 -#--error 1296 +--error 1296 update t1 set b=b+100 where a > 7; --exec $NDB_MGM --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT" -e "exit single user mode" >> $NDB_TOOLS_OUTPUT --exec $NDB_TOOLS_DIR/ndb_waiter --no-defaults >> $NDB_TOOLS_OUTPUT +# +# we should be able to run transaction while in single user mode +# +--connection server1 +BEGIN; +update t1 set b=b+100 where a=1; + +--connection server2 +BEGIN; +update t1 set b=b+100 where a=2; + +# enter single user mode +--exec $NDB_MGM --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT" -e "enter single user mode $node_id" >> $NDB_TOOLS_OUTPUT +--exec $NDB_TOOLS_DIR/ndb_waiter --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT" --single-user >> $NDB_TOOLS_OUTPUT + +--connection server1 +update t1 set b=b+100 where a=3; +COMMIT; + +# while on other mysqld it should be aborted +--connection server2 +--error 1296 +update t1 set b=b+100 where a=4; +--error 1296 +COMMIT; + +# Bug #25275 SINGLE USER MODE prevents ALTER on non-ndb +# tables for other mysqld nodes +--connection server2 +create table t2 (a int) engine myisam; +alter table t2 add column (b int); + +# exit single user mode +--exec $NDB_MGM --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT" -e "exit single user mode" >> $NDB_TOOLS_OUTPUT +--exec $NDB_TOOLS_DIR/ndb_waiter --no-defaults >> $NDB_TOOLS_OUTPUT + # cleanup +--connection server2 +drop table t2; --connection server1 drop table t1; + +# End of 5.0 tests + diff --git a/mysql-test/t/query_cache_sql_prepare.test b/mysql-test/t/query_cache_sql_prepare.test index 69b504e2fd1..a02388b2ae5 100644 --- a/mysql-test/t/query_cache_sql_prepare.test +++ b/mysql-test/t/query_cache_sql_prepare.test @@ -4,8 +4,10 @@ # Query cache is abbreviated as "QC" -- source include/have_query_cache.inc +# embedded can't make more than one connection, which this test needs +-- source include/not_embedded.inc -connect (con1,127.0.0.1,root,,test,$MASTER_MYPORT,); +connect (con1,localhost,root,,test,$MASTER_MYPORT,); connection default; set global query_cache_size=100000; diff --git a/mysql-test/t/range.test b/mysql-test/t/range.test index b1cec049ab2..06c2ef248c4 100644 --- a/mysql-test/t/range.test +++ b/mysql-test/t/range.test @@ -568,6 +568,149 @@ SELECT s.oxid FROM t1 v, t1 s DROP TABLE t1; +# BUG#26624 high mem usage (crash) in range optimizer (depends on order of fields in where) +create table t1 ( + c1 char(10), c2 char(10), c3 char(10), c4 char(10), + c5 char(10), c6 char(10), c7 char(10), c8 char(10), + c9 char(10), c10 char(10), c11 char(10), c12 char(10), + c13 char(10), c14 char(10), c15 char(10), c16 char(10), + index(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12,c13,c14,c15,c16) +); +insert into t1 (c1) values ('1'),('1'),('1'),('1'); + +# This must run without crash and fast: +select * from t1 where + c1 in ("abcdefgh", "123456789", "qwertyuio", "asddfgh", + "abcdefg1", "123456781", "qwertyui1", "asddfg1", + "abcdefg2", "123456782", "qwertyui2", "asddfg2", + "abcdefg3", "123456783", "qwertyui3", "asddfg3", + "abcdefg4", "123456784", 
"qwertyui4", "asddfg4", + "abcdefg5", "123456785", "qwertyui5", "asddfg5", + "abcdefg6", "123456786", "qwertyui6", "asddfg6", + "abcdefg7", "123456787", "qwertyui7", "asddfg7", + "abcdefg8", "123456788", "qwertyui8", "asddfg8", + "abcdefg9", "123456789", "qwertyui9", "asddfg9", + "abcdefgA", "12345678A", "qwertyuiA", "asddfgA", + "abcdefgB", "12345678B", "qwertyuiB", "asddfgB", + "abcdefgC", "12345678C", "qwertyuiC", "asddfgC") + and c2 in ("abcdefgh", "123456789", "qwertyuio", "asddfgh", + "abcdefg1", "123456781", "qwertyui1", "asddfg1", + "abcdefg2", "123456782", "qwertyui2", "asddfg2", + "abcdefg3", "123456783", "qwertyui3", "asddfg3", + "abcdefg4", "123456784", "qwertyui4", "asddfg4", + "abcdefg5", "123456785", "qwertyui5", "asddfg5", + "abcdefg6", "123456786", "qwertyui6", "asddfg6", + "abcdefg7", "123456787", "qwertyui7", "asddfg7", + "abcdefg8", "123456788", "qwertyui8", "asddfg8", + "abcdefg9", "123456789", "qwertyui9", "asddfg9", + "abcdefgA", "12345678A", "qwertyuiA", "asddfgA", + "abcdefgB", "12345678B", "qwertyuiB", "asddfgB", + "abcdefgC", "12345678C", "qwertyuiC", "asddfgC") + and c3 in ("abcdefgh", "123456789", "qwertyuio", "asddfgh", + "abcdefg1", "123456781", "qwertyui1", "asddfg1", + "abcdefg2", "123456782", "qwertyui2", "asddfg2", + "abcdefg3", "123456783", "qwertyui3", "asddfg3", + "abcdefg4", "123456784", "qwertyui4", "asddfg4", + "abcdefg5", "123456785", "qwertyui5", "asddfg5", + "abcdefg6", "123456786", "qwertyui6", "asddfg6", + "abcdefg7", "123456787", "qwertyui7", "asddfg7", + "abcdefg8", "123456788", "qwertyui8", "asddfg8", + "abcdefg9", "123456789", "qwertyui9", "asddfg9", + "abcdefgA", "12345678A", "qwertyuiA", "asddfgA", + "abcdefgB", "12345678B", "qwertyuiB", "asddfgB", + "abcdefgC", "12345678C", "qwertyuiC", "asddfgC") + and c4 in ("abcdefgh", "123456789", "qwertyuio", "asddfgh", + "abcdefg1", "123456781", "qwertyui1", "asddfg1", + "abcdefg2", "123456782", "qwertyui2", "asddfg2", + "abcdefg3", "123456783", "qwertyui3", "asddfg3", + "abcdefg4", "123456784", "qwertyui4", "asddfg4", + "abcdefg5", "123456785", "qwertyui5", "asddfg5", + "abcdefg6", "123456786", "qwertyui6", "asddfg6", + "abcdefg7", "123456787", "qwertyui7", "asddfg7", + "abcdefg8", "123456788", "qwertyui8", "asddfg8", + "abcdefg9", "123456789", "qwertyui9", "asddfg9", + "abcdefgA", "12345678A", "qwertyuiA", "asddfgA", + "abcdefgB", "12345678B", "qwertyuiB", "asddfgB", + "abcdefgC", "12345678C", "qwertyuiC", "asddfgC") + and c5 in ("abcdefgh", "123456789", "qwertyuio", "asddfgh", + "abcdefg1", "123456781", "qwertyui1", "asddfg1", + "abcdefg2", "123456782", "qwertyui2", "asddfg2", + "abcdefg3", "123456783", "qwertyui3", "asddfg3", + "abcdefg4", "123456784", "qwertyui4", "asddfg4", + "abcdefg5", "123456785", "qwertyui5", "asddfg5", + "abcdefg6", "123456786", "qwertyui6", "asddfg6", + "abcdefg7", "123456787", "qwertyui7", "asddfg7", + "abcdefg8", "123456788", "qwertyui8", "asddfg8", + "abcdefg9", "123456789", "qwertyui9", "asddfg9", + "abcdefgA", "12345678A", "qwertyuiA", "asddfgA", + "abcdefgB", "12345678B", "qwertyuiB", "asddfgB", + "abcdefgC", "12345678C", "qwertyuiC", "asddfgC") + and c6 in ("abcdefgh", "123456789", "qwertyuio", "asddfgh", + "abcdefg1", "123456781", "qwertyui1", "asddfg1", + "abcdefg2", "123456782", "qwertyui2", "asddfg2", + "abcdefg3", "123456783", "qwertyui3", "asddfg3", + "abcdefg4", "123456784", "qwertyui4", "asddfg4", + "abcdefg5", "123456785", "qwertyui5", "asddfg5", + "abcdefg6", "123456786", "qwertyui6", "asddfg6", + "abcdefg7", "123456787", "qwertyui7", "asddfg7", + 
"abcdefg8", "123456788", "qwertyui8", "asddfg8", + "abcdefg9", "123456789", "qwertyui9", "asddfg9", + "abcdefgA", "12345678A", "qwertyuiA", "asddfgA", + "abcdefgB", "12345678B", "qwertyuiB", "asddfgB", + "abcdefgC", "12345678C", "qwertyuiC", "asddfgC") + and c7 in ("abcdefgh", "123456789", "qwertyuio", "asddfgh", + "abcdefg1", "123456781", "qwertyui1", "asddfg1", + "abcdefg2", "123456782", "qwertyui2", "asddfg2", + "abcdefg3", "123456783", "qwertyui3", "asddfg3", + "abcdefg4", "123456784", "qwertyui4", "asddfg4", + "abcdefg5", "123456785", "qwertyui5", "asddfg5", + "abcdefg6", "123456786", "qwertyui6", "asddfg6", + "abcdefg7", "123456787", "qwertyui7", "asddfg7", + "abcdefg8", "123456788", "qwertyui8", "asddfg8", + "abcdefg9", "123456789", "qwertyui9", "asddfg9", + "abcdefgA", "12345678A", "qwertyuiA", "asddfgA", + "abcdefgB", "12345678B", "qwertyuiB", "asddfgB", + "abcdefgC", "12345678C", "qwertyuiC", "asddfgC") + and c8 in ("abcdefgh", "123456789", "qwertyuio", "asddfgh", + "abcdefg1", "123456781", "qwertyui1", "asddfg1", + "abcdefg2", "123456782", "qwertyui2", "asddfg2", + "abcdefg3", "123456783", "qwertyui3", "asddfg3", + "abcdefg4", "123456784", "qwertyui4", "asddfg4", + "abcdefg5", "123456785", "qwertyui5", "asddfg5", + "abcdefg6", "123456786", "qwertyui6", "asddfg6", + "abcdefg7", "123456787", "qwertyui7", "asddfg7", + "abcdefg8", "123456788", "qwertyui8", "asddfg8", + "abcdefg9", "123456789", "qwertyui9", "asddfg9", + "abcdefgA", "12345678A", "qwertyuiA", "asddfgA", + "abcdefgB", "12345678B", "qwertyuiB", "asddfgB", + "abcdefgC", "12345678C", "qwertyuiC", "asddfgC") + and c9 in ("abcdefgh", "123456789", "qwertyuio", "asddfgh", + "abcdefg1", "123456781", "qwertyui1", "asddfg1", + "abcdefg2", "123456782", "qwertyui2", "asddfg2", + "abcdefg3", "123456783", "qwertyui3", "asddfg3", + "abcdefg4", "123456784", "qwertyui4", "asddfg4", + "abcdefg5", "123456785", "qwertyui5", "asddfg5", + "abcdefg6", "123456786", "qwertyui6", "asddfg6", + "abcdefg7", "123456787", "qwertyui7", "asddfg7", + "abcdefg8", "123456788", "qwertyui8", "asddfg8", + "abcdefg9", "123456789", "qwertyui9", "asddfg9", + "abcdefgA", "12345678A", "qwertyuiA", "asddfgA", + "abcdefgB", "12345678B", "qwertyuiB", "asddfgB", + "abcdefgC", "12345678C", "qwertyuiC", "asddfgC") + and c10 in ("abcdefgh", "123456789", "qwertyuio", "asddfgh", + "abcdefg1", "123456781", "qwertyui1", "asddfg1", + "abcdefg2", "123456782", "qwertyui2", "asddfg2", + "abcdefg3", "123456783", "qwertyui3", "asddfg3", + "abcdefg4", "123456784", "qwertyui4", "asddfg4", + "abcdefg5", "123456785", "qwertyui5", "asddfg5", + "abcdefg6", "123456786", "qwertyui6", "asddfg6", + "abcdefg7", "123456787", "qwertyui7", "asddfg7", + "abcdefg8", "123456788", "qwertyui8", "asddfg8", + "abcdefg9", "123456789", "qwertyui9", "asddfg9", + "abcdefgA", "12345678A", "qwertyuiA", "asddfgA", + "abcdefgB", "12345678B", "qwertyuiB", "asddfgB", + "abcdefgC", "12345678C", "qwertyuiC", "asddfgC"); +drop table t1; --echo End of 4.1 tests # diff --git a/mysql-test/t/row.test b/mysql-test/t/row.test index 63c611e6be6..1d5c7a543ea 100644 --- a/mysql-test/t/row.test +++ b/mysql-test/t/row.test @@ -139,3 +139,20 @@ EXPLAIN EXTENDED SELECT * FROM t1,t2 WHERE t2.a=t1.a AND (t2.b,t2.c)=(2,1); SELECT * FROM t1,t2 WHERE t2.a=t1.a AND (t2.b,t2.c)=(2,1); DROP TABLE t1,t2; + +# +# Bug #27154: crash (memory corruption) when using row equalities +# + +CREATE TABLE t1( + a int, b int, c int, d int, e int, f int, g int, h int, + PRIMARY KEY (a,b,c,d,e,f,g) +); +INSERT INTO t1 VALUES (1,2,3,4,5,6,7,99); + 
+SELECT h FROM t1 WHERE (a,b,c,d,e,f,g)=(1,2,3,4,5,6,7); + +SET @x:= (SELECT h FROM t1 WHERE (a,b,c,d,e,f,g)=(1,2,3,4,5,6,7)); +SELECT @x; + +DROP TABLE t1; diff --git a/mysql-test/t/rpl_ddl.test b/mysql-test/t/rpl_ddl.test index ca1c25c5f09..80df16a7a00 100644 --- a/mysql-test/t/rpl_ddl.test +++ b/mysql-test/t/rpl_ddl.test @@ -22,13 +22,11 @@ # effects like failing 'sync_slave_with_master', crashes of the slave or # abort of the test case etc.. # -# 3. The assignment of the DDL command to be tested to $my_stmt can -# be a bit difficult. "'" must be avoided, because the test -# routine "include/rpl_stmt_seq.inc" performs a -# eval SELECT CONCAT('######## ','$my_stmt',' ########') as ""; -# --source include/not_ndb_default.inc ---source include/have_innodb.inc --source include/master-slave.inc -let $engine_type= "InnoDB"; +--source include/have_innodb.inc +let $engine_type= InnoDB; +let $temp_engine_type= MEMORY; +let $show_binlog = 0; +let $manipulate = 0; -- source extra/rpl_tests/rpl_ddl.test diff --git a/mysql-test/t/rpl_dual_pos_advance.test b/mysql-test/t/rpl_dual_pos_advance.test index 518fa9df885..074aeec63b1 100644 --- a/mysql-test/t/rpl_dual_pos_advance.test +++ b/mysql-test/t/rpl_dual_pos_advance.test @@ -106,3 +106,9 @@ connection slave; sync_with_master; # End of 4.1 tests + +# Cleanup +# The A->B->A replication causes the master to start writing relay logs +# in var/run, remove them +remove_file $MYSQLTEST_VARDIR/run/master-relay-bin.000001; +remove_file $MYSQLTEST_VARDIR/run/master-relay-bin.index; diff --git a/mysql-test/t/rpl_events.test b/mysql-test/t/rpl_events.test new file mode 100644 index 00000000000..895e94c438b --- /dev/null +++ b/mysql-test/t/rpl_events.test @@ -0,0 +1,24 @@ +################################################################## +# Author: Giuseppe # +# Date: 2006-12-20 # +# Purpose: To test that event effects are replicated # +# in both row based and statement based format # +################################################################## + +set global event_scheduler=1; + +--source include/not_embedded.inc +--source include/master-slave.inc + +let $engine_type= MyISAM; + +set binlog_format=row; + +# Embedded server doesn't support binlogging +--source include/rpl_events.inc + +set binlog_format=statement; + +# Embedded server doesn't support binlogging +--source include/rpl_events.inc + diff --git a/mysql-test/t/rpl_incident-master.opt b/mysql-test/t/rpl_incident-master.opt new file mode 100644 index 00000000000..912801debc4 --- /dev/null +++ b/mysql-test/t/rpl_incident-master.opt @@ -0,0 +1 @@ +--loose-debug=+d,incident_database_resync_on_replace diff --git a/mysql-test/t/rpl_incident.test b/mysql-test/t/rpl_incident.test new file mode 100644 index 00000000000..c52f26317ad --- /dev/null +++ b/mysql-test/t/rpl_incident.test @@ -0,0 +1,43 @@ +--source include/master-slave.inc +--source include/have_debug.inc + +--echo **** On Master **** +CREATE TABLE t1 (a INT); + +INSERT INTO t1 VALUES (1),(2),(3); +SELECT * FROM t1; + +# This will generate an incident log event and store it in the binary +# log before the replace statement. +REPLACE INTO t1 VALUES (4); +--save_master_pos +SELECT * FROM t1; + +connection slave; +--wait_for_slave_to_stop + +# The 4 should not be inserted into the table, since the incident log +# event should have stopped the slave. 
+--echo **** On Slave **** +SELECT * FROM t1; + +--replace_result $MASTER_MYPORT MASTER_PORT +--replace_column 1 # 7 # 8 # 9 # 22 # 23 # 33 # +--query_vertical SHOW SLAVE STATUS + +SET GLOBAL SQL_SLAVE_SKIP_COUNTER=1; +START SLAVE; +--sync_with_master + +# Now, we should have inserted the row into the table and the slave +# should be running. We should also have rotated to a new binary log. + +SELECT * FROM t1; +--replace_result $MASTER_MYPORT MASTER_PORT +--replace_column 1 # 7 # 8 # 9 # 22 # 23 # 33 # +--query_vertical SHOW SLAVE STATUS + +DROP TABLE t1; +connection master; +DROP TABLE t1; + diff --git a/mysql-test/t/rpl_loaddata_s.test b/mysql-test/t/rpl_loaddata_s.test index 2c94c8ef953..f397d741310 100644 --- a/mysql-test/t/rpl_loaddata_s.test +++ b/mysql-test/t/rpl_loaddata_s.test @@ -20,9 +20,7 @@ save_master_pos; connection slave; sync_with_master; select count(*) from test.t1; # check that LOAD was replicated ---replace_column 2 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ -show binlog events from 102; # should be nothing +source include/show_binlog_events.inc; # Cleanup connection master; diff --git a/mysql-test/t/rpl_log_pos.test b/mysql-test/t/rpl_log_pos.test index 61c24da514e..ec1c83a9718 100644 --- a/mysql-test/t/rpl_log_pos.test +++ b/mysql-test/t/rpl_log_pos.test @@ -18,12 +18,12 @@ sync_slave_with_master; --replace_column 1 # 8 # 9 # 23 # 33 # show slave status; stop slave; -change master to master_log_pos=74; +change master to master_log_pos=75; start slave; sleep 5; stop slave; -change master to master_log_pos=74; +change master to master_log_pos=75; --replace_result $MASTER_MYPORT MASTER_PORT --replace_column 1 # 8 # 9 # 23 # 33 # show slave status; @@ -33,7 +33,7 @@ sleep 5; --replace_column 1 # 8 # 9 # 23 # 33 # show slave status; stop slave; -change master to master_log_pos=177; +change master to master_log_pos=178; start slave; sleep 2; --replace_result $MASTER_MYPORT MASTER_PORT @@ -49,7 +49,7 @@ insert into t1 values (1),(2),(3); save_master_pos; connection slave; stop slave; -change master to master_log_pos=102; +change master to master_log_pos=106; start slave; sync_with_master; select * from t1 ORDER BY n; diff --git a/mysql-test/t/rpl_misc_functions.test b/mysql-test/t/rpl_misc_functions.test index 6e0bda90503..f00beff583a 100644 --- a/mysql-test/t/rpl_misc_functions.test +++ b/mysql-test/t/rpl_misc_functions.test @@ -28,10 +28,76 @@ create table t2 like t1; eval load data local infile '$MYSQLTEST_VARDIR/master-data/test/rpl_misc_functions.outfile' into table t2; # compare them with the replica; the SELECT below should return no row select * from t1, t2 where (t1.id=t2.id) and not(t1.i=t2.i and t1.r1=t2.r1 and t1.r2=t2.r2 and t1.p=t2.p); -stop slave; -drop table t1; connection master; drop table t1; # End of 4.1 tests + +# +# BUG#25543 test calling rand() multiple times on the master in +# a stored procedure. 
+# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (col_a double default NULL); + +DELIMITER |; + +# Use a SP that calls rand() multiple times +CREATE PROCEDURE test_replication_sp1() +BEGIN + INSERT INTO t1 VALUES (rand()), (rand()); + INSERT INTO t1 VALUES (rand()); +END| + +# Use a SP that calls another SP to call rand() multiple times +CREATE PROCEDURE test_replication_sp2() +BEGIN + CALL test_replication_sp1(); + CALL test_replication_sp1(); +END| + +# Use a SF that calls rand() multiple times +CREATE FUNCTION test_replication_sf() RETURNS DOUBLE DETERMINISTIC +BEGIN + RETURN (rand() + rand()); +END| + +DELIMITER ;| + +# Exercise the functions and procedures then compare the results on +# the master to those on the slave. +CALL test_replication_sp1(); +CALL test_replication_sp2(); +INSERT INTO t1 VALUES (test_replication_sf()); +INSERT INTO t1 VALUES (test_replication_sf()); +INSERT INTO t1 VALUES (test_replication_sf()); + +# Record the results of the query on the master +--exec $MYSQL --port=$MASTER_MYPORT test -e "SELECT * FROM test.t1" > $MYSQLTEST_VARDIR/tmp/rpl_rand_master.sql + +--sync_slave_with_master + +# Record the results of the query on the slave +--exec $MYSQL --port=$SLAVE_MYPORT test -e "SELECT * FROM test.t1" > $MYSQLTEST_VARDIR/tmp/rpl_rand_slave.sql + +# Compare the results from the master to the slave. +--exec diff $MYSQLTEST_VARDIR/tmp/rpl_rand_master.sql $MYSQLTEST_VARDIR/tmp/rpl_rand_slave.sql + +# Cleanup +connection master; +--disable_warnings +DROP PROCEDURE IF EXISTS test_replication_sp1; +DROP PROCEDURE IF EXISTS test_replication_sp2; +DROP FUNCTION IF EXISTS test_replication_sf; +DROP TABLE IF EXISTS t1; +--enable_warnings +--sync_slave_with_master + +# If all is good, we can clean up our dump files. 
+--system rm $MYSQLTEST_VARDIR/tmp/rpl_rand_master.sql +--system rm $MYSQLTEST_VARDIR/tmp/rpl_rand_slave.sql diff --git a/mysql-test/t/rpl_ndb_basic.test b/mysql-test/t/rpl_ndb_basic.test index 5290dc377c2..a230b9da622 100644 --- a/mysql-test/t/rpl_ndb_basic.test +++ b/mysql-test/t/rpl_ndb_basic.test @@ -79,6 +79,49 @@ select * from t1 order by nid; --connection master DROP table t1; +# +# Bug #27378 update becomes delete on slave +# + +--connection master +CREATE TABLE `t1` ( + `prid` int(10) unsigned NOT NULL, + `id_type` enum('IMSI','SIP') NOT NULL, + `fkimssub` varchar(50) NOT NULL, + `user_id` varchar(20) DEFAULT NULL, + `password` varchar(20) DEFAULT NULL, + `ptg_nbr` varchar(20) DEFAULT NULL, + `old_tmsi` int(10) unsigned DEFAULT NULL, + `new_tmsi` int(10) unsigned DEFAULT NULL, + `dev_capability` int(10) unsigned DEFAULT NULL, + `dev_oid` bigint(20) unsigned DEFAULT NULL, + `lac_cell_id` bigint(20) unsigned DEFAULT NULL, + `ms_classmark1` int(10) unsigned DEFAULT NULL, + `cipher_key` int(10) unsigned DEFAULT NULL, + `priid_master` int(10) unsigned DEFAULT NULL, + PRIMARY KEY (`prid`), + UNIQUE KEY `fkimssub` (`fkimssub`,`ptg_nbr`) USING HASH +) ENGINE=ndbcluster DEFAULT CHARSET=latin1; + +INSERT INTO `t1` VALUES (183342,'IMSI','config3_sub_2Privates_3Publics_imssub_36668','user_id_73336','user_id_73336','73336',NULL,NULL,NULL,123456789,NULL,NULL,NULL,NULL),(47617,'IMSI','config3_sub_2Privates_3Publics_imssub_9523','user_id_19046','user_id_19046','19046',NULL,NULL,NULL,123456789,NULL,NULL,NULL,NULL),(200332,'IMSI','config3_sub_2Privates_3Publics_imssub_40066','user_id_80132','user_id_80132','80132',NULL,NULL,NULL,123456789,NULL,NULL,NULL,NULL),(478882,'IMSI','config3_sub_2Privates_3Publics_imssub_95776','user_id_191552','user_id_191552','191552',NULL,NULL,NULL,123456789,NULL,NULL,NULL,NULL),(490146,'IMSI','config3_sub_2Privates_3Publics_imssub_98029','user_id_196057','user_id_196057','196057',NULL,NULL,NULL,1010,NULL,NULL,NULL,NULL),(499301,'IMSI','config3_sub_2Privates_3Publics_imssub_99860','user_id_199719','user_id_199719','199719',NULL,NULL,NULL,123456789,NULL,NULL,NULL,NULL),(506101,'IMSI','config3_sub_2Privates_3Publics_imssub_101220','user_id_202439','user_id_202439','202439',NULL,NULL,NULL,1010,NULL,NULL,NULL,NULL),(510142,'IMSI','config3_sub_2Privates_3Publics_imssub_102028','user_id_204056','user_id_204056','204056',NULL,NULL,NULL,1010,NULL,NULL,NULL,NULL),(515871,'IMSI','config3_sub_2Privates_3Publics_imssub_103174','user_id_206347','user_id_206347','206347',NULL,NULL,NULL,1010,NULL,NULL,NULL,NULL),(209842,'IMSI','config3_sub_2Privates_3Publics_imssub_41968','user_id_83936','user_id_83936','83936',NULL,NULL,NULL,123456789,NULL,NULL,NULL,NULL),(365902,'IMSI','config3_sub_2Privates_3Publics_imssub_73180','user_id_146360','user_id_146360','146360',NULL,NULL,NULL,1010,NULL,NULL,NULL,NULL),(11892,'IMSI','config3_sub_2Privates_3Publics_imssub_2378','user_id_4756','user_id_4756','4756',NULL,NULL,NULL,123456789,NULL,NULL,NULL,NULL); + +select count(*) from t1; + +--sync_slave_with_master +--connection slave +select count(*) from t1; + +--connection master +update t1 set dev_oid=dev_oid+1; +select count(*) from t1; + +--sync_slave_with_master +--connection slave +select count(*) from t1; + +--connection master +DROP table t1; + ################################################################## # # Check that retries are made on the slave on some temporary errors diff --git a/mysql-test/t/rpl_ndb_ddl.test b/mysql-test/t/rpl_ndb_ddl.test index 0c503e56c9c..ca7a4ce4968 100644 --- 
a/mysql-test/t/rpl_ndb_ddl.test +++ b/mysql-test/t/rpl_ndb_ddl.test @@ -1,4 +1,4 @@ -######################## rpl_ddl.test ######################## +#################### rpl_ndb_ddl.test ######################## # # # DDL statements (sometimes with implicit COMMIT) executed # # by the master and it's propagation into the slave # @@ -22,14 +22,11 @@ # effects like failing 'sync_slave_with_master', crashes of the slave or # abort of the test case etc.. # -# 3. The assignment of the DDL command to be tested to $my_stmt can -# be a bit difficult. "'" must be avoided, because the test -# routine "include/rpl_stmt_seq.inc" performs a -# eval SELECT CONCAT('######## ','$my_stmt',' ########') as ""; -# ---source include/have_ndb.inc --source include/master-slave.inc -let $engine_type= "NDB"; --- source extra/rpl_tests/rpl_ndb_ddl.test - +--source include/have_ndb.inc +let $engine_type= NDB; +let $temp_engine_type= MEMORY; +let $show_binlog = 0; +let $manipulate = 0; +-- source extra/rpl_tests/rpl_ddl.test diff --git a/mysql-test/t/rpl_ndb_do_table.test b/mysql-test/t/rpl_ndb_do_table.test index 278a326aefd..700c79766e1 100644 --- a/mysql-test/t/rpl_ndb_do_table.test +++ b/mysql-test/t/rpl_ndb_do_table.test @@ -27,6 +27,20 @@ INSERT INTO t2 VALUES(3, repeat('ghi',3000)); SHOW TABLES; SELECT COUNT(*) FROM t1; +# +# Bug #27044 replicated with unique field ndb table allows dup key inserts +# +connection master; + +--error ER_DUP_ENTRY_WITH_KEY_NAME +INSERT INTO t1 VALUES (3, repeat('bad',1)); + +connection slave; +--error ER_DUP_ENTRY_WITH_KEY_NAME +INSERT INTO t1 VALUES (3, repeat('bad too',1)); + +# cleanup + connection master; DROP TABLE IF EXISTS t1, t2; --sync_slave_with_master diff --git a/mysql-test/t/rpl_packet.test b/mysql-test/t/rpl_packet.test index d01979a4731..db6f475dc94 100644 --- a/mysql-test/t/rpl_packet.test +++ b/mysql-test/t/rpl_packet.test @@ -36,4 +36,37 @@ save_master_pos; connection slave; sync_with_master; +# +# Bug #23755: Replicated event larger that max_allowed_packet infinitely re-transmits +# +# Check that a situation when the size of event on the master is greater than +# max_allowed_packet on the slave does not lead to infinite re-transmits. 
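For context, every packet on the MySQL wire protocol starts with a 4-byte header: a 3-byte little-endian payload length followed by a 1-byte sequence number. The sketch below is illustrative pseudologic with made-up names, not the actual sql/net_serv.cc code; it shows the kind of check the slave's I/O layer needs, namely treating an advertised length above the local max_allowed_packet as a permanent error instead of re-requesting the same oversized event forever.

/*
  Parse the 3-byte little-endian length from a MySQL packet header and
  give up with a fatal error, rather than retrying, when the event
  exceeds the local max_allowed_packet.
*/
#include <stdint.h>
#include <stdio.h>

#define MAX_ALLOWED_PACKET 4096u   /* matches the value set in the test */

static int check_packet(const unsigned char header[4], uint32_t *len)
{
  *len = (uint32_t) header[0] |
         ((uint32_t) header[1] << 8) |
         ((uint32_t) header[2] << 16);   /* 3-byte length, byte 3 is the sequence number */
  if (*len > MAX_ALLOWED_PACKET)
    return -1;                           /* fatal: do not re-request the event */
  return 0;
}

int main(void)
{
  const unsigned char too_big[4] = { 0x00, 0x20, 0x00, 0x05 };  /* 0x002000 = 8192 bytes */
  uint32_t len;
  if (check_packet(too_big, &len) != 0)
    fprintf(stderr, "event of %u bytes exceeds max_allowed_packet, stopping I/O thread\n", len);
  return 0;
}

This is the behaviour the test then verifies with SHOW STATUS LIKE 'Slave_running' after the oversized INSERT.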
+ +connection master; + +# Change the max packet size on master + +SET @@global.max_allowed_packet=4096; +SET @@global.net_buffer_length=4096; + +# Restart slave for new setting to take effect +connection slave; +STOP SLAVE; +START SLAVE; + +# Reconnect to master for new setting to take effect +disconnect master; +connect (master, localhost, root) +connection master; + +CREATE TABLe `t1` (`f1` LONGTEXT) ENGINE=MyISAM; + +INSERT INTO `t1`(`f1`) VALUES ('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa2048'); + +# The slave I/O thread must stop after trying to read the above event +connection slave; +sleep 2; +SHOW STATUS LIKE 'Slave_running'; + + # End of tests diff --git a/mysql-test/t/rpl_row_basic_11bugs-master.opt b/mysql-test/t/rpl_row_basic_11bugs-master.opt index ad03cdaa6d1..ceba85f40e5 100644 --- a/mysql-test/t/rpl_row_basic_11bugs-master.opt +++ b/mysql-test/t/rpl_row_basic_11bugs-master.opt @@ -1 +1,2 @@ ---binlog_ignore_db=test_ignore +--binlog_ignore_db=test_ignore --innodb + diff --git a/mysql-test/t/rpl_row_basic_11bugs-slave.opt b/mysql-test/t/rpl_row_basic_11bugs-slave.opt new file mode 100644 index 00000000000..627becdbfb5 --- /dev/null +++ b/mysql-test/t/rpl_row_basic_11bugs-slave.opt @@ -0,0 +1 @@ +--innodb diff --git a/mysql-test/t/rpl_row_basic_11bugs.test b/mysql-test/t/rpl_row_basic_11bugs.test index 348b6d45f52..17f5848ceef 100644 --- a/mysql-test/t/rpl_row_basic_11bugs.test +++ b/mysql-test/t/rpl_row_basic_11bugs.test @@ -24,8 +24,7 @@ USE test_ignore; CREATE TABLE t2 (a INT, b INT); SHOW TABLES; INSERT INTO 
t2 VALUES (3,3), (4,4); ---replace_regex /table_id: [0-9]+/table_id: #/ -SHOW BINLOG EVENTS FROM 102; +source include/show_binlog_events.inc; sync_slave_with_master; --echo **** On Slave **** SHOW DATABASES; @@ -117,5 +116,69 @@ sync_slave_with_master; SELECT HEX(a),b FROM t1; connection master; -DROP TABLE t1; +DROP TABLE IF EXISTS t1; +sync_slave_with_master; + +# BUG#22583: RBR between MyISAM and non-MyISAM tables containing a BIT +# field does not work + +--echo ================ Test for BUG#22583 ================ +--disable_query_log +--source include/master-slave-reset.inc +--enable_query_log + +--echo **** On Master **** +connection master; +CREATE TABLE t1_myisam (k INT, a BIT(1), b BIT(9)) ENGINE=MYISAM; +CREATE TABLE t1_innodb (k INT, a BIT(1), b BIT(9)) ENGINE=INNODB; +CREATE TABLE t2_myisam (k INT, a BIT(1) NOT NULL, b BIT(4) NOT NULL) ENGINE=MYISAM; +CREATE TABLE t2_innodb (k INT, a BIT(1) NOT NULL, b BIT(4) NOT NULL) ENGINE=INNODB; +--echo **** On Slave **** +sync_slave_with_master; +ALTER TABLE t1_myisam ENGINE=INNODB; +ALTER TABLE t1_innodb ENGINE=MYISAM; +ALTER TABLE t2_myisam ENGINE=INNODB; +ALTER TABLE t2_innodb ENGINE=MYISAM; + +--echo **** On Master **** +connection master; +INSERT INTO t1_myisam VALUES(1, b'0', 257); +INSERT INTO t1_myisam VALUES(2, b'1', 256); +INSERT INTO t1_innodb VALUES(1, b'0', 257); +INSERT INTO t1_innodb VALUES(2, b'1', 256); +SELECT k, HEX(a),HEX(b) FROM t1_myisam; +SELECT k, HEX(a),HEX(b) FROM t1_innodb; +INSERT INTO t2_myisam VALUES(1, b'0', 9); +INSERT INTO t2_myisam VALUES(2, b'1', 8); +INSERT INTO t2_innodb VALUES(1, b'0', 9); +INSERT INTO t2_innodb VALUES(2, b'1', 8); +SELECT k, HEX(a),HEX(b) FROM t2_myisam; +SELECT k, HEX(a),HEX(b) FROM t2_innodb; +--echo **** On Slave **** +sync_slave_with_master; +SELECT k, HEX(a),HEX(b) FROM t1_myisam; +SELECT k, HEX(a),HEX(b) FROM t1_innodb; +SELECT k, HEX(a),HEX(b) FROM t2_myisam; +SELECT k, HEX(a),HEX(b) FROM t2_innodb; + +--echo **** On Master **** +connection master; +UPDATE t1_myisam SET a=0 WHERE k=2; +SELECT k, HEX(a),HEX(b) FROM t1_myisam; +UPDATE t1_innodb SET a=0 WHERE k=2; +SELECT k, HEX(a),HEX(b) FROM t1_innodb; +UPDATE t2_myisam SET a=0 WHERE k=2; +SELECT k, HEX(a),HEX(b) FROM t2_myisam; +UPDATE t2_innodb SET a=0 WHERE k=2; +SELECT k, HEX(a),HEX(b) FROM t2_innodb; +--echo **** On Slave **** +sync_slave_with_master; +SELECT k, HEX(a),HEX(b) FROM t1_myisam; +SELECT k, HEX(a),HEX(b) FROM t1_innodb; +SELECT k, HEX(a),HEX(b) FROM t2_myisam; +SELECT k, HEX(a),HEX(b) FROM t2_innodb; + +--echo **** On Master **** +connection master; +DROP TABLE IF EXISTS t1_myisam, t1_innodb, t2_myisam, t2_innodb; sync_slave_with_master; diff --git a/mysql-test/t/rpl_row_create_table.test b/mysql-test/t/rpl_row_create_table.test index 6afcae4b5e6..d1b26f9e3f4 100644 --- a/mysql-test/t/rpl_row_create_table.test +++ b/mysql-test/t/rpl_row_create_table.test @@ -21,7 +21,7 @@ DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7,t8,t9; # Set the default storage engine to different values on master and # slave. We need to stop the slave for the server variable to take # effect, since the variable is only read on start-up. 
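A note on the BIT-column test added above (BUG#22583): the engines it mixes lay BIT columns out differently on disk, so row-based replication has to compare values in a canonical, engine-independent form rather than as raw record bytes. The helper below is only a sketch of that idea, with hypothetical names and a big-endian layout chosen for illustration; it is not the server's Field_bit code.

/*
  Normalise a BIT(9) value into a fixed byte image before comparing
  "master" and "slave" copies of the same row.
*/
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pack the low `bits` bits of val into ceil(bits/8) bytes, high byte first. */
static void bit_pack(uint64_t val, unsigned bits, unsigned char *out)
{
  unsigned nbytes = (bits + 7) / 8;
  for (unsigned i = 0; i < nbytes; i++)
    out[i] = (unsigned char) (val >> (8 * (nbytes - 1 - i)));
}

int main(void)
{
  unsigned char master_img[2], slave_img[2];
  bit_pack(257, 9, master_img);          /* b'100000001', as inserted in the test */
  bit_pack(257, 9, slave_img);
  printf("canonical images %s\n",
         memcmp(master_img, slave_img, sizeof master_img) == 0 ? "match" : "differ");
  return 0;
}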
-connection slave; +sync_slave_with_master; --disable_query_log set @storage_engine = @@global.storage_engine; STOP SLAVE; @@ -34,9 +34,9 @@ CREATE TABLE t1 (a INT, b INT); CREATE TABLE t2 (a INT, b INT) ENGINE=Merge; CREATE TABLE t3 (a INT, b INT) CHARSET=utf8; CREATE TABLE t4 (a INT, b INT) ENGINE=Merge CHARSET=utf8; ---replace_column 1 # 4 # 5 # +--replace_column 1 # 4 # --replace_regex /\/\* xid=.* \*\//\/* XID *\// /table_id: [0-9]+/table_id: #/ ---query_vertical SHOW BINLOG EVENTS FROM 212 +--query_vertical SHOW BINLOG EVENTS FROM 216 --echo **** On Master **** --query_vertical SHOW CREATE TABLE t1 --query_vertical SHOW CREATE TABLE t2 @@ -70,8 +70,9 @@ connection master; --error ER_DUP_ENTRY_WITH_KEY_NAME CREATE TABLE t7 (UNIQUE(b)) SELECT a,b FROM tt3; # Shouldn't be written to the binary log +--replace_column 1 # 4 # --replace_regex /\/\* xid=.* \*\//\/* XID *\// /table_id: [0-9]+/table_id: #/ -SHOW BINLOG EVENTS FROM 1118; +SHOW BINLOG EVENTS FROM 1098; # Test that INSERT-SELECT works the same way as for SBR. CREATE TABLE t7 (a INT, b INT UNIQUE); @@ -79,8 +80,9 @@ CREATE TABLE t7 (a INT, b INT UNIQUE); INSERT INTO t7 SELECT a,b FROM tt3; SELECT * FROM t7 ORDER BY a,b; # Should be written to the binary log +--replace_column 1 # 4 # --replace_regex /\/\* xid=.* \*\//\/* XID *\// /table_id: [0-9]+/table_id: #/ -SHOW BINLOG EVENTS FROM 1118; +SHOW BINLOG EVENTS FROM 1098; sync_slave_with_master; SELECT * FROM t7 ORDER BY a,b; @@ -90,8 +92,9 @@ INSERT INTO tt4 VALUES (4,8), (5,10), (6,12); BEGIN; INSERT INTO t7 SELECT a,b FROM tt4; ROLLBACK; +--replace_column 1 # 4 # --replace_regex /\/\* xid=.* \*\//\/* XID *\// /table_id: [0-9]+/table_id: #/ -SHOW BINLOG EVENTS FROM 1314; +SHOW BINLOG EVENTS FROM 1294; SELECT * FROM t7 ORDER BY a,b; sync_slave_with_master; SELECT * FROM t7 ORDER BY a,b; @@ -105,8 +108,9 @@ CREATE TEMPORARY TABLE tt7 SELECT 1; --echo **** On Master **** --query_vertical SHOW CREATE TABLE t8 --query_vertical SHOW CREATE TABLE t9 +--replace_column 1 # 4 # --replace_regex /\/\* xid=.* \*\//\/* XID *\// /table_id: [0-9]+/table_id: #/ -SHOW BINLOG EVENTS FROM 1410; +SHOW BINLOG EVENTS FROM 1390; sync_slave_with_master; --echo **** On Slave **** --query_vertical SHOW CREATE TABLE t8 @@ -156,6 +160,7 @@ SELECT * FROM t1 ORDER BY a; SELECT * FROM t2 ORDER BY a; SELECT * FROM t3 ORDER BY a; SELECT * FROM t4 ORDER BY a; +--replace_column 1 # 4 # --replace_regex /\/\* xid=.* \*\//\/* XID *\// /Server ver: .*, Binlog ver: .*/Server ver: #, Binlog ver: #/ /table_id: [0-9]+/table_id: #/ SHOW BINLOG EVENTS; sync_slave_with_master; @@ -201,6 +206,7 @@ INSERT INTO t2 SELECT a+2 FROM tt1; COMMIT; SELECT * FROM t2 ORDER BY a; +--replace_column 1 # 4 # --replace_regex /\/\* xid=.* \*\//\/* XID *\// /Server ver: .*, Binlog ver: .*/Server ver: #, Binlog ver: #/ /table_id: [0-9]+/table_id: #/ SHOW BINLOG EVENTS; sync_slave_with_master; @@ -219,8 +225,9 @@ INSERT INTO t2 SELECT a+2 FROM tt2; ROLLBACK; SELECT * FROM t2 ORDER BY a; +--replace_column 1 # 4 # --replace_regex /\/\* xid=.* \*\//\/* XID *\// /Server ver: .*, Binlog ver: .*/Server ver: #, Binlog ver: #/ /table_id: [0-9]+/table_id: #/ -SHOW BINLOG EVENTS FROM 627; +SHOW BINLOG EVENTS FROM 631; sync_slave_with_master; SELECT * FROM t2 ORDER BY a; diff --git a/mysql-test/t/rpl_row_flsh_tbls.test b/mysql-test/t/rpl_row_flsh_tbls.test index 9e8efc1ac9c..a2f9e31fc5d 100644 --- a/mysql-test/t/rpl_row_flsh_tbls.test +++ b/mysql-test/t/rpl_row_flsh_tbls.test @@ -1,7 +1,7 @@ # depends on the binlog output -- source 
include/have_binlog_format_row.inc -let $rename_event_pos= 615; +let $rename_event_pos= 619; # Bug#18326: Do not lock table for writing during prepare of statement # The use of the ps protocol causes extra table maps in the binlog, so diff --git a/mysql-test/t/rpl_row_mysqlbinlog.test b/mysql-test/t/rpl_row_mysqlbinlog.test index 3b4c8db86d8..f7158107e4c 100644 --- a/mysql-test/t/rpl_row_mysqlbinlog.test +++ b/mysql-test/t/rpl_row_mysqlbinlog.test @@ -162,12 +162,12 @@ connection master; # this test for position option -# By setting this position to 412, we should only get the create of t3 +# By setting this position to 413, we should only get the create of t3 --disable_query_log select "--- Test 2 position test --" as ""; --enable_query_log --replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR ---exec $MYSQL_BINLOG --short-form --local-load=$MYSQLTEST_VARDIR/tmp/ --position=412 $MYSQLTEST_VARDIR/log/master-bin.000001 +--exec $MYSQL_BINLOG --short-form --local-load=$MYSQLTEST_VARDIR/tmp/ --position=416 $MYSQLTEST_VARDIR/log/master-bin.000001 # These are tests for remote binlog. # They should return the same as previous test. @@ -263,7 +263,7 @@ select "--- Test 6 reading stdin --" as ""; select "--- Test 7 reading stdin w/position --" as ""; --enable_query_log --replace_result $MYSQL_TEST_DIR MYSQL_TEST_DIR ---exec $MYSQL_BINLOG --short-form --position=412 - < $MYSQLTEST_VARDIR/log/master-bin.000001 +--exec $MYSQL_BINLOG --short-form --position=416 - < $MYSQLTEST_VARDIR/log/master-bin.000001 # Bug#16217 (mysql client did not know how not switch its internal charset) --disable_query_log diff --git a/mysql-test/t/rpl_sp.test b/mysql-test/t/rpl_sp.test index 9230b3bff4f..84154e549b8 100644 --- a/mysql-test/t/rpl_sp.test +++ b/mysql-test/t/rpl_sp.test @@ -566,7 +566,7 @@ connection master; # were written to the binary log. --replace_column 2 # 5 # --replace_regex /table_id: [0-9]+/table_id: #/ -show binlog events in 'master-bin.000001' from 102; +show binlog events in 'master-bin.000001' from 106; # Restore log_bin_trust_function_creators to its original value. 
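Most of the position edits in these tests (102 to 106, 212 to 216, 412 to 416, 615 to 619, and so on) exist because the events written at the very start of the binary log grew by a few bytes, which shifts every hard-coded SHOW BINLOG EVENTS offset. A more robust approach, sketched below from the documented file layout (a 4-byte magic 0xfe 'b' 'i' 'n', then a 19-byte common event header whose event-length field sits 9 bytes in) rather than taken from the server sources, is to compute the first event boundary instead of hard-coding it.

/*
  Read the binlog magic and the first event's common header, and derive
  where the next event starts from the "event length" field.
*/
#include <stdint.h>
#include <stdio.h>

#define BINLOG_MAGIC_LEN 4        /* 0xfe 'b' 'i' 'n' */
#define EVENT_HEADER_LEN 19       /* timestamp(4) type(1) server_id(4) len(4) next(4) flags(2) */

static uint32_t read_le32(const unsigned char *p)
{
  return (uint32_t) p[0] | ((uint32_t) p[1] << 8) |
         ((uint32_t) p[2] << 16) | ((uint32_t) p[3] << 24);
}

int main(int argc, char **argv)
{
  unsigned char buf[BINLOG_MAGIC_LEN + EVENT_HEADER_LEN];
  FILE *f = fopen(argc > 1 ? argv[1] : "master-bin.000001", "rb");
  if (!f || fread(buf, 1, sizeof buf, f) != sizeof buf)
    return 1;
  if (!(buf[0] == 0xfe && buf[1] == 'b' && buf[2] == 'i' && buf[3] == 'n'))
    return 1;                                     /* not a binary log */
  /* the event-length field lives 9 bytes into the common header */
  uint32_t first_event_len = read_le32(buf + BINLOG_MAGIC_LEN + 9);
  printf("first event is %u bytes; next event starts at offset %u\n",
         (unsigned) first_event_len,
         (unsigned) (BINLOG_MAGIC_LEN + first_event_len));
  fclose(f);
  return 0;
}

Several of the tests above sidestep the problem entirely by sourcing include/show_binlog_events.inc instead of hard-coding offsets.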
diff --git a/mysql-test/t/rpl_stm_flsh_tbls.test b/mysql-test/t/rpl_stm_flsh_tbls.test index 43a5234ccc7..a8a33d05e8b 100644 --- a/mysql-test/t/rpl_stm_flsh_tbls.test +++ b/mysql-test/t/rpl_stm_flsh_tbls.test @@ -1,7 +1,7 @@ # depends on the binlog output --source include/have_binlog_format_mixed_or_statement.inc -let $rename_event_pos= 652; +let $rename_event_pos= 656; -- source extra/rpl_tests/rpl_flsh_tbls.test # End of 4.1 tests diff --git a/mysql-test/t/rpl_switch_stm_row_mixed.test b/mysql-test/t/rpl_switch_stm_row_mixed.test index 494e73fda49..b0012827db8 100644 --- a/mysql-test/t/rpl_switch_stm_row_mixed.test +++ b/mysql-test/t/rpl_switch_stm_row_mixed.test @@ -519,9 +519,7 @@ LOCK TABLES t12 WRITE; INSERT INTO t12 VALUES(UUID()); UNLOCK TABLES; ---replace_column 2 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ -show binlog events from 102; +source include/show_binlog_events.inc; sync_slave_with_master; # as we're using UUID we don't SELECT but use "diff" like in rpl_row_UUID @@ -536,9 +534,7 @@ sync_slave_with_master; diff_files $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql; connection master; ---replace_column 2 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ -show binlog events from 102; +source include/show_binlog_events.inc; # Now test that mysqlbinlog works fine on a binlog generated by the # mixed mode diff --git a/mysql-test/t/rpl_udf.test b/mysql-test/t/rpl_udf.test new file mode 100644 index 00000000000..262ad04630c --- /dev/null +++ b/mysql-test/t/rpl_udf.test @@ -0,0 +1,22 @@ +################################################################### +# Author: Chuck Bell # +# Date: 2006-12-21 # +# Purpose: To test that UDFs are replicated in both row based and # +# statement based format. This tests work completed in WL#3629. # +################################################################### + +--source include/not_embedded.inc +--source include/master-slave.inc + +let $engine_type= MyISAM; + +set binlog_format=row; + +# Embedded server doesn't support binlogging +--source include/rpl_udf.inc + +set binlog_format=statement; + +# Embedded server doesn't support binlogging +--source include/rpl_udf.inc + diff --git a/mysql-test/t/sp-code.test b/mysql-test/t/sp-code.test index 97bc29fcad2..1b33680cfaf 100644 --- a/mysql-test/t/sp-code.test +++ b/mysql-test/t/sp-code.test @@ -447,3 +447,21 @@ DROP PROCEDURE p1; --echo End of 5.0 tests. + +# +# Bug #26303: reserve() not called before qs_append() may lead to buffer +# overflow +# +DELIMITER //; +CREATE PROCEDURE p1() +BEGIN + DECLARE dummy int default 0; + + CASE 12 + WHEN 12 + THEN SET dummy = 0; + END CASE; +END// +DELIMITER ;// +SHOW PROCEDURE CODE p1; +DROP PROCEDURE p1; diff --git a/mysql-test/t/sp-destruct.test b/mysql-test/t/sp-destruct.test index 375d2289459..df07091d2de 100644 --- a/mysql-test/t/sp-destruct.test +++ b/mysql-test/t/sp-destruct.test @@ -12,13 +12,10 @@ # mysqltest should be fixed to allow REPLACE_RESULT in error message -- source include/not_embedded.inc -# We're using --system things that probably doesn't work on Windows. 
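The sp-code.test case added above (Bug #26303) is the classic pattern where a fast, unchecked append assumes the caller reserved space first. The following stripped-down sketch uses made-up names and is not the server's String/qs_append implementation; it only shows why a missing reserve() call can overflow and why calling it before every unchecked append is the fix.

/*
  Unchecked "qs_append"-style writers skip bounds checks for speed, so
  every caller must reserve() enough space first.
*/
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct str_buf { char *p; size_t len, alloced; };

static int buf_reserve(struct str_buf *s, size_t extra)
{
  if (s->len + extra <= s->alloced)
    return 0;
  size_t new_size = (s->len + extra) * 2;
  char *np = realloc(s->p, new_size);
  if (!np)
    return 1;
  s->p = np;
  s->alloced = new_size;
  return 0;
}

/* Unchecked fast path: only valid if buf_reserve() was called before. */
static void buf_qs_append(struct str_buf *s, const char *str, size_t n)
{
  memcpy(s->p + s->len, str, n);
  s->len += n;
}

int main(void)
{
  struct str_buf s = { NULL, 0, 0 };
  const char *chunk = "WHEN 12 THEN SET dummy = 0; ";
  for (int i = 0; i < 100; i++)
  {
    if (buf_reserve(&s, strlen(chunk)))   /* the call that was missing in the bug */
      return 1;
    buf_qs_append(&s, chunk, strlen(chunk));
  }
  printf("built %zu bytes safely\n", s.len);
  free(s.p);
  return 0;
}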
---source include/not_windows.inc - # Backup proc table ---system rm -rf $MYSQLTEST_VARDIR/master-data/mysql/backup ---system mkdir $MYSQLTEST_VARDIR/master-data/mysql/backup ---system cp $MYSQLTEST_VARDIR/master-data/mysql/proc.* $MYSQLTEST_VARDIR/master-data/mysql/backup/ +--copy_file $MYSQLTEST_VARDIR/master-data/mysql/proc.frm $MYSQLTEST_VARDIR/tmp/proc.frm +--copy_file $MYSQLTEST_VARDIR/master-data/mysql/proc.MYD $MYSQLTEST_VARDIR/tmp/proc.MYD +--copy_file $MYSQLTEST_VARDIR/master-data/mysql/proc.MYI $MYSQLTEST_VARDIR/tmp/proc.MYI use test; @@ -53,22 +50,25 @@ insert into t1 values (0); flush table mysql.proc; # Thrashing the .frm file ---system echo 'saljdlfa' > $MYSQLTEST_VARDIR/master-data/mysql/proc.frm ---replace_result $MYSQLTEST_VARDIR . master-data// '' +--write_file $MYSQLTEST_VARDIR/master-data/mysql/proc.frm +saljdfa +EOF +--replace_result $MYSQLTEST_VARDIR . master-data// '' '\\' '/' --error ER_NOT_FORM_FILE call bug14233(); ---replace_result $MYSQLTEST_VARDIR . master-data// '' +--replace_result $MYSQLTEST_VARDIR . master-data// '' '\\' '/' --error ER_NOT_FORM_FILE create view v1 as select bug14233_f(); ---replace_result $MYSQLTEST_VARDIR . master-data// '' +--replace_result $MYSQLTEST_VARDIR . master-data// '' '\\' '/' --error ER_NOT_FORM_FILE insert into t1 values (0); - flush table mysql.proc; # Drop the mysql.proc table ---system rm $MYSQLTEST_VARDIR/master-data/mysql/proc.* +--remove_file $MYSQLTEST_VARDIR/master-data/mysql/proc.frm +--remove_file $MYSQLTEST_VARDIR/master-data/mysql/proc.MYD +--remove_file $MYSQLTEST_VARDIR/master-data/mysql/proc.MYI --error ER_NO_SUCH_TABLE call bug14233(); --error ER_NO_SUCH_TABLE @@ -77,8 +77,12 @@ create view v1 as select bug14233_f(); insert into t1 values (0); # Restore mysql.proc ---system mv $MYSQLTEST_VARDIR/master-data/mysql/backup/* $MYSQLTEST_VARDIR/master-data/mysql/ ---system rmdir $MYSQLTEST_VARDIR/master-data/mysql/backup +--copy_file $MYSQLTEST_VARDIR/tmp/proc.frm $MYSQLTEST_VARDIR/master-data/mysql/proc.frm +--copy_file $MYSQLTEST_VARDIR/tmp/proc.MYD $MYSQLTEST_VARDIR/master-data/mysql/proc.MYD +--copy_file $MYSQLTEST_VARDIR/tmp/proc.MYI $MYSQLTEST_VARDIR/master-data/mysql/proc.MYI +--remove_file $MYSQLTEST_VARDIR/tmp/proc.frm +--remove_file $MYSQLTEST_VARDIR/tmp/proc.MYD +--remove_file $MYSQLTEST_VARDIR/tmp/proc.MYI flush table mysql.proc; flush privileges; diff --git a/mysql-test/t/subselect.test b/mysql-test/t/subselect.test index 9c2248cbcc2..88c34bdb9b9 100644 --- a/mysql-test/t/subselect.test +++ b/mysql-test/t/subselect.test @@ -2602,13 +2602,6 @@ SELECT * FROM t1 DROP TABLE t1,t2,t3; -# -# Bug#20835 (literal string with =any values) -# -CREATE TABLE t1 (s1 char(1)); -INSERT INTO t1 VALUES ('a'); -SELECT * FROM t1 WHERE _utf8'a' = ANY (SELECT s1 FROM t1); -DROP TABLE t1; # # Bug#23800: Outer fields in correlated subqueries is used in a temporary # table created for sorting. @@ -2748,3 +2741,77 @@ SELECT * FROM (SELECT 'this is ' 'a test.' 
AS col1, a AS t2 FROM t1) t; DROP table t1; +# +# Bug #27257: COUNT(*) aggregated in outer query +# + +CREATE TABLE t1 (a int, b int); +CREATE TABLE t2 (m int, n int); +INSERT INTO t1 VALUES (2,2), (2,2), (3,3), (3,3), (3,3), (4,4); +INSERT INTO t2 VALUES (1,11), (2,22), (3,32), (4,44), (4,44); + +SELECT COUNT(*), a, + (SELECT m FROM t2 WHERE m = count(*) LIMIT 1) + FROM t1 GROUP BY a; + +SELECT COUNT(*), a, + (SELECT MIN(m) FROM t2 WHERE m = count(*)) + FROM t1 GROUP BY a; + +SELECT COUNT(*), a + FROM t1 GROUP BY a + HAVING (SELECT MIN(m) FROM t2 WHERE m = count(*)) > 1; + +DROP TABLE t1,t2; + +# +# Bug #27229: GROUP_CONCAT in subselect with COUNT() as an argument +# + +CREATE TABLE t1 (a int, b int); +CREATE TABLE t2 (m int, n int); +INSERT INTO t1 VALUES (2,2), (2,2), (3,3), (3,3), (3,3), (4,4); +INSERT INTO t2 VALUES (1,11), (2,22), (3,32), (4,44), (4,44); + +SELECT COUNT(*) c, a, + (SELECT GROUP_CONCAT(COUNT(a)) FROM t2 WHERE m = a) + FROM t1 GROUP BY a; + +SELECT COUNT(*) c, a, + (SELECT GROUP_CONCAT(COUNT(a)+1) FROM t2 WHERE m = a) + FROM t1 GROUP BY a; + +DROP table t1,t2; + +# +# Bug #27348: SET FUNCTION used in a subquery from WHERE condition +# + +CREATE TABLE t1 (a int, b int); +INSERT INTO t1 VALUES (2,22),(1,11),(2,22); + +SELECT a FROM t1 WHERE (SELECT COUNT(b) FROM DUAL) > 0 GROUP BY a; +SELECT a FROM t1 WHERE (SELECT COUNT(b) FROM DUAL) > 1 GROUP BY a; + +SELECT a FROM t1 t0 + WHERE (SELECT COUNT(t0.b) FROM t1 t WHERE t.b>20) GROUP BY a; + +SET @@sql_mode='ansi'; +--error 1111 +SELECT a FROM t1 WHERE (SELECT COUNT(b) FROM DUAL) > 0 GROUP BY a; +--error 1111 +SELECT a FROM t1 WHERE (SELECT COUNT(b) FROM DUAL) > 1 GROUP BY a; + +--error 1111 +SELECT a FROM t1 t0 + WHERE (SELECT COUNT(t0.b) FROM t1 t WHERE t.b>20) GROUP BY a; + +SET @@sql_mode=default; +DROP TABLE t1; + +# Bug#20835 (literal string with =any values) +# +CREATE TABLE t1 (s1 char(1)); +INSERT INTO t1 VALUES ('a'); +SELECT * FROM t1 WHERE _utf8'a' = ANY (SELECT s1 FROM t1); +DROP TABLE t1; diff --git a/mysql-test/t/subselect3.test b/mysql-test/t/subselect3.test index ed8480ba464..e8eae3e2452 100644 --- a/mysql-test/t/subselect3.test +++ b/mysql-test/t/subselect3.test @@ -489,3 +489,45 @@ select a, b, (a,b) in (select a, min(b) from t2 group by a) Z from t1; drop table t1,t2; +# +# Bug #24484: Aggregate function used in column list subquery gives erroneous +# error +# +CREATE TABLE t1 (a int, b INT, c CHAR(10) NOT NULL, PRIMARY KEY (a, b)); +INSERT INTO t1 VALUES (1,1,'a'), (1,2,'b'), (1,3,'c'), (1,4,'d'), (1,5,'e'), + (2,1,'f'), (2,2,'g'), (2,3,'h'), (3,4,'i'),(3,3,'j'), (3,2,'k'), (3,1,'l'), + (1,9,'m'); +CREATE TABLE t2 (a int, b INT, c CHAR(10) NOT NULL, PRIMARY KEY (a, b)); +INSERT INTO t2 SELECT * FROM t1; + +# Gives error, but should work since it is (a, b) is the PK so only one +# given match possible +SELECT a, MAX(b), (SELECT t.c FROM t1 AS t WHERE t1.a=t.a AND t.b=MAX(t1.b)) + as test FROM t1 GROUP BY a; +SELECT * FROM t1 GROUP by t1.a + HAVING (MAX(t1.b) > (SELECT MAX(t2.b) FROM t2 WHERE t2.c < t1.c + HAVING MAX(t2.b+t1.a) < 10)); +#FIXME: Enable this test after fixing bug #27321 +#SELECT a, AVG(b), (SELECT t.c FROM t1 AS t WHERE t1.a=t.a AND t.b=AVG(t1.b)) +# AS test FROM t1 GROUP BY a; + +SELECT a,b,c FROM t1 WHERE b in (9,3,4) ORDER BY b,c; + +SELECT a, MAX(b), + (SELECT COUNT(DISTINCT t.c) FROM t1 AS t WHERE t1.a=t.a AND t.b=MAX(t1.b) + LIMIT 1) + as cnt, + (SELECT t.b FROM t1 AS t WHERE t1.a=t.a AND t.b=MAX(t1.b) LIMIT 1) + as t_b, + (SELECT t.c FROM t1 AS t WHERE t1.a=t.a AND t.b=MAX(t1.b) LIMIT 
1) + as t_b, + (SELECT t.c FROM t1 AS t WHERE t1.a=t.a AND t.b=MAX(t1.b) ORDER BY t.c LIMIT 1) + as t_b + FROM t1 GROUP BY a; + +SELECT a, MAX(b), + (SELECT t.c FROM t1 AS t WHERE t1.a=t.a AND t.b=MAX(t1.b) LIMIT 1) as test + FROM t1 GROUP BY a; + + +DROP TABLE t1, t2; diff --git a/mysql-test/t/temp_table.test b/mysql-test/t/temp_table.test index d06fde87b34..e10fd386bd3 100644 --- a/mysql-test/t/temp_table.test +++ b/mysql-test/t/temp_table.test @@ -164,6 +164,21 @@ DROP TABLE t1; --echo End of 4.1 tests. +# +# Bug #24791: Union with AVG-groups generates wrong results +# +CREATE TABLE t1 ( c FLOAT( 20, 14 ) ); +INSERT INTO t1 VALUES( 12139 ); + +CREATE TABLE t2 ( c FLOAT(30,18) ); +INSERT INTO t2 VALUES( 123456 ); + +SELECT AVG( c ) FROM t1 UNION SELECT 1; +SELECT 1 UNION SELECT AVG( c ) FROM t1; +SELECT 1 UNION SELECT * FROM t2 UNION SELECT 1; +SELECT c/1 FROM t1 UNION SELECT 1; + +DROP TABLE t1, t2; # # Test truncate with temporary tables diff --git a/mysql-test/t/type_datetime.test b/mysql-test/t/type_datetime.test index 3aa162b4700..9246080630e 100644 --- a/mysql-test/t/type_datetime.test +++ b/mysql-test/t/type_datetime.test @@ -113,6 +113,12 @@ insert into t1 values ("00-00-00"), ("00-00-00 00:00:00"); select * from t1; drop table t1; +# +# Bug #16546 DATETIME+0 not always coerced the same way +# +select cast('2006-12-05 22:10:10' as datetime) + 0; + + # End of 4.1 tests # diff --git a/mysql-test/t/type_newdecimal.test b/mysql-test/t/type_newdecimal.test index f16e5dec40c..b8c05d62720 100644 --- a/mysql-test/t/type_newdecimal.test +++ b/mysql-test/t/type_newdecimal.test @@ -1127,6 +1127,22 @@ select * from t1; drop table t1; # +# Bug #8663 (cant use bigint as input to CAST) +# +select cast(19999999999999999999 as unsigned); + +# +# Bug #24558: Increasing decimal column length causes data loss +# +create table t1(a decimal(18)); +insert into t1 values(123456789012345678); +alter table t1 modify column a decimal(19); +select * from t1; +drop table t1; + +--echo End of 5.0 tests + +# # Bug#16172 DECIMAL data type processed incorrectly # select cast(143.481 as decimal(4,1)); @@ -1136,8 +1152,3 @@ select cast(-3.4 as decimal(2,1)); select cast(99.6 as decimal(2,0)); select cast(-13.4 as decimal(2,1)); select cast(98.6 as decimal(2,0)); - -# Bug #8663 (cant use bigint as input to CAST) -# -select cast(19999999999999999999 as unsigned); - diff --git a/mysql-test/t/union.test b/mysql-test/t/union.test index cc93fbd715a..a57ab469ac2 100644 --- a/mysql-test/t/union.test +++ b/mysql-test/t/union.test @@ -913,4 +913,13 @@ SELECT a FROM (SELECT a FROM t1 UNION SELECT a FROM t1 ORDER BY c) AS test; DROP TABLE t1; +# +# Bug#23345: Wrongly allowed INTO in a non-last select of a UNION. +# +--error 1221 +(select 1 into @var) union (select 1); +(select 1) union (select 1 into @var); +select @var; +--error 1172 +(select 2) union (select 1 into @var); --echo End of 5.0 tests diff --git a/mysql-test/t/user_var-binlog.test b/mysql-test/t/user_var-binlog.test index ce1a476eb43..3ce01a0927b 100644 --- a/mysql-test/t/user_var-binlog.test +++ b/mysql-test/t/user_var-binlog.test @@ -13,9 +13,8 @@ INSERT INTO t1 VALUES(@`a b`); set @var1= "';aaa"; SET @var2=char(ascii('a')); insert into t1 values (@var1),(@var2); ---replace_column 2 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ -show binlog events from 102; +source include/show_binlog_events.inc; + # more important than SHOW BINLOG EVENTS, mysqlbinlog (where we # absolutely need variables names to be quoted and strings to be # escaped). 
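The user_var test above deliberately stores values like "';aaa" because mysqlbinlog output is only useful if it can be fed back to a server, which means variable names must be quoted and string values escaped. Below is a minimal escaper in the spirit of mysql_real_escape_string(), simplified to a single-byte charset; the function name and the exact set of characters handled are assumptions for illustration.

/*
  Escape a value for embedding in a single-quoted SQL literal.
*/
#include <stdio.h>

/* Escape src into dst (dst must hold at least 2*strlen(src)+1 bytes). */
static void escape_sql_string(char *dst, const char *src)
{
  for (; *src; src++)
  {
    switch (*src)
    {
    case '\'':  *dst++ = '\\'; *dst++ = '\''; break;
    case '\\':  *dst++ = '\\'; *dst++ = '\\'; break;
    case '\n':  *dst++ = '\\'; *dst++ = 'n';  break;
    case '\r':  *dst++ = '\\'; *dst++ = 'r';  break;
    case '\032': *dst++ = '\\'; *dst++ = 'Z'; break;
    default:    *dst++ = *src;
    }
  }
  *dst = '\0';
}

int main(void)
{
  const char *value = "';aaa";                 /* same value as @var1 above */
  char escaped[64];                            /* plenty for this example */
  escape_sql_string(escaped, value);
  printf("SET @`var1` := '%s';\n", escaped);   /* safe to paste back into mysql */
  return 0;
}

The printed statement parses back to the original value, which is exactly what the mysqlbinlog round-trip in this test checks.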
diff --git a/mysql-test/t/view.test b/mysql-test/t/view.test index 026a08bf365..4961d95d0ac 100644 --- a/mysql-test/t/view.test +++ b/mysql-test/t/view.test @@ -3216,29 +3216,4 @@ DROP VIEW `v-2`; DROP DATABASE `d-1`; USE test; - -# -# Test that ALTER VIEW accepts DEFINER and ALGORITHM, see bug#16425. -# ---disable_warnings -DROP VIEW IF EXISTS v1; -DROP TABLE IF EXISTS t1; ---enable_warnings - -CREATE TABLE t1 (i INT); -CREATE VIEW v1 AS SELECT * FROM t1; - -ALTER VIEW v1 AS SELECT * FROM t1; -SHOW CREATE VIEW v1; -ALTER DEFINER=no_such@user_1 VIEW v1 AS SELECT * FROM t1; -SHOW CREATE VIEW v1; -ALTER ALGORITHM=MERGE VIEW v1 AS SELECT * FROM t1; -SHOW CREATE VIEW v1; -ALTER ALGORITHM=TEMPTABLE DEFINER=no_such@user_2 VIEW v1 AS SELECT * FROM t1; -SHOW CREATE VIEW v1; - -DROP VIEW v1; -DROP TABLE t1; - - --echo End of 5.1 tests. diff --git a/mysql-test/t/view_grant.test b/mysql-test/t/view_grant.test index a4a1cefd0aa..64cafe89d10 100644 --- a/mysql-test/t/view_grant.test +++ b/mysql-test/t/view_grant.test @@ -1040,5 +1040,63 @@ DROP DATABASE mysqltest_db2; DROP USER mysqltest_u1@localhost; DROP USER mysqltest_u2@localhost; +# +# Bug#26813: The SUPER privilege is wrongly required to alter a view created +# by another user. +# +connection root; +CREATE DATABASE db26813; +USE db26813; +CREATE TABLE t1(f1 INT, f2 INT); +CREATE VIEW v1 AS SELECT f1 FROM t1; +CREATE VIEW v2 AS SELECT f1 FROM t1; +CREATE VIEW v3 AS SELECT f1 FROM t1; +CREATE USER u26813@localhost; +GRANT DROP ON db26813.v1 TO u26813@localhost; +GRANT CREATE VIEW ON db26813.v2 TO u26813@localhost; +GRANT DROP, CREATE VIEW ON db26813.v3 TO u26813@localhost; +GRANT SELECT ON db26813.t1 TO u26813@localhost; + +connect (u1,localhost,u26813,,db26813); +connection u1; +--error 1142 +ALTER VIEW v1 AS SELECT f2 FROM t1; +--error 1142 +ALTER VIEW v2 AS SELECT f2 FROM t1; +ALTER VIEW v3 AS SELECT f2 FROM t1; + +connection root; +SHOW CREATE VIEW v3; + +DROP USER u26813@localhost; +DROP DATABASE db26813; +disconnect u1; --echo End of 5.0 tests. + + +# +# Test that ALTER VIEW accepts DEFINER and ALGORITHM, see bug#16425. +# +connection default; +--disable_warnings +DROP VIEW IF EXISTS v1; +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (i INT); +CREATE VIEW v1 AS SELECT * FROM t1; + +ALTER VIEW v1 AS SELECT * FROM t1; +SHOW CREATE VIEW v1; +ALTER DEFINER=no_such@user_1 VIEW v1 AS SELECT * FROM t1; +SHOW CREATE VIEW v1; +ALTER ALGORITHM=MERGE VIEW v1 AS SELECT * FROM t1; +SHOW CREATE VIEW v1; +ALTER ALGORITHM=TEMPTABLE DEFINER=no_such@user_2 VIEW v1 AS SELECT * FROM t1; +SHOW CREATE VIEW v1; + +DROP VIEW v1; +DROP TABLE t1; + +--echo End of 5.1 tests. diff --git a/mysql-test/t/wait_for_socket.sh b/mysql-test/t/wait_for_socket.sh index 8c17c8ac0ac..2fa7d5c5b7e 100755 --- a/mysql-test/t/wait_for_socket.sh +++ b/mysql-test/t/wait_for_socket.sh @@ -61,7 +61,7 @@ fi ########################################################################### -client_args="--silent --socket=$socket_path --connect_timeout=1 " +client_args="--no-defaults --silent --socket=$socket_path --connect_timeout=1 " [ -n "$username" ] && client_args="$client_args --user=$username " [ -n "$password" ] && client_args="$client_args --password=$password " diff --git a/mysys/array.c b/mysys/array.c index 8f4a6087c00..60f5b255e18 100644 --- a/mysys/array.c +++ b/mysys/array.c @@ -15,10 +15,6 @@ /* Handling of arrays that can grow dynamicly. 
*/ -#if defined(WIN32) || defined(__WIN__) -#undef SAFEMALLOC /* Problems with threads */ -#endif - #include "mysys_priv.h" #include "m_string.h" diff --git a/mysys/mf_tempdir.c b/mysys/mf_tempdir.c index 8d3fc40e96d..36eecbeac09 100644 --- a/mysys/mf_tempdir.c +++ b/mysys/mf_tempdir.c @@ -26,12 +26,11 @@ my_bool init_tmpdir(MY_TMPDIR *tmpdir, const char *pathlist) { char *end, *copy; char buff[FN_REFLEN]; - DYNAMIC_ARRAY t_arr; DBUG_ENTER("init_tmpdir"); DBUG_PRINT("enter", ("pathlist: %s", pathlist ? pathlist : "NULL")); pthread_mutex_init(&tmpdir->mutex, MY_MUTEX_INIT_FAST); - if (my_init_dynamic_array(&t_arr, sizeof(char*), 1, 5)) + if (my_init_dynamic_array(&tmpdir->full_list, sizeof(char*), 1, 5)) goto err; if (!pathlist || !pathlist[0]) { @@ -53,19 +52,19 @@ my_bool init_tmpdir(MY_TMPDIR *tmpdir, const char *pathlist) strmake(buff, pathlist, (uint) (end-pathlist)); length= cleanup_dirname(buff, buff); if (!(copy= my_strndup(buff, length, MYF(MY_WME))) || - insert_dynamic(&t_arr, (gptr) ©)) + insert_dynamic(&tmpdir->full_list, (gptr) ©)) DBUG_RETURN(TRUE); pathlist=end+1; } while (*end); - freeze_size(&t_arr); - tmpdir->list=(char **)t_arr.buffer; - tmpdir->max=t_arr.elements-1; + freeze_size(&tmpdir->full_list); + tmpdir->list=(char **)tmpdir->full_list.buffer; + tmpdir->max=tmpdir->full_list.elements-1; tmpdir->cur=0; DBUG_RETURN(FALSE); err: - delete_dynamic(&t_arr); /* Safe to free */ + delete_dynamic(&tmpdir->full_list); /* Safe to free */ pthread_mutex_destroy(&tmpdir->mutex); DBUG_RETURN(TRUE); } @@ -88,7 +87,7 @@ void free_tmpdir(MY_TMPDIR *tmpdir) uint i; for (i=0; i<=tmpdir->max; i++) my_free(tmpdir->list[i], MYF(0)); - my_free((gptr)tmpdir->list, MYF(0)); + delete_dynamic(&tmpdir->full_list); pthread_mutex_destroy(&tmpdir->mutex); } diff --git a/mysys/my_pread.c b/mysys/my_pread.c index 7a09c21e039..f8f0fa49c10 100644 --- a/mysys/my_pread.c +++ b/mysys/my_pread.c @@ -37,7 +37,7 @@ uint my_pread(File Filedes, byte *Buffer, uint Count, my_off_t offset, errno=0; /* Linux doesn't reset this */ #endif #ifndef HAVE_PREAD - off_t old_offset; + os_off_t old_offset; pthread_mutex_lock(&my_file_info[Filedes].mutex); /* @@ -45,7 +45,7 @@ uint my_pread(File Filedes, byte *Buffer, uint Count, my_off_t offset, before seeking to the given offset */ - error= (old_offset= (off_t)lseek(Filedes, 0L, MY_SEEK_CUR)) == -1L || + error= (old_offset= lseek(Filedes, 0L, MY_SEEK_CUR)) == -1L || lseek(Filedes, offset, MY_SEEK_SET) == -1L; if (!error) /* Seek was successful */ @@ -116,7 +116,7 @@ uint my_pwrite(int Filedes, const byte *Buffer, uint Count, my_off_t offset, { #ifndef HAVE_PREAD int error= 0; - off_t old_offset; + os_off_t old_offset; writenbytes= (uint) -1; pthread_mutex_lock(&my_file_info[Filedes].mutex); @@ -124,7 +124,7 @@ uint my_pwrite(int Filedes, const byte *Buffer, uint Count, my_off_t offset, As we cannot change the file pointer, we save the old position, before seeking to the given offset */ - error= ((old_offset= (off_t)lseek(Filedes, 0L, MY_SEEK_CUR)) == -1L || + error= ((old_offset= lseek(Filedes, 0L, MY_SEEK_CUR)) == -1L || lseek(Filedes, offset, MY_SEEK_SET) == -1L); if (!error) /* Seek was successful */ diff --git a/mysys/my_pthread.c b/mysys/my_pthread.c index 74ba98c321a..d8182b67442 100644 --- a/mysys/my_pthread.c +++ b/mysys/my_pthread.c @@ -29,7 +29,7 @@ #define SCHED_POLICY SCHED_OTHER #endif -uint thd_lib_detected= 0; +uint thd_lib_detected; #ifndef my_pthread_setprio void my_pthread_setprio(pthread_t thread_id,int prior) @@ -51,8 +51,6 @@ int 
my_pthread_getprio(pthread_t thread_id) int policy; if (!pthread_getschedparam(thread_id,&policy,&tmp_sched_param)) { - DBUG_PRINT("thread",("policy: %d priority: %d", - policy,tmp_sched_param.sched_priority)); return tmp_sched_param.sched_priority; } #endif @@ -314,8 +312,6 @@ void sigwait_handle_sig(int sig) pthread_mutex_unlock(&LOCK_sigwait); } -extern pthread_t alarm_thread; - void *sigwait_thread(void *set_arg) { sigset_t *set=(sigset_t*) set_arg; @@ -334,7 +330,9 @@ void *sigwait_thread(void *set_arg) sigaction(i, &sact, (struct sigaction*) 0); } } - sigaddset(set,THR_CLIENT_ALARM); + /* Ensure that init_thr_alarm() is called */ + DBUG_ASSERT(thr_client_alarm); + sigaddset(set, thr_client_alarm); pthread_sigmask(SIG_UNBLOCK,(sigset_t*) set,(sigset_t*) 0); alarm_thread=pthread_self(); /* For thr_alarm */ diff --git a/mysys/my_redel.c b/mysys/my_redel.c index 606d301e1d0..b12cf098283 100644 --- a/mysys/my_redel.c +++ b/mysys/my_redel.c @@ -59,7 +59,7 @@ int my_redel(const char *org_name, const char *tmp_name, myf MyFlags) MyFlags)) goto end; } - else if (my_delete(org_name,MyFlags)) + else if (my_delete_allow_opened(org_name, MyFlags)) goto end; if (my_rename(tmp_name,org_name,MyFlags)) goto end; diff --git a/mysys/my_thr_init.c b/mysys/my_thr_init.c index 61e6e640027..75da07390ba 100644 --- a/mysys/my_thr_init.c +++ b/mysys/my_thr_init.c @@ -20,6 +20,7 @@ #include "mysys_priv.h" #include <m_string.h> +#include <signal.h> #ifdef THREAD #ifdef USE_TLS @@ -63,6 +64,8 @@ nptl_pthread_exit_hack_handler(void *arg __attribute((unused))) #endif +static uint get_thread_lib(void); + /* initialize thread environment @@ -76,6 +79,8 @@ nptl_pthread_exit_hack_handler(void *arg __attribute((unused))) my_bool my_thread_global_init(void) { + thd_lib_detected= get_thread_lib(); + if (pthread_key_create(&THR_KEY_mysys,0)) { fprintf(stderr,"Can't initialize threads: error %d\n",errno); @@ -395,4 +400,20 @@ const char *my_thread_name(void) } #endif /* DBUG_OFF */ + +static uint get_thread_lib(void) +{ +#ifdef _CS_GNU_LIBPTHREAD_VERSION + char buff[64]; + + confstr(_CS_GNU_LIBPTHREAD_VERSION, buff, sizeof(buff)); + + if (!strncasecmp(buff, "NPTL", 4)) + return THD_LIB_NPTL; + if (!strncasecmp(buff, "linuxthreads", 12)) + return THD_LIB_LT; +#endif + return THD_LIB_OTHER; +} + #endif /* THREAD */ diff --git a/mysys/thr_alarm.c b/mysys/thr_alarm.c index 3fd70790281..57670c9ac14 100644 --- a/mysys/thr_alarm.c +++ b/mysys/thr_alarm.c @@ -34,6 +34,7 @@ #define ETIME ETIMEDOUT #endif +uint thr_client_alarm; static int alarm_aborted=1; /* No alarm thread */ my_bool thr_alarm_inited= 0; volatile my_bool alarm_thread_running= 0; @@ -56,9 +57,7 @@ static void *alarm_handler(void *arg); #define reschedule_alarms() pthread_kill(alarm_thread,THR_SERVER_ALARM) #endif -#if THR_CLIENT_ALARM != SIGALRM || defined(USE_ALARM_THREAD) static sig_handler thread_alarm(int sig __attribute__((unused))); -#endif static int compare_ulong(void *not_used __attribute__((unused)), byte *a_ptr,byte* b_ptr) @@ -77,9 +76,13 @@ void init_thr_alarm(uint max_alarms) sigfillset(&full_signal_set); /* Neaded to block signals */ pthread_mutex_init(&LOCK_alarm,MY_MUTEX_INIT_FAST); pthread_cond_init(&COND_alarm,NULL); -#if THR_CLIENT_ALARM != SIGALRM || defined(USE_ALARM_THREAD) - my_sigset(THR_CLIENT_ALARM,thread_alarm); + thr_client_alarm= thd_lib_detected == THD_LIB_LT ? 
SIGALRM : SIGUSR1; +#ifndef USE_ALARM_THREAD + if (thd_lib_detected != THD_LIB_LT) #endif + { + my_sigset(thr_client_alarm, thread_alarm); + } sigemptyset(&s); sigaddset(&s, THR_SERVER_ALARM); alarm_thread=pthread_self(); @@ -97,10 +100,11 @@ void init_thr_alarm(uint max_alarms) } #elif defined(USE_ONE_SIGNAL_HAND) pthread_sigmask(SIG_BLOCK, &s, NULL); /* used with sigwait() */ -#if THR_SERVER_ALARM == THR_CLIENT_ALARM - my_sigset(THR_CLIENT_ALARM,process_alarm); /* Linuxthreads */ - pthread_sigmask(SIG_UNBLOCK, &s, NULL); -#endif + if (thd_lib_detected == THD_LIB_LT) + { + my_sigset(thr_client_alarm, process_alarm); /* Linuxthreads */ + pthread_sigmask(SIG_UNBLOCK, &s, NULL); + } #else my_sigset(THR_SERVER_ALARM, process_alarm); pthread_sigmask(SIG_UNBLOCK, &s, NULL); @@ -152,7 +156,7 @@ my_bool thr_alarm(thr_alarm_t *alrm, uint sec, ALARM *alarm_data) now=(ulong) time((time_t*) 0); pthread_sigmask(SIG_BLOCK,&full_signal_set,&old_mask); - pthread_mutex_lock(&LOCK_alarm); /* Lock from threads & alarms */ + pthread_mutex_lock(&LOCK_alarm); /* Lock from threads & alarms */ if (alarm_aborted > 0) { /* No signal thread */ DBUG_PRINT("info", ("alarm aborted")); @@ -273,18 +277,17 @@ sig_handler process_alarm(int sig __attribute__((unused))) This must be first as we can't call DBUG inside an alarm for a normal thread */ -#if THR_SERVER_ALARM == THR_CLIENT_ALARM - if (!pthread_equal(pthread_self(),alarm_thread)) + if (thd_lib_detected == THD_LIB_LT && + !pthread_equal(pthread_self(),alarm_thread)) { #if defined(MAIN) && !defined(__bsdi__) printf("thread_alarm in process_alarm\n"); fflush(stdout); #endif #ifdef DONT_REMEMBER_SIGNAL - my_sigset(THR_CLIENT_ALARM,process_alarm); /* int. thread system calls */ + my_sigset(thr_client_alarm, process_alarm); /* int. thread system calls */ #endif return; } -#endif /* We have to do do the handling of the alarm in a sub function, @@ -328,7 +331,7 @@ static sig_handler process_alarm_part2(int sig __attribute__((unused))) alarm_data=(ALARM*) queue_element(&alarm_queue,i); alarm_data->alarmed=1; /* Info to thread */ if (pthread_equal(alarm_data->thread,alarm_thread) || - pthread_kill(alarm_data->thread, THR_CLIENT_ALARM)) + pthread_kill(alarm_data->thread, thr_client_alarm)) { #ifdef MAIN printf("Warning: pthread_kill couldn't find thread!!!\n"); @@ -352,7 +355,7 @@ static sig_handler process_alarm_part2(int sig __attribute__((unused))) alarm_data->alarmed=1; /* Info to thread */ DBUG_PRINT("info",("sending signal to waiting thread")); if (pthread_equal(alarm_data->thread,alarm_thread) || - pthread_kill(alarm_data->thread, THR_CLIENT_ALARM)) + pthread_kill(alarm_data->thread, thr_client_alarm)) { #ifdef MAIN printf("Warning: pthread_kill couldn't find thread!!!\n"); @@ -488,7 +491,7 @@ void thr_alarm_info(ALARM_INFO *info) ARGSUSED */ -#if THR_CLIENT_ALARM != SIGALRM || defined(USE_ALARM_THREAD) + static sig_handler thread_alarm(int sig) { #ifdef MAIN @@ -498,7 +501,6 @@ static sig_handler thread_alarm(int sig) my_sigset(sig,thread_alarm); /* int. 
thread system calls */ #endif } -#endif #ifdef HAVE_TIMESPEC_TS_SEC @@ -784,9 +786,7 @@ static void *signal_hand(void *arg __attribute__((unused))) sigaddset(&set,SIGINT); sigaddset(&set,SIGQUIT); sigaddset(&set,SIGTERM); -#if THR_CLIENT_ALARM != SIGHUP sigaddset(&set,SIGHUP); -#endif #ifdef SIGTSTP sigaddset(&set,SIGTSTP); #endif @@ -797,7 +797,7 @@ static void *signal_hand(void *arg __attribute__((unused))) puts("Starting signal handling thread"); #endif printf("server alarm: %d thread alarm: %d\n", - THR_SERVER_ALARM,THR_CLIENT_ALARM); + THR_SERVER_ALARM, thr_client_alarm); DBUG_PRINT("info",("Starting signal and alarm handling thread")); for(;;) { @@ -865,11 +865,11 @@ int main(int argc __attribute__((unused)),char **argv __attribute__((unused))) sigaddset(&set,SIGTSTP); #endif sigaddset(&set,THR_SERVER_ALARM); - sigdelset(&set,THR_CLIENT_ALARM); + sigdelset(&set, thr_client_alarm); (void) pthread_sigmask(SIG_SETMASK,&set,NULL); #ifdef NOT_USED sigemptyset(&set); - sigaddset(&set,THR_CLIENT_ALARM); + sigaddset(&set, thr_client_alarm); VOID(pthread_sigmask(SIG_UNBLOCK, &set, (sigset_t*) 0)); #endif diff --git a/netware/Makefile.am b/netware/Makefile.am index f68e8476156..b5de58060f6 100644 --- a/netware/Makefile.am +++ b/netware/Makefile.am @@ -52,18 +52,19 @@ link_sources: done else -BUILT_SOURCES = libmysql.imp -DISTCLEANFILES = $(BUILT_SOURCES) +BUILT_SOURCES = libmysql.imp init_db.sql test_db.sql +DISTCLEANFILES = libmysql.imp +CLEANFILES = init_db.sql test_db.sql # Create the libmysql.imp from libmysql/libmysql.def libmysql.imp: $(top_srcdir)/libmysql/libmysql.def - awk 'BEGIN{x=0;} \ + $(AWK) 'BEGIN{x=0;} \ END{printf("\n");} \ x==1 {printf(" %s",$$1); x++; next} \ x>1 {printf(",\n %s", $$1); next} \ /EXPORTS/{x=1}' $(top_srcdir)/libmysql/libmysql.def > libmysql.imp -EXTRA_DIST= $(BUILT_SOURCES) comp_err.def init_db.sql install_test_db.ncf \ +EXTRA_DIST= $(BUILT_SOURCES) comp_err.def install_test_db.ncf \ libmysql.def \ libmysqlmain.c my_manage.c my_manage.h \ my_print_defaults.def myisam_ftdump.def myisamchk.def \ @@ -77,7 +78,7 @@ EXTRA_DIST= $(BUILT_SOURCES) comp_err.def init_db.sql install_test_db.ncf \ mysqlshow.def mysqltest.def mysqlslap.def mysql_upgrade.def \ perror.def \ mysql_client_test.def \ - replace.def resolve_stack_dump.def resolveip.def test_db.sql \ + replace.def resolve_stack_dump.def resolveip.def \ static_init_db.sql \ BUILD/apply-patch BUILD/compile-AUTOTOOLS \ BUILD/compile-linux-tools BUILD/compile-netware-END \ @@ -88,6 +89,26 @@ EXTRA_DIST= $(BUILT_SOURCES) comp_err.def init_db.sql install_test_db.ncf \ BUILD/cron-build BUILD/crontab BUILD/knetware.imp \ BUILD/mwasmnlm BUILD/mwccnlm BUILD/mwenv BUILD/mwldnlm \ BUILD/nwbootstrap BUILD/openssl.imp BUILD/save-patch + + +# Build init_db.sql from the files that contain +# the system tables for this version of MySQL plus any commands +init_db.sql: $(top_srcdir)/scripts/mysql_system_tables.sql \ + $(top_srcdir)/scripts/mysql_system_tables_data.sql + @echo "Building $@"; + @echo "CREATE DATABASE mysql;" > $@; + @echo "CREATE DATABASE test;" >> $@; + @echo "use mysql;" >> $@; + @cat $(top_srcdir)/scripts/mysql_system_tables.sql \ + $(top_srcdir)/scripts/mysql_system_tables_fix.sql >> $@; + +# Build test_db.sql from init_db.sql plus +# some test data +test_db.sql: init_db.sql $(top_srcdir)/scripts/mysql_test_data_timezone.sql + @echo "Building $@"; + @cat init_db.sql \ + $(top_srcdir)/scripts/mysql_test_data_timezone.sql >> $@; + endif # Don't update the files from bitkeeper diff --git a/netware/init_db.sql 
b/netware/init_db.sql deleted file mode 100644 index c5810b50e8e..00000000000 --- a/netware/init_db.sql +++ /dev/null @@ -1,38 +0,0 @@ -CREATE DATABASE mysql; -CREATE DATABASE test; - -USE mysql; - -CREATE TABLE db (Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') DEFAULT 'N' NOT NULL, References_priv enum('N','Y') DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db,User), KEY User (User)) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Database privileges'; - -INSERT INTO db VALUES ('%','test','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y'); -INSERT INTO db VALUES ('%','test\_%','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y'); - -CREATE TABLE host (Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') DEFAULT 'N' NOT NULL, References_priv enum('N','Y') DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db)) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Host privileges; Merged with database privileges'; - -CREATE TABLE user (Host char(60) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Password char(41) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') DEFAULT 'N' NOT NULL, Reload_priv enum('N','Y') DEFAULT 'N' NOT NULL, Shutdown_priv enum('N','Y') DEFAULT 'N' NOT NULL, Process_priv enum('N','Y') DEFAULT 'N' NOT NULL, File_priv enum('N','Y') DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') DEFAULT 'N' NOT NULL, References_priv enum('N','Y') DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') DEFAULT 'N' NOT NULL, Show_db_priv enum('N','Y') DEFAULT 'N' NOT NULL, Super_priv enum('N','Y') DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') DEFAULT 'N' NOT NULL, Repl_slave_priv enum('N','Y') DEFAULT 'N' NOT NULL, Repl_client_priv enum('N','Y') DEFAULT 'N' NOT NULL, ssl_type enum('','ANY','X509', 'SPECIFIED') DEFAULT '' NOT NULL, ssl_cipher BLOB NOT NULL, x509_issuer BLOB NOT NULL, x509_subject BLOB NOT NULL, max_questions int(11) unsigned DEFAULT 0 NOT NULL, max_updates int(11) unsigned DEFAULT 0 NOT NULL, max_connections int(11) unsigned DEFAULT 0 NOT 
NULL, PRIMARY KEY Host (Host,User)) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Users and global privileges'; - -INSERT INTO user VALUES ('localhost','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0); -INSERT INTO user VALUES ('','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0); - -INSERT INTO user (host,user) values ('localhost',''); -INSERT INTO user (host,user) values ('',''); - -CREATE TABLE func (name char(64) binary DEFAULT '' NOT NULL, ret tinyint(1) DEFAULT '0' NOT NULL, dl char(128) DEFAULT '' NOT NULL, type enum ('function','aggregate') NOT NULL, PRIMARY KEY (name)) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='User defined functions'; - -CREATE TABLE tables_priv (Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Timestamp timestamp(14), Table_priv set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter') DEFAULT '' NOT NULL, Column_priv set('Select','Insert','Update','References') DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name), KEY Grantor (Grantor)) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Table privileges'; - -CREATE TABLE columns_priv (Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Column_name char(64) binary DEFAULT '' NOT NULL, Timestamp timestamp(14), Column_priv set('Select','Insert','Update','References') DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name,Column_name)) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Column privileges'; - -CREATE TABLE help_topic (help_topic_id int unsigned NOT NULL, name varchar(64) NOT NULL, help_category_id smallint unsigned NOT NULL, description text NOT NULL, example text NOT NULL, url varchar(128) NOT NULL, primary key (help_topic_id), unique index (name)) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='help topics'; -CREATE TABLE help_category (help_category_id smallint unsigned NOT NULL, name varchar(64) NOT NULL, parent_category_id smallint unsigned null, url varchar(128) NOT NULL, primary key (help_category_id), unique index (name)) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='help categories'; -CREATE TABLE help_keyword (help_keyword_id int unsigned NOT NULL, name varchar(64) NOT NULL, primary key (help_keyword_id), unique index (name)) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='help keywords'; -CREATE TABLE help_relation (help_topic_id int unsigned NOT NULL references help_topic, help_keyword_id int unsigned NOT NULL references help_keyword, primary key (help_keyword_id, help_topic_id)) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='keyword-topic relation'; - -CREATE TABLE time_zone_name (Name char(64) NOT NULL,Time_zone_id int unsigned NOT NULL,PRIMARY KEY Name (Name)) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Time zone names'; - -CREATE TABLE time_zone (Time_zone_id int unsigned NOT NULL auto_increment, Use_leap_seconds enum('Y','N') DEFAULT 'N' NOT NULL,PRIMARY KEY TzId (Time_zone_id)) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Time zones'; -CREATE TABLE time_zone_transition (Time_zone_id int unsigned NOT NULL,Transition_time 
bigint signed NOT NULL,Transition_type_id int unsigned NOT NULL,PRIMARY KEY TzIdTranTime (Time_zone_id, Transition_time)) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Time zone transitions'; - -CREATE TABLE time_zone_transition_type (Time_zone_id int unsigned NOT NULL,Transition_type_id int unsigned NOT NULL,Offset int signed DEFAULT 0 NOT NULL,Is_DST tinyint unsigned DEFAULT 0 NOT NULL,Abbreviation char(8) DEFAULT '' NOT NULL,PRIMARY KEY TzIdTrTId (Time_zone_id, Transition_type_id)) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Time zone transition types'; -CREATE TABLE time_zone_leap_second (Transition_time bigint signed NOT NULL,Correction int signed NOT NULL,PRIMARY KEY TranTime (Transition_time)) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Leap seconds information for time zones'; diff --git a/netware/test_db.sql b/netware/test_db.sql deleted file mode 100644 index 0f58c242278..00000000000 --- a/netware/test_db.sql +++ /dev/null @@ -1,43 +0,0 @@ -CREATE DATABASE mysql; -CREATE DATABASE test; - -USE mysql; - -CREATE TABLE db (Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') DEFAULT 'N' NOT NULL, References_priv enum('N','Y') DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db,User), KEY User (User)) comment='Database privileges'; - -INSERT INTO db VALUES ('%','test','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y'); -INSERT INTO db VALUES ('%','test\_%','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y'); - -CREATE TABLE host (Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') DEFAULT 'N' NOT NULL, References_priv enum('N','Y') DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db)) comment='Host privileges; Merged with database privileges'; - -CREATE TABLE user (Host char(60) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Password char(41) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') DEFAULT 'N' NOT NULL, Reload_priv enum('N','Y') DEFAULT 'N' NOT NULL, Shutdown_priv enum('N','Y') DEFAULT 'N' NOT NULL, Process_priv enum('N','Y') DEFAULT 'N' NOT NULL, File_priv enum('N','Y') DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') DEFAULT 'N' NOT NULL, References_priv enum('N','Y') 
DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') DEFAULT 'N' NOT NULL, Show_db_priv enum('N','Y') DEFAULT 'N' NOT NULL, Super_priv enum('N','Y') DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') DEFAULT 'N' NOT NULL, Repl_slave_priv enum('N','Y') DEFAULT 'N' NOT NULL, Repl_client_priv enum('N','Y') DEFAULT 'N' NOT NULL, ssl_type enum('','ANY','X509', 'SPECIFIED') DEFAULT '' NOT NULL, ssl_cipher BLOB NOT NULL, x509_issuer BLOB NOT NULL, x509_subject BLOB NOT NULL, max_questions int(11) unsigned DEFAULT 0 NOT NULL, max_updates int(11) unsigned DEFAULT 0 NOT NULL, max_connections int(11) unsigned DEFAULT 0 NOT NULL, PRIMARY KEY Host (Host,User)) comment='Users and global privileges'; - -INSERT INTO user VALUES ('localhost','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0); -INSERT INTO user VALUES ('','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0); - -INSERT INTO user (host,user) values ('localhost',''); -INSERT INTO user (host,user) values ('',''); - -CREATE TABLE func (name char(64) binary DEFAULT '' NOT NULL, ret tinyint(1) DEFAULT '0' NOT NULL, dl char(128) DEFAULT '' NOT NULL, type enum ('function','aggregate') NOT NULL, PRIMARY KEY (name)) comment='User defined functions'; - -CREATE TABLE tables_priv (Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Timestamp timestamp(14), Table_priv set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter') DEFAULT '' NOT NULL, Column_priv set('Select','Insert','Update','References') DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name), KEY Grantor (Grantor)) comment='Table privileges'; - -CREATE TABLE columns_priv (Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Column_name char(64) binary DEFAULT '' NOT NULL, Timestamp timestamp(14), Column_priv set('Select','Insert','Update','References') DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name,Column_name)) comment='Column privileges'; - -CREATE TABLE help_topic (help_topic_id int unsigned NOT NULL, name varchar(64) NOT NULL, help_category_id smallint unsigned NOT NULL, description text NOT NULL, example text NOT NULL, url varchar(128) NOT NULL, primary key (help_topic_id), unique index (name))comment='help topics'; -CREATE TABLE help_category (help_category_id smallint unsigned NOT NULL, name varchar(64) NOT NULL, parent_category_id smallint unsigned null, url varchar(128) NOT NULL, primary key (help_category_id), unique index (name)) comment='help categories'; -CREATE TABLE help_keyword (help_keyword_id int unsigned NOT NULL, name varchar(64) NOT NULL, primary key (help_keyword_id), unique index (name)) comment='help keywords'; -CREATE TABLE help_relation (help_topic_id int unsigned NOT NULL references help_topic, help_keyword_id int unsigned NOT NULL references help_keyword, primary key (help_keyword_id, help_topic_id)) comment='keyword-topic relation'; - -CREATE TABLE time_zone_name (Name char(64) NOT NULL,Time_zone_id int unsigned NOT NULL,PRIMARY KEY Name (Name)) DEFAULT CHARACTER SET latin1 
comment='Time zone names'; -INSERT INTO time_zone_name (Name, Time_Zone_id) VALUES ('MET', 1), ('UTC', 2), ('Universal', 2), ('Europe/Moscow',3), ('leap/Europe/Moscow',4),('Japan', 5); - -CREATE TABLE time_zone (Time_zone_id int unsigned NOT NULL auto_increment, Use_leap_seconds enum('Y','N') DEFAULT 'N' NOT NULL,PRIMARY KEY TzId (Time_zone_id)) DEFAULT CHARACTER SET latin1 comment='Time zones'; -INSERT INTO time_zone (Time_zone_id, Use_leap_seconds) VALUES (1,'N'), (2,'N'), (3,'N'), (4,'Y'); -CREATE TABLE time_zone_transition (Time_zone_id int unsigned NOT NULL,Transition_time bigint signed NOT NULL,Transition_type_id int unsigned NOT NULL,PRIMARY KEY TzIdTranTime (Time_zone_id, Transition_time)) DEFAULT CHARACTER SET latin1 comment='Time zone transitions'; -INSERT INTO time_zone_transition (Time_zone_id, Transition_time, Transition_type_id) VALUES(1, -1693706400, 0) ,(1, -1680483600, 1),(1, -1663455600, 2) ,(1, -1650150000, 3),(1, -1632006000, 2) ,(1, -1618700400, 3),(1, -938905200, 2) ,(1, -857257200, 3),(1, -844556400, 2) ,(1, -828226800, 3),(1, -812502000, 2) ,(1, -796777200, 3),(1, 228877200, 2) ,(1, 243997200, 3),(1, 260326800, 2) ,(1, 276051600, 3),(1, 291776400, 2) ,(1, 307501200, 3),(1, 323830800, 2) ,(1, 338950800, 3),(1, 354675600, 2) ,(1, 370400400, 3),(1, 386125200, 2) ,(1, 401850000, 3),(1, 417574800, 2) ,(1, 433299600, 3),(1, 449024400, 2) ,(1, 465354000, 3),(1, 481078800, 2) ,(1, 496803600, 3),(1, 512528400, 2) ,(1, 528253200, 3),(1, 543978000, 2) ,(1, 559702800, 3),(1, 575427600, 2) ,(1, 591152400, 3),(1, 606877200, 2) ,(1, 622602000, 3),(1, 638326800, 2) ,(1, 654656400, 3),(1, 670381200, 2) ,(1, 686106000, 3),(1, 701830800, 2) ,(1, 717555600, 3) ,(1, 733280400, 2) ,(1, 749005200, 3) ,(1, 764730000, 2) ,(1, 780454800, 3) ,(1, 796179600, 2) ,(1, 811904400, 3) ,(1, 828234000, 2) ,(1, 846378000, 3) ,(1, 859683600, 2) ,(1, 877827600, 3) ,(1, 891133200, 2) ,(1, 909277200, 3) ,(1, 922582800, 2) ,(1, 941331600, 3) ,(1, 954032400, 2) ,(1, 972781200, 3) ,(1, 985482000, 2) ,(1, 1004230800, 3) ,(1, 1017536400, 2) ,(1, 1035680400, 3) ,(1, 1048986000, 2) ,(1, 1067130000, 3) ,(1, 1080435600, 2) ,(1, 1099184400, 3) ,(1, 1111885200, 2) ,(1, 1130634000, 3) ,(1, 1143334800, 2) ,(1, 1162083600, 3) ,(1, 1174784400, 2) ,(1, 1193533200, 3) ,(1, 1206838800, 2) ,(1, 1224982800, 3) ,(1, 1238288400, 2) ,(1, 1256432400, 3) ,(1, 1269738000, 2) ,(1, 1288486800, 3) ,(1, 1301187600, 2) ,(1, 1319936400, 3) ,(1, 1332637200, 2) ,(1, 1351386000, 3) ,(1, 1364691600, 2) ,(1, 1382835600, 3) ,(1, 1396141200, 2) ,(1, 1414285200, 3) ,(1, 1427590800, 2) ,(1, 1445734800, 3) ,(1, 1459040400, 2) ,(1, 1477789200, 3) ,(1, 1490490000, 2) ,(1, 1509238800, 3) ,(1, 1521939600, 2) ,(1, 1540688400, 3) ,(1, 1553994000, 2) ,(1, 1572138000, 3) ,(1, 1585443600, 2) ,(1, 1603587600, 3) ,(1, 1616893200, 2) ,(1, 1635642000, 3) ,(1, 1648342800, 2) ,(1, 1667091600, 3) ,(1, 1679792400, 2) ,(1, 1698541200, 3) ,(1, 1711846800, 2) ,(1, 1729990800, 3) ,(1, 1743296400, 2) ,(1, 1761440400, 3) ,(1, 1774746000, 2) ,(1, 1792890000, 3) ,(1, 1806195600, 2) ,(1, 1824944400, 3) ,(1, 1837645200, 2) ,(1, 1856394000, 3) ,(1, 1869094800, 2) ,(1, 1887843600, 3) ,(1, 1901149200, 2) ,(1, 1919293200, 3) ,(1, 1932598800, 2) ,(1, 1950742800, 3) ,(1, 1964048400, 2) ,(1, 1982797200, 3) ,(1, 1995498000, 2) ,(1, 2014246800, 3) ,(1, 2026947600, 2) ,(1, 2045696400, 3) ,(1, 2058397200, 2) ,(1, 2077146000, 3) ,(1, 2090451600, 2) ,(1, 2108595600, 3) ,(1, 2121901200, 2) ,(1, 2140045200, 3) ,(3, -1688265000, 2) ,(3, -1656819048, 1) ,(3, -1641353448, 2) ,(3, 
-1627965048, 3) ,(3, -1618716648, 1) ,(3, -1596429048, 3) ,(3, -1593829848, 5) ,(3, -1589860800, 4) ,(3, -1542427200, 5) ,(3, -1539493200, 6) ,(3, -1525323600, 5) ,(3, -1522728000, 4) ,(3, -1491188400, 7) ,(3, -1247536800, 4) ,(3, 354920400, 5) ,(3, 370728000, 4) ,(3, 386456400, 5) ,(3, 402264000, 4) ,(3, 417992400, 5) ,(3, 433800000, 4) ,(3, 449614800, 5) ,(3, 465346800, 8) ,(3, 481071600, 9) ,(3, 496796400, 8) ,(3, 512521200, 9) ,(3, 528246000, 8) ,(3, 543970800, 9) ,(3, 559695600, 8) ,(3, 575420400, 9) ,(3, 591145200, 8) ,(3, 606870000, 9) ,(3, 622594800, 8) ,(3, 638319600, 9) ,(3, 654649200, 8) ,(3, 670374000, 10) ,(3, 686102400, 11) ,(3, 695779200, 8) ,(3, 701812800, 5) ,(3, 717534000, 4) ,(3, 733273200, 9) ,(3, 748998000, 8) ,(3, 764722800, 9) ,(3, 780447600, 8) ,(3, 796172400, 9) ,(3, 811897200, 8) ,(3, 828226800, 9) ,(3, 846370800, 8) ,(3, 859676400, 9) ,(3, 877820400, 8) ,(3, 891126000, 9) ,(3, 909270000, 8) ,(3, 922575600, 9) ,(3, 941324400, 8) ,(3, 954025200, 9) ,(3, 972774000, 8) ,(3, 985474800, 9) ,(3, 1004223600, 8) ,(3, 1017529200, 9) ,(3, 1035673200, 8) ,(3, 1048978800, 9) ,(3, 1067122800, 8) ,(3, 1080428400, 9) ,(3, 1099177200, 8) ,(3, 1111878000, 9) ,(3, 1130626800, 8) ,(3, 1143327600, 9) ,(3, 1162076400, 8) ,(3, 1174777200, 9) ,(3, 1193526000, 8) ,(3, 1206831600, 9) ,(3, 1224975600, 8) ,(3, 1238281200, 9) ,(3, 1256425200, 8) ,(3, 1269730800, 9) ,(3, 1288479600, 8) ,(3, 1301180400, 9) ,(3, 1319929200, 8) ,(3, 1332630000, 9) ,(3, 1351378800, 8) ,(3, 1364684400, 9) ,(3, 1382828400, 8) ,(3, 1396134000, 9) ,(3, 1414278000, 8) ,(3, 1427583600, 9) ,(3, 1445727600, 8) ,(3, 1459033200, 9) ,(3, 1477782000, 8) ,(3, 1490482800, 9) ,(3, 1509231600, 8) ,(3, 1521932400, 9) ,(3, 1540681200, 8) ,(3, 1553986800, 9) ,(3, 1572130800, 8) ,(3, 1585436400, 9) ,(3, 1603580400, 8) ,(3, 1616886000, 9) ,(3, 1635634800, 8) ,(3, 1648335600, 9) ,(3, 1667084400, 8) ,(3, 1679785200, 9) ,(3, 1698534000, 8) ,(3, 1711839600, 9) ,(3, 1729983600, 8) ,(3, 1743289200, 9) ,(3, 1761433200, 8) ,(3, 1774738800, 9) ,(3, 1792882800, 8) ,(3, 1806188400, 9) ,(3, 1824937200, 8) ,(3, 1837638000, 9) ,(3, 1856386800, 8) ,(3, 1869087600, 9) ,(3, 1887836400, 8) ,(3, 1901142000, 9) ,(3, 1919286000, 8) ,(3, 1932591600, 9) ,(3, 1950735600, 8) ,(3, 1964041200, 9) ,(3, 1982790000, 8) ,(3, 1995490800, 9) ,(3, 2014239600, 8) ,(3, 2026940400, 9) ,(3, 2045689200, 8) ,(3, 2058390000, 9) ,(3, 2077138800, 8) ,(3, 2090444400, 9) ,(3, 2108588400, 8) ,(3, 2121894000, 9) ,(3, 2140038000, 8) ,(4, -1688265000, 2) ,(4, -1656819048, 1) ,(4, -1641353448, 2) ,(4, -1627965048, 3) ,(4, -1618716648, 1) ,(4, -1596429048, 3) ,(4, -1593829848, 5) ,(4, -1589860800, 4) ,(4, -1542427200, 5) ,(4, -1539493200, 6) ,(4, -1525323600, 5) ,(4, -1522728000, 4) ,(4, -1491188400, 7) ,(4, -1247536800, 4) ,(4, 354920409, 5) ,(4, 370728010, 4) ,(4, 386456410, 5) ,(4, 402264011, 4) ,(4, 417992411, 5) ,(4, 433800012, 4) ,(4, 449614812, 5) ,(4, 465346812, 8) ,(4, 481071612, 9) ,(4, 496796413, 8) ,(4, 512521213, 9) ,(4, 528246013, 8) ,(4, 543970813, 9) ,(4, 559695613, 8) ,(4, 575420414, 9) ,(4, 591145214, 8) ,(4, 606870014, 9) ,(4, 622594814, 8) ,(4, 638319615, 9) ,(4, 654649215, 8) ,(4, 670374016, 10) ,(4, 686102416, 11) ,(4, 695779216, 8) ,(4, 701812816, 5) ,(4, 717534017, 4) ,(4, 733273217, 9) ,(4, 748998018, 8) ,(4, 764722818, 9) ,(4, 780447619, 8) ,(4, 796172419, 9) ,(4, 811897219, 8) ,(4, 828226820, 9) ,(4, 846370820, 8) ,(4, 859676420, 9) ,(4, 877820421, 8) ,(4, 891126021, 9) ,(4, 909270021, 8) ,(4, 922575622, 9) ,(4, 941324422, 8) ,(4, 954025222, 9) ,(4, 
972774022, 8) ,(4, 985474822, 9) ,(4, 1004223622, 8) ,(4, 1017529222, 9) ,(4, 1035673222, 8) ,(4, 1048978822, 9) ,(4, 1067122822, 8) ,(4, 1080428422, 9) ,(4, 1099177222, 8) ,(4, 1111878022, 9) ,(4, 1130626822, 8) ,(4, 1143327622, 9) ,(4, 1162076422, 8) ,(4, 1174777222, 9) ,(4, 1193526022, 8) ,(4, 1206831622, 9) ,(4, 1224975622, 8) ,(4, 1238281222, 9) ,(4, 1256425222, 8) ,(4, 1269730822, 9) ,(4, 1288479622, 8) ,(4, 1301180422, 9) ,(4, 1319929222, 8) ,(4, 1332630022, 9) ,(4, 1351378822, 8) ,(4, 1364684422, 9) ,(4, 1382828422, 8) ,(4, 1396134022, 9) ,(4, 1414278022, 8) ,(4, 1427583622, 9) ,(4, 1445727622, 8) ,(4, 1459033222, 9) ,(4, 1477782022, 8) ,(4, 1490482822, 9) ,(4, 1509231622, 8) ,(4, 1521932422, 9) ,(4, 1540681222, 8) ,(4, 1553986822, 9) ,(4, 1572130822, 8) ,(4, 1585436422, 9) ,(4, 1603580422, 8) ,(4, 1616886022, 9) ,(4, 1635634822, 8) ,(4, 1648335622, 9) ,(4, 1667084422, 8) ,(4, 1679785222, 9) ,(4, 1698534022, 8) ,(4, 1711839622, 9) ,(4, 1729983622, 8) ,(4, 1743289222, 9) ,(4, 1761433222, 8) ,(4, 1774738822, 9) ,(4, 1792882822, 8) ,(4, 1806188422, 9) ,(4, 1824937222, 8) ,(4, 1837638022, 9) ,(4, 1856386822, 8) ,(4, 1869087622, 9) ,(4, 1887836422, 8) ,(4, 1901142022, 9) ,(4, 1919286022, 8) ,(4, 1932591622, 9) ,(4, 1950735622, 8) ,(4, 1964041222, 9) ,(4, 1982790022, 8) ,(4, 1995490822, 9) ,(4, 2014239622, 8) ,(4, 2026940422, 9) ,(4, 2045689222, 8) ,(4, 2058390022, 9) ,(4, 2077138822, 8) ,(4, 2090444422, 9) ,(4, 2108588422, 8) ,(4, 2121894022, 9) ,(4, 2140038022, 8); - -CREATE TABLE time_zone_transition_type (Time_zone_id int unsigned NOT NULL,Transition_type_id int unsigned NOT NULL,Offset int signed DEFAULT 0 NOT NULL,Is_DST tinyint unsigned DEFAULT 0 NOT NULL,Abbreviation char(8) DEFAULT '' NOT NULL,PRIMARY KEY TzIdTrTId (Time_zone_id, Transition_type_id)) DEFAULT CHARACTER SET latin1 comment='Time zone transition types'; -INSERT INTO time_zone_transition_type (Time_zone_id,Transition_type_id, Offset, Is_DST, Abbreviation) VALUES(1, 0, 7200, 1, 'MEST') ,(1, 1, 3600, 0, 'MET'),(1, 2, 7200, 1, 'MEST') ,(1, 3, 3600, 0, 'MET'),(2, 0, 0, 0, 'UTC'),(3, 0, 9000, 0, 'MMT') ,(3, 1, 12648, 1, 'MST'),(3, 2, 9048, 0, 'MMT') ,(3, 3, 16248, 1, 'MDST'),(3, 4, 10800, 0, 'MSK') ,(3, 5, 14400, 1, 'MSD'),(3, 6, 18000, 1, 'MSD') ,(3, 7, 7200, 0, 'EET'),(3, 8, 10800, 0, 'MSK') ,(3, 9, 14400, 1, 'MSD'),(3, 10, 10800, 1, 'EEST') ,(3, 11, 7200, 0, 'EET'),(4, 0, 9000, 0, 'MMT') ,(4, 1, 12648, 1, 'MST'),(4, 2, 9048, 0, 'MMT') ,(4, 3, 16248, 1, 'MDST'),(4, 4, 10800, 0, 'MSK') ,(4, 5, 14400, 1, 'MSD'),(4, 6, 18000, 1, 'MSD') ,(4, 7, 7200, 0, 'EET'),(4, 8, 10800, 0, 'MSK') ,(4, 9, 14400, 1, 'MSD'),(4, 10, 10800, 1, 'EEST') ,(4, 11, 7200, 0, 'EET'); -CREATE TABLE time_zone_leap_second (Transition_time bigint signed NOT NULL,Correction int signed NOT NULL,PRIMARY KEY TranTime (Transition_time)) DEFAULT CHARACTER SET latin1 comment='Leap seconds information for time zones'; -INSERT INTO time_zone_leap_second (Transition_time, Correction) VALUES (78796800, 1) ,(94694401, 2) ,(126230402, 3),(157766403, 4) ,(189302404, 5) ,(220924805, 6),(252460806, 7) ,(283996807, 8) ,(315532808, 9),(362793609, 10) ,(394329610, 11) ,(425865611, 12),(489024012, 13) ,(567993613, 14) ,(631152014, 15),(662688015, 16) ,(709948816, 17) ,(741484817, 18),(773020818, 19) ,(820454419, 20) ,(867715220, 21),(915148821, 22); diff --git a/scripts/fill_func_tables.sh b/scripts/fill_func_tables.sh deleted file mode 100644 index 4d28bda7fb5..00000000000 --- a/scripts/fill_func_tables.sh +++ /dev/null @@ -1,250 +0,0 @@ -#!@PERL@ -# Copyright (C) 2003 
MySQL AB -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# fill_func_tables - parse ../Docs/manual.texi -# -# Original version by Victor Vagin <vva@mysql.com> -# - -my $cat_name= ""; -my $func_name= ""; -my $text= ""; -my $example= ""; - -local $mode= ""; - -sub prepare_name -{ - my ($a)= @_; - - $a =~ s/(\@itemize \@bullet)/ /g; - $a =~ s/(\@end itemize)/ /g; - $a =~ s/(\@end multitable)/ /g; - $a =~ s/(\@end table)/ /g; - $a =~ s/(\@cindex(.*?)\n)/ /g; - $a =~ s/(\@multitable \@columnfractions(.*?)\n)/ /g; - $a =~ s/(\@node(.*?)\n)/ /g; - $a =~ s/(\@tab)/\t/g; - $a =~ s/\@item/ /g; - $a =~ s/\@code\{((.|\n)+?)\}/$1/go; - $a =~ s/\@strong\{(.+?)\}/$1/go; - $a =~ s/\@samp\{(.+?)\}/$1/go; - $a =~ s/\@emph\{((.|\n)+?)\}/\/$1\//go; - $a =~ s/\@xref\{((.|\n)+?)\}/See also : [$1]/go; - $a =~ s/\@ref\{((.|\n)+?)\}/[$1]/go; - $a =~ s/\'/\'\'/g; - $a =~ s/\\/\\\\/g; - $a =~ s/\`/\`\`/g; - - $a =~ s/\@table \@code/ /g; - - $a =~ s/\(\)//g; - - $a =~ s/((\w|\s)+)\(([\+-=><\/%*!<>\s]+)\)/$3/gxs; #$a =~ s/((\w|\s)+)\(([\+-=><\/%*!<>\s]+)\)/$3 $1/gxs; - $a =~ s/([\+-=><\/%*!<>\s]+)\(((\w|\s)+)\)/$1/gxs;#$a =~ s/([\+-=><\/%*!<>\s]+)\(((\w|\s)+)\)/$1 $2/gxs; - $a =~ s/((\w|\s)+)\((.+)\)/$1/gxs; - - return $a; -} - -sub prepare_text -{ - my ($a)= @_; - - $a =~ s/(\@itemize \@bullet)/ /g; - $a =~ s/(\@end itemize)/ /g; - $a =~ s/(\@end multitable)/ /g; - $a =~ s/(\@end table)/ /g; - $a =~ s/(\@cindex(.*?)\n)/ /g; - $a =~ s/(\@multitable \@columnfractions(.*?)\n)/ /g; - $a =~ s/(\@node(.*?)\n)/ /g; - $a =~ s/(\@tab)/\t/g; - $a =~ s/\@itemx/ /g; - $a =~ s/\@item/ /g; - $a =~ s/\@code\{((.|\n)+?)\}/$1/go; - $a =~ s/\@strong\{(.+?)\}/$1/go; - $a =~ s/\@samp\{(.+?)\}/$1/go; - $a =~ s/\@emph\{((.|\n)+?)\}/\/$1\//go; - $a =~ s/\@xref\{((.|\n)+?)\}/See also : [$1]/go; - $a =~ s/\@ref\{((.|\n)+?)\}/[$1]/go; - $a =~ s/\'/\'\'/g; - $a =~ s/\\/\\\\/g; - $a =~ s/\`/\`\`/g; - $a =~ s/(\n*?)$//g; - $a =~ s/\n/\\n/g; - - $a =~ s/\@table \@code/ /g; - - return $a; -} - -sub prepare_example -{ - my ($a)= @_; - - $a =~ s/\'/\'\'/g; - $a =~ s/\\/\\\\/g; - $a =~ s/\`/\`\`/g; - $a =~ s/(\n*?)$//g; - $a =~ s/\n/\\n/g; - - return $a; -} - -sub flush_all -{ - my ($mode) = @_; - - if ($mode eq ""){return;} - - $func_name= prepare_name($func_name); - $text= prepare_text($text); - $example= prepare_example($example); - - if ($func_name ne "" && $text ne "" && !($func_name =~ /[abcdefghikjlmnopqrstuvwxyz]/)){ - print "INSERT INTO function (name,description,example) VALUES ("; - print "'$func_name',"; - print "'$text',"; - print "'$example'"; - print ");\n"; - print "INSERT INTO function_category (cat_id,func_id) VALUES (\@cur_category,LAST_INSERT_ID());\n"; - } - - $func_name= ""; - $text= ""; - $example= ""; - $mode= ""; -} - -sub new_category -{ - my ($category)= @_; - - $category= prepare_text($category); - - print "INSERT INTO function_category_name (name) VALUES (\'$category\');\n"; - print "SELECT 
\@cur_category:=LAST_INSERT_ID();\n"; -} - -print "INSERT INTO db (Host,DB,User,Select_priv) VALUES ('%','mysql_help','','Y');\n"; -print "CREATE DATABASE mysql_help;\n"; - -print "USE mysql_help;\n"; - -print "DROP TABLE IF EXISTS function;\n"; -print "CREATE TABLE function ("; -print " func_id int unsigned not null auto_increment,"; -print " name char(64) not null,"; -print " url char(128) not null,"; -print " description text not null,"; -print " example text not null,"; -print " min_args tinyint not null,"; -print " max_args tinyint,"; -print " date_created datetime not null,"; -print " last_modified timestamp not null,"; -print " primary key (func_id)"; -print ") ENGINE=MYISAM;\n\n"; - -print "DROP TABLE IF EXISTS function_category_name;\n"; -print "CREATE TABLE function_category_name ("; -print " cat_id smallint unsigned not null auto_increment,"; -print " name char(64) not null,"; -print " url char(128) not null,"; -print " date_created datetime not null,"; -print " last_modified timestamp not null,"; -print " primary key (cat_id)"; -print ") ENGINE=MYISAM;\n\n"; - -print "DROP TABLE IF EXISTS function_category;\n"; -print "CREATE TABLE function_category ("; -print " cat_id smallint unsigned not null references function_category_name,"; -print " func_id int unsigned not null references function,"; -print " primary key (cat_id, func_id)"; -print ") ENGINE=MYISAM;\n\n"; - -print "DELETE FROM function_category_name;\n"; -print "DELETE FROM function_category;\n"; -print "DELETE FROM function;\n"; -print "SELECT \@cur_category:=null;\n\n"; - -my $in_section_6_3= 0; - -for(<>) -{ - if ($_=~/\@section Functions for Use in \@code{SELECT} and \@code{WHERE} Clauses/ && - !$in_section_6_3){ - $in_section_6_3= 1; - next; - } - - if ($_=~/\@section/ && $in_section_6_3){ - $in_section_6_3= 0; - next; - } - - if (!$in_section_6_3) { next; } - - my $c_name= ""; - - ($c_name)=m|\@c for_mysql_help,(.+?)$|; - if (!($c_name eq "") && ! ($c_name =~ m/$cat_name/i)){ - ($cat_name)= $c_name; - new_category($cat_name); - next; - } - - ($c_name)=m|\@subsubsection (.+?)$|; - if (!($c_name eq "") && ! ($c_name =~ m/$cat_name/i)){ - ($cat_name)= $c_name; - new_category($cat_name); - next; - } - - ($c_name)=m|\@subsection (.+?)$|; - if (!($c_name eq "") && ! ($c_name =~ m/$cat_name/i)){ - ($cat_name)= $c_name; - new_category($cat_name); - next; - } - - ($f_name)=m|\@findex (.+?)$|; - if (!($f_name eq "")){ - flush_all($mode); - ($func_name)= ($f_name); - $mode= "text"; - next; - } - - if ($_=~/\@example/ && ($mode eq "text")){ - $mode= "example"; - next; - } - - if ($_=~/\@end example/ && ($mode eq "example")){ - flush_all($mode); - next; - } - - if ($mode eq "text") { $text .= $_; } - if ($mode eq "example") { $example .= $_; } -} - - -print "DELETE function_category_name "; -print "FROM function_category_name "; -print "LEFT JOIN function_category ON function_category.cat_id=function_category_name.cat_id "; -print "WHERE function_category.cat_id is null;" - diff --git a/scripts/fill_help_tables.sh b/scripts/fill_help_tables.sh deleted file mode 100644 index e600d24032b..00000000000 --- a/scripts/fill_help_tables.sh +++ /dev/null @@ -1,614 +0,0 @@ -#!@PERL@ -# Copyright (C) 2003 MySQL AB -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -# This script generates the SQL statements required by mysql_install_db to -# fill up the tables for the server-side online function help, which can be -# invoked with "help <function>" from the MySQL client. -# -# Usage: -# fill_help_tables OPTIONS < manual.texi > fill_help_tables.sql -# -# --help display this helpscreen and exit -# --verbose print information about help completeness to STDERR -# --lexems=path path to file with lexems. it is used with verbose option. -# default value is ../sql/lex.h -# Examples: -# ./fill_help_tables --help -# ./fill_help_tables --verbose < manual.texi > fill_help_tables.sql -# ./fill_help_tables < manual.texi > fill_help_tables.sql -# -# Please note, that you first need to update Docs/manual.texi with the -# manual file from the separate "mysqldoc" BitKeeper-Tree! The manual.texi -# included in the source tree is just an empty stub file - the full manual -# is now maintained in a separate tree. -# -# extra tags in manual.texi: -# -# @c help_category <category_name>[@<parent_category_name>] -# -# @c description_for_help_topic <topic_name> <keyword1> <keyword2> -# .... -# @c end_description_for_help_topic -# -# @c example_for_help_topic <topic_name> -# @example -# .... -# @end example -# -# -# Original version by Victor Vagin <vva@mysql.com> -# - -use strict; -use Getopt::Long; - -my $insert_portion_size= 15; -my $error_prefix= "---- help parsing errors :"; - -my $path_to_lex_file= "../sql/lex.h"; -my $verbose_option= 0; -my $help_option= 0; - -my $cur_line= 0; -my $count_errors= 0; - -GetOptions( - "help",\$help_option, - "verbose",\$verbose_option, - "lexems=s",\$path_to_lex_file -); - -if ($help_option ne 0) -{ - print <<_HELP; - -This script generates the SQL statements required by mysql_install_db to -fill up the tables for the server-side online function help, which can be -invoked with "help <function>" from the MySQL client. - -Usage: - fill_help_tables OPTIONS < manual.texi > fill_help_tables.sql - - --help display this helpscreen and exit - --verbose print information about help completeness to STDERR - --lexems=path path to file with lexems. it is used with verbose option. 
- default value is ../sql/lex.h - -Examples: - ./fill_help_tables --help - ./fill_help_tables --verbose < manual.texi > fill_help_tables.sql - ./fill_help_tables < manual.texi > fill_help_tables.sql - -_HELP - exit; -} - -my $current_category= ""; -my $current_parent_category= ""; -my $next_example_for_topic= ""; - -my %topics; -my %categories; -my %keywords; - -$categories{Contents}->{__parent_category__}= ""; - -sub print_error -{ - my ($text)= @_; - if ($count_errors==0) - { - print STDERR "$error_prefix\n"; - } - print STDERR "line $cur_line : $text"; - $count_errors++; -} - -sub add_topic_to_category -{ - my ($topic_name)= @_; - - $categories{$current_category}->{$topic_name}= $topics{$topic_name}; - my $category= $categories{$current_category}; - $category->{__name__}= $current_category; - - if (exists($category->{__parent_category__})) - { - my $old_parent= $category->{__parent_category__}; - if ($old_parent ne $current_parent_category) - { - print_error "wrong parent for $current_category\n"; - } - } - - if ($current_parent_category ne "") - { - $category->{__parent_category__}= $current_parent_category; - } - - if (exists($topics{$topic_name}->{category})) - { - my $old_category= $topics{$topic_name}->{category}; - if ($old_category ne $category) - { - print_error "wrong category for $topic_name (first one's \"$old_category->{__name__}\" second one's \"$current_category\")\n"; - } - } - - $topics{$topic_name}->{category}= $category; -} - -sub add_example -{ - my ($topic_name,$example)= @_; - - $topic_name=~ tr/a-z/A-Z/; - - if (exists($topics{$topic_name}->{example})) - { - print_error "double example for $topic_name\n"; - } - - $topics{$topic_name}->{example}= $example; - add_topic_to_category($topic_name); -} - -sub add_description -{ - my ($topic_name,$description)= @_; - - $topic_name=~ tr/a-z/A-Z/; - - if (exists($topics{$topic_name}->{description})) - { - print_error "double description for $topic_name\n"; - } - $topics{$topic_name}->{description}= $description; - add_topic_to_category($topic_name); -} - -sub add_keyword -{ - my ($topic_name,$keyword)= @_; - - $topic_name=~ tr/a-z/A-Z/; - $keyword=~ tr/a-z/A-Z/; - - push(@{$topics{$topic_name}->{keywords}},$keyword); - if (exists($keywords{$keyword}->{$topic_name})) - { - print_error "double keyword $keyword for $topic_name\n"; - } - $keywords{$keyword}->{$topic_name}= $topics{$topic_name}; -} - -sub prepare_name -{ - my ($a)= @_; - - $a =~ s/(\@itemize \@bullet)/ /g; - $a =~ s/(\@end itemize)/ /g; - $a =~ s/(\@end multitable)/ /g; - $a =~ s/(\@end table)/ /g; - $a =~ s/(\@cindex(.*?)\n)/ /g; - $a =~ s/(\@multitable \@columnfractions(.*?)\n)/ /g; - $a =~ s/(\@node(.*?)\n)/ /g; - $a =~ s/(\@tab)/\t/g; - $a =~ s/\@item/ /g; - $a =~ s/\@minus\{\}/-/g; - $a =~ s/\@dots\{\}/.../g; - $a =~ s/\@var\{((.|\n)+?)\}/$1/go; - $a =~ s/\@command\{((.|\n)+?)\}/$1/go; - $a =~ s/\@code\{((.|\n)+?)\}/$1/go; - $a =~ s/\@strong\{(.+?)\}/$1/go; - $a =~ s/\@samp\{(.+?)\}/'$1'/go; - $a =~ s/\@emph\{((.|\n)+?)\}/\/$1\//go; - $a =~ s/\@xref\{((.|\n)+?)\}/See also : [$1]/go; - $a =~ s/\@ref\{((.|\n)+?)\}/[$1]/go; - $a =~ s/\'/\'\'/g; - $a =~ s/\\/\\\\/g; - $a =~ s/\`/\`\`/g; - - $a =~ s/\@table \@code/ /g; - $a =~ s/\(\)//g; - $a =~ s/\"/\\\"/g; - - $a =~ s/((\w|\s)+)\(([\+-=><\/%*!<>\s]+)\)/$3/gxs; - $a =~ s/([\+-=><\/%*!<>\s]+)\(((\w|\s)+)\)/$1/gxs; - $a =~ s/((\w|\s)+)\((.+)\)/$1/gxs; - - $a =~ s/((\s)+)$//g; - - return $a; -} - -sub prepare_description -{ - my ($a)= @_; - - $a =~ s/(\@itemize \@bullet\n)//g; - $a =~ s/(\@c help_keyword 
(.*?)\n)//g; - $a =~ s/(\@end itemize\n)//g; - $a =~ s/(\@end example\n)//g; - $a =~ s/(\@example\n)//g; - $a =~ s/(\@{)/{/g; - $a =~ s/(\@})/}/g; - $a =~ s/(\@end multitable)/ /g; - $a =~ s/(\@end table)/ /g; - $a =~ s/(\@cindex(.*?)\n)//g; - $a =~ s/(\@findex(.*?)\n)//g; - $a =~ s/(\@table(.*?)\n)//g; - $a =~ s/(\@multitable \@columnfractions(.*?)\n)/ /g; - $a =~ s/(\@node(.*?)\n)/ /g; - $a =~ s/(\@tab)/\t/g; - $a =~ s/\@itemx/ /g; - $a =~ s/(\@item\n(\s*?))(\S)/ --- $3/g; - $a =~ s/(\@item)/ /g; - $a =~ s/(\@tindex\s(.*?)\n)//g; - $a =~ s/(\@c\s(.*?)\n)//g; - $a =~ s/\@minus\{\}/-/g; - $a =~ s/\@dots\{\}/.../g; - $a =~ s/\@var\{((.|\n)+?)\}/$1/go; - $a =~ s/\@command\{((.|\n)+?)\}/$1/go; - $a =~ s/\@code\{((.|\n)+?)\}/$1/go; - $a =~ s/\@strong\{(.+?)\}/$1/go; - $a =~ s/\@samp\{(.+?)\}/'$1'/go; - $a =~ s/\@emph\{((.|\n)+?)\}/\/$1\//go; - $a =~ s/\@xref\{((.|\n)+?)\}/See also : [$1]/go; - $a =~ s/\@ref\{((.|\n)+?)\}/[$1]/go; - $a =~ s/\@w\{((.|\n)+?)\}/$1/go; - $a =~ s/\@strong\{((.|\n)+?)\}/\n!!!!\n$1\n!!!!\n/go; - $a =~ s/\@file\{((.|\n)+?)\}/\*$1/go; - $a =~ s/\\/\\\\/g; - $a =~ s/\n\n$/\n/g; - $a =~ s/\n\n$/\n/g; - $a =~ s/\n\n$/\n/g; - $a =~ s/\n\n$/\n/g; - $a =~ s/\n\n$/\n/g; - $a =~ s/\n/\\n/g; - $a =~ s/\"/\\\"/g; - - $a =~ s/\@table \@code/ /g; - - return $a; -} - -sub prepare_example -{ - my ($a)= @_; - - $a =~ s/(^\@c for_help_topic(.*?)\n)//g; - - $a =~ s/\@var\{((.|\n)+?)\}/$1/go; - $a =~ s/\@dots\{\}/.../g; - $a =~ s/\\/\\\\/g; - $a =~ s/(\@{)/{/g; - $a =~ s/(\@})/}/g; - $a =~ s/(\@\@)/\@/g; - $a =~ s/(\n*?)$//g; - $a =~ s/\n/\\n/g; - $a =~ s/\"/\\\"/g; - - return $a; -} - -sub parse_example -{ - return if (!($_=~/\@example/)); - return if ($next_example_for_topic eq ""); - - my $topic_name= $next_example_for_topic; - $next_example_for_topic= ""; - my $text= ""; - - while (<>) - { - $cur_line++; - last if ($_=~/\@end example/); - $text .= $_; - } - - $text= prepare_example($text); - $topic_name= prepare_name($topic_name); - add_example($topic_name,$text) if ($topic_name ne ""); -} - -sub parse_example_for_topic -{ - my ($for_topic)= m|\@c example_for_help_topic (.+?)$|; - return if ($for_topic eq ""); - - $next_example_for_topic= $for_topic; -} - -sub parse_description -{ - my ($topic_description)= m|\@c description_for_help_topic (.+?)$|; - return if ($topic_description eq ""); - - my ($topic_name,$topic_keywords)= split(/ /,$topic_description); - - if ($topic_name eq "" || $topic_keywords eq "") - { - $topic_name= $topic_description; - } - else - { - my $keyword; - foreach $keyword (split(/ /,$topic_keywords)) - { - add_keyword($topic_name,$keyword) if ($keyword ne ""); - } - } - - my $text= ""; - - while (<>) - { - $cur_line++; - last if ($_=~/\@c end_description_for_help_topic/); - $text .= $_; - } - - $text= prepare_description($text); - $topic_name= prepare_name($topic_name); - add_description($topic_name,$text); -} - -sub parse_category -{ - my ($c_name,$pc_name)= m|\@c help_category (.+?)\@(.+?)$|; - - if ($pc_name ne "") - { - $current_category= prepare_name($c_name); - $current_parent_category= prepare_name($pc_name); - } - else - { - my ($c_name)=m|\@c help_category (.+?)$|; - return if ($c_name eq ""); - - $current_category= prepare_name($c_name); - $current_parent_category= "Contents" - } -} - -# parse manual: - -while (<>) -{ - parse_example_for_topic (); - parse_example (); - parse_description (); - parse_category (); - $cur_line++; -} - -# test results of parsing: - -sub print_bad_names -{ - my($names,$prompt)= @_; - if (scalar(@{$names})) - { - print STDERR 
"\n-------------- $prompt : \n\n"; - my $name; - foreach $name (@{$names}) - { - print STDERR "$name\n"; - } - print STDERR "\n"; - } -} - -sub print_verbose_errors -{ - my($name_of_log_file)= @_; - - my @without_help; - my @description_with_at; - my @example_with_at; - my @without_description; - my @without_example; - - print STDERR "\n-------------- parameters of help completeness : \n\n"; - - my $count_lex= 0; - if (!open (TLEX,"<$path_to_lex_file")) - { - print STDERR "Error opening lex file \"$path_to_lex_file\" $!\n"; - } - else - { - for (<TLEX>) - { - my ($a,$lex,$b)=m|(.+?)\"(.+?)\"(.+?)$|; - next if ($lex eq ""); - $count_lex++; - next if (exists($topics{$lex}) || exists($keywords{$lex})); - push(@without_help,$lex); - } - close(TLEX); - print STDERR "number of lexems in \"$path_to_lex_file\" - $count_lex\n"; - } - - my $name; - my @topic_names= keys(%topics); - foreach $name (@topic_names) - { - my $topic= $topics{$name}; - push(@description_with_at,$name) if ($topic->{description}=~/\@/); - push(@example_with_at,$name) if ($topic->{example}=~/\@/); - push(@without_description,$name) if (!exists($topic->{description})); - push(@without_example,$name) if (!exists($topic->{example})); - } - - my $count_categories= scalar(keys(%categories)); - print STDERR "number of help categories - ",$count_categories,"\n"; - my $count_topics= scalar(@topic_names); - print STDERR "number of help topics - ",$count_topics,"\n"; - my $count_keywords= scalar(keys(%keywords)); - print STDERR "number of help keywords - ",$count_keywords,"\n"; - - my $count_without_help= scalar(@without_help); - my $percent_without_help= $count_lex ? - int (($count_without_help/$count_lex)*100) : - "100"; - print_bad_names(\@without_help,"lexems without help (". - $count_without_help." ~ ". - $percent_without_help."%)"); - print_bad_names(\@description_with_at, - " topics below have symbol \'@\' in their descriptions.\n". - "it's probably the litter from 'texi' tags (script needs fixing)"); - print_bad_names(\@example_with_at, - " topics below have symbol \'@\' in their examples.\n". - "it's probably the litter from 'texi' tags (script needs fixing)"); - print_bad_names(\@without_description,"topics without description"); - - my $count_without_example= scalar(@without_example); - my $percent_without_example= $count_topics ? - int (($count_without_example/$count_topics)*100) : - "100"; - print_bad_names(\@without_example,"topics without example (". - $count_without_example." ~ ". - $percent_without_example."%)"); -} - -print_verbose_errors if ($verbose_option ne 0); - -# output result - -sub print_insert_header -{ - my($count,$header)= @_; - - if ($count % $insert_portion_size ne 0) { - print ","; - } else { - print ";\n" if ($count ne 0); - print "$header"; - } -} - -print <<EOF; --- Copyright (C) 2000-2005 MySQL AB --- --- This program is free software; you can redistribute it and/or modify --- it under the terms of the GNU General Public License as published by --- the Free Software Foundation; version 2 of the License. --- --- This program is distributed in the hope that it will be useful, --- but WITHOUT ANY WARRANTY; without even the implied warranty of --- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the --- GNU General Public License for more details. 
--- --- You should have received a copy of the GNU General Public License --- along with this program; if not, write to the Free Software --- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -EOF -print "set sql_mode='';\n"; -print "delete from help_topic;\n"; -print "delete from help_category;\n"; -print "delete from help_keyword;\n"; -print "delete from help_relation;\n\n"; - -my @category_names= keys(%categories); -if (scalar(@category_names)) -{ - my $cat_name; - my $count= 0; - foreach $cat_name (@category_names) - { - $categories{$cat_name}->{__id__}= $count; - $count++; - } - - my $header= "insert into help_category ". - "(help_category_id,name,parent_category_id) values "; - $count= 0; - foreach $cat_name (@category_names) - { - print_insert_header($count,$header); - my $parent_cat_name= $categories{$cat_name}->{__parent_category__}; - my $parent_cat_id= $parent_cat_name eq "" - ? "-1" : $categories{$parent_cat_name}->{__id__}; - print "($count,\"$cat_name\",$parent_cat_id)"; - $count++; - } - printf ";\n\n"; -} - -my @topic_names= keys(%topics); -if (scalar(@topic_names)) -{ - my $header= "insert into help_topic ". - "(help_topic_id,help_category_id,name,description,example) values "; - my $topic_name; - my $count= 0; - foreach $topic_name (@topic_names) - { - print_insert_header($count,$header); - my $topic= $topics{$topic_name}; - print "($count,"; - print "$topic->{category}->{__id__},"; - print "\"$topic_name\","; - print "\"$topic->{description}\","; - print "\"$topic->{example}\")"; - $topics{$topic_name}->{__id__}= $count; - $count++; - } - printf ";\n\n"; -} - -my @keywords_names= keys(%keywords); -if (scalar(@keywords_names)) -{ - my $header= "insert into help_keyword (help_keyword_id,name) values "; - my $keyword_name; - my $count= 0; - foreach $keyword_name (@keywords_names) - { - print_insert_header($count,$header); - print "($count,\"$keyword_name\")"; - $count++; - } - printf ";\n\n"; - - $header= "insert into help_relation ". 
- "(help_topic_id,help_keyword_id) values "; - $count= 0; - my $count_keyword= 0; - foreach $keyword_name (@keywords_names) - { - my $topic_name; - foreach $topic_name (keys(%{$keywords{$keyword_name}})) - { - print_insert_header($count,$header); - print "($topics{$topic_name}->{__id__},$count_keyword)"; - $count++; - } - $count_keyword++; - } - printf ";\n\n"; -} - -if ($count_errors) -{ - print STDERR "$count_errors errors !!!\n"; - exit 1; -} diff --git a/scripts/make_binary_distribution.sh b/scripts/make_binary_distribution.sh index fb72666baba..eab3af29ad1 100644 --- a/scripts/make_binary_distribution.sh +++ b/scripts/make_binary_distribution.sh @@ -303,12 +303,6 @@ rm -f $BASE/bin/Makefile* $BASE/bin/*.in $BASE/bin/*.sh \ # Copy system dependent files # if [ $BASE_SYSTEM = "netware" ] ; then - echo "CREATE DATABASE mysql;" > $BASE/bin/init_db.sql - echo "CREATE DATABASE test;" >> $BASE/bin/init_db.sql - sh ./scripts/mysql_create_system_tables.sh real "" "%" 0 \ - >> $BASE/bin/init_db.sql - sh ./scripts/mysql_create_system_tables.sh test "" "%" 0 \ - > $BASE/bin/test_db.sql ./scripts/fill_help_tables < ./Docs/manual.texi >> ./netware/init_db.sql fi diff --git a/scripts/mysql_system_tables.sql b/scripts/mysql_system_tables.sql index f6e2a45339e..5c67477ce5b 100644 --- a/scripts/mysql_system_tables.sql +++ b/scripts/mysql_system_tables.sql @@ -70,7 +70,7 @@ CALL create_slow_log_table(); DROP PROCEDURE create_slow_log_table; -CREATE TABLE IF NOT EXISTS event ( db char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', name char(64) CHARACTER SET utf8 NOT NULL default '', body longblob NOT NULL, definer char(77) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', execute_at DATETIME default NULL, interval_value int(11) default NULL, interval_field ENUM('YEAR','QUARTER','MONTH','DAY','HOUR','MINUTE','WEEK','SECOND','MICROSECOND','YEAR_MONTH','DAY_HOUR','DAY_MINUTE','DAY_SECOND','HOUR_MINUTE','HOUR_SECOND','MINUTE_SECOND','DAY_MICROSECOND','HOUR_MICROSECOND','MINUTE_MICROSECOND','SECOND_MICROSECOND') default NULL, created TIMESTAMP NOT NULL, modified TIMESTAMP NOT NULL, last_executed DATETIME default NULL, starts DATETIME default NULL, ends DATETIME default NULL, status ENUM('ENABLED','DISABLED') NOT NULL default 'ENABLED', on_completion ENUM('DROP','PRESERVE') NOT NULL default 'DROP', sql_mode set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','NOT_USED','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE') DEFAULT '' NOT NULL, comment char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', time_zone char(64) CHARACTER SET latin1 NOT NULL DEFAULT 'SYSTEM', PRIMARY KEY (db, name) ) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT 'Events'; +CREATE TABLE IF NOT EXISTS event ( db char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', name char(64) CHARACTER SET utf8 NOT NULL default '', body longblob NOT NULL, definer char(77) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', execute_at DATETIME default NULL, interval_value int(11) default NULL, interval_field 
ENUM('YEAR','QUARTER','MONTH','DAY','HOUR','MINUTE','WEEK','SECOND','MICROSECOND','YEAR_MONTH','DAY_HOUR','DAY_MINUTE','DAY_SECOND','HOUR_MINUTE','HOUR_SECOND','MINUTE_SECOND','DAY_MICROSECOND','HOUR_MICROSECOND','MINUTE_MICROSECOND','SECOND_MICROSECOND') default NULL, created TIMESTAMP NOT NULL, modified TIMESTAMP NOT NULL, last_executed DATETIME default NULL, starts DATETIME default NULL, ends DATETIME default NULL, status ENUM('ENABLED','DISABLED','SLAVESIDE_DISABLED') NOT NULL default 'ENABLED', on_completion ENUM('DROP','PRESERVE') NOT NULL default 'DROP', sql_mode set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','NOT_USED','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE') DEFAULT '' NOT NULL, comment char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', originator int(10) NOT NULL, time_zone char(64) CHARACTER SET latin1 NOT NULL DEFAULT 'SYSTEM', PRIMARY KEY (db, name) ) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT 'Events'; CREATE TABLE IF NOT EXISTS ndb_binlog_index (Position BIGINT UNSIGNED NOT NULL, File VARCHAR(255) NOT NULL, epoch BIGINT UNSIGNED NOT NULL, inserts BIGINT UNSIGNED NOT NULL, updates BIGINT UNSIGNED NOT NULL, deletes BIGINT UNSIGNED NOT NULL, schemaops BIGINT UNSIGNED NOT NULL, PRIMARY KEY(epoch)) ENGINE=MYISAM; diff --git a/scripts/mysql_system_tables_fix.sql b/scripts/mysql_system_tables_fix.sql index d973c1dddae..e9816bd21e9 100644 --- a/scripts/mysql_system_tables_fix.sql +++ b/scripts/mysql_system_tables_fix.sql @@ -477,9 +477,11 @@ ALTER TABLE event ADD sql_mode 'HIGH_NOT_PRECEDENCE' ) DEFAULT '' NOT NULL AFTER on_completion; ALTER TABLE event MODIFY name char(64) CHARACTER SET utf8 NOT NULL default ''; +ALTER TABLE event ADD COLUMN originator INT(10) NOT NULL; +ALTER TABLE event MODIFY COLUMN status ENUM('ENABLED','DISABLED','SLAVESIDE_DISABLED') NOT NULL default 'ENABLED'; ALTER TABLE event ADD COLUMN time_zone char(64) CHARACTER SET latin1 - NOT NULL DEFAULT 'SYSTEM' AFTER comment; + NOT NULL DEFAULT 'SYSTEM' AFTER originator; # # TRIGGER privilege diff --git a/server-tools/instance-manager/mysqlmanager.vcproj b/server-tools/instance-manager/mysqlmanager.vcproj index d835e242eb1..a38565fc1f3 100644 --- a/server-tools/instance-manager/mysqlmanager.vcproj +++ b/server-tools/instance-manager/mysqlmanager.vcproj @@ -12,13 +12,14 @@ <Configurations> <Configuration Name="Debug|Win32" - OutputDirectory="../../client_debug" - IntermediateDirectory="Debug" + OutputDirectory=".\debug_obj" + IntermediateDirectory=".\debug_obj" ConfigurationType="1" CharacterSet="2"> <Tool Name="VCCLCompilerTool" Optimization="0" + OptimizeForProcessor="2" AdditionalIncludeDirectories="..\..\include,../../extra/yassl/include" PreprocessorDefinitions="MYSQL_INSTANCE_MANAGER;MYSQL_SERVER;_DEBUG;SAFEMALLOC;SAFE_MUTEX;_WINDOWS;CONSOLE" MinimalRebuild="TRUE" @@ -34,10 +35,12 @@ <Tool Name="VCLinkerTool" AdditionalDependencies="wsock32.lib" - OutputFile="../../client_debug/mysqlmanager.exe" - LinkIncremental="2" + OutputFile=".\debug/mysqlmanager.exe" + LinkIncremental="1" GenerateDebugInformation="TRUE" - ProgramDatabaseFile="../../client_debug/mysqlmanager.pdb" + 
ProgramDatabaseFile=".\debug/mysqlmanager.pdb" + GenerateMapFile="TRUE" + MapFileName=".\debug/mysqlmanager.map" SubSystem="1" TargetMachine="1"/> <Tool @@ -63,12 +66,15 @@ </Configuration> <Configuration Name="Release|Win32" - OutputDirectory="../../client_release" - IntermediateDirectory="Release" + OutputDirectory=".\release_obj" + IntermediateDirectory=".\release_obj" ConfigurationType="1" CharacterSet="2"> <Tool Name="VCCLCompilerTool" + Optimization="2" + InlineFunctionExpansion="1" + OptimizeForProcessor="2" AdditionalIncludeDirectories="..\..\include,../../extra/yassl/include" PreprocessorDefinitions="MYSQL_INSTANCE_MANAGER;MYSQL_SERVER;_WINDOWS;CONSOLE" ExceptionHandling="FALSE" @@ -82,9 +88,12 @@ <Tool Name="VCLinkerTool" AdditionalDependencies="wsock32.lib" - OutputFile="../../client_release/mysqlmanager.exe" + OutputFile=".\release/mysqlmanager.exe" LinkIncremental="1" GenerateDebugInformation="TRUE" + ProgramDatabaseFile=".\release/mysqlmanager.pdb" + GenerateMapFile="TRUE" + MapFileName=".\release/mysqlmanager.map" SubSystem="1" OptimizeReferences="2" EnableCOMDATFolding="2" diff --git a/sql/Makefile.am b/sql/Makefile.am index ac14ed3e883..a85eb012f1d 100644 --- a/sql/Makefile.am +++ b/sql/Makefile.am @@ -52,7 +52,7 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \ sql_error.h field.h handler.h mysqld_suffix.h \ ha_partition.h \ ha_ndbcluster.h ha_ndbcluster_binlog.h \ - ha_ndbcluster_tables.h \ + ha_ndbcluster_tables.h rpl_constants.h \ opt_range.h protocol.h rpl_tblmap.h rpl_utility.h \ log.h sql_show.h rpl_rli.h rpl_mi.h \ sql_select.h structs.h table.h sql_udf.h hash_filo.h \ diff --git a/sql/authors.h b/sql/authors.h index 48b807c7884..dfe3b143e2f 100644 --- a/sql/authors.h +++ b/sql/authors.h @@ -66,6 +66,7 @@ struct show_table_authors_st show_table_authors[]= { "Parser, port to OS/2, storage engines and some random stuff" }, { "Yuri Dario", "", "OS/2 port" }, { "Andrei Elkin", "Espoo, Finland", "Replication" }, + { "Patrick Galbraith", "Sharon, NH", "Federated Engine, mysqlslap" }, { "Sergei Golubchik", "Kerpen, Germany", "Full-text search, precision math" }, { "Lenz Grimmer", "Hamburg, Germany", diff --git a/sql/event_data_objects.h b/sql/event_data_objects.h index ab9bff176e1..4d58484e5c2 100644 --- a/sql/event_data_objects.h +++ b/sql/event_data_objects.h @@ -21,11 +21,9 @@ #define EVEX_BAD_PARAMS -5 #define EVEX_MICROSECOND_UNSUP -6 - class sp_head; class Sql_alloc; - class Event_queue_element_for_exec { public: @@ -53,6 +51,24 @@ protected: MEM_ROOT mem_root; public: + /* + ENABLED = feature can function normally (is turned on) + SLAVESIDE_DISABLED = feature is turned off on slave + DISABLED = feature is turned off + */ + enum enum_status + { + ENABLED = 1, + DISABLED, + SLAVESIDE_DISABLED + }; + + enum enum_on_completion + { + ON_COMPLETION_DROP = 1, + ON_COMPLETION_PRESERVE + }; + LEX_STRING dbname; LEX_STRING name; LEX_STRING definer;// combination of user and host @@ -82,20 +98,9 @@ protected: bool last_executed_changed; public: - enum enum_status - { - ENABLED = 1, - DISABLED - }; - - enum enum_on_completion - { - ON_COMPLETION_DROP = 1, - ON_COMPLETION_PRESERVE - }; - - enum enum_on_completion on_completion; - enum enum_status status; + int on_completion; + int status; + longlong originator; my_time_t last_executed; my_time_t execute_at; @@ -195,19 +200,10 @@ private: class Event_parse_data : public Sql_alloc { public: - enum enum_status - { - ENABLED = 1, - DISABLED - }; - enum enum_on_completion - { - ON_COMPLETION_DROP = 1, - 
ON_COMPLETION_PRESERVE - }; - enum enum_on_completion on_completion; - enum enum_status status; + int on_completion; + int status; + longlong originator; /* do_not_create will be set if STARTS time is in the past and on_completion == ON_COMPLETION_DROP. @@ -277,6 +273,7 @@ private: check_if_in_the_past(THD *thd, my_time_t ltime_utc); Event_parse_data(const Event_parse_data &); /* Prevent use of these */ + void check_originator_id(THD *thd); void operator=(Event_parse_data &); }; diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc index 859836c72a8..cc981c9cb69 100644 --- a/sql/event_db_repository.cc +++ b/sql/event_db_repository.cc @@ -88,7 +88,7 @@ const TABLE_FIELD_W_TYPE event_table_fields[ET_FIELD_COUNT] = }, { { C_STRING_WITH_LEN("status") }, - { C_STRING_WITH_LEN("enum('ENABLED','DISABLED')") }, + { C_STRING_WITH_LEN("enum('ENABLED','DISABLED','SLAVESIDE_DISABLED')") }, {NULL, 0} }, { @@ -114,6 +114,11 @@ const TABLE_FIELD_W_TYPE event_table_fields[ET_FIELD_COUNT] = { C_STRING_WITH_LEN("utf8") } }, { + { C_STRING_WITH_LEN("originator") }, + { C_STRING_WITH_LEN("int(10)") }, + {NULL, 0} + }, + { { C_STRING_WITH_LEN("time_zone") }, { C_STRING_WITH_LEN("char(64)") }, { C_STRING_WITH_LEN("latin1") } @@ -175,6 +180,9 @@ mysql_event_fill_row(THD *thd, TABLE *table, Event_parse_data *et, fields[ET_FIELD_STATUS]->store((longlong)et->status, TRUE); + fields[ET_FIELD_ORIGINATOR]->store((longlong)et->originator, TRUE); + + /* Change the SQL_MODE only if body was present in an ALTER EVENT and of course always during CREATE EVENT. @@ -499,7 +507,6 @@ Event_db_repository::create_event(THD *thd, Event_parse_data *parse_data, if (open_event_table(thd, TL_WRITE, &table)) goto end; - DBUG_PRINT("info", ("name: %.*s", parse_data->name.length, parse_data->name.str)); @@ -555,6 +562,7 @@ Event_db_repository::create_event(THD *thd, Event_parse_data *parse_data, if (mysql_event_fill_row(thd, table, parse_data, FALSE)) goto end; + table->field[ET_FIELD_STATUS]->store((longlong)parse_data->status, TRUE); if ((ret= table->file->ha_write_row(table->record[0]))) { diff --git a/sql/event_db_repository.h b/sql/event_db_repository.h index 01de0be3afd..64e19854933 100644 --- a/sql/event_db_repository.h +++ b/sql/event_db_repository.h @@ -24,7 +24,7 @@ enum enum_events_table_field { - ET_FIELD_DB = 0, + ET_FIELD_DB = 0, ET_FIELD_NAME, ET_FIELD_BODY, ET_FIELD_DEFINER, @@ -40,6 +40,7 @@ enum enum_events_table_field ET_FIELD_ON_COMPLETION, ET_FIELD_SQL_MODE, ET_FIELD_COMMENT, + ET_FIELD_ORIGINATOR, ET_FIELD_TIME_ZONE, ET_FIELD_COUNT /* a cool trick to count the number of fields :) */ }; diff --git a/sql/event_queue.cc b/sql/event_queue.cc index 2fe6e0b87e0..9b14d3cda7e 100644 --- a/sql/event_queue.cc +++ b/sql/event_queue.cc @@ -183,7 +183,7 @@ Event_queue::create_event(THD *thd, Event_queue_element *new_element, /* Will do nothing if the event is disabled */ new_element->compute_next_execution_time(); - if (new_element->status == Event_queue_element::DISABLED) + if (new_element->status != Event_queue_element::ENABLED) { delete new_element; *created= FALSE; @@ -221,7 +221,8 @@ Event_queue::update_event(THD *thd, LEX_STRING dbname, LEX_STRING name, DBUG_ENTER("Event_queue::update_event"); DBUG_PRINT("enter", ("thd: 0x%lx et=[%s.%s]", (long) thd, dbname.str, name.str)); - if (new_element->status == Event_queue_element::DISABLED) + if ((new_element->status == Event_queue_element::DISABLED) || + (new_element->status == Event_queue_element::SLAVESIDE_DISABLED)) { DBUG_PRINT("info", ("The event is 
disabled.")); /* diff --git a/sql/events.cc b/sql/events.cc index 3182cea25f2..7c44116af55 100644 --- a/sql/events.cc +++ b/sql/events.cc @@ -393,6 +393,12 @@ Events::create_event(THD *thd, Event_parse_data *parse_data, if (parse_data->do_not_create) DBUG_RETURN(FALSE); + /* + Turn off row binlogging of this statement and use statement-based + so that all supporting tables are updated for CREATE EVENT command. + */ + if (thd->current_stmt_binlog_row_based) + thd->clear_current_stmt_binlog_row_based(); pthread_mutex_lock(&LOCK_event_metadata); @@ -411,11 +417,19 @@ Events::create_event(THD *thd, Event_parse_data *parse_data, TRUE); delete new_element; } - else if (event_queue) + else { /* TODO: do not ignore the out parameter and a possible OOM error! */ bool created; - event_queue->create_event(thd, new_element, &created); + if (event_queue) + event_queue->create_event(thd, new_element, &created); + /* Binlog the create event. */ + if (mysql_bin_log.is_open() && (thd->query_length > 0)) + { + thd->clear_error(); + thd->binlog_query(THD::MYSQL_QUERY_TYPE, + thd->query, thd->query_length, FALSE, FALSE); + } } } pthread_mutex_unlock(&LOCK_event_metadata); @@ -499,7 +513,15 @@ Events::update_event(THD *thd, Event_parse_data *parse_data, } } + /* + Turn off row binlogging of this statement and use statement-based + so that all supporting tables are updated for UPDATE EVENT command. + */ + if (thd->current_stmt_binlog_row_based) + thd->clear_current_stmt_binlog_row_based(); + pthread_mutex_lock(&LOCK_event_metadata); + /* On error conditions my_error() is called so no need to handle here */ if (!(ret= db_repository->update_event(thd, parse_data, new_dbname, new_name))) @@ -515,7 +537,7 @@ Events::update_event(THD *thd, Event_parse_data *parse_data, DBUG_ASSERT(ret == OP_LOAD_ERROR); delete new_element; } - else if (event_queue) + else { /* TODO: check if an update actually has inserted an entry @@ -523,8 +545,16 @@ Events::update_event(THD *thd, Event_parse_data *parse_data, If not, and the element is ON COMPLETION NOT PRESERVE, delete it right away. */ - event_queue->update_event(thd, parse_data->dbname, parse_data->name, - new_element); + if (event_queue) + event_queue->update_event(thd, parse_data->dbname, parse_data->name, + new_element); + /* Binlog the alter event. */ + if (mysql_bin_log.is_open() && (thd->query_length > 0)) + { + thd->clear_error(); + thd->binlog_query(THD::MYSQL_QUERY_TYPE, + thd->query, thd->query_length, FALSE, FALSE); + } } } pthread_mutex_unlock(&LOCK_event_metadata); @@ -584,12 +614,26 @@ Events::drop_event(THD *thd, LEX_STRING dbname, LEX_STRING name, bool if_exists) is_schema_db(dbname.str))) DBUG_RETURN(TRUE); + /* + Turn off row binlogging of this statement and use statement-based so + that all supporting tables are updated for DROP EVENT command. + */ + if (thd->current_stmt_binlog_row_based) + thd->clear_current_stmt_binlog_row_based(); + pthread_mutex_lock(&LOCK_event_metadata); /* On error conditions my_error() is called so no need to handle here */ if (!(ret= db_repository->drop_event(thd, dbname, name, if_exists))) { if (event_queue) event_queue->drop_event(thd, dbname, name); + /* Binlog the drop event. 
*/ + if (mysql_bin_log.is_open() && (thd->query_length > 0)) + { + thd->clear_error(); + thd->binlog_query(THD::MYSQL_QUERY_TYPE, + thd->query, thd->query_length, FALSE, FALSE); + } } pthread_mutex_unlock(&LOCK_event_metadata); DBUG_RETURN(ret); diff --git a/sql/field.cc b/sql/field.cc index 7240a59c75f..250a9e3c1b9 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -8506,9 +8506,28 @@ char *Field_bit::pack(char *to, const char *from, uint max_length) { DBUG_ASSERT(max_length); uint length; - if (bit_len) + if (bit_len > 0) { - uchar bits= get_rec_bits(bit_ptr, bit_ofs, bit_len); + /* + We have the following: + + ptr Points into a field in record R1 + from Points to a field in a record R2 + bit_ptr Points to the byte (in the null bytes) that holds the + odd bits of R1 + from_bitp Points to the byte that holds the odd bits of R2 + + We have the following: + + ptr - bit_ptr = from - from_bitp + + We want to isolate 'from_bitp', so this gives: + + ptr - bit_ptr - from = - from_bitp + - ptr + bit_ptr + from = from_bitp + bit_ptr + from - ptr = from_bitp + */ + uchar bits= get_rec_bits(bit_ptr + (from - ptr), bit_ofs, bit_len); *to++= bits; } length= min(bytes_in_rec, max_length - (bit_len > 0)); @@ -8519,9 +8538,16 @@ char *Field_bit::pack(char *to, const char *from, uint max_length) const char *Field_bit::unpack(char *to, const char *from) { - if (bit_len) + if (bit_len > 0) { - set_rec_bits(*from, bit_ptr, bit_ofs, bit_len); + /* + set_rec_bits is a macro, don't put the post-increment in the + argument since that might cause strange side-effects. + + For the choice of the second argument, see the explanation for + Field_bit::pack(). + */ + set_rec_bits(*from, bit_ptr + (to - ptr), bit_ofs, bit_len); from++; } memcpy(to, from, bytes_in_rec); diff --git a/sql/field.h b/sql/field.h index f27ed8b9394..b2169dac5b6 100644 --- a/sql/field.h +++ b/sql/field.h @@ -193,9 +193,9 @@ public: */ virtual void sql_type(String &str) const =0; virtual uint size_of() const =0; // For new field - inline bool is_null(uint row_offset=0) + inline bool is_null(my_ptrdiff_t row_offset= 0) { return null_ptr ? (null_ptr[row_offset] & null_bit ? 1 : 0) : table->null_row; } - inline bool is_real_null(uint row_offset=0) + inline bool is_real_null(my_ptrdiff_t row_offset= 0) { return null_ptr ? (null_ptr[row_offset] & null_bit ? 
1 : 0) : 0; } inline bool is_null_in_record(const uchar *record) { @@ -210,9 +210,9 @@ public: return 0; return test(null_ptr[offset] & null_bit); } - inline void set_null(int row_offset=0) + inline void set_null(my_ptrdiff_t row_offset= 0) { if (null_ptr) null_ptr[row_offset]|= null_bit; } - inline void set_notnull(int row_offset=0) + inline void set_notnull(my_ptrdiff_t row_offset= 0) { if (null_ptr) null_ptr[row_offset]&= (uchar) ~null_bit; } inline bool maybe_null(void) { return null_ptr != 0 || table->maybe_null; } inline bool real_maybe_null(void) { return null_ptr != 0; } @@ -504,6 +504,7 @@ public: {} int store_decimal(const my_decimal *); my_decimal *val_decimal(my_decimal *); + uint32 max_display_length() { return field_length; } }; @@ -532,7 +533,6 @@ public: void overflow(bool negative); bool zero_pack() const { return 0; } void sql_type(String &str) const; - uint32 max_display_length() { return field_length; } }; @@ -783,7 +783,6 @@ public: void sort_string(char *buff,uint length); uint32 pack_length() const { return sizeof(float); } void sql_type(String &str) const; - uint32 max_display_length() { return 24; } }; @@ -825,7 +824,6 @@ public: void sort_string(char *buff,uint length); uint32 pack_length() const { return sizeof(double); } void sql_type(String &str) const; - uint32 max_display_length() { return 53; } uint size_of() const { return sizeof(*this); } }; @@ -1355,7 +1353,7 @@ public: int store_decimal(const my_decimal *); void get_key_image(char *buff,uint length,imagetype type); uint size_of() const { return sizeof(*this); } - int reset(void) { return !maybe_null(); } + int reset(void) { return !maybe_null() || Field_blob::reset(); } }; #endif /*HAVE_SPATIAL*/ diff --git a/sql/field_conv.cc b/sql/field_conv.cc index 805aba01754..a718a402897 100644 --- a/sql/field_conv.cc +++ b/sql/field_conv.cc @@ -173,7 +173,7 @@ set_field_to_null_with_conversions(Field *field, bool no_conversions) if (field == field->table->next_number_field) { field->table->auto_increment_field_not_null= FALSE; - return 0; // field is set in handler.cc + return 0; // field is set in fill_record() } if (field->table->in_use->count_cuted_fields == CHECK_FIELD_WARN) { @@ -336,6 +336,13 @@ static void do_field_real(Copy_field *copy) } +static void do_field_decimal(Copy_field *copy) +{ + my_decimal value; + copy->to_field->store_decimal(copy->from_field->val_decimal(&value)); +} + + /* string copy for single byte characters set when to string is shorter than from string @@ -580,6 +587,8 @@ void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*) if (to->real_type() == MYSQL_TYPE_BIT || from->real_type() == MYSQL_TYPE_BIT) return do_field_int; + if (to->result_type() == DECIMAL_RESULT) + return do_field_decimal; // Check if identical fields if (from->result_type() == STRING_RESULT) { diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index bef58339e5b..c989ddc338d 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -733,10 +733,9 @@ int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field, DBUG_PRINT("info", ("bit field")); DBUG_DUMP("value", (char*)&bits, pack_len); #ifdef WORDS_BIGENDIAN - if (pack_len < 5) - { - DBUG_RETURN(ndb_op->setValue(fieldnr, ((char*)&bits)+4) != 0); - } + /* store lsw first */ + bits = ((bits >> 32) & 0x00000000FFFFFFFF) + | ((bits << 32) & 0xFFFFFFFF00000000); #endif DBUG_RETURN(ndb_op->setValue(fieldnr, (char*)&bits) != 0); } @@ -1008,7 +1007,7 @@ int ha_ndbcluster::get_metadata(const char *path) DBUG_ASSERT(m_table == NULL); 
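The WORDS_BIGENDIAN branch added to set_ndb_value() above, and its counterpart in ndb_unpack_record() just below, both swap the two 32-bit words of a 64-bit bit-field value so that the least significant word is stored first. A minimal standalone C++ sketch of that swap (the function name and test values are illustrative, not taken from the patch):

  #include <cstdint>
  #include <cassert>

  // Swap the high and low 32-bit words of a 64-bit value, as the
  // WORDS_BIGENDIAN code path does before handing the bits to NDB.
  static std::uint64_t swap_words(std::uint64_t bits)
  {
    return ((bits >> 32) & 0x00000000FFFFFFFFULL)
         | ((bits << 32) & 0xFFFFFFFF00000000ULL);
  }

  int main()
  {
    assert(swap_words(0x1122334455667788ULL) == 0x5566778811223344ULL);
    // Applying the swap twice restores the original value, which is why
    // the store side and the read side can use the same transformation.
    assert(swap_words(swap_words(0xDEADBEEFCAFEF00DULL)) == 0xDEADBEEFCAFEF00DULL);
    return 0;
  }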
DBUG_ASSERT(m_table_info == NULL); - const void *data, *pack_data; + const void *data= NULL, *pack_data= NULL; uint length, pack_length; /* @@ -3155,10 +3154,21 @@ void ndb_unpack_record(TABLE *table, NdbValue *value, else { DBUG_PRINT("info", ("bit field H'%.8X%.8X", - *(Uint32*) (*value).rec->aRef(), - *((Uint32*) (*value).rec->aRef()+1))); - field_bit->Field_bit::store((longlong) (*value).rec->u_64_value(), + *(Uint32 *)(*value).rec->aRef(), + *((Uint32 *)(*value).rec->aRef()+1))); +#ifdef WORDS_BIGENDIAN + /* lsw is stored first */ + Uint32 *buf= (Uint32 *)(*value).rec->aRef(); + field_bit->Field_bit::store((((longlong)*buf) + & 0x000000000FFFFFFFF) + | + ((((longlong)*(buf+1)) << 32) + & 0xFFFFFFFF00000000), TRUE); +#else + field_bit->Field_bit::store((longlong) + (*value).rec->u_64_value(), TRUE); +#endif } /* Move back internal field pointer to point to original @@ -3863,7 +3873,8 @@ int ha_ndbcluster::extra(enum ha_extra_function operation) break; case HA_EXTRA_WRITE_CAN_REPLACE: DBUG_PRINT("info", ("HA_EXTRA_WRITE_CAN_REPLACE")); - if (!m_has_unique_index) + if (!m_has_unique_index || + current_thd->slave_thread) /* always set if slave, quick fix for bug 27378 */ { DBUG_PRINT("info", ("Turning ON use of write instead of insert")); m_use_write= TRUE; @@ -3893,6 +3904,12 @@ int ha_ndbcluster::reset() */ if (m_part_info) bitmap_set_all(&m_part_info->used_partitions); + + /* reset flags set by extra calls */ + m_ignore_dup_key= FALSE; + m_use_write= FALSE; + m_ignore_no_key= FALSE; + DBUG_RETURN(0); } @@ -4732,10 +4749,11 @@ int ha_ndbcluster::create(const char *name, NDBTAB tab; NDBCOL col; uint pack_length, length, i, pk_length= 0; - const void *data, *pack_data; + const void *data= NULL, *pack_data= NULL; bool create_from_engine= (create_info->table_options & HA_OPTION_CREATE_FROM_ENGINE); bool is_truncate= (thd->lex->sql_command == SQLCOM_TRUNCATE); char tablespace[FN_LEN]; + NdbDictionary::Table::SingleUserMode single_user_mode= NdbDictionary::Table::SingleUserModeLocked; DBUG_ENTER("ha_ndbcluster::create"); DBUG_PRINT("enter", ("name: %s", name)); @@ -4787,19 +4805,23 @@ int ha_ndbcluster::create(const char *name, schema distribution table is setup ( unless it is a creation of the schema dist table itself ) */ - if (!ndb_schema_share && - !(strcmp(m_dbname, NDB_REP_DB) == 0 && - strcmp(m_tabname, NDB_SCHEMA_TABLE) == 0)) + if (!ndb_schema_share) { - DBUG_PRINT("info", ("Schema distribution table not setup")); - DBUG_RETURN(HA_ERR_NO_CONNECTION); + if (!(strcmp(m_dbname, NDB_REP_DB) == 0 && + strcmp(m_tabname, NDB_SCHEMA_TABLE) == 0)) + { + DBUG_PRINT("info", ("Schema distribution table not setup")); + DBUG_RETURN(HA_ERR_NO_CONNECTION); + } + single_user_mode = NdbDictionary::Table::SingleUserModeReadWrite; } #endif /* HAVE_NDB_BINLOG */ DBUG_PRINT("table", ("name: %s", m_tabname)); tab.setName(m_tabname); tab.setLogging(!(create_info->options & HA_LEX_CREATE_TMP_TABLE)); - + tab.setSingleUserMode(single_user_mode); + // Save frm data for this table if (readfrm(name, &data, &length)) DBUG_RETURN(1); @@ -4822,7 +4844,8 @@ int ha_ndbcluster::create(const char *name, if ((my_errno= create_ndb_column(col, field, create_info))) DBUG_RETURN(my_errno); - if (create_info->storage_media == HA_SM_DISK) + if (create_info->storage_media == HA_SM_DISK || + create_info->tablespace) col.setStorageType(NdbDictionary::Column::StorageTypeDisk); else col.setStorageType(NdbDictionary::Column::StorageTypeMemory); @@ -5064,7 +5087,7 @@ int ha_ndbcluster::create_handler_files(const char *file, { Ndb* 
ndb; const NDBTAB *tab; - const void *data, *pack_data; + const void *data= NULL, *pack_data= NULL; uint length, pack_length; int error= 0; @@ -5562,6 +5585,7 @@ retry_temporary_error1: { ndb_table_id= h->m_table->getObjectId(); ndb_table_version= h->m_table->getObjectVersion(); + DBUG_PRINT("info", ("success 1")); } else { @@ -5575,6 +5599,7 @@ retry_temporary_error1: break; } res= ndb_to_mysql_error(&dict->getNdbError()); + DBUG_PRINT("info", ("error(1) %u", res)); } h->release_metadata(thd, ndb); } @@ -5591,6 +5616,8 @@ retry_temporary_error1: { ndb_table_id= ndbtab_g.get_table()->getObjectId(); ndb_table_version= ndbtab_g.get_table()->getObjectVersion(); + DBUG_PRINT("info", ("success 2")); + break; } else { @@ -5610,8 +5637,8 @@ retry_temporary_error1: } } } - else - res= ndb_to_mysql_error(&dict->getNdbError()); + res= ndb_to_mysql_error(&dict->getNdbError()); + DBUG_PRINT("info", ("error(2) %u", res)); break; } } @@ -6107,7 +6134,7 @@ int ndbcluster_discover(handlerton *hton, THD* thd, const char *db, int error= 0; NdbError ndb_error; uint len; - const void* data; + const void* data= NULL; Ndb* ndb; char key[FN_REFLEN]; DBUG_ENTER("ndbcluster_discover"); @@ -6186,6 +6213,7 @@ int ndbcluster_discover(handlerton *hton, THD* thd, const char *db, DBUG_RETURN(0); err: + my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR)); if (share) { /* ndb_share reference temporary free */ @@ -7607,7 +7635,9 @@ int handle_trailing_share(NDB_SHARE *share) /* Ndb share has not been released as it should */ +#ifdef NOT_YET DBUG_ASSERT(FALSE); +#endif /* This is probably an error. We can however save the situation @@ -10614,10 +10644,23 @@ bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *create_info, int pk= 0; int ai= 0; + + if (create_info->tablespace) + create_info->storage_media = HA_SM_DISK; + else + create_info->storage_media = HA_SM_MEMORY; + for (i= 0; i < table->s->fields; i++) { Field *field= table->field[i]; const NDBCOL *col= tab->getColumn(i); + if (col->getStorageType() == NDB_STORAGETYPE_MEMORY && create_info->storage_media != HA_SM_MEMORY || + col->getStorageType() == NDB_STORAGETYPE_DISK && create_info->storage_media != HA_SM_DISK) + { + DBUG_PRINT("info", ("Column storage media is changed")); + DBUG_RETURN(COMPATIBLE_DATA_NO); + } + if (field->flags & FIELD_IS_RENAMED) { DBUG_PRINT("info", ("Field has been renamed, copy table")); diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index 2615732e7ee..4dc75be79e9 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -500,7 +500,7 @@ static int ndbcluster_reset_logs(THD *thd) static int ndbcluster_binlog_index_purge_file(THD *thd, const char *file) { - if (!ndb_binlog_running) + if (!ndb_binlog_running || thd->slave_thread) return 0; DBUG_ENTER("ndbcluster_binlog_index_purge_file"); @@ -729,6 +729,9 @@ static int ndbcluster_create_ndb_apply_status_table(THD *thd) NDB_REP_DB "." 
NDB_APPLY_TABLE " ( server_id INT UNSIGNED NOT NULL," " epoch BIGINT UNSIGNED NOT NULL, " + " log_name VARCHAR(255) BINARY NOT NULL, " + " start_pos BIGINT UNSIGNED NOT NULL, " + " end_pos BIGINT UNSIGNED NOT NULL, " " PRIMARY KEY USING HASH (server_id) ) ENGINE=NDB"); run_query(thd, buf, end, TRUE, TRUE); @@ -3898,6 +3901,9 @@ restart: bzero(table->record[0], table->s->null_bytes); table->field[0]->store((longlong)::server_id); table->field[1]->store((longlong)gci); + table->field[2]->store("", 0, &my_charset_bin); + table->field[3]->store((longlong)0); + table->field[4]->store((longlong)0); trans.write_row(::server_id, injector::transaction::table(table, TRUE), &table->s->all_set, table->s->fields, diff --git a/sql/ha_ndbcluster_tables.h b/sql/ha_ndbcluster_tables.h index 9f7b9146d91..c6bc8f577f8 100644 --- a/sql/ha_ndbcluster_tables.h +++ b/sql/ha_ndbcluster_tables.h @@ -15,7 +15,9 @@ */ #define NDB_REP_DB "mysql" +#define OLD_NDB_REP_DB "cluster" #define NDB_REP_TABLE "ndb_binlog_index" #define NDB_APPLY_TABLE "ndb_apply_status" #define OLD_NDB_APPLY_TABLE "apply_status" #define NDB_SCHEMA_TABLE "ndb_schema" +#define OLD_NDB_SCHEMA_TABLE "schema" diff --git a/sql/handler.cc b/sql/handler.cc index 0ecfce9349e..617bf9ee378 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -1753,7 +1753,6 @@ int handler::update_auto_increment() bool append= FALSE; THD *thd= table->in_use; struct system_variables *variables= &thd->variables; - bool auto_increment_field_not_null; DBUG_ENTER("handler::update_auto_increment"); /* @@ -1761,11 +1760,9 @@ int handler::update_auto_increment() than the interval, but not smaller. */ DBUG_ASSERT(next_insert_id >= auto_inc_interval_for_cur_row.minimum()); - auto_increment_field_not_null= table->auto_increment_field_not_null; - table->auto_increment_field_not_null= FALSE; // to reset for next row if ((nr= table->next_number_field->val_int()) != 0 || - auto_increment_field_not_null && + table->auto_increment_field_not_null && thd->variables.sql_mode & MODE_NO_AUTO_VALUE_ON_ZERO) { /* diff --git a/sql/item.cc b/sql/item.cc index 0c03bd14d5f..613b72ad05e 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -1088,7 +1088,7 @@ bool Item_splocal::set_value(THD *thd, sp_rcontext *ctx, Item **it) Item_case_expr methods *****************************************************************************/ -Item_case_expr::Item_case_expr(int case_expr_id) +Item_case_expr::Item_case_expr(uint case_expr_id) :Item_sp_variable( C_STRING_WITH_LEN("case_expr")), m_case_expr_id(case_expr_id) { @@ -1125,6 +1125,8 @@ Item_case_expr::this_item_addr(THD *thd, Item **) void Item_case_expr::print(String *str) { + if (str->reserve(MAX_INT_WIDTH + sizeof("case_expr@"))) + return; /* purecov: inspected */ VOID(str->append(STRING_WITH_LEN("case_expr@"))); str->qs_append(m_case_expr_id); } @@ -1289,15 +1291,18 @@ void Item::split_sum_func2(THD *thd, Item **ref_pointer_array, Exception is Item_direct_view_ref which we need to convert to Item_ref to allow fields from view being stored in tmp table. 
*/ + Item_aggregate_ref *item_ref; uint el= fields.elements; - Item *new_item, *real_itm= real_item(); + Item *real_itm= real_item(); ref_pointer_array[el]= real_itm; - if (!(new_item= new Item_aggregate_ref(&thd->lex->current_select->context, + if (!(item_ref= new Item_aggregate_ref(&thd->lex->current_select->context, ref_pointer_array + el, 0, name))) return; // fatal_error is set + if (type() == SUM_FUNC_ITEM) + item_ref->depended_from= ((Item_sum *) this)->depended_from(); fields.push_front(real_itm); - thd->change_item_tree(ref, new_item); + thd->change_item_tree(ref, item_ref); } } @@ -4358,13 +4363,16 @@ Field *Item::tmp_table_field_from_field_type(TABLE *table, bool fixed_length) case MYSQL_TYPE_MEDIUM_BLOB: case MYSQL_TYPE_LONG_BLOB: case MYSQL_TYPE_BLOB: - case MYSQL_TYPE_GEOMETRY: if (this->type() == Item::TYPE_HOLDER) field= new Field_blob(max_length, maybe_null, name, collation.collation, 1); else field= new Field_blob(max_length, maybe_null, name, collation.collation); break; // Blob handled outside of case + case MYSQL_TYPE_GEOMETRY: + return new Field_geom(max_length, maybe_null, name, table->s, + (Field::geometry_type) + ((Item_geometry_func *)this)->get_geometry_type()); } if (field) field->init(table); @@ -6437,8 +6445,6 @@ Item_type_holder::Item_type_holder(THD *thd, Item *item) :Item(thd, item), enum_set_typelib(0), fld_type(get_real_type(item)) { DBUG_ASSERT(item->fixed); - - max_length= display_length(item); maybe_null= item->maybe_null; collation.set(item->collation); get_full_info(item); @@ -6610,11 +6616,17 @@ bool Item_type_holder::join_types(THD *thd, Item *item) { int delta1= max_length_orig - decimals_orig; int delta2= item->max_length - item->decimals; - if (fld_type == MYSQL_TYPE_DECIMAL) - max_length= max(delta1, delta2) + decimals; - else - max_length= min(max(delta1, delta2) + decimals, - (fld_type == MYSQL_TYPE_FLOAT) ? FLT_DIG+6 : DBL_DIG+7); + max_length= max(delta1, delta2) + decimals; + if (fld_type == MYSQL_TYPE_FLOAT && max_length > FLT_DIG + 2) + { + max_length= FLT_DIG + 6; + decimals= NOT_FIXED_DEC; + } + if (fld_type == MYSQL_TYPE_DOUBLE && max_length > DBL_DIG + 2) + { + max_length= DBL_DIG + 7; + decimals= NOT_FIXED_DEC; + } } else max_length= (fld_type == MYSQL_TYPE_FLOAT) ? FLT_DIG+6 : DBL_DIG+7; diff --git a/sql/item.h b/sql/item.h index dce03299929..bf662b98732 100644 --- a/sql/item.h +++ b/sql/item.h @@ -1116,7 +1116,7 @@ inline Item_result Item_splocal::result_type() const class Item_case_expr :public Item_sp_variable { public: - Item_case_expr(int case_expr_id); + Item_case_expr(uint case_expr_id); public: Item *this_item(); @@ -1135,7 +1135,7 @@ public: void print(String *str); private: - int m_case_expr_id; + uint m_case_expr_id; }; /***************************************************************************** @@ -2004,6 +2004,11 @@ public: { return depended_from ? OUTER_REF_TABLE_BIT : (*ref)->used_tables(); } + void update_used_tables() + { + if (!depended_from) + (*ref)->update_used_tables(); + } table_map not_null_tables() const { return (*ref)->not_null_tables(); } void set_result_field(Field *field) { result_field= field; } bool is_result_field() { return 1; } diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 7e5df8fdbbf..ffa6b4caf2a 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -2271,8 +2271,8 @@ int cmp_longlong(void *cmp_arg, One of the args is unsigned and is too big to fit into the positive signed range. Report no match. 
*/ - if (a->unsigned_flag && ((ulonglong) a->val) > LONGLONG_MAX || - b->unsigned_flag && ((ulonglong) b->val) > LONGLONG_MAX) + if (a->unsigned_flag && ((ulonglong) a->val) > (ulonglong) LONGLONG_MAX || + b->unsigned_flag && ((ulonglong) b->val) > (ulonglong) LONGLONG_MAX) return a->unsigned_flag ? 1 : -1; /* Although the signedness differs both args can fit into the signed @@ -2458,7 +2458,8 @@ void in_decimal::set(uint pos, Item *item) dec->len= DECIMAL_BUFF_LENGTH; dec->fix_buffer_pointer(); my_decimal *res= item->val_decimal(dec); - if (res != dec) + /* if item->val_decimal() is evaluated to NULL then res == 0 */ + if (!item->null_value && res != dec) my_decimal2decimal(res, dec); } diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index a011a2884c8..e6f951bfc7b 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -1813,7 +1813,8 @@ void Item_func_soundex::fix_length_and_dec() { collation.set(args[0]->collation); max_length=args[0]->max_length; - set_if_bigger(max_length,4); + set_if_bigger(max_length, 4 * collation.collation->mbminlen); + tmp_value.set_charset(collation.collation); } @@ -1823,14 +1824,15 @@ void Item_func_soundex::fix_length_and_dec() else return 0 */ -static char soundex_toupper(char ch) +static int soundex_toupper(int ch) { return (ch >= 'a' && ch <= 'z') ? ch - 'a' + 'A' : ch; } -static char get_scode(char *ptr) + +static char get_scode(int wc) { - uchar ch= soundex_toupper(*ptr); + int ch= soundex_toupper(wc); if (ch < 'A' || ch > 'Z') { // Thread extended alfa (country spec) @@ -1840,46 +1842,121 @@ static char get_scode(char *ptr) } +static bool my_uni_isalpha(int wc) +{ + /* + Return true for all Basic Latin letters: a..z A..Z. + Return true for all Unicode characters with code higher than U+00C0: + - characters between 'z' and U+00C0 are controls and punctuations. + - "U+00C0 LATIN CAPITAL LETTER A WITH GRAVE" is the first letter after 'z'. + */ + return (wc >= 'a' && wc <= 'z') || + (wc >= 'A' && wc <= 'Z') || + (wc >= 0xC0); +} + + String *Item_func_soundex::val_str(String *str) { DBUG_ASSERT(fixed == 1); String *res =args[0]->val_str(str); char last_ch,ch; CHARSET_INFO *cs= collation.collation; + my_wc_t wc; + uint nchars; + int rc; - if ((null_value=args[0]->null_value)) + if ((null_value= args[0]->null_value)) return 0; /* purecov: inspected */ - if (tmp_value.alloc(max(res->length(),4))) + if (tmp_value.alloc(max(res->length(), 4 * cs->mbminlen))) return str; /* purecov: inspected */ char *to= (char *) tmp_value.ptr(); - char *from= (char *) res->ptr(), *end=from+res->length(); - tmp_value.set_charset(cs); + char *to_end= to + tmp_value.alloced_length(); + char *from= (char *) res->ptr(), *end= from + res->length(); - while (from != end && !my_isalpha(cs,*from)) // Skip pre-space - from++; /* purecov: inspected */ - if (from == end) - return &my_empty_string; // No alpha characters. - *to++ = soundex_toupper(*from); // Copy first letter - last_ch = get_scode(from); // code of the first letter - // for the first 'double-letter check. 
- // Loop on input letters until - // end of input (null) or output - // letter code count = 3 - for (from++ ; from < end ; from++) - { - if (!my_isalpha(cs,*from)) - continue; - ch=get_scode(from); + for ( ; ; ) /* Skip pre-space */ + { + if ((rc= cs->cset->mb_wc(cs, &wc, (uchar*) from, (uchar*) end)) <= 0) + return &my_empty_string; /* EOL or invalid byte sequence */ + + if (rc == 1 && cs->ctype) + { + /* Single byte letter found */ + if (my_isalpha(cs, *from)) + { + last_ch= get_scode(*from); // Code of the first letter + *to++= soundex_toupper(*from++); // Copy first letter + break; + } + from++; + } + else + { + from+= rc; + if (my_uni_isalpha(wc)) + { + /* Multibyte letter found */ + wc= soundex_toupper(wc); + last_ch= get_scode(wc); // Code of the first letter + if ((rc= cs->cset->wc_mb(cs, wc, (uchar*) to, (uchar*) to_end)) <= 0) + { + /* Extra safety - should not really happen */ + DBUG_ASSERT(false); + return &my_empty_string; + } + to+= rc; + break; + } + } + } + + /* + last_ch is now set to the first 'double-letter' check. + loop on input letters until end of input + */ + for (nchars= 1 ; ; ) + { + if ((rc= cs->cset->mb_wc(cs, &wc, (uchar*) from, (uchar*) end)) <= 0) + break; /* EOL or invalid byte sequence */ + + if (rc == 1 && cs->ctype) + { + if (!my_isalpha(cs, *from++)) + continue; + } + else + { + from+= rc; + if (!my_uni_isalpha(wc)) + continue; + } + + ch= get_scode(wc); if ((ch != '0') && (ch != last_ch)) // if not skipped or double { - *to++ = ch; // letter, copy to output - last_ch = ch; // save code of last input letter - } // for next double-letter check + // letter, copy to output + if ((rc= cs->cset->wc_mb(cs, (my_wc_t) ch, + (uchar*) to, (uchar*) to_end)) <= 0) + { + // Extra safety - should not really happen + DBUG_ASSERT(false); + break; + } + to+= rc; + nchars++; + last_ch= ch; // save code of last input letter + } // for next double-letter check } - for (end=(char*) tmp_value.ptr()+4 ; to < end ; to++) - *to = '0'; - *to=0; // end string + + /* Pad up to 4 characters with DIGIT ZERO, if the string is shorter */ + if (nchars < 4) + { + uint nbytes= (4 - nchars) * cs->mbminlen; + cs->cset->fill(cs, to, nbytes, '0'); + to+= nbytes; + } + tmp_value.length((uint) (to-tmp_value.ptr())); return &tmp_value; } diff --git a/sql/item_sum.cc b/sql/item_sum.cc index f34fc008186..7c3762d4785 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -63,6 +63,7 @@ bool Item_sum::init_sum_func_check(THD *thd) nest_level= thd->lex->current_select->nest_level; ref_by= 0; aggr_level= -1; + aggr_sel= NULL; max_arg_level= -1; max_sum_func_level= -1; return FALSE; @@ -148,9 +149,14 @@ bool Item_sum::check_sum_func(THD *thd, Item **ref) if (register_sum_func(thd, ref)) return TRUE; invalid= aggr_level < 0 && !(allow_sum_func & (1 << nest_level)); + if (!invalid && thd->variables.sql_mode & MODE_ANSI) + invalid= aggr_level < 0 && max_arg_level < nest_level; } if (!invalid && aggr_level < 0) + { aggr_level= nest_level; + aggr_sel= thd->lex->current_select; + } /* By this moment we either found a subquery where the set function is to be aggregated and assigned a value that is >= 0 to aggr_level, @@ -160,8 +166,9 @@ bool Item_sum::check_sum_func(THD *thd, Item **ref) Additionally we have to check whether possible nested set functions are acceptable here: they are not, if the level of aggregation of some of them is less than aggr_level. 
- */ - invalid= aggr_level <= max_sum_func_level; + */ + if (!invalid) + invalid= aggr_level <= max_sum_func_level; if (invalid) { my_message(ER_INVALID_GROUP_FUNC_USE, ER(ER_INVALID_GROUP_FUNC_USE), @@ -176,6 +183,7 @@ bool Item_sum::check_sum_func(THD *thd, Item **ref) */ set_if_bigger(in_sum_func->max_sum_func_level, aggr_level); } + update_used_tables(); thd->lex->in_sum_func= in_sum_func; return FALSE; } @@ -210,7 +218,6 @@ bool Item_sum::check_sum_func(THD *thd, Item **ref) bool Item_sum::register_sum_func(THD *thd, Item **ref) { SELECT_LEX *sl; - SELECT_LEX *aggr_sl= NULL; nesting_map allow_sum_func= thd->lex->allow_sum_func; for (sl= thd->lex->current_select->master_unit()->outer_select() ; sl && sl->nest_level > max_arg_level; @@ -220,7 +227,7 @@ bool Item_sum::register_sum_func(THD *thd, Item **ref) { /* Found the most nested subquery where the function can be aggregated */ aggr_level= sl->nest_level; - aggr_sl= sl; + aggr_sel= sl; } } if (sl && (allow_sum_func & (1 << sl->nest_level))) @@ -231,21 +238,22 @@ bool Item_sum::register_sum_func(THD *thd, Item **ref) The set function will be aggregated in this subquery. */ aggr_level= sl->nest_level; - aggr_sl= sl; + aggr_sel= sl; + } if (aggr_level >= 0) { ref_by= ref; - /* Add the object to the list of registered objects assigned to aggr_sl */ - if (!aggr_sl->inner_sum_func_list) + /* Add the object to the list of registered objects assigned to aggr_sel */ + if (!aggr_sel->inner_sum_func_list) next= this; else { - next= aggr_sl->inner_sum_func_list->next; - aggr_sl->inner_sum_func_list->next= this; + next= aggr_sel->inner_sum_func_list->next; + aggr_sel->inner_sum_func_list->next= this; } - aggr_sl->inner_sum_func_list= this; - aggr_sl->with_sum_func= 1; + aggr_sel->inner_sum_func_list= this; + aggr_sel->with_sum_func= 1; /* Mark Item_subselect(s) as containing aggregate function all the way up @@ -263,16 +271,17 @@ bool Item_sum::register_sum_func(THD *thd, Item **ref) has aggregate functions directly referenced (i.e. not through a sub-select). 
*/ for (sl= thd->lex->current_select; - sl && sl != aggr_sl && sl->master_unit()->item; + sl && sl != aggr_sel && sl->master_unit()->item; sl= sl->master_unit()->outer_select() ) sl->master_unit()->item->with_sum_func= 1; } + thd->lex->current_select->mark_as_dependent(aggr_sel); return FALSE; } -Item_sum::Item_sum(List<Item> &list) - :arg_count(list.elements) +Item_sum::Item_sum(List<Item> &list) :arg_count(list.elements), + forced_const(FALSE) { if ((args=(Item**) sql_alloc(sizeof(Item*)*arg_count))) { @@ -296,7 +305,10 @@ Item_sum::Item_sum(List<Item> &list) Item_sum::Item_sum(THD *thd, Item_sum *item): Item_result_field(thd, item), arg_count(item->arg_count), - quick_group(item->quick_group) + aggr_sel(item->aggr_sel), + nest_level(item->nest_level), aggr_level(item->aggr_level), + quick_group(item->quick_group), used_tables_cache(item->used_tables_cache), + forced_const(item->forced_const) { if (arg_count <= 2) args=tmp_args; @@ -407,7 +419,7 @@ Field *Item_sum::create_tmp_field(bool group, TABLE *table, break; case STRING_RESULT: if (max_length/collation.collation->mbmaxlen <= 255 || - max_length/collation.collation->mbmaxlen >=UINT_MAX16 || + convert_blob_length >=UINT_MAX16 || !convert_blob_length) return make_string_field(table); field= new Field_varstring(convert_blob_length, maybe_null, @@ -429,6 +441,25 @@ Field *Item_sum::create_tmp_field(bool group, TABLE *table, } +void Item_sum::update_used_tables () +{ + if (!forced_const) + { + used_tables_cache= 0; + for (uint i=0 ; i < arg_count ; i++) + { + args[i]->update_used_tables(); + used_tables_cache|= args[i]->used_tables(); + } + + used_tables_cache&= PSEUDO_TABLE_BITS; + + /* the aggregate function is aggregated into its local context */ + used_tables_cache |= (1 << aggr_sel->join->tables) - 1; + } +} + + String * Item_sum_num::val_str(String *str) { @@ -488,7 +519,7 @@ Item_sum_num::fix_fields(THD *thd, Item **ref) Item_sum_hybrid::Item_sum_hybrid(THD *thd, Item_sum_hybrid *item) :Item_sum(thd, item), value(item->value), hybrid_type(item->hybrid_type), hybrid_field_type(item->hybrid_field_type), cmp_sign(item->cmp_sign), - used_table_cache(item->used_table_cache), was_values(item->was_values) + was_values(item->was_values) { /* copy results from old value */ switch (hybrid_type) { @@ -1082,7 +1113,6 @@ void Item_sum_count::cleanup() DBUG_ENTER("Item_sum_count::cleanup"); count= 0; Item_sum_int::cleanup(); - used_table_cache= ~(table_map) 0; DBUG_VOID_RETURN; } @@ -1105,8 +1135,10 @@ void Item_sum_avg::fix_length_and_dec() f_scale= args[0]->decimals; dec_bin_size= my_decimal_get_binary_size(f_precision, f_scale); } - else + else { decimals= min(args[0]->decimals + prec_increment, NOT_FIXED_DEC); + max_length= args[0]->max_length + prec_increment; + } } @@ -1572,7 +1604,7 @@ void Item_sum_hybrid::cleanup() { DBUG_ENTER("Item_sum_hybrid::cleanup"); Item_sum::cleanup(); - used_table_cache= ~(table_map) 0; + forced_const= FALSE; /* by default it is TRUE to avoid TRUE reporting by diff --git a/sql/item_sum.h b/sql/item_sum.h index 4cf16fc79af..5cf4f93af0e 100644 --- a/sql/item_sum.h +++ b/sql/item_sum.h @@ -215,7 +215,9 @@ TODO: to catch queries where the limit is exceeded to make the code clean here. 
-*/ +*/ + +class st_select_lex; class Item_sum :public Item_result_field { @@ -231,25 +233,32 @@ public: Item_sum *next; /* next in the circular chain of registered objects */ uint arg_count; Item_sum *in_sum_func; /* embedding set function if any */ + st_select_lex * aggr_sel; /* select where the function is aggregated */ int8 nest_level; /* number of the nesting level of the set function */ int8 aggr_level; /* nesting level of the aggregating subquery */ int8 max_arg_level; /* max level of unbound column references */ int8 max_sum_func_level;/* max level of aggregation for embedded functions */ bool quick_group; /* If incremental update of fields */ +protected: + table_map used_tables_cache; + bool forced_const; + +public: + void mark_as_sum_func(); - Item_sum() :arg_count(0), quick_group(1) + Item_sum() :arg_count(0), quick_group(1), forced_const(FALSE) { mark_as_sum_func(); } - Item_sum(Item *a) - :args(tmp_args), arg_count(1), quick_group(1) + Item_sum(Item *a) :args(tmp_args), arg_count(1), quick_group(1), + forced_const(FALSE) { args[0]=a; mark_as_sum_func(); } - Item_sum( Item *a, Item *b ) - :args(tmp_args), arg_count(2), quick_group(1) + Item_sum( Item *a, Item *b ) :args(tmp_args), arg_count(2), quick_group(1), + forced_const(FALSE) { args[0]=a; args[1]=b; mark_as_sum_func(); @@ -319,10 +328,20 @@ public: virtual const char *func_name() const= 0; virtual Item *result_item(Field *field) { return new Item_field(field); } - table_map used_tables() const { return ~(table_map) 0; } /* Not used */ - bool const_item() const { return 0; } + table_map used_tables() const { return used_tables_cache; } + void update_used_tables (); + void cleanup() + { + Item::cleanup(); + forced_const= FALSE; + } bool is_null() { return null_value; } - void update_used_tables() { } + void make_const () + { + used_tables_cache= 0; + forced_const= TRUE; + } + virtual bool const_item() const { return forced_const; } void make_field(Send_field *field); void print(String *str); void fix_num_length_and_dec(); @@ -346,6 +365,8 @@ public: bool init_sum_func_check(THD *thd); bool check_sum_func(THD *thd, Item **ref); bool register_sum_func(THD *thd, Item **ref); + st_select_lex *depended_from() + { return (nest_level == aggr_level ? 
0 : aggr_sel); } }; @@ -509,23 +530,23 @@ public: class Item_sum_count :public Item_sum_int { longlong count; - table_map used_table_cache; public: Item_sum_count(Item *item_par) - :Item_sum_int(item_par),count(0),used_table_cache(~(table_map) 0) + :Item_sum_int(item_par),count(0) {} Item_sum_count(THD *thd, Item_sum_count *item) - :Item_sum_int(thd, item), count(item->count), - used_table_cache(item->used_table_cache) + :Item_sum_int(thd, item), count(item->count) {} - table_map used_tables() const { return used_table_cache; } - bool const_item() const { return !used_table_cache; } enum Sumfunctype sum_func () const { return COUNT_FUNC; } void clear(); void no_rows_in_result() { count=0; } bool add(); - void make_const(longlong count_arg) { count=count_arg; used_table_cache=0; } + void make_const(longlong count_arg) + { + count=count_arg; + Item_sum::make_const(); + } longlong val_int(); void reset_field(); void cleanup(); @@ -805,28 +826,22 @@ protected: Item_result hybrid_type; enum_field_types hybrid_field_type; int cmp_sign; - table_map used_table_cache; bool was_values; // Set if we have found at least one row (for max/min only) public: Item_sum_hybrid(Item *item_par,int sign) :Item_sum(item_par), sum(0.0), sum_int(0), hybrid_type(INT_RESULT), hybrid_field_type(MYSQL_TYPE_LONGLONG), - cmp_sign(sign), used_table_cache(~(table_map) 0), - was_values(TRUE) + cmp_sign(sign), was_values(TRUE) { collation.set(&my_charset_bin); } Item_sum_hybrid(THD *thd, Item_sum_hybrid *item); bool fix_fields(THD *, Item **); - table_map used_tables() const { return used_table_cache; } - bool const_item() const { return !used_table_cache; } - void clear(); double val_real(); longlong val_int(); my_decimal *val_decimal(my_decimal *); void reset_field(); String *val_str(String *); - void make_const() { used_table_cache=0; } bool keep_field_type(void) const { return 1; } enum Item_result result_type () const { return hybrid_type; } enum enum_field_types field_type() const { return hybrid_field_type; } diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h index 3a59abf1bda..ea93619e59a 100644 --- a/sql/item_timefunc.h +++ b/sql/item_timefunc.h @@ -351,7 +351,7 @@ public: enum_field_types field_type() const { return MYSQL_TYPE_DATE; } String *val_str(String *str); longlong val_int(); - double val_real() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } + double val_real() { return val_real_from_decimal(); } const char *func_name() const { return "date"; } void fix_length_and_dec() { @@ -389,6 +389,7 @@ public: return tmp_table_field_from_field_type(table, 0); } bool result_as_longlong() { return TRUE; } + double val_real() { return (double) val_int(); } my_decimal *val_decimal(my_decimal *decimal_value) { DBUG_ASSERT(fixed == 1); @@ -411,13 +412,14 @@ public: enum_field_types field_type() const { return MYSQL_TYPE_TIME; } void fix_length_and_dec() { - decimals=0; + decimals= DATETIME_DEC; max_length=MAX_TIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; } Field *tmp_table_field(TABLE *table) { return tmp_table_field_from_field_type(table, 0); } + double val_real() { return val_real_from_decimal(); } my_decimal *val_decimal(my_decimal *decimal_value) { DBUG_ASSERT(fixed == 1); @@ -521,7 +523,6 @@ public: Item_func_now() :Item_date_func() {} Item_func_now(Item *a) :Item_date_func(a) {} enum Item_result result_type () const { return STRING_RESULT; } - double val_real() { DBUG_ASSERT(fixed == 1); return (double) value; } longlong val_int() { DBUG_ASSERT(fixed == 1); return value; } int save_in_field(Field *to, bool 
no_conversions); String *val_str(String *str); @@ -611,11 +612,6 @@ class Item_func_from_unixtime :public Item_date_func THD *thd; public: Item_func_from_unixtime(Item *a) :Item_date_func(a) {} - double val_real() - { - DBUG_ASSERT(fixed == 1); - return (double) Item_func_from_unixtime::val_int(); - } longlong val_int(); String *val_str(String *str); const char *func_name() const { return "from_unixtime"; } @@ -653,7 +649,6 @@ class Item_func_convert_tz :public Item_date_func Item_func_convert_tz(Item *a, Item *b, Item *c): Item_date_func(a, b, c), from_tz_cached(0), to_tz_cached(0) {} longlong val_int(); - double val_real() { return (double) val_int(); } String *val_str(String *str); const char *func_name() const { return "convert_tz"; } void fix_length_and_dec(); @@ -678,7 +673,6 @@ public: Item_str_timefunc::fix_length_and_dec(); collation.set(&my_charset_bin); maybe_null=1; - decimals= DATETIME_DEC; } const char *func_name() const { return "sec_to_time"; } bool result_as_longlong() { return TRUE; } @@ -700,7 +694,6 @@ public: const char *func_name() const { return "date_add_interval"; } void fix_length_and_dec(); enum_field_types field_type() const { return cached_field_type; } - double val_real() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } longlong val_int(); bool get_date(TIME *res, uint fuzzy_date); bool eq(const Item *item, bool binary_cmp) const; @@ -805,6 +798,7 @@ public: } bool result_as_longlong() { return TRUE; } longlong val_int(); + double val_real() { return (double) val_int(); } my_decimal *val_decimal(my_decimal *decimal_value) { DBUG_ASSERT(fixed == 1); @@ -832,6 +826,7 @@ public: } bool result_as_longlong() { return TRUE; } longlong val_int(); + double val_real() { return val_real_from_decimal(); } my_decimal *val_decimal(my_decimal *decimal_value) { DBUG_ASSERT(fixed == 1); @@ -856,8 +851,15 @@ public: { return tmp_table_field_from_field_type(table, 0); } + void fix_length_and_dec() + { + Item_typecast_maybe_null::fix_length_and_dec(); + decimals= DATETIME_DEC; + } bool result_as_longlong() { return TRUE; } longlong val_int(); + double val_real() { return val_real_from_decimal(); } + double val() { return (double) val_int(); } my_decimal *val_decimal(my_decimal *decimal_value) { DBUG_ASSERT(fixed == 1); @@ -906,6 +908,7 @@ public: void print(String *str); const char *func_name() const { return "add_time"; } bool check_partition_func_processor(byte *int_arg) {return FALSE;} + double val_real() { return val_real_from_decimal(); } my_decimal *val_decimal(my_decimal *decimal_value) { DBUG_ASSERT(fixed == 1); diff --git a/sql/lock.cc b/sql/lock.cc index 2373b62eb0a..4427e57a938 100644 --- a/sql/lock.cc +++ b/sql/lock.cc @@ -582,7 +582,7 @@ TABLE_LIST *mysql_lock_have_duplicate(THD *thd, TABLE_LIST *needle, goto end; /* A temporary table does not have locks. */ - if (table->s->tmp_table == TMP_TABLE) + if (table->s->tmp_table == NON_TRANSACTIONAL_TMP_TABLE) goto end; /* Get command lock or LOCK TABLES lock. Maybe empty for INSERT DELAYED. */ @@ -607,7 +607,7 @@ TABLE_LIST *mysql_lock_have_duplicate(THD *thd, TABLE_LIST *needle, if (haystack->placeholder()) continue; table2= haystack->table; - if (table2->s->tmp_table == TMP_TABLE) + if (table2->s->tmp_table == NON_TRANSACTIONAL_TMP_TABLE) continue; /* All tables in list must be in lock. 
*/ @@ -695,7 +695,7 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, uint system_count= 0; for (i=tables=lock_count=0 ; i < count ; i++) { - if (table_ptr[i]->s->tmp_table != TMP_TABLE) + if (table_ptr[i]->s->tmp_table != NON_TRANSACTIONAL_TMP_TABLE) { tables+=table_ptr[i]->file->lock_count(); lock_count++; @@ -737,7 +737,7 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, TABLE *table; enum thr_lock_type lock_type; - if ((table=table_ptr[i])->s->tmp_table == TMP_TABLE) + if ((table=table_ptr[i])->s->tmp_table == NON_TRANSACTIONAL_TMP_TABLE) continue; lock_type= table->reginfo.lock_type; if (lock_type >= TL_WRITE_ALLOW_WRITE) diff --git a/sql/log.cc b/sql/log.cc index 1994660bf5b..fe3a4f7df5e 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -68,7 +68,7 @@ char *make_default_log_name(char *buff,const char* log_ext) { strmake(buff, pidfile_name, FN_REFLEN-5); return fn_format(buff, buff, mysql_data_home, log_ext, - MYF(MY_UNPACK_FILENAME|MY_APPEND_EXT)); + MYF(MY_UNPACK_FILENAME|MY_REPLACE_EXT)); } /* @@ -1548,7 +1548,7 @@ static int binlog_commit(handlerton *hton, THD *thd, bool all) (binlog_trx_data*) thd->ha_data[binlog_hton->slot]; DBUG_ASSERT(mysql_bin_log.is_open()); - if (all && trx_data->empty()) + if (trx_data->empty()) { // we're here because trans_log was flushed in MYSQL_BIN_LOG::log_xid() trx_data->reset(); @@ -2499,7 +2499,7 @@ bool MYSQL_BIN_LOG::open(const char *log_name, /* Set 'created' to 0, so that in next relay logs this event does not trigger cleaning actions on the slave in - Format_description_log_event::exec_event(). + Format_description_log_event::apply_event_impl(). */ description_event_for_queue->created= 0; /* Don't set log_pos in event header */ @@ -3206,8 +3206,10 @@ void MYSQL_BIN_LOG::new_file_impl(bool need_lock) { tc_log_page_waits++; pthread_mutex_lock(&LOCK_prep_xids); - while (prepared_xids) + while (prepared_xids) { + DBUG_PRINT("info", ("prepared_xids=%lu", prepared_xids)); pthread_cond_wait(&COND_prep_xids, &LOCK_prep_xids); + } pthread_mutex_unlock(&LOCK_prep_xids); } @@ -5061,8 +5063,10 @@ void TC_LOG_BINLOG::unlog(ulong cookie, my_xid xid) { pthread_mutex_lock(&LOCK_prep_xids); DBUG_ASSERT(prepared_xids > 0); - if (--prepared_xids == 0) + if (--prepared_xids == 0) { + DBUG_PRINT("info", ("prepared_xids=%lu", prepared_xids)); pthread_cond_signal(&COND_prep_xids); + } pthread_mutex_unlock(&LOCK_prep_xids); rotate_and_purge(0); // as ::write() did not rotate } diff --git a/sql/log.h b/sql/log.h index 970823dcd4a..ed0c3557d08 100644 --- a/sql/log.h +++ b/sql/log.h @@ -238,7 +238,7 @@ class MYSQL_BIN_LOG: public TC_LOG, private MYSQL_LOG fix_max_relay_log_size). 
*/ ulong max_size; - ulong prepared_xids; /* for tc log - number of xids to remember */ + long prepared_xids; /* for tc log - number of xids to remember */ // current file sequence number for load data infile binary logging uint file_id; uint open_count; // For replication diff --git a/sql/log_event.cc b/sql/log_event.cc index 8714c60f2c0..e3c94b5e1c9 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -75,8 +75,7 @@ public: ~Write_on_release_cache() { - if (!my_b_copy_to_file(m_cache, m_file)) - reinit_io_cache(m_cache, WRITE_CACHE, 0L, FALSE, TRUE); + copy_event_cache_to_file_and_reinit(m_cache, m_file); if (m_flags | FLUSH_F) fflush(m_file); } @@ -88,9 +87,10 @@ public: operator&() DESCRIPTION - Function to return a pointer to the internal, so that the object - can be treated as a IO_CACHE and used with the my_b_* IO_CACHE - functions + + Function to return a pointer to the internal cache, so that the + object can be treated as a IO_CACHE and used with the my_b_* + IO_CACHE functions RETURN VALUE A pointer to the internal IO_CACHE. @@ -305,7 +305,7 @@ static bool write_str(IO_CACHE *file, char *str, uint length) read_str() */ -static inline int read_str(char **buf, char *buf_end, char **str, +static inline int read_str(const char **buf, const char *buf_end, const char **str, uint8 *len) { if (*buf + ((uint) (uchar) **buf) >= buf_end) @@ -420,6 +420,7 @@ const char* Log_event::get_type_str() case DELETE_ROWS_EVENT: return "Delete_rows"; case BEGIN_LOAD_QUERY_EVENT: return "Begin_load_query"; case EXECUTE_LOAD_QUERY_EVENT: return "Execute_load_query"; + case INCIDENT_EVENT: return "Incident"; default: return "Unknown"; /* impossible */ } } @@ -531,25 +532,19 @@ Log_event::Log_event(const char* buf, #ifndef MYSQL_CLIENT #ifdef HAVE_REPLICATION -/* - Log_event::exec_event() -*/ - -int Log_event::exec_event(struct st_relay_log_info* rli) +int Log_event::do_update_pos(RELAY_LOG_INFO *rli) { - DBUG_ENTER("Log_event::exec_event"); - /* - rli is null when (as far as I (Guilhem) know) - the caller is - Load_log_event::exec_event *and* that one is called from - Execute_load_log_event::exec_event. - In this case, we don't do anything here ; - Execute_load_log_event::exec_event will call Log_event::exec_event - again later with the proper rli. - Strictly speaking, if we were sure that rli is null - only in the case discussed above, 'if (rli)' is useless here. - But as we are not 100% sure, keep it for now. + rli is null when (as far as I (Guilhem) know) the caller is + Load_log_event::do_apply_event *and* that one is called from + Execute_load_log_event::do_apply_event. In this case, we don't + do anything here ; Execute_load_log_event::do_apply_event will + call Log_event::do_apply_event again later with the proper rli. + Strictly speaking, if we were sure that rli is null only in the + case discussed above, 'if (rli)' is useless here. But as we are + not 100% sure, keep it for now. + + Matz: I don't think we will need this check with this refactoring. */ if (rli) { @@ -584,18 +579,37 @@ int Log_event::exec_event(struct st_relay_log_info* rli) { rli->inc_group_relay_log_pos(log_pos); flush_relay_log_info(rli); - /* - Note that Rotate_log_event::exec_event() does not call this - function, so there is no chance that a fake rotate event resets - last_master_timestamp. - Note that we update without mutex (probably ok - except in some very - rare cases, only consequence is that value may take some time to - display in Seconds_Behind_Master - not critical). 
+ /* + Note that Rotate_log_event::do_apply_event() does not call + this function, so there is no chance that a fake rotate event + resets last_master_timestamp. Note that we update without + mutex (probably ok - except in some very rare cases, only + consequence is that value may take some time to display in + Seconds_Behind_Master - not critical). */ rli->last_master_timestamp= when; } } - DBUG_RETURN(0); + + return 0; // Cannot fail currently +} + + +Log_event::enum_skip_reason +Log_event::do_shall_skip(RELAY_LOG_INFO *rli) +{ + DBUG_PRINT("info", ("ev->server_id=%lu, ::server_id=%lu," + " rli->replicate_same_server_id=%d," + " rli->slave_skip_counter=%d", + (ulong) server_id, (ulong) ::server_id, + rli->replicate_same_server_id, + rli->slave_skip_counter)); + if (server_id == ::server_id && !rli->replicate_same_server_id) + return EVENT_SKIP_IGNORE; + else if (rli->slave_skip_counter > 0) + return EVENT_SKIP_COUNT; + else + return EVENT_SKIP_NOT; } @@ -743,7 +757,7 @@ int Log_event::read_log_event(IO_CACHE* file, String* packet, ulong data_len; int result=0; char buf[LOG_EVENT_MINIMAL_HEADER_LEN]; - DBUG_ENTER("read_log_event"); + DBUG_ENTER("Log_event::read_log_event"); if (log_lock) pthread_mutex_lock(log_lock); @@ -818,7 +832,7 @@ Log_event* Log_event::read_log_event(IO_CACHE* file, const Format_description_log_event *description_event) #endif { - DBUG_ENTER("Log_event::read_log_event(IO_CACHE *, Format_description_log_event *"); + DBUG_ENTER("Log_event::read_log_event"); DBUG_ASSERT(description_event != 0); char head[LOG_EVENT_MINIMAL_HEADER_LEN]; /* @@ -998,10 +1012,14 @@ Log_event* Log_event::read_log_event(const char* buf, uint event_len, ev = new Begin_load_query_log_event(buf, event_len, description_event); break; case EXECUTE_LOAD_QUERY_EVENT: - ev = new Execute_load_query_log_event(buf, event_len, description_event); + ev= new Execute_load_query_log_event(buf, event_len, description_event); + break; + case INCIDENT_EVENT: + ev = new Incident_log_event(buf, event_len, description_event); break; default: - DBUG_PRINT("error",("Unknown evernt code: %d",(int) buf[EVENT_TYPE_OFFSET])); + DBUG_PRINT("error",("Unknown event code: %d", + (int) buf[EVENT_TYPE_OFFSET])); ev= NULL; break; } @@ -1888,27 +1906,28 @@ void Query_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) /* - Query_log_event::exec_event() + Query_log_event::do_apply_event() */ #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -int Query_log_event::exec_event(struct st_relay_log_info* rli) +int Query_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { - return exec_event(rli, query, q_len); + return do_apply_event(rli, query, q_len); } -int Query_log_event::exec_event(struct st_relay_log_info* rli, - const char *query_arg, uint32 q_len_arg) +int Query_log_event::do_apply_event(RELAY_LOG_INFO const *rli, + const char *query_arg, uint32 q_len_arg) { LEX_STRING new_db; int expected_error,actual_error= 0; /* - Colleagues: please never free(thd->catalog) in MySQL. This would lead to - bugs as here thd->catalog is a part of an alloced block, not an entire - alloced block (see Query_log_event::exec_event()). Same for thd->db. - Thank you. + Colleagues: please never free(thd->catalog) in MySQL. This would + lead to bugs as here thd->catalog is a part of an alloced block, + not an entire alloced block (see + Query_log_event::do_apply_event()). Same for thd->db. Thank + you. */ thd->catalog= catalog_len ? 
(char *) catalog : (char *)""; new_db.length= db_len; @@ -1927,11 +1946,11 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli, END of the current log event (COMMIT). We save it in rli so that InnoDB can access it. */ - rli->future_group_master_log_pos= log_pos; + const_cast<RELAY_LOG_INFO*>(rli)->future_group_master_log_pos= log_pos; DBUG_PRINT("info", ("log_pos: %lu", (ulong) log_pos)); - clear_all_errors(thd, rli); - rli->clear_tables_to_lock(); + clear_all_errors(thd, const_cast<RELAY_LOG_INFO*>(rli)); + const_cast<RELAY_LOG_INFO*>(rli)->clear_tables_to_lock(); /* Note: We do not need to execute reset_one_shot_variables() if this @@ -1940,8 +1959,8 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli, its companion query. If the SET is ignored because of db_ok(), the companion query will also be ignored, and if the companion query is ignored in the db_ok() test of - ::exec_event(), then the companion SET also have so we - don't need to reset_one_shot_variables(). + ::do_apply_event(), then the companion SET also have so + we don't need to reset_one_shot_variables(). */ if (rpl_filter->db_ok(thd->db)) { @@ -2056,7 +2075,7 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli, to check/fix it. */ if (mysql_test_parse_for_slave(thd, thd->query, thd->query_length)) - clear_all_errors(thd, rli); /* Can ignore query */ + clear_all_errors(thd, const_cast<RELAY_LOG_INFO*>(rli)); /* Can ignore query */ else { slave_print_msg(ERROR_LEVEL, rli, expected_error, @@ -2107,7 +2126,7 @@ Default database: '%s'. Query: '%s'", ignored_error_code(actual_error)) { DBUG_PRINT("info",("error ignored")); - clear_all_errors(thd, rli); + clear_all_errors(thd, const_cast<RELAY_LOG_INFO*>(rli)); } /* Other cases: mostly we expected no error and get one. @@ -2174,16 +2193,26 @@ end: thd->first_successful_insert_id_in_prev_stmt= 0; thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0; free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC)); + return thd->query_error; +} + +int Query_log_event::do_update_pos(RELAY_LOG_INFO *rli) +{ /* - If there was an error we stop. Otherwise we increment positions. Note that - we will not increment group* positions if we are just after a SET - ONE_SHOT, because SET ONE_SHOT should not be separated from its following - updating query. + Note that we will not increment group* positions if we are just + after a SET ONE_SHOT, because SET ONE_SHOT should not be separated + from its following updating query. */ - return (thd->query_error ? thd->query_error : - (thd->one_shot_set ? 
(rli->inc_event_relay_log_pos(),0) : - Log_event::exec_event(rli))); + if (thd->one_shot_set) + { + rli->inc_event_relay_log_pos(); + return 0; + } + else + return Log_event::do_update_pos(rli); } + + #endif @@ -2312,7 +2341,7 @@ bool Start_log_event_v3::write(IO_CACHE* file) /* - Start_log_event_v3::exec_event() + Start_log_event_v3::do_apply_event() The master started @@ -2331,9 +2360,9 @@ bool Start_log_event_v3::write(IO_CACHE* file) */ #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -int Start_log_event_v3::exec_event(struct st_relay_log_info* rli) +int Start_log_event_v3::do_apply_event(RELAY_LOG_INFO const *rli) { - DBUG_ENTER("Start_log_event_v3::exec_event"); + DBUG_ENTER("Start_log_event_v3::do_apply_event"); switch (binlog_version) { case 3: @@ -2375,7 +2404,7 @@ int Start_log_event_v3::exec_event(struct st_relay_log_info* rli) /* this case is impossible */ DBUG_RETURN(1); } - DBUG_RETURN(Log_event::exec_event(rli)); + DBUG_RETURN(0); } #endif /* defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) */ @@ -2456,6 +2485,7 @@ Format_description_log_event(uint8 binlog_ver, const char* server_ver) post_header_len[DELETE_ROWS_EVENT-1]= 6;); post_header_len[BEGIN_LOAD_QUERY_EVENT-1]= post_header_len[APPEND_BLOCK_EVENT-1]; post_header_len[EXECUTE_LOAD_QUERY_EVENT-1]= EXECUTE_LOAD_QUERY_HEADER_LEN; + post_header_len[INCIDENT_EVENT-1]= INCIDENT_HEADER_LEN; } break; @@ -2566,24 +2596,10 @@ bool Format_description_log_event::write(IO_CACHE* file) } #endif -/* - SYNOPSIS - Format_description_log_event::exec_event() - - IMPLEMENTATION - Save the information which describes the binlog's format, to be able to - read all coming events. - Call Start_log_event_v3::exec_event(). -*/ - #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -int Format_description_log_event::exec_event(struct st_relay_log_info* rli) +int Format_description_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { - DBUG_ENTER("Format_description_log_event::exec_event"); - - /* save the information describing this binlog */ - delete rli->relay_log.description_event_for_exec; - rli->relay_log.description_event_for_exec= this; + DBUG_ENTER("Format_description_log_event::do_apply_event"); #ifdef USING_TRANSACTIONS /* @@ -2605,14 +2621,36 @@ int Format_description_log_event::exec_event(struct st_relay_log_info* rli) "or ROLLBACK in relay log). A probable cause is that " "the master died while writing the transaction to " "its binary log, thus rolled back too."); - rli->cleanup_context(thd, 1); + const_cast<RELAY_LOG_INFO*>(rli)->cleanup_context(thd, 1); } #endif /* - If this event comes from ourselves, there is no cleaning task to perform, - we don't call Start_log_event_v3::exec_event() (this was just to update the - log's description event). + If this event comes from ourselves, there is no cleaning task to + perform, we don't call Start_log_event_v3::do_apply_event() + (this was just to update the log's description event). */ + if (server_id != (uint32) ::server_id) + { + /* + If the event was not requested by the slave i.e. the master sent + it while the slave asked for a position >4, the event will make + rli->group_master_log_pos advance. Say that the slave asked for + position 1000, and the Format_desc event's end is 96. Then in + the beginning of replication rli->group_master_log_pos will be + 0, then 96, then jump to first really asked event (which is + >96). So this is ok. 
+ */ + DBUG_RETURN(Start_log_event_v3::do_apply_event(rli)); + } + DBUG_RETURN(0); +} + +int Format_description_log_event::do_update_pos(RELAY_LOG_INFO *rli) +{ + /* save the information describing this binlog */ + delete rli->relay_log.description_event_for_exec; + rli->relay_log.description_event_for_exec= this; + if (server_id == (uint32) ::server_id) { /* @@ -2629,19 +2667,20 @@ int Format_description_log_event::exec_event(struct st_relay_log_info* rli) the Intvar_log_event respectively. */ rli->inc_event_relay_log_pos(); - DBUG_RETURN(0); + return 0; + } + else + { + return Log_event::do_update_pos(rli); } +} - /* - If the event was not requested by the slave i.e. the master sent it while - the slave asked for a position >4, the event will make - rli->group_master_log_pos advance. Say that the slave asked for position - 1000, and the Format_desc event's end is 96. Then in the beginning of - replication rli->group_master_log_pos will be 0, then 96, then jump to - first really asked event (which is >96). So this is ok. - */ - DBUG_RETURN(Start_log_event_v3::exec_event(rli)); +Log_event::enum_skip_reason +Format_description_log_event::do_shall_skip(RELAY_LOG_INFO *rli) +{ + return Log_event::EVENT_SKIP_NOT; } + #endif @@ -3155,30 +3194,32 @@ void Load_log_event::set_fields(const char* affected_db, Does the data loading job when executing a LOAD DATA on the slave SYNOPSIS - Load_log_event::exec_event - net - rli - use_rli_only_for_errors - if set to 1, rli is provided to - Load_log_event::exec_event only for this - function to have RPL_LOG_NAME and - rli->last_slave_error, both being used by - error reports. rli's position advancing - is skipped (done by the caller which is - Execute_load_log_event::exec_event). - - if set to 0, rli is provided for full use, - i.e. for error reports and position - advancing. + Load_log_event::do_apply_event + net + rli + use_rli_only_for_errors - if set to 1, rli is provided to + Load_log_event::do_apply_event + only for this function to have + RPL_LOG_NAME and + rli->last_slave_error, both being + used by error reports. rli's + position advancing is skipped (done + by the caller which is + Execute_load_log_event::do_apply_event). + - if set to 0, rli is provided for + full use, i.e. for error reports and + position advancing. 
DESCRIPTION Does the data loading job when executing a LOAD DATA on the slave - + RETURN VALUE - 0 Success + 0 Success 1 Failure */ -int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, - bool use_rli_only_for_errors) +int Load_log_event::do_apply_event(NET* net, RELAY_LOG_INFO const *rli, + bool use_rli_only_for_errors) { LEX_STRING new_db; new_db.length= db_len; @@ -3187,9 +3228,9 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, DBUG_ASSERT(thd->query == 0); thd->query_length= 0; // Should not be needed thd->query_error= 0; - clear_all_errors(thd, rli); + clear_all_errors(thd, const_cast<RELAY_LOG_INFO*>(rli)); - /* see Query_log_event::exec_event() and BUG#13360 */ + /* see Query_log_event::do_apply_event() and BUG#13360 */ DBUG_ASSERT(!rli->m_table_map.count()); /* Usually mysql_init_query() is called by mysql_parse(), but we need it here @@ -3198,22 +3239,26 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, mysql_init_query(thd, 0, 0); if (!use_rli_only_for_errors) { - /* Saved for InnoDB, see comment in Query_log_event::exec_event() */ - rli->future_group_master_log_pos= log_pos; + /* + Saved for InnoDB, see comment in + Query_log_event::do_apply_event() + */ + const_cast<RELAY_LOG_INFO*>(rli)->future_group_master_log_pos= log_pos; DBUG_PRINT("info", ("log_pos: %lu", (ulong) log_pos)); } /* - We test replicate_*_db rules. Note that we have already prepared the file - to load, even if we are going to ignore and delete it now. So it is - possible that we did a lot of disk writes for nothing. In other words, a - big LOAD DATA INFILE on the master will still consume a lot of space on - the slave (space in the relay log + space of temp files: twice the space - of the file to load...) even if it will finally be ignored. - TODO: fix this; this can be done by testing rules in - Create_file_log_event::exec_event() and then discarding Append_block and - al. Another way is do the filtering in the I/O thread (more efficient: no - disk writes at all). + We test replicate_*_db rules. Note that we have already prepared + the file to load, even if we are going to ignore and delete it + now. So it is possible that we did a lot of disk writes for + nothing. In other words, a big LOAD DATA INFILE on the master will + still consume a lot of space on the slave (space in the relay log + + space of temp files: twice the space of the file to load...) + even if it will finally be ignored. TODO: fix this; this can be + done by testing rules in Create_file_log_event::do_apply_event() + and then discarding Append_block and al. Another way is do the + filtering in the I/O thread (more efficient: no disk writes at + all). Note: We do not need to execute reset_one_shot_variables() if this @@ -3222,8 +3267,8 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, its companion query. If the SET is ignored because of db_ok(), the companion query will also be ignored, and if the companion query is ignored in the db_ok() test of - ::exec_event(), then the companion SET also have so we - don't need to reset_one_shot_variables(). + ::do_apply_event(), then the companion SET also have so + we don't need to reset_one_shot_variables(). */ if (rpl_filter->db_ok(thd->db)) { @@ -3419,7 +3464,7 @@ Fatal error running LOAD DATA INFILE on table '%s'. Default database: '%s'", return 1; } - return ( use_rli_only_for_errors ? 0 : Log_event::exec_event(rli) ); + return ( use_rli_only_for_errors ? 
0 : Log_event::do_apply_event(rli) ); } #endif @@ -3513,6 +3558,7 @@ Rotate_log_event::Rotate_log_event(const char* buf, uint event_len, ident_offset = post_header_len; set_if_smaller(ident_len,FN_REFLEN-1); new_log_ident= my_strndup(buf + ident_offset, (uint) ident_len, MYF(MY_WME)); + DBUG_PRINT("debug", ("new_log_ident: '%s'", new_log_ident)); DBUG_VOID_RETURN; } @@ -3532,8 +3578,20 @@ bool Rotate_log_event::write(IO_CACHE* file) } #endif +/** + Helper function to detect if the event is inside a group. + */ +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) +static bool is_in_group(THD *const thd, RELAY_LOG_INFO *const rli) +{ + return (thd->options & OPTION_BEGIN) != 0 || + (rli->last_event_start_time > 0); +} +#endif + + /* - Rotate_log_event::exec_event() + Rotate_log_event::do_apply_event() Got a rotate log event from the master @@ -3550,34 +3608,49 @@ bool Rotate_log_event::write(IO_CACHE* file) */ #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -int Rotate_log_event::exec_event(struct st_relay_log_info* rli) +int Rotate_log_event::do_update_pos(RELAY_LOG_INFO *rli) { - DBUG_ENTER("Rotate_log_event::exec_event"); + DBUG_ENTER("Rotate_log_event::do_update_pos"); +#ifndef DBUG_OFF + char buf[32]; +#endif + + DBUG_PRINT("info", ("server_id=%lu; ::server_id=%lu", + (ulong) this->server_id, (ulong) ::server_id)); + DBUG_PRINT("info", ("new_log_ident: %s", this->new_log_ident)); + DBUG_PRINT("info", ("pos: %s", llstr(this->pos, buf))); pthread_mutex_lock(&rli->data_lock); rli->event_relay_log_pos= my_b_tell(rli->cur_log); /* - If we are in a transaction: the only normal case is when the I/O thread was - copying a big transaction, then it was stopped and restarted: we have this - in the relay log: + If we are in a transaction or in a group: the only normal case is + when the I/O thread was copying a big transaction, then it was + stopped and restarted: we have this in the relay log: + BEGIN ... ROTATE (a fake one) ... COMMIT or ROLLBACK - In that case, we don't want to touch the coordinates which correspond to - the beginning of the transaction. - Starting from 5.0.0, there also are some rotates from the slave itself, in - the relay log. + + In that case, we don't want to touch the coordinates which + correspond to the beginning of the transaction. Starting from + 5.0.0, there also are some rotates from the slave itself, in the + relay log, which shall not change the group positions. 
*/ - if (!(thd->options & OPTION_BEGIN)) + if ((server_id != ::server_id || rli->replicate_same_server_id) && + !is_in_group(thd, rli)) { + DBUG_PRINT("info", ("old group_master_log_name: '%s' " + "old group_master_log_pos: %lu", + rli->group_master_log_name, + (ulong) rli->group_master_log_pos)); memcpy(rli->group_master_log_name, new_log_ident, ident_len+1); rli->notify_group_master_log_name_update(); rli->group_master_log_pos= pos; rli->group_relay_log_pos= rli->event_relay_log_pos; - DBUG_PRINT("info", ("group_master_log_name: '%s' " - "group_master_log_pos: %lu", + DBUG_PRINT("info", ("new group_master_log_name: '%s' " + "new group_master_log_pos: %lu", rli->group_master_log_name, (ulong) rli->group_master_log_pos)); /* @@ -3596,8 +3669,28 @@ int Rotate_log_event::exec_event(struct st_relay_log_info* rli) pthread_mutex_unlock(&rli->data_lock); pthread_cond_broadcast(&rli->data_cond); flush_relay_log_info(rli); + DBUG_RETURN(0); } + + +Log_event::enum_skip_reason +Rotate_log_event::do_shall_skip(RELAY_LOG_INFO *rli) +{ + enum_skip_reason reason= Log_event::do_shall_skip(rli); + + switch (reason) { + case Log_event::EVENT_SKIP_NOT: + case Log_event::EVENT_SKIP_COUNT: + return Log_event::EVENT_SKIP_NOT; + + case Log_event::EVENT_SKIP_IGNORE: + return Log_event::EVENT_SKIP_IGNORE; + } + DBUG_ASSERT(0); + return Log_event::EVENT_SKIP_NOT; // To keep compiler happy +} + #endif @@ -3704,11 +3797,11 @@ void Intvar_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) /* - Intvar_log_event::exec_event() + Intvar_log_event::do_apply_event() */ #if defined(HAVE_REPLICATION)&& !defined(MYSQL_CLIENT) -int Intvar_log_event::exec_event(struct st_relay_log_info* rli) +int Intvar_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { switch (type) { case LAST_INSERT_ID_EVENT: @@ -3719,9 +3812,33 @@ int Intvar_log_event::exec_event(struct st_relay_log_info* rli) thd->force_one_auto_inc_interval(val); break; } + return 0; +} + +int Intvar_log_event::do_update_pos(RELAY_LOG_INFO *rli) +{ rli->inc_event_relay_log_pos(); return 0; } + + +Log_event::enum_skip_reason +Intvar_log_event::do_shall_skip(RELAY_LOG_INFO *rli) +{ + /* + It is a common error to set the slave skip counter to 1 instead of + 2 when recovering from an insert which used a auto increment, + rand, or user var. Therefore, if the slave skip counter is 1, we + just say that this event should be skipped by ignoring it, meaning + that we do not change the value of the slave skip counter since it + will be decreased by the following insert event. + */ + if (rli->slave_skip_counter == 1) + return Log_event::EVENT_SKIP_IGNORE; + else + return Log_event::do_shall_skip(rli); +} + #endif @@ -3784,13 +3901,37 @@ void Rand_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -int Rand_log_event::exec_event(struct st_relay_log_info* rli) +int Rand_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { thd->rand.seed1= (ulong) seed1; thd->rand.seed2= (ulong) seed2; + return 0; +} + +int Rand_log_event::do_update_pos(RELAY_LOG_INFO *rli) +{ rli->inc_event_relay_log_pos(); return 0; } + + +Log_event::enum_skip_reason +Rand_log_event::do_shall_skip(RELAY_LOG_INFO *rli) +{ + /* + It is a common error to set the slave skip counter to 1 instead of + 2 when recovering from an insert which used a auto increment, + rand, or user var. 
Therefore, if the slave skip counter is 1, we + just say that this event should be skipped by ignoring it, meaning + that we do not change the value of the slave skip counter since it + will be decreased by the following insert event. + */ + if (rli->slave_skip_counter == 1) + return Log_event::EVENT_SKIP_IGNORE; + else + return Log_event::do_shall_skip(rli); +} + #endif /* !MYSQL_CLIENT */ @@ -3857,12 +3998,12 @@ void Xid_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -int Xid_log_event::exec_event(struct st_relay_log_info* rli) +int Xid_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { /* For a slave Xid_log_event is COMMIT */ general_log_print(thd, COM_QUERY, "COMMIT /* implicit, from Xid_log_event */"); - return end_trans(thd, COMMIT) || Log_event::exec_event(rli); + return end_trans(thd, COMMIT); } #endif /* !MYSQL_CLIENT */ @@ -4140,11 +4281,11 @@ void User_var_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) /* - User_var_log_event::exec_event() + User_var_log_event::do_apply_event() */ #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -int User_var_log_event::exec_event(struct st_relay_log_info* rli) +int User_var_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { Item *it= 0; CHARSET_INFO *charset; @@ -4206,9 +4347,31 @@ int User_var_log_event::exec_event(struct st_relay_log_info* rli) e.update_hash(val, val_len, type, charset, DERIVATION_IMPLICIT, 0); free_root(thd->mem_root,0); + return 0; +} + +int User_var_log_event::do_update_pos(RELAY_LOG_INFO *rli) +{ rli->inc_event_relay_log_pos(); return 0; } + +Log_event::enum_skip_reason +User_var_log_event::do_shall_skip(RELAY_LOG_INFO *rli) +{ + /* + It is a common error to set the slave skip counter to 1 instead + of 2 when recovering from an insert which used a auto increment, + rand, or user var. Therefore, if the slave skip counter is 1, we + just say that this event should be skipped by ignoring it, meaning + that we do not change the value of the slave skip counter since it + will be decreased by the following insert event. + */ + if (rli->slave_skip_counter == 1) + return Log_event::EVENT_SKIP_IGNORE; + else + return Log_event::do_shall_skip(rli); +} #endif /* !MYSQL_CLIENT */ @@ -4248,7 +4411,7 @@ void Slave_log_event::pack_info(Protocol *protocol) #ifndef MYSQL_CLIENT Slave_log_event::Slave_log_event(THD* thd_arg, - struct st_relay_log_info* rli) + RELAY_LOG_INFO* rli) :Log_event(thd_arg, 0, 0) , mem_pool(0), master_host(0) { DBUG_ENTER("Slave_log_event"); @@ -4358,11 +4521,11 @@ Slave_log_event::Slave_log_event(const char* buf, uint event_len) #ifndef MYSQL_CLIENT -int Slave_log_event::exec_event(struct st_relay_log_info* rli) +int Slave_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { if (mysql_bin_log.is_open()) mysql_bin_log.write(this); - return Log_event::exec_event(rli); + return 0; } #endif /* !MYSQL_CLIENT */ @@ -4391,21 +4554,21 @@ void Stop_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) /* - Stop_log_event::exec_event() - - The master stopped. - We used to clean up all temporary tables but this is useless as, as the - master has shut down properly, it has written all DROP TEMPORARY TABLE - (prepared statements' deletion is TODO only when we binlog prep stmts). - We used to clean up slave_load_tmpdir, but this is useless as it has been - cleared at the end of LOAD DATA INFILE. - So we have nothing to do here. 
- The place were we must do this cleaning is in Start_log_event_v3::exec_event(), - not here. Because if we come here, the master was sane. + Stop_log_event::do_apply_event() + + The master stopped. We used to clean up all temporary tables but + this is useless as, as the master has shut down properly, it has + written all DROP TEMPORARY TABLE (prepared statements' deletion is + TODO only when we binlog prep stmts). We used to clean up + slave_load_tmpdir, but this is useless as it has been cleared at the + end of LOAD DATA INFILE. So we have nothing to do here. The place + were we must do this cleaning is in + Start_log_event_v3::do_apply_event(), not here. Because if we come + here, the master was sane. */ #ifndef MYSQL_CLIENT -int Stop_log_event::exec_event(struct st_relay_log_info* rli) +int Stop_log_event::do_update_pos(RELAY_LOG_INFO *rli) { /* We do not want to update master_log pos because we get a rotate event @@ -4423,6 +4586,7 @@ int Stop_log_event::exec_event(struct st_relay_log_info* rli) } return 0; } + #endif /* !MYSQL_CLIENT */ #endif /* HAVE_REPLICATION */ @@ -4613,11 +4777,11 @@ void Create_file_log_event::pack_info(Protocol *protocol) /* - Create_file_log_event::exec_event() + Create_file_log_event::do_apply_event() */ #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -int Create_file_log_event::exec_event(struct st_relay_log_info* rli) +int Create_file_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { char proc_info[17+FN_REFLEN+10], *fname_buf; char *ext; @@ -4679,7 +4843,7 @@ err: if (fd >= 0) my_close(fd, MYF(0)); thd->proc_info= 0; - return error ? 1 : Log_event::exec_event(rli); + return error == 0; } #endif /* defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) */ @@ -4787,15 +4951,15 @@ int Append_block_log_event::get_create_or_append() const } /* - Append_block_log_event::exec_event() + Append_block_log_event::do_apply_event() */ -int Append_block_log_event::exec_event(struct st_relay_log_info* rli) +int Append_block_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { char proc_info[17+FN_REFLEN+10], *fname= proc_info+17; int fd; int error = 1; - DBUG_ENTER("Append_block_log_event::exec_event"); + DBUG_ENTER("Append_block_log_event::do_apply_event"); fname= strmov(proc_info, "Making temp file "); slave_load_file_stem(fname, file_id, server_id, ".data"); @@ -4834,7 +4998,7 @@ err: if (fd >= 0) my_close(fd, MYF(0)); thd->proc_info= 0; - DBUG_RETURN(error ? 
error : Log_event::exec_event(rli)); + DBUG_RETURN(error); } #endif @@ -4918,18 +5082,18 @@ void Delete_file_log_event::pack_info(Protocol *protocol) #endif /* - Delete_file_log_event::exec_event() + Delete_file_log_event::do_apply_event() */ #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -int Delete_file_log_event::exec_event(struct st_relay_log_info* rli) +int Delete_file_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { char fname[FN_REFLEN+10]; char *ext= slave_load_file_stem(fname, file_id, server_id, ".data"); (void) my_delete(fname, MYF(MY_WME)); strmov(ext, ".info"); (void) my_delete(fname, MYF(MY_WME)); - return Log_event::exec_event(rli); + return 0; } #endif /* defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) */ @@ -5015,10 +5179,10 @@ void Execute_load_log_event::pack_info(Protocol *protocol) /* - Execute_load_log_event::exec_event() + Execute_load_log_event::do_apply_event() */ -int Execute_load_log_event::exec_event(struct st_relay_log_info* rli) +int Execute_load_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { char fname[FN_REFLEN+10]; char *ext; @@ -5049,14 +5213,15 @@ int Execute_load_log_event::exec_event(struct st_relay_log_info* rli) lev->thd = thd; /* - lev->exec_event should use rli only for errors - i.e. should not advance rli's position. - lev->exec_event is the place where the table is loaded (it calls - mysql_load()). + lev->do_apply_event should use rli only for errors i.e. should + not advance rli's position. + + lev->do_apply_event is the place where the table is loaded (it + calls mysql_load()). */ - rli->future_group_master_log_pos= log_pos; - if (lev->exec_event(0,rli,1)) + const_cast<RELAY_LOG_INFO*>(rli)->future_group_master_log_pos= log_pos; + if (lev->do_apply_event(0,rli,1)) { /* We want to indicate the name of the file that could not be loaded @@ -5099,7 +5264,7 @@ err: my_close(fd, MYF(0)); end_io_cache(&file); } - return error ? error : Log_event::exec_event(rli); + return error; } #endif /* defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) */ @@ -5267,7 +5432,7 @@ void Execute_load_query_log_event::pack_info(Protocol *protocol) int -Execute_load_query_log_event::exec_event(struct st_relay_log_info* rli) +Execute_load_query_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { char *p; char *buf; @@ -5304,7 +5469,7 @@ Execute_load_query_log_event::exec_event(struct st_relay_log_info* rli) p= strmake(p, STRING_WITH_LEN(" INTO")); p= strmake(p, query+fn_pos_end, q_len-fn_pos_end); - error= Query_log_event::exec_event(rli, buf, p-buf); + error= Query_log_event::do_apply_event(rli, buf, p-buf); /* Forging file name for deletion in same buffer */ *fname_end= 0; @@ -5360,7 +5525,7 @@ bool sql_ex_info::write_data(IO_CACHE* file) sql_ex_info::init() */ -char* sql_ex_info::init(char* buf,char* buf_end,bool use_new_format) +char *sql_ex_info::init(char *buf, char *buf_end, bool use_new_format) { cached_new_format = use_new_format; if (use_new_format) @@ -5373,11 +5538,12 @@ char* sql_ex_info::init(char* buf,char* buf_end,bool use_new_format) the case when we have old format because we will be reusing net buffer to read the actual file before we write out the Create_file event. 
*/ - if (read_str(&buf, buf_end, &field_term, &field_term_len) || - read_str(&buf, buf_end, &enclosed, &enclosed_len) || - read_str(&buf, buf_end, &line_term, &line_term_len) || - read_str(&buf, buf_end, &line_start, &line_start_len) || - read_str(&buf, buf_end, &escaped, &escaped_len)) + const char *ptr= buf; + if (read_str(&ptr, buf_end, (const char **) &field_term, &field_term_len) || + read_str(&ptr, buf_end, (const char **) &enclosed, &enclosed_len) || + read_str(&ptr, buf_end, (const char **) &line_term, &line_term_len) || + read_str(&ptr, buf_end, (const char **) &line_start, &line_start_len) || + read_str(&ptr, buf_end, (const char **) &escaped, &escaped_len)) return 0; opt_flags = *buf++; } @@ -5445,7 +5611,10 @@ Rows_log_event::Rows_log_event(THD *thd_arg, TABLE *tbl_arg, ulong tid, memcpy(m_cols.bitmap, cols->bitmap, no_bytes_in_map(cols)); } else - m_cols.bitmap= 0; // to not free it + { + // Needed because bitmap_init() does not set it to null on failure + m_cols.bitmap= 0; + } } #endif @@ -5482,14 +5651,57 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len, m_flags= uint2korr(post_start); - byte const *const var_start= (const byte *)buf + common_header_len + - post_header_len; + byte const *const var_start= + (const byte *)buf + common_header_len + post_header_len; byte const *const ptr_width= var_start; uchar *ptr_after_width= (uchar*) ptr_width; + DBUG_PRINT("debug", ("Reading from %p", ptr_after_width)); m_width = net_field_length(&ptr_after_width); + DBUG_PRINT("debug", ("m_width=%lu", m_width)); + /* if bitmap_init fails, catched in is_valid() */ + if (likely(!bitmap_init(&m_cols, + m_width <= sizeof(m_bitbuf)*8 ? m_bitbuf : NULL, + (m_width + 7) & ~7UL, + false))) + { + DBUG_PRINT("debug", ("Reading from %p", ptr_after_width)); + memcpy(m_cols.bitmap, ptr_after_width, (m_width + 7) / 8); + ptr_after_width+= (m_width + 7) / 8; + DBUG_DUMP("m_cols", (char*) m_cols.bitmap, no_bytes_in_map(&m_cols)); + } + else + { + // Needed because bitmap_init() does not set it to null on failure + m_cols.bitmap= NULL; + DBUG_VOID_RETURN; + } + + m_cols_ai.bitmap= m_cols.bitmap; /* See explanation in is_valid() */ - const uint byte_count= (m_width + 7) / 8; - const byte* const ptr_rows_data= var_start + byte_count + 1; + if (event_type == UPDATE_ROWS_EVENT) + { + DBUG_PRINT("debug", ("Reading from %p", ptr_after_width)); + + /* if bitmap_init fails, catched in is_valid() */ + if (likely(!bitmap_init(&m_cols_ai, + m_width <= sizeof(m_bitbuf_ai)*8 ? m_bitbuf_ai : NULL, + (m_width + 7) & ~7UL, + false))) + { + DBUG_PRINT("debug", ("Reading from %p", ptr_after_width)); + memcpy(m_cols_ai.bitmap, ptr_after_width, (m_width + 7) / 8); + ptr_after_width+= (m_width + 7) / 8; + DBUG_DUMP("m_cols_ai", (char*) m_cols_ai.bitmap, no_bytes_in_map(&m_cols_ai)); + } + else + { + // Needed because bitmap_init() does not set it to null on failure + m_cols_ai.bitmap= 0; + DBUG_VOID_RETURN; + } + } + + const byte* const ptr_rows_data= (const byte*) ptr_after_width; my_size_t const data_size= event_len - (ptr_rows_data - (const byte *) buf); DBUG_PRINT("info",("m_table_id: %lu m_flags: %d m_width: %lu data_size: %lu", @@ -5498,12 +5710,6 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len, m_rows_buf= (byte*)my_malloc(data_size, MYF(MY_WME)); if (likely((bool)m_rows_buf)) { - /* if bitmap_init fails, catched in is_valid() */ - if (likely(!bitmap_init(&m_cols, - m_width <= sizeof(m_bitbuf)*8 ? 
m_bitbuf : NULL, - (m_width + 7) & ~7UL, - false))) - memcpy(m_cols.bitmap, ptr_after_width, byte_count); m_rows_end= m_rows_buf + data_size; m_rows_cur= m_rows_end; memcpy(m_rows_buf, ptr_rows_data, data_size); @@ -5522,6 +5728,29 @@ Rows_log_event::~Rows_log_event() my_free((gptr)m_rows_buf, MYF(MY_ALLOW_ZERO_PTR)); } +int Rows_log_event::get_data_size() +{ + int const type_code= get_type_code(); + + char buf[sizeof(m_width)+1]; + char *end= net_store_length(buf, (m_width + 7) / 8); + + DBUG_EXECUTE_IF("old_row_based_repl_4_byte_map_id_master", + return 6 + no_bytes_in_map(&m_cols) + (end - buf) + + (type_code == UPDATE_ROWS_EVENT ? no_bytes_in_map(&m_cols_ai) : 0) + + (m_rows_cur - m_rows_buf);); + int data_size= ROWS_HEADER_LEN; + data_size+= no_bytes_in_map(&m_cols); + data_size+= end - buf; + + if (type_code == UPDATE_ROWS_EVENT) + data_size+= no_bytes_in_map(&m_cols_ai); + + data_size+= (m_rows_cur - m_rows_buf); + return data_size; +} + + #ifndef MYSQL_CLIENT int Rows_log_event::do_add_row_data(byte *const row_data, my_size_t const length) @@ -5595,10 +5824,10 @@ int Rows_log_event::do_add_row_data(byte *const row_data, row_end Pointer to variable that will hold the value of the one-after-end position for the row master_reclength - Pointer to variable that will be set to the length of the - record on the master side - rw_set Pointer to bitmap that holds either the read_set or the - write_set of the table + Pointer to variable that will be set to the length of the + record on the master side + rw_set Pointer to bitmap that holds either the read_set or the + write_set of the table DESCRIPTION @@ -5624,70 +5853,80 @@ int Rows_log_event::do_add_row_data(byte *const row_data, the master does not have a default value (and isn't nullable) */ static int -unpack_row(RELAY_LOG_INFO *rli, +unpack_row(RELAY_LOG_INFO const *rli, TABLE *table, uint const colcnt, - char const *row, MY_BITMAP const *cols, - char const **row_end, ulong *master_reclength, + char const *const row_data, MY_BITMAP const *cols, + char const **const row_end, ulong *const master_reclength, MY_BITMAP* const rw_set, Log_event_type const event_type) { - byte *const record= table->record[0]; DBUG_ENTER("unpack_row"); - DBUG_ASSERT(record && row); - DBUG_PRINT("enter", ("row: 0x%lx table->record[0]: 0x%lx", (long) row, (long) record)); - my_size_t master_null_bytes= table->s->null_bytes; - - if (colcnt != table->s->fields) - { - Field **fptr= &table->field[colcnt-1]; - do - master_null_bytes= (*fptr)->last_null_byte(); - while (master_null_bytes == Field::LAST_NULL_BYTE_UNDEF && - fptr-- > table->field); - - /* - If master_null_bytes is LAST_NULL_BYTE_UNDEF (0) at this time, - there were no nullable fields nor BIT fields at all in the - columns that are common to the master and the slave. In that - case, there is only one null byte holding the X bit. + DBUG_ASSERT(row_data); + my_size_t const master_null_byte_count= (bitmap_bits_set(cols) + 7) / 8; + int error= 0; - OBSERVE! There might still be nullable columns following the - common columns, so table->s->null_bytes might be greater than 1. 
- */ - if (master_null_bytes == Field::LAST_NULL_BYTE_UNDEF) - master_null_bytes= 1; - } + char const *null_ptr= row_data; + char const *pack_ptr= row_data + master_null_byte_count; - DBUG_ASSERT(master_null_bytes <= table->s->null_bytes); - memcpy(record, row, master_null_bytes); // [1] - int error= 0; + bitmap_clear_all(rw_set); - bitmap_set_all(rw_set); + empty_record(table); Field **const begin_ptr = table->field; Field **field_ptr; - char const *ptr= row + master_null_bytes; Field **const end_ptr= begin_ptr + colcnt; + + DBUG_ASSERT(null_ptr < row_data + master_null_byte_count); + + // Mask to mask out the correct bit among the null bits + unsigned int null_mask= 1U; + // The "current" null bits + unsigned int null_bits= *null_ptr++; for (field_ptr= begin_ptr ; field_ptr < end_ptr ; ++field_ptr) { Field *const f= *field_ptr; + /* + No need to bother about columns that does not exist: they have + gotten default values when being emptied above. + */ if (bitmap_is_set(cols, field_ptr - begin_ptr)) { - DBUG_ASSERT((const char *)table->record[0] <= f->ptr); - DBUG_ASSERT(f->ptr < (char*)(table->record[0] + table->s->reclength + - (f->pack_length_in_rec() == 0))); + if ((null_mask & 0xFF) == 0) + { + DBUG_ASSERT(null_ptr < row_data + master_null_byte_count); + null_mask= 1U; + null_bits= *null_ptr++; + } + + DBUG_ASSERT(null_mask & 0xFF); // One of the 8 LSB should be set - DBUG_PRINT("info", ("unpacking column '%s' to 0x%lx", f->field_name, - (long) f->ptr)); - ptr= f->unpack(f->ptr, ptr); /* Field...::unpack() cannot return 0 */ - DBUG_ASSERT(ptr != NULL); + DBUG_ASSERT(pack_ptr != NULL); + + if ((null_bits & null_mask) && f->maybe_null()) + f->set_null(); + else + { + f->set_notnull(); + + /* + We only unpack the field if it was non-null + */ + pack_ptr= f->unpack(f->ptr, pack_ptr); + } + + bitmap_set_bit(rw_set, f->field_index); + null_mask <<= 1; } - else - bitmap_clear_bit(rw_set, field_ptr - begin_ptr); } - *row_end = ptr; + /* + We should now have read all the null bytes, otherwise something is + really wrong. + */ + DBUG_ASSERT(null_ptr == row_data + master_null_byte_count); + + *row_end = pack_ptr; if (master_reclength) { if (*field_ptr) @@ -5712,9 +5951,8 @@ unpack_row(RELAY_LOG_INFO *rli, uint32 const mask= NOT_NULL_FLAG | NO_DEFAULT_VALUE_FLAG; Field *const f= *field_ptr; - DBUG_PRINT("info", ("processing column '%s' @ 0x%lx", f->field_name, - (long) f->ptr)); - if (event_type == WRITE_ROWS_EVENT && (f->flags & mask) == mask) + if (event_type == WRITE_ROWS_EVENT && + ((*field_ptr)->flags & mask) == mask) { slave_print_msg(ERROR_LEVEL, rli, ER_NO_DEFAULT_FOR_FIELD, "Field `%s` of table `%s`.`%s` " @@ -5730,17 +5968,17 @@ unpack_row(RELAY_LOG_INFO *rli, DBUG_RETURN(error); } -int Rows_log_event::exec_event(st_relay_log_info *rli) +int Rows_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { - DBUG_ENTER("Rows_log_event::exec_event(st_relay_log_info*)"); + DBUG_ENTER("Rows_log_event::do_apply_event(st_relay_log_info*)"); int error= 0; char const *row_start= (char const *)m_rows_buf; /* - If m_table_id == ~0UL, then we have a dummy event that does - not contain any data. In that case, we just remove all tables in - the tables_to_lock list, close the thread tables, step the relay - log position, and return with success. + If m_table_id == ~0UL, then we have a dummy event that does not + contain any data. In that case, we just remove all tables in the + tables_to_lock list, close the thread tables, and return with + success. 
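[Editorial aside, not part of the patch] A self-contained sketch of the null-bit walk that the rewritten unpack_row() above performs. The row image is simplified here to a leading null bitmap covering only the columns marked present, followed by a host-endian 32-bit value for every present, non-NULL column; the server instead calls each Field's unpack() for the value bytes. Everything below (Value, unpack_sketch, the fixed-width values) is hypothetical illustration.

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

struct Value { bool is_null; uint32_t v; };

// 'present' marks which of the master's columns are included in the image.
static std::vector<Value> unpack_sketch(const uint8_t *row,
                                        const std::vector<bool> &present)
{
  size_t present_count= 0;
  for (bool p : present)
    present_count+= p;

  const size_t null_byte_count= (present_count + 7) / 8;
  const uint8_t *null_ptr= row;
  const uint8_t *pack_ptr= row + null_byte_count;

  unsigned null_mask= 1U;            // bit to test in the current null byte
  unsigned null_bits= *null_ptr++;   // current null byte

  std::vector<Value> out(present.size(), Value{true, 0});
  for (size_t col= 0; col < present.size(); ++col)
  {
    if (!present[col])
      continue;                      // absent columns keep their defaults
    if ((null_mask & 0xFF) == 0)     // used all 8 bits, move to the next byte
    {
      null_mask= 1U;
      null_bits= *null_ptr++;
    }
    if (!(null_bits & null_mask))    // only non-NULL columns carry value bytes
    {
      uint32_t v;
      std::memcpy(&v, pack_ptr, sizeof v);
      pack_ptr+= sizeof v;
      out[col]= Value{false, v};
    }
    null_mask<<= 1;                  // one null bit per *present* column
  }
  return out;
}

int main()
{
  // Two present columns; bit 1 of the null byte marks the second as NULL.
  const uint8_t row[]= { 0x02, 0x2A, 0x00, 0x00, 0x00 };
  std::vector<Value> vals= unpack_sketch(row, {true, true});
  std::printf("%u %d\n", (unsigned) vals[0].v, (int) vals[1].is_null);  // 42 1
}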
*/ if (m_table_id == ~0UL) { @@ -5750,16 +5988,16 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) */ DBUG_ASSERT(get_flags(STMT_END_F)); - rli->clear_tables_to_lock(); + const_cast<RELAY_LOG_INFO*>(rli)->clear_tables_to_lock(); close_thread_tables(thd); thd->clear_error(); - rli->inc_event_relay_log_pos(); DBUG_RETURN(0); } /* 'thd' has been set by exec_relay_log_event(), just before calling - exec_event(). We still check here to prevent future coding errors. + do_apply_event(). We still check here to prevent future coding + errors. */ DBUG_ASSERT(rli->sql_thd == thd); @@ -5775,8 +6013,9 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) /* lock_tables() reads the contents of thd->lex, so they must be - initialized. Contrary to in Table_map_log_event::exec_event() we don't - call mysql_init_query() as that may reset the binlog format. + initialized. Contrary to in + Table_map_log_event::do_apply_event() we don't call + mysql_init_query() as that may reset the binlog format. */ lex_start(thd, NULL, 0); @@ -5805,7 +6044,7 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) "Error in %s event: when locking tables", get_type_str()); } - rli->clear_tables_to_lock(); + const_cast<RELAY_LOG_INFO*>(rli)->clear_tables_to_lock(); DBUG_RETURN(error); } @@ -5826,7 +6065,8 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) TABLE_LIST *tables= rli->tables_to_lock; close_tables_for_reopen(thd, &tables); - if ((error= open_tables(thd, &tables, &rli->tables_to_lock_count, 0))) + uint tables_count= rli->tables_to_lock_count; + if ((error= open_tables(thd, &tables, &tables_count, 0))) { if (thd->query_error || thd->is_fatal_error) { @@ -5841,7 +6081,7 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) "unexpected success or fatal error")); thd->query_error= 1; } - rli->clear_tables_to_lock(); + const_cast<RELAY_LOG_INFO*>(rli)->clear_tables_to_lock(); DBUG_RETURN(error); } } @@ -5863,7 +6103,7 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) mysql_unlock_tables(thd, thd->lock); thd->lock= 0; thd->query_error= 1; - rli->clear_tables_to_lock(); + const_cast<RELAY_LOG_INFO*>(rli)->clear_tables_to_lock(); DBUG_RETURN(ERR_BAD_TABLE_DEF); } } @@ -5885,24 +6125,24 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) */ for (TABLE_LIST *ptr= rli->tables_to_lock ; ptr ; ptr= ptr->next_global) { - rli->m_table_map.set_table(ptr->table_id, ptr->table); + const_cast<RELAY_LOG_INFO*>(rli)->m_table_map.set_table(ptr->table_id, ptr->table); } #ifdef HAVE_QUERY_CACHE query_cache.invalidate_locked_for_write(rli->tables_to_lock); #endif - rli->clear_tables_to_lock(); + const_cast<RELAY_LOG_INFO*>(rli)->clear_tables_to_lock(); } DBUG_ASSERT(rli->tables_to_lock == NULL && rli->tables_to_lock_count == 0); - TABLE* table= rli->m_table_map.get_table(m_table_id); + TABLE* table= const_cast<RELAY_LOG_INFO*>(rli)->m_table_map.get_table(m_table_id); if (table) { /* table == NULL means that this table should not be replicated - (this was set up by Table_map_log_event::exec_event() which - tested replicate-* rules). + (this was set up by Table_map_log_event::do_apply_event() + which tested replicate-* rules). 
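[Editorial aside, not part of the patch] The table-id bookkeeping used above, sketched with a std::map: a Table_map event registers table_id -> TABLE through set_table(), every following Rows event looks its table up with get_table(), and a missing entry (NULL) means the table is not to be replicated. SketchTable and SketchTableMap are hypothetical stand-ins for TABLE and rli->m_table_map.

#include <cstdio>
#include <map>
#include <string>

struct SketchTable { std::string db, name; };   // stand-in for TABLE

class SketchTableMap                            // stand-in for rli->m_table_map
{
public:
  void set_table(unsigned long id, SketchTable *table) { m_map[id]= table; }
  SketchTable *get_table(unsigned long id) const
  {
    std::map<unsigned long, SketchTable*>::const_iterator it= m_map.find(id);
    return it == m_map.end() ? 0 : it->second;  // 0 => do not replicate
  }
  void clear() { m_map.clear(); }               // dropped when the group ends
private:
  std::map<unsigned long, SketchTable*> m_map;
};

int main()
{
  SketchTable t1= { "test", "t1" };
  SketchTableMap table_map;
  table_map.set_table(42, &t1);                 // done by the Table_map event
  if (SketchTable *t= table_map.get_table(42))  // done by each Rows event
    std::printf("%s.%s\n", t->db.c_str(), t->name.c_str());
  table_map.clear();
}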
*/ /* @@ -5959,7 +6199,7 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) break; default: - slave_print_msg(ERROR_LEVEL, rli, error, + slave_print_msg(ERROR_LEVEL, rli, thd->net.last_errno, "Error in %s event: row application failed", get_type_str()); thd->query_error= 1; @@ -5969,7 +6209,7 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) row_start= row_end; } DBUG_EXECUTE_IF("STOP_SLAVE_after_first_Rows_event", - rli->abort_slave=1;); + const_cast<RELAY_LOG_INFO*>(rli)->abort_slave= 1;); error= do_after_row_operations(table, error); if (!cache_stmt) { @@ -5980,11 +6220,12 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) if (error) { /* error has occured during the transaction */ - slave_print_msg(ERROR_LEVEL, rli, error, + slave_print_msg(ERROR_LEVEL, rli, thd->net.last_errno, "Error in %s event: error during transaction execution " "on table %s.%s", get_type_str(), table->s->db.str, table->s->table_name.str); + /* If one day we honour --skip-slave-errors in row-based replication, and the error should be skipped, then we would clear mappings, rollback, @@ -5997,7 +6238,7 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) rollback at the caller along with sbr. */ thd->reset_current_stmt_binlog_row_based(); - rli->cleanup_context(thd, 0); /* rollback at caller in step with sbr */ + const_cast<RELAY_LOG_INFO*>(rli)->cleanup_context(thd, error); thd->query_error= 1; DBUG_RETURN(error); } @@ -6041,8 +6282,7 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) */ thd->reset_current_stmt_binlog_row_based(); - rli->cleanup_context(thd, 0); - rli->transaction_end(thd); + const_cast<RELAY_LOG_INFO*>(rli)->cleanup_context(thd, 0); if (error == 0) { @@ -6055,7 +6295,6 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) do not become visible. We still prefer to wipe them out. */ thd->clear_error(); - error= Log_event::exec_event(rli); } else slave_print_msg(ERROR_LEVEL, rli, error, @@ -6082,17 +6321,17 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) wait (reached end of last relay log and nothing gets appended there), we timeout after one minute, and notify DBA about the problem. When WL#2975 is implemented, just remove the member - st_relay_log_info::unsafe_to_stop_at and all its occurences. + st_relay_log_info::last_event_start_time and all its occurences. 
*/ - rli->unsafe_to_stop_at= time(0); + const_cast<RELAY_LOG_INFO*>(rli)->last_event_start_time= time(0); } DBUG_ASSERT(error == 0); thd->clear_error(); - rli->inc_event_relay_log_pos(); - + DBUG_RETURN(0); } + #endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */ #ifndef MYSQL_CLIENT @@ -6119,15 +6358,35 @@ bool Rows_log_event::write_data_body(IO_CACHE*file) */ char sbuf[sizeof(m_width)]; my_ptrdiff_t const data_size= m_rows_cur - m_rows_buf; + bool res= false; char *const sbuf_end= net_store_length((char*) sbuf, (uint) m_width); DBUG_ASSERT(static_cast<my_size_t>(sbuf_end - sbuf) <= sizeof(sbuf)); - return (my_b_safe_write(file, reinterpret_cast<byte*>(sbuf), - sbuf_end - sbuf) || - my_b_safe_write(file, reinterpret_cast<byte*>(m_cols.bitmap), - no_bytes_in_map(&m_cols)) || - my_b_safe_write(file, m_rows_buf, (uint) data_size)); + DBUG_DUMP("m_width", sbuf, sbuf_end - sbuf); + res= res || my_b_safe_write(file, + reinterpret_cast<byte*>(sbuf), + sbuf_end - sbuf); + + DBUG_DUMP("m_cols", (char*) m_cols.bitmap, no_bytes_in_map(&m_cols)); + res= res || my_b_safe_write(file, + reinterpret_cast<byte*>(m_cols.bitmap), + no_bytes_in_map(&m_cols)); + /* + TODO[refactor write]: Remove the "down cast" here (and elsewhere). + */ + if (get_type_code() == UPDATE_ROWS_EVENT) + { + DBUG_DUMP("m_cols_ai", (char*) m_cols_ai.bitmap, no_bytes_in_map(&m_cols_ai)); + res= res || my_b_safe_write(file, + reinterpret_cast<byte*>(m_cols_ai.bitmap), + no_bytes_in_map(&m_cols_ai)); + } + DBUG_DUMP("rows", m_rows_buf, data_size); + res= res || my_b_safe_write(file, m_rows_buf, (uint) data_size); + + return res; + } #endif @@ -6160,10 +6419,8 @@ void Rows_log_event::print_helper(FILE *file, if (get_flags(STMT_END_F)) { - my_b_copy_to_file(head, file); - my_b_copy_to_file(body, file); - reinit_io_cache(head, WRITE_CACHE, 0, FALSE, TRUE); - reinit_io_cache(body, WRITE_CACHE, 0, FALSE, TRUE); + copy_event_cache_to_file_and_reinit(head, file); + copy_event_cache_to_file_and_reinit(body, file); } } #endif @@ -6272,15 +6529,15 @@ Table_map_log_event::Table_map_log_event(const char *buf, uint event_len, const char *const vpart= buf + common_header_len + post_header_len; /* Extract the length of the various parts from the buffer */ - byte const* const ptr_dblen= (byte const*)vpart + 0; + byte const *const ptr_dblen= (byte const*)vpart + 0; m_dblen= *(uchar*) ptr_dblen; /* Length of database name + counter + terminating null */ - byte const* const ptr_tbllen= ptr_dblen + m_dblen + 2; + byte const *const ptr_tbllen= ptr_dblen + m_dblen + 2; m_tbllen= *(uchar*) ptr_tbllen; /* Length of table name + counter + terminating null */ - byte const* const ptr_colcnt= ptr_tbllen + m_tbllen + 2; + byte const *const ptr_colcnt= ptr_tbllen + m_tbllen + 2; uchar *ptr_after_colcnt= (uchar*) ptr_colcnt; m_colcnt= net_field_length(&ptr_after_colcnt); @@ -6325,9 +6582,9 @@ Table_map_log_event::~Table_map_log_event() */ #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) -int Table_map_log_event::exec_event(st_relay_log_info *rli) +int Table_map_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { - DBUG_ENTER("Table_map_log_event::exec_event(st_relay_log_info*)"); + DBUG_ENTER("Table_map_log_event::do_apply_event(st_relay_log_info*)"); DBUG_ASSERT(rli->sql_thd == thd); @@ -6450,29 +6707,24 @@ int Table_map_log_event::exec_event(st_relay_log_info *rli) locked by linking the table into the list of tables to lock. 
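[Editorial aside, not part of the patch] The body layout produced by the updated Rows_log_event::write_data_body() above, sketched with a hypothetical byte-vector writer in place of IO_CACHE: a packed column count, the before-image column bitmap, an extra after-image bitmap for UPDATE_ROWS_EVENT only, then the packed rows. The single-byte length encoding below assumes fewer than 251 columns; the server's net_store_length() switches to longer encodings beyond that.

#include <cstdint>
#include <vector>

// Packed-integer encoding, single-byte case only (assumption for the sketch).
static void store_packed(std::vector<uint8_t> &out, uint64_t v)
{
  out.push_back(static_cast<uint8_t>(v));
}

static std::vector<uint8_t>
rows_body_sketch(uint64_t width,                              // columns on master
                 const std::vector<uint8_t> &cols_bitmap,     // before image
                 const std::vector<uint8_t> *cols_ai_bitmap,  // after image (UPDATE only)
                 const std::vector<uint8_t> &rows)            // packed row data
{
  std::vector<uint8_t> body;
  store_packed(body, width);
  body.insert(body.end(), cols_bitmap.begin(), cols_bitmap.end());
  if (cols_ai_bitmap)
    body.insert(body.end(), cols_ai_bitmap->begin(), cols_ai_bitmap->end());
  body.insert(body.end(), rows.begin(), rows.end());
  return body;
}

int main()
{
  std::vector<uint8_t> cols(1, 0x07);           // 3 columns, all present
  std::vector<uint8_t> rows(4, 0x00);           // opaque packed row bytes
  std::vector<uint8_t> body= rows_body_sketch(3, cols, 0, rows);
  return body.size() == 1 + 1 + 4 ? 0 : 1;
}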
*/ table_list->next_global= table_list->next_local= rli->tables_to_lock; - rli->tables_to_lock= table_list; - rli->tables_to_lock_count++; + const_cast<RELAY_LOG_INFO*>(rli)->tables_to_lock= table_list; + const_cast<RELAY_LOG_INFO*>(rli)->tables_to_lock_count++; /* 'memory' is freed in clear_tables_to_lock */ } - /* - We explicitly do not call Log_event::exec_event() here since we do not - want the relay log position to be flushed to disk. The flushing will be - done by the last Rows_log_event that either ends a statement (outside a - transaction) or a transaction. - - A table map event can *never* end a transaction or a statement, so we - just step the relay log position. - */ - - if (likely(!error)) - rli->inc_event_relay_log_pos(); DBUG_RETURN(error); err: my_free((gptr) memory, MYF(MY_WME)); DBUG_RETURN(error); } + +int Table_map_log_event::do_update_pos(RELAY_LOG_INFO *rli) +{ + rli->inc_event_relay_log_pos(); + return 0; +} + #endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */ #ifndef MYSQL_CLIENT @@ -6637,7 +6889,7 @@ int Write_rows_log_event::do_after_row_operations(TABLE *table, int error) return error; } -int Write_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO *rli, +int Write_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO const *rli, TABLE *table, char const *const row_start, char const **const row_end) @@ -6778,6 +7030,42 @@ copy_extra_record_fields(TABLE *table, return 0; // All OK } +#define DBUG_PRINT_BITSET(N,FRM,BS) \ + do { \ + char buf[256]; \ + for (uint i = 0 ; i < (BS)->n_bits ; ++i) \ + buf[i] = bitmap_is_set((BS), i) ? '1' : '0'; \ + buf[(BS)->n_bits] = '\0'; \ + DBUG_PRINT((N), ((FRM), buf)); \ + } while (0) + + +/** + Check if an error is a duplicate key error. + + This function is used to check if an error code is one of the + duplicate key error, i.e., and error code for which it is sensible + to do a <code>get_dup_key()</code> to retrieve the duplicate key. + + @param errcode The error code to check. + + @return <code>true</code> if the error code is such that + <code>get_dup_key()</code> will return true, <code>false</code> + otherwise. + */ +bool +is_duplicate_key_error(int errcode) +{ + switch (errcode) + { + case HA_ERR_FOUND_DUPP_KEY: + case HA_ERR_FOUND_DUPP_UNIQUE: + return true; + } + return false; +} + + /* Replace the provided record in the database. 
@@ -6810,6 +7098,12 @@ replace_record(THD *thd, TABLE *table, int keynum; auto_afree_ptr<char> key(NULL); +#ifndef DBUG_OFF + DBUG_DUMP("record[0]", table->record[0], table->s->reclength); + DBUG_PRINT_BITSET("debug", "write_set = %s", table->write_set); + DBUG_PRINT_BITSET("debug", "read_set = %s", table->read_set); +#endif + while ((error= table->file->ha_write_row(table->record[0]))) { if (error == HA_ERR_LOCK_DEADLOCK || error == HA_ERR_LOCK_WAIT_TIMEOUT) @@ -6820,7 +7114,7 @@ replace_record(THD *thd, TABLE *table, if ((keynum= table->file->get_dup_key(error)) < 0) { /* We failed to retrieve the duplicate key */ - DBUG_RETURN(HA_ERR_FOUND_DUPP_KEY); + DBUG_RETURN(error); } /* @@ -6837,7 +7131,10 @@ replace_record(THD *thd, TABLE *table, { error= table->file->rnd_pos(table->record[1], table->file->dup_ref); if (error) + { + table->file->print_error(error, MYF(0)); DBUG_RETURN(error); + } } else { @@ -6855,10 +7152,14 @@ replace_record(THD *thd, TABLE *table, key_copy((byte*)key.get(), table->record[0], table->key_info + keynum, 0); error= table->file->index_read_idx(table->record[1], keynum, - (const byte*)key.get(), HA_WHOLE_KEY, + (const byte*)key.get(), + HA_WHOLE_KEY, HA_READ_KEY_EXACT); if (error) + { + table->file->print_error(error, MYF(0)); DBUG_RETURN(error); + } } /* @@ -6891,15 +7192,21 @@ replace_record(THD *thd, TABLE *table, { error=table->file->ha_update_row(table->record[1], table->record[0]); + if (error) + table->file->print_error(error, MYF(0)); DBUG_RETURN(error); } else { if ((error= table->file->ha_delete_row(table->record[1]))) + { + table->file->print_error(error, MYF(0)); DBUG_RETURN(error); + } /* Will retry ha_write_row() with the offending row removed. */ } } + DBUG_RETURN(error); } @@ -6930,20 +7237,75 @@ void Write_rows_log_event::print(FILE *file, PRINT_EVENT_INFO* print_event_info) */ static bool record_compare(TABLE *table) { + /* + Need to set the X bit and the filler bits in both records since + there are engines that do not set it correctly. + + In addition, since MyISAM checks that one hasn't tampered with the + record, it is necessary to restore the old bytes into the record + after doing the comparison. + + TODO[record format ndb]: Remove it once NDB returns correct + records. Check that the other engines also return correct records. + */ + + bool result= FALSE; + byte saved_x[2], saved_filler[2]; + + if (table->s->null_bytes > 0) + { + for (int i = 0 ; i < 2 ; ++i) + { + saved_x[i]= table->record[i][0]; + saved_filler[i]= table->record[i][table->s->null_bytes - 1]; + table->record[i][0]|= 1U; + table->record[i][table->s->null_bytes - 1]|= + 256U - (1U << table->s->last_null_bit_pos); + } + } + if (table->s->blob_fields + table->s->varchar_fields == 0) - return cmp_record(table,record[1]); + { + result= cmp_record(table,record[1]); + goto record_compare_exit; + } + /* Compare null bits */ if (memcmp(table->null_flags, table->null_flags+table->s->rec_buff_length, table->s->null_bytes)) - return TRUE; // Diff in NULL value + { + result= TRUE; // Diff in NULL value + goto record_compare_exit; + } + /* Compare updated fields */ for (Field **ptr=table->field ; *ptr ; ptr++) { if ((*ptr)->cmp_binary_offset(table->s->rec_buff_length)) - return TRUE; + { + result= TRUE; + goto record_compare_exit; + } } - return FALSE; + +record_compare_exit: + /* + Restore the saved bytes. + + TODO[record format ndb]: Remove this code once NDB returns the + correct record format. 
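[Editorial aside, not part of the patch] The duplicate-key retry loop in replace_record() above, reduced to a toy table with two unique indexes: try the write, find which unique key the new row collides on, remove the conflicting row, and try again. The real code locates the conflict via get_dup_key() plus rnd_pos()/index_read_idx(), and turns the final conflict into an in-place ha_update_row() when that is safe; MiniTable and its two std::map indexes are hypothetical.

#include <cstdio>
#include <map>
#include <string>

struct Row { int id; std::string email; };

// Two unique indexes over the same rows, mimicking a table with a primary
// key and a unique secondary key.
struct MiniTable
{
  std::map<int, Row> by_id;
  std::map<std::string, int> by_email;   // email -> id

  // Returns 0 on success, 1 if 'id' collides, 2 if 'email' collides.
  int write_row(const Row &r)
  {
    if (by_id.count(r.id)) return 1;
    if (by_email.count(r.email)) return 2;
    by_id[r.id]= r;
    by_email[r.email]= r.id;
    return 0;
  }

  void delete_conflicting(const Row &r, int keynum)
  {
    int victim_id= keynum == 1 ? r.id : by_email[r.email];
    by_email.erase(by_id[victim_id].email);
    by_id.erase(victim_id);
  }
};

// REPLACE-style write: keep removing whichever row blocks the insert until
// the insert goes through.
static void replace_row(MiniTable &t, const Row &r)
{
  int keynum;
  while ((keynum= t.write_row(r)) != 0)
    t.delete_conflicting(r, keynum);
}

int main()
{
  MiniTable t;
  replace_row(t, Row{1, "a@x"});
  replace_row(t, Row{2, "b@x"});
  replace_row(t, Row{1, "b@x"});   // conflicts with both existing rows
  std::printf("%d rows\n", (int) t.by_id.size());   // 1 row left
}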
+ */ + if (table->s->null_bytes > 0) + { + for (int i = 0 ; i < 2 ; ++i) + { + table->record[i][0]= saved_x[i]; + table->record[i][table->s->null_bytes - 1]= saved_filler[i]; + } + } + + return result; } @@ -6979,6 +7341,8 @@ static int find_and_fetch_row(TABLE *table, byte *key) DBUG_ASSERT(table->in_use != NULL); + DBUG_DUMP("record[0]", table->record[0], table->s->reclength); + if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_POSITION) && table->s->primary_key < MAX_KEY) { @@ -7078,15 +7442,22 @@ static int find_and_fetch_row(TABLE *table, byte *key) while (record_compare(table)) { int error; + /* We need to set the null bytes to ensure that the filler bit are all set when returning. There are storage engines that just set the necessary bits on the bytes and don't set the filler bits correctly. + + TODO[record format ndb]: Remove this code once NDB returns the + correct record format. */ - my_ptrdiff_t const pos= - table->s->null_bytes > 0 ? table->s->null_bytes - 1 : 0; - table->record[1][pos]= 0xFF; + if (table->s->null_bytes > 0) + { + table->record[1][table->s->null_bytes - 1]|= + 256U - (1U << table->s->last_null_bit_pos); + } + if ((error= table->file->index_next(table->record[1]))) { table->file->print_error(error, MYF(0)); @@ -7112,17 +7483,11 @@ static int find_and_fetch_row(TABLE *table, byte *key) /* Continue until we find the right record or have made a full loop */ do { - /* - We need to set the null bytes to ensure that the filler bit - are all set when returning. There are storage engines that - just set the necessary bits on the bytes and don't set the - filler bits correctly. - */ - my_ptrdiff_t const pos= - table->s->null_bytes > 0 ? table->s->null_bytes - 1 : 0; - table->record[1][pos]= 0xFF; error= table->file->rnd_next(table->record[1]); + DBUG_DUMP("record[0]", table->record[0], table->s->reclength); + DBUG_DUMP("record[1]", table->record[1], table->s->reclength); + switch (error) { case 0: @@ -7136,6 +7501,7 @@ static int find_and_fetch_row(TABLE *table, byte *key) default: table->file->print_error(error, MYF(0)); + DBUG_PRINT("info", ("Record not found")); table->file->ha_rnd_end(); DBUG_RETURN(error); } @@ -7145,6 +7511,7 @@ static int find_and_fetch_row(TABLE *table, byte *key) /* Have to restart the scan to be able to fetch the next row. */ + DBUG_PRINT("info", ("Record %sfound", restart_count == 2 ? 
"not " : "")); table->file->ha_rnd_end(); DBUG_ASSERT(error == HA_ERR_END_OF_FILE || error == 0); @@ -7237,7 +7604,7 @@ int Delete_rows_log_event::do_after_row_operations(TABLE *table, int error) return error; } -int Delete_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO *rli, +int Delete_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO const *rli, TABLE *table, char const *const row_start, char const **const row_end) @@ -7303,16 +7670,55 @@ void Delete_rows_log_event::print(FILE *file, */ #if !defined(MYSQL_CLIENT) Update_rows_log_event::Update_rows_log_event(THD *thd_arg, TABLE *tbl_arg, - ulong tid, MY_BITMAP const *cols, + ulong tid, + MY_BITMAP const *cols_bi, + MY_BITMAP const *cols_ai, + bool is_transactional) +: Rows_log_event(thd_arg, tbl_arg, tid, cols_bi, is_transactional) +#ifdef HAVE_REPLICATION + , m_memory(NULL), m_key(NULL) + +#endif +{ + init(cols_ai); +} + +Update_rows_log_event::Update_rows_log_event(THD *thd_arg, TABLE *tbl_arg, + ulong tid, + MY_BITMAP const *cols, bool is_transactional) : Rows_log_event(thd_arg, tbl_arg, tid, cols, is_transactional) #ifdef HAVE_REPLICATION , m_memory(NULL), m_key(NULL) #endif { + init(cols); +} + +void Update_rows_log_event::init(MY_BITMAP const *cols) +{ + /* if bitmap_init fails, catched in is_valid() */ + if (likely(!bitmap_init(&m_cols_ai, + m_width <= sizeof(m_bitbuf_ai)*8 ? m_bitbuf_ai : NULL, + (m_width + 7) & ~7UL, + false))) + { + /* Cols can be zero if this is a dummy binrows event */ + if (likely(cols != NULL)) + memcpy(m_cols_ai.bitmap, cols->bitmap, no_bytes_in_map(cols)); + } } #endif /* !defined(MYSQL_CLIENT) */ + +Update_rows_log_event::~Update_rows_log_event() +{ + if (m_cols_ai.bitmap == m_bitbuf_ai) // no my_malloc happened + m_cols_ai.bitmap= 0; // so no my_free in bitmap_free + bitmap_free(&m_cols_ai); // To pair with bitmap_init(). +} + + /* Constructor used by slave to read the event from the binary log. 
*/ @@ -7372,7 +7778,7 @@ int Update_rows_log_event::do_after_row_operations(TABLE *table, int error) return error; } -int Update_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO *rli, +int Update_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO const *rli, TABLE *table, char const *const row_start, char const **const row_end) @@ -7397,7 +7803,7 @@ int Update_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO *rli, store_record(table, record[1]); char const *next_start = *row_end; /* m_after_image is the after image for the update */ - error= unpack_row(rli, table, m_width, next_start, &m_cols, row_end, + error= unpack_row(rli, table, m_width, next_start, &m_cols_ai, row_end, &m_master_reclength, table->write_set, UPDATE_ROWS_EVENT); bmove_align(m_after_image, table->record[0], table->s->reclength); restore_record(table, record[1]); @@ -7469,3 +7875,113 @@ void Update_rows_log_event::print(FILE *file, } #endif + +Incident_log_event::Incident_log_event(const char *buf, uint event_len, + const Format_description_log_event *descr_event) + : Log_event(buf, descr_event) +{ + DBUG_ENTER("Incident_log_event::Incident_log_event"); + uint8 const common_header_len= + descr_event->common_header_len; + uint8 const post_header_len= + descr_event->post_header_len[INCIDENT_EVENT-1]; + + DBUG_PRINT("info",("event_len: %u; common_header_len: %d; post_header_len: %d", + event_len, common_header_len, post_header_len)); + + m_incident= static_cast<Incident>(uint2korr(buf + common_header_len)); + char const *ptr= buf + common_header_len + post_header_len; + char const *const str_end= buf + event_len; + uint8 len= 0; // Assignment to keep compiler happy + const char *str= NULL; // Assignment to keep compiler happy + read_str(&ptr, str_end, &str, &len); + m_message.str= const_cast<char*>(str); + m_message.length= len; + DBUG_PRINT("info", ("m_incident: %d", m_incident)); + DBUG_VOID_RETURN; +} + + +Incident_log_event::~Incident_log_event() +{ +} + + +const char * +Incident_log_event::description() const +{ + static const char *const description[]= { + "NOTHING", // Not used + "LOST_EVENTS" + }; + + DBUG_PRINT("info", ("m_incident: %d", m_incident)); + + DBUG_ASSERT(0 <= m_incident); + DBUG_ASSERT((my_size_t) m_incident <= sizeof(description)/sizeof(*description)); + + return description[m_incident]; +} + + +#ifndef MYSQL_CLIENT +void Incident_log_event::pack_info(Protocol *protocol) +{ + char buf[256]; + my_size_t bytes; + if (m_message.length > 0) + bytes= my_snprintf(buf, sizeof(buf), "#%d (%s)", + m_incident, description()); + else + bytes= my_snprintf(buf, sizeof(buf), "#%d (%s): %s", + m_incident, description(), m_message.str); + protocol->store(buf, bytes, &my_charset_bin); +} +#endif + + +#ifdef MYSQL_CLIENT +void +Incident_log_event::print(FILE *file, + PRINT_EVENT_INFO *print_event_info) +{ + if (print_event_info->short_form) + return; + + Write_on_release_cache cache(&print_event_info->head_cache, file); + print_header(&cache, print_event_info, FALSE); + my_b_printf(&cache, "\n# Incident: %s", description()); +} +#endif + +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) +int +Incident_log_event::do_apply_event(RELAY_LOG_INFO const *rli) +{ + DBUG_ENTER("Incident_log_event::do_apply_event"); + slave_print_msg(ERROR_LEVEL, rli, ER_SLAVE_INCIDENT, + ER(ER_SLAVE_INCIDENT), + description(), + m_message.length > 0 ? 
m_message.str : "<none>"); + DBUG_RETURN(1); +} +#endif + +bool +Incident_log_event::write_data_header(IO_CACHE *file) +{ + DBUG_ENTER("Incident_log_event::write_data_header"); + DBUG_PRINT("enter", ("m_incident: %d", m_incident)); + byte buf[sizeof(int16)]; + int2store(buf, (int16) m_incident); + DBUG_RETURN(my_b_safe_write(file, buf, sizeof(buf))); +} + +bool +Incident_log_event::write_data_body(IO_CACHE *file) +{ + DBUG_ENTER("Incident_log_event::write_data_body"); + DBUG_RETURN(write_str(file, m_message.str, m_message.length)); +} + + diff --git a/sql/log_event.h b/sql/log_event.h index 7cbe8925d9a..51543291621 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -22,6 +22,7 @@ #endif #include <my_bitmap.h> +#include "rpl_constants.h" #define LOG_READ_EOF -1 #define LOG_READ_BOGUS -2 @@ -198,7 +199,7 @@ struct sql_ex_info #define TABLE_MAP_HEADER_LEN 8 #define EXECUTE_LOAD_QUERY_EXTRA_HEADER_LEN (4 + 4 + 4 + 1) #define EXECUTE_LOAD_QUERY_HEADER_LEN (QUERY_HEADER_LEN + EXECUTE_LOAD_QUERY_EXTRA_HEADER_LEN) - +#define INCIDENT_HEADER_LEN 2 /* Max number of possible extra bytes in a replication event compared to a packet (i.e. a query) sent from client to master; @@ -468,10 +469,28 @@ enum Log_event_type XID_EVENT= 16, BEGIN_LOAD_QUERY_EVENT= 17, EXECUTE_LOAD_QUERY_EVENT= 18, + TABLE_MAP_EVENT = 19, - WRITE_ROWS_EVENT = 20, - UPDATE_ROWS_EVENT = 21, - DELETE_ROWS_EVENT = 22, + + /* + These event numbers were used for 5.1.0 to 5.1.15 and are + therefore obsolete. + */ + PRE_GA_WRITE_ROWS_EVENT = 20, + PRE_GA_UPDATE_ROWS_EVENT = 21, + PRE_GA_DELETE_ROWS_EVENT = 22, + + /* + These event numbers are used from 5.1.16 and forward + */ + WRITE_ROWS_EVENT = 23, + UPDATE_ROWS_EVENT = 24, + DELETE_ROWS_EVENT = 25, + + /* + Something out of the ordinary happened on the master + */ + INCIDENT_EVENT= 26, /* Add new events here - right above this comment! @@ -503,6 +522,7 @@ class THD; class Format_description_log_event; struct st_relay_log_info; +typedef st_relay_log_info RELAY_LOG_INFO; #ifdef MYSQL_CLIENT /* @@ -591,6 +611,33 @@ typedef struct st_print_event_info class Log_event { public: + /** + Enumeration of what kinds of skipping (and non-skipping) that can + occur when the slave executes an event. + + @see shall_skip + @see do_shall_skip + */ + enum enum_skip_reason { + /** + Don't skip event. + */ + EVENT_SKIP_NOT, + + /** + Skip event by ignoring it. + + This means that the slave skip counter will not be changed. + */ + EVENT_SKIP_IGNORE, + + /** + Skip event and decrease skip counter. + */ + EVENT_SKIP_COUNT + }; + + /* The following type definition is to be used whenever data is placed and manipulated in a common buffer. Use this typedef for buffers @@ -672,16 +719,14 @@ public: static void init_show_field_list(List<Item>* field_list); #ifdef HAVE_REPLICATION int net_send(Protocol *protocol, const char* log_name, my_off_t pos); + /* pack_info() is used by SHOW BINLOG EVENTS; as print() it prepares and sends a string to display to the user, so it resembles print(). */ + virtual void pack_info(Protocol *protocol); - /* - The SQL slave thread calls exec_event() to execute the event; this is where - the slave's data is modified. - */ - virtual int exec_event(struct st_relay_log_info* rli); + #endif /* HAVE_REPLICATION */ virtual const char* get_db() { @@ -754,6 +799,127 @@ public: *description_event); /* returns the human readable name of the event's type */ const char* get_type_str(); + +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) +public: + + /** + Apply the event to the database. 
+ + This function represents the public interface for applying an + event. + + @see do_apply_event + */ + int apply_event(RELAY_LOG_INFO const *rli) { + return do_apply_event(rli); + } + + + /** + Update the relay log position. + + This function represents the public interface for "stepping over" + the event and will update the relay log information. + + @see do_update_pos + */ + int update_pos(RELAY_LOG_INFO *rli) + { + return do_update_pos(rli); + } + + /** + Decide if the event shall be skipped, and the reason for skipping + it. + + @see do_shall_skip + */ + enum_skip_reason shall_skip(RELAY_LOG_INFO *rli) + { + return do_shall_skip(rli); + } + +protected: + /** + Primitive to apply an event to the database. + + This is where the change to the database is made. + + @note The primitive is protected instead of private, since there + is a hierarchy of actions to be performed in some cases. + + @see Format_description_log_event::do_apply_event() + + @param rli Pointer to relay log info structure + + @retval 0 Event applied successfully + @retval errno Error code if event application failed + */ + virtual int do_apply_event(RELAY_LOG_INFO const *rli) + { + return 0; /* Default implementation does nothing */ + } + + + /** + Advance relay log coordinates. + + This function is called to advance the relay log coordinates to + just after the event. It is essential that both the relay log + coordinate and the group log position is updated correctly, since + this function is used also for skipping events. + + Normally, each implementation of do_update_pos() shall: + + - Update the event position to refer to the position just after + the event. + + - Update the group log position to refer to the position just + after the event <em>if the event is last in a group</em> + + @param rli Pointer to relay log info structure + + @retval 0 Coordinates changed successfully + @retval errno Error code if advancing failed (usually just + 1). Observe that handler errors are returned by the + do_apply_event() function, and not by this one. + */ + virtual int do_update_pos(RELAY_LOG_INFO *rli); + + + /** + Decide if this event shall be skipped or not and the reason for + skipping it. + + The default implementation decide that the event shall be skipped + if either: + + - the server id of the event is the same as the server id of the + server and <code>rli->replicate_same_server_id</code> is true, + or + + - if <code>rli->slave_skip_counter</code> is greater than zero. + + @see do_apply_event + @see do_update_pos + + @retval Log_event::EVENT_SKIP_NOT + The event shall not be skipped and should be applied. + + @retval Log_event::EVENT_SKIP_IGNORE + The event shall be skipped by just ignoring it, i.e., the slave + skip counter shall not be changed. This happends if, for example, + the originating server id of the event is the same as the server + id of the slave. + + @retval Log_event::EVENT_SKIP_COUNT + The event shall be skipped because the slave skip counter was + non-zero. The caller shall decrease the counter by one. + */ + virtual enum_skip_reason do_shall_skip(RELAY_LOG_INFO *rli); + +#endif }; /* @@ -794,10 +960,10 @@ public: uint16 error_code; ulong thread_id; /* - For events created by Query_log_event::exec_event (and - Load_log_event::exec_event()) we need the *original* thread id, to be able - to log the event with the original (=master's) thread id (fix for - BUG#1686). 
+ For events created by Query_log_event::do_apply_event (and + Load_log_event::do_apply_event()) we need the *original* thread + id, to be able to log the event with the original (=master's) + thread id (fix for BUG#1686). */ ulong slave_proxy_id; @@ -860,9 +1026,6 @@ public: const char* get_db() { return db; } #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); - int exec_event(struct st_relay_log_info* rli, const char *query_arg, - uint32 q_len_arg); #endif /* HAVE_REPLICATION */ #else void print_query_header(IO_CACHE* file, PRINT_EVENT_INFO* print_event_info); @@ -891,6 +1054,16 @@ public: */ virtual ulong get_post_header_size_for_derived() { return 0; } /* Writes derived event-specific part of post header. */ + +public: /* !!! Public in this patch to allow old usage */ +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); + virtual int do_update_pos(RELAY_LOG_INFO *rli); + + int do_apply_event(RELAY_LOG_INFO const *rli, + const char *query_arg, + uint32 q_len_arg); +#endif /* HAVE_REPLICATION */ }; @@ -939,9 +1112,8 @@ public: uint16 master_port; #ifndef MYSQL_CLIENT - Slave_log_event(THD* thd_arg, struct st_relay_log_info* rli); + Slave_log_event(THD* thd_arg, RELAY_LOG_INFO* rli); void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); #endif @@ -954,6 +1126,11 @@ public: #ifndef MYSQL_CLIENT bool write(IO_CACHE* file); #endif + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const* rli); +#endif }; #endif /* HAVE_REPLICATION */ @@ -1023,12 +1200,6 @@ public: const char* get_db() { return db; } #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli) - { - return exec_event(thd->slave_net,rli,0); - } - int exec_event(NET* net, struct st_relay_log_info* rli, - bool use_rli_only_for_errors); #endif /* HAVE_REPLICATION */ #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); @@ -1060,6 +1231,17 @@ public: + LOAD_HEADER_LEN + sql_ex.data_size() + field_block_len + num_fields); } + +public: /* !!! Public in this patch to allow old usage */ +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const* rli) + { + return do_apply_event(thd->slave_net,rli,0); + } + + int do_apply_event(NET *net, RELAY_LOG_INFO const *rli, + bool use_rli_only_for_errors); +#endif }; extern char server_version[SERVER_VERSION_LENGTH]; @@ -1117,7 +1299,6 @@ public: Start_log_event_v3(); #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else Start_log_event_v3() {} @@ -1137,6 +1318,22 @@ public: return START_V3_HEADER_LEN; //no variable-sized part } virtual bool is_artificial_event() { return artificial_event; } + +protected: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); + virtual enum_skip_reason do_shall_skip(RELAY_LOG_INFO*) + { + /* + Events from ourself should be skipped, but they should not + decrease the slave skip counter. 
+ */ + if (this->server_id == ::server_id) + return Log_event::EVENT_SKIP_IGNORE; + else + return Log_event::EVENT_SKIP_NOT; + } +#endif }; @@ -1162,13 +1359,6 @@ public: uchar server_version_split[3]; Format_description_log_event(uint8 binlog_ver, const char* server_ver=0); - -#ifndef MYSQL_CLIENT -#ifdef HAVE_REPLICATION - int exec_event(struct st_relay_log_info* rli); -#endif /* HAVE_REPLICATION */ -#endif - Format_description_log_event(const char* buf, uint event_len, const Format_description_log_event* description_event); ~Format_description_log_event() { my_free((gptr)post_header_len, MYF(0)); } @@ -1191,7 +1381,15 @@ public: */ return FORMAT_DESCRIPTION_HEADER_LEN; } + void calc_server_version_split(); + +protected: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); + virtual int do_update_pos(RELAY_LOG_INFO *rli); + virtual enum_skip_reason do_shall_skip(RELAY_LOG_INFO *rli); +#endif }; @@ -1215,7 +1413,6 @@ public: {} #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); @@ -1230,6 +1427,13 @@ public: bool write(IO_CACHE* file); #endif bool is_valid() const { return 1; } + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); + virtual int do_update_pos(RELAY_LOG_INFO *rli); + virtual enum_skip_reason do_shall_skip(RELAY_LOG_INFO *rli); +#endif }; @@ -1256,7 +1460,6 @@ class Rand_log_event: public Log_event {} #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); @@ -1270,6 +1473,13 @@ class Rand_log_event: public Log_event bool write(IO_CACHE* file); #endif bool is_valid() const { return 1; } + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); + virtual int do_update_pos(RELAY_LOG_INFO *rli); + virtual enum_skip_reason do_shall_skip(RELAY_LOG_INFO *rli); +#endif }; /***************************************************************************** @@ -1293,7 +1503,6 @@ class Xid_log_event: public Log_event Xid_log_event(THD* thd_arg, my_xid x): Log_event(thd_arg,0,0), xid(x) {} #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); @@ -1307,6 +1516,11 @@ class Xid_log_event: public Log_event bool write(IO_CACHE* file); #endif bool is_valid() const { return 1; } + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); +#endif }; /***************************************************************************** @@ -1336,7 +1550,6 @@ public: val_len(val_len_arg), type(type_arg), charset_number(charset_number_arg) { is_null= !val; } void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); #endif @@ -1348,6 +1561,13 @@ public: bool write(IO_CACHE* file); #endif bool is_valid() const { return 1; } + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); + virtual int do_update_pos(RELAY_LOG_INFO *rli); + virtual 
enum_skip_reason do_shall_skip(RELAY_LOG_INFO *rli); +#endif }; @@ -1362,7 +1582,6 @@ public: #ifndef MYSQL_CLIENT Stop_log_event() :Log_event() {} - int exec_event(struct st_relay_log_info* rli); #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); #endif @@ -1373,6 +1592,22 @@ public: ~Stop_log_event() {} Log_event_type get_type_code() { return STOP_EVENT;} bool is_valid() const { return 1; } + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_update_pos(RELAY_LOG_INFO *rli); + virtual enum_skip_reason do_shall_skip(RELAY_LOG_INFO *rli) + { + /* + Events from ourself should be skipped, but they should not + decrease the slave skip counter. + */ + if (this->server_id == ::server_id) + return Log_event::EVENT_SKIP_IGNORE; + else + return Log_event::EVENT_SKIP_NOT; + } +#endif }; /***************************************************************************** @@ -1399,7 +1634,6 @@ public: ulonglong pos_arg, uint flags); #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); @@ -1418,6 +1652,12 @@ public: #ifndef MYSQL_CLIENT bool write(IO_CACHE* file); #endif + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_update_pos(RELAY_LOG_INFO *rli); + virtual enum_skip_reason do_shall_skip(RELAY_LOG_INFO *rli); +#endif }; @@ -1452,7 +1692,6 @@ public: bool using_trans); #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); @@ -1486,6 +1725,11 @@ public: */ bool write_base(IO_CACHE* file); #endif + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); +#endif }; @@ -1518,7 +1762,6 @@ public: Append_block_log_event(THD* thd, const char* db_arg, char* block_arg, uint block_len_arg, bool using_trans); #ifdef HAVE_REPLICATION - int exec_event(struct st_relay_log_info* rli); void pack_info(Protocol* protocol); virtual int get_create_or_append() const; #endif /* HAVE_REPLICATION */ @@ -1536,6 +1779,11 @@ public: bool write(IO_CACHE* file); const char* get_db() { return db; } #endif + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); +#endif }; @@ -1555,7 +1803,6 @@ public: Delete_file_log_event(THD* thd, const char* db_arg, bool using_trans); #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); @@ -1572,6 +1819,11 @@ public: bool write(IO_CACHE* file); const char* get_db() { return db; } #endif + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); +#endif }; @@ -1591,7 +1843,6 @@ public: Execute_load_log_event(THD* thd, const char* db_arg, bool using_trans); #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); @@ -1607,6 +1858,11 @@ public: bool write(IO_CACHE* file); const char* get_db() { return db; } #endif + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO 
const *rli); +#endif }; @@ -1676,7 +1932,6 @@ public: bool using_trans, bool suppress_use); #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); @@ -1695,7 +1950,12 @@ public: #ifndef MYSQL_CLIENT bool write_post_header_for_derived(IO_CACHE* file); #endif - }; + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); +#endif +}; #ifdef MYSQL_CLIENT @@ -1793,7 +2053,6 @@ public: #endif #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) - virtual int exec_event(struct st_relay_log_info *rli); virtual void pack_info(Protocol *protocol); #endif @@ -1803,6 +2062,11 @@ public: private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); + virtual int do_update_pos(RELAY_LOG_INFO *rli); +#endif + #ifndef MYSQL_CLIENT TABLE *m_table; #endif @@ -1886,7 +2150,6 @@ public: flag_set get_flags(flag_set flags) const { return m_flags & flags; } #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) - virtual int exec_event(struct st_relay_log_info *rli); virtual void pack_info(Protocol *protocol); #endif @@ -1903,14 +2166,7 @@ public: #endif /* Member functions to implement superclass interface */ - virtual int get_data_size() - { - DBUG_EXECUTE_IF("old_row_based_repl_4_byte_map_id_master", - return 6 + 1 + no_bytes_in_map(&m_cols) + - (m_rows_cur - m_rows_buf);); - return ROWS_HEADER_LEN + 1 + no_bytes_in_map(&m_cols) + - (m_rows_cur - m_rows_buf); - } + virtual int get_data_size(); MY_BITMAP const *get_cols() const { return &m_cols; } my_size_t get_width() const { return m_width; } @@ -1921,9 +2177,14 @@ public: virtual bool write_data_body(IO_CACHE *file); virtual const char *get_db() { return m_table->s->db.str; } #endif + /* + Check that malloc() succeeded in allocating memory for the rows + buffer and the COLS vector. Checking that an Update_rows_log_event + is valid is done in the Update_rows_log_event::is_valid() + function. + */ virtual bool is_valid() const { - /* that's how we check malloc() succeeded */ return m_rows_buf && m_cols.bitmap; } @@ -1956,10 +2217,20 @@ protected: ulong m_table_id; /* Table ID */ MY_BITMAP m_cols; /* Bitmap denoting columns available */ ulong m_width; /* The width of the columns bitmap */ + /* + Bitmap for columns available in the after image, if present. These + fields are only available for Update_rows events. Observe that the + width of both the before image COLS vector and the after image + COLS vector is the same: the number of columns of the table on the + master. + */ + MY_BITMAP m_cols_ai; + ulong m_master_reclength; /* Length of record on master side */ - /* Bit buffer in the same memory as the class */ + /* Bit buffers in the same memory as the class */ uint32 m_bitbuf[128/(sizeof(uint32)*8)]; + uint32 m_bitbuf_ai[128/(sizeof(uint32)*8)]; byte *m_rows_buf; /* The rows in packed format */ byte *m_rows_cur; /* One-after the end of the data */ @@ -1970,6 +2241,8 @@ protected: private: #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); + /* Primitive to prepare for a sequence of row executions. @@ -2017,7 +2290,7 @@ private: RETURN VALUE Error code, if something went wrong, 0 otherwise. 
*/ - virtual int do_prepare_row(THD*, RELAY_LOG_INFO*, TABLE*, + virtual int do_prepare_row(THD*, RELAY_LOG_INFO const*, TABLE*, char const *row_start, char const **row_end) = 0; /* @@ -2088,7 +2361,7 @@ private: virtual int do_before_row_operations(TABLE *table); virtual int do_after_row_operations(TABLE *table, int error); - virtual int do_prepare_row(THD*, RELAY_LOG_INFO*, TABLE*, + virtual int do_prepare_row(THD*, RELAY_LOG_INFO const*, TABLE*, char const *row_start, char const **row_end); virtual int do_exec_row(TABLE *table); #endif @@ -2117,10 +2390,20 @@ public: }; #ifndef MYSQL_CLIENT - Update_rows_log_event(THD*, TABLE*, ulong table_id, - MY_BITMAP const *cols, bool is_transactional); + Update_rows_log_event(THD*, TABLE*, ulong table_id, + MY_BITMAP const *cols_bi, + MY_BITMAP const *cols_ai, + bool is_transactional); + + Update_rows_log_event(THD*, TABLE*, ulong table_id, + MY_BITMAP const *cols, + bool is_transactional); + + void init(MY_BITMAP const *cols); #endif + virtual ~Update_rows_log_event(); + #ifdef HAVE_REPLICATION Update_rows_log_event(const char *buf, uint event_len, const Format_description_log_event *description_event); @@ -2139,6 +2422,11 @@ public: } #endif + virtual bool is_valid() const + { + return Rows_log_event::is_valid() && m_cols_ai.bitmap; + } + private: virtual Log_event_type get_type_code() { return (Log_event_type)TYPE_CODE; } @@ -2153,7 +2441,7 @@ private: virtual int do_before_row_operations(TABLE *table); virtual int do_after_row_operations(TABLE *table, int error); - virtual int do_prepare_row(THD*, RELAY_LOG_INFO*, TABLE*, + virtual int do_prepare_row(THD*, RELAY_LOG_INFO const*, TABLE*, char const *row_start, char const **row_end); virtual int do_exec_row(TABLE *table); #endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */ @@ -2224,10 +2512,108 @@ private: virtual int do_before_row_operations(TABLE *table); virtual int do_after_row_operations(TABLE *table, int error); - virtual int do_prepare_row(THD*, RELAY_LOG_INFO*, TABLE*, + virtual int do_prepare_row(THD*, RELAY_LOG_INFO const*, TABLE*, char const *row_start, char const **row_end); virtual int do_exec_row(TABLE *table); #endif }; + +/** + Class representing an incident, an occurance out of the ordinary, + that happened on the master. + + The event is used to inform the slave that something out of the + ordinary happened on the master that might cause the database to be + in an inconsistent state. + + <table id="IncidentFormat"> + <caption>Incident event format</caption> + <tr> + <th>Symbol</th> + <th>Size<br/>(bytes)</th> + <th>Description</th> + </tr> + <tr> + <td>INCIDENT</td> + <td align="right">2</td> + <td>Incident number as an unsigned integer</td> + </tr> + <tr> + <td>MSGLEN</td> + <td align="right">1</td> + <td>Message length as an unsigned integer</td> + </tr> + <tr> + <td>MESSAGE</td> + <td align="right">MSGLEN</td> + <td>The message, if present. 
Not null terminated.</td> + </tr> + </table> + */ +class Incident_log_event : public Log_event { +public: +#ifndef MYSQL_CLIENT + Incident_log_event(THD *thd_arg, Incident incident) + : Log_event(thd_arg, 0, FALSE), m_incident(incident) + { + DBUG_ENTER("Incident_log_event::Incident_log_event"); + DBUG_PRINT("enter", ("m_incident: %d", m_incident)); + m_message.str= NULL; /* Just as a precaution */ + m_message.length= 0; + DBUG_VOID_RETURN; + } + + Incident_log_event(THD *thd_arg, Incident incident, LEX_STRING const msg) + : Log_event(thd_arg, 0, FALSE), m_incident(incident) + { + DBUG_ENTER("Incident_log_event::Incident_log_event"); + DBUG_PRINT("enter", ("m_incident: %d", m_incident)); + m_message= msg; + DBUG_VOID_RETURN; + } +#endif + +#ifndef MYSQL_CLIENT + void pack_info(Protocol*); +#endif + + Incident_log_event(const char *buf, uint event_len, + const Format_description_log_event *descr_event); + + virtual ~Incident_log_event(); + +#ifdef MYSQL_CLIENT + virtual void print(FILE *file, PRINT_EVENT_INFO *print_event_info); +#endif + +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); +#endif + + virtual bool write_data_header(IO_CACHE *file); + virtual bool write_data_body(IO_CACHE *file); + + virtual Log_event_type get_type_code() { return INCIDENT_EVENT; } + + virtual bool is_valid() const { return 1; } + virtual int get_data_size() { + return INCIDENT_HEADER_LEN + 1 + m_message.length; + } + +private: + const char *description() const; + + Incident m_incident; + LEX_STRING m_message; +}; + +static inline bool copy_event_cache_to_file_and_reinit(IO_CACHE *cache, + FILE *file) +{ + return + my_b_copy_to_file(cache, file) || + reinit_io_cache(cache, WRITE_CACHE, 0, FALSE, TRUE); +} + #endif /* _log_event_h */ diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index f807dcdf21a..a481ac5c18c 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -410,8 +410,9 @@ MY_LOCALE *my_locale_by_number(uint number); updated (to store more bytes on disk). 
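The table above fixes the on-disk layout of the new incident event, while the actual write_data_header()/write_data_body() definitions live in log_event.cc and are not part of this hunk. A compact sketch of the bytes they have to produce, using the existing int2store()/my_b_safe_write() primitives and ignoring how the work is split between header and body (the helper name is made up, and it assumes msg.length fits the 1-byte MSGLEN field):

    /* Illustration only: serialize an incident record in the layout given
       by the table above (2-byte incident number, 1-byte length, message). */
    static bool write_incident_record(IO_CACHE *file, uint16 incident,
                                      LEX_STRING const &msg)
    {
      byte buf[3];
      int2store(buf, incident);                 /* INCIDENT: 2 bytes        */
      buf[2]= (byte) msg.length;                /* MSGLEN:   1 byte         */
      if (my_b_safe_write(file, buf, sizeof(buf)))
        return 1;                               /* write error              */
      /* MESSAGE: MSGLEN bytes, not null-terminated, may be absent */
      return msg.length && my_b_safe_write(file, (byte*) msg.str, msg.length);
    }

This is consistent with get_data_size() returning INCIDENT_HEADER_LEN + 1 + m_message.length in the class declaration above.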
NOTE: When adding new SQL_MODE types, make sure to also add them to - ../scripts/mysql_create_system_tables.sh and - ../scripts/mysql_fix_privilege_tables.sql + the scripts used for creating the MySQL system tables + in scripts/mysql_system_tables.sql and scripts/mysql_system_tables_fix.sql + */ #define RAID_BLOCK_SIZE 1024 @@ -1444,6 +1445,9 @@ void close_system_tables(THD *thd, Open_tables_state *backup); TABLE *open_system_table_for_update(THD *thd, TABLE_LIST *one_table); bool close_cached_tables(THD *thd, bool wait_for_refresh, TABLE_LIST *tables, bool have_lock = FALSE); +bool close_cached_connection_tables(THD *thd, bool wait_for_refresh, + LEX_STRING *connect_string, + bool have_lock = FALSE); void copy_field_from_tmp_record(Field *field,int offset); bool fill_record(THD *thd, Field **field, List<Item> &values, bool ignore_errors); diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 6364eed5c57..47f8e17dd93 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -195,12 +195,6 @@ inline void reset_floating_point_exceptions() } /* cplusplus */ - -#if defined(HAVE_LINUXTHREADS) -#define THR_KILL_SIGNAL SIGINT -#else -#define THR_KILL_SIGNAL SIGUSR2 // Can't use this with LinuxThreads -#endif #define MYSQL_KILL_SIGNAL SIGTERM #ifdef HAVE_GLIBC2_STYLE_GETHOSTBYNAME_R @@ -638,6 +632,7 @@ struct rand_struct sql_rand; // used by sql_class.cc:THD::THD() #ifndef EMBEDDED_LIBRARY struct passwd *user_info; static pthread_t select_thread; +static uint thr_kill_signal; #endif /* OS specific variables */ @@ -788,7 +783,7 @@ static void close_connections(void) DBUG_PRINT("info",("Waiting for select thread")); #ifndef DONT_USE_THR_ALARM - if (pthread_kill(select_thread,THR_CLIENT_ALARM)) + if (pthread_kill(select_thread, thr_client_alarm)) break; // allready dead #endif set_timespec(abstime, 2); @@ -2294,7 +2289,9 @@ static void init_signals(void) DBUG_ENTER("init_signals"); if (test_flags & TEST_SIGINT) - my_sigset(THR_KILL_SIGNAL,end_thread_signal); + { + my_sigset(thr_kill_signal, end_thread_signal); + } my_sigset(THR_SERVER_ALARM,print_signal_warning); // Should never be called! 
if (!(test_flags & TEST_NO_STACKTRACE) || (test_flags & TEST_CORE_ON_SIGNAL)) @@ -2349,10 +2346,13 @@ static void init_signals(void) #ifdef SIGTSTP sigaddset(&set,SIGTSTP); #endif - sigaddset(&set,THR_SERVER_ALARM); + if (thd_lib_detected != THD_LIB_LT) + sigaddset(&set,THR_SERVER_ALARM); if (test_flags & TEST_SIGINT) - sigdelset(&set,THR_KILL_SIGNAL); // May be SIGINT - sigdelset(&set,THR_CLIENT_ALARM); // For alarms + { + // May be SIGINT + sigdelset(&set, thr_kill_signal); + } sigprocmask(SIG_SETMASK,&set,NULL); pthread_sigmask(SIG_SETMASK,&set,NULL); DBUG_VOID_RETURN; @@ -2415,24 +2415,20 @@ pthread_handler_t signal_hand(void *arg __attribute__((unused))) */ init_thr_alarm(thread_scheduler.max_threads + global_system_variables.max_insert_delayed_threads + 10); -#if SIGINT != THR_KILL_SIGNAL - if (test_flags & TEST_SIGINT) + if (thd_lib_detected != THD_LIB_LT && (test_flags & TEST_SIGINT)) { (void) sigemptyset(&set); // Setup up SIGINT for debug (void) sigaddset(&set,SIGINT); // For debugging (void) pthread_sigmask(SIG_UNBLOCK,&set,NULL); } -#endif (void) sigemptyset(&set); // Setup up SIGINT for debug #ifdef USE_ONE_SIGNAL_HAND (void) sigaddset(&set,THR_SERVER_ALARM); // For alarms #endif #ifndef IGNORE_SIGHUP_SIGQUIT (void) sigaddset(&set,SIGQUIT); -#if THR_CLIENT_ALARM != SIGHUP (void) sigaddset(&set,SIGHUP); #endif -#endif (void) sigaddset(&set,SIGTERM); (void) sigaddset(&set,SIGTSTP); @@ -3626,6 +3622,13 @@ int main(int argc, char **argv) MY_INIT(argv[0]); // init my_sys library & pthreads /* nothing should come before this line ^^^ */ + /* Set signal used to kill MySQL */ +#if defined(SIGUSR2) + thr_kill_signal= thd_lib_detected == THD_LIB_LT ? SIGINT : SIGUSR2; +#else + thr_kill_signal= SIGINT; +#endif + /* Perform basic logger initialization logger. Should be called after MY_INIT, as it initializes mutexes. Log tables are inited later. 
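For readers without my_pthread.h at hand: my_sigset() is, on most platforms, a thin wrapper around sigset()/sigaction(), so the runtime selection of thr_kill_signal in main() simply decides which signal end_thread_signal gets installed on. The signal differs per thread library because LinuxThreads historically reserves SIGUSR1/SIGUSR2 for its own thread management, leaving SIGINT as the safe choice there. A rough, illustrative equivalent of the wrapper (not the actual macro expansion):

    #include <signal.h>

    /* Roughly what my_sigset(sig, handler) does on sigaction() platforms. */
    static void my_sigset_sketch(int sig, void (*handler)(int))
    {
      struct sigaction sa;
      sigemptyset(&sa.sa_mask);
      sa.sa_flags= 0;
      sa.sa_handler= handler;
      sigaction(sig, &sa, NULL);
    }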
@@ -6262,7 +6265,7 @@ The minimum value for this variable is 4096.", (gptr*) &global_system_variables.net_write_timeout, (gptr*) &max_system_variables.net_write_timeout, 0, GET_ULONG, REQUIRED_ARG, NET_WRITE_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0}, - { "old_mode", OPT_OLD_MODE, "Use compatible behaviour.", + { "old", OPT_OLD_MODE, "Use compatible behavior.", (gptr*) &global_system_variables.old_mode, (gptr*) &max_system_variables.old_mode, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, @@ -6445,12 +6448,12 @@ The minimum value for this variable is 4096.", (gptr*) &max_system_variables.tmp_table_size, 0, GET_ULL, REQUIRED_ARG, 16*1024*1024L, 1024, MAX_MEM_TABLE_SIZE, 0, 1, 0}, {"transaction_alloc_block_size", OPT_TRANS_ALLOC_BLOCK_SIZE, - "Allocation block size for transactions to be stored in binary log", + "Allocation block size for various transaction-related structures", (gptr*) &global_system_variables.trans_alloc_block_size, (gptr*) &max_system_variables.trans_alloc_block_size, 0, GET_ULONG, REQUIRED_ARG, QUERY_ALLOC_BLOCK_SIZE, 1024, ~0L, 0, 1024, 0}, {"transaction_prealloc_size", OPT_TRANS_PREALLOC_SIZE, - "Persistent buffer for transactions to be stored in binary log", + "Persistent buffer for various transaction-related structures", (gptr*) &global_system_variables.trans_prealloc_size, (gptr*) &max_system_variables.trans_prealloc_size, 0, GET_ULONG, REQUIRED_ARG, TRANS_ALLOC_PREALLOC_SIZE, 1024, ~0L, 0, 1024, 0}, @@ -6723,12 +6726,20 @@ static int show_ssl_ctx_get_session_cache_mode(THD *thd, SHOW_VAR *var, char *bu return 0; } -/* Functions relying on SSL */ +/* + Functions relying on SSL + Note: In the show_ssl_* functions, we need to check if we have a + valid vio-object since this isn't always true, specifically + when session_status or global_status is requested from + inside an Event. + */ static int show_ssl_get_version(THD *thd, SHOW_VAR *var, char *buff) { var->type= SHOW_CHAR; - var->value= const_cast<char*>(thd->net.vio->ssl_arg ? - SSL_get_version((SSL*) thd->net.vio->ssl_arg) : ""); + if( thd->vio_ok() && thd->net.vio->ssl_arg ) + var->value= const_cast<char*>(SSL_get_version((SSL*) thd->net.vio->ssl_arg)); + else + var->value= (char *)""; return 0; } @@ -6736,9 +6747,10 @@ static int show_ssl_session_reused(THD *thd, SHOW_VAR *var, char *buff) { var->type= SHOW_LONG; var->value= buff; - *((long *)buff)= (long)thd->net.vio->ssl_arg ? - SSL_session_reused((SSL*) thd->net.vio->ssl_arg) : - 0; + if( thd->vio_ok() && thd->net.vio->ssl_arg ) + *((long *)buff)= (long)SSL_session_reused((SSL*) thd->net.vio->ssl_arg); + else + *((long *)buff)= 0; return 0; } @@ -6746,9 +6758,10 @@ static int show_ssl_get_default_timeout(THD *thd, SHOW_VAR *var, char *buff) { var->type= SHOW_LONG; var->value= buff; - *((long *)buff)= (long)thd->net.vio->ssl_arg ? - SSL_get_default_timeout((SSL*)thd->net.vio->ssl_arg) : - 0; + if( thd->vio_ok() && thd->net.vio->ssl_arg ) + *((long *)buff)= (long)SSL_get_default_timeout((SSL*)thd->net.vio->ssl_arg); + else + *((long *)buff)= 0; return 0; } @@ -6756,9 +6769,10 @@ static int show_ssl_get_verify_mode(THD *thd, SHOW_VAR *var, char *buff) { var->type= SHOW_LONG; var->value= buff; - *((long *)buff)= (long)thd->net.vio->ssl_arg ? 
- SSL_get_verify_mode((SSL*)thd->net.vio->ssl_arg) : - 0; + if( thd->net.vio && thd->net.vio->ssl_arg ) + *((long *)buff)= (long)SSL_get_verify_mode((SSL*)thd->net.vio->ssl_arg); + else + *((long *)buff)= 0; return 0; } @@ -6766,17 +6780,20 @@ static int show_ssl_get_verify_depth(THD *thd, SHOW_VAR *var, char *buff) { var->type= SHOW_LONG; var->value= buff; - *((long *)buff)= (long)thd->net.vio->ssl_arg ? - SSL_get_verify_depth((SSL*)thd->net.vio->ssl_arg) : - 0; + if( thd->vio_ok() && thd->net.vio->ssl_arg ) + *((long *)buff)= (long)SSL_get_verify_depth((SSL*)thd->net.vio->ssl_arg); + else + *((long *)buff)= 0; return 0; } static int show_ssl_get_cipher(THD *thd, SHOW_VAR *var, char *buff) { var->type= SHOW_CHAR; - var->value= const_cast<char*>(thd->net.vio->ssl_arg ? - SSL_get_cipher((SSL*) thd->net.vio->ssl_arg) : ""); + if( thd->vio_ok() && thd->net.vio->ssl_arg ) + var->value= const_cast<char*>(SSL_get_cipher((SSL*) thd->net.vio->ssl_arg)); + else + var->value= (char *)""; return 0; } @@ -6784,7 +6801,7 @@ static int show_ssl_get_cipher_list(THD *thd, SHOW_VAR *var, char *buff) { var->type= SHOW_CHAR; var->value= buff; - if (thd->net.vio->ssl_arg) + if (thd->vio_ok() && thd->net.vio->ssl_arg) { int i; const char *p; diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 52faaf25b42..f5cf79bd609 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -84,7 +84,7 @@ static int sel_cmp(Field *f,char *a,char *b,uint8 a_flag,uint8 b_flag); static char is_null_string[2]= {1,0}; - +class RANGE_OPT_PARAM; /* A construction block of the SEL_ARG-graph. @@ -170,6 +170,89 @@ static char is_null_string[2]= {1,0}; - get_quick_select() - Walk the SEL_ARG, materialize the key intervals, and create QUICK_RANGE_SELECT object that will read records within these intervals. + + 4. SPACE COMPLEXITY NOTES + + SEL_ARG graph is a representation of an ordered disjoint sequence of + intervals over the ordered set of index tuple values. + + For multi-part keys, one can construct a WHERE expression such that its + list of intervals will be of combinatorial size. Here is an example: + + (keypart1 IN (1,2, ..., n1)) AND + (keypart2 IN (1,2, ..., n2)) AND + (keypart3 IN (1,2, ..., n3)) + + For this WHERE clause the list of intervals will have n1*n2*n3 intervals + of form + + (keypart1, keypart2, keypart3) = (k1, k2, k3), where 1 <= k{i} <= n{i} + + SEL_ARG graph structure aims to reduce the amount of required space by + "sharing" the elementary intervals when possible (the pic at the + beginning of this comment has examples of such sharing). The sharing may + prevent combinatorial blowup: + + There are WHERE clauses that have combinatorial-size interval lists but + will be represented by a compact SEL_ARG graph. + Example: + (keypartN IN (1,2, ..., n1)) AND + ... + (keypart2 IN (1,2, ..., n2)) AND + (keypart1 IN (1,2, ..., n3)) + + but not in all cases: + + - There are WHERE clauses that do have a compact SEL_ARG-graph + representation but get_mm_tree() and its callees will construct a + graph of combinatorial size. + Example: + (keypart1 IN (1,2, ..., n1)) AND + (keypart2 IN (1,2, ..., n2)) AND + ... + (keypartN IN (1,2, ..., n3)) + + - There are WHERE clauses for which the minimal possible SEL_ARG graph + representation will have combinatorial size. 
+ Example: + By induction: Let's take any interval on some keypart in the middle: + + kp15=c0 + + Then let's AND it with this interval 'structure' from preceding and + following keyparts: + + (kp14=c1 AND kp16=c3) OR keypart14=c2) (*) + + We will obtain this SEL_ARG graph: + + kp14 $ kp15 $ kp16 + $ $ + +---------+ $ +---------+ $ +---------+ + | kp14=c1 |--$-->| kp15=c0 |--$-->| kp16=c3 | + +---------+ $ +---------+ $ +---------+ + | $ $ + +---------+ $ +---------+ $ + | kp14=c2 |--$-->| kp15=c0 | $ + +---------+ $ +---------+ $ + $ $ + + Note that we had to duplicate "kp15=c0" and there was no way to avoid + that. + The induction step: AND the obtained expression with another "wrapping" + expression like (*). + When the process ends because of the limit on max. number of keyparts + we'll have: + + WHERE clause length is O(3*#max_keyparts) + SEL_ARG graph size is O(2^(#max_keyparts/2)) + + (it is also possible to construct a case where instead of 2 in 2^n we + have a bigger constant, e.g. 4, and get a graph with 4^(31/2)= 2^31 + nodes) + + We avoid consuming too much memory by setting a limit on the number of + SEL_ARG object we can construct during one range analysis invocation. */ class SEL_ARG :public Sql_alloc @@ -200,6 +283,8 @@ public: enum leaf_color { BLACK,RED } color; enum Type { IMPOSSIBLE, MAYBE, MAYBE_KEY, KEY_RANGE } type; + enum { MAX_SEL_ARGS = 16000 }; + SEL_ARG() {} SEL_ARG(SEL_ARG &); SEL_ARG(Field *,const char *,const char *); @@ -271,7 +356,7 @@ public: return new SEL_ARG(field, part, min_value, arg->max_value, min_flag, arg->max_flag, maybe_flag | arg->maybe_flag); } - SEL_ARG *clone(SEL_ARG *new_parent,SEL_ARG **next); + SEL_ARG *clone(RANGE_OPT_PARAM *param, SEL_ARG *new_parent, SEL_ARG **next); bool copy_min(SEL_ARG* arg) { // Get overlapping range @@ -423,7 +508,6 @@ public: { return parent->left == this ? 
&parent->left : &parent->right; } - SEL_ARG *clone_tree(); /* @@ -466,6 +550,7 @@ public: } return !field->key_cmp(min_val, max_val); } + SEL_ARG *clone_tree(RANGE_OPT_PARAM *param); }; class SEL_IMERGE; @@ -545,6 +630,8 @@ public: using_real_indexes==TRUE */ uint real_keynr[MAX_KEY]; + /* Number of SEL_ARG objects allocated by SEL_ARG::clone_tree operations */ + uint alloced_sel_args; }; class PARAM : public RANGE_OPT_PARAM @@ -634,8 +721,9 @@ static void print_quick(QUICK_SELECT_I *quick, const key_map *needed_reg); static SEL_TREE *tree_and(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2); static SEL_TREE *tree_or(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2); static SEL_ARG *sel_add(SEL_ARG *key1,SEL_ARG *key2); -static SEL_ARG *key_or(SEL_ARG *key1,SEL_ARG *key2); -static SEL_ARG *key_and(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag); +static SEL_ARG *key_or(RANGE_OPT_PARAM *param, SEL_ARG *key1, SEL_ARG *key2); +static SEL_ARG *key_and(RANGE_OPT_PARAM *param, SEL_ARG *key1, SEL_ARG *key2, + uint clone_flag); static bool get_range(SEL_ARG **e1,SEL_ARG **e2,SEL_ARG *root1); bool get_quick_keys(PARAM *param,QUICK_RANGE_SELECT *quick,KEY_PART *key, SEL_ARG *key_tree,char *min_key,uint min_key_flag, @@ -860,6 +948,7 @@ int imerge_list_or_tree(RANGE_OPT_PARAM *param, return im1->is_empty(); } + /*************************************************************************** ** Basic functions for SQL_SELECT and QUICK_RANGE_SELECT ***************************************************************************/ @@ -1510,12 +1599,18 @@ SEL_ARG::SEL_ARG(Field *field_,uint8 part_,char *min_value_,char *max_value_, left=right= &null_element; } -SEL_ARG *SEL_ARG::clone(SEL_ARG *new_parent,SEL_ARG **next_arg) +SEL_ARG *SEL_ARG::clone(RANGE_OPT_PARAM *param, SEL_ARG *new_parent, + SEL_ARG **next_arg) { SEL_ARG *tmp; + + /* Bail out if we have already generated too many SEL_ARGs */ + if (++param->alloced_sel_args > MAX_SEL_ARGS) + return 0; + if (type != KEY_RANGE) { - if (!(tmp= new SEL_ARG(type))) + if (!(tmp= new (param->mem_root) SEL_ARG(type))) return 0; // out of memory tmp->prev= *next_arg; // Link into next/prev chain (*next_arg)->next=tmp; @@ -1523,20 +1618,21 @@ SEL_ARG *SEL_ARG::clone(SEL_ARG *new_parent,SEL_ARG **next_arg) } else { - if (!(tmp= new SEL_ARG(field,part, min_value,max_value, - min_flag, max_flag, maybe_flag))) + if (!(tmp= new (param->mem_root) SEL_ARG(field,part, min_value,max_value, + min_flag, max_flag, maybe_flag))) return 0; // OOM tmp->parent=new_parent; tmp->next_key_part=next_key_part; if (left != &null_element) - tmp->left=left->clone(tmp,next_arg); + if (!(tmp->left=left->clone(param, tmp, next_arg))) + return 0; // OOM tmp->prev= *next_arg; // Link into next/prev chain (*next_arg)->next=tmp; (*next_arg)= tmp; if (right != &null_element) - if (!(tmp->right= right->clone(tmp,next_arg))) + if (!(tmp->right= right->clone(param, tmp, next_arg))) return 0; // OOM } increment_use_count(1); @@ -1614,11 +1710,12 @@ static int sel_cmp(Field *field, char *a,char *b,uint8 a_flag,uint8 b_flag) } -SEL_ARG *SEL_ARG::clone_tree() +SEL_ARG *SEL_ARG::clone_tree(RANGE_OPT_PARAM *param) { SEL_ARG tmp_link,*next_arg,*root; next_arg= &tmp_link; - root= clone((SEL_ARG *) 0, &next_arg); + if (!(root= clone(param, (SEL_ARG *) 0, &next_arg))) + return 0; next_arg->next=0; // Fix last link tmp_link.next->prev=0; // Fix first link if (root) // If not OOM @@ -2120,6 +2217,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, param.real_keynr[param.keys++]=idx; } 
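To put numbers on the space-complexity note above: for the AND-of-IN-lists example the interval count is the product of the list sizes, so three IN-lists of 100 constants each already describe 1,000,000 point intervals. The new SEL_ARG::MAX_SEL_ARGS cap of 16000 that clone() now enforces counts cloned SEL_ARG objects rather than final intervals, but the orders of magnitude are what matter: once the cap is exceeded, get_mm_tree() abandons SEL_ARG construction for the predicate and the optimizer falls back to other access methods. A throwaway calculation, not part of the patch:

    #include <stdio.h>

    int main()
    {
      /* IN-list sizes for keypart1..keypart3 in the example above. */
      unsigned long n1= 100, n2= 100, n3= 100;
      unsigned long intervals= n1 * n2 * n3;    /* 1,000,000 point intervals */
      unsigned long cap= 16000;                 /* SEL_ARG::MAX_SEL_ARGS     */
      printf("%lu intervals vs. cap %lu -> SEL_ARG construction abandoned: %s\n",
             intervals, cap, intervals > cap ? "yes" : "no");
      return 0;
    }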
param.key_parts_end=key_parts; + param.alloced_sel_args= 0; /* Calculate cost of full index read for the shortest covering index */ if (!head->covering_keys.is_clear_all()) @@ -2512,6 +2610,7 @@ bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond) range_par->using_real_indexes= FALSE; range_par->remove_jump_scans= FALSE; range_par->real_keynr[0]= 0; + range_par->alloced_sel_args= 0; thd->no_errors=1; // Don't warn about NULL thd->mem_root=&alloc; @@ -5242,7 +5341,8 @@ static SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param,COND *cond) while ((item=li++)) { SEL_TREE *new_tree=get_mm_tree(param,item); - if (param->thd->is_fatal_error) + if (param->thd->is_fatal_error || + param->alloced_sel_args > SEL_ARG::MAX_SEL_ARGS) DBUG_RETURN(0); // out of memory tree=tree_and(param,tree,new_tree); if (tree && tree->type == SEL_TREE::IMPOSSIBLE) @@ -5818,9 +5918,9 @@ tree_and(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2) tree1->type=SEL_TREE::KEY_SMALLER; DBUG_RETURN(tree1); } - key_map result_keys; result_keys.clear_all(); + /* Join the trees key per key */ SEL_ARG **key1,**key2,**end; for (key1= tree1->keys,key2= tree2->keys,end=key1+param->keys ; @@ -5833,7 +5933,7 @@ tree_and(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2) flag|=CLONE_KEY1_MAYBE; if (*key2 && !(*key2)->simple_key()) flag|=CLONE_KEY2_MAYBE; - *key1=key_and(*key1,*key2,flag); + *key1=key_and(param, *key1, *key2, flag); if (*key1 && (*key1)->type == SEL_ARG::IMPOSSIBLE) { tree1->type= SEL_TREE::IMPOSSIBLE; @@ -5841,8 +5941,8 @@ tree_and(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2) } result_keys.set_bit(key1 - tree1->keys); #ifdef EXTRA_DEBUG - if (*key1) - (*key1)->test_use_count(*key1); + if (*key1 && param->alloced_sel_args < SEL_ARG::MAX_SEL_ARGS) + (*key1)->test_use_count(*key1); #endif } } @@ -5994,13 +6094,14 @@ tree_or(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2) for (key1= tree1->keys,key2= tree2->keys,end= key1+param->keys ; key1 != end ; key1++,key2++) { - *key1=key_or(*key1,*key2); + *key1=key_or(param, *key1, *key2); if (*key1) { result=tree1; // Added to tree1 result_keys.set_bit(key1 - tree1->keys); #ifdef EXTRA_DEBUG - (*key1)->test_use_count(*key1); + if (param->alloced_sel_args < SEL_ARG::MAX_SEL_ARGS) + (*key1)->test_use_count(*key1); #endif } } @@ -6058,14 +6159,15 @@ tree_or(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2) /* And key trees where key1->part < key2 -> part */ static SEL_ARG * -and_all_keys(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag) +and_all_keys(RANGE_OPT_PARAM *param, SEL_ARG *key1, SEL_ARG *key2, + uint clone_flag) { SEL_ARG *next; ulong use_count=key1->use_count; if (key1->elements != 1) { - key2->use_count+=key1->elements-1; + key2->use_count+=key1->elements-1; //psergey: why we don't count that key1 has n-k-p? 
key2->increment_use_count((int) key1->elements-1); } if (key1->type == SEL_ARG::MAYBE_KEY) @@ -6077,7 +6179,7 @@ and_all_keys(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag) { if (next->next_key_part) { - SEL_ARG *tmp=key_and(next->next_key_part,key2,clone_flag); + SEL_ARG *tmp= key_and(param, next->next_key_part, key2, clone_flag); if (tmp && tmp->type == SEL_ARG::IMPOSSIBLE) { key1=key1->tree_delete(next); @@ -6086,6 +6188,8 @@ and_all_keys(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag) next->next_key_part=tmp; if (use_count) next->increment_use_count(use_count); + if (param->alloced_sel_args > SEL_ARG::MAX_SEL_ARGS) + break; } else next->next_key_part=key2; @@ -6102,8 +6206,10 @@ and_all_keys(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag) SYNOPSIS key_and() - key1 First argument, root of its RB-tree - key2 Second argument, root of its RB-tree + param Range analysis context (needed to track if we have allocated + too many SEL_ARGs) + key1 First argument, root of its RB-tree + key2 Second argument, root of its RB-tree RETURN RB-tree root of the resulting SEL_ARG graph. @@ -6111,7 +6217,7 @@ and_all_keys(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag) */ static SEL_ARG * -key_and(SEL_ARG *key1, SEL_ARG *key2, uint clone_flag) +key_and(RANGE_OPT_PARAM *param, SEL_ARG *key1, SEL_ARG *key2, uint clone_flag) { if (!key1) return key2; @@ -6127,9 +6233,9 @@ key_and(SEL_ARG *key1, SEL_ARG *key2, uint clone_flag) // key1->part < key2->part key1->use_count--; if (key1->use_count > 0) - if (!(key1= key1->clone_tree())) + if (!(key1= key1->clone_tree(param))) return 0; // OOM - return and_all_keys(key1,key2,clone_flag); + return and_all_keys(param, key1, key2, clone_flag); } if (((clone_flag & CLONE_KEY2_MAYBE) && @@ -6147,14 +6253,14 @@ key_and(SEL_ARG *key1, SEL_ARG *key2, uint clone_flag) if (key1->use_count > 1) { key1->use_count--; - if (!(key1=key1->clone_tree())) + if (!(key1=key1->clone_tree(param))) return 0; // OOM key1->use_count++; } if (key1->type == SEL_ARG::MAYBE_KEY) { // Both are maybe key - key1->next_key_part=key_and(key1->next_key_part,key2->next_key_part, - clone_flag); + key1->next_key_part=key_and(param, key1->next_key_part, + key2->next_key_part, clone_flag); if (key1->next_key_part && key1->next_key_part->type == SEL_ARG::IMPOSSIBLE) return key1; @@ -6165,7 +6271,7 @@ key_and(SEL_ARG *key1, SEL_ARG *key2, uint clone_flag) if (key2->next_key_part) { key1->use_count--; // Incremented in and_all_keys - return and_all_keys(key1,key2,clone_flag); + return and_all_keys(param, key1, key2, clone_flag); } key2->use_count--; // Key2 doesn't have a tree } @@ -6201,7 +6307,8 @@ key_and(SEL_ARG *key1, SEL_ARG *key2, uint clone_flag) } else if (get_range(&e2,&e1,key2)) continue; - SEL_ARG *next=key_and(e1->next_key_part,e2->next_key_part,clone_flag); + SEL_ARG *next=key_and(param, e1->next_key_part, e2->next_key_part, + clone_flag); e1->increment_use_count(1); e2->increment_use_count(1); if (!next || next->type != SEL_ARG::IMPOSSIBLE) @@ -6249,7 +6356,7 @@ get_range(SEL_ARG **e1,SEL_ARG **e2,SEL_ARG *root1) static SEL_ARG * -key_or(SEL_ARG *key1,SEL_ARG *key2) +key_or(RANGE_OPT_PARAM *param, SEL_ARG *key1,SEL_ARG *key2) { if (!key1) { @@ -6297,7 +6404,7 @@ key_or(SEL_ARG *key1,SEL_ARG *key2) { swap_variables(SEL_ARG *,key1,key2); } - if (key1->use_count > 0 || !(key1=key1->clone_tree())) + if (key1->use_count > 0 || !(key1=key1->clone_tree(param))) return 0; // OOM } @@ -6441,7 +6548,7 @@ key_or(SEL_ARG *key1,SEL_ARG *key2) { // tmp.min. 
<= x <= tmp.max tmp->maybe_flag|= key.maybe_flag; key.increment_use_count(key1->use_count+1); - tmp->next_key_part=key_or(tmp->next_key_part,key.next_key_part); + tmp->next_key_part= key_or(param, tmp->next_key_part, key.next_key_part); if (!cmp) // Key2 is ready break; key.copy_max_to_min(tmp); @@ -6472,7 +6579,7 @@ key_or(SEL_ARG *key1,SEL_ARG *key2) tmp->increment_use_count(key1->use_count+1); /* Increment key count as it may be used for next loop */ key.increment_use_count(1); - new_arg->next_key_part=key_or(tmp->next_key_part,key.next_key_part); + new_arg->next_key_part= key_or(param, tmp->next_key_part, key.next_key_part); key1=key1->insert(new_arg); break; } @@ -8985,7 +9092,8 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree) else DBUG_RETURN(NULL); - Item *expr= min_max_item->args[0]; /* The argument of MIN/MAX. */ + /* The argument of MIN/MAX. */ + Item *expr= min_max_item->args[0]->real_item(); if (expr->type() == Item::FIELD_ITEM) /* Is it an attribute? */ { if (! min_max_arg_item) @@ -9356,6 +9464,7 @@ check_group_min_max_predicates(COND *cond, Item_field *min_max_arg_item, DBUG_ENTER("check_group_min_max_predicates"); DBUG_ASSERT(cond && min_max_arg_item); + cond= cond->real_item(); Item::Type cond_type= cond->type(); if (cond_type == Item::COND_ITEM) /* 'AND' or 'OR' */ { @@ -9393,7 +9502,7 @@ check_group_min_max_predicates(COND *cond, Item_field *min_max_arg_item, DBUG_PRINT("info", ("Analyzing: %s", pred->func_name())); for (uint arg_idx= 0; arg_idx < pred->argument_count (); arg_idx++) { - cur_arg= arguments[arg_idx]; + cur_arg= arguments[arg_idx]->real_item(); DBUG_PRINT("info", ("cur_arg: %s", cur_arg->full_name())); if (cur_arg->type() == Item::FIELD_ITEM) { diff --git a/sql/records.cc b/sql/records.cc index 0923ab1d75e..0fb9f4f9650 100644 --- a/sql/records.cc +++ b/sql/records.cc @@ -150,7 +150,8 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table, info->file= table->file; info->forms= &info->table; /* Only one table */ - if (table->s->tmp_table == TMP_TABLE && !table->sort.addon_field) + if (table->s->tmp_table == NON_TRANSACTIONAL_TMP_TABLE && + !table->sort.addon_field) VOID(table->file->extra(HA_EXTRA_MMAP)); if (table->sort.addon_field) diff --git a/sql/rpl_constants.h b/sql/rpl_constants.h new file mode 100644 index 00000000000..426e80a328d --- /dev/null +++ b/sql/rpl_constants.h @@ -0,0 +1,18 @@ +#ifndef RPL_CONSTANTS_H +#define RPL_CONSTANTS_H + +/** + Enumeration of the incidents that can occur for the server. 
+ */ +enum Incident { + /** No incident */ + INCIDENT_NONE, + + /** There are possibly lost events in the replication stream */ + INCIDENT_LOST_EVENTS, + + /** Shall be last event of the enumeration */ + INCIDENT_COUNT +}; + +#endif /* RPL_CONSTANTS_H */ diff --git a/sql/rpl_injector.cc b/sql/rpl_injector.cc index 95b5ecba895..b22b052a105 100644 --- a/sql/rpl_injector.cc +++ b/sql/rpl_injector.cc @@ -188,3 +188,21 @@ void injector::new_trans(THD *thd, injector::transaction *ptr) DBUG_VOID_RETURN; } + +int injector::record_incident(THD *thd, Incident incident) +{ + Incident_log_event ev(thd, incident); + if (int error= mysql_bin_log.write(&ev)) + return error; + mysql_bin_log.rotate_and_purge(RP_FORCE_ROTATE); + return 0; +} + +int injector::record_incident(THD *thd, Incident incident, LEX_STRING message) +{ + Incident_log_event ev(thd, incident, message); + if (int error= mysql_bin_log.write(&ev)) + return error; + mysql_bin_log.rotate_and_purge(RP_FORCE_ROTATE); + return 0; +} diff --git a/sql/rpl_injector.h b/sql/rpl_injector.h index 61c2e0ecebc..eabf374857a 100644 --- a/sql/rpl_injector.h +++ b/sql/rpl_injector.h @@ -18,9 +18,10 @@ /* Pull in 'byte', 'my_off_t', and 'uint32' */ #include <my_global.h> - #include <my_bitmap.h> +#include "rpl_constants.h" + /* Forward declarations */ class handler; class MYSQL_BIN_LOG; @@ -322,6 +323,9 @@ public: transaction new_trans(THD *); void new_trans(THD *, transaction *); + int record_incident(THD*, Incident incident); + int record_incident(THD*, Incident incident, LEX_STRING message); + private: explicit injector(); ~injector() { } /* Nothing needs to be done */ diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc index 8a051195dba..b0db355154e 100644 --- a/sql/rpl_rli.cc +++ b/sql/rpl_rli.cc @@ -29,14 +29,15 @@ int init_strvar_from_file(char *var, int max_size, IO_CACHE *f, st_relay_log_info::st_relay_log_info() - :no_storage(FALSE), info_fd(-1), cur_log_fd(-1), save_temporary_tables(0), + :no_storage(FALSE), replicate_same_server_id(::replicate_same_server_id), + info_fd(-1), cur_log_fd(-1), save_temporary_tables(0), cur_log_old_open_count(0), group_master_log_pos(0), log_space_total(0), ignore_log_space_limit(0), last_master_timestamp(0), slave_skip_counter(0), abort_pos_wait(0), slave_run_id(0), sql_thd(0), last_slave_errno(0), inited(0), abort_slave(0), slave_running(0), until_condition(UNTIL_NONE), until_log_pos(0), retried_trans(0), tables_to_lock(0), tables_to_lock_count(0), - unsafe_to_stop_at(0) + last_event_start_time(0) { DBUG_ENTER("st_relay_log_info::st_relay_log_info"); @@ -968,7 +969,7 @@ err: strtol() conversions needed for log names comparison. We don't need to compare them each time this function is called, we only need to do this when current log name changes. 
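The two injector::record_incident() overloads added above are the intended way for engine-side code to put an INCIDENT_LOST_EVENTS marker into the binary log; the forced rotate_and_purge() is done inside record_incident(), so the caller does not have to. The patch does not include a caller; a hypothetical one might look as follows (the message text is made up, and injector::instance() is assumed to return the process-wide injector object):

    /* Hypothetical caller, e.g. inside a storage engine's binlog thread. */
    static void report_lost_events(THD *thd)
    {
      LEX_STRING msg;
      msg.str= (char *) "events were lost on the master";
      msg.length= strlen(msg.str);
      injector *inj= injector::instance();      /* assumed singleton accessor */
      if (inj->record_incident(thd, INCIDENT_LOST_EVENTS, msg))
        sql_print_error("Failed to write an INCIDENT event to the binary log");
    }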
If we have UNTIL_MASTER_POS condition we - need to do this only after Rotate_log_event::exec_event() (which is + need to do this only after Rotate_log_event::do_apply_event() (which is rare, so caching gives real benifit), and if we have UNTIL_RELAY_POS condition then we should invalidate cached comarison value after inc_group_relay_log_pos() which called for each group of events (so we @@ -1001,6 +1002,22 @@ bool st_relay_log_info::is_until_satisfied() log_pos= group_relay_log_pos; } +#ifndef DBUG_OFF + { + char buf[32]; + DBUG_PRINT("info", ("group_master_log_name='%s', group_master_log_pos=%s", + group_master_log_name, llstr(group_master_log_pos, buf))); + DBUG_PRINT("info", ("group_relay_log_name='%s', group_relay_log_pos=%s", + group_relay_log_name, llstr(group_relay_log_pos, buf))); + DBUG_PRINT("info", ("(%s) log_name='%s', log_pos=%s", + until_condition == UNTIL_MASTER_POS ? "master" : "relay", + log_name, llstr(log_pos, buf))); + DBUG_PRINT("info", ("(%s) until_log_name='%s', until_log_pos=%s", + until_condition == UNTIL_MASTER_POS ? "master" : "relay", + until_log_name, llstr(until_log_pos, buf))); + } +#endif + if (until_log_names_cmp_result == UNTIL_LOG_NAMES_CMP_UNKNOWN) { /* @@ -1056,30 +1073,19 @@ void st_relay_log_info::cached_charset_invalidate() } -bool st_relay_log_info::cached_charset_compare(char *charset) +bool st_relay_log_info::cached_charset_compare(char *charset) const { DBUG_ENTER("st_relay_log_info::cached_charset_compare"); if (bcmp(cached_charset, charset, sizeof(cached_charset))) { - memcpy(cached_charset, charset, sizeof(cached_charset)); + memcpy(const_cast<char*>(cached_charset), charset, sizeof(cached_charset)); DBUG_RETURN(1); } DBUG_RETURN(0); } -void st_relay_log_info::transaction_end(THD* thd) -{ - DBUG_ENTER("st_relay_log_info::transaction_end"); - - /* - Nothing to do here right now. - */ - - DBUG_VOID_RETURN; -} - #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) void st_relay_log_info::cleanup_context(THD *thd, bool error) { @@ -1087,12 +1093,12 @@ void st_relay_log_info::cleanup_context(THD *thd, bool error) DBUG_ASSERT(sql_thd == thd); /* - 1) Instances of Table_map_log_event, if ::exec_event() was called on them, + 1) Instances of Table_map_log_event, if ::do_apply_event() was called on them, may have opened tables, which we cannot be sure have been closed (because maybe the Rows_log_event have not been found or will not be, because slave SQL thread is stopping, or relay log has a missing tail etc). So we close all thread's tables. And so the table mappings have to be cancelled. - 2) Rows_log_event::exec_event() may even have started statements or + 2) Rows_log_event::do_apply_event() may even have started statements or transactions on them, which we need to rollback in case of error. 3) If finding a Format_description_log_event after a BEGIN, we also need to rollback before continuing with the next events. @@ -1106,7 +1112,7 @@ void st_relay_log_info::cleanup_context(THD *thd, bool error) m_table_map.clear_tables(); close_thread_tables(thd); clear_tables_to_lock(); - unsafe_to_stop_at= 0; + last_event_start_time= 0; DBUG_VOID_RETURN; } diff --git a/sql/rpl_rli.h b/sql/rpl_rli.h index 45c9fb1cf96..3f06e108f6d 100644 --- a/sql/rpl_rli.h +++ b/sql/rpl_rli.h @@ -58,6 +58,15 @@ typedef struct st_relay_log_info */ bool no_storage; + /* + If true, events with the same server id should be replicated. 
This + field is set on creation of a relay log info structure by copying + the value of ::replicate_same_server_id and can be overridden if + necessary. For example of when this is done, check sql_binlog.cc, + where the BINLOG statement can be used to execute "raw" events. + */ + bool replicate_same_server_id; + /*** The following variables can only be read when protect by data lock ****/ /* @@ -292,14 +301,19 @@ typedef struct st_relay_log_info When the 6 bytes are equal to 0 is used to mean "cache is invalidated". */ void cached_charset_invalidate(); - bool cached_charset_compare(char *charset); - - void transaction_end(THD*); + bool cached_charset_compare(char *charset) const; void cleanup_context(THD *, bool); void clear_tables_to_lock(); - time_t unsafe_to_stop_at; + /* + Used by row-based replication to detect that it should not stop at + this event, but give it a chance to send more events. The time + where the last event inside a group started is stored here. If the + variable is zero, we are not in a group (but may be in a + transaction). + */ + time_t last_event_start_time; } RELAY_LOG_INFO; diff --git a/sql/rpl_utility.cc b/sql/rpl_utility.cc index 65a44a4947b..1d7cc808f0c 100644 --- a/sql/rpl_utility.cc +++ b/sql/rpl_utility.cc @@ -108,7 +108,7 @@ field_length_from_packed(enum_field_types const field_type, */ int -table_def::compatible_with(RELAY_LOG_INFO *rli, TABLE *table) +table_def::compatible_with(RELAY_LOG_INFO const *rli_arg, TABLE *table) const { /* @@ -116,6 +116,7 @@ table_def::compatible_with(RELAY_LOG_INFO *rli, TABLE *table) */ uint const cols_to_check= min(table->s->fields, size()); int error= 0; + RELAY_LOG_INFO const *rli= const_cast<RELAY_LOG_INFO*>(rli_arg); TABLE_SHARE const *const tsh= table->s; diff --git a/sql/rpl_utility.h b/sql/rpl_utility.h index b1aa642619c..17879a9ecfc 100644 --- a/sql/rpl_utility.h +++ b/sql/rpl_utility.h @@ -117,7 +117,7 @@ public: @retval 1 if the table definition is not compatible with @c table @retval 0 if the table definition is compatible with @c table */ - int compatible_with(RELAY_LOG_INFO *rli, TABLE *table) const; + int compatible_with(RELAY_LOG_INFO const *rli, TABLE *table) const; private: my_size_t m_size; // Number of elements in the types array diff --git a/sql/set_var.cc b/sql/set_var.cc index d5bf9ca4bc8..898d47e766e 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -356,6 +356,8 @@ sys_var_thd_ulong sys_net_retry_count("net_retry_count", &SV::net_retry_count, 0, fix_net_retry_count); sys_var_thd_bool sys_new_mode("new", &SV::new_mode); +sys_var_bool_ptr_readonly sys_old_mode("old", + &global_system_variables.old_mode); sys_var_thd_bool sys_old_alter_table("old_alter_table", &SV::old_alter_table); sys_var_thd_bool sys_old_passwords("old_passwords", &SV::old_passwords); @@ -942,6 +944,7 @@ SHOW_VAR init_vars[]= { {sys_net_retry_count.name, (char*) &sys_net_retry_count, SHOW_SYS}, {sys_net_write_timeout.name,(char*) &sys_net_write_timeout, SHOW_SYS}, {sys_new_mode.name, (char*) &sys_new_mode, SHOW_SYS}, + {sys_old_mode.name, (char*) &sys_old_mode, SHOW_SYS}, {sys_old_alter_table.name, (char*) &sys_old_alter_table, SHOW_SYS}, {sys_old_passwords.name, (char*) &sys_old_passwords, SHOW_SYS}, {"open_files_limit", (char*) &open_files_limit, SHOW_LONG}, diff --git a/sql/set_var.h b/sql/set_var.h index e4284c374bb..a3963171192 100644 --- a/sql/set_var.h +++ b/sql/set_var.h @@ -169,6 +169,16 @@ public: }; +class sys_var_bool_ptr_readonly :public sys_var_bool_ptr +{ +public: + sys_var_bool_ptr_readonly(const char *name_arg, 
my_bool *value_arg) + :sys_var_bool_ptr(name_arg, value_arg) + {} + bool is_readonly() const { return 1; } +}; + + class sys_var_str :public sys_var { public: diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt index d0d20a9e232..5fb2074db6e 100644 --- a/sql/share/errmsg.txt +++ b/sql/share/errmsg.txt @@ -6055,3 +6055,5 @@ ER_EVENT_CANNOT_CREATE_IN_THE_PAST eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. Event has not been created" ER_EVENT_CANNOT_ALTER_IN_THE_PAST eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. Event has not been altered" +ER_SLAVE_INCIDENT + eng "The incident %s occured on the master. Message: %-.64s" diff --git a/sql/slave.cc b/sql/slave.cc index 6bd5fd3da21..bc6cef95fc6 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -25,6 +25,7 @@ #include <thr_alarm.h> #include <my_dir.h> #include <sql_common.h> +#include <errmsg.h> #ifdef HAVE_REPLICATION @@ -521,11 +522,11 @@ static bool sql_slave_killed(THD* thd, RELAY_LOG_INFO* rli) really one minute of idleness, we don't timeout if the slave SQL thread is actively working. */ - if (!rli->unsafe_to_stop_at) + if (rli->last_event_start_time == 0) DBUG_RETURN(1); DBUG_PRINT("info", ("Slave SQL thread is in an unsafe situation, giving " "it some grace period")); - if (difftime(time(0), rli->unsafe_to_stop_at) > 60) + if (difftime(time(0), rli->last_event_start_time) > 60) { slave_print_msg(ERROR_LEVEL, rli, 0, "SQL thread had to stop in an unsafe situation, in " @@ -559,7 +560,7 @@ static bool sql_slave_killed(THD* thd, RELAY_LOG_INFO* rli) void */ -void slave_print_msg(enum loglevel level, RELAY_LOG_INFO* rli, +void slave_print_msg(enum loglevel level, RELAY_LOG_INFO const *rli, int err_code, const char* msg, ...) { void (*report_function)(const char *, ...); @@ -581,9 +582,9 @@ void slave_print_msg(enum loglevel level, RELAY_LOG_INFO* rli, It's an error, it must be reported in Last_error and Last_errno in SHOW SLAVE STATUS. */ - pbuff= rli->last_slave_error; + pbuff= const_cast<RELAY_LOG_INFO*>(rli)->last_slave_error; pbuffsize= sizeof(rli->last_slave_error); - rli->last_slave_errno = err_code; + const_cast<RELAY_LOG_INFO*>(rli)->last_slave_errno = err_code; report_function= sql_print_error; break; case WARNING_LEVEL: @@ -815,7 +816,7 @@ do not trust column Seconds_Behind_Master of SHOW SLAVE STATUS"); { if ((master_row= mysql_fetch_row(master_res)) && (::server_id == strtoul(master_row[1], 0, 10)) && - !replicate_same_server_id) + !mi->rli.replicate_same_server_id) errmsg= "The slave I/O thread stops because master and slave have equal \ MySQL server ids; these ids must be different for replication to work (or \ the --replicate-same-server-id option must be used on slave but this does \ @@ -1392,7 +1393,7 @@ void set_slave_thread_options(THD* thd) DBUG_VOID_RETURN; } -void set_slave_thread_default_charset(THD* thd, RELAY_LOG_INFO *rli) +void set_slave_thread_default_charset(THD* thd, RELAY_LOG_INFO const *rli) { DBUG_ENTER("set_slave_thread_default_charset"); @@ -1403,7 +1404,14 @@ void set_slave_thread_default_charset(THD* thd, RELAY_LOG_INFO *rli) thd->variables.collation_server= global_system_variables.collation_server; thd->update_charset(); - rli->cached_charset_invalidate(); + + /* + We use a const cast here since the conceptual (and externally + visible) behavior of the function is to set the default charset of + the thread. That the cache has to be invalidated is a secondary + effect. 
+ */ + const_cast<RELAY_LOG_INFO*>(rli)->cached_charset_invalidate(); DBUG_VOID_RETURN; } @@ -1611,7 +1619,8 @@ static ulong read_event(MYSQL* mysql, MASTER_INFO *mi, bool* suppress_warnings) } -int check_expected_error(THD* thd, RELAY_LOG_INFO* rli, int expected_error) +int check_expected_error(THD* thd, RELAY_LOG_INFO const *rli, + int expected_error) { DBUG_ENTER("check_expected_error"); @@ -1716,78 +1725,43 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) } if (ev) { - int type_code = ev->get_type_code(); - int exec_res; + int const type_code= ev->get_type_code(); + int exec_res= 0; /* - Queries originating from this server must be skipped. - Low-level events (Format_desc, Rotate, Stop) from this server - must also be skipped. But for those we don't want to modify - group_master_log_pos, because these events did not exist on the master. - Format_desc is not completely skipped. - Skip queries specified by the user in slave_skip_counter. - We can't however skip events that has something to do with the - log files themselves. - Filtering on own server id is extremely important, to ignore execution of - events created by the creation/rotation of the relay log (remember that - now the relay log starts with its Format_desc, has a Rotate etc). */ - DBUG_PRINT("info",("type_code=%d, server_id=%d",type_code,ev->server_id)); + DBUG_PRINT("info",("type_code=%d (%s), server_id=%d", + type_code, ev->get_type_str(), ev->server_id)); + DBUG_PRINT("info", ("thd->options={ %s%s}", + FLAGSTR(thd->options, OPTION_NOT_AUTOCOMMIT), + FLAGSTR(thd->options, OPTION_BEGIN))); - if ((ev->server_id == (uint32) ::server_id && - !replicate_same_server_id && - type_code != FORMAT_DESCRIPTION_EVENT) || - (rli->slave_skip_counter && - type_code != ROTATE_EVENT && type_code != STOP_EVENT && - type_code != START_EVENT_V3 && type_code!= FORMAT_DESCRIPTION_EVENT)) - { - DBUG_PRINT("info", ("event skipped")); - /* - We only skip the event here and do not increase the group log - position. In the event that we have to restart, this means - that we might have to skip the event again, but that is a - minor issue. - - If we were to increase the group log position when skipping an - event, it might be that we are restarting at the wrong - position and have events before that we should have executed, - so not increasing the group log position is a sure bet in this - case. - - In this way, we just step the group log position when we - *know* that we are at the end of a group. - */ - rli->inc_event_relay_log_pos(); - /* - Protect against common user error of setting the counter to 1 - instead of 2 while recovering from an insert which used auto_increment, - rand or user var. - */ - if (rli->slave_skip_counter && - !((type_code == INTVAR_EVENT || - type_code == RAND_EVENT || - type_code == USER_VAR_EVENT) && - rli->slave_skip_counter == 1) && - /* - The events from ourselves which have something to do with the relay - log itself must be skipped, true, but they mustn't decrement - rli->slave_skip_counter, because the user is supposed to not see - these events (they are not in the master's binlog) and if we - decremented, START SLAVE would for example decrement when it sees - the Rotate, so the event which the user probably wanted to skip - would not be skipped. 
- */ - !(ev->server_id == (uint32) ::server_id && - (type_code == ROTATE_EVENT || type_code == STOP_EVENT || - type_code == START_EVENT_V3 || type_code == FORMAT_DESCRIPTION_EVENT))) - --rli->slave_skip_counter; - pthread_mutex_unlock(&rli->data_lock); - delete ev; - DBUG_RETURN(0); // avoid infinite update loops - } - pthread_mutex_unlock(&rli->data_lock); + + /* + Execute the event to change the database and update the binary + log coordinates, but first we set some data that is needed for + the thread. + + The event will be executed unless it is supposed to be skipped. + + Queries originating from this server must be skipped. Low-level + events (Format_description_log_event, Rotate_log_event, + Stop_log_event) from this server must also be skipped. But for + those we don't want to modify 'group_master_log_pos', because + these events did not exist on the master. + Format_description_log_event is not completely skipped. + + Skip queries specified by the user in 'slave_skip_counter'. We + can't however skip events that has something to do with the log + files themselves. + + Filtering on own server id is extremely important, to ignore + execution of events created by the creation/rotation of the relay + log (remember that now the relay log starts with its Format_desc, + has a Rotate etc). + */ thd->server_id = ev->server_id; // use the original server id for logging thd->set_time(); // time the query @@ -1795,19 +1769,69 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) if (!ev->when) ev->when = time(NULL); ev->thd = thd; // because up to this point, ev->thd == 0 - DBUG_PRINT("info", ("thd->options={ %s%s}", - FLAGSTR(thd->options, OPTION_NOT_AUTOCOMMIT), - FLAGSTR(thd->options, OPTION_BEGIN))); - exec_res = ev->exec_event(rli); - DBUG_PRINT("info", ("exec_event result: %d", exec_res)); - DBUG_ASSERT(rli->sql_thd==thd); + int reason= ev->shall_skip(rli); + if (reason == Log_event::EVENT_SKIP_COUNT) + --rli->slave_skip_counter; + pthread_mutex_unlock(&rli->data_lock); + if (reason == Log_event::EVENT_SKIP_NOT) + exec_res= ev->apply_event(rli); +#ifndef DBUG_OFF + else + { + /* + This only prints information to the debug trace. + + TODO: Print an informational message to the error log? + */ + static const char *const explain[] = { + "event was not skipped", // EVENT_SKIP_NOT, + "event originated from this server", // EVENT_SKIP_IGNORE, + "event skip counter was non-zero" // EVENT_SKIP_COUNT + }; + DBUG_PRINT("info", ("%s was skipped because %s", + ev->get_type_str(), explain[reason])); + } +#endif + + DBUG_PRINT("info", ("apply_event error = %d", exec_res)); + if (exec_res == 0) + { + int error= ev->update_pos(rli); + char buf[22]; + DBUG_PRINT("info", ("update_pos error = %d", error)); + DBUG_PRINT("info", ("group %s %s", + llstr(rli->group_relay_log_pos, buf), + rli->group_relay_log_name)); + DBUG_PRINT("info", ("event %s %s", + llstr(rli->event_relay_log_pos, buf), + rli->event_relay_log_name)); + /* + The update should not fail, so print an error message and + return an error code. + + TODO: Replace this with a decent error message when merged + with BUG#24954 (which adds several new error message). + */ + if (error) + { + slave_print_msg(ERROR_LEVEL, rli, ER_UNKNOWN_ERROR, + "It was not possible to update the positions" + " of the relay log information: the slave may" + " be in an inconsistent state." 
+ " Stopped in %s position %s", + rli->group_relay_log_name, + llstr(rli->group_relay_log_pos, buf)); + DBUG_RETURN(1); + } + } + /* Format_description_log_event should not be deleted because it will be used to read info about the relay log's format; it will be deleted when the SQL thread does not need it, i.e. when this thread terminates. */ - if (ev->get_type_code() != FORMAT_DESCRIPTION_EVENT) + if (type_code != FORMAT_DESCRIPTION_EVENT) { DBUG_PRINT("info", ("Deleting the event after it has been executed")); delete ev; @@ -2068,7 +2092,7 @@ after reconnect"); if (event_len == packet_error) { uint mysql_error_number= mysql_errno(mysql); - if (mysql_error_number == ER_NET_PACKET_TOO_LARGE) + if (mysql_error_number == CR_NET_PACKET_TOO_LARGE) { sql_print_error("\ Log entry on master is longer than max_allowed_packet (%ld) on \ @@ -2368,13 +2392,17 @@ Slave SQL thread aborted. Can't execute init_slave query"); THD_CHECK_SENTRY(thd); if (exec_relay_log_event(thd,rli)) { + DBUG_PRINT("info", ("exec_relay_log_event() failed")); // do not scare the user if SQL thread was simply killed or stopped if (!sql_slave_killed(thd,rli)) { /* - retrieve as much info as possible from the thd and, error codes and warnings - and print this to the error log as to allow the user to locate the error + retrieve as much info as possible from the thd and, error + codes and warnings and print this to the error log as to + allow the user to locate the error */ + DBUG_PRINT("info", ("thd->net.last_errno=%d; rli->last_slave_errno=%d", + thd->net.last_errno, rli->last_slave_errno)); if (thd->net.last_errno != 0) { if (rli->last_slave_errno == 0) @@ -2395,10 +2423,25 @@ Slave SQL thread aborted. Can't execute init_slave query"); /* Print any warnings issued */ List_iterator_fast<MYSQL_ERROR> it(thd->warn_list); MYSQL_ERROR *err; + /* + Added controlled slave thread cancel for replication + of user-defined variables. + */ + bool udf_error = false; while ((err= it++)) + { + if (err->code == ER_CANT_OPEN_LIBRARY) + udf_error = true; sql_print_warning("Slave: %s Error_code: %d",err->msg, err->code); - - sql_print_error("\ + } + if (udf_error) + sql_print_error("Error loading user-defined library, slave SQL " + "thread aborted. Install the missing library, and restart the " + "slave SQL thread with \"SLAVE START\". We stopped at log '%s' " + "position %s", RPL_LOG_NAME, llstr(rli->group_master_log_pos, + llbuff)); + else + sql_print_error("\ Error running query, slave SQL thread aborted. Fix the problem, and restart \ the slave SQL thread with \"SLAVE START\". We stopped at log \ '%s' position %s", RPL_LOG_NAME, llstr(rli->group_master_log_pos, llbuff)); @@ -2701,6 +2744,7 @@ static int queue_binlog_ver_1_event(MASTER_INFO *mi, const char *buf, my_free((char*) tmp_buf, MYF(MY_ALLOW_ZERO_PTR)); DBUG_RETURN(1); } + pthread_mutex_lock(&mi->data_lock); ev->log_pos= mi->master_log_pos; /* 3.23 events don't contain log_pos */ switch (ev->get_type_code()) { @@ -2964,7 +3008,7 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) pthread_mutex_lock(log_lock); if ((uint4korr(buf + SERVER_ID_OFFSET) == ::server_id) && - !replicate_same_server_id) + !mi->rli.replicate_same_server_id) { /* Do not write it to the relay log. 
diff --git a/sql/slave.h b/sql/slave.h index f21266bbee4..107b74c09dd 100644 --- a/sql/slave.h +++ b/sql/slave.h @@ -162,9 +162,9 @@ bool show_binlog_info(THD* thd); bool rpl_master_has_bug(RELAY_LOG_INFO *rli, uint bug_id); const char *print_slave_db_safe(const char *db); -int check_expected_error(THD* thd, RELAY_LOG_INFO* rli, int error_code); +int check_expected_error(THD* thd, RELAY_LOG_INFO const *rli, int error_code); void skip_load_data_infile(NET* net); -void slave_print_msg(enum loglevel level, RELAY_LOG_INFO* rli, +void slave_print_msg(enum loglevel level, RELAY_LOG_INFO const *rli, int err_code, const char* msg, ...) ATTRIBUTE_FORMAT(printf, 4, 5); @@ -182,7 +182,7 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log,ulonglong pos, int purge_relay_logs(RELAY_LOG_INFO* rli, THD *thd, bool just_reset, const char** errmsg); void set_slave_thread_options(THD* thd); -void set_slave_thread_default_charset(THD* thd, RELAY_LOG_INFO *rli); +void set_slave_thread_default_charset(THD *thd, RELAY_LOG_INFO const *rli); void rotate_relay_log(MASTER_INFO* mi); pthread_handler_t handle_slave_io(void *arg); diff --git a/sql/sql_acl.h b/sql/sql_acl.h index 86d2cabc703..cba283ec65b 100644 --- a/sql/sql_acl.h +++ b/sql/sql_acl.h @@ -47,8 +47,7 @@ don't forget to update 1. static struct show_privileges_st sys_privileges[] 2. static const char *command_array[] and static uint command_lengths[] - 3. mysql_create_system_tables.sh, mysql_fix_privilege_tables.sql - and mysql-test/lib/init_db.sql + 3. mysql_system_tables.sql and mysql_system_tables_fix.sql 4. acl_init() or whatever - to define behaviour for old privilege tables 5. sql_yacc.yy - for GRANT/REVOKE to work */ diff --git a/sql/sql_base.cc b/sql/sql_base.cc index e0b60b875d5..82cce335f00 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -858,6 +858,7 @@ void free_io_cache(TABLE *table) DBUG_VOID_RETURN; } + /* Close all tables which aren't in use by any thread @@ -970,6 +971,71 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh, /* + Close all tables which match specified connection string or + if specified string is NULL, then any table with a connection string. 
+*/ + +bool close_cached_connection_tables(THD *thd, bool if_wait_for_refresh, + LEX_STRING *connection, bool have_lock) +{ + uint idx; + TABLE_LIST tmp, *tables= NULL; + bool result= FALSE; + DBUG_ENTER("close_cached_connections"); + DBUG_ASSERT(thd); + + bzero(&tmp, sizeof(TABLE_LIST)); + + if (!have_lock) + VOID(pthread_mutex_lock(&LOCK_open)); + + for (idx= 0; idx < table_def_cache.records; idx++) + { + TABLE_SHARE *share= (TABLE_SHARE *) hash_element(&table_def_cache, idx); + + /* Ignore if table is not open or does not have a connect_string */ + if (!share->connect_string.length || !share->ref_count) + continue; + + /* Compare the connection string */ + if (connection && + (connection->length > share->connect_string.length || + (connection->length < share->connect_string.length && + (share->connect_string.str[connection->length] != '/' && + share->connect_string.str[connection->length] != '\\')) || + strncasecmp(connection->str, share->connect_string.str, + connection->length))) + continue; + + /* close_cached_tables() only uses these elements */ + tmp.db= share->db.str; + tmp.table_name= share->table_name.str; + tmp.next_local= tables; + + tables= (TABLE_LIST *) memdup_root(thd->mem_root, (char*)&tmp, + sizeof(TABLE_LIST)); + } + + if (tables) + result= close_cached_tables(thd, FALSE, tables, TRUE); + + if (!have_lock) + VOID(pthread_mutex_unlock(&LOCK_open)); + + if (if_wait_for_refresh) + { + pthread_mutex_lock(&thd->mysys_var->mutex); + thd->mysys_var->current_mutex= 0; + thd->mysys_var->current_cond= 0; + thd->proc_info=0; + pthread_mutex_unlock(&thd->mysys_var->mutex); + } + + DBUG_RETURN(result); +} + + +/* Mark all tables in the list which were used by current substatement as free for reuse. @@ -1328,10 +1394,10 @@ void close_temporary_tables(THD *thd) due to special characters in the names */ append_identifier(thd, &s_query, table->s->db.str, strlen(table->s->db.str)); - s_query.q_append('.'); + s_query.append('.'); append_identifier(thd, &s_query, table->s->table_name.str, strlen(table->s->table_name.str)); - s_query.q_append(','); + s_query.append(','); next= table->next; close_temporary(table, 1, 1); } @@ -2272,6 +2338,9 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, table->insert_values= 0; table->fulltext_searched= 0; table->file->ft_handler= 0; + /* Catch wrong handling of the auto_increment_field_not_null. */ + DBUG_ASSERT(!table->auto_increment_field_not_null); + table->auto_increment_field_not_null= FALSE; if (table->timestamp_field) table->timestamp_field_type= table->timestamp_field->get_auto_set_type(); table->pos_in_table_list= table_list; @@ -3746,7 +3815,7 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db, tmp_table->reginfo.lock_type= TL_WRITE; // Simulate locked share->tmp_table= (tmp_table->file->has_transactions() ? - TRANSACTIONAL_TMP_TABLE : TMP_TABLE); + TRANSACTIONAL_TMP_TABLE : NON_TRANSACTIONAL_TMP_TABLE); if (link_in_list) { @@ -5776,7 +5845,8 @@ bool setup_tables_and_check_access(THD *thd, &leaves_tmp, select_insert)) return TRUE; - *leaves= leaves_tmp; + if (leaves) + *leaves= leaves_tmp; for (; leaves_tmp; leaves_tmp= leaves_tmp->next_leaf) { @@ -6171,6 +6241,11 @@ err_no_arena: values values to fill with ignore_errors TRUE if we should ignore errors + NOTE + fill_record() may set table->auto_increment_field_not_null and a + caller should make sure that it is reset after their last call to this + function. 
+ RETURN FALSE OK TRUE error occured @@ -6183,27 +6258,52 @@ fill_record(THD * thd, List<Item> &fields, List<Item> &values, List_iterator_fast<Item> f(fields),v(values); Item *value, *fld; Item_field *field; + TABLE *table= 0; DBUG_ENTER("fill_record"); + /* + Reset the table->auto_increment_field_not_null as it is valid for + only one row. + */ + if (fields.elements) + { + /* + On INSERT or UPDATE fields are checked to be from the same table, + thus we safely can take table from the first field. + */ + fld= (Item_field*)f++; + if (!(field= fld->filed_for_view_update())) + { + my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), fld->name); + goto err; + } + table= field->field->table; + table->auto_increment_field_not_null= FALSE; + f.rewind(); + } while ((fld= f++)) { if (!(field= fld->filed_for_view_update())) { my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), fld->name); - DBUG_RETURN(TRUE); + goto err; } value=v++; Field *rfield= field->field; - TABLE *table= rfield->table; + table= rfield->table; if (rfield == table->next_number_field) table->auto_increment_field_not_null= TRUE; if ((value->save_in_field(rfield, 0) < 0) && !ignore_errors) { my_message(ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR), MYF(0)); - DBUG_RETURN(TRUE); + goto err; } } DBUG_RETURN(thd->net.report_error); +err: + if (table) + table->auto_increment_field_not_null= FALSE; + DBUG_RETURN(TRUE); } @@ -6252,6 +6352,11 @@ fill_record_n_invoke_before_triggers(THD *thd, List<Item> &fields, values list of fields ignore_errors TRUE if we should ignore errors + NOTE + fill_record() may set table->auto_increment_field_not_null and a + caller should make sure that it is reset after their last call to this + function. + RETURN FALSE OK TRUE error occured @@ -6262,19 +6367,38 @@ fill_record(THD *thd, Field **ptr, List<Item> &values, bool ignore_errors) { List_iterator_fast<Item> v(values); Item *value; + TABLE *table= 0; DBUG_ENTER("fill_record"); Field *field; + /* + Reset the table->auto_increment_field_not_null as it is valid for + only one row. + */ + if (*ptr) + { + /* + On INSERT or UPDATE fields are checked to be from the same table, + thus we safely can take table from the first field. + */ + table= (*ptr)->table; + table->auto_increment_field_not_null= FALSE; + } while ((field = *ptr++)) { value=v++; - TABLE *table= field->table; + table= field->table; if (field == table->next_number_field) table->auto_increment_field_not_null= TRUE; if (value->save_in_field(field, 0) == -1) - DBUG_RETURN(TRUE); + goto err; } DBUG_RETURN(thd->net.report_error); + +err: + if (table) + table->auto_increment_field_not_null= FALSE; + DBUG_RETURN(TRUE); } diff --git a/sql/sql_binlog.cc b/sql/sql_binlog.cc index b0a54bec664..6f7bbda96de 100644 --- a/sql/sql_binlog.cc +++ b/sql/sql_binlog.cc @@ -163,9 +163,17 @@ void mysql_client_binlog_statement(THD* thd) (ulong) uint4korr(bufptr+EVENT_LEN_OFFSET))); #endif ev->thd= thd; - if (IF_DBUG(int err= ) ev->exec_event(thd->rli_fake)) + /* + We go directly to the application phase, since we don't need + to check if the event shall be skipped or not. + + Neither do we have to update the log positions, since that is + not used at all: the rli_fake instance is used only for error + reporting. + */ + if (IF_DBUG(int err= ) ev->apply_event(thd->rli_fake)) { - DBUG_PRINT("error", ("exec_event() returned: %d", err)); + DBUG_PRINT("info", ("apply_event() returned: %d", err)); /* TODO: Maybe a better error message since the BINLOG statement now contains several events. 
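Both fill_record() variants above now treat table->auto_increment_field_not_null as a per-row flag: it is cleared before filling a row, set only when an explicit value is supplied for the auto-increment column, and cleared again on the error path. A toy illustration of that control flow (the struct names are invented for the sketch):

#include <cstddef>
#include <vector>

struct SketchField { bool is_next_number_field; bool store_fails; };
struct SketchTable { bool auto_increment_field_not_null; };

/* Returns true on error, mirroring the patched fill_record() contract. */
static bool fill_one_row(SketchTable &table,
                         const std::vector<SketchField> &fields)
{
  table.auto_increment_field_not_null= false;      /* valid for one row only */
  for (std::size_t i= 0; i < fields.size(); i++)
  {
    if (fields[i].is_next_number_field)
      table.auto_increment_field_not_null= true;   /* explicit value given */
    if (fields[i].store_fails)                     /* save_in_field() < 0   */
    {
      table.auto_increment_field_not_null= false;  /* never leak the flag   */
      return true;
    }
  }
  return false;
}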
diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 57db12ec9cd..cc38d63c9f9 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -679,11 +679,22 @@ bool THD::store_globals() void THD::cleanup_after_query() { + /* + Reset rand_used so that detection of calls to rand() will save random + seeds if needed by the slave. + + Do not reset rand_used if inside a stored function or trigger because + only the call to these operations is logged. Thus only the calling + statement needs to detect rand() calls made by its substatements. These + substatements must not set rand_used to 0 because it would remove the + detection of rand() by the calling statement. + */ if (!in_sub_stmt) /* stored functions and triggers are a special case */ { /* Forget those values, for next binlogger: */ stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0; auto_inc_intervals_in_cur_stmt_for_binlog.empty(); + rand_used= 0; } if (first_successful_insert_id_in_cur_stmt > 0) { @@ -1322,7 +1333,6 @@ bool select_export::send_data(List<Item> &items) } row_count++; Item *item; - char *buff_ptr=buff; uint used_length=0,items_left=items.elements; List_iterator_fast<Item> li(items); @@ -1422,19 +1432,18 @@ bool select_export::send_data(List<Item> &items) goto err; } } - buff_ptr=buff; // Place separators here if (res && (!exchange->opt_enclosed || result_type == STRING_RESULT)) { - memcpy(buff_ptr,exchange->enclosed->ptr(),exchange->enclosed->length()); - buff_ptr+=exchange->enclosed->length(); + if (my_b_write(&cache, (byte*) exchange->enclosed->ptr(), + exchange->enclosed->length())) + goto err; } if (--items_left) { - memcpy(buff_ptr,exchange->field_term->ptr(),field_term_length); - buff_ptr+=field_term_length; + if (my_b_write(&cache, (byte*) exchange->field_term->ptr(), + field_term_length)) + goto err; } - if (my_b_write(&cache,(byte*) buff,(uint) (buff_ptr-buff))) - goto err; } if (my_b_write(&cache,(byte*) exchange->line_term->ptr(), exchange->line_term->length())) @@ -2544,30 +2553,111 @@ my_size_t THD::max_row_length_blob(TABLE *table, const byte *data) const } -my_size_t THD::pack_row(TABLE *table, MY_BITMAP const* cols, byte *row_data, - const byte *record) const +/* + Pack a record of data for a table into a format suitable for + transfer via the binary log. + + SYNOPSIS + THD::pack_row() + table Table describing the format of the record + cols Bitmap with a set bit for each column that should be + stored in the row + row_data Pointer to memory where row will be written + record Pointer to record that should be packed. It is assumed + that the pointer refers to either record[0] or + record[1], but no such check is made since the code does + not rely on that. + + DESCRIPTION + + The format for a row in transfer with N fields is the following: + + ceil(N/8) null bytes: + One null bit for every column *regardless of whether it can be + null or not*. This simplifies the decoding. Observe that the + number of null bits is equal to the number of set bits in the + 'cols' bitmap. The number of null bytes is the smallest number + of bytes necessary to store the null bits. + + Padding bits are 1. + + N packets: + Each field is stored in packed format. + + + RETURN VALUE + + The number of bytes written at 'row_data'. 
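A minimal sketch of the row layout described above, packing nullable 32-bit values instead of real Field objects: a set bit means NULL, padding bits are 1, the first column sits in the least significant bit of the first null byte, and only non-NULL columns contribute data bytes. The sketch assumes every column is included in the column bitmap and uses a plain memcpy where the server calls Field::pack().

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <optional>
#include <vector>

static std::vector<std::uint8_t>
pack_row_sketch(const std::vector<std::optional<std::uint32_t> > &row)
{
  const std::size_t null_byte_count= (row.size() + 7) / 8;
  std::vector<std::uint8_t> out(null_byte_count, 0xFF);  /* padding bits = 1 */

  for (std::size_t i= 0; i < row.size(); i++)
  {
    if (row[i].has_value())
    {
      out[i / 8] &= (std::uint8_t) ~(1u << (i % 8));     /* clear null bit   */
      std::uint8_t buf[4];
      std::memcpy(buf, &*row[i], sizeof buf);            /* "Field::pack()"  */
      out.insert(out.end(), buf, buf + sizeof buf);
    }
    /* NULL columns keep their bit set and contribute no data bytes. */
  }
  return out;
}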
+ */ +my_size_t +THD::pack_row(TABLE *table, MY_BITMAP const* cols, + byte *const row_data, const byte *record) const { Field **p_field= table->field, *field; - int n_null_bytes= table->s->null_bytes; - byte *ptr; - uint i; + int const null_byte_count= (bitmap_bits_set(cols) + 7) / 8; + byte *pack_ptr = row_data + null_byte_count; + byte *null_ptr = row_data; my_ptrdiff_t const rec_offset= record - table->record[0]; my_ptrdiff_t const def_offset= table->s->default_values - table->record[0]; - memcpy(row_data, record, n_null_bytes); - ptr= row_data+n_null_bytes; - for (i= 0 ; (field= *p_field) ; i++, p_field++) + /* + We write the null bits and the packed records using one pass + through all the fields. The null bytes are written little-endian, + i.e., the first fields are in the first byte. + */ + unsigned int null_bits= (1U << 8) - 1; + // Mask to mask out the correct but among the null bits + unsigned int null_mask= 1U; + for ( ; (field= *p_field) ; p_field++) { - if (bitmap_is_set(cols,i)) + DBUG_PRINT("debug", ("null_mask=%d; null_ptr=%p; row_data=%p; null_byte_count=%d", + null_mask, null_ptr, row_data, null_byte_count)); + if (bitmap_is_set(cols, p_field - table->field)) { - my_ptrdiff_t const offset= - field->is_null((uint) rec_offset) ? def_offset : rec_offset; - field->move_field_offset(offset); - ptr= (byte*)field->pack((char *) ptr, field->ptr); - field->move_field_offset(-offset); + my_ptrdiff_t offset; + if (field->is_null(rec_offset)) + { + offset= def_offset; + null_bits |= null_mask; + } + else + { + offset= rec_offset; + null_bits &= ~null_mask; + + /* + We only store the data of the field if it is non-null + */ + pack_ptr= (byte*)field->pack((char *) pack_ptr, field->ptr + offset); + } + + null_mask <<= 1; + if ((null_mask & 0xFF) == 0) + { + DBUG_ASSERT(null_ptr < row_data + null_byte_count); + null_mask = 1U; + *null_ptr++ = null_bits; + null_bits= (1U << 8) - 1; + } } } - return (static_cast<my_size_t>(ptr - row_data)); + + /* + Write the last (partial) byte, if there is one + */ + if ((null_mask & 0xFF) > 1) + { + DBUG_ASSERT(null_ptr < row_data + null_byte_count); + *null_ptr++ = null_bits; + } + + /* + The null pointer should now point to the first byte of the + packed data. If it doesn't, something is very wrong. + */ + DBUG_ASSERT(null_ptr == row_data + null_byte_count); + + return static_cast<my_size_t>(pack_ptr - row_data); } diff --git a/sql/sql_class.h b/sql/sql_class.h index 11bf0f05d13..66914bdf908 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -1270,7 +1270,7 @@ public: return first_successful_insert_id_in_prev_stmt; } /* - Used by Intvar_log_event::exec_event() and by "SET INSERT_ID=#" + Used by Intvar_log_event::do_apply_event() and by "SET INSERT_ID=#" (mysqlbinlog). We'll soon add a variant which can take many intervals in argument. 
*/ @@ -1411,7 +1411,7 @@ public: #ifdef WITH_PARTITION_STORAGE_ENGINE partition_info *work_part_info; #endif - + THD(); ~THD(); diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index 0ff5c4e5b50..d300edd6e18 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -54,6 +54,27 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, if (mysql_prepare_delete(thd, table_list, &conds)) DBUG_RETURN(TRUE); + /* check ORDER BY even if it can be ignored */ + if (order && order->elements) + { + TABLE_LIST tables; + List<Item> fields; + List<Item> all_fields; + + bzero((char*) &tables,sizeof(tables)); + tables.table = table; + tables.alias = table_list->alias; + + if (select_lex->setup_ref_array(thd, order->elements) || + setup_order(thd, select_lex->ref_pointer_array, &tables, + fields, all_fields, (ORDER*) order->first)) + { + delete select; + free_underlaid_joins(thd, &thd->lex->select_lex); + DBUG_RETURN(TRUE); + } + } + const_cond= (!conds || conds->const_item()); safe_update=test(thd->options & OPTION_SAFE_UPDATES); if (safe_update && const_cond) @@ -155,23 +176,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, { uint length= 0; SORT_FIELD *sortorder; - TABLE_LIST tables; - List<Item> fields; - List<Item> all_fields; ha_rows examined_rows; - - bzero((char*) &tables,sizeof(tables)); - tables.table = table; - tables.alias = table_list->alias; - - if (select_lex->setup_ref_array(thd, order->elements) || - setup_order(thd, select_lex->ref_pointer_array, &tables, - fields, all_fields, (ORDER*) order->first)) - { - delete select; - free_underlaid_joins(thd, &thd->lex->select_lex); - DBUG_RETURN(TRUE); - } if ((!select || table->quick_keys.is_clear_all()) && limit != HA_POS_ERROR) usable_index= get_index_for_order(table, (ORDER*)(order->first), limit); diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index 5712fbceac8..89bd7958c86 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -179,7 +179,7 @@ exit: orig_table_list->table_name= table->s->table_name.str; orig_table_list->table_name_length= table->s->table_name.length; table->derived_select_number= first_select->select_number; - table->s->tmp_table= TMP_TABLE; + table->s->tmp_table= NON_TRANSACTIONAL_TMP_TABLE; #ifndef NO_EMBEDDED_ACCESS_CHECKS if (orig_table_list->referencing_view) table->grant= orig_table_list->grant; diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index d24230eb379..c0e0203ed86 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -753,6 +753,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, table->next_number_field->val_int() : 0)); table->next_number_field=0; thd->count_cuted_fields= CHECK_FIELD_IGNORE; + table->auto_increment_field_not_null= FALSE; if (duplic != DUP_ERROR || ignore) table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); if (duplic == DUP_REPLACE && @@ -2689,6 +2690,7 @@ select_insert::~select_insert() if (table) { table->next_number_field=0; + table->auto_increment_field_not_null= FALSE; table->file->ha_reset(); } thd->count_cuted_fields= CHECK_FIELD_IGNORE; diff --git a/sql/sql_list.h b/sql/sql_list.h index af30cbe0d6a..ba61a931e04 100644 --- a/sql/sql_list.h +++ b/sql/sql_list.h @@ -23,7 +23,7 @@ class Sql_alloc { public: - static void *operator new(size_t size) + static void *operator new(size_t size) throw () { return (void*) sql_alloc((uint) size); } @@ -31,9 +31,9 @@ public: { return (void*) sql_alloc((uint) size); } - static void *operator new[](size_t size, MEM_ROOT *mem_root) + static void *operator new[](size_t size, MEM_ROOT *mem_root) throw () 
{ return (void*) alloc_root(mem_root, (uint) size); } - static void *operator new(size_t size, MEM_ROOT *mem_root) + static void *operator new(size_t size, MEM_ROOT *mem_root) throw () { return (void*) alloc_root(mem_root, (uint) size); } static void operator delete(void *ptr, size_t size) { TRASH(ptr, size); } static void operator delete(void *ptr, MEM_ROOT *mem_root) diff --git a/sql/sql_load.cc b/sql/sql_load.cc index cf356a4b336..7d2c2281bba 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -512,6 +512,7 @@ err: mysql_unlock_tables(thd, thd->lock); thd->lock=0; } + table->auto_increment_field_not_null= FALSE; thd->abort_on_warning= 0; DBUG_RETURN(error); } @@ -609,8 +610,6 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, { uint length; byte save_chr; - if (field == table->next_number_field) - table->auto_increment_field_not_null= TRUE; if ((length=(uint) (read_info.row_end-pos)) > field->field_length) length=field->field_length; diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 23ec0fd0128..e2a36b232c9 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -2594,6 +2594,36 @@ end_with_restore_list: break; } case SQLCOM_REPLACE: +#ifndef DBUG_OFF + if (mysql_bin_log.is_open()) + { + /* + Generate an incident log event before writing the real event + to the binary log. We put this event is before the statement + since that makes it simpler to check that the statement was + not executed on the slave (since incidents usually stop the + slave). + + Observe that any row events that are generated will be + generated before. + + This is only for testing purposes and will not be present in a + release build. + */ + + Incident incident= INCIDENT_NONE; + DBUG_PRINT("debug", ("Just before generate_incident()")); + DBUG_EXECUTE_IF("incident_database_resync_on_replace", + incident= INCIDENT_LOST_EVENTS;); + if (incident) + { + Incident_log_event ev(thd, incident); + mysql_bin_log.write(&ev); + mysql_bin_log.rotate_and_purge(RP_FORCE_ROTATE); + } + DBUG_PRINT("debug", ("Just after generate_incident()")); + } +#endif case SQLCOM_INSERT: { DBUG_ASSERT(first_table == all_tables && first_table != 0); @@ -3902,7 +3932,6 @@ create_sp_error: if (check_access(thd, DELETE_ACL, "mysql", 0, 1, 0, 0)) goto error; - /* Does NOT write to binlog */ if (!(res = mysql_drop_function(thd, &lex->spname->m_name))) { send_ok(thd); @@ -4253,6 +4282,10 @@ create_sp_error: int error; LEX *lex= thd->lex; DBUG_PRINT("info", ("case SQLCOM_CREATE_SERVER")); + + if (check_global_access(thd, SUPER_ACL)) + break; + if ((error= create_server(thd, &lex->server_options))) { DBUG_PRINT("info", ("problem creating server <%s>", @@ -4268,6 +4301,10 @@ create_sp_error: int error; LEX *lex= thd->lex; DBUG_PRINT("info", ("case SQLCOM_ALTER_SERVER")); + + if (check_global_access(thd, SUPER_ACL)) + break; + if ((error= alter_server(thd, &lex->server_options))) { DBUG_PRINT("info", ("problem altering server <%s>", @@ -4283,9 +4320,13 @@ create_sp_error: int err_code; LEX *lex= thd->lex; DBUG_PRINT("info", ("case SQLCOM_DROP_SERVER")); + + if (check_global_access(thd, SUPER_ACL)) + break; + if ((err_code= drop_server(thd, &lex->server_options))) { - if (! lex->drop_if_exists && err_code == ER_FOREIGN_SERVER_EXISTS) + if (! 
lex->drop_if_exists && err_code == ER_FOREIGN_SERVER_DOESNT_EXIST) { DBUG_PRINT("info", ("problem dropping server %s", lex->server_options.server_name)); @@ -5161,6 +5202,7 @@ void mysql_init_multi_delete(LEX *lex) lex->query_tables_last= &lex->query_tables; } + /* When you modify mysql_parse(), you may need to mofify mysql_test_parse_for_slave() in this same file. @@ -5173,6 +5215,7 @@ void mysql_parse(THD *thd, char *inBuf, uint length) DBUG_EXECUTE_IF("parser_debug", turn_parser_debug_on();); mysql_init_query(thd, inBuf, length); + if (query_cache_send_result_to_client(thd, inBuf, length) <= 0) { LEX *lex= thd->lex; diff --git a/sql/sql_select.cc b/sql/sql_select.cc index e7ade996d87..eb6f4efccc3 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -408,12 +408,19 @@ JOIN::prepare(Item ***rref_pointer_array, /* Check that all tables, fields, conds and order are ok */ - if ((!(select_options & OPTION_SETUP_TABLES_DONE) && - setup_tables_and_check_access(thd, &select_lex->context, join_list, - tables_list, - &select_lex->leaf_tables, FALSE, - SELECT_ACL, SELECT_ACL)) || - setup_wild(thd, tables_list, fields_list, &all_fields, wild_num) || + if (!(select_options & OPTION_SETUP_TABLES_DONE) && + setup_tables_and_check_access(thd, &select_lex->context, join_list, + tables_list, &select_lex->leaf_tables, + FALSE, SELECT_ACL, SELECT_ACL)) + DBUG_RETURN(-1); + + TABLE_LIST *table_ptr; + for (table_ptr= select_lex->leaf_tables; + table_ptr; + table_ptr= table_ptr->next_leaf) + tables++; + + if (setup_wild(thd, tables_list, fields_list, &all_fields, wild_num) || select_lex->setup_ref_array(thd, og_num) || setup_fields(thd, (*rref_pointer_array), fields_list, MARK_COLUMNS_READ, &all_fields, 1) || @@ -518,11 +525,6 @@ JOIN::prepare(Item ***rref_pointer_array, DBUG_RETURN(-1); } } - TABLE_LIST *table_ptr; - for (table_ptr= select_lex->leaf_tables; - table_ptr; - table_ptr= table_ptr->next_leaf) - tables++; } { /* Caclulate the number of groups */ @@ -6635,7 +6637,8 @@ static void update_depend_map(JOIN *join, ORDER *order) order->item[0]->update_used_tables(); order->depend_map=depend_map=order->item[0]->used_tables(); // Not item_sum(), RAND() and no reference to table outside of sub select - if (!(order->depend_map & (OUTER_REF_TABLE_BIT | RAND_TABLE_BIT))) + if (!(order->depend_map & (OUTER_REF_TABLE_BIT | RAND_TABLE_BIT)) + && !order->item[0]->with_sum_func) { for (JOIN_TAB **tab=join->map2table; depend_map ; @@ -7117,6 +7120,7 @@ static bool check_simple_equality(Item *left_item, Item *right_item, SYNOPSIS check_row_equality() + thd thread handle left_row left term of the row equality to be processed right_row right term of the row equality to be processed cond_equal multiple equalities that must hold together with the predicate @@ -7137,7 +7141,7 @@ static bool check_simple_equality(Item *left_item, Item *right_item, FALSE otherwise */ -static bool check_row_equality(Item *left_row, Item_row *right_row, +static bool check_row_equality(THD *thd, Item *left_row, Item_row *right_row, COND_EQUAL *cond_equal, List<Item>* eq_list) { uint n= left_row->cols(); @@ -7148,13 +7152,21 @@ static bool check_row_equality(Item *left_row, Item_row *right_row, Item *right_item= right_row->element_index(i); if (left_item->type() == Item::ROW_ITEM && right_item->type() == Item::ROW_ITEM) - is_converted= check_row_equality((Item_row *) left_item, - (Item_row *) right_item, - cond_equal, eq_list); - else + { + is_converted= check_row_equality(thd, + (Item_row *) left_item, + (Item_row *) right_item, + 
cond_equal, eq_list); + if (!is_converted) + thd->lex->current_select->cond_count++; + } + else + { is_converted= check_simple_equality(left_item, right_item, 0, cond_equal); - - if (!is_converted) + thd->lex->current_select->cond_count++; + } + + if (!is_converted) { Item_func_eq *eq_item; if (!(eq_item= new Item_func_eq(left_item, right_item))) @@ -7173,6 +7185,7 @@ static bool check_row_equality(Item *left_row, Item_row *right_row, SYNOPSIS check_equality() + thd thread handle item predicate to process cond_equal multiple equalities that must hold together with the predicate eq_list results of conversions of row equalities that are not simple @@ -7197,7 +7210,7 @@ static bool check_row_equality(Item *left_row, Item_row *right_row, or, if the procedure fails by a fatal error. */ -static bool check_equality(Item *item, COND_EQUAL *cond_equal, +static bool check_equality(THD *thd, Item *item, COND_EQUAL *cond_equal, List<Item> *eq_list) { if (item->type() == Item::FUNC_ITEM && @@ -7208,9 +7221,13 @@ static bool check_equality(Item *item, COND_EQUAL *cond_equal, if (left_item->type() == Item::ROW_ITEM && right_item->type() == Item::ROW_ITEM) - return check_row_equality((Item_row *) left_item, + { + thd->lex->current_select->cond_count--; + return check_row_equality(thd, + (Item_row *) left_item, (Item_row *) right_item, cond_equal, eq_list); + } else return check_simple_equality(left_item, right_item, item, cond_equal); } @@ -7223,6 +7240,7 @@ static bool check_equality(Item *item, COND_EQUAL *cond_equal, SYNOPSIS build_equal_items_for_cond() + thd thread handle cond condition(expression) where to make replacement inherited path to all inherited multiple equality items @@ -7285,7 +7303,7 @@ static bool check_equality(Item *item, COND_EQUAL *cond_equal, pointer to the transformed condition */ -static COND *build_equal_items_for_cond(COND *cond, +static COND *build_equal_items_for_cond(THD *thd, COND *cond, COND_EQUAL *inherited) { Item_equal *item_equal; @@ -7318,7 +7336,7 @@ static COND *build_equal_items_for_cond(COND *cond, structure here because it's restored before each re-execution of any prepared statement/stored procedure. */ - if (check_equality(item, &cond_equal, &eq_list)) + if (check_equality(thd, item, &cond_equal, &eq_list)) li.remove(); } @@ -7353,7 +7371,7 @@ static COND *build_equal_items_for_cond(COND *cond, while ((item= li++)) { Item *new_item; - if ((new_item = build_equal_items_for_cond(item, inherited))!= item) + if ((new_item= build_equal_items_for_cond(thd, item, inherited)) != item) { /* This replacement happens only for standalone equalities */ /* @@ -7383,7 +7401,7 @@ static COND *build_equal_items_for_cond(COND *cond, for WHERE a=b AND c=d AND (b=c OR d=5) b=c is replaced by =(a,b,c,d). 
*/ - if (check_equality(cond, &cond_equal, &eq_list)) + if (check_equality(thd, cond, &cond_equal, &eq_list)) { int n= cond_equal.current_level.elements + eq_list.elements; if (n == 0) @@ -7446,7 +7464,7 @@ static COND *build_equal_items_for_cond(COND *cond, SYNOPSIS build_equal_items() - thd Thread handler + thd thread handle cond condition to build the multiple equalities for inherited path to all inherited multiple equality items join_list list of join tables to which the condition refers to @@ -7507,7 +7525,7 @@ static COND *build_equal_items(THD *thd, COND *cond, if (cond) { - cond= build_equal_items_for_cond(cond, inherited); + cond= build_equal_items_for_cond(thd, cond, inherited); cond->update_used_tables(); if (cond->type() == Item::COND_ITEM && ((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC) @@ -8979,20 +8997,19 @@ static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table, enum enum_field_types type; /* - DATE/TIME fields have STRING_RESULT result type. To preserve - type they needed to be handled separately. + DATE/TIME and GEOMETRY fields have STRING_RESULT result type. + To preserve type they needed to be handled separately. */ if ((type= item->field_type()) == MYSQL_TYPE_DATETIME || type == MYSQL_TYPE_TIME || type == MYSQL_TYPE_DATE || - type == MYSQL_TYPE_TIMESTAMP) + type == MYSQL_TYPE_TIMESTAMP || type == MYSQL_TYPE_GEOMETRY) new_field= item->tmp_table_field_from_field_type(table, 1); /* Make sure that the blob fits into a Field_varstring which has 2-byte lenght. */ else if (item->max_length/item->collation.collation->mbmaxlen > 255 && - item->max_length/item->collation.collation->mbmaxlen < UINT_MAX16 - && convert_blob_length) + convert_blob_length < UINT_MAX16 && convert_blob_length) new_field= new Field_varstring(convert_blob_length, maybe_null, item->name, table->s, item->collation.collation); @@ -9419,13 +9436,19 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, { if (item->with_sum_func && type != Item::SUM_FUNC_ITEM) { - /* - Mark that the we have ignored an item that refers to a summary - function. We need to know this if someone is going to use - DISTINCT on the result. - */ - param->using_indirect_summary_function=1; - continue; + if (item->used_tables() & OUTER_REF_TABLE_BIT) + item->update_used_tables(); + if (type == Item::SUBSELECT_ITEM || + (item->used_tables() & ~OUTER_REF_TABLE_BIT)) + { + /* + Mark that the we have ignored an item that refers to a summary + function. We need to know this if someone is going to use + DISTINCT on the result. 
+ */ + param->using_indirect_summary_function=1; + continue; + } } if (item->const_item() && (int) hidden_field_count <= 0) continue; // We don't have to store this @@ -9610,6 +9633,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, share->default_values= table->record[1]+alloc_length; } copy_func[0]=0; // End marker + param->func_count= copy_func - param->items_to_copy; setup_tmp_table_column_bitmaps(table, bitmaps); @@ -13826,6 +13850,7 @@ count_field_types(TMP_TABLE_PARAM *param, List<Item> &fields, if (!sum_item->quick_group) param->quick_group=0; // UDF SUM function param->sum_func_count++; + param->func_count++; for (uint i=0 ; i < sum_item->arg_count ; i++) { diff --git a/sql/sql_select.h b/sql/sql_select.h index ca93179f9d9..ca37c7bd274 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -527,15 +527,11 @@ extern "C" int refpos_order_cmp(void* arg, const void *a,const void *b); class store_key :public Sql_alloc { - protected: - Field *to_field; // Store data here - char *null_ptr; - char err; public: bool null_key; /* TRUE <=> the value of the key has a null part */ enum store_key_result { STORE_KEY_OK, STORE_KEY_FATAL, STORE_KEY_CONV }; store_key(THD *thd, Field *field_arg, char *ptr, char *null, uint length) - :null_ptr(null), err(0), null_key(0) + :null_key(0), null_ptr(null), err(0) { if (field_arg->type() == MYSQL_TYPE_BLOB) { @@ -550,8 +546,35 @@ public: ptr, (uchar*) null, 1); } virtual ~store_key() {} /* Not actually needed */ - virtual enum store_key_result copy()=0; virtual const char *name() const=0; + + /** + @brief sets ignore truncation warnings mode and calls the real copy method + + @details this function makes sure truncation warnings when preparing the + key buffers don't end up as errors (because of an enclosing INSERT/UPDATE). + */ + enum store_key_result copy() + { + enum store_key_result result; + enum_check_fields saved_count_cuted_fields= + to_field->table->in_use->count_cuted_fields; + + to_field->table->in_use->count_cuted_fields= CHECK_FIELD_IGNORE; + + result= copy_inner(); + + to_field->table->in_use->count_cuted_fields= saved_count_cuted_fields; + + return result; + } + + protected: + Field *to_field; // Store data here + char *null_ptr; + char err; + + virtual enum store_key_result copy_inner()=0; }; @@ -571,7 +594,10 @@ class store_key_field: public store_key copy_field.set(to_field,from_field,0); } } - enum store_key_result copy() + const char *name() const { return field_name; } + + protected: + enum store_key_result copy_inner() { TABLE *table= copy_field.to_field->table; my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, @@ -581,7 +607,6 @@ class store_key_field: public store_key null_key= to_field->is_null(); return err != 0 ? STORE_KEY_FATAL : STORE_KEY_OK; } - const char *name() const { return field_name; } }; @@ -596,7 +621,10 @@ public: null_ptr_arg ? null_ptr_arg : item_arg->maybe_null ? &err : NullS, length), item(item_arg) {} - enum store_key_result copy() + const char *name() const { return "func"; } + + protected: + enum store_key_result copy_inner() { TABLE *table= to_field->table; my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, @@ -606,7 +634,6 @@ public: null_key= to_field->is_null() || item->null_value; return (err != 0 || res > 2 ? 
STORE_KEY_FATAL : (store_key_result) res); } - const char *name() const { return "func"; } }; @@ -622,7 +649,10 @@ public: &err : NullS, length, item_arg), inited(0) { } - enum store_key_result copy() + const char *name() const { return "const"; } + +protected: + enum store_key_result copy_inner() { int res; if (!inited) @@ -637,7 +667,6 @@ public: null_key= to_field->is_null() || item->null_value; return (err > 2 ? STORE_KEY_FATAL : (store_key_result) err); } - const char *name() const { return "const"; } }; bool cp_buffer_from_ref(THD *thd, TABLE *table, TABLE_REF *ref); diff --git a/sql/sql_servers.cc b/sql/sql_servers.cc index 60ca08fb555..a62ce98850b 100644 --- a/sql/sql_servers.cc +++ b/sql/sql_servers.cc @@ -16,6 +16,21 @@ /* The servers are saved in the system table "servers" + + Currently, when the user performs an ALTER SERVER or a DROP SERVER + operation, it will cause all open tables which refer to the named + server connection to be flushed. This may cause some undesirable + behaviour with regard to currently running transactions. It is + expected that the DBA knows what s/he is doing when s/he performs + the ALTER SERVER or DROP SERVER operation. + + TODO: + It is desirable for us to implement a callback mechanism instead where + callbacks can be registered for specific server protocols. The callback + will be fired when such a server name has been created/altered/dropped + or when statistics are to be gathered such as how many actual connections. + Storage engines etc will be able to make use of the callback so that + currently running transactions etc will not be disrupted. */ #include "mysql_priv.h" @@ -25,15 +40,43 @@ #include "sp_head.h" #include "sp.h" -static my_bool servers_load(THD *thd, TABLE_LIST *tables); -HASH servers_cache; -pthread_mutex_t servers_cache_mutex; // To init the hash -uint servers_cache_initialised=FALSE; -/* Version of server table. incremented by servers_load */ -static uint servers_version=0; +/* + We only use 1 mutex to guard the data structures - THR_LOCK_servers. + Read locked when only reading data and write-locked for all other access. 
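The sql_servers.cc rewrite above replaces the mutex/initialised-flag pair with a single reader/writer lock guarding the cache and its MEM_ROOT: lookups take a read lock and copy the entry out, mutations take the write lock. The shape of that policy, with a std::map standing in for the HASH (all names here are illustrative):

#include <pthread.h>
#include <map>
#include <string>

struct ServerEntry { std::string host; int port; };

static pthread_rwlock_t cache_lock= PTHREAD_RWLOCK_INITIALIZER;
static std::map<std::string, ServerEntry> server_cache;

/* Lookups copy the entry out under a read lock, so the caller never
   holds a pointer into the cache after the lock is released. */
static bool lookup_server(const std::string &name, ServerEntry *out)
{
  pthread_rwlock_rdlock(&cache_lock);
  std::map<std::string, ServerEntry>::const_iterator it=
    server_cache.find(name);
  bool found= (it != server_cache.end());
  if (found)
    *out= it->second;
  pthread_rwlock_unlock(&cache_lock);
  return found;
}

/* Any mutation (CREATE/ALTER/DROP SERVER) takes the write lock. */
static void upsert_server(const std::string &name, const ServerEntry &entry)
{
  pthread_rwlock_wrlock(&cache_lock);
  server_cache[name]= entry;
  pthread_rwlock_unlock(&cache_lock);
}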
+*/ + +static HASH servers_cache; static MEM_ROOT mem; static rw_lock_t THR_LOCK_servers; +static bool get_server_from_table_to_cache(TABLE *table); + +/* insert functions */ +static int insert_server(THD *thd, FOREIGN_SERVER *server_options); +static int insert_server_record(TABLE *table, FOREIGN_SERVER *server); +static int insert_server_record_into_cache(FOREIGN_SERVER *server); +static void prepare_server_struct_for_insert(LEX_SERVER_OPTIONS *server_options, + FOREIGN_SERVER *server); +/* drop functions */ +static int delete_server_record(TABLE *table, + char *server_name, + int server_name_length); +static int delete_server_record_in_cache(LEX_SERVER_OPTIONS *server_options); + +/* update functions */ +static void prepare_server_struct_for_update(LEX_SERVER_OPTIONS *server_options, + FOREIGN_SERVER *existing, + FOREIGN_SERVER *altered); +static int update_server(THD *thd, FOREIGN_SERVER *existing, + FOREIGN_SERVER *altered); +static int update_server_record(TABLE *table, FOREIGN_SERVER *server); +static int update_server_record_in_cache(FOREIGN_SERVER *existing, + FOREIGN_SERVER *altered); +/* utility functions */ +static void merge_server_struct(FOREIGN_SERVER *from, FOREIGN_SERVER *to); + + + static byte *servers_cache_get_key(FOREIGN_SERVER *server, uint *length, my_bool not_used __attribute__((unused))) { @@ -46,6 +89,7 @@ static byte *servers_cache_get_key(FOREIGN_SERVER *server, uint *length, DBUG_RETURN((byte*) server->server_name); } + /* Initialize structures responsible for servers used in federated server scheme information for them from the server @@ -65,35 +109,27 @@ static byte *servers_cache_get_key(FOREIGN_SERVER *server, uint *length, 1 Could not initialize servers */ -my_bool servers_init(bool dont_read_servers_table) +bool servers_init(bool dont_read_servers_table) { THD *thd; - my_bool return_val= 0; + bool return_val= FALSE; DBUG_ENTER("servers_init"); /* init the mutex */ - if (pthread_mutex_init(&servers_cache_mutex, MY_MUTEX_INIT_FAST)) - DBUG_RETURN(1); - if (my_rwlock_init(&THR_LOCK_servers, NULL)) - DBUG_RETURN(1); + DBUG_RETURN(TRUE); /* initialise our servers cache */ if (hash_init(&servers_cache, system_charset_info, 32, 0, 0, (hash_get_key) servers_cache_get_key, 0, 0)) { - return_val= 1; /* we failed, out of memory? */ + return_val= TRUE; /* we failed, out of memory? */ goto end; } /* Initialize the mem root for data */ init_alloc_root(&mem, ACL_ALLOC_BLOCK_SIZE, 0); - /* - at this point, the cache is initialised, let it be known - */ - servers_cache_initialised= TRUE; - if (dont_read_servers_table) goto end; @@ -101,7 +137,7 @@ my_bool servers_init(bool dont_read_servers_table) To be able to run this from boot, we allocate a temporary THD */ if (!(thd=new THD)) - DBUG_RETURN(1); + DBUG_RETURN(TRUE); thd->thread_stack= (char*) &thd; thd->store_globals(); /* @@ -131,19 +167,13 @@ end: TRUE Error */ -static my_bool servers_load(THD *thd, TABLE_LIST *tables) +static bool servers_load(THD *thd, TABLE_LIST *tables) { TABLE *table; READ_RECORD read_record_info; - my_bool return_val= TRUE; + bool return_val= TRUE; DBUG_ENTER("servers_load"); - if (!servers_cache_initialised) - DBUG_RETURN(0); - - /* need to figure out how to utilise this variable */ - servers_version++; /* servers updated */ - /* first, send all cached rows to sleep with the fishes, oblivion! 
I expect this crappy comment replaced */ free_root(&mem, MYF(MY_MARK_BLOCKS_FREE)); @@ -157,7 +187,7 @@ static my_bool servers_load(THD *thd, TABLE_LIST *tables) goto end; } - return_val=0; + return_val= FALSE; end: end_read_record(&read_record_info); @@ -184,10 +214,10 @@ end: TRUE Failure */ -my_bool servers_reload(THD *thd) +bool servers_reload(THD *thd) { TABLE_LIST tables[1]; - my_bool return_val= 1; + bool return_val= TRUE; DBUG_ENTER("servers_reload"); if (thd->locked_tables) @@ -197,10 +227,9 @@ my_bool servers_reload(THD *thd) close_thread_tables(thd); } - /* - To avoid deadlocks we should obtain table locks before - obtaining servers_cache->lock mutex. - */ + DBUG_PRINT("info", ("locking servers_cache")); + rw_wrlock(&THR_LOCK_servers); + bzero((char*) tables, sizeof(tables)); tables[0].alias= tables[0].table_name= (char*) "servers"; tables[0].db= (char*) "mysql"; @@ -213,12 +242,6 @@ my_bool servers_reload(THD *thd) goto end; } - DBUG_PRINT("info", ("locking servers_cache")); - VOID(pthread_mutex_lock(&servers_cache_mutex)); - - //old_servers_cache= servers_cache; - //old_mem=mem; - if ((return_val= servers_load(thd, tables))) { // Error. Revert to old list /* blast, for now, we have no servers, discuss later way to preserve */ @@ -227,14 +250,14 @@ my_bool servers_reload(THD *thd) servers_free(); } - DBUG_PRINT("info", ("unlocking servers_cache")); - VOID(pthread_mutex_unlock(&servers_cache_mutex)); - end: close_thread_tables(thd); + DBUG_PRINT("info", ("unlocking servers_cache")); + rw_unlock(&THR_LOCK_servers); DBUG_RETURN(return_val); } + /* Initialize structures responsible for servers used in federated server scheme information for them from the server @@ -261,7 +284,8 @@ end: 1 could not insert server struct into global servers cache */ -my_bool get_server_from_table_to_cache(TABLE *table) +static bool +get_server_from_table_to_cache(TABLE *table) { /* alloc a server struct */ char *ptr; @@ -309,68 +333,6 @@ my_bool get_server_from_table_to_cache(TABLE *table) DBUG_RETURN(FALSE); } -/* - SYNOPSIS - server_exists_in_table() - THD *thd - thread pointer - LEX_SERVER_OPTIONS *server_options - pointer to Lex->server_options - - NOTES - This function takes a LEX_SERVER_OPTIONS struct, which is very much the - same type of structure as a FOREIGN_SERVER, it contains the values parsed - in any one of the [CREATE|DELETE|DROP] SERVER statements. Using the - member "server_name", index_read_idx either founds the record and returns - 1, or doesn't find the record, and returns 0 - - RETURN VALUES - 0 record not found - 1 record found -*/ - -my_bool server_exists_in_table(THD *thd, LEX_SERVER_OPTIONS *server_options) -{ - int result= 1; - int error= 0; - TABLE_LIST tables; - TABLE *table; - DBUG_ENTER("server_exists"); - - bzero((char*) &tables, sizeof(tables)); - tables.db= (char*) "mysql"; - tables.alias= tables.table_name= (char*) "servers"; - - /* need to open before acquiring THR_LOCK_plugin or it will deadlock */ - if (! 
(table= open_ltable(thd, &tables, TL_WRITE))) - DBUG_RETURN(TRUE); - - table->use_all_columns(); - - rw_wrlock(&THR_LOCK_servers); - VOID(pthread_mutex_lock(&servers_cache_mutex)); - - /* set the field that's the PK to the value we're looking for */ - table->field[0]->store(server_options->server_name, - server_options->server_name_length, - system_charset_info); - - if ((error= table->file->index_read_idx(table->record[0], 0, - (byte *)table->field[0]->ptr, HA_WHOLE_KEY, - HA_READ_KEY_EXACT))) - { - if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) - { - table->file->print_error(error, MYF(0)); - result= -1; - } - result= 0; - DBUG_PRINT("info",("record for server '%s' not found!", - server_options->server_name)); - } - - VOID(pthread_mutex_unlock(&servers_cache_mutex)); - rw_unlock(&THR_LOCK_servers); - DBUG_RETURN(result); -} /* SYNOPSIS @@ -382,15 +344,18 @@ my_bool server_exists_in_table(THD *thd, LEX_SERVER_OPTIONS *server_options) This function takes a server object that is has all members properly prepared, ready to be inserted both into the mysql.servers table and the servers cache. + + THR_LOCK_servers must be write locked. RETURN VALUES 0 - no error other - error code */ -int insert_server(THD *thd, FOREIGN_SERVER *server) +static int +insert_server(THD *thd, FOREIGN_SERVER *server) { - int error= 0; + int error= -1; TABLE_LIST tables; TABLE *table; @@ -402,13 +367,7 @@ int insert_server(THD *thd, FOREIGN_SERVER *server) /* need to open before acquiring THR_LOCK_plugin or it will deadlock */ if (! (table= open_ltable(thd, &tables, TL_WRITE))) - DBUG_RETURN(TRUE); - - /* lock mutex to make sure no changes happen */ - VOID(pthread_mutex_lock(&servers_cache_mutex)); - - /* lock table */ - rw_wrlock(&THR_LOCK_servers); + goto end; /* insert the server into the table */ if ((error= insert_server_record(table, server))) @@ -419,12 +378,10 @@ int insert_server(THD *thd, FOREIGN_SERVER *server) goto end; end: - /* unlock the table */ - rw_unlock(&THR_LOCK_servers); - VOID(pthread_mutex_unlock(&servers_cache_mutex)); DBUG_RETURN(error); } + /* SYNOPSIS int insert_server_record_into_cache() @@ -434,13 +391,16 @@ end: This function takes a FOREIGN_SERVER pointer to an allocated (root mem) and inserts it into the global servers cache + THR_LOCK_servers must be write locked. 
+ RETURN VALUE 0 - no error >0 - error code */ -int insert_server_record_into_cache(FOREIGN_SERVER *server) +static int +insert_server_record_into_cache(FOREIGN_SERVER *server) { int error=0; DBUG_ENTER("insert_server_record_into_cache"); @@ -461,6 +421,7 @@ int insert_server_record_into_cache(FOREIGN_SERVER *server) DBUG_RETURN(error); } + /* SYNOPSIS store_server_fields() @@ -478,7 +439,8 @@ int insert_server_record_into_cache(FOREIGN_SERVER *server) */ -void store_server_fields(TABLE *table, FOREIGN_SERVER *server) +static void +store_server_fields(TABLE *table, FOREIGN_SERVER *server) { table->use_all_columns(); @@ -539,6 +501,7 @@ void store_server_fields(TABLE *table, FOREIGN_SERVER *server) */ +static int insert_server_record(TABLE *table, FOREIGN_SERVER *server) { int error; @@ -605,9 +568,11 @@ int insert_server_record(TABLE *table, FOREIGN_SERVER *server) int drop_server(THD *thd, LEX_SERVER_OPTIONS *server_options) { - int error= 0; + int error; TABLE_LIST tables; TABLE *table; + LEX_STRING name= { server_options->server_name, + server_options->server_name_length }; DBUG_ENTER("drop_server"); DBUG_PRINT("info", ("server name server->server_name %s", @@ -617,28 +582,35 @@ int drop_server(THD *thd, LEX_SERVER_OPTIONS *server_options) tables.db= (char*) "mysql"; tables.alias= tables.table_name= (char*) "servers"; - /* need to open before acquiring THR_LOCK_plugin or it will deadlock */ - if (! (table= open_ltable(thd, &tables, TL_WRITE))) - DBUG_RETURN(TRUE); - rw_wrlock(&THR_LOCK_servers); - VOID(pthread_mutex_lock(&servers_cache_mutex)); + /* hit the memory hit first */ + if ((error= delete_server_record_in_cache(server_options))) + goto end; - if ((error= delete_server_record(table, - server_options->server_name, - server_options->server_name_length))) + if (! (table= open_ltable(thd, &tables, TL_WRITE))) + { + error= my_errno; goto end; + } + error= delete_server_record(table, name.str, name.length); - if ((error= delete_server_record_in_cache(server_options))) - goto end; + /* close the servers table before we call closed_cached_connection_tables */ + close_thread_tables(thd); + + if (close_cached_connection_tables(thd, TRUE, &name)) + { + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_UNKNOWN_ERROR, "Server connection in use"); + } end: - VOID(pthread_mutex_unlock(&servers_cache_mutex)); rw_unlock(&THR_LOCK_servers); DBUG_RETURN(error); } + + /* SYNOPSIS @@ -657,10 +629,10 @@ end: */ -int delete_server_record_in_cache(LEX_SERVER_OPTIONS *server_options) +static int +delete_server_record_in_cache(LEX_SERVER_OPTIONS *server_options) { - - int error= 0; + int error= ER_FOREIGN_SERVER_DOESNT_EXIST; FOREIGN_SERVER *server; DBUG_ENTER("delete_server_record_in_cache"); @@ -676,7 +648,7 @@ int delete_server_record_in_cache(LEX_SERVER_OPTIONS *server_options) DBUG_PRINT("info", ("server_name %s length %d not found!", server_options->server_name, server_options->server_name_length)); - // what should be done if not found in the cache? 
+ goto end; } /* We succeded in deletion of the server to the table, now delete @@ -686,14 +658,15 @@ int delete_server_record_in_cache(LEX_SERVER_OPTIONS *server_options) server->server_name, server->server_name_length)); - if (server) - VOID(hash_delete(&servers_cache, (byte*) server)); - - servers_version++; /* servers updated */ + VOID(hash_delete(&servers_cache, (byte*) server)); + + error= 0; +end: DBUG_RETURN(error); } + /* SYNOPSIS @@ -713,6 +686,8 @@ int delete_server_record_in_cache(LEX_SERVER_OPTIONS *server_options) table for the particular server via the call to update_server_record, and in the servers_cache via update_server_record_in_cache. + THR_LOCK_servers must be write locked. + RETURN VALUE 0 - no error >0 - error code @@ -721,7 +696,7 @@ int delete_server_record_in_cache(LEX_SERVER_OPTIONS *server_options) int update_server(THD *thd, FOREIGN_SERVER *existing, FOREIGN_SERVER *altered) { - int error= 0; + int error; TABLE *table; TABLE_LIST tables; DBUG_ENTER("update_server"); @@ -731,19 +706,26 @@ int update_server(THD *thd, FOREIGN_SERVER *existing, FOREIGN_SERVER *altered) tables.alias= tables.table_name= (char*)"servers"; if (!(table= open_ltable(thd, &tables, TL_WRITE))) - DBUG_RETURN(1); + { + error= my_errno; + goto end; + } - rw_wrlock(&THR_LOCK_servers); if ((error= update_server_record(table, altered))) goto end; - update_server_record_in_cache(existing, altered); + error= update_server_record_in_cache(existing, altered); + + /* + Perform a reload so we don't have a 'hole' in our mem_root + */ + servers_load(thd, &tables); end: - rw_unlock(&THR_LOCK_servers); DBUG_RETURN(error); } + /* SYNOPSIS @@ -760,6 +742,8 @@ end: HASH, then the updated record inserted, in essence replacing the old record. + THR_LOCK_servers must be write locked. 
+ RETURN VALUE 0 - no error 1 - error @@ -790,13 +774,13 @@ int update_server_record_in_cache(FOREIGN_SERVER *existing, { DBUG_PRINT("info", ("had a problem inserting server %s at %lx", altered->server_name, (long unsigned int) altered)); - error= 1; + error= ER_OUT_OF_RESOURCES; } - servers_version++; /* servers updated */ DBUG_RETURN(error); } + /* SYNOPSIS @@ -829,9 +813,9 @@ void merge_server_struct(FOREIGN_SERVER *from, FOREIGN_SERVER *to) to->password= strdup_root(&mem, from->password); if (to->port == -1) to->port= from->port; - if (!to->socket) + if (!to->socket && from->socket) to->socket= strdup_root(&mem, from->socket); - if (!to->scheme) + if (!to->scheme && from->scheme) to->scheme= strdup_root(&mem, from->scheme); if (!to->owner) to->owner= strdup_root(&mem, from->owner); @@ -839,6 +823,7 @@ void merge_server_struct(FOREIGN_SERVER *from, FOREIGN_SERVER *to) DBUG_VOID_RETURN; } + /* SYNOPSIS @@ -861,7 +846,9 @@ void merge_server_struct(FOREIGN_SERVER *from, FOREIGN_SERVER *to) */ -int update_server_record(TABLE *table, FOREIGN_SERVER *server) + +static int +update_server_record(TABLE *table, FOREIGN_SERVER *server) { int error=0; DBUG_ENTER("update_server_record"); @@ -876,10 +863,7 @@ int update_server_record(TABLE *table, FOREIGN_SERVER *server) HA_READ_KEY_EXACT))) { if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) - { table->file->print_error(error, MYF(0)); - error= 1; - } DBUG_PRINT("info",("server not found!")); error= ER_FOREIGN_SERVER_DOESNT_EXIST; } @@ -899,6 +883,7 @@ end: DBUG_RETURN(error); } + /* SYNOPSIS @@ -914,11 +899,11 @@ end: */ -int delete_server_record(TABLE *table, - char *server_name, - int server_name_length) +static int +delete_server_record(TABLE *table, + char *server_name, int server_name_length) { - int error= 0; + int error; DBUG_ENTER("delete_server_record"); table->use_all_columns(); @@ -930,10 +915,7 @@ int delete_server_record(TABLE *table, HA_READ_KEY_EXACT))) { if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) - { table->file->print_error(error, MYF(0)); - error= 1; - } DBUG_PRINT("info",("server not found!")); error= ER_FOREIGN_SERVER_DOESNT_EXIST; } @@ -962,28 +944,35 @@ int delete_server_record(TABLE *table, int create_server(THD *thd, LEX_SERVER_OPTIONS *server_options) { - int error; + int error= ER_FOREIGN_SERVER_EXISTS; FOREIGN_SERVER *server; DBUG_ENTER("create_server"); DBUG_PRINT("info", ("server_options->server_name %s", server_options->server_name)); + rw_wrlock(&THR_LOCK_servers); + + /* hit the memory first */ + if (hash_search(&servers_cache, (byte*) server_options->server_name, + server_options->server_name_length)) + goto end; + server= (FOREIGN_SERVER *)alloc_root(&mem, sizeof(FOREIGN_SERVER)); - if ((error= prepare_server_struct_for_insert(server_options, server))) - goto end; + prepare_server_struct_for_insert(server_options, server); - if ((error= insert_server(thd, server))) - goto end; + error= insert_server(thd, server); DBUG_PRINT("info", ("error returned %d", error)); end: + rw_unlock(&THR_LOCK_servers); DBUG_RETURN(error); } + /* SYNOPSIS @@ -1000,37 +989,44 @@ end: int alter_server(THD *thd, LEX_SERVER_OPTIONS *server_options) { - int error= 0; + int error= ER_FOREIGN_SERVER_DOESNT_EXIST; FOREIGN_SERVER *altered, *existing; + LEX_STRING name= { server_options->server_name, + server_options->server_name_length }; DBUG_ENTER("alter_server"); DBUG_PRINT("info", ("server_options->server_name %s", server_options->server_name)); + rw_wrlock(&THR_LOCK_servers); + + if (!(existing= 
(FOREIGN_SERVER *) hash_search(&servers_cache, + (byte*) name.str, + name.length))) + goto end; + altered= (FOREIGN_SERVER *)alloc_root(&mem, sizeof(FOREIGN_SERVER)); - VOID(pthread_mutex_lock(&servers_cache_mutex)); + prepare_server_struct_for_update(server_options, existing, altered); - if (!(existing= (FOREIGN_SERVER *) hash_search(&servers_cache, - (byte*) server_options->server_name, - server_options->server_name_length))) - { - error= ER_FOREIGN_SERVER_DOESNT_EXIST; - goto end; - } + error= update_server(thd, existing, altered); - if ((error= prepare_server_struct_for_update(server_options, existing, altered))) - goto end; + /* close the servers table before we call closed_cached_connection_tables */ + close_thread_tables(thd); - if ((error= update_server(thd, existing, altered))) - goto end; + if (close_cached_connection_tables(thd, FALSE, &name)) + { + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_UNKNOWN_ERROR, "Server connection in use"); + } end: DBUG_PRINT("info", ("error returned %d", error)); - VOID(pthread_mutex_unlock(&servers_cache_mutex)); + rw_unlock(&THR_LOCK_servers); DBUG_RETURN(error); } + /* SYNOPSIS @@ -1041,19 +1037,17 @@ end: NOTES RETURN VALUE - 0 - no error + none */ -int prepare_server_struct_for_insert(LEX_SERVER_OPTIONS *server_options, - FOREIGN_SERVER *server) +static void +prepare_server_struct_for_insert(LEX_SERVER_OPTIONS *server_options, + FOREIGN_SERVER *server) { - int error; char *unset_ptr= (char*)""; DBUG_ENTER("prepare_server_struct"); - error= 0; - /* these two MUST be set */ server->server_name= strdup_root(&mem, server_options->server_name); server->server_name_length= server_options->server_name_length; @@ -1083,7 +1077,7 @@ int prepare_server_struct_for_insert(LEX_SERVER_OPTIONS *server_options, server->owner= server_options->owner ? strdup_root(&mem, server_options->owner) : unset_ptr; - DBUG_RETURN(error); + DBUG_VOID_RETURN; } /* @@ -1099,13 +1093,12 @@ int prepare_server_struct_for_insert(LEX_SERVER_OPTIONS *server_options, */ -int prepare_server_struct_for_update(LEX_SERVER_OPTIONS *server_options, - FOREIGN_SERVER *existing, - FOREIGN_SERVER *altered) +static void +prepare_server_struct_for_update(LEX_SERVER_OPTIONS *server_options, + FOREIGN_SERVER *existing, + FOREIGN_SERVER *altered) { - int error; DBUG_ENTER("prepare_server_struct_for_update"); - error= 0; altered->server_name= strdup_root(&mem, server_options->server_name); altered->server_name_length= server_options->server_name_length; @@ -1156,7 +1149,7 @@ int prepare_server_struct_for_update(LEX_SERVER_OPTIONS *server_options, (strcmp(server_options->owner, existing->owner))) ? strdup_root(&mem, server_options->owner) : 0; - DBUG_RETURN(error); + DBUG_VOID_RETURN; } /* @@ -1175,16 +1168,65 @@ int prepare_server_struct_for_update(LEX_SERVER_OPTIONS *server_options, void servers_free(bool end) { DBUG_ENTER("servers_free"); - if (!servers_cache_initialised) + if (!hash_inited(&servers_cache)) + DBUG_VOID_RETURN; + if (!end) + { + free_root(&mem, MYF(MY_MARK_BLOCKS_FREE)); + my_hash_reset(&servers_cache); DBUG_VOID_RETURN; - VOID(pthread_mutex_destroy(&servers_cache_mutex)); - servers_cache_initialised=0; + } + rwlock_destroy(&THR_LOCK_servers); free_root(&mem,MYF(0)); hash_free(&servers_cache); DBUG_VOID_RETURN; } +/* + SYNOPSIS + + clone_server(MEM_ROOT *mem_root, FOREIGN_SERVER *orig, FOREIGN_SERVER *buff) + + Create a clone of FOREIGN_SERVER. If the supplied mem_root is of + thd->mem_root then the copy is automatically disposed at end of statement. 
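clone_server(), introduced below, duplicates the cached entry's strings into a caller-supplied MEM_ROOT so the copy outlives the read lock and is released with the statement. The same idea with a toy arena (types and names invented for the sketch):

#include <deque>
#include <string>

/* Toy arena: owns the duplicated strings; std::deque keeps the
   returned pointers stable as more strings are added. */
struct Arena
{
  std::deque<std::string> storage;
  const char *dup(const char *s)
  {
    if (!s)
      return 0;
    storage.push_back(std::string(s));
    return storage.back().c_str();
  }
};

struct ServerRec { const char *name; const char *host; int port; };

static ServerRec clone_into(Arena &arena, const ServerRec &src)
{
  ServerRec copy;
  copy.name= arena.dup(src.name);
  copy.host= arena.dup(src.host);
  copy.port= src.port;
  return copy;
}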
+ + NOTES + + ARGS + MEM_ROOT pointer (strings are copied into this mem root) + FOREIGN_SERVER pointer (made a copy of) + FOREIGN_SERVER buffer (if not-NULL, this pointer is returned) + + RETURN VALUE + FOREIGN_SEVER pointer (copy of one supplied FOREIGN_SERVER) +*/ + +static FOREIGN_SERVER *clone_server(MEM_ROOT *mem, const FOREIGN_SERVER *server, + FOREIGN_SERVER *buffer) +{ + DBUG_ENTER("sql_server.cc:clone_server"); + + if (!buffer) + buffer= (FOREIGN_SERVER *) alloc_root(mem, sizeof(FOREIGN_SERVER)); + + buffer->server_name= strmake_root(mem, server->server_name, + server->server_name_length); + buffer->port= server->port; + buffer->server_name_length= server->server_name_length; + + /* TODO: We need to examine which of these can really be NULL */ + buffer->db= server->db ? strdup_root(mem, server->db) : NULL; + buffer->scheme= server->scheme ? strdup_root(mem, server->scheme) : NULL; + buffer->username= server->username? strdup_root(mem, server->username): NULL; + buffer->password= server->password? strdup_root(mem, server->password): NULL; + buffer->socket= server->socket ? strdup_root(mem, server->socket) : NULL; + buffer->owner= server->owner ? strdup_root(mem, server->owner) : NULL; + buffer->host= server->host ? strdup_root(mem, server->host) : NULL; + + DBUG_RETURN(buffer); +} + /* @@ -1199,11 +1241,11 @@ void servers_free(bool end) */ -FOREIGN_SERVER *get_server_by_name(const char *server_name) +FOREIGN_SERVER *get_server_by_name(MEM_ROOT *mem, const char *server_name, + FOREIGN_SERVER *buff) { - ulong error_num=0; uint server_name_length; - FOREIGN_SERVER *server= 0; + FOREIGN_SERVER *server; DBUG_ENTER("get_server_by_name"); DBUG_PRINT("info", ("server_name %s", server_name)); @@ -1212,12 +1254,11 @@ FOREIGN_SERVER *get_server_by_name(const char *server_name) if (! 
server_name || !strlen(server_name)) { DBUG_PRINT("info", ("server_name not defined!")); - error_num= 1; DBUG_RETURN((FOREIGN_SERVER *)NULL); } DBUG_PRINT("info", ("locking servers_cache")); - VOID(pthread_mutex_lock(&servers_cache_mutex)); + rw_rdlock(&THR_LOCK_servers); if (!(server= (FOREIGN_SERVER *) hash_search(&servers_cache, (byte*) server_name, server_name_length))) @@ -1226,8 +1267,12 @@ FOREIGN_SERVER *get_server_by_name(const char *server_name) server_name, server_name_length)); server= (FOREIGN_SERVER *) NULL; } + /* otherwise, make copy of server */ + else + server= clone_server(mem, server, buff); + DBUG_PRINT("info", ("unlocking servers_cache")); - VOID(pthread_mutex_unlock(&servers_cache_mutex)); + rw_unlock(&THR_LOCK_servers); DBUG_RETURN(server); } diff --git a/sql/sql_servers.h b/sql/sql_servers.h index 23b8cefd5bb..63c691893d1 100644 --- a/sql/sql_servers.h +++ b/sql/sql_servers.h @@ -25,40 +25,19 @@ typedef struct st_federated_server } FOREIGN_SERVER; /* cache handlers */ -my_bool servers_init(bool dont_read_server_table); -my_bool servers_reload(THD *thd); -my_bool get_server_from_table_to_cache(TABLE *table); +bool servers_init(bool dont_read_server_table); +bool servers_reload(THD *thd); void servers_free(bool end=0); /* insert functions */ int create_server(THD *thd, LEX_SERVER_OPTIONS *server_options); -int insert_server(THD *thd, FOREIGN_SERVER *server_options); -int insert_server_record(TABLE *table, FOREIGN_SERVER *server); -int insert_server_record_into_cache(FOREIGN_SERVER *server); -void store_server_fields_for_insert(TABLE *table, FOREIGN_SERVER *server); -void store_server_fields_for_insert(TABLE *table, - FOREIGN_SERVER *existing, - FOREIGN_SERVER *altered); -int prepare_server_struct_for_insert(LEX_SERVER_OPTIONS *server_options, - FOREIGN_SERVER *server); /* drop functions */ int drop_server(THD *thd, LEX_SERVER_OPTIONS *server_options); -int delete_server_record(TABLE *table, - char *server_name, - int server_name_length); -int delete_server_record_in_cache(LEX_SERVER_OPTIONS *server_options); /* update functions */ int alter_server(THD *thd, LEX_SERVER_OPTIONS *server_options); -int prepare_server_struct_for_update(LEX_SERVER_OPTIONS *server_options, - FOREIGN_SERVER *existing, - FOREIGN_SERVER *altered); -int update_server(THD *thd, FOREIGN_SERVER *existing, FOREIGN_SERVER *altered); -int update_server_record(TABLE *table, FOREIGN_SERVER *server); -int update_server_record_in_cache(FOREIGN_SERVER *existing, - FOREIGN_SERVER *altered); -/* utility functions */ -void merge_server_struct(FOREIGN_SERVER *from, FOREIGN_SERVER *to); -FOREIGN_SERVER *get_server_by_name(const char *server_name); -my_bool server_exists_in_table(THD *thd, char *server_name); + +/* lookup functions */ +FOREIGN_SERVER *get_server_by_name(MEM_ROOT *mem, const char *server_name, + FOREIGN_SERVER *server_buffer); diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 613222fcd6e..4e89fb610cc 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -53,7 +53,8 @@ enum enum_i_s_events_fields ISE_CREATED, ISE_LAST_ALTERED, ISE_LAST_EXECUTED, - ISE_EVENT_COMMENT + ISE_EVENT_COMMENT, + ISE_ORIGINATOR }; @@ -4398,10 +4399,23 @@ copy_event_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table) } /* status */ - if (et.status == Event_timed::ENABLED) - sch_table->field[ISE_STATUS]->store(STRING_WITH_LEN("ENABLED"), scs); - else - sch_table->field[ISE_STATUS]->store(STRING_WITH_LEN("DISABLED"), scs); + + switch (et.status) + { + case Event_timed::ENABLED: + 
sch_table->field[ISE_STATUS]->store(STRING_WITH_LEN("ENABLED"), scs); + break; + case Event_timed::SLAVESIDE_DISABLED: + sch_table->field[ISE_STATUS]->store(STRING_WITH_LEN("SLAVESIDE_DISABLED"), + scs); + break; + case Event_timed::DISABLED: + sch_table->field[ISE_STATUS]->store(STRING_WITH_LEN("DISABLED"), scs); + break; + default: + DBUG_ASSERT(0); + } + sch_table->field[ISE_ORIGINATOR]->store(et.originator, TRUE); /* on_completion */ if (et.on_completion == Event_timed::ON_COMPLETION_DROP) @@ -4940,9 +4954,7 @@ int mysql_schema_table(THD *thd, LEX *lex, TABLE_LIST *table_list) TABLE *table; DBUG_ENTER("mysql_schema_table"); if (!(table= table_list->schema_table->create_table(thd, table_list))) - { DBUG_RETURN(1); - } table->s->tmp_table= SYSTEM_TMP_TABLE; table->grant.privilege= SELECT_ACL; /* @@ -5440,13 +5452,14 @@ ST_FIELD_INFO events_fields_info[]= {"SQL_MODE", 65535, MYSQL_TYPE_STRING, 0, 0, 0}, {"STARTS", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, "Starts"}, {"ENDS", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, "Ends"}, - {"STATUS", 8, MYSQL_TYPE_STRING, 0, 0, "Status"}, + {"STATUS", 18, MYSQL_TYPE_STRING, 0, 0, "Status"}, {"ON_COMPLETION", 12, MYSQL_TYPE_STRING, 0, 0, 0}, {"CREATED", 0, MYSQL_TYPE_TIMESTAMP, 0, 0, 0}, {"LAST_ALTERED", 0, MYSQL_TYPE_TIMESTAMP, 0, 0, 0}, {"LAST_EXECUTED", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, 0}, {"EVENT_COMMENT", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} + {"ORIGINATOR", 10, MYSQL_TYPE_LONG, 0, 0, "Originator"}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} }; diff --git a/sql/sql_table.cc b/sql/sql_table.cc index bb3f293941e..3eb47ebae6e 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -6838,7 +6838,9 @@ copy_data_between_tables(TABLE *from,TABLE *to, copy_ptr->do_copy(copy_ptr); } prev_insert_id= to->file->next_insert_id; - if ((error=to->file->ha_write_row((byte*) to->record[0]))) + error=to->file->write_row((byte*) to->record[0]); + to->auto_increment_field_not_null= FALSE; + if (error) { if (!ignore || to->file->is_fatal_error(error, HA_CHECK_DUP)) diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc index 4c584bff72a..da5c1b0bc66 100644 --- a/sql/sql_udf.cc +++ b/sql/sql_udf.cc @@ -403,6 +403,13 @@ int mysql_create_function(THD *thd,udf_func *udf) DBUG_RETURN(1); } + /* + Turn off row binlogging of this statement and use statement-based + so that all supporting tables are updated for CREATE FUNCTION command. + */ + if (thd->current_stmt_binlog_row_based) + thd->clear_current_stmt_binlog_row_based(); + rw_wrlock(&THR_LOCK_udf); if ((hash_search(&udf_hash,(byte*) udf->name.str, udf->name.length))) { @@ -466,6 +473,15 @@ int mysql_create_function(THD *thd,udf_func *udf) goto err; } rw_unlock(&THR_LOCK_udf); + + /* Binlog the create function. */ + if (mysql_bin_log.is_open()) + { + thd->clear_error(); + thd->binlog_query(THD::MYSQL_QUERY_TYPE, + thd->query, thd->query_length, FALSE, FALSE); + } + DBUG_RETURN(0); err: @@ -484,11 +500,20 @@ int mysql_drop_function(THD *thd,const LEX_STRING *udf_name) char *exact_name_str; uint exact_name_len; DBUG_ENTER("mysql_drop_function"); + if (!initialized) { my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); DBUG_RETURN(1); } + + /* + Turn off row binlogging of this statement and use statement-based + so that all supporting tables are updated for DROP FUNCTION command. 
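+
+    The same pattern is used for CREATE FUNCTION earlier in this file:
+    force statement format first, and once the change has succeeded,
+    write the original statement text to the binary log. Reduced to a
+    condensed sketch of the surrounding code, shown here only as an
+    illustration:
+
+      if (thd->current_stmt_binlog_row_based)
+        thd->clear_current_stmt_binlog_row_based();
+      ... drop the function from udf_hash and the supporting table ...
+      if (mysql_bin_log.is_open())
+      {
+        thd->clear_error();
+        thd->binlog_query(THD::MYSQL_QUERY_TYPE,
+                          thd->query, thd->query_length, FALSE, FALSE);
+      }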
+ */ + if (thd->current_stmt_binlog_row_based) + thd->clear_current_stmt_binlog_row_based(); + rw_wrlock(&THR_LOCK_udf); if (!(udf=(udf_func*) hash_search(&udf_hash,(byte*) udf_name->str, (uint) udf_name->length))) @@ -524,6 +549,15 @@ int mysql_drop_function(THD *thd,const LEX_STRING *udf_name) close_thread_tables(thd); rw_unlock(&THR_LOCK_udf); + + /* Binlog the drop function. */ + if (mysql_bin_log.is_open()) + { + thd->clear_error(); + thd->binlog_query(THD::MYSQL_QUERY_TYPE, + thd->query, thd->query_length, FALSE, FALSE); + } + DBUG_RETURN(0); err: rw_unlock(&THR_LOCK_udf); diff --git a/sql/sql_view.cc b/sql/sql_view.cc index b0c9de1453c..f84847f2f9c 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -224,6 +224,9 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, { LEX *lex= thd->lex; bool link_to_local; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + bool definer_check_is_needed= mode != VIEW_ALTER || lex->definer; +#endif /* first table in list is target VIEW name => cut off it */ TABLE_LIST *view= lex->unlink_first_table(&link_to_local); TABLE_LIST *tables= lex->query_tables; @@ -256,8 +259,9 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, /* DEFINER-clause is missing; we have to create default definer in persistent arena to be PS/SP friendly. + If this is an ALTER VIEW then the current user should be set as + the definer. */ - Query_arena original_arena; Query_arena *ps_arena = thd->activate_stmt_arena_if_needed(&original_arena); @@ -277,11 +281,11 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, - same as current user - current user has SUPER_ACL */ - if (strcmp(lex->definer->user.str, - thd->security_ctx->priv_user) != 0 || - my_strcasecmp(system_charset_info, - lex->definer->host.str, - thd->security_ctx->priv_host) != 0) + if (definer_check_is_needed && + (strcmp(lex->definer->user.str, thd->security_ctx->priv_user) != 0 || + my_strcasecmp(system_charset_info, + lex->definer->host.str, + thd->security_ctx->priv_host) != 0)) { if (!(thd->security_ctx->master_access & SUPER_ACL)) { diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index b8dc377bb82..0c6ce1d7eeb 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -948,6 +948,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token SIGNED_SYM %token SIMPLE_SYM /* SQL-2003-N */ %token SLAVE +%token SLAVESIDE_DISABLE_SYM %token SMALLINT /* SQL-2003-R */ %token SNAPSHOT_SYM %token SOCKET_SYM @@ -1770,12 +1771,17 @@ ev_schedule_time: EVERY_SYM expr interval opt_ev_status: /* empty */ { $$= 0; } | ENABLE_SYM { - Lex->event_parse_data->status= Event_parse_data::ENABLED; + Lex->event_parse_data->status= Event_basic::ENABLED; + $$= 1; + } + | DISABLE_SYM ON SLAVE + { + Lex->event_parse_data->status= Event_basic::SLAVESIDE_DISABLED; $$= 1; } | DISABLE_SYM { - Lex->event_parse_data->status= Event_parse_data::DISABLED; + Lex->event_parse_data->status= Event_basic::DISABLED; $$= 1; } ; @@ -1805,13 +1811,13 @@ ev_on_completion: ON COMPLETION_SYM PRESERVE_SYM { Lex->event_parse_data->on_completion= - Event_parse_data::ON_COMPLETION_PRESERVE; + Event_basic::ON_COMPLETION_PRESERVE; $$= 1; } | ON COMPLETION_SYM NOT_SYM PRESERVE_SYM { Lex->event_parse_data->on_completion= - Event_parse_data::ON_COMPLETION_DROP; + Event_basic::ON_COMPLETION_DROP; $$= 1; } ; @@ -7557,13 +7563,13 @@ index_hint_definition: { Select->set_index_hint_type($1, $3); } - '(' key_usage_list ')'; + '(' key_usage_list ')' | USE_SYM key_or_index index_hint_clause { Select->set_index_hint_type(INDEX_HINT_USE, $3); } - '(' opt_key_usage_list ')'; - + 
'(' opt_key_usage_list ')' + ; index_hints_list: index_hint_definition @@ -9998,6 +10004,7 @@ keyword_sp: | SIMPLE_SYM {} | SHARE_SYM {} | SHUTDOWN {} + | SLAVESIDE_DISABLE_SYM {} | SNAPSHOT_SYM {} | SOUNDS_SYM {} | SQL_CACHE_SYM {} @@ -11075,7 +11082,7 @@ union_list: UNION_SYM union_option { LEX *lex=Lex; - if (lex->exchange) + if (lex->result) { /* Only the last SELECT can have INTO...... */ my_error(ER_WRONG_USAGE, MYF(0), "UNION", "INTO"); diff --git a/sql/stacktrace.c b/sql/stacktrace.c index d8e9b7fd883..078f62c6b2b 100644 --- a/sql/stacktrace.c +++ b/sql/stacktrace.c @@ -53,21 +53,6 @@ void safe_print_str(const char* name, const char* val, int max_len) #define SIGRETURN_FRAME_OFFSET 23 #endif -static my_bool is_nptl; - -/* Check if we are using NPTL or LinuxThreads on Linux */ -void check_thread_lib(void) -{ - char buf[5]; - -#ifdef _CS_GNU_LIBPTHREAD_VERSION - confstr(_CS_GNU_LIBPTHREAD_VERSION, buf, sizeof(buf)); - is_nptl = !strncasecmp(buf, "NPTL", sizeof(buf)); -#else - is_nptl = 0; -#endif -} - #if defined(__alpha__) && defined(__GNUC__) /* The only way to backtrace without a symbol table on alpha @@ -173,8 +158,8 @@ terribly wrong...\n"); #endif /* __alpha__ */ /* We are 1 frame above signal frame with NPTL and 2 frames above with LT */ - sigreturn_frame_count = is_nptl ? 1 : 2; - + sigreturn_frame_count = thd_lib_detected == THD_LIB_LT ? 2 : 1; + while (fp < (uchar**) stack_bottom) { #if defined(__i386__) || defined(__x86_64__) diff --git a/sql/stacktrace.h b/sql/stacktrace.h index f5c92e54e1c..56b9877180a 100644 --- a/sql/stacktrace.h +++ b/sql/stacktrace.h @@ -27,11 +27,9 @@ extern char* heap_start; #define init_stacktrace() do { \ heap_start = (char*) &__bss_start; \ - check_thread_lib(); \ } while(0); void print_stacktrace(gptr stack_bottom, ulong thread_stack); void safe_print_str(const char* name, const char* val, int max_len); -void check_thread_lib(void); #endif /* (defined (__i386__) || (defined(__alpha__) && defined(__GNUC__))) */ #endif /* TARGET_OS_LINUX */ diff --git a/sql/table.h b/sql/table.h index 628341df55d..bb9ced2e450 100644 --- a/sql/table.h +++ b/sql/table.h @@ -58,7 +58,7 @@ typedef struct st_grant_info enum tmp_table_type { - NO_TMP_TABLE, TMP_TABLE, TRANSACTIONAL_TMP_TABLE, + NO_TMP_TABLE, NON_TRANSACTIONAL_TMP_TABLE, TRANSACTIONAL_TMP_TABLE, INTERNAL_TMP_TABLE, SYSTEM_TMP_TABLE }; @@ -439,6 +439,11 @@ struct st_table { my_bool no_cache; /* To signal that we should reset query_id for tables and cols */ my_bool clear_query_id; + /* + To indicate that a non-null value of the auto_increment field + was provided by the user or retrieved from the current record. + Used only in the MODE_NO_AUTO_VALUE_ON_ZERO mode. 
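+
+    Whoever sets the flag is responsible for clearing it once the row has
+    been written, so that the value cannot leak into the following row.
+    A minimal sketch of that invariant, mirroring the
+    copy_data_between_tables() change above (the reset after write_row()
+    is the important part; error handling omitted):
+
+      ... record[0] has been filled in, possibly setting the flag ...
+      error= to->file->write_row((byte*) to->record[0]);
+      to->auto_increment_field_not_null= FALSE;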
+ */ my_bool auto_increment_field_not_null; my_bool insert_or_update; /* Can be used by the handler */ my_bool alias_name_used; /* true if table_name is alias */ diff --git a/storage/archive/archive_test.c b/storage/archive/archive_test.c index 9ac043330fc..a5b2d1dfcc9 100644 --- a/storage/archive/archive_test.c +++ b/storage/archive/archive_test.c @@ -217,14 +217,13 @@ int main(int argc, char *argv[]) azclose(&writer_handle); azclose(&reader_handle); - exit(0); unlink(TEST_FILENAME); /* Start size tests */ printf("About to run 2/4/8 gig tests now, you may want to hit CTRL-C\n"); - size_test(TWOGIG, 2097152L); - size_test(FOURGIG, 4194304L); - size_test(EIGHTGIG, 8388608L); + size_test(TWOGIG, 2088992L); + size_test(FOURGIG, 4177984L); + size_test(EIGHTGIG, 8355968L); return 0; } @@ -234,6 +233,7 @@ int size_test(unsigned long long length, unsigned long long rows_to_test_for) azio_stream writer_handle, reader_handle; unsigned long long write_length; unsigned long long read_length= 0; + unsigned long long count; unsigned int ret; char buffer[BUFFER_LEN]; int error; @@ -244,8 +244,10 @@ int size_test(unsigned long long length, unsigned long long rows_to_test_for) return 0; } - for (write_length= 0; write_length < length ; write_length+= ret) + for (count= 0, write_length= 0; write_length < length ; + write_length+= ret) { + count++; ret= azwrite(&writer_handle, test_string, BUFFER_LEN); if (ret != BUFFER_LEN) { @@ -257,7 +259,7 @@ int size_test(unsigned long long length, unsigned long long rows_to_test_for) azflush(&writer_handle, Z_SYNC_FLUSH); } } - assert(write_length == length); + assert(write_length != count * BUFFER_LEN); /* Number of rows time BUFFER_LEN */ azflush(&writer_handle, Z_SYNC_FLUSH); printf("Reading back data\n"); @@ -279,7 +281,7 @@ int size_test(unsigned long long length, unsigned long long rows_to_test_for) } } - assert(read_length == length); + assert(read_length == write_length); assert(writer_handle.rows == rows_to_test_for); azclose(&writer_handle); azclose(&reader_handle); diff --git a/storage/archive/azio.c b/storage/archive/azio.c index 7876dd69cab..6b01d9c3c88 100644 --- a/storage/archive/azio.c +++ b/storage/archive/azio.c @@ -55,8 +55,8 @@ int az_open (azio_stream *s, const char *path, int Flags, File fd) s->stream.zalloc = (alloc_func)0; s->stream.zfree = (free_func)0; s->stream.opaque = (voidpf)0; - memset(s->inbuf, 0, AZ_BUFSIZE); - memset(s->outbuf, 0, AZ_BUFSIZE); + memset(s->inbuf, 0, AZ_BUFSIZE_READ); + memset(s->outbuf, 0, AZ_BUFSIZE_WRITE); s->stream.next_in = s->inbuf; s->stream.next_out = s->outbuf; s->stream.avail_in = s->stream.avail_out = 0; @@ -109,7 +109,7 @@ int az_open (azio_stream *s, const char *path, int Flags, File fd) return Z_NULL; } } - s->stream.avail_out = AZ_BUFSIZE; + s->stream.avail_out = AZ_BUFSIZE_WRITE; errno = 0; s->file = fd < 0 ? 
my_open(path, Flags, MYF(0)) : fd; @@ -159,7 +159,7 @@ void write_header(azio_stream *s) char buffer[AZHEADER_SIZE + AZMETA_BUFFER_SIZE]; char *ptr= buffer; - s->block_size= AZ_BUFSIZE; + s->block_size= AZ_BUFSIZE_WRITE; s->version = (unsigned char)az_magic[1]; s->minor_version = (unsigned char)az_magic[2]; @@ -224,7 +224,7 @@ int get_byte(s) if (s->stream.avail_in == 0) { errno = 0; - s->stream.avail_in = my_read(s->file, (byte *)s->inbuf, AZ_BUFSIZE, MYF(0)); + s->stream.avail_in = my_read(s->file, (byte *)s->inbuf, AZ_BUFSIZE_READ, MYF(0)); if (s->stream.avail_in == 0) { s->z_eof = 1; @@ -260,7 +260,7 @@ void check_header(azio_stream *s) if (len < 2) { if (len) s->inbuf[0] = s->stream.next_in[0]; errno = 0; - len = (uInt)my_read(s->file, (byte *)s->inbuf + len, AZ_BUFSIZE >> len, MYF(0)); + len = (uInt)my_read(s->file, (byte *)s->inbuf + len, AZ_BUFSIZE_READ >> len, MYF(0)); if (len == 0) s->z_err = Z_ERRNO; s->stream.avail_in += len; s->stream.next_in = s->inbuf; @@ -455,7 +455,7 @@ unsigned int ZEXPORT azread ( azio_stream *s, voidp buf, unsigned int len, int * if (s->stream.avail_in == 0 && !s->z_eof) { errno = 0; - s->stream.avail_in = (uInt)my_read(s->file, (byte *)s->inbuf, AZ_BUFSIZE, MYF(0)); + s->stream.avail_in = (uInt)my_read(s->file, (byte *)s->inbuf, AZ_BUFSIZE_READ, MYF(0)); if (s->stream.avail_in == 0) { s->z_eof = 1; @@ -522,12 +522,13 @@ unsigned int azwrite (azio_stream *s, voidpc buf, unsigned int len) { s->stream.next_out = s->outbuf; - if (my_write(s->file, (byte *)s->outbuf, AZ_BUFSIZE, MYF(0)) != AZ_BUFSIZE) + if (my_write(s->file, (byte *)s->outbuf, AZ_BUFSIZE_WRITE, + MYF(0)) != AZ_BUFSIZE_WRITE) { s->z_err = Z_ERRNO; break; } - s->stream.avail_out = AZ_BUFSIZE; + s->stream.avail_out = AZ_BUFSIZE_WRITE; } s->in += s->stream.avail_in; s->out += s->stream.avail_out; @@ -563,7 +564,7 @@ int do_flush (azio_stream *s, int flush) for (;;) { - len = AZ_BUFSIZE - s->stream.avail_out; + len = AZ_BUFSIZE_WRITE - s->stream.avail_out; if (len != 0) { @@ -574,7 +575,7 @@ int do_flush (azio_stream *s, int flush) return Z_ERRNO; } s->stream.next_out = s->outbuf; - s->stream.avail_out = AZ_BUFSIZE; + s->stream.avail_out = AZ_BUFSIZE_WRITE; } if (done) break; s->out += s->stream.avail_out; @@ -675,8 +676,8 @@ my_off_t azseek (s, offset, whence) /* There was a zmemzero here if inbuf was null -Brian */ while (offset > 0) { - uInt size = AZ_BUFSIZE; - if (offset < AZ_BUFSIZE) size = (uInt)offset; + uInt size = AZ_BUFSIZE_WRITE; + if (offset < AZ_BUFSIZE_WRITE) size = (uInt)offset; size = azwrite(s, s->inbuf, size); if (size == 0) return -1L; @@ -719,8 +720,8 @@ my_off_t azseek (s, offset, whence) } while (offset > 0) { int error; - unsigned int size = AZ_BUFSIZE; - if (offset < AZ_BUFSIZE) size = (int)offset; + unsigned int size = AZ_BUFSIZE_READ; + if (offset < AZ_BUFSIZE_READ) size = (int)offset; size = azread(s, s->outbuf, size, &error); if (error <= 0) return -1L; diff --git a/storage/archive/azlib.h b/storage/archive/azlib.h index 9076ff23192..a5bee1befae 100644 --- a/storage/archive/azlib.h +++ b/storage/archive/azlib.h @@ -196,7 +196,8 @@ extern "C" { /* The deflate compression method (the only one supported in this version) */ #define Z_NULL 0 /* for initializing zalloc, zfree, opaque */ -#define AZ_BUFSIZE 16384 +#define AZ_BUFSIZE_READ 32768 +#define AZ_BUFSIZE_WRITE 16384 typedef struct azio_stream { @@ -204,8 +205,8 @@ typedef struct azio_stream { int z_err; /* error code for last stream operation */ int z_eof; /* set if end of input file */ File file; /* .gz file */ - 
Byte inbuf[AZ_BUFSIZE]; /* input buffer */ - Byte outbuf[AZ_BUFSIZE]; /* output buffer */ + Byte inbuf[AZ_BUFSIZE_READ]; /* input buffer */ + Byte outbuf[AZ_BUFSIZE_WRITE]; /* output buffer */ uLong crc; /* crc32 of uncompressed data */ char *msg; /* error message */ int transparent; /* 1 if input file is not a .gz file */ diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc index bb638e1c17b..6853e879f55 100644 --- a/storage/archive/ha_archive.cc +++ b/storage/archive/ha_archive.cc @@ -81,6 +81,7 @@ TODO: Allow users to set compression level. + Allow adjustable block size. Implement versioning, should be easy. Allow for errors, find a way to mark bad rows. Add optional feature so that rows can be flushed at interval (which will cause less @@ -210,7 +211,8 @@ ha_archive::ha_archive(handlerton *hton, TABLE_SHARE *table_arg) buffer.set((char *)byte_buffer, IO_SIZE, system_charset_info); /* The size of the offset value we will use for position() */ - ref_length = sizeof(my_off_t); + ref_length= sizeof(my_off_t); + archive_reader_open= FALSE; } int archive_discover(handlerton *hton, THD* thd, const char *db, @@ -434,6 +436,29 @@ int ha_archive::init_archive_writer() } +int ha_archive::init_archive_reader() +{ + DBUG_ENTER("ha_archive::init_archive_reader"); + /* + It is expensive to open and close the data files and since you can't have + a gzip file that can be both read and written we keep a writer open + that is shared amoung all open tables. + */ + if (!archive_reader_open) + { + if (!(azopen(&archive, share->data_file_name, O_RDONLY|O_BINARY))) + { + DBUG_PRINT("ha_archive", ("Could not open archive read file")); + share->crashed= TRUE; + DBUG_RETURN(1); + } + archive_reader_open= TRUE; + } + + DBUG_RETURN(0); +} + + /* We just implement one additional file extension. */ @@ -477,7 +502,6 @@ int ha_archive::open(const char *name, int mode, uint open_options) DBUG_ASSERT(share); - record_buffer= create_record_buffer(table->s->reclength + ARCHIVE_ROW_HEADER_SIZE); @@ -489,14 +513,6 @@ int ha_archive::open(const char *name, int mode, uint open_options) thr_lock_data_init(&share->lock, &lock, NULL); - DBUG_PRINT("ha_archive", ("archive data_file_name %s", share->data_file_name)); - if (!(azopen(&archive, share->data_file_name, O_RDONLY|O_BINARY))) - { - if (errno == EROFS || errno == EACCES) - DBUG_RETURN(my_errno= errno); - DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); - } - DBUG_PRINT("ha_archive", ("archive table was crashed %s", rc == HA_ERR_CRASHED_ON_USAGE ? 
"yes" : "no")); if (rc == HA_ERR_CRASHED_ON_USAGE && open_options & HA_OPEN_FOR_REPAIR) @@ -533,8 +549,11 @@ int ha_archive::close(void) destroy_record_buffer(record_buffer); /* First close stream */ - if (azclose(&archive)) - rc= 1; + if (archive_reader_open) + { + if (azclose(&archive)) + rc= 1; + } /* then also close share */ rc|= free_share(); @@ -904,7 +923,7 @@ int ha_archive::index_read(byte *buf, const byte *key, int ha_archive::index_read_idx(byte *buf, uint index, const byte *key, uint key_len, enum ha_rkey_function find_flag) { - int rc= 0; + int rc; bool found= 0; KEY *mkey= &table->s->key_info[index]; current_k_offset= mkey->key_part->offset; @@ -914,22 +933,10 @@ int ha_archive::index_read_idx(byte *buf, uint index, const byte *key, DBUG_ENTER("ha_archive::index_read_idx"); - /* - All of the buffer must be written out or we won't see all of the - data - */ - pthread_mutex_lock(&share->mutex); - azflush(&(share->archive_write), Z_SYNC_FLUSH); - pthread_mutex_unlock(&share->mutex); + rc= rnd_init(TRUE); - /* - Set the position of the local read thread to the beginning postion. - */ - if (read_data_header(&archive)) - { - rc= HA_ERR_CRASHED_ON_USAGE; + if (rc) goto error; - } while (!(get_row(&archive, buf))) { @@ -979,10 +986,11 @@ int ha_archive::rnd_init(bool scan) if (share->crashed) DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + init_archive_reader(); + /* We rewind the file so that we can read from the beginning if scan */ if (scan) { - scan_rows= share->rows_recorded; DBUG_PRINT("info", ("archive will retrieve %llu rows", (unsigned long long) scan_rows)); stats.records= 0; @@ -991,17 +999,18 @@ int ha_archive::rnd_init(bool scan) If dirty, we lock, and then reset/flush the data. I found that just calling azflush() doesn't always work. */ + pthread_mutex_lock(&share->mutex); + scan_rows= share->rows_recorded; if (share->dirty == TRUE) { - pthread_mutex_lock(&share->mutex); if (share->dirty == TRUE) { DBUG_PRINT("ha_archive", ("archive flushing out rows for scan")); azflush(&(share->archive_write), Z_SYNC_FLUSH); share->dirty= FALSE; } - pthread_mutex_unlock(&share->mutex); } + pthread_mutex_unlock(&share->mutex); if (read_data_header(&archive)) DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); @@ -1283,6 +1292,8 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) azio_stream writer; char writer_filename[FN_REFLEN]; + init_archive_reader(); + // now we close both our writer and our reader for the rename if (share->archive_write_open) { @@ -1475,6 +1486,7 @@ int ha_archive::info(uint flag) if (flag & HA_STATUS_AUTO) { + init_archive_reader(); azflush(&archive, Z_SYNC_FLUSH); stats.auto_increment_value= archive.auto_increment; } @@ -1557,6 +1569,8 @@ int ha_archive::check(THD* thd, HA_CHECK_OPT* check_opt) Now we will rewind the archive file so that we are positioned at the start of the file. 
*/ + init_archive_reader(); + if (!rc) read_data_header(&archive); diff --git a/storage/archive/ha_archive.h b/storage/archive/ha_archive.h index 8f56e8ce060..8fc54f6715f 100644 --- a/storage/archive/ha_archive.h +++ b/storage/archive/ha_archive.h @@ -71,6 +71,7 @@ class ha_archive: public handler uint current_key_len; uint current_k_offset; archive_record_buffer *record_buffer; + bool archive_reader_open; archive_record_buffer *create_record_buffer(unsigned int length); void destroy_record_buffer(archive_record_buffer *r); @@ -119,6 +120,7 @@ public: ARCHIVE_SHARE *get_share(const char *table_name, int *rc); int free_share(); int init_archive_writer(); + int init_archive_reader(); bool auto_repair() const { return 1; } // For the moment we just do this int read_data_header(azio_stream *file_to_read); void position(const byte *record); diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc index 441c1785e74..aa7184268f5 100644 --- a/storage/federated/ha_federated.cc +++ b/storage/federated/ha_federated.cc @@ -43,23 +43,55 @@ The create table will simply create the .frm file, and within the "CREATE TABLE" SQL, there SHALL be any of the following : - comment=scheme://username:password@hostname:port/database/tablename - comment=scheme://username@hostname/database/tablename - comment=scheme://username:password@hostname/database/tablename - comment=scheme://username:password@hostname/database/tablename + connection=scheme://username:password@hostname:port/database/tablename + connection=scheme://username@hostname/database/tablename + connection=scheme://username:password@hostname/database/tablename + connection=scheme://username:password@hostname/database/tablename + + - OR - + + As of 5.1 (See worklog #3031), federated now allows you to use a non-url + format, taking advantage of mysql.servers: + + connection="connection_one" + connection="connection_one/table_foo" An example would be: - comment=mysql://username:password@hostname:port/database/tablename + connection=mysql://username:password@hostname:port/database/tablename - ***IMPORTANT*** + or, if we had: + + create server 'server_one' foreign data wrapper 'mysql' options + (HOST '127.0.0.1', + DATABASE 'db1', + USER 'root', + PASSWORD '', + PORT 3306, + SOCKET '', + OWNER 'root'); + + CREATE TABLE federated.t1 ( + `id` int(20) NOT NULL, + `name` varchar(64) NOT NULL default '' + ) + ENGINE="FEDERATED" DEFAULT CHARSET=latin1 + CONNECTION='server_one'; + + So, this will have been the equivalent of - This is a first release, conceptual release - Only 'mysql://' is supported at this release. + CONNECTION="mysql://root@127.0.0.1:3306/db1/t1" + Then, we can also change the server to point to a new schema: - This comment connection string is necessary for the handler to be - able to connect to the foreign server. + ALTER SERVER 'server_one' options(DATABASE 'db2'); + + All subsequent calls will now be against db2.t1! Guess what? You don't + have to perform an alter table! + + This connecton="connection string" is necessary for the handler to be + able to connect to the foreign server, either by URL, or by server + name. The basic flow is this: @@ -166,7 +198,7 @@ KEY other_key (other)) ENGINE="FEDERATED" DEFAULT CHARSET=latin1 - COMMENT='root@127.0.0.1:9306/federated/test_federated'; + CONNECTION='mysql://root@127.0.0.1:9306/federated/test_federated'; Notice the "COMMENT" and "ENGINE" field? 
This is where you respectively set the engine type, "FEDERATED" and foreign @@ -263,7 +295,7 @@ To run these tests, go into ./mysql-test (based in the directory you built the server in) - ./mysql-test-run federatedd + ./mysql-test-run federated To run the test, or if you want to run the test and have debug info: @@ -311,7 +343,7 @@ ------------- These were the files that were modified or created for this - Federated handler to work: + Federated handler to work, in 5.0: ./configure.in ./sql/Makefile.am @@ -329,6 +361,13 @@ ./sql/ha_federated.cc ./sql/ha_federated.h + In 5.1 + + my:~/mysql-build/mysql-5.1-bkbits patg$ ls storage/federated/ + CMakeLists.txt Makefile.in ha_federated.h plug.in + Makefile SCCS libfederated.a + Makefile.am ha_federated.cc libfederated_a-ha_federated.o + */ @@ -546,42 +585,39 @@ static int parse_url_error(FEDERATED_SHARE *share, TABLE *table, int error_num) int buf_len; DBUG_ENTER("ha_federated parse_url_error"); - if (share->connection_string) - { - DBUG_PRINT("info", - ("error: parse_url. Returning error code %d \ - freeing share->connection_string %lx", - error_num, (long unsigned int) share->connection_string)); - my_free((gptr) share->connection_string, MYF(0)); - share->connection_string= 0; - } buf_len= min(table->s->connect_string.length, FEDERATED_QUERY_BUFFER_SIZE-1); strmake(buf, table->s->connect_string.str, buf_len); my_error(error_num, MYF(0), buf); DBUG_RETURN(error_num); } + /* retrieve server object which contains server meta-data from the system table given a server's name, set share connection parameter members */ -int get_connection(FEDERATED_SHARE *share) +int get_connection(MEM_ROOT *mem_root, FEDERATED_SHARE *share) { int error_num= ER_FOREIGN_SERVER_DOESNT_EXIST; char error_buffer[FEDERATED_QUERY_BUFFER_SIZE]; - FOREIGN_SERVER *server; + FOREIGN_SERVER *server, server_buffer; DBUG_ENTER("ha_federated::get_connection"); + /* + get_server_by_name() clones the server if exists and allocates + copies of strings in the supplied mem_root + */ if (!(server= - get_server_by_name(share->connection_string))) + get_server_by_name(mem_root, share->connection_string, &server_buffer))) { DBUG_PRINT("info", ("get_server_by_name returned > 0 error condition!")); /* need to come up with error handling */ error_num=1; goto error; } - DBUG_PRINT("info", ("get_server_by_name returned server at %lx", (long unsigned int) server)); + DBUG_PRINT("info", ("get_server_by_name returned server at %lx", + (long unsigned int) server)); /* Most of these should never be empty strings, error handling will @@ -590,29 +626,22 @@ int get_connection(FEDERATED_SHARE *share) except there are errors in the trace file of the share being overrun at the address of the share. */ - if (server->server_name) - share->server_name= server->server_name; - share->server_name_length= server->server_name_length ? - server->server_name_length : 0; - if (server->username) - share->username= server->username; - if (server->password) - share->password= server->password; - if (server->db) - share->database= server->db; - - share->port= server->port ? 
(ushort) server->port : MYSQL_PORT; - - if (server->host) - share->hostname= server->host; - if (server->socket) - share->socket= server->socket; - else if (strcmp(share->hostname, my_localhost) == 0) - share->socket= my_strdup(MYSQL_UNIX_ADDR, MYF(0)); - if (server->scheme) - share->scheme= server->scheme; - else - share->scheme= NULL; + share->server_name_length= server->server_name_length; + share->server_name= server->server_name; + share->username= server->username; + share->password= server->password; + share->database= server->db; +#ifndef I_AM_PARANOID + share->port= server->port > 0 && server->port < 65536 ? +#else + share->port= server->port > 1023 && server->port < 65536 ? +#endif + (ushort) server->port : MYSQL_PORT; + share->hostname= server->host; + if (!(share->socket= server->socket) && + !strcmp(share->hostname, my_localhost)) + share->socket= (char *) MYSQL_UNIX_ADDR; + share->scheme= server->scheme; DBUG_PRINT("info", ("share->username %s", share->username)); DBUG_PRINT("info", ("share->password %s", share->password)); @@ -635,6 +664,7 @@ error: SYNOPSIS parse_url() + mem_root MEM_ROOT pointer for memory allocation share pointer to FEDERATED share table pointer to current TABLE class table_create_flag determines what error to throw @@ -684,7 +714,7 @@ error: */ -static int parse_url(FEDERATED_SHARE *share, TABLE *table, +static int parse_url(MEM_ROOT *mem_root, FEDERATED_SHARE *share, TABLE *table, uint table_create_flag) { uint error_num= (table_create_flag ? @@ -698,20 +728,19 @@ static int parse_url(FEDERATED_SHARE *share, TABLE *table, DBUG_PRINT("info", ("Length: %d", table->s->connect_string.length)); DBUG_PRINT("info", ("String: '%.*s'", table->s->connect_string.length, table->s->connect_string.str)); - share->connection_string= my_strndup(table->s->connect_string.str, - table->s->connect_string.length, - MYF(0)); + share->connection_string= strmake_root(mem_root, table->s->connect_string.str, + table->s->connect_string.length); - // Add a null for later termination of table name - share->connection_string[table->s->connect_string.length]= 0; DBUG_PRINT("info",("parse_url alloced share->connection_string %lx", (long unsigned int) share->connection_string)); DBUG_PRINT("info",("share->connection_string %s",share->connection_string)); - /* No delimiters, must be a straight connection name */ - if ( (!strchr(share->connection_string, '/')) && - (!strchr(share->connection_string, '@')) && - (!strchr(share->connection_string, ';'))) + /* + No :// or @ in connection string. Must be a straight connection name of + either "servername" or "servername/tablename" + */ + if ( (!strstr(share->connection_string, "://") && + (!strchr(share->connection_string, '@')))) { DBUG_PRINT("info", @@ -720,17 +749,51 @@ static int parse_url(FEDERATED_SHARE *share, TABLE *table, share->connection_string, (long unsigned int) share->connection_string)); + /* ok, so we do a little parsing, but not completely! 
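+
+       Accepted non-URL forms at this point (the names below are only
+       examples, in the style of the CONNECTION examples at the top of
+       this file):
+
+         CONNECTION='server_one'            remote table name defaults to
+                                            the federated table's own name
+         CONNECTION='server_one/table_foo'  explicit remote table name;
+                                            no further '/' is allowed
+
+       Host, port, user, password and so on all come from the server
+       entry that get_connection() looks up below.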
*/ share->parsed= FALSE; - if ((error_num= get_connection(share))) - goto error; + /* + If there is a single '/' in the connection string, this means the user is + specifying a table name + */ + + if ((share->table_name= strchr(share->connection_string, '/'))) + { + share->connection_string[share->table_name - share->connection_string]= '\0'; + share->table_name++; + share->table_name_length= strlen(share->table_name); + DBUG_PRINT("info", + ("internal format, parsed table_name share->connection_string \ + %s share->table_name %s", + share->connection_string, share->table_name)); + + /* + there better not be any more '/'s ! + */ + if (strchr(share->table_name, '/')) + goto error; + + } /* - connection specifies everything but, resort to - expecting remote and foreign table names to match + otherwise, straight server name, use tablename of federated table + as remote table name */ - share->table_name= table->s->table_name.str; - share->table_name_length= table->s->table_name.length; - share->table_name[share->table_name_length]= '\0'; + else + { + /* + connection specifies everything but, resort to + expecting remote and foreign table names to match + */ + share->table_name= strmake_root(mem_root, table->s->table_name.str, + (share->table_name_length= table->s->table_name.length)); + DBUG_PRINT("info", + ("internal format, default table_name share->connection_string \ + %s share->table_name %s", + share->connection_string, share->table_name)); + } + + if ((error_num= get_connection(mem_root, share))) + goto error; } else { @@ -816,7 +879,7 @@ Then password is a null string, so set to NULL if (!share->port) { if (strcmp(share->hostname, my_localhost) == 0) - share->socket= my_strdup(MYSQL_UNIX_ADDR, MYF(0)); + share->socket= (char *) MYSQL_UNIX_ADDR; else share->port= MYSQL_PORT; } @@ -1420,22 +1483,26 @@ err: static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table) { - char *select_query; char query_buffer[FEDERATED_QUERY_BUFFER_SIZE]; Field **field; String query(query_buffer, sizeof(query_buffer), &my_charset_bin); FEDERATED_SHARE *share= NULL, tmp_share; + MEM_ROOT mem_root; + DBUG_ENTER("ha_federated.cc::get_share"); + /* In order to use this string, we must first zero it's length, or it will contain garbage */ query.length(0); + init_alloc_root(&mem_root, 256, 0); + pthread_mutex_lock(&federated_mutex); tmp_share.share_key= table_name; tmp_share.share_key_length= strlen(table_name); - if (parse_url(&tmp_share, table, 0)) + if (parse_url(&mem_root, &tmp_share, table, 0)) goto error; /* TODO: change tmp_share.scheme to LEX_STRING object */ @@ -1456,24 +1523,17 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table) query.length(query.length() - sizeof_trailing_comma); query.append(STRING_WITH_LEN(" FROM `")); + query.append(tmp_share.table_name, tmp_share.table_name_length); + query.append(STRING_WITH_LEN("`")); + DBUG_PRINT("info", ("calling alloc_root")); - if (!(share= (FEDERATED_SHARE *) - my_multi_malloc(MYF(MY_WME), - &share, sizeof(*share), - &select_query, - query.length()+table->s->connect_string.length+1, - NullS))) + if (!(share= (FEDERATED_SHARE *) memdup_root(&mem_root, (char*)&tmp_share, sizeof(*share))) || + !(share->select_query= (char*) strmake_root(&mem_root, query.ptr(), query.length()))) goto error; - memcpy(share, &tmp_share, sizeof(tmp_share)); - - share->table_name_length= strlen(share->table_name); - /* TODO: share->table_name to LEX_STRING object */ - query.append(share->table_name, share->table_name_length); - 
query.append(STRING_WITH_LEN("`")); - share->select_query= select_query; - strmov(share->select_query, query.ptr()); share->use_count= 0; + share->mem_root= mem_root; + DBUG_PRINT("info", ("share->select_query %s", share->select_query)); @@ -1482,17 +1542,18 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table) thr_lock_init(&share->lock); pthread_mutex_init(&share->mutex, MY_MUTEX_INIT_FAST); } + else + free_root(&mem_root, MYF(0)); /* prevents memory leak */ + share->use_count++; pthread_mutex_unlock(&federated_mutex); - return share; + DBUG_RETURN(share); error: pthread_mutex_unlock(&federated_mutex); - my_free((gptr) tmp_share.connection_string, MYF(MY_ALLOW_ZERO_PTR)); - tmp_share.connection_string= 0; - my_free((gptr) share, MYF(MY_ALLOW_ZERO_PTR)); - return NULL; + free_root(&mem_root, MYF(0)); + DBUG_RETURN(NULL); } @@ -1504,23 +1565,16 @@ error: static int free_share(FEDERATED_SHARE *share) { + MEM_ROOT mem_root= share->mem_root; DBUG_ENTER("free_share"); pthread_mutex_lock(&federated_mutex); if (!--share->use_count) { hash_delete(&federated_open_tables, (byte*) share); - if (share->parsed) - my_free((gptr) share->socket, MYF(MY_ALLOW_ZERO_PTR)); - /*if (share->connection_string) - { - */ - my_free((gptr) share->connection_string, MYF(MY_ALLOW_ZERO_PTR)); - share->connection_string= 0; - /*}*/ thr_lock_delete(&share->lock); VOID(pthread_mutex_destroy(&share->mutex)); - my_free((gptr) share, MYF(0)); + free_root(&mem_root, MYF(0)); } pthread_mutex_unlock(&federated_mutex); @@ -1589,6 +1643,8 @@ int ha_federated::open(const char *name, int mode, uint test_if_locked) mysql_options(mysql,MYSQL_SET_CHARSET_NAME, this->table->s->table_charset->csname); + DBUG_PRINT("info", ("calling mysql_real_connect hostname %s user %s", + share->hostname, share->username)); if (!mysql || !mysql_real_connect(mysql, share->hostname, share->username, @@ -2831,15 +2887,13 @@ int ha_federated::create(const char *name, TABLE *table_arg, HA_CREATE_INFO *create_info) { int retval; + THD *thd= current_thd; FEDERATED_SHARE tmp_share; // Only a temporary share, to test the url DBUG_ENTER("ha_federated::create"); - if (!(retval= parse_url(&tmp_share, table_arg, 1))) + if (!(retval= parse_url(thd->mem_root, &tmp_share, table_arg, 1))) retval= check_foreign_data_source(&tmp_share, 1); - /* free this because strdup created it in parse_url */ - my_free((gptr) tmp_share.connection_string, MYF(MY_ALLOW_ZERO_PTR)); - tmp_share.connection_string= 0; DBUG_RETURN(retval); } diff --git a/storage/federated/ha_federated.h b/storage/federated/ha_federated.h index bbc2b2fe9f8..4d2eefdd986 100644 --- a/storage/federated/ha_federated.h +++ b/storage/federated/ha_federated.h @@ -43,6 +43,8 @@ The example implements the minimum of what you will probably need. 
*/ typedef struct st_federated_share { + MEM_ROOT mem_root; + bool parsed; /* this key is unique db/tablename */ const char *share_key; @@ -67,6 +69,7 @@ typedef struct st_federated_share { char *sport; int share_key_length; ushort port; + uint table_name_length, server_name_length, connect_string_length, use_count; pthread_mutex_t mutex; THR_LOCK lock; diff --git a/storage/heap/ha_heap.cc b/storage/heap/ha_heap.cc index 5831ec6167a..8c378f7334f 100644 --- a/storage/heap/ha_heap.cc +++ b/storage/heap/ha_heap.cc @@ -628,7 +628,10 @@ int ha_heap::create(const char *name, TABLE *table_arg, seg->length= (uint) key_part->length; seg->flag= key_part->key_part_flag; - seg->charset= field->charset(); + if (field->flags & (ENUM_FLAG | SET_FLAG)) + seg->charset= &my_charset_bin; + else + seg->charset= field->charset(); if (field->null_ptr) { seg->null_bit= field->null_bit; diff --git a/storage/heap/hp_write.c b/storage/heap/hp_write.c index 86e79c9d7ec..19215fcf017 100644 --- a/storage/heap/hp_write.c +++ b/storage/heap/hp_write.c @@ -105,7 +105,6 @@ int hp_rb_write_key(HP_INFO *info, HP_KEYDEF *keyinfo, const byte *record, heap_rb_param custom_arg; uint old_allocated; - info->last_pos= NULL; /* For heap_rnext/heap_rprev */ custom_arg.keyseg= keyinfo->seg; custom_arg.key_length= hp_rb_make_key(keyinfo, info->recbuf, record, recpos); if (keyinfo->flag & HA_NOSAME) diff --git a/storage/innobase/CMakeLists.txt b/storage/innobase/CMakeLists.txt index beeee5310c2..873a73be0ec 100644 --- a/storage/innobase/CMakeLists.txt +++ b/storage/innobase/CMakeLists.txt @@ -13,8 +13,8 @@ # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -#SET(CMAKE_CXX_FLAGS_DEBUG "-DSAFEMALLOC -DSAFE_MUTEX") -#SET(CMAKE_C_FLAGS_DEBUG "-DSAFEMALLOC -DSAFE_MUTEX") +SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX") +SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX") ADD_DEFINITIONS(-DMYSQL_SERVER -D_WIN32 -DWIN32 -D_LIB) INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include ${CMAKE_SOURCE_DIR}/zlib diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc index 8fe4d988dbc..d31f5b7e792 100644 --- a/storage/myisam/ha_myisam.cc +++ b/storage/myisam/ha_myisam.cc @@ -635,6 +635,9 @@ bool ha_myisam::check_if_locking_is_allowed(uint sql_command, int ha_myisam::open(const char *name, int mode, uint test_if_locked) { + MI_KEYDEF *keyinfo; + MI_COLUMNDEF *recinfo= 0; + uint recs; uint i; /* @@ -657,6 +660,26 @@ int ha_myisam::open(const char *name, int mode, uint test_if_locked) if (!(file=mi_open(name, mode, test_if_locked | HA_OPEN_FROM_SQL_LAYER))) return (my_errno ? 
my_errno : -1); + if (!table->s->tmp_table) /* No need to perform a check for tmp table */ + { + if ((my_errno= table2myisam(table, &keyinfo, &recinfo, &recs))) + { + /* purecov: begin inspected */ + DBUG_PRINT("error", ("Failed to convert TABLE object to MyISAM " + "key and column definition")); + goto err; + /* purecov: end */ + } + if (check_definition(keyinfo, recinfo, table->s->keys, recs, + file->s->keyinfo, file->s->rec, + file->s->base.keys, file->s->base.fields, true)) + { + /* purecov: begin inspected */ + my_errno= HA_ERR_CRASHED; + goto err; + /* purecov: end */ + } + } if (test_if_locked & (HA_OPEN_IGNORE_IF_LOCKED | HA_OPEN_TMP_TABLE)) VOID(mi_extra(file, HA_EXTRA_NO_WAIT_LOCK, 0)); @@ -677,7 +700,18 @@ int ha_myisam::open(const char *name, int mode, uint test_if_locked) (struct st_mysql_ftparser *)parser->plugin->info; table->key_info[i].block_size= file->s->keyinfo[i].block_length; } - return (0); + my_errno= 0; + goto end; + err: + this->close(); + end: + /* + Both recinfo and keydef are allocated by my_multi_malloc(), thus only + recinfo must be freed. + */ + if (recinfo) + my_free((gptr) recinfo, MYF(0)); + return my_errno; } int ha_myisam::close(void) @@ -1023,6 +1057,22 @@ int ha_myisam::repair(THD *thd, MI_CHECK ¶m, bool do_optimize) ha_rows rows= file->state->records; DBUG_ENTER("ha_myisam::repair"); + /* + Normally this method is entered with a properly opened table. If the + repair fails, it can be repeated with more elaborate options. Under + special circumstances it can happen that a repair fails so that it + closed the data file and cannot re-open it. In this case file->dfile + is set to -1. We must not try another repair without an open data + file. (Bug #25289) + */ + if (file->dfile == -1) + { + sql_print_information("Retrying repair of: '%s' failed. " + "Please try REPAIR EXTENDED or myisamchk", + table->s->path.str); + DBUG_RETURN(HA_ADMIN_FAILED); + } + param.db_name= table->s->db.str; param.table_name= table->alias; param.tmpfile_createflag = O_RDWR | O_TRUNC; diff --git a/storage/myisam/mi_create.c b/storage/myisam/mi_create.c index 5dd4a98127e..71d377c8b6b 100644 --- a/storage/myisam/mi_create.c +++ b/storage/myisam/mi_create.c @@ -573,6 +573,10 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, pthread_mutex_lock(&THR_LOCK_myisam); + /* + NOTE: For test_if_reopen() we need a real path name. Hence we need + MY_RETURN_REAL_PATH for every fn_format(filename, ...). + */ if (ci->index_file_name) { char *iext= strrchr(ci->index_file_name, '.'); @@ -584,13 +588,14 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, if ((path= strrchr(ci->index_file_name, FN_LIBCHAR))) *path= '\0'; fn_format(filename, name, ci->index_file_name, MI_NAME_IEXT, - MY_REPLACE_DIR | MY_UNPACK_FILENAME | MY_APPEND_EXT); + MY_REPLACE_DIR | MY_UNPACK_FILENAME | + MY_RETURN_REAL_PATH | MY_APPEND_EXT); } else { fn_format(filename, ci->index_file_name, "", MI_NAME_IEXT, - MY_UNPACK_FILENAME | (have_iext ? MY_REPLACE_EXT : - MY_APPEND_EXT)); + MY_UNPACK_FILENAME | MY_RETURN_REAL_PATH | + (have_iext ? MY_REPLACE_EXT : MY_APPEND_EXT)); } fn_format(linkname, name, "", MI_NAME_IEXT, MY_UNPACK_FILENAME|MY_APPEND_EXT); @@ -603,10 +608,11 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, } else { + char *iext= strrchr(name, '.'); + int have_iext= iext && !strcmp(iext, MI_NAME_IEXT); fn_format(filename, name, "", MI_NAME_IEXT, - (MY_UNPACK_FILENAME | - (flags & HA_DONT_TOUCH_DATA) ? 
MY_RETURN_REAL_PATH : 0) | - MY_APPEND_EXT); + MY_UNPACK_FILENAME | MY_RETURN_REAL_PATH | + (have_iext ? MY_REPLACE_EXT : MY_APPEND_EXT)); linkname_ptr=0; /* Replace the current file */ create_flag=MY_DELETE_OLD; @@ -618,6 +624,9 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, A TRUNCATE command checks for the table in the cache only and could be fooled to believe, the table is not open. Pull the emergency brake in this situation. (Bug #8306) + + NOTE: The filename is compared against unique_file_name of every + open table. Hence we need a real path here. */ if (test_if_reopen(filename)) { diff --git a/storage/myisam/mi_range.c b/storage/myisam/mi_range.c index 2cdb02b2c80..c7ab6731a2c 100644 --- a/storage/myisam/mi_range.c +++ b/storage/myisam/mi_range.c @@ -138,8 +138,42 @@ static ha_rows _mi_record_pos(MI_INFO *info, const byte *key, if (!(nextflag & (SEARCH_FIND | SEARCH_NO_FIND | SEARCH_LAST))) key_len=USE_WHOLE_KEY; + /* + my_handler.c:mi_compare_text() has a flag 'skip_end_space'. + This is set in my_handler.c:ha_key_cmp() in dependence on the + compare flags 'nextflag' and the column type. + + TEXT columns are of type HA_KEYTYPE_VARTEXT. In this case the + condition is skip_end_space= ((nextflag & (SEARCH_FIND | + SEARCH_UPDATE)) == SEARCH_FIND). + + SEARCH_FIND is used for an exact key search. The combination + SEARCH_FIND | SEARCH_UPDATE is used in write/update/delete + operations with a comment like "Not real duplicates", whatever this + means. From the condition above we can see that 'skip_end_space' is + always false for these operations. The result is that trailing space + counts in key comparison and hence, emtpy strings ('', string length + zero, but not NULL) compare less that strings starting with control + characters and these in turn compare less than strings starting with + blanks. + + When estimating the number of records in a key range, we request an + exact search for the minimum key. This translates into a plain + SEARCH_FIND flag. Using this alone would lead to a 'skip_end_space' + compare. Empty strings would be expected above control characters. + Their keys would not be found because they are located below control + characters. + + This is the reason that we add the SEARCH_UPDATE flag here. It makes + the key estimation compare in the same way like key write operations + do. Olny so we will find the keys where they have been inserted. + + Adding the flag unconditionally does not hurt as it is used in the + above mentioned condition only. So it can safely be used together + with other flags. + */ pos=_mi_search_pos(info,keyinfo,key_buff,key_len, - nextflag | SEARCH_SAVE_BUFF, + nextflag | SEARCH_SAVE_BUFF | SEARCH_UPDATE, info->s->state.key_root[inx]); if (pos >= 0.0) { diff --git a/storage/myisam/mi_search.c b/storage/myisam/mi_search.c index 8d2b68a97f0..d313619e007 100644 --- a/storage/myisam/mi_search.c +++ b/storage/myisam/mi_search.c @@ -926,11 +926,16 @@ uint _mi_get_binary_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag, /* Keys are compressed the following way: - prefix length Packed length of prefix for the prev key. (1 or 3 bytes) + prefix length Packed length of prefix common with prev key (1 or 3 bytes) for each key segment: [is null] Null indicator if can be null (1 byte, zero means null) [length] Packed length if varlength (1 or 3 bytes) + key segment 'length' bytes of key segment value pointer Reference to the data file (last_keyseg->length). + + get_key_length() is a macro. 
It gets the prefix length from 'page' + and puts it into 'length'. It increments 'page' by 1 or 3, depending + on the packed length of the prefix length. */ get_key_length(length,page); if (length) @@ -945,34 +950,44 @@ uint _mi_get_binary_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag, my_errno=HA_ERR_CRASHED; DBUG_RETURN(0); /* Wrong key */ } - from=key; from_end=key+length; + /* Key is packed against prev key, take prefix from prev key. */ + from= key; + from_end= key + length; } else { - from=page; from_end=page_end; /* Not packed key */ + /* Key is not packed against prev key, take all from page buffer. */ + from= page; + from_end= page_end; } /* - The trouble is that key is split in two parts: - The first part is in from ...from_end-1. - The second part starts at page + The trouble is that key can be split in two parts: + The first part (prefix) is in from .. from_end - 1. + The second part starts at page. + The split can be at every byte position. So we need to check for + the end of the first part before using every byte. */ for (keyseg=keyinfo->seg ; keyseg->type ;keyseg++) { if (keyseg->flag & HA_NULL_PART) { + /* If prefix is used up, switch to rest. */ if (from == from_end) { from=page; from_end=page_end; } if (!(*key++ = *from++)) continue; /* Null part */ } if (keyseg->flag & (HA_VAR_LENGTH_PART | HA_BLOB_PART | HA_SPACE_PACK)) { - /* Get length of dynamic length key part */ + /* If prefix is used up, switch to rest. */ if (from == from_end) { from=page; from_end=page_end; } + /* Get length of dynamic length key part */ if ((length= (*key++ = *from++)) == 255) { + /* If prefix is used up, switch to rest. */ if (from == from_end) { from=page; from_end=page_end; } length= (uint) ((*key++ = *from++)) << 8; + /* If prefix is used up, switch to rest. */ if (from == from_end) { from=page; from_end=page_end; } length+= (uint) ((*key++ = *from++)); } @@ -992,14 +1007,26 @@ uint _mi_get_binary_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag, key+=length; from+=length; } + /* + Last segment (type == 0) contains length of data pointer. + If we have mixed key blocks with data pointer and key block pointer, + we have to copy both. + */ length=keyseg->length+nod_flag; if ((tmp=(uint) (from_end-from)) <= length) { + /* Remaining length is less or equal max possible length. */ memcpy(key+tmp,page,length-tmp); /* Get last part of key */ *page_pos= page+length-tmp; } else { + /* + Remaining length is greater than max possible length. + This can happen only if we switched to the new key bytes already. + 'page_end' is calculated with MI_MAX_KEY_BUFF. So it can be far + behind the real end of the key. + */ if (from_end != page_end) { DBUG_PRINT("error",("Error when unpacking key")); @@ -1007,6 +1034,7 @@ uint _mi_get_binary_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag, my_errno=HA_ERR_CRASHED; DBUG_RETURN(0); /* Error */ } + /* Copy data pointer and, if appropriate, key block pointer. 
*/ memcpy((byte*) key,(byte*) from,(size_t) length); *page_pos= from+length; } diff --git a/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp b/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp index bc41d8373fd..57b4f0e7520 100644 --- a/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp +++ b/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp @@ -140,6 +140,8 @@ public: RowGCIFlag = 150, RowChecksumFlag = 151, + SingleUserMode = 152, + TableEnd = 999, AttributeName = 1000, // String, Mandatory @@ -344,6 +346,8 @@ public: Uint32 RowGCIFlag; Uint32 RowChecksumFlag; + + Uint32 SingleUserMode; Table() {} void init(); diff --git a/storage/ndb/include/kernel/signaldata/LqhKey.hpp b/storage/ndb/include/kernel/signaldata/LqhKey.hpp index 15ed7c25395..c85a4d4a796 100644 --- a/storage/ndb/include/kernel/signaldata/LqhKey.hpp +++ b/storage/ndb/include/kernel/signaldata/LqhKey.hpp @@ -582,6 +582,7 @@ class LqhKeyRef { * Reciver(s) */ friend class Dbtc; + friend class Restore; /** * Sender(s) diff --git a/storage/ndb/include/kernel/signaldata/StartPerm.hpp b/storage/ndb/include/kernel/signaldata/StartPerm.hpp index ffb16bfec9d..16229f6552f 100644 --- a/storage/ndb/include/kernel/signaldata/StartPerm.hpp +++ b/storage/ndb/include/kernel/signaldata/StartPerm.hpp @@ -67,6 +67,7 @@ private: enum ErrorCode { ZNODE_ALREADY_STARTING_ERROR = 305, + ZNODE_START_DISALLOWED_ERROR = 309, InitialStartRequired = 320 }; }; diff --git a/storage/ndb/include/mgmapi/mgmapi.h b/storage/ndb/include/mgmapi/mgmapi.h index 883d3c43699..2bedba963e2 100644 --- a/storage/ndb/include/mgmapi/mgmapi.h +++ b/storage/ndb/include/mgmapi/mgmapi.h @@ -515,6 +515,18 @@ extern "C" { int ndb_mgm_set_connectstring(NdbMgmHandle handle, const char *connect_string); + /** + * Returns the number of management servers in the connect string + * (as set by ndb_mgm_set_connectstring()). This can be used + * to help work out how long the maximum amount of time that + * ndb_mgm_connect can take. + * + * @param handle Management handle + * + * @return < 0 on error + */ + int ndb_mgm_number_of_mgmd_in_connect_string(NdbMgmHandle handle); + int ndb_mgm_set_configuration_nodeid(NdbMgmHandle handle, int nodeid); int ndb_mgm_get_configuration_nodeid(NdbMgmHandle handle); int ndb_mgm_get_connected_port(NdbMgmHandle handle); @@ -546,8 +558,7 @@ extern "C" { const char *ndb_mgm_get_connectstring(NdbMgmHandle handle, char *buf, int buf_sz); /** - * Sets the number of seconds to wait for connect(2) during ndb_mgm_connect - * Default is no timeout + * DEPRICATED: use ndb_mgm_set_timeout instead. * * @param handle NdbMgmHandle * @param seconds number of seconds @@ -556,9 +567,26 @@ extern "C" { int ndb_mgm_set_connect_timeout(NdbMgmHandle handle, unsigned int seconds); /** + * Sets the number of milliseconds for timeout of network operations + * Default is 60 seconds. + * Only increments of 1000 ms are supported. No function is gaurenteed + * to return in a fraction of a second. + * + * @param handle NdbMgmHandle + * @param timeout_ms number of milliseconds + * @return zero on success + */ + int ndb_mgm_set_timeout(NdbMgmHandle handle, unsigned int timeout_ms); + + /** * Connects to a management server. Connectstring is set by * ndb_mgm_set_connectstring(). * + * The timeout value is for connect to each management server. + * Use ndb_mgm_number_of_mgmd_in_connect_string to work out + * the approximate maximum amount of time that could be spent in this + * function. + * * @param handle Management handle. 
* @param no_retries Number of retries to connect * (0 means connect once). diff --git a/storage/ndb/include/mgmcommon/ConfigRetriever.hpp b/storage/ndb/include/mgmcommon/ConfigRetriever.hpp index 221e24d0572..27a189c1563 100644 --- a/storage/ndb/include/mgmcommon/ConfigRetriever.hpp +++ b/storage/ndb/include/mgmcommon/ConfigRetriever.hpp @@ -28,7 +28,8 @@ class ConfigRetriever { public: ConfigRetriever(const char * _connect_string, Uint32 version, Uint32 nodeType, - const char * _bind_address = 0); + const char * _bind_address = 0, + int timeout_ms = 30000); ~ConfigRetriever(); int do_connect(int no_retries, int retry_delay_in_seconds, int verbose); diff --git a/storage/ndb/include/ndb_constants.h b/storage/ndb/include/ndb_constants.h index 8b3cb5b6cf9..e2f8e17e913 100644 --- a/storage/ndb/include/ndb_constants.h +++ b/storage/ndb/include/ndb_constants.h @@ -89,5 +89,12 @@ */ #define NDB_TEMP_TAB_PERMANENT 0 #define NDB_TEMP_TAB_TEMPORARY 1 + +/* + * Table single user mode + */ +#define NDB_SUM_LOCKED 0 +#define NDB_SUM_READONLY 1 +#define NDB_SUM_READ_WRITE 2 #endif diff --git a/storage/ndb/include/ndbapi/NdbDictionary.hpp b/storage/ndb/include/ndbapi/NdbDictionary.hpp index aada314e454..0a012f867f2 100644 --- a/storage/ndb/include/ndbapi/NdbDictionary.hpp +++ b/storage/ndb/include/ndbapi/NdbDictionary.hpp @@ -576,6 +576,15 @@ public: */ class Table : public Object { public: + /* + * Single user mode specifies access rights to table during single user mode + */ + enum SingleUserMode { + SingleUserModeLocked = NDB_SUM_LOCKED, + SingleUserModeReadOnly = NDB_SUM_READONLY, + SingleUserModeReadWrite = NDB_SUM_READ_WRITE + }; + /** * @name General * @{ @@ -895,6 +904,13 @@ public: void setMinRows(Uint64 minRows); Uint64 getMinRows() const; + /** + * Set/Get SingleUserMode + */ + void setSingleUserMode(enum SingleUserMode); + enum SingleUserMode getSingleUserMode() const; + + /** @} *******************************************************************/ /** diff --git a/storage/ndb/include/ndbapi/NdbRecAttr.hpp b/storage/ndb/include/ndbapi/NdbRecAttr.hpp index 40d5b598c0c..adaddd2dab3 100644 --- a/storage/ndb/include/ndbapi/NdbRecAttr.hpp +++ b/storage/ndb/include/ndbapi/NdbRecAttr.hpp @@ -131,6 +131,13 @@ public: /** * Get value stored in NdbRecAttr object. + * + * @return Medium value. + */ + Int32 medium_value() const; + + /** + * Get value stored in NdbRecAttr object. * * @return Short value. */ @@ -160,6 +167,13 @@ public: /** * Get value stored in NdbRecAttr object. * + * @return Unsigned medium value. + */ + Uint32 u_medium_value() const; + + /** + * Get value stored in NdbRecAttr object. + * * @return Unsigned short value. 
*/ Uint16 u_short_value() const; @@ -288,6 +302,13 @@ NdbRecAttr::int32_value() const } inline +Int32 +NdbRecAttr::medium_value() const +{ + return sint3korr((unsigned char *)theRef); +} + +inline short NdbRecAttr::short_value() const { @@ -309,6 +330,13 @@ NdbRecAttr::u_32_value() const } inline +Uint32 +NdbRecAttr::u_medium_value() const +{ + return uint3korr((unsigned char*)theRef); +} + +inline Uint16 NdbRecAttr::u_short_value() const { @@ -409,6 +437,25 @@ NdbRecAttr::setUNDEFINED() class NdbOut& operator <<(class NdbOut&, const NdbRecAttr &); +class NdbRecordPrintFormat +{ +public: + NdbRecordPrintFormat(); + virtual ~NdbRecordPrintFormat(); + const char *lines_terminated_by; + const char *fields_terminated_by; + const char *start_array_enclosure; + const char *end_array_enclosure; + const char *fields_enclosed_by; + const char *fields_optionally_enclosed_by; + const char *hex_prefix; + const char *null_string; + int hex_format; +}; +NdbOut& +ndbrecattr_print_formatted(NdbOut& out, const NdbRecAttr &r, + const NdbRecordPrintFormat &f); + #endif // ifndef DOXYGEN_SHOULD_SKIP_INTERNAL #endif diff --git a/storage/ndb/include/ndbapi/ndb_cluster_connection.hpp b/storage/ndb/include/ndbapi/ndb_cluster_connection.hpp index e3532b072c0..80bfe7461f8 100644 --- a/storage/ndb/include/ndbapi/ndb_cluster_connection.hpp +++ b/storage/ndb/include/ndbapi/ndb_cluster_connection.hpp @@ -62,6 +62,24 @@ public: void set_name(const char *name); /** + * Set timeout + * + * Used as a timeout when talking to the management server, + * helps limit the amount of time that we may block when connecting + * + * Basically just calls ndb_mgm_set_timeout(h,ms). + * + * The default is 30 seconds. + * + * @param timeout_ms millisecond timeout. As with ndb_mgm_set_timeout, + * only increments of 1000 are really supported, + * with not to much gaurentees about calls completing + * in any hard amount of time. + * @return 0 on success + */ + int set_timeout(int timeout_ms); + + /** * Connect to a cluster management server * * @param no_retries specifies the number of retries to attempt diff --git a/storage/ndb/include/util/InputStream.hpp b/storage/ndb/include/util/InputStream.hpp index 031260dac3b..3e696eac732 100644 --- a/storage/ndb/include/util/InputStream.hpp +++ b/storage/ndb/include/util/InputStream.hpp @@ -32,6 +32,7 @@ public: * Set the mutex to be UNLOCKED when blocking (e.g. select(2)) */ void set_mutex(NdbMutex *m) { m_mutex= m; }; + virtual void reset_timeout() {}; protected: NdbMutex *m_mutex; }; @@ -48,12 +49,17 @@ extern FileInputStream Stdin; class SocketInputStream : public InputStream { NDB_SOCKET_TYPE m_socket; - unsigned m_timeout; + unsigned m_timeout_ms; + unsigned m_timeout_remain; bool m_startover; + bool m_timedout; public: - SocketInputStream(NDB_SOCKET_TYPE socket, unsigned readTimeout = 1000); + SocketInputStream(NDB_SOCKET_TYPE socket, unsigned read_timeout_ms = 60000); virtual ~SocketInputStream() {} char* gets(char * buf, int bufLen); + bool timedout() { return m_timedout; }; + void reset_timeout() { m_timedout= false; m_timeout_remain= m_timeout_ms;}; + }; #endif diff --git a/storage/ndb/include/util/OutputStream.hpp b/storage/ndb/include/util/OutputStream.hpp index d56d04adc50..05fc69a46c3 100644 --- a/storage/ndb/include/util/OutputStream.hpp +++ b/storage/ndb/include/util/OutputStream.hpp @@ -29,6 +29,7 @@ public: virtual int print(const char * fmt, ...) = 0; virtual int println(const char * fmt, ...) 
= 0; virtual void flush() {}; + virtual void reset_timeout() {}; }; class FileOutputStream : public OutputStream { @@ -36,6 +37,7 @@ class FileOutputStream : public OutputStream { public: FileOutputStream(FILE * file = stdout); virtual ~FileOutputStream() {} + FILE *getFile() { return f; } int print(const char * fmt, ...); int println(const char * fmt, ...); @@ -45,9 +47,13 @@ public: class SocketOutputStream : public OutputStream { NDB_SOCKET_TYPE m_socket; unsigned m_timeout_ms; + bool m_timedout; + unsigned m_timeout_remain; public: SocketOutputStream(NDB_SOCKET_TYPE socket, unsigned write_timeout_ms = 1000); virtual ~SocketOutputStream() {} + bool timedout() { return m_timedout; }; + void reset_timeout() { m_timedout= false; m_timeout_remain= m_timeout_ms;}; int print(const char * fmt, ...); int println(const char * fmt, ...); diff --git a/storage/ndb/include/util/socket_io.h b/storage/ndb/include/util/socket_io.h index a988f4a1e8d..f76b6790b19 100644 --- a/storage/ndb/include/util/socket_io.h +++ b/storage/ndb/include/util/socket_io.h @@ -28,15 +28,20 @@ extern "C" { int read_socket(NDB_SOCKET_TYPE, int timeout_ms, char *, int len); - int readln_socket(NDB_SOCKET_TYPE socket, int timeout_millis, + int readln_socket(NDB_SOCKET_TYPE socket, int timeout_millis, int *time, char * buf, int buflen, NdbMutex *mutex); - int write_socket(NDB_SOCKET_TYPE, int timeout_ms, const char[], int len); - - int print_socket(NDB_SOCKET_TYPE, int timeout_ms, const char *, ...); - int println_socket(NDB_SOCKET_TYPE, int timeout_ms, const char *, ...); - int vprint_socket(NDB_SOCKET_TYPE, int timeout_ms, const char *, va_list); - int vprintln_socket(NDB_SOCKET_TYPE, int timeout_ms, const char *, va_list); + int write_socket(NDB_SOCKET_TYPE, int timeout_ms, int *time, + const char[], int len); + + int print_socket(NDB_SOCKET_TYPE, int timeout_ms, int *time, + const char *, ...); + int println_socket(NDB_SOCKET_TYPE, int timeout_ms, int *time, + const char *, ...); + int vprint_socket(NDB_SOCKET_TYPE, int timeout_ms, int *time, + const char *, va_list); + int vprintln_socket(NDB_SOCKET_TYPE, int timeout_ms, int *time, + const char *, va_list); #ifdef __cplusplus } diff --git a/storage/ndb/src/common/debugger/EventLogger.cpp b/storage/ndb/src/common/debugger/EventLogger.cpp index 17694fa6718..0964a54f906 100644 --- a/storage/ndb/src/common/debugger/EventLogger.cpp +++ b/storage/ndb/src/common/debugger/EventLogger.cpp @@ -744,9 +744,9 @@ void getTextEventBufferStatus(QQQQ) { "Event buffer status: used=%d%s(%d%) alloc=%d%s(%d%) " "max=%d%s apply_gci=%lld latest_gci=%lld", used, used_unit, - theData[2] ? (theData[1]*100)/theData[2] : 0, + theData[2] ? (Uint32)((((Uint64)theData[1])*100)/theData[2]) : 0, alloc, alloc_unit, - theData[3] ? (theData[2]*100)/theData[3] : 0, + theData[3] ? 
(Uint32)((((Uint64)theData[2])*100)/theData[3]) : 0, max_, max_unit, theData[4]+(((Uint64)theData[5])<<32), theData[6]+(((Uint64)theData[7])<<32)); diff --git a/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp b/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp index 6d713e53351..49392b9beeb 100644 --- a/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp +++ b/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp @@ -66,6 +66,7 @@ DictTabInfo::TableMapping[] = { DTIMAP(Table, MaxRowsHigh, MaxRowsHigh), DTIMAP(Table, MinRowsLow, MinRowsLow), DTIMAP(Table, MinRowsHigh, MinRowsHigh), + DTIMAP(Table, SingleUserMode, SingleUserMode), DTIBREAK(AttributeName) }; @@ -164,6 +165,8 @@ DictTabInfo::Table::init(){ MaxRowsHigh = 0; MinRowsLow = 0; MinRowsHigh = 0; + + SingleUserMode = 0; } void diff --git a/storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp b/storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp index bcb13f38c72..35b1a91e9da 100644 --- a/storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp +++ b/storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp @@ -45,7 +45,8 @@ ConfigRetriever::ConfigRetriever(const char * _connect_string, Uint32 version, Uint32 node_type, - const char * _bindaddress) + const char * _bindaddress, + int timeout_ms) { DBUG_ENTER("ConfigRetriever::ConfigRetriever"); @@ -61,6 +62,8 @@ ConfigRetriever::ConfigRetriever(const char * _connect_string, DBUG_VOID_RETURN; } + ndb_mgm_set_timeout(m_handle, timeout_ms); + if (ndb_mgm_set_connectstring(m_handle, _connect_string)) { BaseString tmp(ndb_mgm_get_latest_error_msg(m_handle)); diff --git a/storage/ndb/src/common/transporter/TransporterRegistry.cpp b/storage/ndb/src/common/transporter/TransporterRegistry.cpp index 8ff95d1115e..f35217a9726 100644 --- a/storage/ndb/src/common/transporter/TransporterRegistry.cpp +++ b/storage/ndb/src/common/transporter/TransporterRegistry.cpp @@ -119,6 +119,7 @@ void TransporterRegistry::set_mgm_handle(NdbMgmHandle h) if (m_mgm_handle) ndb_mgm_destroy_handle(&m_mgm_handle); m_mgm_handle= h; + ndb_mgm_set_timeout(m_mgm_handle, 5000); #ifndef DBUG_OFF if (h) { @@ -1058,9 +1059,16 @@ TransporterRegistry::update_connections() void TransporterRegistry::start_clients_thread() { + int persist_mgm_count= 0; DBUG_ENTER("TransporterRegistry::start_clients_thread"); while (m_run_start_clients_thread) { NdbSleep_MilliSleep(100); + persist_mgm_count++; + if(persist_mgm_count==50) + { + ndb_mgm_check_connection(m_mgm_handle); + persist_mgm_count= 0; + } for (int i= 0, n= 0; n < nTransporters && m_run_start_clients_thread; i++){ Transporter * t = theTransporters[i]; if (!t) @@ -1118,7 +1126,12 @@ TransporterRegistry::start_clients_thread() { g_eventLogger.info("Management server closed connection early. " "It is probably being shut down (or has problems). " - "We will retry the connection."); + "We will retry the connection. 
%d %s %s line: %d", + ndb_mgm_get_latest_error(m_mgm_handle), + ndb_mgm_get_latest_error_desc(m_mgm_handle), + ndb_mgm_get_latest_error_msg(m_mgm_handle), + ndb_mgm_get_latest_error_line(m_mgm_handle) + ); } } /** else diff --git a/storage/ndb/src/common/util/InputStream.cpp b/storage/ndb/src/common/util/InputStream.cpp index 74c31cd7583..2337344d91a 100644 --- a/storage/ndb/src/common/util/InputStream.cpp +++ b/storage/ndb/src/common/util/InputStream.cpp @@ -34,14 +34,18 @@ FileInputStream::gets(char * buf, int bufLen){ } SocketInputStream::SocketInputStream(NDB_SOCKET_TYPE socket, - unsigned readTimeout) + unsigned read_timeout_ms) : m_socket(socket) { m_startover= true; - m_timeout = readTimeout; + m_timeout_remain= m_timeout_ms = read_timeout_ms; + + m_timedout= false; } char* SocketInputStream::gets(char * buf, int bufLen) { + if(timedout()) + return 0; assert(bufLen >= 2); int offset= 0; if(m_startover) @@ -52,10 +56,15 @@ SocketInputStream::gets(char * buf, int bufLen) { else offset= strlen(buf); - int res = readln_socket(m_socket, m_timeout, buf+offset, bufLen-offset, m_mutex); + int time= 0; + int res = readln_socket(m_socket, m_timeout_remain, &time, + buf+offset, bufLen-offset, m_mutex); - if(res == 0) + if(res >= 0) + m_timeout_remain-=time; + if(res == 0 || m_timeout_remain<=0) { + m_timedout= true; buf[0]=0; return buf; } @@ -63,7 +72,9 @@ SocketInputStream::gets(char * buf, int bufLen) { m_startover= true; if(res == -1) + { return 0; + } return buf; } diff --git a/storage/ndb/src/common/util/OutputStream.cpp b/storage/ndb/src/common/util/OutputStream.cpp index 99216ba5a28..0943e47e33f 100644 --- a/storage/ndb/src/common/util/OutputStream.cpp +++ b/storage/ndb/src/common/util/OutputStream.cpp @@ -44,22 +44,51 @@ FileOutputStream::println(const char * fmt, ...){ SocketOutputStream::SocketOutputStream(NDB_SOCKET_TYPE socket, unsigned write_timeout_ms){ m_socket = socket; - m_timeout_ms = write_timeout_ms; + m_timeout_remain= m_timeout_ms = write_timeout_ms; + m_timedout= false; } int SocketOutputStream::print(const char * fmt, ...){ va_list ap; + + if(timedout()) + return -1; + + int time= 0; va_start(ap, fmt); - const int ret = vprint_socket(m_socket, m_timeout_ms, fmt, ap); + int ret = vprint_socket(m_socket, m_timeout_ms, &time, fmt, ap); va_end(ap); + + if(ret >= 0) + m_timeout_remain-=time; + if(errno==ETIMEDOUT || m_timeout_remain<=0) + { + m_timedout= true; + ret= -1; + } + return ret; } int SocketOutputStream::println(const char * fmt, ...){ va_list ap; + + if(timedout()) + return -1; + + int time= 0; va_start(ap, fmt); - const int ret = vprintln_socket(m_socket, m_timeout_ms, fmt, ap); + int ret = vprintln_socket(m_socket, m_timeout_ms, &time, fmt, ap); va_end(ap); + + if(ret >= 0) + m_timeout_remain-=time; + if (errno==ETIMEDOUT || m_timeout_remain<=0) + { + m_timedout= true; + ret= -1; + } + return ret; } diff --git a/storage/ndb/src/common/util/socket_io.cpp b/storage/ndb/src/common/util/socket_io.cpp index d19c792e20f..dfdcd19412f 100644 --- a/storage/ndb/src/common/util/socket_io.cpp +++ b/storage/ndb/src/common/util/socket_io.cpp @@ -18,6 +18,7 @@ #include <NdbTCP.h> #include <socket_io.h> #include <NdbOut.hpp> +#include <NdbTick.h> extern "C" int @@ -47,7 +48,7 @@ read_socket(NDB_SOCKET_TYPE socket, int timeout_millis, extern "C" int -readln_socket(NDB_SOCKET_TYPE socket, int timeout_millis, +readln_socket(NDB_SOCKET_TYPE socket, int timeout_millis, int *time, char * buf, int buflen, NdbMutex *mutex){ if(buflen <= 1) return 0; @@ -62,7 +63,10 @@ 
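The SocketInputStream/SocketOutputStream changes above keep a remaining-time budget (m_timeout_remain) that is decremented by the time each underlying socket call actually spent, plus a sticky m_timedout flag so later calls fail fast instead of blocking again. A self-contained sketch of that accounting pattern; blocking_call() is a made-up stand-in for readln_socket/write_socket, not an NDB function:

// Stub for illustration: pretends each call blocks for 10 ms and returns 1 byte.
int blocking_call(int /*budget_ms*/, int *elapsed_ms) { *elapsed_ms = 10; return 1; }

class BudgetedStream {
  int m_timeout_ms;      // total budget for the whole logical operation
  int m_timeout_remain;  // what is left of the budget
  bool m_timedout;       // sticky: once set, every later call fails immediately
public:
  explicit BudgetedStream(int timeout_ms)
    : m_timeout_ms(timeout_ms), m_timeout_remain(timeout_ms), m_timedout(false) {}

  void reset_timeout() { m_timedout = false; m_timeout_remain = m_timeout_ms; }
  bool timedout() const { return m_timedout; }

  int step() {
    if (m_timedout)
      return -1;
    int elapsed = 0;
    int res = blocking_call(m_timeout_remain, &elapsed);
    if (res >= 0)
      m_timeout_remain -= elapsed;   // charge the time actually spent
    if (res == 0 || m_timeout_remain <= 0)
      m_timedout = true;             // nothing transferred or budget exhausted
    return m_timedout ? -1 : res;
  }
};

int main() {
  BudgetedStream s(60000);                     // 60 s budget, as in SocketInputStream
  while (!s.timedout() && s.step() > 0) { }    // consume until the budget runs out
}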
readln_socket(NDB_SOCKET_TYPE socket, int timeout_millis, if(mutex) NdbMutex_Unlock(mutex); + Uint64 tick= NdbTick_CurrentMillisecond(); const int selectRes = select(socket + 1, &readset, 0, 0, &timeout); + + *time= NdbTick_CurrentMillisecond() - tick; if(mutex) NdbMutex_Lock(mutex); @@ -126,9 +130,13 @@ readln_socket(NDB_SOCKET_TYPE socket, int timeout_millis, FD_ZERO(&readset); FD_SET(socket, &readset); - timeout.tv_sec = (timeout_millis / 1000); - timeout.tv_usec = (timeout_millis % 1000) * 1000; + timeout.tv_sec = ((timeout_millis - *time) / 1000); + timeout.tv_usec = ((timeout_millis - *time) % 1000) * 1000; + + tick= NdbTick_CurrentMillisecond(); const int selectRes = select(socket + 1, &readset, 0, 0, &timeout); + *time= NdbTick_CurrentMillisecond() - tick; + if(selectRes != 1){ return -1; } @@ -139,7 +147,7 @@ readln_socket(NDB_SOCKET_TYPE socket, int timeout_millis, extern "C" int -write_socket(NDB_SOCKET_TYPE socket, int timeout_millis, +write_socket(NDB_SOCKET_TYPE socket, int timeout_millis, int *time, const char buf[], int len){ fd_set writeset; FD_ZERO(&writeset); @@ -148,7 +156,11 @@ write_socket(NDB_SOCKET_TYPE socket, int timeout_millis, timeout.tv_sec = (timeout_millis / 1000); timeout.tv_usec = (timeout_millis % 1000) * 1000; + + Uint64 tick= NdbTick_CurrentMillisecond(); const int selectRes = select(socket + 1, 0, &writeset, 0, &timeout); + *time= NdbTick_CurrentMillisecond() - tick; + if(selectRes != 1){ return -1; } @@ -167,9 +179,13 @@ write_socket(NDB_SOCKET_TYPE socket, int timeout_millis, FD_ZERO(&writeset); FD_SET(socket, &writeset); - timeout.tv_sec = 1; - timeout.tv_usec = 0; + timeout.tv_sec = ((timeout_millis - *time) / 1000); + timeout.tv_usec = ((timeout_millis - *time) % 1000) * 1000; + + Uint64 tick= NdbTick_CurrentMillisecond(); const int selectRes2 = select(socket + 1, 0, &writeset, 0, &timeout); + *time= NdbTick_CurrentMillisecond() - tick; + if(selectRes2 != 1){ return -1; } @@ -180,11 +196,11 @@ write_socket(NDB_SOCKET_TYPE socket, int timeout_millis, extern "C" int -print_socket(NDB_SOCKET_TYPE socket, int timeout_millis, +print_socket(NDB_SOCKET_TYPE socket, int timeout_millis, int *time, const char * fmt, ...){ va_list ap; va_start(ap, fmt); - int ret = vprint_socket(socket, timeout_millis, fmt, ap); + int ret = vprint_socket(socket, timeout_millis, time, fmt, ap); va_end(ap); return ret; @@ -192,18 +208,18 @@ print_socket(NDB_SOCKET_TYPE socket, int timeout_millis, extern "C" int -println_socket(NDB_SOCKET_TYPE socket, int timeout_millis, +println_socket(NDB_SOCKET_TYPE socket, int timeout_millis, int *time, const char * fmt, ...){ va_list ap; va_start(ap, fmt); - int ret = vprintln_socket(socket, timeout_millis, fmt, ap); + int ret = vprintln_socket(socket, timeout_millis, time, fmt, ap); va_end(ap); return ret; } extern "C" int -vprint_socket(NDB_SOCKET_TYPE socket, int timeout_millis, +vprint_socket(NDB_SOCKET_TYPE socket, int timeout_millis, int *time, const char * fmt, va_list ap){ char buf[1000]; char *buf2 = buf; @@ -221,7 +237,7 @@ vprint_socket(NDB_SOCKET_TYPE socket, int timeout_millis, } else return 0; - int ret = write_socket(socket, timeout_millis, buf2, size); + int ret = write_socket(socket, timeout_millis, time, buf2, size); if(buf2 != buf) free(buf2); return ret; @@ -229,7 +245,7 @@ vprint_socket(NDB_SOCKET_TYPE socket, int timeout_millis, extern "C" int -vprintln_socket(NDB_SOCKET_TYPE socket, int timeout_millis, +vprintln_socket(NDB_SOCKET_TYPE socket, int timeout_millis, int *time, const char * fmt, va_list ap){ char 
buf[1000]; char *buf2 = buf; @@ -249,7 +265,7 @@ vprintln_socket(NDB_SOCKET_TYPE socket, int timeout_millis, } buf2[size-1]='\n'; - int ret = write_socket(socket, timeout_millis, buf2, size); + int ret = write_socket(socket, timeout_millis, time, buf2, size); if(buf2 != buf) free(buf2); return ret; diff --git a/storage/ndb/src/common/util/version.c b/storage/ndb/src/common/util/version.c index b2ebb87c144..f309a3d4ad5 100644 --- a/storage/ndb/src/common/util/version.c +++ b/storage/ndb/src/common/util/version.c @@ -91,7 +91,8 @@ void ndbSetOwnVersion() {} #ifndef TEST_VERSION struct NdbUpGradeCompatible ndbCompatibleTable_full[] = { - { MAKE_VERSION(5,1,NDB_VERSION_BUILD), MAKE_VERSION(5,1,0), UG_Range}, + { MAKE_VERSION(5,1,NDB_VERSION_BUILD), MAKE_VERSION(5,1,18), UG_Range}, + { MAKE_VERSION(5,1,17), MAKE_VERSION(5,1,0), UG_Range}, { MAKE_VERSION(5,0,NDB_VERSION_BUILD), MAKE_VERSION(5,0,12), UG_Range}, { MAKE_VERSION(5,0,11), MAKE_VERSION(5,0,2), UG_Range}, { MAKE_VERSION(4,1,NDB_VERSION_BUILD), MAKE_VERSION(4,1,15), UG_Range }, diff --git a/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.vcproj b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.vcproj index 56f9f3a8511..fb1e2fd601c 100644 --- a/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.vcproj +++ b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.vcproj @@ -12,8 +12,8 @@ <Configurations> <Configuration Name="Release|Win32" - OutputDirectory=".\Release" - IntermediateDirectory=".\Release" + OutputDirectory=".\release_obj" + IntermediateDirectory=".\release_obj" ConfigurationType="1" UseOfMFC="0" ATLMinimizesCRunTimeLibraryUsage="FALSE" @@ -27,10 +27,10 @@ EnableFunctionLevelLinking="TRUE" UsePrecompiledHeader="3" PrecompiledHeaderThrough="stdafx.h" - PrecompiledHeaderFile=".\Release/CPC_GUI.pch" - AssemblerListingLocation=".\Release/" - ObjectFile=".\Release/" - ProgramDataBaseFileName=".\Release/" + PrecompiledHeaderFile=".\release_obj/CPC_GUI.pch" + AssemblerListingLocation=".\release_obj/" + ObjectFile=".\release_obj/" + ProgramDataBaseFileName=".\release_obj/" WarningLevel="3" SuppressStartupBanner="TRUE"/> <Tool @@ -39,10 +39,10 @@ Name="VCLinkerTool" AdditionalOptions="/MACHINE:I386" AdditionalDependencies="mfc42.lib" - OutputFile=".\Release/CPC_GUI.exe" + OutputFile=".\release_obj/CPC_GUI.exe" LinkIncremental="1" SuppressStartupBanner="TRUE" - ProgramDatabaseFile=".\Release/CPC_GUI.pdb" + ProgramDatabaseFile=".\release_obj/CPC_GUI.pdb" SubSystem="2"/> <Tool Name="VCMIDLTool" @@ -50,7 +50,7 @@ MkTypLibCompatible="TRUE" SuppressStartupBanner="TRUE" TargetEnvironment="1" - TypeLibraryName=".\Release/CPC_GUI.tlb"/> + TypeLibraryName=".\release_obj/CPC_GUI.tlb"/> <Tool Name="VCPostBuildEventTool"/> <Tool @@ -68,8 +68,8 @@ </Configuration> <Configuration Name="Debug|Win32" - OutputDirectory=".\Debug" - IntermediateDirectory=".\Debug" + OutputDirectory=".\debug_obj" + IntermediateDirectory=".\debug_obj" ConfigurationType="1" UseOfMFC="0" ATLMinimizesCRunTimeLibraryUsage="FALSE" @@ -82,10 +82,10 @@ RuntimeLibrary="5" UsePrecompiledHeader="3" PrecompiledHeaderThrough="stdafx.h" - PrecompiledHeaderFile=".\Debug/CPC_GUI.pch" - AssemblerListingLocation=".\Debug/" - ObjectFile=".\Debug/" - ProgramDataBaseFileName=".\Debug/" + PrecompiledHeaderFile=".\debug_obj/CPC_GUI.pch" + AssemblerListingLocation=".\debug_obj/" + ObjectFile=".\debug_obj/" + ProgramDataBaseFileName=".\debug_obj/" BrowseInformation="1" WarningLevel="3" SuppressStartupBanner="TRUE" @@ -96,11 +96,11 @@ Name="VCLinkerTool" AdditionalOptions="/MACHINE:I386" AdditionalDependencies="comctl32.lib 
mfc70d.lib" - OutputFile=".\Debug/CPC_GUI.exe" - LinkIncremental="2" + OutputFile=".\debug_obj/CPC_GUI.exe" + LinkIncremental="1" SuppressStartupBanner="TRUE" GenerateDebugInformation="TRUE" - ProgramDatabaseFile=".\Debug/CPC_GUI.pdb" + ProgramDatabaseFile=".\debug_obj/CPC_GUI.pdb" SubSystem="2"/> <Tool Name="VCMIDLTool" @@ -108,7 +108,7 @@ MkTypLibCompatible="TRUE" SuppressStartupBanner="TRUE" TargetEnvironment="1" - TypeLibraryName=".\Debug/CPC_GUI.tlb"/> + TypeLibraryName=".\debug_obj/CPC_GUI.tlb"/> <Tool Name="VCPostBuildEventTool"/> <Tool diff --git a/storage/ndb/src/kernel/blocks/ERROR_codes.txt b/storage/ndb/src/kernel/blocks/ERROR_codes.txt index c91a2da15d1..b3405679978 100644 --- a/storage/ndb/src/kernel/blocks/ERROR_codes.txt +++ b/storage/ndb/src/kernel/blocks/ERROR_codes.txt @@ -1,12 +1,12 @@ Next QMGR 1 -Next NDBCNTR 1000 +Next NDBCNTR 1001 Next NDBFS 2000 Next DBACC 3002 -Next DBTUP 4024 +Next DBTUP 4029 Next DBLQH 5045 Next DBDICT 6007 -Next DBDIH 7181 -Next DBTC 8039 +Next DBDIH 7183 +Next DBTC 8040 Next CMVMI 9000 Next BACKUP 10038 Next DBUTIL 11002 @@ -327,6 +327,8 @@ Test Crashes in handling node restarts 7170: Crash when receiving START_PERMREF (InitialStartRequired) +8039: DBTC delay INCL_NODECONF and kill starting node + 7174: Crash starting node before sending DICT_LOCK_REQ 7175: Master sends one fake START_PERMREF (ZNODE_ALREADY_STARTING_ERROR) 7176: Slave NR pretends master does not support DICT lock (rolling upgrade) @@ -512,3 +514,12 @@ Dbtup: 4022 - addTuxEntries - fail before add of first entry 4023 - addTuxEntries - fail add of last entry (the entry for last index) + +4025: Fail all inserts with out of memory +4026: Fail one insert with oom +4027: Fail inserts randomly with oom +4028: Fail one random insert with oom + +NDBCNTR: + +1000: Crash insertion on SystemError::CopyFragRef diff --git a/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp b/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp index ca348b23e6a..21114c6a155 100644 --- a/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp +++ b/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp @@ -760,7 +760,7 @@ private: void increaselistcont(Signal* signal); void seizeLeftlist(Signal* signal); void seizeRightlist(Signal* signal); - Uint32 readTablePk(Uint32 localkey1, Uint32 eh, const Operationrec*); + Uint32 readTablePk(Uint32 localkey1, Uint32 eh, OperationrecPtr); Uint32 getElement(Signal* signal, OperationrecPtr& lockOwner); void getdirindex(Signal* signal); void commitdelete(Signal* signal); diff --git a/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp index b90a000d55b..0ba5908ddb4 100644 --- a/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp +++ b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp @@ -46,6 +46,7 @@ ndbrequire(false); } } while(0) #else #define vlqrequire(x) ndbrequire(x) +#define dump_lock_queue(x) #endif @@ -3182,7 +3183,7 @@ void Dbacc::getdirindex(Signal* signal) }//Dbacc::getdirindex() Uint32 -Dbacc::readTablePk(Uint32 localkey1, Uint32 eh, const Operationrec* op) +Dbacc::readTablePk(Uint32 localkey1, Uint32 eh, Ptr<Operationrec> opPtr) { int ret; Uint32 tableId = fragrecptr.p->myTableId; @@ -3203,8 +3204,17 @@ Dbacc::readTablePk(Uint32 localkey1, Uint32 eh, const Operationrec* op) else { ndbrequire(ElementHeader::getLocked(eh)); - ndbrequire((op->m_op_bits & Operationrec::OP_MASK) != ZSCAN_OP); - ret = c_lqh->readPrimaryKeys(op->userptr, ckeys, xfrm); + if (unlikely((opPtr.p->m_op_bits & Operationrec::OP_MASK) == ZSCAN_OP)) + { + 
dump_lock_queue(opPtr); + ndbrequire(opPtr.p->nextParallelQue == RNIL); + ndbrequire(opPtr.p->nextSerialQue == RNIL); + ndbrequire(opPtr.p->m_op_bits & Operationrec::OP_ELEMENT_DISAPPEARED); + ndbrequire(opPtr.p->m_op_bits & Operationrec::OP_COMMIT_DELETE_CHECK); + ndbrequire((opPtr.p->m_op_bits & Operationrec::OP_STATE_MASK) == Operationrec::OP_STATE_RUNNING); + return 0; + } + ret = c_lqh->readPrimaryKeys(opPtr.p->userptr, ckeys, xfrm); } jamEntry(); ndbrequire(ret >= 0); @@ -3349,7 +3359,7 @@ Dbacc::getElement(Signal* signal, OperationrecPtr& lockOwnerPtr) if (! searchLocalKey) { Uint32 len = readTablePk(localkey1, tgeElementHeader, - lockOwnerPtr.p); + lockOwnerPtr); found = (len == operationRecPtr.p->xfrmtupkeylen) && (memcmp(Tkeydata, ckeys, len << 2) == 0); } else { @@ -6864,6 +6874,7 @@ void Dbacc::execACC_TO_REQ(Signal* signal) { tatrOpPtr.p->transId1 = signal->theData[2]; tatrOpPtr.p->transId2 = signal->theData[3]; + validate_lock_queue(tatrOpPtr); } else { jam(); signal->theData[0] = cminusOne; diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index fa982063e36..46c7c71d886 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -461,6 +461,7 @@ Dbdict::packTableIntoPages(SimpleProperties::Writer & w, w.add(DictTabInfo::FragmentCount, tablePtr.p->fragmentCount); w.add(DictTabInfo::MinRowsLow, tablePtr.p->minRowsLow); w.add(DictTabInfo::MinRowsHigh, tablePtr.p->minRowsHigh); + w.add(DictTabInfo::SingleUserMode, tablePtr.p->singleUserMode); if(signal) { @@ -1089,7 +1090,6 @@ Dbdict::updateSchemaState(Signal* signal, Uint32 tableId, SchemaFile::TableEntry* te, Callback* callback, bool savetodisk){ jam(); - ndbrequire(tableId < c_tableRecordPool.getSize()); XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; SchemaFile::TableEntry * tableEntry = getTableEntry(xsf, tableId); @@ -1872,6 +1872,7 @@ void Dbdict::initialiseTableRecord(TableRecordPtr tablePtr) tablePtr.p->m_bits = 0; tablePtr.p->minRowsLow = 0; tablePtr.p->minRowsHigh = 0; + tablePtr.p->singleUserMode = 0; tablePtr.p->tableType = DictTabInfo::UserTable; tablePtr.p->primaryTableId = RNIL; // volatile elements @@ -5699,7 +5700,9 @@ Dbdict::execTAB_COMMITCONF(Signal* signal){ signal->theData[4] = (Uint32)tabPtr.p->tableType; signal->theData[5] = createTabPtr.p->key; signal->theData[6] = (Uint32)tabPtr.p->noOfPrimkey; - sendSignal(DBTC_REF, GSN_TC_SCHVERREQ, signal, 7, JBB); + signal->theData[7] = (Uint32)tabPtr.p->singleUserMode; + + sendSignal(DBTC_REF, GSN_TC_SCHVERREQ, signal, 8, JBB); return; } @@ -6129,6 +6132,7 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it, tablePtr.p->minRowsHigh = c_tableDesc.MinRowsHigh; tablePtr.p->defaultNoPartFlag = c_tableDesc.DefaultNoPartFlag; tablePtr.p->linearHashFlag = c_tableDesc.LinearHashFlag; + tablePtr.p->singleUserMode = c_tableDesc.SingleUserMode; { Rope frm(c_rope_pool, tablePtr.p->frmData); diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp index 7e98daa39c3..9665f016600 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp +++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp @@ -375,6 +375,11 @@ public: Uint32 fragmentCount; Uint32 m_tablespace_id; + + /* + * Access rights to table during single user mode + */ + Uint8 singleUserMode; }; typedef Ptr<TableRecord> TableRecordPtr; diff --git a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp 
b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp index f7e27359261..b4dc445e94d 100644 --- a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp +++ b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp @@ -74,7 +74,6 @@ #define ZWRONG_FAILURE_NUMBER_ERROR 302 #define ZWRONG_START_NODE_ERROR 303 #define ZNO_REPLICA_FOUND_ERROR 304 -#define ZNODE_START_DISALLOWED_ERROR 309 // -------------------------------------- // Codes from LQH @@ -1645,6 +1644,8 @@ private: // NR Uint32 c_dictLockSlavePtrI_nodeRestart; // userPtr for NR void recvDictLockConf_nodeRestart(Signal* signal, Uint32 data, Uint32 ret); + + Uint32 c_error_7181_ref; }; #if (DIH_CDATA_SIZE < _SYSFILE_SIZE32) diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index 5e89f3f1d95..1fe932aaae8 100644 --- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -1772,7 +1772,8 @@ void Dbdih::execSTART_PERMREF(Signal* signal) { jamEntry(); Uint32 errorCode = signal->theData[1]; - if (errorCode == StartPermRef::ZNODE_ALREADY_STARTING_ERROR) { + if (errorCode == StartPermRef::ZNODE_ALREADY_STARTING_ERROR || + errorCode == StartPermRef::ZNODE_START_DISALLOWED_ERROR) { jam(); /*-----------------------------------------------------------------------*/ // The master was busy adding another node. We will wait for a second and @@ -2122,49 +2123,45 @@ void Dbdih::execINCL_NODECONF(Signal* signal) TstartNode_or_blockref = signal->theData[0]; TsendNodeId = signal->theData[1]; - if (TstartNode_or_blockref == clocallqhblockref) { - jam(); - /*-----------------------------------------------------------------------*/ - // THIS SIGNAL CAME FROM THE LOCAL LQH BLOCK. - // WE WILL NOW SEND INCLUDE TO THE TC BLOCK. - /*-----------------------------------------------------------------------*/ - signal->theData[0] = reference(); - signal->theData[1] = c_nodeStartSlave.nodeId; - sendSignal(clocaltcblockref, GSN_INCL_NODEREQ, signal, 2, JBB); - return; - }//if - if (TstartNode_or_blockref == clocaltcblockref) { - jam(); - /*----------------------------------------------------------------------*/ - // THIS SIGNAL CAME FROM THE LOCAL LQH BLOCK. - // WE WILL NOW SEND INCLUDE TO THE DICT BLOCK. - /*----------------------------------------------------------------------*/ - signal->theData[0] = reference(); - signal->theData[1] = c_nodeStartSlave.nodeId; - sendSignal(cdictblockref, GSN_INCL_NODEREQ, signal, 2, JBB); - return; - }//if - if (TstartNode_or_blockref == cdictblockref) { - jam(); - /*-----------------------------------------------------------------------*/ - // THIS SIGNAL CAME FROM THE LOCAL DICT BLOCK. WE WILL NOW SEND CONF TO THE - // BACKUP. - /*-----------------------------------------------------------------------*/ - signal->theData[0] = reference(); - signal->theData[1] = c_nodeStartSlave.nodeId; - sendSignal(BACKUP_REF, GSN_INCL_NODEREQ, signal, 2, JBB); - - // Suma will not send response to this for now, later... 
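Several hunks in this patch lean on the kernel's error-insert machinery: ERROR_codes.txt reserves new codes (8039 to delay INCL_NODECONF in DBTC, 7181/7182 for the GCP takeover races in DBDIH, 4025-4028 for out-of-memory inserts in DBTUP), and the DBDIH/DBTC hunks below test them with ERROR_INSERTED(n) and clear them with CLEAR_ERROR_INSERT_VALUE. A stripped-down model of that one-shot toggle, with hypothetical names where they differ from the block code:

#include <cstdio>

// Simplified stand-in for the block-local error-insert value; in the real
// kernel this is per-block state armed through an NDB_TAMPER/DUMP style command.
static unsigned g_error_insert_value = 0;

#define ERROR_INSERTED(x)         (g_error_insert_value == (unsigned)(x))
#define CLEAR_ERROR_INSERT_VALUE  (g_error_insert_value = 0)
#define SET_ERROR_INSERT_VALUE(x) (g_error_insert_value = (unsigned)(x))

void on_incl_nodereq()   // models the DBTC 8039 hook added in this patch
{
  if (ERROR_INSERTED(8039)) {
    CLEAR_ERROR_INSERT_VALUE;   // one-shot: fire once, then behave normally
    std::printf("error insert 8039: kill starting node, delay INCL_NODECONF\n");
    return;
  }
  std::printf("normal path: send INCL_NODECONF immediately\n");
}

int main() {
  SET_ERROR_INSERT_VALUE(8039);
  on_incl_nodereq();   // takes the fault-injection path
  on_incl_nodereq();   // value was cleared, normal path
}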
- sendSignal(SUMA_REF, GSN_INCL_NODEREQ, signal, 2, JBB); - return; - }//if - if (TstartNode_or_blockref == numberToRef(BACKUP, getOwnNodeId())){ - jam(); - signal->theData[0] = c_nodeStartSlave.nodeId; - signal->theData[1] = cownNodeId; - sendSignal(cmasterdihref, GSN_INCL_NODECONF, signal, 2, JBB); - c_nodeStartSlave.nodeId = 0; - return; + Uint32 blocklist[6]; + blocklist[0] = clocallqhblockref; + blocklist[1] = clocaltcblockref; + blocklist[2] = cdictblockref; + blocklist[3] = numberToRef(BACKUP, getOwnNodeId()); + blocklist[4] = numberToRef(SUMA, getOwnNodeId()); + blocklist[5] = 0; + + for (Uint32 i = 0; blocklist[i] != 0; i++) + { + if (TstartNode_or_blockref == blocklist[i]) + { + jam(); + if (getNodeStatus(c_nodeStartSlave.nodeId) == NodeRecord::ALIVE && + blocklist[i+1] != 0) + { + /** + * Send to next in block list + */ + jam(); + signal->theData[0] = reference(); + signal->theData[1] = c_nodeStartSlave.nodeId; + sendSignal(blocklist[i+1], GSN_INCL_NODEREQ, signal, 2, JBB); + return; + } + else + { + /** + * All done, reply to master + */ + jam(); + signal->theData[0] = c_nodeStartSlave.nodeId; + signal->theData[1] = cownNodeId; + sendSignal(cmasterdihref, GSN_INCL_NODECONF, signal, 2, JBB); + + c_nodeStartSlave.nodeId = 0; + return; + } + } } ndbrequire(cmasterdihref = reference()); @@ -2283,7 +2280,7 @@ void Dbdih::execSTART_INFOREQ(Signal* signal) StartInfoRef *const ref =(StartInfoRef*)&signal->theData[0]; ref->startingNodeId = startNode; ref->sendingNodeId = cownNodeId; - ref->errorCode = ZNODE_START_DISALLOWED_ERROR; + ref->errorCode = StartPermRef::ZNODE_START_DISALLOWED_ERROR; sendSignal(cmasterdihref, GSN_START_INFOREF, signal, StartInfoRef::SignalLength, JBB); return; @@ -5096,6 +5093,16 @@ void Dbdih::execMASTER_GCPREQ(Signal* signal) } else { ndbrequire(failedNodePtr.p->nodeStatus == NodeRecord::DYING); }//if + + if (ERROR_INSERTED(7181)) + { + ndbout_c("execGCP_TCFINISHED in MASTER_GCPREQ"); + CLEAR_ERROR_INSERT_VALUE; + signal->theData[0] = c_error_7181_ref; + signal->theData[1] = coldgcp; + execGCP_TCFINISHED(signal); + } + MasterGCPConf::State gcpState; switch (cgcpParticipantState) { case GCP_PARTICIPANT_READY: @@ -5162,6 +5169,15 @@ void Dbdih::execMASTER_GCPREQ(Signal* signal) masterGCPConf->lcpActive[i] = SYSFILE->lcpActive[i]; sendSignal(newMasterBlockref, GSN_MASTER_GCPCONF, signal, MasterGCPConf::SignalLength, JBB); + + if (ERROR_INSERTED(7182)) + { + ndbout_c("execGCP_TCFINISHED in MASTER_GCPREQ"); + CLEAR_ERROR_INSERT_VALUE; + signal->theData[0] = c_error_7181_ref; + signal->theData[1] = coldgcp; + execGCP_TCFINISHED(signal); + } }//Dbdih::execMASTER_GCPREQ() void Dbdih::execMASTER_GCPCONF(Signal* signal) @@ -7923,10 +7939,10 @@ void Dbdih::execGCP_NODEFINISH(Signal* signal) } else if (cmasterState == MASTER_TAKE_OVER_GCP) { jam(); //------------------------------------------------------------- - // We are currently taking over as master. We will delay the - // signal until we have completed the take over gcp handling. + // We are currently taking over as master. Ignore + // signal in this case since we will discover it in reception of + // MASTER_GCPCONF. 
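The execINCL_NODECONF rewrite above replaces four near-identical if blocks with a zero-terminated block list that the request is forwarded along (LQH, TC, DICT, BACKUP, SUMA), replying to the master once the end of the list is reached or the starting node is no longer alive. A simplified, self-contained model of that table-driven forwarding loop; the numeric block references here are stand-ins, not NDB kernel values:

#include <cstdio>

// Stand-ins for block references; 0 terminates the list.
static const unsigned kBlockList[] = { 101 /*LQH*/, 102 /*TC*/, 103 /*DICT*/,
                                       104 /*BACKUP*/, 105 /*SUMA*/, 0 };

// Called when block 'sender' confirms inclusion of the starting node.
// Returns the next block to ask, or 0 when it is time to reply to the master.
unsigned next_block_to_include(unsigned sender, bool starting_node_alive)
{
  for (unsigned i = 0; kBlockList[i] != 0; i++) {
    if (sender != kBlockList[i])
      continue;
    if (starting_node_alive && kBlockList[i + 1] != 0)
      return kBlockList[i + 1];   // forward INCL_NODEREQ to the next block
    return 0;                     // list exhausted (or node died): confirm to master
  }
  return 0;                       // unknown sender: treat as done in this sketch
}

int main() {
  unsigned b = 101;
  while ((b = next_block_to_include(b, true)) != 0)
    std::printf("forward to block %u\n", b);
  std::printf("reply INCL_NODECONF to master\n");
}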
//------------------------------------------------------------- - sendSignalWithDelay(reference(), GSN_GCP_NODEFINISH, signal, 20, 3); return; } else { ndbrequire(cmasterState == MASTER_ACTIVE); @@ -8061,6 +8077,7 @@ void Dbdih::execGCP_COMMIT(Signal* signal) cgckptflag = false; emptyverificbuffer(signal, true); cgcpParticipantState = GCP_PARTICIPANT_COMMIT_RECEIVED; + signal->theData[0] = calcDihBlockRef(masterNodeId); signal->theData[1] = coldgcp; sendSignal(clocaltcblockref, GSN_GCP_NOMORETRANS, signal, 2, JBB); return; @@ -8070,14 +8087,25 @@ void Dbdih::execGCP_TCFINISHED(Signal* signal) { jamEntry(); CRASH_INSERTION(7007); + Uint32 retRef = signal->theData[0]; Uint32 gci = signal->theData[1]; ndbrequire(gci == coldgcp); + if (ERROR_INSERTED(7181) || ERROR_INSERTED(7182)) + { + c_error_7181_ref = retRef; // Save ref + ndbout_c("killing %d", refToNode(cmasterdihref)); + signal->theData[0] = 9999; + sendSignal(numberToRef(CMVMI, refToNode(cmasterdihref)), + GSN_NDB_TAMPER, signal, 1, JBB); + return; + } + cgcpParticipantState = GCP_PARTICIPANT_TC_FINISHED; signal->theData[0] = cownNodeId; signal->theData[1] = coldgcp; signal->theData[2] = cfailurenr; - sendSignal(cmasterdihref, GSN_GCP_NODEFINISH, signal, 3, JBB); + sendSignal(retRef, GSN_GCP_NODEFINISH, signal, 3, JBB); }//Dbdih::execGCP_TCFINISHED() /*****************************************************************************/ diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index b8882d72913..56d320de68c 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -10570,6 +10570,15 @@ void Dblqh::copyCompletedLab(Signal* signal) closeCopyLab(signal); return; }//if + + if (scanptr.p->scanState == ScanRecord::WAIT_LQHKEY_COPY && + scanptr.p->scanErrorCounter) + { + jam(); + closeCopyLab(signal); + return; + } + if (scanptr.p->scanState == ScanRecord::WAIT_LQHKEY_COPY) { jam(); /*---------------------------------------------------------------------------*/ @@ -10658,13 +10667,16 @@ void Dblqh::continueCopyAfterBlockedLab(Signal* signal) void Dblqh::copyLqhKeyRefLab(Signal* signal) { ndbrequire(tcConnectptr.p->transid[1] == signal->theData[4]); - tcConnectptr.p->copyCountWords -= signal->theData[3]; + Uint32 copyWords = signal->theData[3]; scanptr.i = tcConnectptr.p->tcScanRec; c_scanRecordPool.getPtr(scanptr); scanptr.p->scanErrorCounter++; tcConnectptr.p->errorCode = terrorCode; - closeCopyLab(signal); - return; + + LqhKeyConf* conf = (LqhKeyConf*)signal->getDataPtrSend(); + conf->transId1 = copyWords; + conf->transId2 = tcConnectptr.p->transid[1]; + copyCompletedLab(signal); }//Dblqh::copyLqhKeyRefLab() void Dblqh::closeCopyLab(Signal* signal) @@ -10675,6 +10687,7 @@ void Dblqh::closeCopyLab(Signal* signal) // Wait until all of those have arrived until we start the // close process. 
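The GCP changes above stop hard-coding the reply destination: DBDIH now puts its own block reference into GCP_NOMORETRANS (theData[0]), DBTC saves it as c_gcp_ref, and GCP_TCFINISHED is sent back to that saved reference instead of cmasterdihref, so the answer reaches whichever node actually asked, even across a master takeover. A small sketch of the carry-the-reply-reference idea, using made-up message structs rather than real NDB signal layouts:

#include <cstdint>
#include <cstdio>

struct GcpNoMoreTrans { uint32_t senderRef; uint32_t gci; };  // request
struct GcpTcFinished  { uint32_t senderRef; uint32_t gci; };  // reply

struct Tc {
  uint32_t saved_ref = 0;   // analogous to c_gcp_ref
  uint32_t saved_gci = 0;

  void on_no_more_trans(const GcpNoMoreTrans &req) {
    saved_ref = req.senderRef;   // remember who asked
    saved_gci = req.gci;
  }
  GcpTcFinished finish() const {
    // Reply to the saved reference, not to a fixed "current master" address.
    return GcpTcFinished{ saved_ref, saved_gci };
  }
};

int main() {
  Tc tc;
  tc.on_no_more_trans(GcpNoMoreTrans{ 0xF00D /* new master's ref */, 42 });
  GcpTcFinished rep = tc.finish();
  std::printf("send GCP_TCFINISHED to ref 0x%X for gci %u\n",
              (unsigned)rep.senderRef, (unsigned)rep.gci);
}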
/*---------------------------------------------------------------------------*/ + scanptr.p->scanState = ScanRecord::WAIT_LQHKEY_COPY; jam(); return; }//if diff --git a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp index dae8ee7e73b..42f4033dd4a 100644 --- a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp +++ b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp @@ -710,6 +710,7 @@ public: Uint8 tckeyrec; // Ändrad från R Uint8 tcindxrec; Uint8 apiFailState; // Ändrad från R + Uint8 singleUserMode; ReturnSignal returnsignal; Uint8 timeOutCounter; @@ -961,10 +962,21 @@ public: struct TableRecord { TableRecord() {} Uint32 currentSchemaVersion; - Uint8 enabled; - Uint8 dropping; + Uint16 m_flags; Uint8 tableType; - Uint8 storedTable; + Uint8 singleUserMode; + + enum { + TR_ENABLED = 1 << 0, + TR_DROPPING = 1 << 1, + TR_STORED_TABLE = 1 << 2 + }; + Uint8 get_enabled() const { return (m_flags & TR_ENABLED) != 0; } + Uint8 get_dropping() const { return (m_flags & TR_DROPPING) != 0; } + Uint8 get_storedTable() const { return (m_flags & TR_STORED_TABLE) != 0; } + void set_enabled(Uint8 f) { f ? m_flags |= (Uint16)TR_ENABLED : m_flags &= ~(Uint16)TR_ENABLED; } + void set_dropping(Uint8 f) { f ? m_flags |= (Uint16)TR_DROPPING : m_flags &= ~(Uint16)TR_DROPPING; } + void set_storedTable(Uint8 f) { f ? m_flags |= (Uint16)TR_STORED_TABLE : m_flags &= ~(Uint16)TR_STORED_TABLE; } Uint8 noOfKeyAttr; Uint8 hasCharAttr; @@ -972,7 +984,7 @@ public: Uint8 hasVarKeys; bool checkTable(Uint32 schemaVersion) const { - return enabled && !dropping && + return get_enabled() && !get_dropping() && (table_version_major(schemaVersion) == table_version_major(currentSchemaVersion)); } @@ -1840,9 +1852,14 @@ private: Uint32 transid2); void removeMarkerForFailedAPI(Signal* signal, Uint32 nodeId, Uint32 bucket); - bool getAllowStartTransaction() const { - if(getNodeState().getSingleUserMode()) - return true; + bool getAllowStartTransaction(Uint32 nodeId, Uint32 table_single_user_mode) const { + if (unlikely(getNodeState().getSingleUserMode())) + { + if (getNodeState().getSingleUserApi() == nodeId || table_single_user_mode) + return true; + else + return false; + } return getNodeState().startLevel < NodeState::SL_STOPPING_2; } @@ -1955,5 +1972,8 @@ private: // those variables should be removed and exchanged for stack // variable communication. 
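The Dbtc::TableRecord change above folds the separate enabled/dropping/storedTable bytes into a single m_flags word with get_/set_ accessors, which keeps the record compact now that singleUserMode is added alongside it. A stand-alone sketch of the same bitmask accessor pattern:

#include <cstdint>
#include <cassert>

struct TableFlags {
  uint16_t m_flags = 0;

  enum {
    TR_ENABLED      = 1 << 0,
    TR_DROPPING     = 1 << 1,
    TR_STORED_TABLE = 1 << 2
  };

  bool get_enabled()     const { return (m_flags & TR_ENABLED) != 0; }
  bool get_dropping()    const { return (m_flags & TR_DROPPING) != 0; }
  bool get_storedTable() const { return (m_flags & TR_STORED_TABLE) != 0; }

  void set(uint16_t bit, bool on) { if (on) m_flags |= bit; else m_flags &= (uint16_t)~bit; }
  void set_enabled(bool f)     { set(TR_ENABLED, f); }
  void set_dropping(bool f)    { set(TR_DROPPING, f); }
  void set_storedTable(bool f) { set(TR_STORED_TABLE, f); }
};

int main() {
  TableFlags t;
  t.set_enabled(true);
  t.set_storedTable(true);
  assert(t.get_enabled() && !t.get_dropping() && t.get_storedTable());
  t.set_enabled(false);
  assert(!t.get_enabled());
}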
/**************************************************************************/ + + Uint32 c_gcp_ref; }; + #endif diff --git a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp index 7ea6d3936b0..024d7bdb00c 100644 --- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp +++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp @@ -311,6 +311,19 @@ void Dbtc::execINCL_NODEREQ(Signal* signal) hostptr.p->hostStatus = HS_ALIVE; signal->theData[0] = cownref; c_alive_nodes.set(hostptr.i); + + if (ERROR_INSERTED(8039)) + { + CLEAR_ERROR_INSERT_VALUE; + Uint32 save = signal->theData[0]; + signal->theData[0] = 9999; + sendSignal(numberToRef(CMVMI, hostptr.i), + GSN_NDB_TAMPER, signal, 1, JBB); + signal->theData[0] = save; + sendSignalWithDelay(tblockref, GSN_INCL_NODECONF, signal, 5000, 1); + return; + } + sendSignal(tblockref, GSN_INCL_NODECONF, signal, 1, JBB); } @@ -330,19 +343,21 @@ void Dbtc::execTC_SCHVERREQ(Signal* signal) tabptr.i = signal->theData[0]; ptrCheckGuard(tabptr, ctabrecFilesize, tableRecord); tabptr.p->currentSchemaVersion = signal->theData[1]; - tabptr.p->storedTable = (bool)signal->theData[2]; + tabptr.p->m_flags = 0; + tabptr.p->set_storedTable((bool)signal->theData[2]); BlockReference retRef = signal->theData[3]; tabptr.p->tableType = (Uint8)signal->theData[4]; BlockReference retPtr = signal->theData[5]; Uint32 noOfKeyAttr = signal->theData[6]; + tabptr.p->singleUserMode = (Uint8)signal->theData[7]; ndbrequire(noOfKeyAttr <= MAX_ATTRIBUTES_IN_INDEX); const KeyDescriptor* desc = g_key_descriptor_pool.getPtr(tabptr.i); ndbrequire(noOfKeyAttr == desc->noOfKeyAttr); - ndbrequire(tabptr.p->enabled == false); - tabptr.p->enabled = true; - tabptr.p->dropping = false; + ndbrequire(tabptr.p->get_enabled() == false); + tabptr.p->set_enabled(true); + tabptr.p->set_dropping(false); tabptr.p->noOfKeyAttr = desc->noOfKeyAttr; tabptr.p->hasCharAttr = desc->hasCharAttr; tabptr.p->noOfDistrKeys = desc->noOfDistrKeys; @@ -366,7 +381,7 @@ Dbtc::execPREP_DROP_TAB_REQ(Signal* signal) Uint32 senderRef = req->senderRef; Uint32 senderData = req->senderData; - if(!tabPtr.p->enabled){ + if(!tabPtr.p->get_enabled()){ jam(); PrepDropTabRef* ref = (PrepDropTabRef*)signal->getDataPtrSend(); ref->senderRef = reference(); @@ -378,7 +393,7 @@ Dbtc::execPREP_DROP_TAB_REQ(Signal* signal) return; } - if(tabPtr.p->dropping){ + if(tabPtr.p->get_dropping()){ jam(); PrepDropTabRef* ref = (PrepDropTabRef*)signal->getDataPtrSend(); ref->senderRef = reference(); @@ -390,7 +405,7 @@ Dbtc::execPREP_DROP_TAB_REQ(Signal* signal) return; } - tabPtr.p->dropping = true; + tabPtr.p->set_dropping(true); tabPtr.p->dropTable.senderRef = senderRef; tabPtr.p->dropTable.senderData = senderData; @@ -426,7 +441,7 @@ Dbtc::execWAIT_DROP_TAB_CONF(Signal* signal) tabPtr.i = conf->tableId; ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord); - ndbrequire(tabPtr.p->dropping == true); + ndbrequire(tabPtr.p->get_dropping() == true); Uint32 nodeId = refToNode(conf->senderRef); tabPtr.p->dropTable.waitDropTabCount.clearWaitingFor(nodeId); @@ -456,7 +471,7 @@ Dbtc::execWAIT_DROP_TAB_REF(Signal* signal) tabPtr.i = ref->tableId; ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord); - ndbrequire(tabPtr.p->dropping == true); + ndbrequire(tabPtr.p->get_dropping() == true); Uint32 nodeId = refToNode(ref->senderRef); tabPtr.p->dropTable.waitDropTabCount.clearWaitingFor(nodeId); @@ -493,7 +508,7 @@ Dbtc::checkWaitDropTabFailedLqh(Signal* signal, Uint32 nodeId, Uint32 tableId) for(Uint32 i = 0; i<RT_BREAK && 
tabPtr.i < ctabrecFilesize; i++, tabPtr.i++){ jam(); ptrAss(tabPtr, tableRecord); - if(tabPtr.p->enabled && tabPtr.p->dropping){ + if(tabPtr.p->get_enabled() && tabPtr.p->get_dropping()){ if(tabPtr.p->dropTable.waitDropTabCount.isWaitingFor(nodeId)){ jam(); conf->senderRef = calcLqhBlockRef(nodeId); @@ -534,7 +549,7 @@ Dbtc::execDROP_TAB_REQ(Signal* signal) Uint32 senderData = req->senderData; DropTabReq::RequestType rt = (DropTabReq::RequestType)req->requestType; - if(!tabPtr.p->enabled && rt == DropTabReq::OnlineDropTab){ + if(!tabPtr.p->get_enabled() && rt == DropTabReq::OnlineDropTab){ jam(); DropTabRef* ref = (DropTabRef*)signal->getDataPtrSend(); ref->senderRef = reference(); @@ -546,7 +561,7 @@ Dbtc::execDROP_TAB_REQ(Signal* signal) return; } - if(!tabPtr.p->dropping && rt == DropTabReq::OnlineDropTab){ + if(!tabPtr.p->get_dropping() && rt == DropTabReq::OnlineDropTab){ jam(); DropTabRef* ref = (DropTabRef*)signal->getDataPtrSend(); ref->senderRef = reference(); @@ -558,8 +573,8 @@ Dbtc::execDROP_TAB_REQ(Signal* signal) return; } - tabPtr.p->enabled = false; - tabPtr.p->dropping = false; + tabPtr.p->set_enabled(false); + tabPtr.p->set_dropping(false); DropTabConf * conf = (DropTabConf*)signal->getDataPtrSend(); conf->tableId = tabPtr.i; @@ -1202,16 +1217,14 @@ void Dbtc::execTCSEIZEREQ(Signal* signal) const NodeId senderNodeId = refToNode(tapiBlockref); const bool local = senderNodeId == getOwnNodeId() || senderNodeId == 0; - if(!(senderNodeId == getNodeState().getSingleUserApi()) && - !getNodeState().getSingleUserMode()) { - if(!(sl==NodeState::SL_SINGLEUSER && - senderNodeId == getNodeState().getSingleUserApi())) { + { + { if (!(sl == NodeState::SL_STARTED || (sl == NodeState::SL_STARTING && local == true))) { jam(); - Uint32 errCode; - if(!(sl == NodeState::SL_SINGLEUSER && local)) + Uint32 errCode = 0; + if(!local) { switch(sl){ case NodeState::SL_STARTING: @@ -1219,6 +1232,8 @@ void Dbtc::execTCSEIZEREQ(Signal* signal) break; case NodeState::SL_STOPPING_1: case NodeState::SL_STOPPING_2: + if (getNodeState().getSingleUserMode()) + break; case NodeState::SL_STOPPING_3: case NodeState::SL_STOPPING_4: if(getNodeState().stopping.systemShutdown) @@ -1227,16 +1242,18 @@ void Dbtc::execTCSEIZEREQ(Signal* signal) errCode = ZNODE_SHUTDOWN_IN_PROGRESS; break; case NodeState::SL_SINGLEUSER: - errCode = ZCLUSTER_IN_SINGLEUSER_MODE; break; default: errCode = ZWRONG_STATE; break; } - signal->theData[0] = tapiPointer; - signal->theData[1] = errCode; - sendSignal(tapiBlockref, GSN_TCSEIZEREF, signal, 2, JBB); - return; + if (errCode) + { + signal->theData[0] = tapiPointer; + signal->theData[1] = errCode; + sendSignal(tapiBlockref, GSN_TCSEIZEREF, signal, 2, JBB); + return; + } }//if (!(sl == SL_SINGLEUSER)) } //if } @@ -1724,8 +1741,14 @@ Dbtc::TCKEY_abort(Signal* signal, int place) * Initialize object before starting error handling */ initApiConnectRec(signal, apiConnectptr.p, true); +start_failure: switch(getNodeState().startLevel){ case NodeState::SL_STOPPING_2: + if (getNodeState().getSingleUserMode()) + { + terrorCode = ZCLUSTER_IN_SINGLEUSER_MODE; + break; + } case NodeState::SL_STOPPING_3: case NodeState::SL_STOPPING_4: if(getNodeState().stopping.systemShutdown) @@ -1736,6 +1759,12 @@ Dbtc::TCKEY_abort(Signal* signal, int place) case NodeState::SL_SINGLEUSER: terrorCode = ZCLUSTER_IN_SINGLEUSER_MODE; break; + case NodeState::SL_STOPPING_1: + if (getNodeState().getSingleUserMode()) + { + terrorCode = ZCLUSTER_IN_SINGLEUSER_MODE; + break; + } default: terrorCode = ZWRONG_STATE; break; @@ 
-1757,6 +1786,13 @@ Dbtc::TCKEY_abort(Signal* signal, int place) return; } + case 60: + { + jam(); + initApiConnectRec(signal, apiConnectptr.p, true); + apiConnectptr.p->m_exec_flag = 1; + goto start_failure; + } default: jam(); systemErrorLab(signal, __LINE__); @@ -2377,6 +2413,7 @@ void Dbtc::initApiConnectRec(Signal* signal, regApiPtr->buddyPtr = RNIL; regApiPtr->currSavePointId = 0; regApiPtr->m_transaction_nodes.clear(); + regApiPtr->singleUserMode = 0; // Trigger data releaseFiredTriggerData(®ApiPtr->theFiredTriggers), // Index data @@ -2486,6 +2523,7 @@ Dbtc::seizeCacheRecord(Signal* signal) /*****************************************************************************/ void Dbtc::execTCKEYREQ(Signal* signal) { + Uint32 sendersNodeId = refToNode(signal->getSendersBlockRef()); UintR compare_transid1, compare_transid2; UintR titcLenAiInTckeyreq; UintR TkeyLength; @@ -2529,9 +2567,12 @@ void Dbtc::execTCKEYREQ(Signal* signal) bool isIndexOpReturn = regApiPtr->indexOpReturn; regApiPtr->isIndexOp = false; // Reset marker regApiPtr->m_exec_flag |= TexecFlag; + TableRecordPtr localTabptr; + localTabptr.i = TtabIndex; + localTabptr.p = &tableRecord[TtabIndex]; switch (regApiPtr->apiConnectstate) { case CS_CONNECTED:{ - if (TstartFlag == 1 && getAllowStartTransaction() == true){ + if (TstartFlag == 1 && getAllowStartTransaction(sendersNodeId, localTabptr.p->singleUserMode) == true){ //--------------------------------------------------------------------- // Initialise API connect record if transaction is started. //--------------------------------------------------------------------- @@ -2539,7 +2580,7 @@ void Dbtc::execTCKEYREQ(Signal* signal) initApiConnectRec(signal, regApiPtr); regApiPtr->m_exec_flag = TexecFlag; } else { - if(getAllowStartTransaction() == true){ + if(getAllowStartTransaction(sendersNodeId, localTabptr.p->singleUserMode) == true){ /*------------------------------------------------------------------ * WE EXPECTED A START TRANSACTION. SINCE NO OPERATIONS HAVE BEEN * RECEIVED WE INDICATE THIS BY SETTING FIRST_TC_CONNECT TO RNIL TO @@ -2549,9 +2590,9 @@ void Dbtc::execTCKEYREQ(Signal* signal) return; } else { /** - * getAllowStartTransaction() == false + * getAllowStartTransaction(sendersNodeId) == false */ - TCKEY_abort(signal, 57); + TCKEY_abort(signal, TexecFlag ? 60 : 57); return; }//if } @@ -2566,6 +2607,13 @@ void Dbtc::execTCKEYREQ(Signal* signal) * the state will be CS_STARTED */ jam(); + if (unlikely(getNodeState().getSingleUserMode()) && + getNodeState().getSingleUserApi() != sendersNodeId && + !localTabptr.p->singleUserMode) + { + TCKEY_abort(signal, TexecFlag ? 60 : 57); + return; + } initApiConnectRec(signal, regApiPtr); regApiPtr->m_exec_flag = TexecFlag; } else { @@ -2586,6 +2634,10 @@ void Dbtc::execTCKEYREQ(Signal* signal) case CS_ABORTING: if (regApiPtr->abortState == AS_IDLE) { if (TstartFlag == 1) { + if(getAllowStartTransaction(sendersNodeId, localTabptr.p->singleUserMode) == false){ + TCKEY_abort(signal, TexecFlag ? 60 : 57); + return; + } //-------------------------------------------------------------------- // Previous transaction had been aborted and the abort was completed. // It is then OK to start a new transaction again. 
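The TCKEYREQ paths above all funnel through the extended getAllowStartTransaction(nodeId, table_single_user_mode): while the cluster is in single user mode, only the designated API node, or operations on tables whose singleUserMode setting allows it, may start transactions; everything else is aborted with ZCLUSTER_IN_SINGLEUSER_MODE. A compact restatement of that predicate, with the node-state lookups replaced by plain parameters (a simplified model, not the member function itself):

// Simplified model of Dbtc::getAllowStartTransaction(); the real version reads
// the cluster node state and also checks the stop level when not in single user mode.
bool allow_start_transaction(bool single_user_mode,
                             unsigned single_user_api_node,
                             unsigned requesting_node,
                             unsigned table_single_user_mode /* NDB_SUM_* value */)
{
  if (single_user_mode)
    return requesting_node == single_user_api_node || table_single_user_mode != 0;
  return true;  // real code additionally requires startLevel < SL_STOPPING_2
}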
@@ -2649,9 +2701,6 @@ void Dbtc::execTCKEYREQ(Signal* signal) return; }//switch - TableRecordPtr localTabptr; - localTabptr.i = TtabIndex; - localTabptr.p = &tableRecord[TtabIndex]; if (localTabptr.p->checkTable(tcKeyReq->tableSchemaVersion)) { ; } else { @@ -2710,6 +2759,8 @@ void Dbtc::execTCKEYREQ(Signal* signal) regTcPtr->savePointId = regApiPtr->currSavePointId; regApiPtr->executingIndexOp = RNIL; + regApiPtr->singleUserMode |= 1 << localTabptr.p->singleUserMode; + if (TcKeyReq::getExecutingTrigger(Treqinfo)) { // Save the TcOperationPtr for fireing operation regTcPtr->triggeringOperation = TsenderData; @@ -2841,7 +2892,7 @@ void Dbtc::execTCKEYREQ(Signal* signal) * THIS VARIABLE CONTROLS THE INTERVAL BETWEEN LCP'S AND * TEMP TABLES DON'T PARTICIPATE. * -------------------------------------------------------------------- */ - if (localTabptr.p->storedTable) { + if (localTabptr.p->get_storedTable()) { coperationsize = ((Toperationsize + TattrLen) + TkeyLength) + 17; } c_counters.cwriteCount = TwriteCount + 1; @@ -4674,6 +4725,7 @@ void Dbtc::copyApi(Signal* signal) regApiPtr->lqhkeyconfrec = Tlqhkeyconfrec; regApiPtr->commitAckMarker = TcommitAckMarker; regApiPtr->m_transaction_nodes = Tnodes; + regApiPtr->singleUserMode = 0; gcpPtr.i = TgcpPointer; ptrCheckGuard(gcpPtr, TgcpFilesize, localGcpRecord); @@ -4685,6 +4737,7 @@ void Dbtc::copyApi(Signal* signal) regTmpApiPtr->firstTcConnect = RNIL; regTmpApiPtr->lastTcConnect = RNIL; regTmpApiPtr->m_transaction_nodes.clear(); + regTmpApiPtr->singleUserMode = 0; releaseAllSeizedIndexOperations(regTmpApiPtr); }//Dbtc::copyApi() @@ -6154,9 +6207,11 @@ and otherwise we spread it out 310 ms. void Dbtc::timeOutLoopStartLab(Signal* signal, Uint32 api_con_ptr) { Uint32 end_ptr, time_passed, time_out_value, mask_value; + Uint32 old_mask_value= 0; const Uint32 api_con_sz= capiConnectFilesize; const Uint32 tc_timer= ctcTimer; const Uint32 time_out_param= ctimeOutValue; + const Uint32 old_time_out_param= c_abortRec.oldTimeOutValue; ctimeOutCheckHeartbeat = tc_timer; @@ -6177,16 +6232,50 @@ void Dbtc::timeOutLoopStartLab(Signal* signal, Uint32 api_con_ptr) jam(); mask_value= 31; } + if (time_out_param != old_time_out_param && + getNodeState().getSingleUserMode()) + { + // abort during single user mode, use old_mask_value as flag + // and calculate value to be used for connections with allowed api + if (old_time_out_param > 300) { + jam(); + old_mask_value= 63; + } else if (old_time_out_param < 30) { + jam(); + old_mask_value= 7; + } else { + jam(); + old_mask_value= 31; + } + } for ( ; api_con_ptr < end_ptr; api_con_ptr++) { Uint32 api_timer= getApiConTimer(api_con_ptr); jam(); if (api_timer != 0) { + Uint32 error= ZTIME_OUT_ERROR; time_out_value= time_out_param + (api_con_ptr & mask_value); + if (unlikely(old_mask_value)) // abort during single user mode + { + apiConnectptr.i = api_con_ptr; + ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); + if ((getNodeState().getSingleUserApi() == + refToNode(apiConnectptr.p->ndbapiBlockref)) || + !(apiConnectptr.p->singleUserMode & (1 << NDB_SUM_LOCKED))) + { + // api allowed during single user, use original timeout + time_out_value= + old_time_out_param + (api_con_ptr & old_mask_value); + } + else + { + error= ZCLUSTER_IN_SINGLEUSER_MODE; + } + } time_passed= tc_timer - api_timer; if (time_passed > time_out_value) { jam(); - timeOutFoundLab(signal, api_con_ptr, ZTIME_OUT_ERROR); + timeOutFoundLab(signal, api_con_ptr, error); api_con_ptr++; break; } @@ -6226,7 +6315,8 @@ void 
Dbtc::timeOutFoundLab(Signal* signal, Uint32 TapiConPtr, Uint32 errCode) << " code: " << errCode); switch (apiConnectptr.p->apiConnectstate) { case CS_STARTED: - if(apiConnectptr.p->lqhkeyreqrec == apiConnectptr.p->lqhkeyconfrec){ + if(apiConnectptr.p->lqhkeyreqrec == apiConnectptr.p->lqhkeyconfrec && + errCode != ZCLUSTER_IN_SINGLEUSER_MODE){ jam(); /* We are waiting for application to continue the transaction. In this @@ -6798,6 +6888,33 @@ void Dbtc::timeOutFoundFragLab(Signal* signal, UintR TscanConPtr) c_scan_frag_pool.getPtr(ptr, TscanConPtr); DEBUG(TscanConPtr << " timeOutFoundFragLab: scanFragState = "<< ptr.p->scanFragState); + const Uint32 time_out_param= ctimeOutValue; + const Uint32 old_time_out_param= c_abortRec.oldTimeOutValue; + + if (unlikely(time_out_param != old_time_out_param && + getNodeState().getSingleUserMode())) + { + jam(); + ScanRecordPtr scanptr; + scanptr.i = ptr.p->scanRec; + ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord); + ApiConnectRecordPtr TlocalApiConnectptr; + TlocalApiConnectptr.i = scanptr.p->scanApiRec; + ptrCheckGuard(TlocalApiConnectptr, capiConnectFilesize, apiConnectRecord); + + if (refToNode(TlocalApiConnectptr.p->ndbapiBlockref) == + getNodeState().getSingleUserApi()) + { + jam(); + Uint32 val = ctcTimer - ptr.p->scanFragTimer; + if (val <= old_time_out_param) + { + jam(); + goto next; + } + } + } + /*-------------------------------------------------------------------------*/ // The scan fragment has expired its timeout. Check its state to decide // what to do. @@ -6859,6 +6976,7 @@ void Dbtc::timeOutFoundFragLab(Signal* signal, UintR TscanConPtr) break; }//switch +next: signal->theData[0] = TcContinueB::ZCONTINUE_TIME_OUT_FRAG_CONTROL; signal->theData[1] = TscanConPtr + 1; sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB); @@ -6886,6 +7004,7 @@ void Dbtc::timeOutFoundFragLab(Signal* signal, UintR TscanConPtr) void Dbtc::execGCP_NOMORETRANS(Signal* signal) { jamEntry(); + c_gcp_ref = signal->theData[0]; tcheckGcpId = signal->theData[1]; if (cfirstgcp != RNIL) { jam(); @@ -7104,20 +7223,15 @@ Dbtc::nodeFailCheckTransactions(Signal* signal, for (transPtr.i = transPtrI; transPtr.i < capiConnectFilesize; transPtr.i++) { ptrCheckGuard(transPtr, capiConnectFilesize, apiConnectRecord); - Uint32 state = transPtr.p->apiConnectstate; if (transPtr.p->m_transaction_nodes.get(failedNodeId)) { jam(); - // avoid assertion in timeoutfoundlab - if (state != CS_PREPARE_TO_COMMIT) - { - // Force timeout regardless of state - c_appl_timeout_value = 1; - setApiConTimer(transPtr.i, TtcTimer - 2, __LINE__); - timeOutFoundLab(signal, transPtr.i, ZNODEFAIL_BEFORE_COMMIT); - c_appl_timeout_value = TapplTimeout; - } + // Force timeout regardless of state + c_appl_timeout_value = 1; + setApiConTimer(transPtr.i, TtcTimer - 2, __LINE__); + timeOutFoundLab(signal, transPtr.i, ZNODEFAIL_BEFORE_COMMIT); + c_appl_timeout_value = TapplTimeout; } // Send CONTINUEB to continue later @@ -8081,6 +8195,7 @@ void Dbtc::initApiConnectFail(Signal* signal) apiConnectptr.p->ndbapiConnect = 0; apiConnectptr.p->buddyPtr = RNIL; apiConnectptr.p->m_transaction_nodes.clear(); + apiConnectptr.p->singleUserMode = 0; setApiConTimer(apiConnectptr.i, 0, __LINE__); switch(ttransStatus){ case LqhTransConf::Committed: @@ -8693,6 +8808,14 @@ void Dbtc::execSCAN_TABREQ(Signal* signal) } } + if (getNodeState().startLevel == NodeState::SL_SINGLEUSER && + getNodeState().getSingleUserApi() != + refToNode(apiConnectptr.p->ndbapiBlockref)) + { + errCode = ZCLUSTER_IN_SINGLEUSER_MODE; + goto 
SCAN_TAB_error; + } + seizeTcConnect(signal); tcConnectptr.p->apiConnect = apiConnectptr.i; tcConnectptr.p->tcConnectstate = OS_WAIT_SCAN; @@ -9939,6 +10062,7 @@ void Dbtc::sendScanTabConf(Signal* signal, ScanRecordPtr scanPtr) { void Dbtc::gcpTcfinished(Signal* signal) { + signal->theData[0] = c_gcp_ref; signal->theData[1] = tcheckGcpId; sendSignal(cdihblockref, GSN_GCP_TCFINISHED, signal, 2, JBB); }//Dbtc::gcpTcfinished() @@ -9987,6 +10111,7 @@ void Dbtc::initApiConnect(Signal* signal) apiConnectptr.p->buddyPtr = RNIL; apiConnectptr.p->currSavePointId = 0; apiConnectptr.p->m_transaction_nodes.clear(); + apiConnectptr.p->singleUserMode = 0; }//for apiConnectptr.i = tiacTmp - 1; ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); @@ -10015,6 +10140,7 @@ void Dbtc::initApiConnect(Signal* signal) apiConnectptr.p->buddyPtr = RNIL; apiConnectptr.p->currSavePointId = 0; apiConnectptr.p->m_transaction_nodes.clear(); + apiConnectptr.p->singleUserMode = 0; }//for apiConnectptr.i = (2 * tiacTmp) - 1; ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); @@ -10043,6 +10169,7 @@ void Dbtc::initApiConnect(Signal* signal) apiConnectptr.p->buddyPtr = RNIL; apiConnectptr.p->currSavePointId = 0; apiConnectptr.p->m_transaction_nodes.clear(); + apiConnectptr.p->singleUserMode = 0; }//for apiConnectptr.i = (3 * tiacTmp) - 1; ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); @@ -10227,10 +10354,11 @@ void Dbtc::initTable(Signal* signal) refresh_watch_dog(); ptrAss(tabptr, tableRecord); tabptr.p->currentSchemaVersion = 0; - tabptr.p->storedTable = true; + tabptr.p->m_flags = 0; + tabptr.p->set_storedTable(true); tabptr.p->tableType = 0; - tabptr.p->enabled = false; - tabptr.p->dropping = false; + tabptr.p->set_enabled(false); + tabptr.p->set_dropping(false); tabptr.p->noOfKeyAttr = 0; tabptr.p->hasCharAttr = 0; tabptr.p->noOfDistrKeys = 0; @@ -10364,6 +10492,7 @@ void Dbtc::releaseAbortResources(Signal* signal) apiConnectptr.p->firstTcConnect = RNIL; apiConnectptr.p->lastTcConnect = RNIL; apiConnectptr.p->m_transaction_nodes.clear(); + apiConnectptr.p->singleUserMode = 0; // MASV let state be CS_ABORTING until all // signals in the "air" have been received. 
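In timeOutLoopStartLab above, each connection's effective timeout is time_out_param + (api_con_ptr & mask_value), with the mask chosen from the configured timeout (7, 31 or 63), so expiries are spread out instead of a whole batch of connections firing in the same scan round; during single user mode a second timeout/mask pair keeps the original deadline for the still-allowed API node while everyone else is aborted. A small sketch of the jitter-by-index idea:

#include <cstdio>

// Pick a jitter mask from the configured timeout, as in timeOutLoopStartLab:
// short timeouts get little jitter, long timeouts get more.
unsigned jitter_mask(unsigned timeout_param)
{
  if (timeout_param > 300) return 63;
  if (timeout_param < 30)  return 7;
  return 31;
}

unsigned effective_timeout(unsigned timeout_param, unsigned connection_index)
{
  return timeout_param + (connection_index & jitter_mask(timeout_param));
}

int main() {
  for (unsigned i = 0; i < 4; i++)
    std::printf("conn %u times out after %u ticks\n", i, effective_timeout(100, i));
}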
Reset to CS_CONNECTED @@ -11012,7 +11141,7 @@ void Dbtc::execABORT_ALL_REQ(Signal* signal) const Uint32 senderData = req->senderData; const BlockReference senderRef = req->senderRef; - if(getAllowStartTransaction() == true && !getNodeState().getSingleUserMode()){ + if(getAllowStartTransaction(refToNode(senderRef), 0) == true && !getNodeState().getSingleUserMode()){ jam(); ref->senderData = senderData; @@ -11440,6 +11569,17 @@ void Dbtc::execTCINDXREQ(Signal* signal) regApiPtr->transid[1] = tcIndxReq->transId2; }//if + if (getNodeState().startLevel == NodeState::SL_SINGLEUSER && + getNodeState().getSingleUserApi() != + refToNode(regApiPtr->ndbapiBlockref)) + { + terrorCode = ZCLUSTER_IN_SINGLEUSER_MODE; + regApiPtr->m_exec_flag |= TcKeyReq::getExecuteFlag(tcIndxRequestInfo); + apiConnectptr = transPtr; + abortErrorLab(signal); + return; + } + if (ERROR_INSERTED(8036) || !seizeIndexOperation(regApiPtr, indexOpPtr)) { jam(); // Failed to allocate index operation @@ -13278,9 +13418,9 @@ void Dbtc::deleteFromIndexTable(Signal* signal, Uint32 Dbtc::TableRecord::getErrorCode(Uint32 schemaVersion) const { - if(!enabled) + if(!get_enabled()) return ZNO_SUCH_TABLE; - if(dropping) + if(get_dropping()) return ZDROP_TABLE_IN_PROGRESS; if(table_version_major(schemaVersion) != table_version_major(currentSchemaVersion)) return ZWRONG_SCHEMA_VERSION_ERROR; diff --git a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp index fecc4649fe9..3380131a01e 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp +++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp @@ -32,6 +32,82 @@ #include <../pgman.hpp> #include <../tsman.hpp> +// jams +#undef jam +#undef jamEntry +#ifdef DBTUP_BUFFER_CPP +#define jam() jamLine(10000 + __LINE__) +#define jamEntry() jamEntryLine(10000 + __LINE__) +#endif +#ifdef DBTUP_ROUTINES_CPP +#define jam() jamLine(15000 + __LINE__) +#define jamEntry() jamEntryLine(15000 + __LINE__) +#endif +#ifdef DBTUP_COMMIT_CPP +#define jam() jamLine(20000 + __LINE__) +#define jamEntry() jamEntryLine(20000 + __LINE__) +#endif +#ifdef DBTUP_FIXALLOC_CPP +#define jam() jamLine(25000 + __LINE__) +#define jamEntry() jamEntryLine(25000 + __LINE__) +#endif +#ifdef DBTUP_TRIGGER_CPP +#define jam() jamLine(30000 + __LINE__) +#define jamEntry() jamEntryLine(30000 + __LINE__) +#endif +#ifdef DBTUP_ABORT_CPP +#define jam() jamLine(35000 + __LINE__) +#define jamEntry() jamEntryLine(35000 + __LINE__) +#endif +#ifdef DBTUP_PAGE_MAP_CPP +#define jam() jamLine(40000 + __LINE__) +#define jamEntry() jamEntryLine(40000 + __LINE__) +#endif +#ifdef DBTUP_PAG_MAN_CPP +#define jam() jamLine(45000 + __LINE__) +#define jamEntry() jamEntryLine(45000 + __LINE__) +#endif +#ifdef DBTUP_STORE_PROC_DEF_CPP +#define jam() jamLine(50000 + __LINE__) +#define jamEntry() jamEntryLine(50000 + __LINE__) +#endif +#ifdef DBTUP_META_CPP +#define jam() jamLine(55000 + __LINE__) +#define jamEntry() jamEntryLine(55000 + __LINE__) +#endif +#ifdef DBTUP_TAB_DES_MAN_CPP +#define jam() jamLine(60000 + __LINE__) +#define jamEntry() jamEntryLine(60000 + __LINE__) +#endif +#ifdef DBTUP_GEN_CPP +#define jam() jamLine(65000 + __LINE__) +#define jamEntry() jamEntryLine(65000 + __LINE__) +#endif +#ifdef DBTUP_INDEX_CPP +#define jam() jamLine(70000 + __LINE__) +#define jamEntry() jamEntryLine(70000 + __LINE__) +#endif +#ifdef DBTUP_DEBUG_CPP +#define jam() jamLine(75000 + __LINE__) +#define jamEntry() jamEntryLine(75000 + __LINE__) +#endif +#ifdef DBTUP_VAR_ALLOC_CPP +#define jam() jamLine(80000 + __LINE__) 
+#define jamEntry() jamEntryLine(80000 + __LINE__) +#endif +#ifdef DBTUP_SCAN_CPP +#define jam() jamLine(85000 + __LINE__) +#define jamEntry() jamEntryLine(85000 + __LINE__) +#endif +#ifdef DBTUP_DISK_ALLOC_CPP +#define jam() jamLine(90000 + __LINE__) +#define jamEntry() jamEntryLine(90000 + __LINE__) +#endif +#ifndef jam +#define jam() jamLine(__LINE__) +#define jamEntry() jamEntryLine(__LINE__) +#endif + #ifdef VM_TRACE inline const char* dbgmask(const Bitmask<MAXNROFATTRIBUTESINWORDS>& bm) { static int i=0; static char buf[5][200]; @@ -70,22 +146,23 @@ inline const Uint32* ALIGN_WORD(const void* ptr) // only reports the line number in the file it currently is located in. // // DbtupExecQuery.cpp 0 -// DbtupBuffer.cpp 2000 -// DbtupRoutines.cpp 3000 -// DbtupCommit.cpp 5000 -// DbtupFixAlloc.cpp 6000 -// DbtupTrigger.cpp 7000 -// DbtupAbort.cpp 9000 -// DbtupPageMap.cpp 14000 -// DbtupPagMan.cpp 16000 -// DbtupStoredProcDef.cpp 18000 -// DbtupMeta.cpp 20000 -// DbtupTabDesMan.cpp 22000 -// DbtupGen.cpp 24000 -// DbtupIndex.cpp 28000 -// DbtupDebug.cpp 30000 -// DbtupVarAlloc.cpp 32000 -// DbtupScan.cpp 33000 +// DbtupBuffer.cpp 10000 +// DbtupRoutines.cpp 15000 +// DbtupCommit.cpp 20000 +// DbtupFixAlloc.cpp 25000 +// DbtupTrigger.cpp 30000 +// DbtupAbort.cpp 35000 +// DbtupPageMap.cpp 40000 +// DbtupPagMan.cpp 45000 +// DbtupStoredProcDef.cpp 50000 +// DbtupMeta.cpp 55000 +// DbtupTabDesMan.cpp 60000 +// DbtupGen.cpp 65000 +// DbtupIndex.cpp 70000 +// DbtupDebug.cpp 75000 +// DbtupVarAlloc.cpp 80000 +// DbtupScan.cpp 85000 +// DbtupDiskAlloc.cpp 90000 //------------------------------------------------------------------ /* @@ -1209,9 +1286,40 @@ typedef Ptr<HostBuffer> HostBufferPtr; */ struct Var_part_ref { +#ifdef NDB_32BIT_VAR_REF + /* + In versions prior to ndb 6.1.6, 6.2.1 and mysql 5.1.17 + Running this code limits DataMemory to 16G, also online + upgrade not possible between versions + */ Uint32 m_ref; - }; + STATIC_CONST( SZ32 = 1 ); + + void copyout(Local_key* dst) const { + dst->m_page_no = m_ref >> MAX_TUPLES_BITS; + dst->m_page_idx = m_ref & MAX_TUPLES_PER_PAGE; + } + void assign(const Local_key* src) { + m_ref = (src->m_page_no << MAX_TUPLES_BITS) | src->m_page_idx; + } +#else + Uint32 m_page_no; + Uint32 m_page_idx; + STATIC_CONST( SZ32 = 2 ); + + void copyout(Local_key* dst) const { + dst->m_page_no = m_page_no; + dst->m_page_idx = m_page_idx; + } + + void assign(const Local_key* src) { + m_page_no = src->m_page_no; + m_page_idx = src->m_page_idx; + } +#endif + }; + struct Tuple_header { union { @@ -2851,12 +2959,13 @@ Uint32* Dbtup::get_ptr(Ptr<Page>* pagePtr, Var_part_ref ref) { PagePtr tmp; - Uint32 page_idx= ref.m_ref & MAX_TUPLES_PER_PAGE; - tmp.i= ref.m_ref >> MAX_TUPLES_BITS; + Local_key key; + ref.copyout(&key); + tmp.i = key.m_page_no; c_page_pool.getPtr(tmp); memcpy(pagePtr, &tmp, sizeof(tmp)); - return ((Var_page*)tmp.p)->get_ptr(page_idx); + return ((Var_page*)tmp.p)->get_ptr(key.m_page_idx); } inline diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp index 2414e8a10bf..ea2c69b5751 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp @@ -14,21 +14,19 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define DBTUP_C +#define DBTUP_ABORT_CPP #include "Dbtup.hpp" #include <RefConvert.hpp> #include <ndb_limits.h> #include <pc.hpp> -#define ljam() { jamLine(9000 + __LINE__); } -#define ljamEntry() { 
jamEntryLine(9000 + __LINE__); } - void Dbtup::freeAllAttrBuffers(Operationrec* const regOperPtr) { if (regOperPtr->storedProcedureId == RNIL) { - ljam(); + jam(); freeAttrinbufrec(regOperPtr->firstAttrinbufrec); } else { - ljam(); + jam(); StoredProcPtr storedPtr; c_storedProcPool.getPtr(storedPtr, (Uint32)regOperPtr->storedProcedureId); ndbrequire(storedPtr.p->storedCode == ZSCAN_PROCEDURE); @@ -46,7 +44,7 @@ void Dbtup::freeAttrinbufrec(Uint32 anAttrBuf) Uint32 RnoFree = cnoFreeAttrbufrec; localAttrBufPtr.i = anAttrBuf; while (localAttrBufPtr.i != RNIL) { - ljam(); + jam(); ptrCheckGuard(localAttrBufPtr, cnoOfAttrbufrec, attrbufrec); Ttemp = localAttrBufPtr.p->attrbuf[ZBUF_NEXT]; localAttrBufPtr.p->attrbuf[ZBUF_NEXT] = cfirstfreeAttrbufrec; @@ -62,7 +60,7 @@ void Dbtup::freeAttrinbufrec(Uint32 anAttrBuf) */ void Dbtup::execTUP_ABORTREQ(Signal* signal) { - ljamEntry(); + jamEntry(); do_tup_abortreq(signal, 0); } @@ -80,7 +78,7 @@ void Dbtup::do_tup_abortreq(Signal* signal, Uint32 flags) (trans_state == TRANS_ERROR_WAIT_TUPKEYREQ) || (trans_state == TRANS_IDLE)); if (regOperPtr.p->op_struct.op_type == ZREAD) { - ljam(); + jam(); freeAllAttrBuffers(regOperPtr.p); initOpConnection(regOperPtr.p); return; @@ -94,7 +92,7 @@ void Dbtup::do_tup_abortreq(Signal* signal, Uint32 flags) if (get_tuple_state(regOperPtr.p) == TUPLE_PREPARED) { - ljam(); + jam(); if (!regTabPtr.p->tuxCustomTriggers.isEmpty() && (flags & ZSKIP_TUX_TRIGGERS) == 0) executeTuxAbortTriggers(signal, @@ -105,12 +103,12 @@ void Dbtup::do_tup_abortreq(Signal* signal, Uint32 flags) OperationrecPtr loopOpPtr; loopOpPtr.i = regOperPtr.p->nextActiveOp; while (loopOpPtr.i != RNIL) { - ljam(); + jam(); c_operation_pool.getPtr(loopOpPtr); if (get_tuple_state(loopOpPtr.p) != TUPLE_ALREADY_ABORTED && !regTabPtr.p->tuxCustomTriggers.isEmpty() && (flags & ZSKIP_TUX_TRIGGERS) == 0) { - ljam(); + jam(); executeTuxAbortTriggers(signal, loopOpPtr.p, regFragPtr.p, @@ -153,12 +151,14 @@ void Dbtup::do_tup_abortreq(Signal* signal, Uint32 flags) ndbassert(tuple_ptr->m_header_bits & Tuple_header::CHAINED_ROW); - Uint32 ref= * tuple_ptr->get_var_part_ptr(regTabPtr.p); + Var_part_ref *ref = + (Var_part_ref*)tuple_ptr->get_var_part_ptr(regTabPtr.p); + Local_key tmp; - tmp.assref(ref); + ref->copyout(&tmp); idx= tmp.m_page_idx; - var_part= get_ptr(&vpage, *(Var_part_ref*)&ref); + var_part= get_ptr(&vpage, *ref); Var_page* pageP = (Var_page*)vpage.p; Uint32 len= pageP->get_entry_len(idx) & ~Var_page::CHAIN; Uint32 sz = ((((mm_vars + 1) << 1) + (((Uint16*)var_part)[mm_vars]) + 3)>> 2); @@ -211,116 +211,116 @@ int Dbtup::TUPKEY_abort(Signal* signal, int error_type) case 1: //tmupdate_alloc_error: terrorCode= ZMEM_NOMEM_ERROR; - ljam(); + jam(); break; case 15: - ljam(); + jam(); terrorCode = ZREGISTER_INIT_ERROR; break; case 16: - ljam(); + jam(); terrorCode = ZTRY_TO_UPDATE_ERROR; break; case 17: - ljam(); + jam(); terrorCode = ZNO_ILLEGAL_NULL_ATTR; break; case 19: - ljam(); + jam(); terrorCode = ZTRY_TO_UPDATE_ERROR; break; case 20: - ljam(); + jam(); terrorCode = ZREGISTER_INIT_ERROR; break; case 22: - ljam(); + jam(); terrorCode = ZTOTAL_LEN_ERROR; break; case 23: - ljam(); + jam(); terrorCode = ZREGISTER_INIT_ERROR; break; case 24: - ljam(); + jam(); terrorCode = ZREGISTER_INIT_ERROR; break; case 26: - ljam(); + jam(); terrorCode = ZREGISTER_INIT_ERROR; break; case 27: - ljam(); + jam(); terrorCode = ZREGISTER_INIT_ERROR; break; case 28: - ljam(); + jam(); terrorCode = ZREGISTER_INIT_ERROR; break; case 29: - ljam(); + jam(); break; case 30: - 
ljam(); + jam(); terrorCode = ZCALL_ERROR; break; case 31: - ljam(); + jam(); terrorCode = ZSTACK_OVERFLOW_ERROR; break; case 32: - ljam(); + jam(); terrorCode = ZSTACK_UNDERFLOW_ERROR; break; case 33: - ljam(); + jam(); terrorCode = ZNO_INSTRUCTION_ERROR; break; case 34: - ljam(); + jam(); terrorCode = ZOUTSIDE_OF_PROGRAM_ERROR; break; case 35: - ljam(); + jam(); terrorCode = ZTOO_MANY_INSTRUCTIONS_ERROR; break; case 38: - ljam(); + jam(); terrorCode = ZTEMPORARY_RESOURCE_FAILURE; break; case 39: if (get_trans_state(operPtr.p) == TRANS_TOO_MUCH_AI) { - ljam(); + jam(); terrorCode = ZTOO_MUCH_ATTRINFO_ERROR; } else if (get_trans_state(operPtr.p) == TRANS_ERROR_WAIT_TUPKEYREQ) { - ljam(); + jam(); terrorCode = ZSEIZE_ATTRINBUFREC_ERROR; } else { ndbrequire(false); }//if break; case 40: - ljam(); + jam(); terrorCode = ZUNSUPPORTED_BRANCH; break; default: diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp index 60c0c22ae6c..adac4d3d460 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp @@ -14,28 +14,26 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define DBTUP_C +#define DBTUP_BUFFER_CPP #include "Dbtup.hpp" #include <RefConvert.hpp> #include <ndb_limits.h> #include <pc.hpp> #include <signaldata/TransIdAI.hpp> -#define ljam() { jamLine(2000 + __LINE__); } -#define ljamEntry() { jamEntryLine(2000 + __LINE__); } - void Dbtup::execSEND_PACKED(Signal* signal) { Uint16 hostId; Uint32 i; Uint32 TpackedListIndex= cpackedListIndex; - ljamEntry(); + jamEntry(); for (i= 0; i < TpackedListIndex; i++) { - ljam(); + jam(); hostId= cpackedList[i]; ndbrequire((hostId - 1) < (MAX_NODES - 1)); // Also check not zero Uint32 TpacketTA= hostBuffer[hostId].noOfPacketsTA; if (TpacketTA != 0) { - ljam(); + jam(); BlockReference TBref= numberToRef(API_PACKED, hostId); Uint32 TpacketLen= hostBuffer[hostId].packetLenTA; MEMCOPY_NO_WORDS(&signal->theData[0], @@ -73,7 +71,7 @@ void Dbtup::bufferTRANSID_AI(Signal* signal, BlockReference aRef, // There is still space in the buffer. We will copy it into the // buffer. 
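The Dbtup hunks here and in the following files drop the per-file ljam()/ljamEntry() wrappers in favour of plain jam()/jamEntry(), which Dbtup.hpp now remaps per translation unit via the DBTUP_*_CPP defines so each source file reports line numbers in its own band (DbtupBuffer.cpp 10000, DbtupRoutines.cpp 15000, and so on). A minimal sketch of the idea follows; trace() is a hypothetical stand-in for the real jamLine()/jamEntryLine() trace buffer.

#include <cstdio>

// Hypothetical stand-in for jamLine()/jamEntryLine(): just prints the code.
static void trace(unsigned code) { std::printf("jam %u\n", code); }

// Each Dbtup source file defines its own DBTUP_<FILE>_CPP before including
// the block header, so its __LINE__ values land in a distinct 5000-wide band.
#define DBTUP_BUFFER_CPP
#ifdef DBTUP_BUFFER_CPP
#define jam() trace(10000 + __LINE__)
#else
#define jam() trace(__LINE__)
#endif

int main() { jam(); return 0; }   // prints a code in the 10000..14999 band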
// ---------------------------------------------------------------- - ljam(); + jam(); updatePackedList(signal, hostId); } else if (false && TnoOfPackets == 1) { // ---------------------------------------------------------------- @@ -118,7 +116,7 @@ void Dbtup::updatePackedList(Signal* signal, Uint16 hostId) { if (hostBuffer[hostId].inPackedList == false) { Uint32 TpackedListIndex= cpackedListIndex; - ljam(); + jam(); hostBuffer[hostId].inPackedList= true; cpackedList[TpackedListIndex]= hostId; cpackedListIndex= TpackedListIndex + 1; @@ -149,7 +147,7 @@ void Dbtup::sendReadAttrinfo(Signal* signal, if (ERROR_INSERTED(4006) && (nodeId != getOwnNodeId())){ // Use error insert to turn routing on - ljam(); + jam(); connectedToNode= false; } @@ -167,18 +165,18 @@ void Dbtup::sendReadAttrinfo(Signal* signal, * Own node -> execute direct */ if(nodeId != getOwnNodeId()){ - ljam(); + jam(); /** * Send long sig */ if (ToutBufIndex >= 22 && is_api && !old_dest) { - ljam(); + jam(); /** * Flush buffer so that order is maintained */ if (TpacketTA != 0) { - ljam(); + jam(); BlockReference TBref = numberToRef(API_PACKED, nodeId); MEMCOPY_NO_WORDS(&signal->theData[0], &hostBuffer[nodeId].packetBufferTA[0], @@ -202,7 +200,7 @@ void Dbtup::sendReadAttrinfo(Signal* signal, */ #ifndef NDB_NO_DROPPED_SIGNAL if (ToutBufIndex < 22 && is_api){ - ljam(); + jam(); bufferTRANSID_AI(signal, recBlockref, 3+ToutBufIndex); return; } @@ -214,7 +212,7 @@ void Dbtup::sendReadAttrinfo(Signal* signal, Uint32 * src= signal->theData+25; if (ToutBufIndex >= 22){ do { - ljam(); + jam(); MEMCOPY_NO_WORDS(&signal->theData[3], src, 22); sendSignal(recBlockref, GSN_TRANSID_AI, signal, 25, JBB); ToutBufIndex -= 22; @@ -223,14 +221,14 @@ void Dbtup::sendReadAttrinfo(Signal* signal, } if (ToutBufIndex > 0){ - ljam(); + jam(); MEMCOPY_NO_WORDS(&signal->theData[3], src, ToutBufIndex); sendSignal(recBlockref, GSN_TRANSID_AI, signal, 3+ToutBufIndex, JBB); } return; } EXECUTE_DIRECT(block, GSN_TRANSID_AI, signal, 3 + ToutBufIndex); - ljamEntry(); + jamEntry(); return; } @@ -242,7 +240,7 @@ void Dbtup::sendReadAttrinfo(Signal* signal, Uint32 routeBlockref= req_struct->TC_ref; if (true){ // TODO is_api && !old_dest){ - ljam(); + jam(); transIdAI->attrData[0]= recBlockref; LinearSectionPtr ptr[3]; ptr[0].p= &signal->theData[25]; @@ -260,7 +258,7 @@ void Dbtup::sendReadAttrinfo(Signal* signal, Uint32 sent= 0; Uint32 maxLen= TransIdAI::DataLength - 1; while (sent < tot) { - ljam(); + jam(); Uint32 dataLen= (tot - sent > maxLen) ? 
maxLen : tot - sent; Uint32 sigLen= dataLen + TransIdAI::HeaderLength + 1; MEMCOPY_NO_WORDS(&transIdAI->attrData, diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp index 91d2ca97744..37c2d26d119 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp @@ -14,6 +14,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define DBTUP_C +#define DBTUP_COMMIT_CPP #include "Dbtup.hpp" #include <RefConvert.hpp> #include <ndb_limits.h> @@ -21,16 +22,13 @@ #include <signaldata/TupCommit.hpp> #include "../dblqh/Dblqh.hpp" -#define ljam() { jamLine(5000 + __LINE__); } -#define ljamEntry() { jamEntryLine(5000 + __LINE__); } - void Dbtup::execTUP_DEALLOCREQ(Signal* signal) { TablerecPtr regTabPtr; FragrecordPtr regFragPtr; Uint32 frag_page_id, frag_id; - ljamEntry(); + jamEntry(); frag_id= signal->theData[0]; regTabPtr.i= signal->theData[1]; @@ -62,7 +60,7 @@ void Dbtup::execTUP_DEALLOCREQ(Signal* signal) if (regTabPtr.p->m_attributes[MM].m_no_of_varsize) { - ljam(); + jam(); free_var_rec(regFragPtr.p, regTabPtr.p, &tmp, pagePtr); } else { free_fix_rec(regFragPtr.p, regTabPtr.p, &tmp, (Fix_page*)pagePtr.p); @@ -78,7 +76,7 @@ void Dbtup::execTUP_WRITELOG_REQ(Signal* signal) Uint32 gci= signal->theData[1]; c_operation_pool.getPtr(loopOpPtr); while (loopOpPtr.p->prevActiveOp != RNIL) { - ljam(); + jam(); loopOpPtr.i= loopOpPtr.p->prevActiveOp; c_operation_pool.getPtr(loopOpPtr); } @@ -87,11 +85,11 @@ void Dbtup::execTUP_WRITELOG_REQ(Signal* signal) signal->theData[0]= loopOpPtr.p->userpointer; signal->theData[1]= gci; if (loopOpPtr.p->nextActiveOp == RNIL) { - ljam(); + jam(); EXECUTE_DIRECT(DBLQH, GSN_LQH_WRITELOG_REQ, signal, 2); return; } - ljam(); + jam(); EXECUTE_DIRECT(DBLQH, GSN_LQH_WRITELOG_REQ, signal, 2); jamEntry(); loopOpPtr.i= loopOpPtr.p->nextActiveOp; @@ -114,16 +112,16 @@ void Dbtup::removeActiveOpList(Operationrec* const regOperPtr, if (regOperPtr->op_struct.in_active_list) { regOperPtr->op_struct.in_active_list= false; if (regOperPtr->nextActiveOp != RNIL) { - ljam(); + jam(); raoOperPtr.i= regOperPtr->nextActiveOp; c_operation_pool.getPtr(raoOperPtr); raoOperPtr.p->prevActiveOp= regOperPtr->prevActiveOp; } else { - ljam(); + jam(); tuple_ptr->m_operation_ptr_i = regOperPtr->prevActiveOp; } if (regOperPtr->prevActiveOp != RNIL) { - ljam(); + jam(); raoOperPtr.i= regOperPtr->prevActiveOp; c_operation_pool.getPtr(raoOperPtr); raoOperPtr.p->nextActiveOp= regOperPtr->nextActiveOp; @@ -236,13 +234,14 @@ Dbtup::commit_operation(Signal* signal, } else { - Uint32 *ref= tuple_ptr->get_var_part_ptr(regTabPtr); + Var_part_ref *ref= (Var_part_ref*)tuple_ptr->get_var_part_ptr(regTabPtr); memcpy(tuple_ptr, copy, 4*(Tuple_header::HeaderSize+fixsize)); - Local_key tmp; tmp.assref(*ref); - + Local_key tmp; + ref->copyout(&tmp); + PagePtr vpagePtr; - Uint32 *dst= get_ptr(&vpagePtr, *(Var_part_ref*)ref); + Uint32 *dst= get_ptr(&vpagePtr, *ref); Var_page* vpagePtrP = (Var_page*)vpagePtr.p; Uint32 *src= copy->get_var_part_ptr(regTabPtr); Uint32 sz= ((mm_vars + 1) << 1) + (((Uint16*)src)[mm_vars]); @@ -343,7 +342,7 @@ Dbtup::disk_page_commit_callback(Signal* signal, Uint32 gci; OperationrecPtr regOperPtr; - ljamEntry(); + jamEntry(); c_operation_pool.getPtr(regOperPtr, opPtrI); c_lqh->get_op_info(regOperPtr.p->userpointer, &hash_value, &gci); @@ -379,7 +378,7 @@ Dbtup::disk_page_log_buffer_callback(Signal* signal, Uint32 gci; OperationrecPtr 
regOperPtr; - ljamEntry(); + jamEntry(); c_operation_pool.getPtr(regOperPtr, opPtrI); c_lqh->get_op_info(regOperPtr.p->userpointer, &hash_value, &gci); @@ -447,7 +446,7 @@ void Dbtup::execTUP_COMMITREQ(Signal* signal) TupCommitReq * const tupCommitReq= (TupCommitReq *)signal->getDataPtr(); regOperPtr.i= tupCommitReq->opPtr; - ljamEntry(); + jamEntry(); c_operation_pool.getPtr(regOperPtr); if(!regOperPtr.p->is_first_operation()) @@ -603,7 +602,7 @@ skip_disk: * why can't we instead remove "own version" (when approriate ofcourse) */ if (!regTabPtr.p->tuxCustomTriggers.isEmpty()) { - ljam(); + jam(); OperationrecPtr loopPtr= regOperPtr; while(loopPtr.i != RNIL) { @@ -656,18 +655,18 @@ Dbtup::set_change_mask_info(KeyReqStruct * const req_struct, { ChangeMaskState state = get_change_mask_state(regOperPtr); if (state == USE_SAVED_CHANGE_MASK) { - ljam(); + jam(); req_struct->changeMask.setWord(0, regOperPtr->saved_change_mask[0]); req_struct->changeMask.setWord(1, regOperPtr->saved_change_mask[1]); } else if (state == RECALCULATE_CHANGE_MASK) { - ljam(); + jam(); // Recompute change mask, for now set all bits req_struct->changeMask.set(); } else if (state == SET_ALL_MASK) { - ljam(); + jam(); req_struct->changeMask.set(); } else { - ljam(); + jam(); ndbrequire(state == DELETE_CHANGES); req_struct->changeMask.set(); } @@ -687,17 +686,17 @@ Dbtup::calculateChangeMask(Page* const pagePtr, ndbrequire(loopOpPtr.p->op_struct.op_type == ZUPDATE); ChangeMaskState change_mask= get_change_mask_state(loopOpPtr.p); if (change_mask == USE_SAVED_CHANGE_MASK) { - ljam(); + jam(); saved_word1|= loopOpPtr.p->saved_change_mask[0]; saved_word2|= loopOpPtr.p->saved_change_mask[1]; } else if (change_mask == RECALCULATE_CHANGE_MASK) { - ljam(); + jam(); //Recompute change mask, for now set all bits req_struct->changeMask.set(); return; } else { ndbrequire(change_mask == SET_ALL_MASK); - ljam(); + jam(); req_struct->changeMask.set(); return; } diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp index ccb0bdcb537..708b4e0e8d7 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp @@ -15,6 +15,7 @@ #define DBTUP_C +#define DBTUP_DEBUG_CPP #include "Dbtup.hpp" #include <RefConvert.hpp> #include <ndb_limits.h> @@ -24,9 +25,6 @@ #include <signaldata/EventReport.hpp> #include <Vector.hpp> -#define ljam() { jamLine(30000 + __LINE__); } -#define ljamEntry() { jamEntryLine(30000 + __LINE__); } - /* **************************************************************** */ /* ---------------------------------------------------------------- */ /* ------------------------ DEBUG MODULE -------------------------- */ @@ -35,7 +33,7 @@ void Dbtup::execDEBUG_SIG(Signal* signal) { PagePtr regPagePtr; - ljamEntry(); + jamEntry(); regPagePtr.i = signal->theData[0]; c_page_pool.getPtr(regPagePtr); }//Dbtup::execDEBUG_SIG() @@ -248,18 +246,18 @@ void Dbtup::execMEMCHECKREQ(Signal* signal) PagePtr regPagePtr; Uint32* data = &signal->theData[0]; - ljamEntry(); + jamEntry(); BlockReference blockref = signal->theData[0]; Uint32 i; for (i = 0; i < 25; i++) { - ljam(); + jam(); data[i] = 0; }//for for (i = 0; i < 16; i++) { regPagePtr.i = cfreepageList[i]; - ljam(); + jam(); while (regPagePtr.i != RNIL) { - ljam(); + jam(); ptrCheckGuard(regPagePtr, cnoOfPage, cpage); regPagePtr.i = regPagePtr.p->next_page; data[0]++; diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp 
b/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp index f865904b413..1c3986afbbd 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp @@ -14,6 +14,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define DBTUP_C +#define DBTUP_DISK_ALLOC_CPP #include "Dbtup.hpp" static bool f_undo_done = true; diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp index fe0f570f484..d7476f5f69d 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp @@ -1259,6 +1259,28 @@ int Dbtup::handleInsertReq(Signal* signal, { shrink_tuple(req_struct, sizes+2, regTabPtr, true); } + + if (ERROR_INSERTED(4025)) + { + goto mem_error; + } + + if (ERROR_INSERTED(4026)) + { + CLEAR_ERROR_INSERT_VALUE; + goto mem_error; + } + + if (ERROR_INSERTED(4027) && (rand() % 100) > 25) + { + goto mem_error; + } + + if (ERROR_INSERTED(4028) && (rand() % 100) > 25) + { + CLEAR_ERROR_INSERT_VALUE; + goto mem_error; + } /** * Alloc memory @@ -2833,11 +2855,11 @@ Dbtup::handle_size_change_after_update(KeyReqStruct* req_struct, Ptr<Page> pagePtr = req_struct->m_varpart_page_ptr; Var_page* pageP= (Var_page*)pagePtr.p; Uint32 idx, alloc, needed; - Uint32 *refptr = org->get_var_part_ptr(regTabPtr); + Var_part_ref *refptr = (Var_part_ref*)org->get_var_part_ptr(regTabPtr); ndbassert(bits & Tuple_header::CHAINED_ROW); Local_key ref; - ref.assref(*refptr); + refptr->copyout(&ref); idx= ref.m_page_idx; if (! (copy_bits & Tuple_header::CHAINED_ROW)) { @@ -2860,7 +2882,7 @@ Dbtup::handle_size_change_after_update(KeyReqStruct* req_struct, } copy_bits |= Tuple_header::MM_GROWN; if (unlikely(realloc_var_part(regFragPtr, regTabPtr, pagePtr, - (Var_part_ref*)refptr, alloc, needed))) + refptr, alloc, needed))) return -1; } req_struct->m_tuple_ptr->m_header_bits = copy_bits; @@ -2926,9 +2948,10 @@ Dbtup::nr_read_pk(Uint32 fragPtrI, PagePtr page_ptr; if (tablePtr.p->m_attributes[MM].m_no_of_varsize) { - tablePtr.p->m_offsets[MM].m_fix_header_size += Tuple_header::HeaderSize+1; + const Uint32 XXX = Tuple_header::HeaderSize+Var_part_ref::SZ32; + tablePtr.p->m_offsets[MM].m_fix_header_size += XXX; ret = alloc_page(tablePtr.p, fragPtr.p, &page_ptr, tmp.m_page_no); - tablePtr.p->m_offsets[MM].m_fix_header_size -= Tuple_header::HeaderSize+1; + tablePtr.p->m_offsets[MM].m_fix_header_size -= XXX; } else { diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp index 50500b96134..31903ea8c33 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp @@ -14,14 +14,12 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define DBTUP_C +#define DBTUP_FIXALLOC_CPP #include "Dbtup.hpp" #include <RefConvert.hpp> #include <ndb_limits.h> #include <pc.hpp> -#define ljam() { jamLine(6000 + __LINE__); } -#define ljamEntry() { jamEntryLine(6000 + __LINE__); } - // // Fixed Allocator // This module is used to allocate and free fixed size tuples from the @@ -79,7 +77,7 @@ Dbtup::alloc_fix_rec(Fragrecord* const regFragPtr, /* ---------------------------------------------------------------- */ pagePtr.i = getEmptyPage(regFragPtr); if (pagePtr.i != RNIL) { - ljam(); + jam(); /* ---------------------------------------------------------------- */ // We found empty pages on 
the fragment. Allocate an empty page and // convert it into a tuple header page and put it in thFreeFirst-list. @@ -95,14 +93,14 @@ Dbtup::alloc_fix_rec(Fragrecord* const regFragPtr, LocalDLList<Page> free_pages(c_page_pool, regFragPtr->thFreeFirst); free_pages.add(pagePtr); } else { - ljam(); + jam(); /* ---------------------------------------------------------------- */ /* THERE ARE NO EMPTY PAGES. MEMORY CAN NOT BE ALLOCATED. */ /* ---------------------------------------------------------------- */ return 0; } } else { - ljam(); + jam(); /* ---------------------------------------------------------------- */ /* THIS SHOULD BE THE COMMON PATH THROUGH THE CODE, FREE */ /* COPY PAGE EXISTED. */ @@ -193,7 +191,7 @@ void Dbtup::free_fix_rec(Fragrecord* regFragPtr, if(free == 1) { - ljam(); + jam(); PagePtr pagePtr = { (Page*)regPagePtr, key->m_page_no }; LocalDLList<Page> free_pages(c_page_pool, regFragPtr->thFreeFirst); ndbrequire(regPagePtr->page_state == ZTH_MM_FULL); diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp index 67fc5a4ceb0..a3e21a2a203 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp @@ -15,6 +15,7 @@ #define DBTUP_C +#define DBTUP_GEN_CPP #include "Dbtup.hpp" #include <RefConvert.hpp> #include <ndb_limits.h> @@ -34,9 +35,6 @@ #define DEBUG(x) { ndbout << "TUP::" << x << endl; } -#define ljam() { jamLine(24000 + __LINE__); } -#define ljamEntry() { jamEntryLine(24000 + __LINE__); } - void Dbtup::initData() { cnoOfAttrbufrec = ZNO_OF_ATTRBUFREC; @@ -112,6 +110,7 @@ Dbtup::Dbtup(Block_context& ctx, Pgman* pgman) cnoOfAllocatedPages = 0; initData(); + CLEAR_ERROR_INSERT_VALUE; }//Dbtup::Dbtup() Dbtup::~Dbtup() @@ -151,21 +150,21 @@ BLOCK_FUNCTIONS(Dbtup) void Dbtup::execCONTINUEB(Signal* signal) { - ljamEntry(); + jamEntry(); Uint32 actionType = signal->theData[0]; Uint32 dataPtr = signal->theData[1]; switch (actionType) { case ZINITIALISE_RECORDS: - ljam(); + jam(); initialiseRecordsLab(signal, dataPtr, signal->theData[2], signal->theData[3]); break; case ZREL_FRAG: - ljam(); + jam(); releaseFragment(signal, dataPtr, signal->theData[2]); break; case ZREPORT_MEMORY_USAGE:{ - ljam(); + jam(); static int c_currentMemUsed = 0; Uint32 cnt = signal->theData[1]; Uint32 tmp = c_page_pool.getSize(); @@ -200,11 +199,11 @@ void Dbtup::execCONTINUEB(Signal* signal) return; } case ZBUILD_INDEX: - ljam(); + jam(); buildIndex(signal, dataPtr); break; case ZTUP_SCAN: - ljam(); + jam(); { ScanOpPtr scanPtr; c_scanOpPool.getPtr(scanPtr, dataPtr); @@ -213,7 +212,7 @@ void Dbtup::execCONTINUEB(Signal* signal) return; case ZFREE_EXTENT: { - ljam(); + jam(); TablerecPtr tabPtr; tabPtr.i= dataPtr; @@ -226,7 +225,7 @@ void Dbtup::execCONTINUEB(Signal* signal) } case ZUNMAP_PAGES: { - ljam(); + jam(); TablerecPtr tabPtr; tabPtr.i= dataPtr; @@ -239,7 +238,7 @@ void Dbtup::execCONTINUEB(Signal* signal) } case ZFREE_VAR_PAGES: { - ljam(); + jam(); drop_fragment_free_var_pages(signal); return; } @@ -256,20 +255,19 @@ void Dbtup::execCONTINUEB(Signal* signal) /* **************************************************************** */ void Dbtup::execSTTOR(Signal* signal) { - ljamEntry(); + jamEntry(); Uint32 startPhase = signal->theData[1]; Uint32 sigKey = signal->theData[6]; switch (startPhase) { case ZSTARTPHASE1: - ljam(); - CLEAR_ERROR_INSERT_VALUE; + jam(); ndbrequire((c_lqh= (Dblqh*)globalData.getBlock(DBLQH)) != 0); ndbrequire((c_tsman= (Tsman*)globalData.getBlock(TSMAN)) != 0); 
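The DbtupExecQuery.cpp hunk above adds ERROR_INSERTED(4025) through (4028) checks that force handleInsertReq down its mem_error path, either every time or with roughly 75% probability, and either keeping or clearing the insert value, while the DbtupGen.cpp hunk above moves CLEAR_ERROR_INSERT_VALUE from start phase 1 into the Dbtup constructor. A rough sketch of that error-injection pattern under simplified assumptions; the macros below are stand-ins, not the real ERROR_INSERTED machinery.

#include <cstdlib>

// Stand-ins for ERROR_INSERTED()/CLEAR_ERROR_INSERT_VALUE; the real macros
// test a per-block error-insert value that the test harness sets at runtime.
static unsigned g_error_insert_value = 4027;
#define ERROR_INSERTED(x) (g_error_insert_value == (unsigned)(x))
#define CLEAR_ERROR_INSERT_VALUE (g_error_insert_value = 0)

// Mirrors the new handleInsertReq checks: 4025/4027 leave the insert value
// set (so they keep firing), 4026/4028 clear it (so they fire once);
// returning false here simulates taking the mem_error path.
static bool simulated_alloc()
{
  if (ERROR_INSERTED(4025))
    return false;
  if (ERROR_INSERTED(4026)) { CLEAR_ERROR_INSERT_VALUE; return false; }
  if (ERROR_INSERTED(4027) && (std::rand() % 100) > 25)
    return false;
  if (ERROR_INSERTED(4028) && (std::rand() % 100) > 25) { CLEAR_ERROR_INSERT_VALUE; return false; }
  return true;
}

int main() { return simulated_alloc() ? 0 : 1; }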
ndbrequire((c_lgman= (Lgman*)globalData.getBlock(LGMAN)) != 0); cownref = calcTupBlockRef(0); break; default: - ljam(); + jam(); break; }//switch signal->theData[0] = sigKey; @@ -292,7 +290,7 @@ void Dbtup::execREAD_CONFIG_REQ(Signal* signal) Uint32 senderData = req->senderData; ndbrequire(req->noOfParameters == 0); - ljamEntry(); + jamEntry(); const ndb_mgm_configuration_iterator * p = m_ctx.m_config.getOwnConfigIterator(); @@ -412,58 +410,58 @@ void Dbtup::initialiseRecordsLab(Signal* signal, Uint32 switchData, { switch (switchData) { case 0: - ljam(); + jam(); initializeHostBuffer(); break; case 1: - ljam(); + jam(); initializeOperationrec(); break; case 2: - ljam(); + jam(); initializePage(); break; case 3: - ljam(); + jam(); break; case 4: - ljam(); + jam(); initializeTablerec(); break; case 5: - ljam(); + jam(); break; case 6: - ljam(); + jam(); initializeFragrecord(); break; case 7: - ljam(); + jam(); initializeFragoperrec(); break; case 8: - ljam(); + jam(); initializePageRange(); break; case 9: - ljam(); + jam(); initializeTabDescr(); break; case 10: - ljam(); + jam(); break; case 11: - ljam(); + jam(); break; case 12: - ljam(); + jam(); initializeAttrbufrec(); break; case 13: - ljam(); + jam(); break; case 14: - ljam(); + jam(); { ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend(); @@ -487,28 +485,28 @@ void Dbtup::initialiseRecordsLab(Signal* signal, Uint32 switchData, void Dbtup::execNDB_STTOR(Signal* signal) { - ljamEntry(); + jamEntry(); cndbcntrRef = signal->theData[0]; Uint32 ownNodeId = signal->theData[1]; Uint32 startPhase = signal->theData[2]; switch (startPhase) { case ZSTARTPHASE1: - ljam(); + jam(); cownNodeId = ownNodeId; cownref = calcTupBlockRef(ownNodeId); break; case ZSTARTPHASE2: - ljam(); + jam(); break; case ZSTARTPHASE3: - ljam(); + jam(); startphase3Lab(signal, ~0, ~0); break; case ZSTARTPHASE4: - ljam(); + jam(); break; case ZSTARTPHASE6: - ljam(); + jam(); /*****************************************/ /* NOW SET THE DISK WRITE SPEED TO */ /* PAGES PER TICK AFTER SYSTEM */ @@ -516,10 +514,10 @@ void Dbtup::execNDB_STTOR(Signal* signal) /*****************************************/ signal->theData[0] = ZREPORT_MEMORY_USAGE; signal->theData[1] = 0; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 1000, 1); + sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 1000, 2); break; default: - ljam(); + jam(); break; }//switch signal->theData[0] = cownref; @@ -596,7 +594,7 @@ void Dbtup::initializeTablerec() { TablerecPtr regTabPtr; for (regTabPtr.i = 0; regTabPtr.i < cnoOfTablerec; regTabPtr.i++) { - ljam(); + jam(); refresh_watch_dog(); ptrAss(regTabPtr, tablerec); initTab(regTabPtr.p); @@ -667,12 +665,12 @@ void Dbtup::initializeTabDescr() void Dbtup::execTUPSEIZEREQ(Signal* signal) { OperationrecPtr regOperPtr; - ljamEntry(); + jamEntry(); Uint32 userPtr = signal->theData[0]; BlockReference userRef = signal->theData[1]; if (!c_operation_pool.seize(regOperPtr)) { - ljam(); + jam(); signal->theData[0] = userPtr; signal->theData[1] = ZGET_OPREC_ERROR; sendSignal(userRef, GSN_TUPSEIZEREF, signal, 2, JBB); @@ -706,7 +704,7 @@ void Dbtup::execTUPSEIZEREQ(Signal* signal) void Dbtup::execTUPRELEASEREQ(Signal* signal) { OperationrecPtr regOperPtr; - ljamEntry(); + jamEntry(); regOperPtr.i = signal->theData[0]; c_operation_pool.getPtr(regOperPtr); set_trans_state(regOperPtr.p, TRANS_DISCONNECTED); diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp index 3f42f0151e6..17ab0573376 
100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp @@ -14,6 +14,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define DBTUP_C +#define DBTUP_INDEX_CPP #include "Dbtup.hpp" #include <RefConvert.hpp> #include <ndb_limits.h> @@ -23,9 +24,6 @@ #include <AttributeHeader.hpp> #include <signaldata/TuxMaint.hpp> -#define ljam() { jamLine(28000 + __LINE__); } -#define ljamEntry() { jamEntryLine(28000 + __LINE__); } - // methods used by ordered index void @@ -34,7 +32,7 @@ Dbtup::tuxGetTupAddr(Uint32 fragPtrI, Uint32 pageIndex, Uint32& tupAddr) { - ljamEntry(); + jamEntry(); PagePtr pagePtr; c_page_pool.getPtr(pagePtr, pageId); Uint32 fragPageId= pagePtr.p->frag_page_id; @@ -48,7 +46,7 @@ Dbtup::tuxAllocNode(Signal* signal, Uint32& pageOffset, Uint32*& node) { - ljamEntry(); + jamEntry(); FragrecordPtr fragPtr; fragPtr.i= fragPtrI; ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); @@ -61,7 +59,7 @@ Dbtup::tuxAllocNode(Signal* signal, Uint32* ptr, frag_page_id; if ((ptr= alloc_fix_rec(fragPtr.p, tablePtr.p, &key, &frag_page_id)) == 0) { - ljam(); + jam(); terrorCode = ZMEM_NOMEM_ERROR; // caller sets error return terrorCode; } @@ -82,7 +80,7 @@ Dbtup::tuxFreeNode(Signal* signal, Uint32 pageOffset, Uint32* node) { - ljamEntry(); + jamEntry(); FragrecordPtr fragPtr; fragPtr.i= fragPtrI; ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); @@ -105,7 +103,7 @@ Dbtup::tuxGetNode(Uint32 fragPtrI, Uint32 pageOffset, Uint32*& node) { - ljamEntry(); + jamEntry(); FragrecordPtr fragPtr; fragPtr.i= fragPtrI; ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); @@ -130,7 +128,7 @@ Dbtup::tuxReadAttrs(Uint32 fragPtrI, Uint32 numAttrs, Uint32* dataOut) { - ljamEntry(); + jamEntry(); // use own variables instead of globals FragrecordPtr fragPtr; fragPtr.i= fragPtrI; @@ -150,21 +148,21 @@ Dbtup::tuxReadAttrs(Uint32 fragPtrI, Tuple_header *tuple_ptr= req_struct.m_tuple_ptr; if (tuple_ptr->get_tuple_version() != tupVersion) { - ljam(); + jam(); OperationrecPtr opPtr; opPtr.i= tuple_ptr->m_operation_ptr_i; Uint32 loopGuard= 0; while (opPtr.i != RNIL) { c_operation_pool.getPtr(opPtr); if (opPtr.p->tupVersion == tupVersion) { - ljam(); + jam(); if (!opPtr.p->m_copy_tuple_location.isNull()) { req_struct.m_tuple_ptr= (Tuple_header*) c_undo_buffer.get_ptr(&opPtr.p->m_copy_tuple_location); } break; } - ljam(); + jam(); opPtr.i= opPtr.p->prevActiveOp; ndbrequire(++loopGuard < (1 << ZTUP_VERSION_BITS)); } @@ -202,7 +200,7 @@ Dbtup::tuxReadAttrs(Uint32 fragPtrI, int Dbtup::tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageIndex, Uint32* dataOut, bool xfrmFlag) { - ljamEntry(); + jamEntry(); // use own variables instead of globals FragrecordPtr fragPtr; fragPtr.i= fragPtrI; @@ -305,7 +303,7 @@ Dbtup::tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageIndex, Uint32* dataO int Dbtup::accReadPk(Uint32 tableId, Uint32 fragId, Uint32 fragPageId, Uint32 pageIndex, Uint32* dataOut, bool xfrmFlag) { - ljamEntry(); + jamEntry(); // get table TablerecPtr tablePtr; tablePtr.i = tableId; @@ -329,7 +327,7 @@ Dbtup::tuxQueryTh(Uint32 fragPtrI, Uint32 transId2, Uint32 savePointId) { - ljamEntry(); + jamEntry(); FragrecordPtr fragPtr; fragPtr.i= fragPtrI; ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); @@ -358,9 +356,9 @@ Dbtup::tuxQueryTh(Uint32 fragPtrI, * for this transaction and savepoint id. If its tuple version * equals the requested then we have a visible tuple otherwise not. 
*/ - ljam(); + jam(); if (req_struct.m_tuple_ptr->get_tuple_version() == tupVersion) { - ljam(); + jam(); return true; } } @@ -378,7 +376,7 @@ Dbtup::tuxQueryTh(Uint32 fragPtrI, void Dbtup::execBUILDINDXREQ(Signal* signal) { - ljamEntry(); + jamEntry(); #ifdef TIME_MEASUREMENT time_events= 0; tot_time_passed= 0; @@ -387,7 +385,7 @@ Dbtup::execBUILDINDXREQ(Signal* signal) // get new operation BuildIndexPtr buildPtr; if (! c_buildIndexList.seize(buildPtr)) { - ljam(); + jam(); BuildIndexRec buildRec; memcpy(buildRec.m_request, signal->theData, sizeof(buildRec.m_request)); buildRec.m_errorCode= BuildIndxRef::Busy; @@ -402,7 +400,7 @@ Dbtup::execBUILDINDXREQ(Signal* signal) do { const BuildIndxReq* buildReq= (const BuildIndxReq*)buildPtr.p->m_request; if (buildReq->getTableId() >= cnoOfTablerec) { - ljam(); + jam(); buildPtr.p->m_errorCode= BuildIndxRef::InvalidPrimaryTable; break; } @@ -410,7 +408,7 @@ Dbtup::execBUILDINDXREQ(Signal* signal) tablePtr.i= buildReq->getTableId(); ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec); if (tablePtr.p->tableStatus != DEFINED) { - ljam(); + jam(); buildPtr.p->m_errorCode= BuildIndxRef::InvalidPrimaryTable; break; } @@ -418,7 +416,7 @@ Dbtup::execBUILDINDXREQ(Signal* signal) buildPtr.p->m_build_vs = tablePtr.p->m_attributes[MM].m_no_of_varsize > 0; if (DictTabInfo::isOrderedIndex(buildReq->getIndexType())) { - ljam(); + jam(); const DLList<TupTriggerData>& triggerList = tablePtr.p->tuxCustomTriggers; @@ -426,13 +424,13 @@ Dbtup::execBUILDINDXREQ(Signal* signal) triggerList.first(triggerPtr); while (triggerPtr.i != RNIL) { if (triggerPtr.p->indexId == buildReq->getIndexId()) { - ljam(); + jam(); break; } triggerList.next(triggerPtr); } if (triggerPtr.i == RNIL) { - ljam(); + jam(); // trigger was not created buildPtr.p->m_errorCode = BuildIndxRef::InternalError; break; @@ -440,12 +438,12 @@ Dbtup::execBUILDINDXREQ(Signal* signal) buildPtr.p->m_indexId = buildReq->getIndexId(); buildPtr.p->m_buildRef = DBTUX; } else if(buildReq->getIndexId() == RNIL) { - ljam(); + jam(); // REBUILD of acc buildPtr.p->m_indexId = RNIL; buildPtr.p->m_buildRef = DBACC; } else { - ljam(); + jam(); buildPtr.p->m_errorCode = BuildIndxRef::InvalidIndexType; break; } @@ -479,7 +477,7 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI) const Uint32 firstTupleNo = 0; const Uint32 tupheadsize = tablePtr.p->m_offsets[MM].m_fix_header_size + - (buildPtr.p->m_build_vs ? Tuple_header::HeaderSize + 1: 0); + (buildPtr.p->m_build_vs? 
Tuple_header::HeaderSize + Var_part_ref::SZ32: 0); #ifdef TIME_MEASUREMENT MicroSecondTimer start; @@ -490,7 +488,7 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI) // get fragment FragrecordPtr fragPtr; if (buildPtr.p->m_fragNo == MAX_FRAG_PER_NODE) { - ljam(); + jam(); // build ready buildIndexReply(signal, buildPtr.p); c_buildIndexList.release(buildPtr); @@ -499,7 +497,7 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI) ndbrequire(buildPtr.p->m_fragNo < MAX_FRAG_PER_NODE); fragPtr.i= tablePtr.p->fragrec[buildPtr.p->m_fragNo]; if (fragPtr.i == RNIL) { - ljam(); + jam(); buildPtr.p->m_fragNo++; buildPtr.p->m_pageId= 0; buildPtr.p->m_tupleNo= firstTupleNo; @@ -509,7 +507,7 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI) // get page PagePtr pagePtr; if (buildPtr.p->m_pageId >= fragPtr.p->noOfPages) { - ljam(); + jam(); buildPtr.p->m_fragNo++; buildPtr.p->m_pageId= 0; buildPtr.p->m_tupleNo= firstTupleNo; @@ -520,7 +518,7 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI) Uint32 pageState= pagePtr.p->page_state; // skip empty page if (pageState == ZEMPTY_MM) { - ljam(); + jam(); buildPtr.p->m_pageId++; buildPtr.p->m_tupleNo= firstTupleNo; break; @@ -530,7 +528,7 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI) const Tuple_header* tuple_ptr = 0; pageIndex = buildPtr.p->m_tupleNo * tupheadsize; if (pageIndex + tupheadsize > Fix_page::DATA_WORDS) { - ljam(); + jam(); buildPtr.p->m_pageId++; buildPtr.p->m_tupleNo= firstTupleNo; break; @@ -538,7 +536,7 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI) tuple_ptr = (Tuple_header*)&pagePtr.p->m_data[pageIndex]; // skip over free tuple if (tuple_ptr->m_header_bits & Tuple_header::FREE) { - ljam(); + jam(); buildPtr.p->m_tupleNo++; break; } @@ -581,7 +579,7 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI) tuple as a copy tuple. The original tuple is stable and is thus preferrable to store in TUX. */ - ljam(); + jam(); /** * Since copy tuples now can't be found on real pages. 
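The Dbtup.hpp, DbtupAbort.cpp, DbtupCommit.cpp, DbtupExecQuery.cpp and buildIndex changes above replace the single-word, bit-packed var-part reference with a Var_part_ref holding page number and page index in separate words (SZ32 == 2), always accessed through copyout()/assign(); size calculations correspondingly add Var_part_ref::SZ32 instead of a hard-coded 1. A small self-contained sketch of the two layouts, where IDX_BITS = 13 is an assumed simplification of MAX_TUPLES_BITS.

#include <cassert>

typedef unsigned int Uint32;   // assumes a 32-bit unsigned int

struct Local_key { Uint32 m_page_no; Uint32 m_page_idx; };

// Old layout (sketch): both fields packed into one 32-bit word, which caps
// the addressable var-part space.
struct Packed_ref {
  Uint32 m_ref;
  enum { IDX_BITS = 13 };
  void assign(const Local_key* s) { m_ref = (s->m_page_no << IDX_BITS) | s->m_page_idx; }
  void copyout(Local_key* d) const {
    d->m_page_no  = m_ref >> IDX_BITS;
    d->m_page_idx = m_ref & ((1u << IDX_BITS) - 1);
  }
};

// New layout: two full words (SZ32 == 2), so page numbers are not truncated.
struct Two_word_ref {
  Uint32 m_page_no, m_page_idx;
  enum { SZ32 = 2 };
  void assign(const Local_key* s) { m_page_no = s->m_page_no; m_page_idx = s->m_page_idx; }
  void copyout(Local_key* d) const { d->m_page_no = m_page_no; d->m_page_idx = m_page_idx; }
};

int main()
{
  Local_key in = { 123456789u, 77u }, out;
  Two_word_ref r;
  r.assign(&in);
  r.copyout(&out);
  assert(out.m_page_no == in.m_page_no && out.m_page_idx == in.m_page_idx);
  return 0;
}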
@@ -610,11 +608,11 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI) } while(req->errorCode == 0 && pageOperPtr.i != RNIL); } - ljamEntry(); + jamEntry(); if (req->errorCode != 0) { switch (req->errorCode) { case TuxMaintReq::NoMemError: - ljam(); + jam(); buildPtr.p->m_errorCode= BuildIndxRef::AllocationFailure; break; default: @@ -666,7 +664,7 @@ Dbtup::buildIndexReply(Signal* signal, const BuildIndexRec* buildPtrP) rep->setIndexId(buildReq->getIndexId()); // conf if (buildPtrP->m_errorCode == BuildIndxRef::NoError) { - ljam(); + jam(); sendSignal(rep->getUserRef(), GSN_BUILDINDXCONF, signal, BuildIndxConf::SignalLength, JBB); return; diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp index e51638b8a20..cf700c5fa58 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp @@ -15,6 +15,7 @@ #define DBTUP_C +#define DBTUP_META_CPP #include "Dbtup.hpp" #include <RefConvert.hpp> #include <ndb_limits.h> @@ -29,16 +30,13 @@ #include "AttributeOffset.hpp" #include <my_sys.h> -#define ljam() { jamLine(20000 + __LINE__); } -#define ljamEntry() { jamEntryLine(20000 + __LINE__); } - void Dbtup::execTUPFRAGREQ(Signal* signal) { - ljamEntry(); + jamEntry(); TupFragReq* tupFragReq = (TupFragReq*)signal->getDataPtr(); if (tupFragReq->userPtr == (Uint32)-1) { - ljam(); + jam(); abortAddFragOp(signal); return; } @@ -69,7 +67,7 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) #ifndef VM_TRACE // config mismatch - do not crash if release compiled if (regTabPtr.i >= cnoOfTablerec) { - ljam(); + jam(); tupFragReq->userPtr = userptr; tupFragReq->userRef = 800; sendSignal(userblockref, GSN_TUPFRAGREF, signal, 2, JBB); @@ -79,7 +77,7 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec); if (cfirstfreeFragopr == RNIL) { - ljam(); + jam(); tupFragReq->userPtr = userptr; tupFragReq->userRef = ZNOFREE_FRAGOP_ERROR; sendSignal(userblockref, GSN_TUPFRAGREF, signal, 2, JBB); @@ -108,29 +106,29 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) getFragmentrec(regFragPtr, fragId, regTabPtr.p); if (regFragPtr.i != RNIL) { - ljam(); + jam(); terrorCode= ZEXIST_FRAG_ERROR; fragrefuse1Lab(signal, fragOperPtr); return; } if (cfirstfreefrag != RNIL) { - ljam(); + jam(); seizeFragrecord(regFragPtr); } else { - ljam(); + jam(); terrorCode= ZFULL_FRAGRECORD_ERROR; fragrefuse1Lab(signal, fragOperPtr); return; } initFragRange(regFragPtr.p); if (!addfragtotab(regTabPtr.p, fragId, regFragPtr.i)) { - ljam(); + jam(); terrorCode= ZNO_FREE_TAB_ENTRY_ERROR; fragrefuse2Lab(signal, fragOperPtr, regFragPtr); return; } if (cfirstfreerange == RNIL) { - ljam(); + jam(); terrorCode= ZNO_FREE_PAGE_RANGE_ERROR; fragrefuse3Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId); return; @@ -146,7 +144,7 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) if (ERROR_INSERTED(4007) && regTabPtr.p->fragid[0] == fragId || ERROR_INSERTED(4008) && regTabPtr.p->fragid[1] == fragId) { - ljam(); + jam(); terrorCode = 1; fragrefuse4Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId); CLEAR_ERROR_INSERT_VALUE; @@ -154,7 +152,7 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) } if (regTabPtr.p->tableStatus == NOT_DEFINED) { - ljam(); + jam(); //----------------------------------------------------------------------------- // We are setting up references to the header of the tuple. 
// Active operation This word contains a reference to the operation active @@ -200,13 +198,13 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) Uint32 offset[10]; Uint32 tableDescriptorRef= allocTabDescr(regTabPtr.p, offset); if (tableDescriptorRef == RNIL) { - ljam(); + jam(); fragrefuse4Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId); return; } setUpDescriptorReferences(tableDescriptorRef, regTabPtr.p, offset); } else { - ljam(); + jam(); fragOperPtr.p->definingFragment= false; } signal->theData[0]= fragOperPtr.p->lqhPtrFrag; @@ -222,9 +220,9 @@ bool Dbtup::addfragtotab(Tablerec* const regTabPtr, Uint32 fragIndex) { for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) { - ljam(); + jam(); if (regTabPtr->fragid[i] == RNIL) { - ljam(); + jam(); regTabPtr->fragid[i]= fragId; regTabPtr->fragrec[i]= fragIndex; return true; @@ -238,9 +236,9 @@ void Dbtup::getFragmentrec(FragrecordPtr& regFragPtr, Tablerec* const regTabPtr) { for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) { - ljam(); + jam(); if (regTabPtr->fragid[i] == fragId) { - ljam(); + jam(); regFragPtr.i= regTabPtr->fragrec[i]; ptrCheckGuard(regFragPtr, cnoOfFragrec, fragrecord); return; @@ -276,7 +274,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) FragoperrecPtr fragOperPtr; TablerecPtr regTabPtr; - ljamEntry(); + jamEntry(); fragOperPtr.i= signal->theData[0]; ptrCheckGuard(fragOperPtr, cnoOfFragoprec, fragoperrec); Uint32 attrId = signal->theData[2]; @@ -337,7 +335,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) Uint32 attrDes2= 0; if (!AttributeDescriptor::getDynamic(attrDescriptor)) { - ljam(); + jam(); Uint32 pos= 0, null_pos; Uint32 bytes= AttributeDescriptor::getSizeInBytes(attrDescriptor); Uint32 words= (bytes + 3) / 4; @@ -347,7 +345,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) if (AttributeDescriptor::getNullable(attrDescriptor)) { - ljam(); + jam(); fragOperPtr.p->m_null_bits[ind]++; } else @@ -362,17 +360,17 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) switch (AttributeDescriptor::getArrayType(attrDescriptor)) { case NDB_ARRAYTYPE_FIXED: { - ljam(); + jam(); regTabPtr.p->m_attributes[ind].m_no_of_fixsize++; if(attrLen != 0) { - ljam(); + jam(); pos= fragOperPtr.p->m_fix_attributes_size[ind]; fragOperPtr.p->m_fix_attributes_size[ind] += words; } else { - ljam(); + jam(); Uint32 bitCount = AttributeDescriptor::getArraySize(attrDescriptor); fragOperPtr.p->m_null_bits[ind] += bitCount; } @@ -380,7 +378,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) } default: { - ljam(); + jam(); fragOperPtr.p->m_var_attributes_size[ind] += bytes; pos= regTabPtr.p->m_attributes[ind].m_no_of_varsize++; break; @@ -397,13 +395,13 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) ndbrequire(cs != NULL); Uint32 i = 0; while (i < fragOperPtr.p->charsetIndex) { - ljam(); + jam(); if (regTabPtr.p->charsetArray[i] == cs) break; i++; } if (i == fragOperPtr.p->charsetIndex) { - ljam(); + jam(); fragOperPtr.p->charsetIndex++; } ndbrequire(i < regTabPtr.p->noOfCharsets); @@ -416,7 +414,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) ERROR_INSERTED(4010) && regTabPtr.p->fragid[0] == fragId && lastAttr || ERROR_INSERTED(4011) && regTabPtr.p->fragid[1] == fragId && attrId == 0|| ERROR_INSERTED(4012) && regTabPtr.p->fragid[1] == fragId && lastAttr) { - ljam(); + jam(); terrorCode = 1; addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId); CLEAR_ERROR_INSERT_VALUE; @@ -427,7 +425,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) /* ************** TUP_ADD_ATTCONF ****************** */ /* 
**************************************************************** */ if (! lastAttr) { - ljam(); + jam(); signal->theData[0] = fragOperPtr.p->lqhPtrFrag; signal->theData[1] = lastAttr; sendSignal(fragOperPtr.p->lqhBlockrefFrag, GSN_TUP_ADD_ATTCONF, @@ -541,7 +539,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) { Uint32 fix_tupheader = regTabPtr.p->m_offsets[MM].m_fix_header_size; if(regTabPtr.p->m_attributes[MM].m_no_of_varsize != 0) - fix_tupheader += Tuple_header::HeaderSize + 1; + fix_tupheader += Tuple_header::HeaderSize + Var_part_ref::SZ32; ndbassert(fix_tupheader > 0); Uint32 noRowsPerPage = ZWORDS_ON_PAGE / fix_tupheader; Uint32 noAllocatedPages = @@ -553,7 +551,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) noAllocatedPages = allocFragPages(regFragPtr.p, noAllocatedPages); if (noAllocatedPages == 0) { - ljam(); + jam(); terrorCode = ZNO_PAGES_ALLOCATED_ERROR; addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId); return; @@ -563,7 +561,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) CreateFilegroupImplReq rep; if(regTabPtr.p->m_no_of_disk_attributes) { - ljam(); + jam(); Tablespace_client tsman(0, c_tsman, 0, 0, regFragPtr.p->m_tablespace_id); ndbrequire(tsman.get_tablespace_info(&rep) == 0); @@ -580,12 +578,12 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) if (regTabPtr.p->m_no_of_disk_attributes) { - ljam(); + jam(); if(!(getNodeState().startLevel == NodeState::SL_STARTING && getNodeState().starting.startPhase <= 4)) { Callback cb; - ljam(); + jam(); cb.m_callbackData= fragOperPtr.i; cb.m_callbackFunction = @@ -599,7 +597,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) int res= lgman.get_log_buffer(signal, sz, &cb); switch(res){ case 0: - ljam(); + jam(); signal->theData[0] = 1; return; case -1: @@ -718,11 +716,11 @@ void Dbtup::setUpKeyArray(Tablerec* const regTabPtr) Uint32* keyArray= &tableDescriptor[regTabPtr->readKeyArray].tabDescr; Uint32 countKeyAttr= 0; for (Uint32 i= 0; i < regTabPtr->m_no_of_attributes; i++) { - ljam(); + jam(); Uint32 refAttr= regTabPtr->tabDescriptor + (i * ZAD_SIZE); Uint32 attrDescriptor= getTabDescrWord(refAttr); if (AttributeDescriptor::getPrimaryKey(attrDescriptor)) { - ljam(); + jam(); AttributeHeader::init(&keyArray[countKeyAttr], i, 0); countKeyAttr++; } @@ -742,7 +740,7 @@ void Dbtup::setUpKeyArray(Tablerec* const regTabPtr) { for (Uint32 i= 0; i < regTabPtr->m_no_of_attributes; i++) { - ljam(); + jam(); Uint32 refAttr= regTabPtr->tabDescriptor + (i * ZAD_SIZE); Uint32 desc = getTabDescrWord(refAttr); Uint32 t = 0; @@ -837,9 +835,9 @@ void Dbtup::releaseFragoperrec(FragoperrecPtr fragOperPtr) void Dbtup::deleteFragTab(Tablerec* const regTabPtr, Uint32 fragId) { for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) { - ljam(); + jam(); if (regTabPtr->fragid[i] == fragId) { - ljam(); + jam(); regTabPtr->fragid[i]= RNIL; regTabPtr->fragrec[i]= RNIL; return; @@ -865,7 +863,7 @@ void Dbtup::abortAddFragOp(Signal* signal) void Dbtup::execDROP_TAB_REQ(Signal* signal) { - ljamEntry(); + jamEntry(); if (ERROR_INSERTED(4013)) { #ifdef VM_TRACE verifytabdes(); @@ -891,7 +889,7 @@ void Dbtup::releaseTabDescr(Tablerec* const regTabPtr) { Uint32 descriptor= regTabPtr->readKeyArray; if (descriptor != RNIL) { - ljam(); + jam(); Uint32 offset[10]; getTabDescrOffsets(regTabPtr, offset); @@ -922,16 +920,16 @@ void Dbtup::releaseFragment(Signal* signal, Uint32 tableId, Uint32 fragId = RNIL; Uint32 i = 0; for (i = 0; i < MAX_FRAG_PER_NODE; i++) { - ljam(); + jam(); if (tabPtr.p->fragid[i] != RNIL) { - ljam(); + jam(); fragIndex= 
tabPtr.p->fragrec[i]; fragId= tabPtr.p->fragid[i]; break; } } if (fragIndex != RNIL) { - ljam(); + jam(); signal->theData[0] = ZUNMAP_PAGES; signal->theData[1] = tabPtr.i; @@ -956,7 +954,7 @@ void Dbtup::releaseFragment(Signal* signal, Uint32 tableId, int res= lgman.get_log_buffer(signal, sz, &cb); switch(res){ case 0: - ljam(); + jam(); return; case -1: ndbrequire("NOT YET IMPLEMENTED" == 0); @@ -1087,7 +1085,7 @@ Dbtup::drop_fragment_free_extent(Signal *signal, int res= lgman.get_log_buffer(signal, sz, &cb); switch(res){ case 0: - ljam(); + jam(); return; case -1: ndbrequire("NOT YET IMPLEMENTED" == 0); @@ -1238,7 +1236,7 @@ Dbtup::drop_fragment_free_extent_log_buffer_callback(Signal* signal, void Dbtup::drop_fragment_free_var_pages(Signal* signal) { - ljam(); + jam(); Uint32 tableId = signal->theData[1]; Uint32 fragPtrI = signal->theData[2]; diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp index 73755494207..d10fabf42da 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp @@ -14,14 +14,12 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define DBTUP_C +#define DBTUP_PAG_MAN_CPP #include "Dbtup.hpp" #include <RefConvert.hpp> #include <ndb_limits.h> #include <pc.hpp> -#define ljam() { jamLine(16000 + __LINE__); } -#define ljamEntry() { jamEntryLine(16000 + __LINE__); } - /* ---------------------------------------------------------------- */ // 4) Page Memory Manager (buddy algorithm) // @@ -121,7 +119,7 @@ void Dbtup::initializePage() }//for PagePtr pagePtr; for (pagePtr.i = 0; pagePtr.i < c_page_pool.getSize(); pagePtr.i++) { - ljam(); + jam(); refresh_watch_dog(); c_page_pool.getPtr(pagePtr); pagePtr.p->physical_page_id= RNIL; @@ -153,16 +151,16 @@ void Dbtup::allocConsPages(Uint32 noOfPagesToAllocate, Uint32& allocPageRef) { if (noOfPagesToAllocate == 0){ - ljam(); + jam(); noOfPagesAllocated = 0; return; }//if Uint32 firstListToCheck = nextHigherTwoLog(noOfPagesToAllocate - 1); for (Uint32 i = firstListToCheck; i < 16; i++) { - ljam(); + jam(); if (cfreepageList[i] != RNIL) { - ljam(); + jam(); /* ---------------------------------------------------------------- */ /* PROPER AMOUNT OF PAGES WERE FOUND. NOW SPLIT THE FOUND */ /* AREA AND RETURN THE PART NOT NEEDED. */ @@ -182,11 +180,11 @@ void Dbtup::allocConsPages(Uint32 noOfPagesToAllocate, /* ---------------------------------------------------------------- */ if (firstListToCheck) { - ljam(); + jam(); for (Uint32 j = firstListToCheck - 1; (Uint32)~j; j--) { - ljam(); + jam(); if (cfreepageList[j] != RNIL) { - ljam(); + jam(); /* ---------------------------------------------------------------- */ /* SOME AREA WAS FOUND, ALLOCATE ALL OF IT. 
*/ /* ---------------------------------------------------------------- */ @@ -212,9 +210,9 @@ void Dbtup::allocConsPages(Uint32 noOfPagesToAllocate, void Dbtup::returnCommonArea(Uint32 retPageRef, Uint32 retNo) { do { - ljam(); + jam(); if (retNo == 0) { - ljam(); + jam(); return; }//if Uint32 list = nextHigherTwoLog(retNo) - 1; @@ -231,28 +229,28 @@ void Dbtup::findFreeLeftNeighbours(Uint32& allocPageRef, PagePtr pageFirstPtr, pageLastPtr; Uint32 remainAllocate = noOfPagesToAllocate - noPagesAllocated; while (allocPageRef > 0) { - ljam(); + jam(); pageLastPtr.i = allocPageRef - 1; c_page_pool.getPtr(pageLastPtr); if (pageLastPtr.p->page_state != ZFREE_COMMON) { - ljam(); + jam(); return; } else { - ljam(); + jam(); pageFirstPtr.i = pageLastPtr.p->first_cluster_page; ndbrequire(pageFirstPtr.i != RNIL); Uint32 list = nextHigherTwoLog(pageLastPtr.i - pageFirstPtr.i); removeCommonArea(pageFirstPtr.i, list); Uint32 listSize = 1 << list; if (listSize > remainAllocate) { - ljam(); + jam(); Uint32 retNo = listSize - remainAllocate; returnCommonArea(pageFirstPtr.i, retNo); allocPageRef = pageFirstPtr.i + retNo; noPagesAllocated = noOfPagesToAllocate; return; } else { - ljam(); + jam(); allocPageRef = pageFirstPtr.i; noPagesAllocated += listSize; remainAllocate -= listSize; @@ -268,32 +266,32 @@ void Dbtup::findFreeRightNeighbours(Uint32& allocPageRef, PagePtr pageFirstPtr, pageLastPtr; Uint32 remainAllocate = noOfPagesToAllocate - noPagesAllocated; if (remainAllocate == 0) { - ljam(); + jam(); return; }//if while ((allocPageRef + noPagesAllocated) < c_page_pool.getSize()) { - ljam(); + jam(); pageFirstPtr.i = allocPageRef + noPagesAllocated; c_page_pool.getPtr(pageFirstPtr); if (pageFirstPtr.p->page_state != ZFREE_COMMON) { - ljam(); + jam(); return; } else { - ljam(); + jam(); pageLastPtr.i = pageFirstPtr.p->last_cluster_page; ndbrequire(pageLastPtr.i != RNIL); Uint32 list = nextHigherTwoLog(pageLastPtr.i - pageFirstPtr.i); removeCommonArea(pageFirstPtr.i, list); Uint32 listSize = 1 << list; if (listSize > remainAllocate) { - ljam(); + jam(); Uint32 retPageRef = pageFirstPtr.i + remainAllocate; Uint32 retNo = listSize - remainAllocate; returnCommonArea(retPageRef, retNo); noPagesAllocated += remainAllocate; return; } else { - ljam(); + jam(); noPagesAllocated += listSize; remainAllocate -= listSize; }//if @@ -328,30 +326,30 @@ void Dbtup::removeCommonArea(Uint32 remPageRef, Uint32 list) c_page_pool.getPtr(remPagePtr, remPageRef); ndbrequire(list < 16); if (cfreepageList[list] == remPagePtr.i) { - ljam(); + jam(); cfreepageList[list] = remPagePtr.p->next_cluster_page; pageNextPtr.i = cfreepageList[list]; if (pageNextPtr.i != RNIL) { - ljam(); + jam(); c_page_pool.getPtr(pageNextPtr); pageNextPtr.p->prev_cluster_page = RNIL; }//if } else { pageSearchPtr.i = cfreepageList[list]; while (true) { - ljam(); + jam(); c_page_pool.getPtr(pageSearchPtr); pagePrevPtr = pageSearchPtr; pageSearchPtr.i = pageSearchPtr.p->next_cluster_page; if (pageSearchPtr.i == remPagePtr.i) { - ljam(); + jam(); break; }//if }//while pageNextPtr.i = remPagePtr.p->next_cluster_page; pagePrevPtr.p->next_cluster_page = pageNextPtr.i; if (pageNextPtr.i != RNIL) { - ljam(); + jam(); c_page_pool.getPtr(pageNextPtr); pageNextPtr.p->prev_cluster_page = pagePrevPtr.i; }//if diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp index 26373708b66..cbbbc1fa56c 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp +++ 
b/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp @@ -15,14 +15,12 @@ #define DBTUP_C +#define DBTUP_PAGE_MAP_CPP #include "Dbtup.hpp" #include <RefConvert.hpp> #include <ndb_limits.h> #include <pc.hpp> -#define ljam() { jamLine(14000 + __LINE__); } -#define ljamEntry() { jamEntryLine(14000 + __LINE__); } - // // PageMap is a service used by Dbtup to map logical page id's to physical // page id's. The mapping is needs the fragment and the logical page id to @@ -92,11 +90,11 @@ Uint32 Dbtup::getEmptyPage(Fragrecord* regFragPtr) { Uint32 pageId = regFragPtr->emptyPrimPage.firstItem; if (pageId == RNIL) { - ljam(); + jam(); allocMoreFragPages(regFragPtr); pageId = regFragPtr->emptyPrimPage.firstItem; if (pageId == RNIL) { - ljam(); + jam(); return RNIL; }//if }//if @@ -122,11 +120,11 @@ Uint32 Dbtup::getRealpid(Fragrecord* regFragPtr, Uint32 logicalPageId) loopLimit = grpPageRangePtr.p->currentIndexPos; ndbrequire(loopLimit <= 3); for (Uint32 i = 0; i <= loopLimit; i++) { - ljam(); + jam(); if (grpPageRangePtr.p->startRange[i] <= logicalPageId) { if (grpPageRangePtr.p->endRange[i] >= logicalPageId) { if (grpPageRangePtr.p->type[i] == ZLEAF) { - ljam(); + jam(); Uint32 realPageId = (logicalPageId - grpPageRangePtr.p->startRange[i]) + grpPageRangePtr.p->basePageId[i]; return realPageId; @@ -167,12 +165,12 @@ bool Dbtup::insertPageRangeTab(Fragrecord* const regFragPtr, { PageRangePtr currPageRangePtr; if (cfirstfreerange == RNIL) { - ljam(); + jam(); return false; }//if currPageRangePtr.i = regFragPtr->currentPageRange; if (currPageRangePtr.i == RNIL) { - ljam(); + jam(); /* ---------------------------------------------------------------- */ /* THE FIRST PAGE RANGE IS HANDLED WITH SPECIAL CODE */ /* ---------------------------------------------------------------- */ @@ -181,10 +179,10 @@ bool Dbtup::insertPageRangeTab(Fragrecord* const regFragPtr, currPageRangePtr.p->currentIndexPos = 0; currPageRangePtr.p->parentPtr = RNIL; } else { - ljam(); + jam(); ptrCheckGuard(currPageRangePtr, cnoOfPageRangeRec, pageRange); if (currPageRangePtr.p->currentIndexPos < 3) { - ljam(); + jam(); /* ---------------------------------------------------------------- */ /* THE SIMPLE CASE WHEN IT IS ONLY NECESSARY TO FILL IN THE */ /* NEXT EMPTY POSITION IN THE PAGE RANGE RECORD IS TREATED */ @@ -192,7 +190,7 @@ bool Dbtup::insertPageRangeTab(Fragrecord* const regFragPtr, /* ---------------------------------------------------------------- */ currPageRangePtr.p->currentIndexPos++; } else { - ljam(); + jam(); ndbrequire(currPageRangePtr.p->currentIndexPos == 3); currPageRangePtr.i = leafPageRangeFull(regFragPtr, currPageRangePtr); if (currPageRangePtr.i == RNIL) { @@ -223,15 +221,15 @@ bool Dbtup::insertPageRangeTab(Fragrecord* const regFragPtr, PageRangePtr loopPageRangePtr; loopPageRangePtr = currPageRangePtr; while (true) { - ljam(); + jam(); loopPageRangePtr.i = loopPageRangePtr.p->parentPtr; if (loopPageRangePtr.i != RNIL) { - ljam(); + jam(); ptrCheckGuard(loopPageRangePtr, cnoOfPageRangeRec, pageRange); ndbrequire(loopPageRangePtr.p->currentIndexPos < 4); loopPageRangePtr.p->endRange[loopPageRangePtr.p->currentIndexPos] += noPages; } else { - ljam(); + jam(); break; }//if }//while @@ -243,26 +241,26 @@ bool Dbtup::insertPageRangeTab(Fragrecord* const regFragPtr, void Dbtup::releaseFragPages(Fragrecord* regFragPtr) { if (regFragPtr->rootPageRange == RNIL) { - ljam(); + jam(); return; }//if PageRangePtr regPRPtr; regPRPtr.i = regFragPtr->rootPageRange; ptrCheckGuard(regPRPtr, cnoOfPageRangeRec, pageRange); 
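The DbtupPagMan.cpp hunks above belong to the page memory manager's buddy allocator: free runs of pages sit on 16 lists indexed by power-of-two size class, allocConsPages() takes from the first list large enough and hands back the unused tail, and returnCommonArea() splits a returned run into power-of-two chunks. A simplified sketch of the size-class arithmetic; smallest_list_holding() is an assumption standing in for the kernel's nextHigherTwoLog(), not its exact definition.

#include <cassert>

// Simplified stand-in for the free-list indexing behind allocConsPages()
// and returnCommonArea(): runs of free pages are filed by power-of-two size.
static unsigned smallest_list_holding(unsigned nPages)
{
  unsigned list = 0;
  while ((1u << list) < nPages)   // first list whose runs are >= nPages long
    list++;
  return list;
}

int main()
{
  assert(smallest_list_holding(1) == 0);  // a single page comes from list 0
  assert(smallest_list_holding(3) == 2);  // needs a run of 4 -> list 2
  assert(smallest_list_holding(8) == 3);  // needs a run of 8 -> list 3
  return 0;
}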
while (true) { - ljam(); + jam(); const Uint32 indexPos = regPRPtr.p->currentIndexPos; ndbrequire(indexPos < 4); const Uint32 basePageId = regPRPtr.p->basePageId[indexPos]; regPRPtr.p->basePageId[indexPos] = RNIL; if (basePageId == RNIL) { - ljam(); + jam(); /** * Finished with indexPos continue with next */ if (indexPos > 0) { - ljam(); + jam(); regPRPtr.p->currentIndexPos--; continue; }//if @@ -274,13 +272,13 @@ void Dbtup::releaseFragPages(Fragrecord* regFragPtr) releasePagerange(regPRPtr); if (parentPtr != RNIL) { - ljam(); + jam(); regPRPtr.i = parentPtr; ptrCheckGuard(regPRPtr, cnoOfPageRangeRec, pageRange); continue; }//if - ljam(); + jam(); ndbrequire(regPRPtr.i == regFragPtr->rootPageRange); initFragRange(regFragPtr); for (Uint32 i = 0; i<MAX_FREE_LIST; i++) @@ -364,7 +362,7 @@ Uint32 Dbtup::allocFragPages(Fragrecord* regFragPtr, Uint32 tafpNoAllocRequested Uint32 retPageRef = RNIL; allocConsPages(noPagesToAllocate, noOfPagesAllocated, retPageRef); if (noOfPagesAllocated == 0) { - ljam(); + jam(); return tafpPagesAllocated; }//if /* ---------------------------------------------------------------- */ @@ -373,7 +371,7 @@ Uint32 Dbtup::allocFragPages(Fragrecord* regFragPtr, Uint32 tafpNoAllocRequested /* ---------------------------------------------------------------- */ Uint32 startRange = regFragPtr->nextStartRange; if (!insertPageRangeTab(regFragPtr, retPageRef, noOfPagesAllocated)) { - ljam(); + jam(); returnCommonArea(retPageRef, noOfPagesAllocated); return tafpPagesAllocated; }//if @@ -388,7 +386,7 @@ Uint32 Dbtup::allocFragPages(Fragrecord* regFragPtr, Uint32 tafpNoAllocRequested /* ---------------------------------------------------------------- */ Uint32 prev = RNIL; for (loopPagePtr.i = retPageRef; loopPagePtr.i < loopLimit; loopPagePtr.i++) { - ljam(); + jam(); c_page_pool.getPtr(loopPagePtr); loopPagePtr.p->page_state = ZEMPTY_MM; loopPagePtr.p->frag_page_id = startRange + @@ -416,10 +414,10 @@ Uint32 Dbtup::allocFragPages(Fragrecord* regFragPtr, Uint32 tafpNoAllocRequested /* WAS ENOUGH PAGES ALLOCATED OR ARE MORE NEEDED. */ /* ---------------------------------------------------------------- */ if (tafpPagesAllocated < tafpNoAllocRequested) { - ljam(); + jam(); } else { ndbrequire(tafpPagesAllocated == tafpNoAllocRequested); - ljam(); + jam(); return tafpNoAllocRequested; }//if }//while @@ -451,15 +449,15 @@ Uint32 Dbtup::leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr curr parentPageRangePtr = currPageRangePtr; Uint32 tiprNoLevels = 1; while (true) { - ljam(); + jam(); parentPageRangePtr.i = parentPageRangePtr.p->parentPtr; if (parentPageRangePtr.i == RNIL) { - ljam(); + jam(); /* ---------------------------------------------------------------- */ /* WE HAVE REACHED THE ROOT. A NEW ROOT MUST BE ALLOCATED. */ /* ---------------------------------------------------------------- */ if (c_noOfFreePageRanges < tiprNoLevels) { - ljam(); + jam(); return RNIL; }//if PageRangePtr oldRootPRPtr; @@ -482,10 +480,10 @@ Uint32 Dbtup::leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr curr foundPageRangePtr = newRootPRPtr; break; } else { - ljam(); + jam(); ptrCheckGuard(parentPageRangePtr, cnoOfPageRangeRec, pageRange); if (parentPageRangePtr.p->currentIndexPos < 3) { - ljam(); + jam(); /* ---------------------------------------------------------------- */ /* WE HAVE FOUND AN EMPTY ENTRY IN A PAGE RANGE RECORD. 
*/ /* ALLOCATE A NEW PAGE RANGE RECORD, FILL IN THE START RANGE, */ @@ -498,7 +496,7 @@ Uint32 Dbtup::leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr curr foundPageRangePtr = parentPageRangePtr; break; } else { - ljam(); + jam(); ndbrequire(parentPageRangePtr.p->currentIndexPos == 3); /* ---------------------------------------------------------------- */ /* THE PAGE RANGE RECORD WAS FULL. FIND THE PARENT RECORD */ @@ -516,7 +514,7 @@ Uint32 Dbtup::leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr curr PageRangePtr prevPageRangePtr; prevPageRangePtr = foundPageRangePtr; if (c_noOfFreePageRanges < tiprNoLevels) { - ljam(); + jam(); return RNIL; }//if /* ---------------------------------------------------------------- */ @@ -527,7 +525,7 @@ Uint32 Dbtup::leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr curr /* ARE ALSO PROPERLY UPDATED ON THE PATH TO THE LEAF LEVEL. */ /* ---------------------------------------------------------------- */ while (true) { - ljam(); + jam(); seizePagerange(newPageRangePtr); tiprNoLevels--; ndbrequire(prevPageRangePtr.p->currentIndexPos < 4); @@ -535,13 +533,13 @@ Uint32 Dbtup::leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr curr newPageRangePtr.p->parentPtr = prevPageRangePtr.i; newPageRangePtr.p->currentIndexPos = 0; if (tiprNoLevels > 0) { - ljam(); + jam(); newPageRangePtr.p->startRange[0] = regFragPtr->nextStartRange; newPageRangePtr.p->endRange[0] = regFragPtr->nextStartRange - 1; newPageRangePtr.p->type[0] = ZNON_LEAF; prevPageRangePtr = newPageRangePtr; } else { - ljam(); + jam(); break; }//if }//while @@ -576,16 +574,16 @@ void Dbtup::errorHandler(Uint32 errorCode) { switch (errorCode) { case 0: - ljam(); + jam(); break; case 1: - ljam(); + jam(); break; case 2: - ljam(); + jam(); break; default: - ljam(); + jam(); } ndbrequire(false); }//Dbtup::errorHandler() diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp index 28f66c5620a..6e932f8a058 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp @@ -15,6 +15,7 @@ #define DBTUP_C +#define DBTUP_ROUTINES_CPP #include "Dbtup.hpp" #include <RefConvert.hpp> #include <ndb_limits.h> @@ -23,9 +24,6 @@ #include "AttributeOffset.hpp" #include <AttributeHeader.hpp> -#define ljam() { jamLine(3000 + __LINE__); } -#define ljamEntry() { jamEntryLine(3000 + __LINE__); } - void Dbtup::setUpQueryRoutines(Tablerec *regTabPtr) { @@ -40,23 +38,23 @@ Dbtup::setUpQueryRoutines(Tablerec *regTabPtr) if (AttributeDescriptor::getArrayType(attrDescr) == NDB_ARRAYTYPE_FIXED){ if (!AttributeDescriptor::getNullable(attrDescr)) { if (AttributeDescriptor::getSize(attrDescr) == 0){ - ljam(); + jam(); regTabPtr->readFunctionArray[i] = &Dbtup::readBitsNotNULL; regTabPtr->updateFunctionArray[i] = &Dbtup::updateBitsNotNULL; } else if (AttributeDescriptor::getSizeInBytes(attrDescr) == 4) { - ljam(); + jam(); regTabPtr->readFunctionArray[i]= &Dbtup::readFixedSizeTHOneWordNotNULL; regTabPtr->updateFunctionArray[i]= &Dbtup::updateFixedSizeTHOneWordNotNULL; } else if (AttributeDescriptor::getSizeInBytes(attrDescr) == 8) { - ljam(); + jam(); regTabPtr->readFunctionArray[i]= &Dbtup::readFixedSizeTHTwoWordNotNULL; regTabPtr->updateFunctionArray[i]= &Dbtup::updateFixedSizeTHTwoWordNotNULL; } else { - ljam(); + jam(); regTabPtr->readFunctionArray[i]= &Dbtup::readFixedSizeTHManyWordNotNULL; regTabPtr->updateFunctionArray[i]= @@ -64,27 +62,27 @@ 
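The setUpQueryRoutines() hunks just above and below populate per-attribute arrays of read and update handlers chosen from each attribute's descriptor (word size, nullability, charset flag); readAttributes() and updateAttributes() later dispatch through those arrays without re-examining the descriptor. A stripped-down sketch of that dispatch-table idea follows; it uses plain function pointers instead of Dbtup pointer-to-member functions, and AttrDesc is an invented descriptor, not the real packed AttributeDescriptor word.

    #include <cstdio>

    typedef unsigned int Uint32;

    struct AttrDesc { Uint32 sizeInBytes; bool nullable; };   // invented descriptor

    typedef bool (*ReadFunction)(Uint32 attrId);

    static bool readOneWordNotNULL(Uint32 id)
    { std::printf("attr %u: one-word fixed reader\n", id); return true; }
    static bool readManyWordNotNULL(Uint32 id)
    { std::printf("attr %u: many-word fixed reader\n", id); return true; }
    static bool readNULLable(Uint32 id)
    { std::printf("attr %u: nullable reader\n", id); return true; }

    enum { NO_OF_ATTRS = 4 };
    static AttrDesc attrDesc[NO_OF_ATTRS] =
      { {4, false}, {8, false}, {4, true}, {20, false} };
    static ReadFunction readFunctionArray[NO_OF_ATTRS];

    // same shape as setUpQueryRoutines(): pick a handler once per attribute
    static void setUpQueryRoutinesSketch()
    {
      for (Uint32 i = 0; i < NO_OF_ATTRS; i++) {
        if (attrDesc[i].nullable)
          readFunctionArray[i] = readNULLable;
        else if (attrDesc[i].sizeInBytes == 4)
          readFunctionArray[i] = readOneWordNotNULL;
        else
          readFunctionArray[i] = readManyWordNotNULL;
      }
    }

    int main()
    {
      setUpQueryRoutinesSketch();
      for (Uint32 i = 0; i < NO_OF_ATTRS; i++)  // readAttributes()-style dispatch
        readFunctionArray[i](i);
      return 0;
    }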
Dbtup::setUpQueryRoutines(Tablerec *regTabPtr) } // replace functions for char attribute if (AttributeOffset::getCharsetFlag(attrOffset)) { - ljam(); + jam(); regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHManyWordNotNULL; regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNotNULL; } } else { if (AttributeDescriptor::getSize(attrDescr) == 0){ - ljam(); + jam(); regTabPtr->readFunctionArray[i] = &Dbtup::readBitsNULLable; regTabPtr->updateFunctionArray[i] = &Dbtup::updateBitsNULLable; } else if (AttributeDescriptor::getSizeInBytes(attrDescr) == 4){ - ljam(); + jam(); regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHOneWordNULLable; regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable; } else if (AttributeDescriptor::getSizeInBytes(attrDescr) == 8) { - ljam(); + jam(); regTabPtr->readFunctionArray[i]= &Dbtup::readFixedSizeTHTwoWordNULLable; regTabPtr->updateFunctionArray[i]= &Dbtup::updateFixedSizeTHManyWordNULLable; } else { - ljam(); + jam(); regTabPtr->readFunctionArray[i]= &Dbtup::readFixedSizeTHManyWordNULLable; regTabPtr->updateFunctionArray[i]= @@ -92,7 +90,7 @@ Dbtup::setUpQueryRoutines(Tablerec *regTabPtr) } // replace functions for char attribute if (AttributeOffset::getCharsetFlag(attrOffset)) { - ljam(); + jam(); regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHManyWordNULLable; regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable; } @@ -144,7 +142,7 @@ Dbtup::setUpQueryRoutines(Tablerec *regTabPtr) } } else { if (AttributeDescriptor::getArrayType(attrDescr) == NDB_ARRAYTYPE_FIXED){ - ljam(); + jam(); regTabPtr->readFunctionArray[i]= &Dbtup::readDynFixedSize; regTabPtr->updateFunctionArray[i]= &Dbtup::updateDynFixedSize; } else { @@ -204,7 +202,7 @@ int Dbtup::readAttributes(KeyReqStruct *req_struct, inBufIndex++; attributeId= ahIn.getAttributeId(); descr_index= attributeId << ZAD_LOG_SIZE; - ljam(); + jam(); AttributeHeader::init(&outBuffer[tmpAttrBufIndex], attributeId, 0); ahOut= (AttributeHeader*)&outBuffer[tmpAttrBufIndex]; @@ -223,7 +221,7 @@ int Dbtup::readAttributes(KeyReqStruct *req_struct, return -1; } } else if(attributeId & AttributeHeader::PSEUDO) { - ljam(); + jam(); Uint32 sz= read_pseudo(attributeId, req_struct, outBuffer+tmpAttrBufIndex+1); @@ -252,13 +250,13 @@ Dbtup::readFixedSizeTHOneWordNotNULL(Uint32* outBuffer, ndbrequire(readOffset < req_struct->check_offset[MM]); if (newIndexBuf <= maxRead) { - ljam(); + jam(); outBuffer[indexBuf]= wordRead; ahOut->setDataSize(1); req_struct->out_buf_index= newIndexBuf; return true; } else { - ljam(); + jam(); terrorCode= ZTRY_TO_READ_TOO_MUCH_ERROR; return false; } @@ -280,14 +278,14 @@ Dbtup::readFixedSizeTHTwoWordNotNULL(Uint32* outBuffer, ndbrequire(readOffset + 1 < req_struct->check_offset[MM]); if (newIndexBuf <= maxRead) { - ljam(); + jam(); ahOut->setDataSize(2); outBuffer[indexBuf]= wordReadFirst; outBuffer[indexBuf + 1]= wordReadSecond; req_struct->out_buf_index= newIndexBuf; return true; } else { - ljam(); + jam(); terrorCode= ZTRY_TO_READ_TOO_MUCH_ERROR; return false; } @@ -311,7 +309,7 @@ Dbtup::readFixedSizeTHManyWordNotNULL(Uint32* outBuffer, if (! charsetFlag || ! 
req_struct->xfrm_flag) { Uint32 newIndexBuf = indexBuf + attrNoOfWords; if (newIndexBuf <= maxRead) { - ljam(); + jam(); ahOut->setByteSize(AttributeDescriptor::getSizeInBytes(attrDescriptor)); MEMCOPY_NO_WORDS(&outBuffer[indexBuf], &tuple_header[readOffset], @@ -319,11 +317,11 @@ Dbtup::readFixedSizeTHManyWordNotNULL(Uint32* outBuffer, req_struct->out_buf_index = newIndexBuf; return true; } else { - ljam(); + jam(); terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR; }//if } else { - ljam(); + jam(); Tablerec* regTabPtr = tabptr.p; Uint32 srcBytes = AttributeDescriptor::getSizeInBytes(attrDescriptor); uchar* dstPtr = (uchar*)&outBuffer[indexBuf]; @@ -340,7 +338,7 @@ Dbtup::readFixedSizeTHManyWordNotNULL(Uint32* outBuffer, Uint32 dstLen = xmul * (srcBytes - lb); Uint32 maxIndexBuf = indexBuf + (dstLen >> 2); if (maxIndexBuf <= maxRead && ok) { - ljam(); + jam(); int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len); ndbrequire(n != -1); int m = n; @@ -353,7 +351,7 @@ Dbtup::readFixedSizeTHManyWordNotNULL(Uint32* outBuffer, req_struct->out_buf_index = newIndexBuf; return true; } else { - ljam(); + jam(); terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR; } } @@ -367,13 +365,13 @@ Dbtup::readFixedSizeTHOneWordNULLable(Uint32* outBuffer, Uint32 attrDes2) { if (!nullFlagCheck(req_struct, attrDes2)) { - ljam(); + jam(); return readFixedSizeTHOneWordNotNULL(outBuffer, req_struct, ahOut, attrDes2); } else { - ljam(); + jam(); ahOut->setNULL(); return true; } @@ -386,13 +384,13 @@ Dbtup::readFixedSizeTHTwoWordNULLable(Uint32* outBuffer, Uint32 attrDes2) { if (!nullFlagCheck(req_struct, attrDes2)) { - ljam(); + jam(); return readFixedSizeTHTwoWordNotNULL(outBuffer, req_struct, ahOut, attrDes2); } else { - ljam(); + jam(); ahOut->setNULL(); return true; } @@ -405,13 +403,13 @@ Dbtup::readFixedSizeTHManyWordNULLable(Uint32* outBuffer, Uint32 attrDes2) { if (!nullFlagCheck(req_struct, attrDes2)) { - ljam(); + jam(); return readFixedSizeTHManyWordNotNULL(outBuffer, req_struct, ahOut, attrDes2); } else { - ljam(); + jam(); ahOut->setNULL(); return true; } @@ -423,9 +421,9 @@ Dbtup::readFixedSizeTHZeroWordNULLable(Uint32* outBuffer, AttributeHeader* ahOut, Uint32 attrDes2) { - ljam(); + jam(); if (nullFlagCheck(req_struct, attrDes2)) { - ljam(); + jam(); ahOut->setNULL(); } return true; @@ -477,7 +475,7 @@ Dbtup::readVarSizeNotNULL(Uint32* out_buffer, if (! charsetFlag || ! 
req_struct->xfrm_flag) { if (new_index <= max_read) { - ljam(); + jam(); ah_out->setByteSize(vsize_in_bytes); out_buffer[index_buf + (vsize_in_bytes >> 2)] = 0; memcpy(out_buffer+index_buf, @@ -489,7 +487,7 @@ Dbtup::readVarSizeNotNULL(Uint32* out_buffer, } else { - ljam(); + jam(); Tablerec* regTabPtr = tabptr.p; Uint32 maxBytes = AttributeDescriptor::getSizeInBytes(attr_descriptor); Uint32 srcBytes = vsize_in_bytes; @@ -508,7 +506,7 @@ Dbtup::readVarSizeNotNULL(Uint32* out_buffer, Uint32 dstLen = xmul * (maxBytes - lb); Uint32 maxIndexBuf = index_buf + (dstLen >> 2); if (maxIndexBuf <= max_read && ok) { - ljam(); + jam(); int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len); ndbrequire(n != -1); int m = n; @@ -522,7 +520,7 @@ Dbtup::readVarSizeNotNULL(Uint32* out_buffer, return true; } } - ljam(); + jam(); terrorCode= ZTRY_TO_READ_TOO_MUCH_ERROR; return false; } @@ -534,13 +532,13 @@ Dbtup::readVarSizeNULLable(Uint32* outBuffer, Uint32 attrDes2) { if (!nullFlagCheck(req_struct, attrDes2)) { - ljam(); + jam(); return readVarSizeNotNULL(outBuffer, req_struct, ahOut, attrDes2); } else { - ljam(); + jam(); ahOut->setNULL(); return true; } @@ -552,7 +550,7 @@ Dbtup::readDynFixedSize(Uint32* outBuffer, AttributeHeader* ahOut, Uint32 attrDes2) { - ljam(); + jam(); terrorCode= ZVAR_SIZED_NOT_SUPPORTED; return false; } @@ -563,7 +561,7 @@ Dbtup::readDynVarSize(Uint32* outBuffer, AttributeHeader* ahOut, Uint32 attrDes2) { - ljam(); + jam(); terrorCode= ZVAR_SIZED_NOT_SUPPORTED; return false; }//Dbtup::readDynBigVarSize() @@ -586,7 +584,7 @@ Dbtup::readDiskFixedSizeNotNULL(Uint32* outBuffer, if (! charsetFlag || ! req_struct->xfrm_flag) { Uint32 newIndexBuf = indexBuf + attrNoOfWords; if (newIndexBuf <= maxRead) { - ljam(); + jam(); ahOut->setByteSize(AttributeDescriptor::getSizeInBytes(attrDescriptor)); MEMCOPY_NO_WORDS(&outBuffer[indexBuf], &tuple_header[readOffset], @@ -594,11 +592,11 @@ Dbtup::readDiskFixedSizeNotNULL(Uint32* outBuffer, req_struct->out_buf_index = newIndexBuf; return true; } else { - ljam(); + jam(); terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR; }//if } else { - ljam(); + jam(); Tablerec* regTabPtr = tabptr.p; Uint32 srcBytes = AttributeDescriptor::getSizeInBytes(attrDescriptor); uchar* dstPtr = (uchar*)&outBuffer[indexBuf]; @@ -615,7 +613,7 @@ Dbtup::readDiskFixedSizeNotNULL(Uint32* outBuffer, Uint32 dstLen = xmul * (srcBytes - lb); Uint32 maxIndexBuf = indexBuf + (dstLen >> 2); if (maxIndexBuf <= maxRead && ok) { - ljam(); + jam(); int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len); ndbrequire(n != -1); int m = n; @@ -628,7 +626,7 @@ Dbtup::readDiskFixedSizeNotNULL(Uint32* outBuffer, req_struct->out_buf_index = newIndexBuf; return true; } else { - ljam(); + jam(); terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR; } } @@ -642,13 +640,13 @@ Dbtup::readDiskFixedSizeNULLable(Uint32* outBuffer, Uint32 attrDes2) { if (!disk_nullFlagCheck(req_struct, attrDes2)) { - ljam(); + jam(); return readDiskFixedSizeNotNULL(outBuffer, req_struct, ahOut, attrDes2); } else { - ljam(); + jam(); ahOut->setNULL(); return true; } @@ -677,7 +675,7 @@ Dbtup::readDiskVarSizeNotNULL(Uint32* out_buffer, ndbrequire(vsize_in_words <= max_var_size); if (new_index <= max_read) { - ljam(); + jam(); ah_out->setByteSize(vsize_in_bytes); memcpy(out_buffer+index_buf, req_struct->m_var_data[DD].m_data_ptr+var_attr_pos, @@ -685,7 +683,7 @@ Dbtup::readDiskVarSizeNotNULL(Uint32* out_buffer, req_struct->out_buf_index= new_index; return true; } else { - ljam(); + jam(); terrorCode= 
ZTRY_TO_READ_TOO_MUCH_ERROR; return false; } @@ -698,13 +696,13 @@ Dbtup::readDiskVarSizeNULLable(Uint32* outBuffer, Uint32 attrDes2) { if (!disk_nullFlagCheck(req_struct, attrDes2)) { - ljam(); + jam(); return readDiskVarSizeNotNULL(outBuffer, req_struct, ahOut, attrDes2); } else { - ljam(); + jam(); ahOut->setNULL(); return true; } @@ -746,13 +744,13 @@ int Dbtup::updateAttributes(KeyReqStruct *req_struct, if (checkUpdateOfPrimaryKey(req_struct, &inBuffer[inBufIndex], regTabPtr)) { - ljam(); + jam(); terrorCode= ZTRY_UPDATE_PRIMARY_KEY; return -1; } } UpdateFunction f= regTabPtr->updateFunctionArray[attributeId]; - ljam(); + jam(); req_struct->attr_descriptor= attrDescriptor; req_struct->changeMask.set(attributeId); if (attributeId >= 64) { @@ -768,13 +766,13 @@ int Dbtup::updateAttributes(KeyReqStruct *req_struct, inBufIndex= req_struct->in_buf_index; continue; } else { - ljam(); + jam(); return -1; } } else if(attributeId == AttributeHeader::DISK_REF) { - ljam(); + jam(); Uint32 sz= ahIn.getDataSize(); ndbrequire(sz == 2); req_struct->m_tuple_ptr->m_header_bits |= Tuple_header::DISK_PART; @@ -785,7 +783,7 @@ int Dbtup::updateAttributes(KeyReqStruct *req_struct, } else { - ljam(); + jam(); terrorCode= ZATTRIBUTE_ID_ERROR; return -1; } @@ -839,13 +837,13 @@ Dbtup::checkUpdateOfPrimaryKey(KeyReqStruct* req_struct, ndbrequire(req_struct->out_buf_index == ahOut->getDataSize()); if (ahIn.getDataSize() != ahOut->getDataSize()) { - ljam(); + jam(); return true; } if (memcmp(&keyReadBuffer[0], &updateBuffer[1], req_struct->out_buf_index << 2) != 0) { - ljam(); + jam(); return true; } return false; @@ -868,17 +866,17 @@ Dbtup::updateFixedSizeTHOneWordNotNULL(Uint32* inBuffer, if (newIndex <= inBufLen) { Uint32 updateWord= inBuffer[indexBuf + 1]; if (!nullIndicator) { - ljam(); + jam(); req_struct->in_buf_index= newIndex; tuple_header[updateOffset]= updateWord; return true; } else { - ljam(); + jam(); terrorCode= ZNOT_NULL_ATTR; return false; } } else { - ljam(); + jam(); terrorCode= ZAI_INCONSISTENCY_ERROR; return false; } @@ -903,18 +901,18 @@ Dbtup::updateFixedSizeTHTwoWordNotNULL(Uint32* inBuffer, Uint32 updateWord1= inBuffer[indexBuf + 1]; Uint32 updateWord2= inBuffer[indexBuf + 2]; if (!nullIndicator) { - ljam(); + jam(); req_struct->in_buf_index= newIndex; tuple_header[updateOffset]= updateWord1; tuple_header[updateOffset + 1]= updateWord2; return true; } else { - ljam(); + jam(); terrorCode= ZNOT_NULL_ATTR; return false; } } else { - ljam(); + jam(); terrorCode= ZAI_INCONSISTENCY_ERROR; return false; } @@ -940,9 +938,9 @@ Dbtup::updateFixedSizeTHManyWordNotNULL(Uint32* inBuffer, if (newIndex <= inBufLen) { if (!nullIndicator) { - ljam(); + jam(); if (charsetFlag) { - ljam(); + jam(); Tablerec* regTabPtr = tabptr.p; Uint32 typeId = AttributeDescriptor::getType(attrDescriptor); Uint32 bytes = AttributeDescriptor::getSizeInBytes(attrDescriptor); @@ -954,14 +952,14 @@ Dbtup::updateFixedSizeTHManyWordNotNULL(Uint32* inBuffer, const char* ssrc = (const char*)&inBuffer[indexBuf + 1]; Uint32 lb, len; if (! 
NdbSqlUtil::get_var_length(typeId, ssrc, bytes, lb, len)) { - ljam(); + jam(); terrorCode = ZINVALID_CHAR_FORMAT; return false; } // fast fix bug#7340 if (typeId != NDB_TYPE_TEXT && (*cs->cset->well_formed_len)(cs, ssrc + lb, ssrc + lb + len, ZNIL, ¬_used) != len) { - ljam(); + jam(); terrorCode = ZINVALID_CHAR_FORMAT; return false; } @@ -973,12 +971,12 @@ Dbtup::updateFixedSizeTHManyWordNotNULL(Uint32* inBuffer, return true; } else { - ljam(); + jam(); terrorCode= ZNOT_NULL_ATTR; return false; } } else { - ljam(); + jam(); terrorCode= ZAI_INCONSISTENCY_ERROR; return false; } @@ -996,7 +994,7 @@ Dbtup::updateFixedSizeTHManyWordNULLable(Uint32* inBuffer, Uint32 *bits= req_struct->m_tuple_ptr->get_null_bits(regTabPtr); if (!nullIndicator) { - ljam(); + jam(); BitmaskImpl::clear(regTabPtr->m_offsets[MM].m_null_words, bits, pos); return updateFixedSizeTHManyWordNotNULL(inBuffer, req_struct, @@ -1005,11 +1003,11 @@ Dbtup::updateFixedSizeTHManyWordNULLable(Uint32* inBuffer, Uint32 newIndex= req_struct->in_buf_index + 1; if (newIndex <= req_struct->in_buf_len) { BitmaskImpl::set(regTabPtr->m_offsets[MM].m_null_words, bits, pos); - ljam(); + jam(); req_struct->in_buf_index= newIndex; return true; } else { - ljam(); + jam(); terrorCode= ZAI_INCONSISTENCY_ERROR; return false; } @@ -1043,7 +1041,7 @@ Dbtup::updateVarSizeNotNULL(Uint32* in_buffer, if (new_index <= in_buf_len && vsize_in_words <= max_var_size) { if (!null_ind) { - ljam(); + jam(); var_attr_pos= vpos_array[var_index]; var_data_start= req_struct->m_var_data[MM].m_data_ptr; vpos_array[var_index+idx]= var_attr_pos+size_in_bytes; @@ -1054,12 +1052,12 @@ Dbtup::updateVarSizeNotNULL(Uint32* in_buffer, size_in_bytes); return true; } else { - ljam(); + jam(); terrorCode= ZNOT_NULL_ATTR; return false; } } else { - ljam(); + jam(); terrorCode= ZAI_INCONSISTENCY_ERROR; return false; } @@ -1079,7 +1077,7 @@ Dbtup::updateVarSizeNULLable(Uint32* inBuffer, Uint32 idx= req_struct->m_var_data[MM].m_var_len_offset; if (!nullIndicator) { - ljam(); + jam(); BitmaskImpl::clear(regTabPtr->m_offsets[MM].m_null_words, bits, pos); return updateVarSizeNotNULL(inBuffer, req_struct, @@ -1089,13 +1087,13 @@ Dbtup::updateVarSizeNULLable(Uint32* inBuffer, Uint32 var_index= AttributeOffset::getOffset(attrDes2); Uint32 var_pos= req_struct->var_pos_array[var_index]; if (newIndex <= req_struct->in_buf_len) { - ljam(); + jam(); BitmaskImpl::set(regTabPtr->m_offsets[MM].m_null_words, bits, pos); req_struct->var_pos_array[var_index+idx]= var_pos; req_struct->in_buf_index= newIndex; return true; } else { - ljam(); + jam(); terrorCode= ZAI_INCONSISTENCY_ERROR; return false; } @@ -1107,7 +1105,7 @@ Dbtup::updateDynFixedSize(Uint32* inBuffer, KeyReqStruct *req_struct, Uint32 attrDes2) { - ljam(); + jam(); terrorCode= ZVAR_SIZED_NOT_SUPPORTED; return false; } @@ -1117,7 +1115,7 @@ Dbtup::updateDynVarSize(Uint32* inBuffer, KeyReqStruct *req_struct, Uint32 attrDes2) { - ljam(); + jam(); terrorCode= ZVAR_SIZED_NOT_SUPPORTED; return false; } @@ -1215,7 +1213,7 @@ Dbtup::readBitsNotNULL(Uint32* outBuffer, Uint32 maxRead = req_struct->max_read; Uint32 *bits= req_struct->m_tuple_ptr->get_null_bits(regTabPtr); if (newIndexBuf <= maxRead) { - ljam(); + jam(); ahOut->setDataSize((bitCount + 31) >> 5); req_struct->out_buf_index = newIndexBuf; @@ -1224,7 +1222,7 @@ Dbtup::readBitsNotNULL(Uint32* outBuffer, return true; } else { - ljam(); + jam(); terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR; return false; }//if @@ -1248,20 +1246,20 @@ Dbtup::readBitsNULLable(Uint32* outBuffer, 
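The readBitsNotNULL()/readBitsNULLable() hunks above and below (and their DISK twins later in this file) return bit columns by copying bitCount bits out of the tuple's null-bits words with BitmaskImpl::getField, after the usual newIndexBuf <= maxRead bounds check. The sketch below is a hand-rolled illustration of that kind of cross-word bit extraction; it is not the kernel's BitmaskImpl and ignores the null-flag offset handling.

    #include <cstdio>

    typedef unsigned int Uint32;

    // Copy 'count' bits starting at bit 'pos' of src[] into dst[], LSB first.
    static void getFieldSketch(const Uint32* src, Uint32 pos, Uint32 count,
                               Uint32* dst)
    {
      for (Uint32 i = 0; i < count; i++) {
        Uint32 bit = (src[(pos + i) >> 5] >> ((pos + i) & 31)) & 1;
        if ((i & 31) == 0)
          dst[i >> 5] = 0;                    // clear each destination word once
        dst[i >> 5] |= bit << (i & 31);
      }
    }

    int main()
    {
      Uint32 words[2] = { 0xF0F0F0F0u, 0x0000000Fu };
      Uint32 out[1];
      getFieldSketch(words, 28, 8, out);      // 8 bits spanning a word boundary
      std::printf("extracted bits: 0x%02X\n", out[0]);   // prints 0xFF
      return 0;
    }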
if(BitmaskImpl::get(regTabPtr->m_offsets[MM].m_null_words, bits, pos)) { - ljam(); + jam(); ahOut->setNULL(); return true; } if (newIndexBuf <= maxRead) { - ljam(); + jam(); ahOut->setDataSize((bitCount + 31) >> 5); req_struct->out_buf_index = newIndexBuf; BitmaskImpl::getField(regTabPtr->m_offsets[MM].m_null_words, bits, pos+1, bitCount, outBuffer+indexBuf); return true; } else { - ljam(); + jam(); terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR; return false; }//if @@ -1290,12 +1288,12 @@ Dbtup::updateBitsNotNULL(Uint32* inBuffer, req_struct->in_buf_index = newIndex; return true; } else { - ljam(); + jam(); terrorCode = ZNOT_NULL_ATTR; return false; }//if } else { - ljam(); + jam(); terrorCode = ZAI_INCONSISTENCY_ERROR; return false; }//if @@ -1328,13 +1326,13 @@ Dbtup::updateBitsNULLable(Uint32* inBuffer, Uint32 newIndex = indexBuf + 1; if (newIndex <= req_struct->in_buf_len) { - ljam(); + jam(); BitmaskImpl::set(regTabPtr->m_offsets[MM].m_null_words, bits, pos); req_struct->in_buf_index = newIndex; return true; } else { - ljam(); + jam(); terrorCode = ZAI_INCONSISTENCY_ERROR; return false; }//if @@ -1361,9 +1359,9 @@ Dbtup::updateDiskFixedSizeNotNULL(Uint32* inBuffer, if (newIndex <= inBufLen) { if (!nullIndicator) { - ljam(); + jam(); if (charsetFlag) { - ljam(); + jam(); Tablerec* regTabPtr = tabptr.p; Uint32 typeId = AttributeDescriptor::getType(attrDescriptor); Uint32 bytes = AttributeDescriptor::getSizeInBytes(attrDescriptor); @@ -1375,14 +1373,14 @@ Dbtup::updateDiskFixedSizeNotNULL(Uint32* inBuffer, const char* ssrc = (const char*)&inBuffer[indexBuf + 1]; Uint32 lb, len; if (! NdbSqlUtil::get_var_length(typeId, ssrc, bytes, lb, len)) { - ljam(); + jam(); terrorCode = ZINVALID_CHAR_FORMAT; return false; } // fast fix bug#7340 if (typeId != NDB_TYPE_TEXT && (*cs->cset->well_formed_len)(cs, ssrc + lb, ssrc + lb + len, ZNIL, ¬_used) != len) { - ljam(); + jam(); terrorCode = ZINVALID_CHAR_FORMAT; return false; } @@ -1393,12 +1391,12 @@ Dbtup::updateDiskFixedSizeNotNULL(Uint32* inBuffer, noOfWords); return true; } else { - ljam(); + jam(); terrorCode= ZNOT_NULL_ATTR; return false; } } else { - ljam(); + jam(); terrorCode= ZAI_INCONSISTENCY_ERROR; return false; } @@ -1416,7 +1414,7 @@ Dbtup::updateDiskFixedSizeNULLable(Uint32* inBuffer, Uint32 *bits= req_struct->m_disk_ptr->get_null_bits(regTabPtr, DD); if (!nullIndicator) { - ljam(); + jam(); BitmaskImpl::clear(regTabPtr->m_offsets[DD].m_null_words, bits, pos); return updateDiskFixedSizeNotNULL(inBuffer, req_struct, @@ -1425,11 +1423,11 @@ Dbtup::updateDiskFixedSizeNULLable(Uint32* inBuffer, Uint32 newIndex= req_struct->in_buf_index + 1; if (newIndex <= req_struct->in_buf_len) { BitmaskImpl::set(regTabPtr->m_offsets[DD].m_null_words, bits, pos); - ljam(); + jam(); req_struct->in_buf_index= newIndex; return true; } else { - ljam(); + jam(); terrorCode= ZAI_INCONSISTENCY_ERROR; return false; } @@ -1463,7 +1461,7 @@ Dbtup::updateDiskVarSizeNotNULL(Uint32* in_buffer, if (new_index <= in_buf_len && vsize_in_words <= max_var_size) { if (!null_ind) { - ljam(); + jam(); var_attr_pos= vpos_array[var_index]; var_data_start= req_struct->m_var_data[DD].m_data_ptr; vpos_array[var_index+idx]= var_attr_pos+size_in_bytes; @@ -1474,12 +1472,12 @@ Dbtup::updateDiskVarSizeNotNULL(Uint32* in_buffer, size_in_bytes); return true; } else { - ljam(); + jam(); terrorCode= ZNOT_NULL_ATTR; return false; } } else { - ljam(); + jam(); terrorCode= ZAI_INCONSISTENCY_ERROR; return false; } @@ -1499,7 +1497,7 @@ Dbtup::updateDiskVarSizeNULLable(Uint32* inBuffer, Uint32 idx= 
req_struct->m_var_data[DD].m_var_len_offset; if (!nullIndicator) { - ljam(); + jam(); BitmaskImpl::clear(regTabPtr->m_offsets[DD].m_null_words, bits, pos); return updateDiskVarSizeNotNULL(inBuffer, req_struct, @@ -1509,13 +1507,13 @@ Dbtup::updateDiskVarSizeNULLable(Uint32* inBuffer, Uint32 var_index= AttributeOffset::getOffset(attrDes2); Uint32 var_pos= req_struct->var_pos_array[var_index]; if (newIndex <= req_struct->in_buf_len) { - ljam(); + jam(); BitmaskImpl::set(regTabPtr->m_offsets[DD].m_null_words, bits, pos); req_struct->var_pos_array[var_index+idx]= var_pos; req_struct->in_buf_index= newIndex; return true; } else { - ljam(); + jam(); terrorCode= ZAI_INCONSISTENCY_ERROR; return false; } @@ -1537,7 +1535,7 @@ Dbtup::readDiskBitsNotNULL(Uint32* outBuffer, Uint32 maxRead = req_struct->max_read; Uint32 *bits= req_struct->m_disk_ptr->get_null_bits(regTabPtr, DD); if (newIndexBuf <= maxRead) { - ljam(); + jam(); ahOut->setDataSize((bitCount + 31) >> 5); req_struct->out_buf_index = newIndexBuf; @@ -1546,7 +1544,7 @@ Dbtup::readDiskBitsNotNULL(Uint32* outBuffer, return true; } else { - ljam(); + jam(); terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR; return false; }//if @@ -1570,20 +1568,20 @@ Dbtup::readDiskBitsNULLable(Uint32* outBuffer, if(BitmaskImpl::get(regTabPtr->m_offsets[DD].m_null_words, bits, pos)) { - ljam(); + jam(); ahOut->setNULL(); return true; } if (newIndexBuf <= maxRead) { - ljam(); + jam(); ahOut->setDataSize((bitCount + 31) >> 5); req_struct->out_buf_index = newIndexBuf; BitmaskImpl::getField(regTabPtr->m_offsets[DD].m_null_words, bits, pos+1, bitCount, outBuffer+indexBuf); return true; } else { - ljam(); + jam(); terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR; return false; }//if @@ -1612,12 +1610,12 @@ Dbtup::updateDiskBitsNotNULL(Uint32* inBuffer, req_struct->in_buf_index = newIndex; return true; } else { - ljam(); + jam(); terrorCode = ZNOT_NULL_ATTR; return false; }//if } else { - ljam(); + jam(); terrorCode = ZAI_INCONSISTENCY_ERROR; return false; }//if @@ -1650,13 +1648,13 @@ Dbtup::updateDiskBitsNULLable(Uint32* inBuffer, Uint32 newIndex = indexBuf + 1; if (newIndex <= req_struct->in_buf_len) { - ljam(); + jam(); BitmaskImpl::set(regTabPtr->m_offsets[DD].m_null_words, bits, pos); req_struct->in_buf_index = newIndex; return true; } else { - ljam(); + jam(); terrorCode = ZAI_INCONSISTENCY_ERROR; return false; }//if diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp index aad68c5ed17..1334cbebe3e 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp @@ -14,6 +14,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define DBTUP_C +#define DBTUP_SCAN_CPP #include "Dbtup.hpp" #include <signaldata/AccScan.hpp> #include <signaldata/NextScan.hpp> @@ -597,7 +598,7 @@ Dbtup::scanNext(Signal* signal, ScanOpPtr scanPtr) Uint32 lcp_list = fragPtr.p->m_lcp_keep_list; Uint32 size = table.m_offsets[mm].m_fix_header_size + - (bits & ScanOp::SCAN_VS ? Tuple_header::HeaderSize + 1: 0); + (bits&ScanOp::SCAN_VS ? 
Tuple_header::HeaderSize + Var_part_ref::SZ32 : 0); if (lcp && lcp_list != RNIL) goto found_lcp_keep; diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp index b2c42418418..c1ccf5952da 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp @@ -15,14 +15,12 @@ #define DBTUP_C +#define DBTUP_STORE_PROC_DEF_CPP #include "Dbtup.hpp" #include <RefConvert.hpp> #include <ndb_limits.h> #include <pc.hpp> -#define ljam() { jamLine(18000 + __LINE__); } -#define ljamEntry() { jamEntryLine(18000 + __LINE__); } - /* ---------------------------------------------------------------- */ /* ---------------------------------------------------------------- */ /* ------------ADD/DROP STORED PROCEDURE MODULE ------------------- */ @@ -32,7 +30,7 @@ void Dbtup::execSTORED_PROCREQ(Signal* signal) { OperationrecPtr regOperPtr; TablerecPtr regTabPtr; - ljamEntry(); + jamEntry(); regOperPtr.i = signal->theData[0]; c_operation_pool.getPtr(regOperPtr); regTabPtr.i = signal->theData[1]; @@ -46,17 +44,17 @@ void Dbtup::execSTORED_PROCREQ(Signal* signal) ndbrequire(regTabPtr.p->tableStatus == DEFINED); switch (requestInfo) { case ZSCAN_PROCEDURE: - ljam(); + jam(); scanProcedure(signal, regOperPtr.p, signal->theData[4]); break; case ZCOPY_PROCEDURE: - ljam(); + jam(); copyProcedure(signal, regTabPtr, regOperPtr.p); break; case ZSTORED_PROCEDURE_DELETE: - ljam(); + jam(); deleteScanProcedure(signal, regOperPtr.p); break; default: @@ -124,14 +122,14 @@ void Dbtup::copyProcedure(Signal* signal, AttributeHeader::init(&signal->theData[length + 1], Ti, 0); length++; if (length == 24) { - ljam(); + jam(); ndbrequire(storedProcedureAttrInfo(signal, regOperPtr, signal->theData+1, length, true)); length = 0; }//if }//for if (length != 0) { - ljam(); + jam(); ndbrequire(storedProcedureAttrInfo(signal, regOperPtr, signal->theData+1, length, true)); }//if @@ -155,7 +153,7 @@ bool Dbtup::storedProcedureAttrInfo(Signal* signal, ndbrequire(regOperPtr->currentAttrinbufLen <= regOperPtr->attrinbufLen); if ((RnoFree > MIN_ATTRBUF) || (copyProcedure)) { - ljam(); + jam(); regAttrPtr.i = cfirstfreeAttrbufrec; ptrCheckGuard(regAttrPtr, cnoOfAttrbufrec, attrbufrec); regAttrPtr.p->attrbuf[ZBUF_DATA_LEN] = 0; @@ -163,18 +161,18 @@ bool Dbtup::storedProcedureAttrInfo(Signal* signal, cnoFreeAttrbufrec = RnoFree - 1; regAttrPtr.p->attrbuf[ZBUF_NEXT] = RNIL; } else { - ljam(); + jam(); storedSeizeAttrinbufrecErrorLab(signal, regOperPtr); return false; }//if if (regOperPtr->firstAttrinbufrec == RNIL) { - ljam(); + jam(); regOperPtr->firstAttrinbufrec = regAttrPtr.i; }//if regAttrPtr.p->attrbuf[ZBUF_NEXT] = RNIL; if (regOperPtr->lastAttrinbufrec != RNIL) { AttrbufrecPtr tempAttrinbufptr; - ljam(); + jam(); tempAttrinbufptr.i = regOperPtr->lastAttrinbufrec; ptrCheckGuard(tempAttrinbufptr, cnoOfAttrbufrec, attrbufrec); tempAttrinbufptr.p->attrbuf[ZBUF_NEXT] = regAttrPtr.i; @@ -187,7 +185,7 @@ bool Dbtup::storedProcedureAttrInfo(Signal* signal, length); if (regOperPtr->currentAttrinbufLen < regOperPtr->attrinbufLen) { - ljam(); + jam(); return true; }//if if (ERROR_INSERTED(4005) && !copyProcedure) { diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp index b85a2a8394d..6406bdefe1e 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp @@ -15,14 
+15,12 @@ #define DBTUP_C +#define DBTUP_TAB_DES_MAN_CPP #include "Dbtup.hpp" #include <RefConvert.hpp> #include <ndb_limits.h> #include <pc.hpp> -#define ljam() { jamLine(22000 + __LINE__); } -#define ljamEntry() { jamEntryLine(22000 + __LINE__); } - /* * TABLE DESCRIPTOR MEMORY MANAGER * @@ -65,30 +63,30 @@ Uint32 Dbtup::allocTabDescr(const Tablerec* regTabPtr, Uint32* offset) allocSize = (((allocSize - 1) >> 4) + 1) << 4; Uint32 list = nextHigherTwoLog(allocSize - 1); /* CALCULATE WHICH LIST IT BELONGS TO */ for (Uint32 i = list; i < 16; i++) { - ljam(); + jam(); if (cfreeTdList[i] != RNIL) { - ljam(); + jam(); reference = cfreeTdList[i]; removeTdArea(reference, i); /* REMOVE THE AREA FROM THE FREELIST */ Uint32 retNo = (1 << i) - allocSize; /* CALCULATE THE DIFFERENCE */ if (retNo >= ZTD_FREE_SIZE) { - ljam(); + jam(); // return unused words, of course without attempting left merge Uint32 retRef = reference + allocSize; freeTabDescr(retRef, retNo, false); } else { - ljam(); + jam(); allocSize = 1 << i; }//if break; }//if }//for if (reference == RNIL) { - ljam(); + jam(); terrorCode = ZMEM_NOTABDESCR_ERROR; return RNIL; } else { - ljam(); + jam(); setTabDescrWord((reference + allocSize) - ZTD_TR_TYPE, ZTD_TYPE_NORMAL); setTabDescrWord(reference + ZTD_DATASIZE, allocSize); @@ -105,7 +103,7 @@ void Dbtup::freeTabDescr(Uint32 retRef, Uint32 retNo, bool normal) { itdaMergeTabDescr(retRef, retNo, normal); /* MERGE WITH POSSIBLE NEIGHBOURS */ while (retNo >= ZTD_FREE_SIZE) { - ljam(); + jam(); Uint32 list = nextHigherTwoLog(retNo); list--; /* RETURN TO NEXT LOWER LIST */ Uint32 sizeOfChunk = 1 << list; @@ -136,7 +134,7 @@ void Dbtup::insertTdArea(Uint32 tabDesRef, Uint32 list) setTabDescrWord(tabDesRef + ZTD_FL_HEADER, ZTD_TYPE_FREE); setTabDescrWord(tabDesRef + ZTD_FL_NEXT, cfreeTdList[list]); if (cfreeTdList[list] != RNIL) { - ljam(); /* PREVIOUSLY EMPTY SLOT */ + jam(); /* PREVIOUSLY EMPTY SLOT */ setTabDescrWord(cfreeTdList[list] + ZTD_FL_PREV, tabDesRef); }//if cfreeTdList[list] = tabDesRef; /* RELINK THE LIST */ @@ -156,28 +154,28 @@ void Dbtup::itdaMergeTabDescr(Uint32& retRef, Uint32& retNo, bool normal) { // merge right while ((retRef + retNo) < cnoOfTabDescrRec) { - ljam(); + jam(); Uint32 tabDesRef = retRef + retNo; Uint32 headerWord = getTabDescrWord(tabDesRef + ZTD_FL_HEADER); if (headerWord == ZTD_TYPE_FREE) { - ljam(); + jam(); Uint32 sizeOfMergedPart = getTabDescrWord(tabDesRef + ZTD_FL_SIZE); retNo += sizeOfMergedPart; Uint32 list = nextHigherTwoLog(sizeOfMergedPart - 1); removeTdArea(tabDesRef, list); } else { - ljam(); + jam(); break; } } // merge left const bool mergeLeft = normal; while (mergeLeft && retRef > 0) { - ljam(); + jam(); Uint32 trailerWord = getTabDescrWord(retRef - ZTD_TR_TYPE); if (trailerWord == ZTD_TYPE_FREE) { - ljam(); + jam(); Uint32 sizeOfMergedPart = getTabDescrWord(retRef - ZTD_TR_SIZE); ndbrequire(retRef >= sizeOfMergedPart); retRef -= sizeOfMergedPart; @@ -185,7 +183,7 @@ void Dbtup::itdaMergeTabDescr(Uint32& retRef, Uint32& retNo, bool normal) Uint32 list = nextHigherTwoLog(sizeOfMergedPart - 1); removeTdArea(retRef, list); } else { - ljam(); + jam(); break; } } @@ -213,15 +211,15 @@ void Dbtup::removeTdArea(Uint32 tabDesRef, Uint32 list) setTabDescrWord((tabDesRef + (1 << list)) - ZTD_TR_TYPE, ZTD_TYPE_NORMAL); if (tabDesRef == cfreeTdList[list]) { - ljam(); + jam(); cfreeTdList[list] = tabDescrNextPtr; /* RELINK THE LIST */ }//if if (tabDescrNextPtr != RNIL) { - ljam(); + jam(); setTabDescrWord(tabDescrNextPtr + ZTD_FL_PREV, tabDescrPrevPtr); 
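The table descriptor manager in the DbtupTabDesMan.cpp hunks above keeps sixteen free lists, where list i holds free areas of 2^i words: allocTabDescr() serves a request from the first non-empty list whose chunk size covers it and returns the surplus, freeTabDescr() cuts a returned area into the largest power-of-two chunks that fit, and itdaMergeTabDescr() first merges the area with free neighbours on both sides. All of that arithmetic rests on nextHigherTwoLog(); the sketch below re-implements it under the assumption, consistent with how it is used here, that it returns the smallest k with (1 << k) > x. The helper is a stand-in, not the kernel's routine.

    #include <cstdio>

    typedef unsigned int Uint32;

    static Uint32 nextHigherTwoLogSketch(Uint32 x)
    {
      Uint32 k = 0;
      while ((1u << k) <= x)
        k++;
      return k;                               // smallest k with 2^k > x
    }

    int main()
    {
      // Allocation side: a request of allocSize words goes to list
      // nextHigherTwoLog(allocSize - 1), whose chunks hold (1 << list) words.
      Uint32 allocSize = 24;
      Uint32 list = nextHigherTwoLogSketch(allocSize - 1);
      std::printf("alloc %u words from list %u (chunk %u words)\n",
                  allocSize, list, 1u << list);

      // Free side: a returned area of retNo words is split starting with the
      // largest power-of-two chunk that fits, list = nextHigherTwoLog(retNo) - 1.
      Uint32 retNo = 24;
      Uint32 freeList = nextHigherTwoLogSketch(retNo) - 1;
      std::printf("free %u words: first chunk %u words onto list %u\n",
                  retNo, 1u << freeList, freeList);
      return 0;
    }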
}//if if (tabDescrPrevPtr != RNIL) { - ljam(); + jam(); setTabDescrWord(tabDescrPrevPtr + ZTD_FL_NEXT, tabDescrNextPtr); }//if }//Dbtup::removeTdArea() diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp index 9615047540b..0c6a1491543 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp @@ -15,6 +15,7 @@ #define DBTUP_C +#define DBTUP_TRIGGER_CPP #include "Dbtup.hpp" #include <RefConvert.hpp> #include <ndb_limits.h> @@ -26,9 +27,6 @@ #include <signaldata/CreateTrig.hpp> #include <signaldata/TuxMaint.hpp> -#define ljam() { jamLine(7000 + __LINE__); } -#define ljamEntry() { jamEntryLine(7000 + __LINE__); } - /* **************************************************************** */ /* ---------------------------------------------------------------- */ /* ----------------------- TRIGGER HANDLING ----------------------- */ @@ -47,17 +45,17 @@ Dbtup::findTriggerList(Tablerec* table, case TriggerType::SUBSCRIPTION_BEFORE: switch (tevent) { case TriggerEvent::TE_INSERT: - ljam(); + jam(); if (ttime == TriggerActionTime::TA_DETACHED) tlist = &table->subscriptionInsertTriggers; break; case TriggerEvent::TE_UPDATE: - ljam(); + jam(); if (ttime == TriggerActionTime::TA_DETACHED) tlist = &table->subscriptionUpdateTriggers; break; case TriggerEvent::TE_DELETE: - ljam(); + jam(); if (ttime == TriggerActionTime::TA_DETACHED) tlist = &table->subscriptionDeleteTriggers; break; @@ -68,17 +66,17 @@ Dbtup::findTriggerList(Tablerec* table, case TriggerType::SECONDARY_INDEX: switch (tevent) { case TriggerEvent::TE_INSERT: - ljam(); + jam(); if (ttime == TriggerActionTime::TA_AFTER) tlist = &table->afterInsertTriggers; break; case TriggerEvent::TE_UPDATE: - ljam(); + jam(); if (ttime == TriggerActionTime::TA_AFTER) tlist = &table->afterUpdateTriggers; break; case TriggerEvent::TE_DELETE: - ljam(); + jam(); if (ttime == TriggerActionTime::TA_AFTER) tlist = &table->afterDeleteTriggers; break; @@ -89,7 +87,7 @@ Dbtup::findTriggerList(Tablerec* table, case TriggerType::ORDERED_INDEX: switch (tevent) { case TriggerEvent::TE_CUSTOM: - ljam(); + jam(); if (ttime == TriggerActionTime::TA_CUSTOM) tlist = &table->tuxCustomTriggers; break; @@ -100,7 +98,7 @@ Dbtup::findTriggerList(Tablerec* table, case TriggerType::READ_ONLY_CONSTRAINT: switch (tevent) { case TriggerEvent::TE_UPDATE: - ljam(); + jam(); if (ttime == TriggerActionTime::TA_AFTER) tlist = &table->constraintUpdateTriggers; break; @@ -118,7 +116,7 @@ Dbtup::findTriggerList(Tablerec* table, void Dbtup::execCREATE_TRIG_REQ(Signal* signal) { - ljamEntry(); + jamEntry(); BlockReference senderRef = signal->getSendersBlockRef(); const CreateTrigReq reqCopy = *(const CreateTrigReq*)signal->getDataPtr(); const CreateTrigReq* const req = &reqCopy; @@ -131,13 +129,13 @@ Dbtup::execCREATE_TRIG_REQ(Signal* signal) if (tabPtr.p->tableStatus != DEFINED ) { - ljam(); + jam(); error= CreateTrigRef::InvalidTable; } // Create trigger and associate it with the table else if (createTrigger(tabPtr.p, req)) { - ljam(); + jam(); // Send conf CreateTrigConf* const conf = (CreateTrigConf*)signal->getDataPtrSend(); conf->setUserRef(reference()); @@ -153,7 +151,7 @@ Dbtup::execCREATE_TRIG_REQ(Signal* signal) } else { - ljam(); + jam(); error= CreateTrigRef::TooManyTriggers; } ndbassert(error != CreateTrigRef::NoError); @@ -174,7 +172,7 @@ Dbtup::execCREATE_TRIG_REQ(Signal* signal) void Dbtup::execDROP_TRIG_REQ(Signal* signal) { - ljamEntry(); + 
jamEntry(); BlockReference senderRef = signal->getSendersBlockRef(); const DropTrigReq reqCopy = *(const DropTrigReq*)signal->getDataPtr(); const DropTrigReq* const req = &reqCopy; @@ -262,7 +260,7 @@ Dbtup::createTrigger(Tablerec* table, const CreateTrigReq* req) if ((tptr.p->triggerType == TriggerType::SUBSCRIPTION) && ((tptr.p->triggerEvent == TriggerEvent::TE_UPDATE) || (tptr.p->triggerEvent == TriggerEvent::TE_DELETE))) { - ljam(); + jam(); tptr.p->sendBeforeValues = false; } /* @@ -270,7 +268,7 @@ Dbtup::createTrigger(Tablerec* table, const CreateTrigReq* req) if (((tptr.p->triggerType == TriggerType::SUBSCRIPTION) || (tptr.p->triggerType == TriggerType::SUBSCRIPTION_BEFORE)) && (tptr.p->triggerEvent == TriggerEvent::TE_UPDATE)) { - ljam(); + jam(); tptr.p->sendOnlyChangedAttributes = true; } */ @@ -282,16 +280,16 @@ Dbtup::createTrigger(Tablerec* table, const CreateTrigReq* req) tptr.p->attributeMask.clear(); if (tptr.p->monitorAllAttributes) { - ljam(); + jam(); for(Uint32 i = 0; i < table->m_no_of_attributes; i++) { if (!primaryKey(table, i)) { - ljam(); + jam(); tptr.p->attributeMask.set(i); } } } else { // Set attribute mask - ljam(); + jam(); tptr.p->attributeMask = req->getAttributeMask(); } return true; @@ -336,7 +334,7 @@ Dbtup::dropTrigger(Tablerec* table, const DropTrigReq* req, BlockNumber sender) Ptr<TupTriggerData> ptr; for (tlist->first(ptr); !ptr.isNull(); tlist->next(ptr)) { - ljam(); + jam(); if (ptr.p->triggerId == triggerId) { if(ttype==TriggerType::SUBSCRIPTION && sender != ptr.p->m_receiverBlock) { @@ -348,10 +346,10 @@ Dbtup::dropTrigger(Tablerec* table, const DropTrigReq* req, BlockNumber sender) * * Backup doesn't really care about the Ids though. */ - ljam(); + jam(); continue; } - ljam(); + jam(); tlist->release(ptr.i); return 0; } @@ -379,7 +377,7 @@ Dbtup::checkImmediateTriggersAfterInsert(KeyReqStruct *req_struct, if ((regOperPtr->op_struct.primary_replica) && (!(regTablePtr->afterInsertTriggers.isEmpty()))) { - ljam(); + jam(); fireImmediateTriggers(req_struct, regTablePtr->afterInsertTriggers, regOperPtr); @@ -397,14 +395,14 @@ Dbtup::checkImmediateTriggersAfterUpdate(KeyReqStruct *req_struct, if ((regOperPtr->op_struct.primary_replica) && (!(regTablePtr->afterUpdateTriggers.isEmpty()))) { - ljam(); + jam(); fireImmediateTriggers(req_struct, regTablePtr->afterUpdateTriggers, regOperPtr); } if ((regOperPtr->op_struct.primary_replica) && (!(regTablePtr->constraintUpdateTriggers.isEmpty()))) { - ljam(); + jam(); fireImmediateTriggers(req_struct, regTablePtr->constraintUpdateTriggers, regOperPtr); @@ -422,7 +420,7 @@ Dbtup::checkImmediateTriggersAfterDelete(KeyReqStruct *req_struct, if ((regOperPtr->op_struct.primary_replica) && (!(regTablePtr->afterDeleteTriggers.isEmpty()))) { - ljam(); + jam(); executeTriggers(req_struct, regTablePtr->afterDeleteTriggers, regOperPtr); @@ -443,7 +441,7 @@ void Dbtup::checkDeferredTriggers(Signal* signal, Operationrec* const regOperPtr, Tablerec* const regTablePtr) { - ljam(); + jam(); // NYI }//Dbtup::checkDeferredTriggers() #endif @@ -479,7 +477,7 @@ void Dbtup::checkDetachedTriggers(KeyReqStruct *req_struct, if (save_ptr->m_header_bits & Tuple_header::ALLOC) { if (save_type == ZDELETE) { // insert + delete = nothing - ljam(); + jam(); return; goto end; } @@ -495,10 +493,10 @@ void Dbtup::checkDetachedTriggers(KeyReqStruct *req_struct, switch(regOperPtr->op_struct.op_type) { case(ZINSERT): - ljam(); + jam(); if (regTablePtr->subscriptionInsertTriggers.isEmpty()) { // Table has no active triggers monitoring inserts at 
commit - ljam(); + jam(); goto end; } @@ -508,10 +506,10 @@ void Dbtup::checkDetachedTriggers(KeyReqStruct *req_struct, regOperPtr); break; case(ZDELETE): - ljam(); + jam(); if (regTablePtr->subscriptionDeleteTriggers.isEmpty()) { // Table has no active triggers monitoring deletes at commit - ljam(); + jam(); goto end; } @@ -522,10 +520,10 @@ void Dbtup::checkDetachedTriggers(KeyReqStruct *req_struct, regOperPtr); break; case(ZUPDATE): - ljam(); + jam(); if (regTablePtr->subscriptionUpdateTriggers.isEmpty()) { // Table has no active triggers monitoring updates at commit - ljam(); + jam(); goto end; } @@ -553,10 +551,10 @@ Dbtup::fireImmediateTriggers(KeyReqStruct *req_struct, TriggerPtr trigPtr; triggerList.first(trigPtr); while (trigPtr.i != RNIL) { - ljam(); + jam(); if (trigPtr.p->monitorAllAttributes || trigPtr.p->attributeMask.overlaps(req_struct->changeMask)) { - ljam(); + jam(); executeTrigger(req_struct, trigPtr.p, regOperPtr); @@ -575,10 +573,10 @@ Dbtup::fireDeferredTriggers(Signal* signal, TriggerPtr trigPtr; triggerList.first(trigPtr); while (trigPtr.i != RNIL) { - ljam(); + jam(); if (trigPtr.p->monitorAllAttributes || trigPtr.p->attributeMask.overlaps(req_struct->changeMask)) { - ljam(); + jam(); executeTrigger(req_struct, trigPtr, regOperPtr); @@ -604,12 +602,12 @@ Dbtup::fireDetachedTriggers(KeyReqStruct *req_struct, ndbrequire(regOperPtr->is_first_operation()); triggerList.first(trigPtr); while (trigPtr.i != RNIL) { - ljam(); + jam(); if ((trigPtr.p->monitorReplicas || regOperPtr->op_struct.primary_replica) && (trigPtr.p->monitorAllAttributes || trigPtr.p->attributeMask.overlaps(req_struct->changeMask))) { - ljam(); + jam(); executeTrigger(req_struct, trigPtr.p, regOperPtr); @@ -625,7 +623,7 @@ void Dbtup::executeTriggers(KeyReqStruct *req_struct, TriggerPtr trigPtr; triggerList.first(trigPtr); while (trigPtr.i != RNIL) { - ljam(); + jam(); executeTrigger(req_struct, trigPtr.p, regOperPtr); @@ -675,7 +673,7 @@ void Dbtup::executeTrigger(KeyReqStruct *req_struct, ptrCheckGuard(regFragPtr, cnoOfFragrec, fragrecord); if (ref == BACKUP) { - ljam(); + jam(); /* In order for the implementation of BACKUP to work even when changing primaries in the middle of the backup we need to set the trigger on @@ -688,9 +686,9 @@ void Dbtup::executeTrigger(KeyReqStruct *req_struct, signal->theData[0] = trigPtr->triggerId; signal->theData[1] = regFragPtr.p->fragmentId; EXECUTE_DIRECT(BACKUP, GSN_BACKUP_TRIG_REQ, signal, 2); - ljamEntry(); + jamEntry(); if (signal->theData[0] == 0) { - ljam(); + jam(); return; } } @@ -704,7 +702,7 @@ void Dbtup::executeTrigger(KeyReqStruct *req_struct, noAfterWords, beforeBuffer, noBeforeWords)) { - ljam(); + jam(); return; } //-------------------------------------------------------------------- @@ -720,13 +718,13 @@ void Dbtup::executeTrigger(KeyReqStruct *req_struct, switch(trigPtr->triggerType) { case (TriggerType::SECONDARY_INDEX): - ljam(); + jam(); ref = req_struct->TC_ref; executeDirect = false; break; case (TriggerType::SUBSCRIPTION): case (TriggerType::SUBSCRIPTION_BEFORE): - ljam(); + jam(); // Since only backup uses subscription triggers we send to backup directly for now ref = trigPtr->m_receiverBlock; executeDirect = true; @@ -747,22 +745,22 @@ void Dbtup::executeTrigger(KeyReqStruct *req_struct, switch(regOperPtr->op_struct.op_type) { case(ZINSERT): - ljam(); + jam(); // Send AttrInfo signals with new attribute values trigAttrInfo->setAttrInfoType(TrigAttrInfo::AFTER_VALUES); sendTrigAttrInfo(signal, afterBuffer, noAfterWords, executeDirect, ref); 
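fireImmediateTriggers(), fireDeferredTriggers() and fireDetachedTriggers() above all gate each trigger on the same condition: fire when the trigger monitors all attributes, or when its attributeMask overlaps the operation's changeMask. A minimal stand-alone version of that test is sketched below, with std::bitset standing in for the kernel's Bitmask<MAXNROFATTRIBUTESINWORDS>; TriggerSketch and shouldFire() are invented names for the illustration.

    #include <bitset>
    #include <cstdio>

    static const unsigned MAX_ATTRS = 128;

    struct TriggerSketch {
      bool monitorAllAttributes;
      std::bitset<MAX_ATTRS> attributeMask;   // attributes the trigger watches
    };

    static bool shouldFire(const TriggerSketch& trig,
                           const std::bitset<MAX_ATTRS>& changeMask)
    {
      // same shape as: monitorAllAttributes || attributeMask.overlaps(changeMask)
      return trig.monitorAllAttributes || (trig.attributeMask & changeMask).any();
    }

    int main()
    {
      TriggerSketch trig;
      trig.monitorAllAttributes = false;
      trig.attributeMask.set(3);              // only attribute id 3 is monitored

      std::bitset<MAX_ATTRS> changeMask;
      changeMask.set(7);                      // update touched attribute 7 only
      std::printf("fires after update of attr 7:   %d\n", shouldFire(trig, changeMask));
      changeMask.set(3);                      // now attribute 3 changed as well
      std::printf("fires after update of attr 3,7: %d\n", shouldFire(trig, changeMask));
      return 0;
    }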
break; case(ZDELETE): if (trigPtr->sendBeforeValues) { - ljam(); + jam(); trigAttrInfo->setAttrInfoType(TrigAttrInfo::BEFORE_VALUES); sendTrigAttrInfo(signal, beforeBuffer, noBeforeWords, executeDirect,ref); } break; case(ZUPDATE): - ljam(); + jam(); if (trigPtr->sendBeforeValues) { - ljam(); + jam(); trigAttrInfo->setAttrInfoType(TrigAttrInfo::BEFORE_VALUES); sendTrigAttrInfo(signal, beforeBuffer, noBeforeWords, executeDirect,ref); } @@ -788,9 +786,9 @@ Uint32 Dbtup::setAttrIds(Bitmask<MAXNROFATTRIBUTESINWORDS>& attributeMask, { Uint32 bufIndx = 0; for (Uint32 i = 0; i < m_no_of_attributesibutes; i++) { - ljam(); + jam(); if (attributeMask.get(i)) { - ljam(); + jam(); AttributeHeader::init(&inBuffer[bufIndx++], i, 0); } } @@ -858,7 +856,7 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr, Uint32 numAttrsToRead; if ((regOperPtr->op_struct.op_type == ZUPDATE) && (trigPtr->sendOnlyChangedAttributes)) { - ljam(); + jam(); //-------------------------------------------------------------------- // Update that sends only changed information //-------------------------------------------------------------------- @@ -870,13 +868,13 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr, } else if ((regOperPtr->op_struct.op_type == ZDELETE) && (!trigPtr->sendBeforeValues)) { - ljam(); + jam(); //-------------------------------------------------------------------- // Delete without sending before values only read Primary Key //-------------------------------------------------------------------- return true; } else { - ljam(); + jam(); //-------------------------------------------------------------------- // All others send all attributes that are monitored, except: // Omit unchanged blob inlines on update i.e. @@ -898,7 +896,7 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr, //-------------------------------------------------------------------- if (regOperPtr->op_struct.op_type != ZDELETE) { - ljam(); + jam(); int ret = readAttributes(req_struct, &readBuffer[0], numAttrsToRead, @@ -908,7 +906,7 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr, ndbrequire(ret != -1); noAfterWords= ret; } else { - ljam(); + jam(); noAfterWords = 0; } @@ -920,7 +918,7 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr, if ((regOperPtr->op_struct.op_type == ZUPDATE || regOperPtr->op_struct.op_type == ZDELETE) && (trigPtr->sendBeforeValues)) { - ljam(); + jam(); Tuple_header *save= req_struct->m_tuple_ptr; PagePtr tmp; @@ -956,7 +954,7 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr, // Although a trigger was fired it was not necessary since the old // value and the new value was exactly the same //-------------------------------------------------------------------- - ljam(); + jam(); //XXX does this work with collations? 
return false; } @@ -976,21 +974,21 @@ void Dbtup::sendTrigAttrInfo(Signal* signal, do { sigLen = dataLen - dataIndex; if (sigLen > TrigAttrInfo::DataLength) { - ljam(); + jam(); sigLen = TrigAttrInfo::DataLength; } MEMCOPY_NO_WORDS(trigAttrInfo->getData(), data + dataIndex, sigLen); if (executeDirect) { - ljam(); + jam(); EXECUTE_DIRECT(receiverReference, GSN_TRIG_ATTRINFO, signal, TrigAttrInfo::StaticLength + sigLen); - ljamEntry(); + jamEntry(); } else { - ljam(); + jam(); sendSignal(receiverReference, GSN_TRIG_ATTRINFO, signal, @@ -1018,15 +1016,15 @@ void Dbtup::sendFireTrigOrd(Signal* signal, switch(regOperPtr->op_struct.op_type) { case(ZINSERT): - ljam(); + jam(); fireTrigOrd->setTriggerEvent(TriggerEvent::TE_INSERT); break; case(ZDELETE): - ljam(); + jam(); fireTrigOrd->setTriggerEvent(TriggerEvent::TE_DELETE); break; case(ZUPDATE): - ljam(); + jam(); fireTrigOrd->setTriggerEvent(TriggerEvent::TE_UPDATE); break; default: @@ -1040,12 +1038,12 @@ void Dbtup::sendFireTrigOrd(Signal* signal, switch(trigPtr->triggerType) { case (TriggerType::SECONDARY_INDEX): - ljam(); + jam(); sendSignal(req_struct->TC_ref, GSN_FIRE_TRIG_ORD, signal, FireTrigOrd::SignalLength, JBB); break; case (TriggerType::SUBSCRIPTION_BEFORE): // Only Suma - ljam(); + jam(); // Since only backup uses subscription triggers we // send to backup directly for now fireTrigOrd->setGCI(req_struct->gci); @@ -1056,7 +1054,7 @@ void Dbtup::sendFireTrigOrd(Signal* signal, FireTrigOrd::SignalWithHashValueLength); break; case (TriggerType::SUBSCRIPTION): - ljam(); + jam(); // Since only backup uses subscription triggers we // send to backup directly for now fireTrigOrd->setGCI(req_struct->gci); @@ -1123,7 +1121,7 @@ Dbtup::addTuxEntries(Signal* signal, Tablerec* regTabPtr) { if (ERROR_INSERTED(4022)) { - ljam(); + jam(); CLEAR_ERROR_INSERT_VALUE; terrorCode = 9999; return -1; @@ -1134,12 +1132,12 @@ Dbtup::addTuxEntries(Signal* signal, Uint32 failPtrI; triggerList.first(triggerPtr); while (triggerPtr.i != RNIL) { - ljam(); + jam(); req->indexId = triggerPtr.p->indexId; req->errorCode = RNIL; if (ERROR_INSERTED(4023) && ! triggerList.hasNext(triggerPtr)) { - ljam(); + jam(); CLEAR_ERROR_INSERT_VALUE; terrorCode = 9999; failPtrI = triggerPtr.i; @@ -1147,9 +1145,9 @@ Dbtup::addTuxEntries(Signal* signal, } EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ, signal, TuxMaintReq::SignalLength); - ljamEntry(); + jamEntry(); if (req->errorCode != 0) { - ljam(); + jam(); terrorCode = req->errorCode; failPtrI = triggerPtr.i; goto fail; @@ -1161,12 +1159,12 @@ fail: req->opInfo = TuxMaintReq::OpRemove; triggerList.first(triggerPtr); while (triggerPtr.i != failPtrI) { - ljam(); + jam(); req->indexId = triggerPtr.p->indexId; req->errorCode = RNIL; EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ, signal, TuxMaintReq::SignalLength); - ljamEntry(); + jamEntry(); ndbrequire(req->errorCode == 0); triggerList.next(triggerPtr); } @@ -1197,15 +1195,15 @@ Dbtup::executeTuxCommitTriggers(Signal* signal, if (regOperPtr->op_struct.op_type == ZINSERT) { if (! 
regOperPtr->op_struct.delete_insert_flag) return; - ljam(); + jam(); tupVersion= decr_tup_version(regOperPtr->tupVersion); } else if (regOperPtr->op_struct.op_type == ZUPDATE) { - ljam(); + jam(); tupVersion= decr_tup_version(regOperPtr->tupVersion); } else if (regOperPtr->op_struct.op_type == ZDELETE) { if (regOperPtr->op_struct.delete_insert_flag) return; - ljam(); + jam(); tupVersion= regOperPtr->tupVersion; } else { ndbrequire(false); @@ -1231,13 +1229,13 @@ Dbtup::executeTuxAbortTriggers(Signal* signal, // get version Uint32 tupVersion; if (regOperPtr->op_struct.op_type == ZINSERT) { - ljam(); + jam(); tupVersion = regOperPtr->tupVersion; } else if (regOperPtr->op_struct.op_type == ZUPDATE) { - ljam(); + jam(); tupVersion = regOperPtr->tupVersion; } else if (regOperPtr->op_struct.op_type == ZDELETE) { - ljam(); + jam(); return; } else { ndbrequire(false); @@ -1262,12 +1260,12 @@ Dbtup::removeTuxEntries(Signal* signal, TriggerPtr triggerPtr; triggerList.first(triggerPtr); while (triggerPtr.i != RNIL) { - ljam(); + jam(); req->indexId = triggerPtr.p->indexId; req->errorCode = RNIL, EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ, signal, TuxMaintReq::SignalLength); - ljamEntry(); + jamEntry(); // must succeed ndbrequire(req->errorCode == 0); triggerList.next(triggerPtr); diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp index 072bd69da97..0b47ae1faef 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp @@ -14,12 +14,9 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define DBTUP_C +#define DBTUP_VAR_ALLOC_CPP #include "Dbtup.hpp" -#define ljam() { jamLine(32000 + __LINE__); } -#define ljamEntry() { jamEntryLine(32000 + __LINE__); } - - void Dbtup::init_list_sizes(void) { c_min_list_size[0]= 200; @@ -71,9 +68,10 @@ Uint32* Dbtup::alloc_var_rec(Fragrecord* fragPtr, /** * TODO alloc fix+var part */ - tabPtr->m_offsets[MM].m_fix_header_size += Tuple_header::HeaderSize + 1; + const Uint32 XXX = Tuple_header::HeaderSize + Var_part_ref::SZ32; + tabPtr->m_offsets[MM].m_fix_header_size += XXX; Uint32 *ptr = alloc_fix_rec(fragPtr, tabPtr, key, out_frag_page_id); - tabPtr->m_offsets[MM].m_fix_header_size -= Tuple_header::HeaderSize + 1; + tabPtr->m_offsets[MM].m_fix_header_size -= XXX; if (unlikely(ptr == 0)) { return 0; @@ -90,7 +88,8 @@ Uint32* Dbtup::alloc_var_rec(Fragrecord* fragPtr, if (likely(alloc_var_part(fragPtr, tabPtr, alloc_size, &varref) != 0)) { Tuple_header* tuple = (Tuple_header*)ptr; - * tuple->get_var_part_ptr(tabPtr) = varref.ref(); + Var_part_ref* dst = (Var_part_ref*)tuple->get_var_part_ptr(tabPtr); + dst->assign(&varref); return ptr; } @@ -109,9 +108,9 @@ Dbtup::alloc_var_part(Fragrecord* fragPtr, PagePtr pagePtr; pagePtr.i= get_alloc_page(fragPtr, (alloc_size + 1)); if (pagePtr.i == RNIL) { - ljam(); + jam(); if ((pagePtr.i= get_empty_var_page(fragPtr)) == RNIL) { - ljam(); + jam(); return 0; } c_page_pool.getPtr(pagePtr); @@ -127,7 +126,7 @@ Dbtup::alloc_var_part(Fragrecord* fragPtr, pagePtr.p->page_state = ZTH_MM_FREE; } else { c_page_pool.getPtr(pagePtr); - ljam(); + jam(); } Uint32 idx= ((Var_page*)pagePtr.p) ->alloc_record(alloc_size, (Var_page*)ctemp_page, Var_page::CHAIN); @@ -167,7 +166,8 @@ void Dbtup::free_var_rec(Fragrecord* fragPtr, Tuple_header* tuple = (Tuple_header*)ptr; Local_key ref; - ref.assref(* tuple->get_var_part_ptr(tabPtr)); + Var_part_ref * varref = 
(Var_part_ref*)tuple->get_var_part_ptr(tabPtr); + varref->copyout(&ref); free_fix_rec(fragPtr, tabPtr, key, (Fix_page*)pagePtr.p); @@ -177,7 +177,7 @@ void Dbtup::free_var_rec(Fragrecord* fragPtr, ndbassert(pagePtr.p->free_space <= Var_page::DATA_WORDS); if (pagePtr.p->free_space == Var_page::DATA_WORDS - 1) { - ljam(); + jam(); /* This code could be used when we release pages. remove_free_page(signal,fragPtr,page_header,page_header->list_index); @@ -185,7 +185,7 @@ void Dbtup::free_var_rec(Fragrecord* fragPtr, */ update_free_page_list(fragPtr, pagePtr); } else { - ljam(); + jam(); update_free_page_list(fragPtr, pagePtr); } return; @@ -193,12 +193,12 @@ void Dbtup::free_var_rec(Fragrecord* fragPtr, int Dbtup::realloc_var_part(Fragrecord* fragPtr, Tablerec* tabPtr, PagePtr pagePtr, - Var_part_ref* ref, Uint32 oldsz, Uint32 newsz) + Var_part_ref* refptr, Uint32 oldsz, Uint32 newsz) { Uint32 add = newsz - oldsz; Var_page* pageP = (Var_page*)pagePtr.p; Local_key oldref; - oldref.assref(*(Uint32*)ref); + refptr->copyout(&oldref); if (pageP->free_space >= add) { @@ -237,7 +237,7 @@ Dbtup::realloc_var_part(Fragrecord* fragPtr, Tablerec* tabPtr, PagePtr pagePtr, ndbassert(oldref.m_page_no != newref.m_page_no); ndbassert(pageP->get_entry_len(oldref.m_page_idx) == oldsz); memcpy(dst, src, 4*oldsz); - * ((Uint32*)ref) = newref.ref(); + refptr->assign(&newref); pageP->free_record(oldref.m_page_idx, Var_page::CHAIN); update_free_page_list(fragPtr, pagePtr); @@ -259,16 +259,16 @@ Dbtup::get_alloc_page(Fragrecord* fragPtr, Uint32 alloc_size) start_index= calculate_free_list_impl(alloc_size); if (start_index == (MAX_FREE_LIST - 1)) { - ljam(); + jam(); } else { - ljam(); + jam(); ndbrequire(start_index < (MAX_FREE_LIST - 1)); start_index++; } for (i= start_index; i < MAX_FREE_LIST; i++) { - ljam(); + jam(); if (!fragPtr->free_var_page_array[i].isEmpty()) { - ljam(); + jam(); return fragPtr->free_var_page_array[i].firstItem; } } @@ -277,9 +277,9 @@ Dbtup::get_alloc_page(Fragrecord* fragPtr, Uint32 alloc_size) LocalDLList<Page> list(c_page_pool, fragPtr->free_var_page_array[i]); for(list.first(pagePtr); !pagePtr.isNull() && loop < 16; ) { - ljam(); + jam(); if (pagePtr.p->free_space >= alloc_size) { - ljam(); + jam(); return pagePtr.i; } loop++; @@ -346,7 +346,7 @@ void Dbtup::update_free_page_list(Fragrecord* fragPtr, (free_space > c_max_list_size[list_index])) { Uint32 new_list_index= calculate_free_list_impl(free_space); if (list_index != MAX_FREE_LIST) { - ljam(); + jam(); /* * Only remove it from its list if it is in a list */ @@ -361,11 +361,11 @@ void Dbtup::update_free_page_list(Fragrecord* fragPtr, This can only happen for the free list with least guaranteed free space. 
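The DbtupVarAlloc.cpp hunks around this point manage variable-size pages through MAX_FREE_LIST per-fragment free lists bucketed by free space: calculate_free_list_impl() returns the first list whose upper bound covers a given size, get_alloc_page() then only needs to probe lists that guarantee enough room, and update_free_page_list() moves a page to another list once its free space leaves the [c_min_list_size, c_max_list_size] window of its current one. The toy below mirrors that shape; the four threshold values are invented (the real ones are set in init_list_sizes()), and only the list-selection arithmetic is shown.

    #include <cstdio>

    typedef unsigned int Uint32;

    static const Uint32 MAX_FREE_LIST = 4;
    // toy bounds: list i holds pages whose free space lies in [min[i], max[i]],
    // so higher-numbered lists guarantee at least as much free space
    static const Uint32 c_min_list_size[MAX_FREE_LIST] = {  0, 100, 200,  500 };
    static const Uint32 c_max_list_size[MAX_FREE_LIST] = { 99, 199, 499, 2000 };

    // same shape as calculate_free_list_impl(): first list whose upper bound
    // covers the given free space
    static Uint32 calculateFreeListSketch(Uint32 free_space)
    {
      for (Uint32 i = 0; i < MAX_FREE_LIST; i++)
        if (free_space <= c_max_list_size[i])
          return i;
      return MAX_FREE_LIST - 1;
    }

    int main()
    {
      Uint32 free_space = 150;
      Uint32 list = calculateFreeListSketch(free_space);        // lands on list 1

      // update_free_page_list()-style move: the page's free space grew to 600,
      // which is outside list 1's window, so it is rebucketed
      free_space = 600;
      if (free_space < c_min_list_size[list] || free_space > c_max_list_size[list])
        list = calculateFreeListSketch(free_space);             // now list 3

      std::printf("page now on free list %u\n", list);
      return 0;
    }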
*/ - ljam(); + jam(); ndbrequire(new_list_index == 0); pagePtr.p->list_index= MAX_FREE_LIST; } else { - ljam(); + jam(); LocalDLList<Page> list(c_page_pool, fragPtr->free_var_page_array[new_list_index]); list.add(pagePtr); @@ -381,9 +381,9 @@ Uint32 Dbtup::calculate_free_list_impl(Uint32 free_space_size) const { Uint32 i; for (i = 0; i < MAX_FREE_LIST; i++) { - ljam(); + jam(); if (free_space_size <= c_max_list_size[i]) { - ljam(); + jam(); return i; } } @@ -398,9 +398,10 @@ Dbtup::alloc_var_rowid(Fragrecord* fragPtr, Local_key* key, Uint32 * out_frag_page_id) { - tabPtr->m_offsets[MM].m_fix_header_size += Tuple_header::HeaderSize + 1; + const Uint32 XXX = Tuple_header::HeaderSize + Var_part_ref::SZ32; + tabPtr->m_offsets[MM].m_fix_header_size += XXX; Uint32 *ptr = alloc_fix_rowid(fragPtr, tabPtr, key, out_frag_page_id); - tabPtr->m_offsets[MM].m_fix_header_size -= Tuple_header::HeaderSize + 1; + tabPtr->m_offsets[MM].m_fix_header_size -= XXX; if (unlikely(ptr == 0)) { return 0; @@ -416,7 +417,8 @@ Dbtup::alloc_var_rowid(Fragrecord* fragPtr, if (likely(alloc_var_part(fragPtr, tabPtr, alloc_size, &varref) != 0)) { Tuple_header* tuple = (Tuple_header*)ptr; - * tuple->get_var_part_ptr(tabPtr) = varref.ref(); + Var_part_ref* dst = (Var_part_ref*)tuple->get_var_part_ptr(tabPtr); + dst->assign(&varref); return ptr; } diff --git a/storage/ndb/src/kernel/blocks/lgman.cpp b/storage/ndb/src/kernel/blocks/lgman.cpp index 90c72fe7010..82fed94f62e 100644 --- a/storage/ndb/src/kernel/blocks/lgman.cpp +++ b/storage/ndb/src/kernel/blocks/lgman.cpp @@ -2918,6 +2918,25 @@ Lgman::stop_run_undo_log(Signal* signal) ptr.p->m_file_pos[TAIL] = tail; init_logbuffer_pointers(ptr); + + { + Buffer_idx head= ptr.p->m_file_pos[HEAD]; + Ptr<Undofile> file; + m_file_pool.getPtr(file, head.m_ptr_i); + if (head.m_idx == file.p->m_file_size - 1) + { + Local_undofile_list files(m_file_pool, ptr.p->m_files); + if(!files.next(file)) + { + jam(); + files.first(file); + } + head.m_idx = 0; + head.m_ptr_i = file.i; + ptr.p->m_file_pos[HEAD] = head; + } + } + ptr.p->m_free_file_words = (Uint64)File_formats::UNDO_PAGE_WORDS * (Uint64)compute_free_file_pages(ptr); ptr.p->m_next_reply_ptr_i = ptr.p->m_file_pos[HEAD].m_ptr_i; diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp index 8fc9e870b80..adc6d1e3ed4 100644 --- a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp +++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp @@ -197,6 +197,7 @@ void Ndbcntr::execSYSTEM_ERROR(Signal* signal) break; case SystemError::CopyFragRefError: + CRASH_INSERTION(1000); BaseString::snprintf(buf, sizeof(buf), "Killed by node %d as " "copyfrag failed, error: %u", @@ -1683,6 +1684,7 @@ void Ndbcntr::createSystableLab(Signal* signal, unsigned index) //w.add(DictTabInfo::NoOfVariable, (Uint32)0); //w.add(DictTabInfo::KeyLength, 1); w.add(DictTabInfo::TableTypeVal, (Uint32)table.tableType); + w.add(DictTabInfo::SingleUserMode, (Uint32)NDB_SUM_READ_WRITE); for (unsigned i = 0; i < table.columnCount; i++) { const SysColumn& column = table.columnList[i]; diff --git a/storage/ndb/src/kernel/blocks/restore.cpp b/storage/ndb/src/kernel/blocks/restore.cpp index 695f393c3b6..d4a2414ef2f 100644 --- a/storage/ndb/src/kernel/blocks/restore.cpp +++ b/storage/ndb/src/kernel/blocks/restore.cpp @@ -1151,8 +1151,23 @@ Restore::calulate_hash(Uint32 tableId, const Uint32 *src) } void -Restore::execLQHKEYREF(Signal*) +Restore::execLQHKEYREF(Signal* signal) { + FilePtr file_ptr; + LqhKeyRef* ref 
= (LqhKeyRef*)signal->getDataPtr(); + m_file_pool.getPtr(file_ptr, ref->connectPtr); + + char buf[255], name[100]; + BaseString::snprintf(name, sizeof(name), "%u/T%dF%d", + file_ptr.p->m_lcp_no, + file_ptr.p->m_table_id, + file_ptr.p->m_fragment_id); + + BaseString::snprintf(buf, sizeof(buf), + "Error %d during restore of %s", + ref->errorCode, name); + + progError(__LINE__, NDBD_EXIT_INVALID_LCP_FILE, buf); ndbrequire(false); } diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.cpp b/storage/ndb/src/kernel/blocks/suma/Suma.cpp index cb5ec0f6adf..4715e9fa2b4 100644 --- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp +++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp @@ -578,6 +578,18 @@ void Suma::execAPI_FAILREQ(Signal* signal) return; } + if (c_failedApiNodes.get(failedApiNode)) + { + jam(); + return; + } + + if (!c_subscriber_nodes.get(failedApiNode)) + { + jam(); + return; + } + c_failedApiNodes.set(failedApiNode); c_connected_nodes.clear(failedApiNode); bool found = removeSubscribersOnNode(signal, failedApiNode); @@ -591,9 +603,12 @@ void Suma::execAPI_FAILREQ(Signal* signal) Ptr<Gcp_record> gcp; for(c_gcp_list.first(gcp); !gcp.isNull(); c_gcp_list.next(gcp)) { + jam(); ack->rep.gci = gcp.p->m_gci; if(gcp.p->m_subscribers.get(failedApiNode)) { + jam(); + gcp.p->m_subscribers.clear(failedApiNode); ack->rep.senderRef = numberToRef(0, failedApiNode); sendSignal(SUMA_REF, GSN_SUB_GCP_COMPLETE_ACK, signal, SubGcpCompleteAck::SignalLength, JBB); @@ -743,6 +758,17 @@ Suma::execNODE_FAILREP(Signal* signal){ Restart.resetRestart(signal); } + if (ERROR_INSERTED(13032)) + { + Uint32 node = c_subscriber_nodes.find(0); + if (node != NodeBitmask::NotFound) + { + ndbout_c("Inserting API_FAILREQ node: %u", node); + signal->theData[0] = node; + EXECUTE_DIRECT(QMGR, GSN_API_FAILREQ, signal, 1); + } + } + signal->theData[0] = SumaContinueB::RESEND_BUCKET; NdbNodeBitmask tmp; @@ -789,17 +815,14 @@ void Suma::execINCL_NODEREQ(Signal* signal){ jamEntry(); - //const Uint32 senderRef = signal->theData[0]; + const Uint32 senderRef = signal->theData[0]; const Uint32 nodeId = signal->theData[1]; ndbrequire(!c_alive_nodes.get(nodeId)); c_alive_nodes.set(nodeId); -#if 0 // if we include this DIH's got to be prepared, later if needed... signal->theData[0] = reference(); - sendSignal(senderRef, GSN_INCL_NODECONF, signal, 1, JBB); -#endif } void @@ -929,6 +952,15 @@ Suma::execDUMP_STATE_ORD(Signal* signal){ CLEAR_ERROR_INSERT_VALUE; } + if (tCase == 8010) + { + char buf1[255], buf2[255]; + c_subscriber_nodes.getText(buf1); + c_connected_nodes.getText(buf2); + infoEvent("c_subscriber_nodes: %s", buf1); + infoEvent("c_connected_nodes: %s", buf2); + } + if (tCase == 8009) { if (ERROR_INSERTED(13030)) @@ -3447,7 +3479,10 @@ Suma::execFIRE_TRIG_ORD(Signal* signal) */ f_bufferLock = 0; b_bufferLock = 0; - + + ndbrequire((tabPtr.p = c_tablePool.getPtr(tabPtr.i)) != 0); + Uint32 tableId = tabPtr.p->m_tableId; + Uint32 bucket= hashValue % c_no_of_buckets; m_max_seen_gci = (gci > m_max_seen_gci ? 
gci : m_max_seen_gci); if(m_active_buckets.get(bucket) || @@ -3470,7 +3505,7 @@ Suma::execFIRE_TRIG_ORD(Signal* signal) SubTableData * data = (SubTableData*)signal->getDataPtrSend();//trg; data->gci = gci; - data->tableId = tabPtr.p->m_tableId; + data->tableId = tableId; data->requestInfo = 0; SubTableData::setOperation(data->requestInfo, event); data->logType = 0; @@ -3493,10 +3528,11 @@ Suma::execFIRE_TRIG_ORD(Signal* signal) else { Uint32* dst; - Uint32 sz = f_trigBufferSize + b_trigBufferSize + 2; + Uint32 sz = f_trigBufferSize + b_trigBufferSize + 3; if((dst = get_buffer_ptr(signal, bucket, gci, sz))) { - * dst++ = tabPtr.i; + * dst++ = tableId; + * dst++ = tabPtr.p->m_schemaVersion; * dst++ = (event << 16) | f_trigBufferSize; memcpy(dst, f_buffer, f_trigBufferSize << 2); dst += f_trigBufferSize; @@ -3628,6 +3664,7 @@ Suma::execSUB_GCP_COMPLETE_REP(Signal* signal) if(c_buckets[i].m_buffer_tail != RNIL) { + //Uint32* dst; get_buffer_ptr(signal, i, gci, 0); } } @@ -3972,6 +4009,9 @@ void Suma::completeSubRemove(SubscriptionPtr subPtr) { DBUG_ENTER("Suma::completeSubRemove"); + //Uint32 subscriptionId = subPtr.p->m_subscriptionId; + //Uint32 subscriptionKey = subPtr.p->m_subscriptionKey; + c_subscriptions.release(subPtr); DBUG_PRINT("info",("c_subscriptionPool size: %d free: %d", c_subscriptionPool.getSize(), @@ -4967,8 +5007,11 @@ Suma::resend_bucket(Signal* signal, Uint32 buck, Uint32 min_gci, { continue; } + + ndbrequire(sz); + sz --; // remove *len* part of sz - if(sz == 1) + if(sz == 0) { SubGcpCompleteRep * rep = (SubGcpCompleteRep*)signal->getDataPtrSend(); rep->gci = last_gci; @@ -4988,43 +5031,48 @@ Suma::resend_bucket(Signal* signal, Uint32 buck, Uint32 min_gci, { g_cnt++; Uint32 table = * src++ ; + Uint32 schemaVersion = * src++; Uint32 event = * src >> 16; Uint32 sz_1 = (* src ++) & 0xFFFF; - - ndbassert(sz - 2 >= sz_1); + + ndbassert(sz - 3 >= sz_1); LinearSectionPtr ptr[3]; const Uint32 nptr= reformat(signal, ptr, src, sz_1, - src + sz_1, sz - 2 - sz_1); + src + sz_1, sz - 3 - sz_1); Uint32 ptrLen= 0; for(Uint32 i =0; i < nptr; i++) ptrLen+= ptr[i].sz; + /** * Signal to subscriber(s) */ Ptr<Table> tabPtr; - ndbrequire((tabPtr.p = c_tablePool.getPtr(table)) != 0); - - SubTableData * data = (SubTableData*)signal->getDataPtrSend();//trg; - data->gci = last_gci; - data->tableId = tabPtr.p->m_tableId; - data->requestInfo = 0; - SubTableData::setOperation(data->requestInfo, event); - data->logType = 0; - data->changeMask = 0; - data->totalLen = ptrLen; - + if (c_tables.find(tabPtr, table) && + tabPtr.p->m_schemaVersion == schemaVersion) { - LocalDLList<Subscriber> list(c_subscriberPool,tabPtr.p->c_subscribers); - SubscriberPtr subbPtr; - for(list.first(subbPtr); !subbPtr.isNull(); list.next(subbPtr)) + SubTableData * data = (SubTableData*)signal->getDataPtrSend();//trg; + data->gci = last_gci; + data->tableId = table; + data->requestInfo = 0; + SubTableData::setOperation(data->requestInfo, event); + data->logType = 0; + data->changeMask = 0; + data->totalLen = ptrLen; + { - DBUG_PRINT("info",("GSN_SUB_TABLE_DATA to node %d", - refToNode(subbPtr.p->m_senderRef))); - data->senderData = subbPtr.p->m_senderData; - sendSignal(subbPtr.p->m_senderRef, GSN_SUB_TABLE_DATA, signal, - SubTableData::SignalLength, JBB, ptr, nptr); + LocalDLList<Subscriber> + list(c_subscriberPool,tabPtr.p->c_subscribers); + SubscriberPtr subbPtr; + for(list.first(subbPtr); !subbPtr.isNull(); list.next(subbPtr)) + { + DBUG_PRINT("info",("GSN_SUB_TABLE_DATA to node %d", + refToNode(subbPtr.p->m_senderRef))); + 
data->senderData = subbPtr.p->m_senderData; + sendSignal(subbPtr.p->m_senderRef, GSN_SUB_TABLE_DATA, signal, + SubTableData::SignalLength, JBB, ptr, nptr); + } } } } diff --git a/storage/ndb/src/kernel/error/ErrorReporter.cpp b/storage/ndb/src/kernel/error/ErrorReporter.cpp index 3d1b7fad7f3..43307d43139 100644 --- a/storage/ndb/src/kernel/error/ErrorReporter.cpp +++ b/storage/ndb/src/kernel/error/ErrorReporter.cpp @@ -24,6 +24,7 @@ #include <NdbHost.h> #include <NdbConfig.h> #include <Configuration.hpp> +#include "EventLogger.hpp" #include <NdbAutoPtr.hpp> @@ -39,7 +40,7 @@ static void dumpJam(FILE* jamStream, Uint32 thrdTheEmulatedJamIndex, Uint8 thrdTheEmulatedJam[]); - +extern EventLogger g_eventLogger; const char* ErrorReporter::formatTimeStampString(){ TimeModule DateTime; /* To create "theDateTimeString" */ @@ -196,6 +197,9 @@ ErrorReporter::handleError(int messageID, WriteMessage(messageID, problemData, objRef, theEmulatedJamIndex, theEmulatedJam); + g_eventLogger.info(problemData); + g_eventLogger.info(objRef); + childReportError(messageID); if(messageID == NDBD_EXIT_ERROR_INSERT){ diff --git a/storage/ndb/src/mgmapi/mgmapi.cpp b/storage/ndb/src/mgmapi/mgmapi.cpp index f13e5880e22..b1a555872eb 100644 --- a/storage/ndb/src/mgmapi/mgmapi.cpp +++ b/storage/ndb/src/mgmapi/mgmapi.cpp @@ -91,9 +91,7 @@ struct ndb_mgm_handle { int last_error; int last_error_line; char last_error_desc[NDB_MGM_MAX_ERR_DESC_SIZE]; - int read_timeout; - int write_timeout; - unsigned int connect_timeout; + unsigned int timeout; NDB_SOCKET_TYPE socket; @@ -137,18 +135,41 @@ setError(NdbMgmHandle h, int error, int error_line, const char * msg, ...){ return ret; \ } -#define CHECK_REPLY(reply, ret) \ +#define CHECK_REPLY(handle, reply, ret) \ if(reply == NULL) { \ - SET_ERROR(handle, NDB_MGM_ILLEGAL_SERVER_REPLY, ""); \ + if(!handle->last_error) \ + SET_ERROR(handle, NDB_MGM_ILLEGAL_SERVER_REPLY, ""); \ return ret; \ } -#define DBUG_CHECK_REPLY(reply, ret) \ +#define DBUG_CHECK_REPLY(handle, reply, ret) \ if (reply == NULL) { \ - SET_ERROR(handle, NDB_MGM_ILLEGAL_SERVER_REPLY, ""); \ + if(!handle->last_error) \ + SET_ERROR(handle, NDB_MGM_ILLEGAL_SERVER_REPLY, ""); \ DBUG_RETURN(ret); \ } +#define CHECK_TIMEDOUT(in, out) \ + if(in.timedout() || out.timedout()) \ + SET_ERROR(handle, ETIMEDOUT, \ + "Time out talking to management server"); + +#define CHECK_TIMEDOUT_RET(h, in, out, ret) \ + if(in.timedout() || out.timedout()) { \ + SET_ERROR(handle, ETIMEDOUT, \ + "Time out talking to management server"); \ + ndb_mgm_disconnect_quiet(h); \ + return ret; \ + } + +#define DBUG_CHECK_TIMEDOUT_RET(h, in, out, ret) \ + if(in.timedout() || out.timedout()) { \ + SET_ERROR(handle, ETIMEDOUT, \ + "Time out talking to management server"); \ + ndb_mgm_disconnect_quiet(h); \ + DBUG_RETURN(ret); \ + } + /***************************************************************************** * Handles *****************************************************************************/ @@ -164,9 +185,7 @@ ndb_mgm_create_handle() h->last_error = 0; h->last_error_line = 0; h->socket = NDB_INVALID_SOCKET; - h->read_timeout = 50000; - h->write_timeout = 100; - h->connect_timeout = 0; + h->timeout = 60000; h->cfg_i = -1; h->errstream = stdout; h->m_name = 0; @@ -323,8 +342,8 @@ ndb_mgm_call(NdbMgmHandle handle, const ParserRow<ParserDummy> *command_reply, DBUG_ENTER("ndb_mgm_call"); DBUG_PRINT("enter",("handle->socket: %d, cmd: %s", handle->socket, cmd)); - SocketOutputStream out(handle->socket); - SocketInputStream in(handle->socket, 
handle->read_timeout); + SocketOutputStream out(handle->socket, handle->timeout); + SocketInputStream in(handle->socket, handle->timeout); out.println(cmd); #ifdef MGMAPI_LOG @@ -375,6 +394,8 @@ ndb_mgm_call(NdbMgmHandle handle, const ParserRow<ParserDummy> *command_reply, } out.println(""); + CHECK_TIMEDOUT_RET(handle, in, out, NULL); + Parser_t::Context ctx; ParserDummy session(handle->socket); Parser_t parser(command_reply, in, true, true, true); @@ -382,14 +403,17 @@ ndb_mgm_call(NdbMgmHandle handle, const ParserRow<ParserDummy> *command_reply, const Properties* p = parser.parse(ctx, session); if (p == NULL){ if(!ndb_mgm_is_connected(handle)) { + CHECK_TIMEDOUT_RET(handle, in, out, NULL); DBUG_RETURN(NULL); } else { + CHECK_TIMEDOUT_RET(handle, in, out, NULL); if(ctx.m_status==Parser_t::Eof || ctx.m_status==Parser_t::NoLine) { ndb_mgm_disconnect(handle); + CHECK_TIMEDOUT_RET(handle, in, out, NULL); DBUG_RETURN(NULL); } /** @@ -411,6 +435,10 @@ ndb_mgm_call(NdbMgmHandle handle, const ParserRow<ParserDummy> *command_reply, p->print(handle->logfile, "IN: "); } #endif + + if(p && (in.timedout() || out.timedout())) + delete p; + CHECK_TIMEDOUT_RET(handle, in, out, NULL); DBUG_RETURN(p); } @@ -437,13 +465,36 @@ int ndb_mgm_is_connected(NdbMgmHandle handle) extern "C" int ndb_mgm_set_connect_timeout(NdbMgmHandle handle, unsigned int seconds) { + return ndb_mgm_set_timeout(handle, seconds*1000); + return 0; +} + +extern "C" +int ndb_mgm_set_timeout(NdbMgmHandle handle, unsigned int timeout_ms) +{ if(!handle) return -1; - handle->connect_timeout= seconds; + handle->timeout= timeout_ms; return 0; } +extern "C" +int ndb_mgm_number_of_mgmd_in_connect_string(NdbMgmHandle handle) +{ + int count=0; + Uint32 i; + LocalConfig &cfg= handle->cfg; + + for (i = 0; i < cfg.ids.size(); i++) + { + if (cfg.ids[i].type != MgmId_TCP) + continue; + count++; + } + return count; +} + /** * Connect to a management server */ @@ -473,7 +524,7 @@ ndb_mgm_connect(NdbMgmHandle handle, int no_retries, NDB_SOCKET_TYPE sockfd= NDB_INVALID_SOCKET; Uint32 i; SocketClient s(0, 0); - s.set_connect_timeout(handle->connect_timeout); + s.set_connect_timeout(handle->timeout); if (!s.init()) { fprintf(handle->errstream, @@ -594,6 +645,22 @@ ndb_mgm_get_fd(NdbMgmHandle handle) } /** + * Disconnect from mgm server without error checking + * Should be used internally only. + * e.g. 
on timeout, we leave NdbMgmHandle disconnected + */ +extern "C" +int +ndb_mgm_disconnect_quiet(NdbMgmHandle handle) +{ + NDB_CLOSE_SOCKET(handle->socket); + handle->socket = NDB_INVALID_SOCKET; + handle->connected = 0; + + return 0; +} + +/** * Disconnect from a mgm server */ extern "C" @@ -604,11 +671,7 @@ ndb_mgm_disconnect(NdbMgmHandle handle) CHECK_HANDLE(handle, -1); CHECK_CONNECTED(handle, -1); - NDB_CLOSE_SOCKET(handle->socket); - handle->socket = NDB_INVALID_SOCKET; - handle->connected = 0; - - return 0; + return ndb_mgm_disconnect_quiet(handle); } struct ndb_mgm_type_atoi @@ -771,24 +834,30 @@ ndb_mgm_get_status(NdbMgmHandle handle) CHECK_HANDLE(handle, NULL); CHECK_CONNECTED(handle, NULL); - SocketOutputStream out(handle->socket); - SocketInputStream in(handle->socket, handle->read_timeout); + SocketOutputStream out(handle->socket, handle->timeout); + SocketInputStream in(handle->socket, handle->timeout); out.println("get status"); out.println(""); + CHECK_TIMEDOUT_RET(handle, in, out, NULL); + char buf[1024]; if(!in.gets(buf, sizeof(buf))) { + CHECK_TIMEDOUT_RET(handle, in, out, NULL); SET_ERROR(handle, NDB_MGM_ILLEGAL_SERVER_REPLY, "Probably disconnected"); return NULL; } if(strcmp("node status\n", buf) != 0) { + CHECK_TIMEDOUT_RET(handle, in, out, NULL); + ndbout << in.timedout() << " " << out.timedout() << buf << endl; SET_ERROR(handle, NDB_MGM_ILLEGAL_NODE_STATUS, buf); return NULL; } if(!in.gets(buf, sizeof(buf))) { + CHECK_TIMEDOUT_RET(handle, in, out, NULL); SET_ERROR(handle, NDB_MGM_ILLEGAL_SERVER_REPLY, "Probably disconnected"); return NULL; } @@ -797,6 +866,7 @@ ndb_mgm_get_status(NdbMgmHandle handle) Vector<BaseString> split; tmp.split(split, ":"); if(split.size() != 2){ + CHECK_TIMEDOUT_RET(handle, in, out, NULL); SET_ERROR(handle, NDB_MGM_ILLEGAL_NODE_STATUS, buf); return NULL; } @@ -831,8 +901,12 @@ ndb_mgm_get_status(NdbMgmHandle handle) if(!in.gets(buf, sizeof(buf))) { free(state); - SET_ERROR(handle, NDB_MGM_ILLEGAL_SERVER_REPLY, - "Probably disconnected"); + if(in.timedout() || out.timedout()) + SET_ERROR(handle, ETIMEDOUT, + "Time out talking to management server"); + else + SET_ERROR(handle, NDB_MGM_ILLEGAL_SERVER_REPLY, + "Probably disconnected"); return NULL; } tmp.assign(buf); @@ -863,6 +937,7 @@ ndb_mgm_get_status(NdbMgmHandle handle) if(i+1 != noOfNodes){ free(state); + CHECK_TIMEDOUT_RET(handle, in, out, NULL); SET_ERROR(handle, NDB_MGM_ILLEGAL_NODE_STATUS, "Node count mismatch"); return NULL; } @@ -891,7 +966,7 @@ ndb_mgm_enter_single_user(NdbMgmHandle handle, args.put("nodeId", nodeId); const Properties *reply; reply = ndb_mgm_call(handle, enter_single_reply, "enter single user", &args); - CHECK_REPLY(reply, -1); + CHECK_REPLY(handle, reply, -1); BaseString result; reply->get("result", result); @@ -922,7 +997,7 @@ ndb_mgm_exit_single_user(NdbMgmHandle handle, struct ndb_mgm_reply* /*reply*/) const Properties *reply; reply = ndb_mgm_call(handle, exit_single_reply, "exit single user", 0); - CHECK_REPLY(reply, -1); + CHECK_REPLY(handle, reply, -1); const char * buf; reply->get("result", &buf); @@ -1019,7 +1094,7 @@ ndb_mgm_stop3(NdbMgmHandle handle, int no_of_nodes, const int * node_list, reply = ndb_mgm_call(handle, stop_reply_v2, "stop all", &args); else reply = ndb_mgm_call(handle, stop_reply_v1, "stop all", &args); - CHECK_REPLY(reply, -1); + CHECK_REPLY(handle, reply, -1); if(!reply->get("stopped", &stoppedNoOfNodes)){ SET_ERROR(handle, NDB_MGM_STOP_FAILED, @@ -1061,7 +1136,7 @@ ndb_mgm_stop3(NdbMgmHandle handle, int no_of_nodes, const int * 
node_list, else reply = ndb_mgm_call(handle, stop_reply_v1, "stop", &args); - CHECK_REPLY(reply, stoppedNoOfNodes); + CHECK_REPLY(handle, reply, stoppedNoOfNodes); if(!reply->get("stopped", &stoppedNoOfNodes)){ SET_ERROR(handle, NDB_MGM_STOP_FAILED, "Could not get number of stopped nodes from mgm server"); @@ -1160,11 +1235,11 @@ ndb_mgm_restart3(NdbMgmHandle handle, int no_of_nodes, const int * node_list, args.put("initialstart", initial); args.put("nostart", nostart); const Properties *reply; - const int timeout = handle->read_timeout; - handle->read_timeout= 5*60*1000; // 5 minutes + const int timeout = handle->timeout; + handle->timeout= 5*60*1000; // 5 minutes reply = ndb_mgm_call(handle, restart_reply_v1, "restart all", &args); - handle->read_timeout= timeout; - CHECK_REPLY(reply, -1); + handle->timeout= timeout; + CHECK_REPLY(handle, reply, -1); BaseString result; reply->get("result", result); @@ -1196,13 +1271,13 @@ ndb_mgm_restart3(NdbMgmHandle handle, int no_of_nodes, const int * node_list, args.put("nostart", nostart); const Properties *reply; - const int timeout = handle->read_timeout; - handle->read_timeout= 5*60*1000; // 5 minutes + const int timeout = handle->timeout; + handle->timeout= 5*60*1000; // 5 minutes if(use_v2) reply = ndb_mgm_call(handle, restart_reply_v2, "restart node v2", &args); else reply = ndb_mgm_call(handle, restart_reply_v1, "restart node", &args); - handle->read_timeout= timeout; + handle->timeout= timeout; if(reply != NULL) { BaseString result; reply->get("result", result); @@ -1291,7 +1366,7 @@ ndb_mgm_get_clusterlog_severity_filter(NdbMgmHandle handle, Properties args; const Properties *reply; reply = ndb_mgm_call(handle, getinfo_reply, "get info clusterlog", &args); - CHECK_REPLY(reply, -1); + CHECK_REPLY(handle, reply, -1); for(unsigned int i=0; i < severity_size; i++) { reply->get(clusterlog_severity_names[severity[i].category], &severity[i].value); @@ -1322,7 +1397,7 @@ ndb_mgm_get_clusterlog_severity_filter_old(NdbMgmHandle handle) Properties args; const Properties *reply; reply = ndb_mgm_call(handle, getinfo_reply, "get info clusterlog", &args); - CHECK_REPLY(reply, NULL); + CHECK_REPLY(handle, reply, NULL); for(int i=0; i < (int)NDB_MGM_EVENT_SEVERITY_ALL; i++) { reply->get(clusterlog_severity_names[i], &enabled[i]); @@ -1354,7 +1429,7 @@ ndb_mgm_set_clusterlog_severity_filter(NdbMgmHandle handle, const Properties *reply; reply = ndb_mgm_call(handle, filter_reply, "set logfilter", &args); - CHECK_REPLY(reply, retval); + CHECK_REPLY(handle, reply, retval); BaseString result; reply->get("result", result); @@ -1448,7 +1523,7 @@ ndb_mgm_get_clusterlog_loglevel(NdbMgmHandle handle, Properties args; const Properties *reply; reply = ndb_mgm_call(handle, getloglevel_reply, "get cluster loglevel", &args); - CHECK_REPLY(reply, -1); + CHECK_REPLY(handle, reply, -1); for(int i=0; i < loglevel_count; i++) { reply->get(clusterlog_names[loglevel[i].category], &loglevel[i].value); @@ -1484,7 +1559,7 @@ ndb_mgm_get_clusterlog_loglevel_old(NdbMgmHandle handle) Properties args; const Properties *reply; reply = ndb_mgm_call(handle, getloglevel_reply, "get cluster loglevel", &args); - CHECK_REPLY(reply, NULL); + CHECK_REPLY(handle, reply, NULL); for(int i=0; i < loglevel_count; i++) { reply->get(clusterlog_names[i], &loglevel[i]); @@ -1517,7 +1592,7 @@ ndb_mgm_set_clusterlog_loglevel(NdbMgmHandle handle, int nodeId, const Properties *reply; reply = ndb_mgm_call(handle, clusterlog_reply, "set cluster loglevel", &args); - CHECK_REPLY(reply, -1); + CHECK_REPLY(handle, 
reply, -1); DBUG_ENTER("ndb_mgm_set_clusterlog_loglevel"); DBUG_PRINT("enter",("node=%d, category=%d, level=%d", nodeId, cat, level)); @@ -1555,7 +1630,7 @@ ndb_mgm_set_loglevel_node(NdbMgmHandle handle, int nodeId, args.put("level", level); const Properties *reply; reply = ndb_mgm_call(handle, loglevel_reply, "set loglevel", &args); - CHECK_REPLY(reply, -1); + CHECK_REPLY(handle, reply, -1); BaseString result; reply->get("result", result); @@ -1614,7 +1689,7 @@ ndb_mgm_listen_event_internal(NdbMgmHandle handle, const int filter[], if(reply == NULL) { close(sockfd); - CHECK_REPLY(reply, -1); + CHECK_REPLY(handle, reply, -1); } delete reply; return sockfd; @@ -1658,7 +1733,7 @@ ndb_mgm_dump_state(NdbMgmHandle handle, int nodeId, const int * _args, const Properties *prop; prop = ndb_mgm_call(handle, dump_state_reply, "dump state", &args); - CHECK_REPLY(prop, -1); + CHECK_REPLY(handle, prop, -1); BaseString result; prop->get("result", result); @@ -1695,6 +1770,7 @@ ndb_mgm_start_signallog(NdbMgmHandle handle, int nodeId, start_signallog_reply, "start signallog", &args); + CHECK_REPLY(handle, prop, -1); if(prop != NULL) { BaseString result; @@ -1731,6 +1807,7 @@ ndb_mgm_stop_signallog(NdbMgmHandle handle, int nodeId, const Properties *prop; prop = ndb_mgm_call(handle, stop_signallog_reply, "stop signallog", &args); + CHECK_REPLY(handle, prop, -1); if(prop != NULL) { BaseString result; @@ -1795,6 +1872,7 @@ ndb_mgm_log_signals(NdbMgmHandle handle, int nodeId, const Properties *prop; prop = ndb_mgm_call(handle, stop_signallog_reply, "log signals", &args); + CHECK_REPLY(handle, prop, -1); if(prop != NULL) { BaseString result; @@ -1832,6 +1910,7 @@ ndb_mgm_set_trace(NdbMgmHandle handle, int nodeId, int traceNumber, const Properties *prop; prop = ndb_mgm_call(handle, set_trace_reply, "set trace", &args); + CHECK_REPLY(handle, prop, -1); if(prop != NULL) { BaseString result; @@ -1869,6 +1948,7 @@ ndb_mgm_insert_error(NdbMgmHandle handle, int nodeId, int errorCode, const Properties *prop; prop = ndb_mgm_call(handle, insert_error_reply, "insert error", &args); + CHECK_REPLY(handle, prop, -1); if(prop != NULL) { BaseString result; @@ -1909,7 +1989,7 @@ ndb_mgm_start(NdbMgmHandle handle, int no_of_nodes, const int * node_list) Properties args; const Properties *reply; reply = ndb_mgm_call(handle, start_reply, "start all", &args); - CHECK_REPLY(reply, -1); + CHECK_REPLY(handle, reply, -1); Uint32 count = 0; if(!reply->get("started", &count)){ @@ -1967,15 +2047,15 @@ ndb_mgm_start_backup(NdbMgmHandle handle, int wait_completed, args.put("completed", wait_completed); const Properties *reply; { // start backup can take some time, set timeout high - Uint64 old_timeout= handle->read_timeout; + Uint64 old_timeout= handle->timeout; if (wait_completed == 2) - handle->read_timeout= 48*60*60*1000; // 48 hours + handle->timeout= 48*60*60*1000; // 48 hours else if (wait_completed == 1) - handle->read_timeout= 10*60*1000; // 10 minutes + handle->timeout= 10*60*1000; // 10 minutes reply = ndb_mgm_call(handle, start_backup_reply, "start backup", &args); - handle->read_timeout= old_timeout; + handle->timeout= old_timeout; } - CHECK_REPLY(reply, -1); + CHECK_REPLY(handle, reply, -1); BaseString result; reply->get("result", result); @@ -2009,7 +2089,7 @@ ndb_mgm_abort_backup(NdbMgmHandle handle, unsigned int backupId, const Properties *prop; prop = ndb_mgm_call(handle, stop_backup_reply, "abort backup", &args); - CHECK_REPLY(prop, -1); + CHECK_REPLY(handle, prop, -1); const char * buf; prop->get("result", &buf); @@ 
-2026,7 +2106,7 @@ ndb_mgm_abort_backup(NdbMgmHandle handle, unsigned int backupId, extern "C" struct ndb_mgm_configuration * ndb_mgm_get_configuration(NdbMgmHandle handle, unsigned int version) { - + SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_get_configuration"); CHECK_HANDLE(handle, 0); CHECK_CONNECTED(handle, 0); @@ -2044,7 +2124,7 @@ ndb_mgm_get_configuration(NdbMgmHandle handle, unsigned int version) { const Properties *prop; prop = ndb_mgm_call(handle, reply, "get config", &args); - CHECK_REPLY(prop, 0); + CHECK_REPLY(handle, prop, 0); do { const char * buf; @@ -2080,10 +2160,15 @@ ndb_mgm_get_configuration(NdbMgmHandle handle, unsigned int version) { int read = 0; size_t start = 0; do { - if((read = read_socket(handle->socket, handle->read_timeout, - &buf64[start], len-start)) == -1){ - delete[] buf64; + if((read = read_socket(handle->socket, handle->timeout, + &buf64[start], len-start)) < 1){ + delete[] buf64; buf64 = 0; + if(read==0) + SET_ERROR(handle, ETIMEDOUT, "Timeout reading packed config"); + else + SET_ERROR(handle, errno, "Error reading packed config"); + ndb_mgm_disconnect_quiet(handle); break; } start += read; @@ -2204,7 +2289,7 @@ ndb_mgm_alloc_nodeid(NdbMgmHandle handle, unsigned int version, int nodetype, const Properties *prop; prop= ndb_mgm_call(handle, reply, "get nodeid", &args); - CHECK_REPLY(prop, -1); + CHECK_REPLY(handle, prop, -1); nodeid= -1; do { @@ -2256,7 +2341,7 @@ ndb_mgm_set_int_parameter(NdbMgmHandle handle, const Properties *prop; prop= ndb_mgm_call(handle, reply, "set parameter", &args); - CHECK_REPLY(prop, -1); + CHECK_REPLY(handle, prop, -1); int res= -1; do { @@ -2295,7 +2380,8 @@ ndb_mgm_set_int64_parameter(NdbMgmHandle handle, const Properties *prop; prop= ndb_mgm_call(handle, reply, "set parameter", &args); - + CHECK_REPLY(handle, prop, 0); + if(prop == NULL) { SET_ERROR(handle, EIO, "Unable set parameter"); return -1; @@ -2338,6 +2424,7 @@ ndb_mgm_set_string_parameter(NdbMgmHandle handle, const Properties *prop; prop= ndb_mgm_call(handle, reply, "set parameter", &args); + CHECK_REPLY(handle, prop, 0); if(prop == NULL) { SET_ERROR(handle, EIO, "Unable set parameter"); @@ -2375,7 +2462,8 @@ ndb_mgm_purge_stale_sessions(NdbMgmHandle handle, char **purged){ const Properties *prop; prop= ndb_mgm_call(handle, reply, "purge stale sessions", &args); - + CHECK_REPLY(handle, prop, -1); + if(prop == NULL) { SET_ERROR(handle, EIO, "Unable to purge stale sessions"); return -1; @@ -2405,8 +2493,8 @@ int ndb_mgm_check_connection(NdbMgmHandle handle){ CHECK_HANDLE(handle, 0); CHECK_CONNECTED(handle, 0); - SocketOutputStream out(handle->socket); - SocketInputStream in(handle->socket, handle->read_timeout); + SocketOutputStream out(handle->socket, handle->timeout); + SocketInputStream in(handle->socket, handle->timeout); char buf[32]; if (out.println("check connection")) goto ndb_mgm_check_connection_error; @@ -2460,7 +2548,7 @@ ndb_mgm_set_connection_int_parameter(NdbMgmHandle handle, const Properties *prop; prop= ndb_mgm_call(handle, reply, "set connection parameter", &args); - DBUG_CHECK_REPLY(prop, -1); + DBUG_CHECK_REPLY(handle, prop, -1); int res= -1; do { @@ -2502,7 +2590,7 @@ ndb_mgm_get_connection_int_parameter(NdbMgmHandle handle, const Properties *prop; prop = ndb_mgm_call(handle, reply, "get connection parameter", &args); - DBUG_CHECK_REPLY(prop, -3); + DBUG_CHECK_REPLY(handle, prop, -3); int res= -1; do { @@ -2535,7 +2623,7 @@ ndb_mgm_convert_to_transporter(NdbMgmHandle *handle) (*handle)->connected= 0; // we pretend we're 
disconnected s= (*handle)->socket; - SocketOutputStream s_output(s); + SocketOutputStream s_output(s, (*handle)->timeout); s_output.println("transporter connect"); s_output.println(""); @@ -2564,7 +2652,7 @@ ndb_mgm_get_mgmd_nodeid(NdbMgmHandle handle) const Properties *prop; prop = ndb_mgm_call(handle, reply, "get mgmd nodeid", &args); - DBUG_CHECK_REPLY(prop, 0); + DBUG_CHECK_REPLY(handle, prop, 0); if(!prop->get("nodeid",&nodeid)){ fprintf(handle->errstream, "Unable to get value\n"); @@ -2599,7 +2687,7 @@ int ndb_mgm_report_event(NdbMgmHandle handle, Uint32 *data, Uint32 length) const Properties *prop; prop = ndb_mgm_call(handle, reply, "report event", &args); - DBUG_CHECK_REPLY(prop, -1); + DBUG_CHECK_REPLY(handle, prop, -1); DBUG_RETURN(0); } @@ -2611,13 +2699,14 @@ int ndb_mgm_end_session(NdbMgmHandle handle) CHECK_CONNECTED(handle, 0); DBUG_ENTER("ndb_mgm_end_session"); - SocketOutputStream s_output(handle->socket); + SocketOutputStream s_output(handle->socket, handle->timeout); s_output.println("end session"); s_output.println(""); - SocketInputStream in(handle->socket, handle->read_timeout); + SocketInputStream in(handle->socket, handle->timeout); char buf[32]; in.gets(buf, sizeof(buf)); + CHECK_TIMEDOUT_RET(handle, in, s_output, -1); DBUG_RETURN(0); } @@ -2643,7 +2732,7 @@ int ndb_mgm_get_version(NdbMgmHandle handle, const Properties *prop; prop = ndb_mgm_call(handle, reply, "get version", &args); - CHECK_REPLY(prop, 0); + CHECK_REPLY(handle, prop, 0); Uint32 id; if(!prop->get("id",&id)){ @@ -2694,7 +2783,7 @@ ndb_mgm_get_session_id(NdbMgmHandle handle) const Properties *prop; prop = ndb_mgm_call(handle, reply, "get session id", &args); - CHECK_REPLY(prop, 0); + CHECK_REPLY(handle, prop, 0); if(!prop->get("id",&session_id)){ fprintf(handle->errstream, "Unable to get session id\n"); @@ -2731,7 +2820,7 @@ ndb_mgm_get_session(NdbMgmHandle handle, Uint64 id, const Properties *prop; prop = ndb_mgm_call(handle, reply, "get session", &args); - CHECK_REPLY(prop, 0); + CHECK_REPLY(handle, prop, 0); Uint64 r_id; int rlen= 0; diff --git a/storage/ndb/src/mgmapi/mgmapi_internal.h b/storage/ndb/src/mgmapi/mgmapi_internal.h index d30be221dcd..192bc57afd9 100644 --- a/storage/ndb/src/mgmapi/mgmapi_internal.h +++ b/storage/ndb/src/mgmapi/mgmapi_internal.h @@ -68,6 +68,8 @@ extern "C" { */ NDB_SOCKET_TYPE ndb_mgm_convert_to_transporter(NdbMgmHandle *handle); + int ndb_mgm_disconnect_quiet(NdbMgmHandle handle); + #ifdef __cplusplus } #endif diff --git a/storage/ndb/src/mgmapi/ndb_logevent.cpp b/storage/ndb/src/mgmapi/ndb_logevent.cpp index 9963c24ce40..ed72db297ab 100644 --- a/storage/ndb/src/mgmapi/ndb_logevent.cpp +++ b/storage/ndb/src/mgmapi/ndb_logevent.cpp @@ -389,14 +389,18 @@ int ndb_logevent_get_next(const NdbLogEventHandle h, struct ndb_logevent *dst, unsigned timeout_in_milliseconds) { + if (timeout_in_milliseconds == 0) + { + int res; + while ((res = ndb_logevent_get_next(h, dst, 60000))==0); + return res; + } + SocketInputStream in(h->socket, timeout_in_milliseconds); Properties p; char buf[256]; - struct timeval start_time; - gettimeofday(&start_time, 0); - /* header */ while (1) { if (in.gets(buf,sizeof(buf)) == 0) @@ -409,24 +413,15 @@ int ndb_logevent_get_next(const NdbLogEventHandle h, // timed out return 0; } + if ( strcmp("log event reply\n", buf) == 0 ) break; if ( strcmp("<PING>\n", buf) ) ndbout_c("skipped: %s", buf); - struct timeval now; - gettimeofday(&now, 0); - unsigned elapsed_ms= (now.tv_sec-start_time.tv_sec)*1000 + - ((signed int)now.tv_usec-(signed 
int)start_time.tv_usec)/1000; - - if (elapsed_ms >= timeout_in_milliseconds) - { - // timed out - return 0; - } - - new (&in) SocketInputStream(h->socket, timeout_in_milliseconds-elapsed_ms); + if(in.timedout()) + return 0; } /* read name-value pairs into properties object */ @@ -437,11 +432,9 @@ int ndb_logevent_get_next(const NdbLogEventHandle h, h->m_error= NDB_LEH_READ_ERROR; return -1; } - if ( buf[0] == 0 ) - { - // timed out + if (in.timedout()) return 0; - } + if ( buf[0] == '\n' ) { break; diff --git a/storage/ndb/src/mgmclient/CommandInterpreter.cpp b/storage/ndb/src/mgmclient/CommandInterpreter.cpp index 7c57d9eabbc..793b2e3cc98 100644 --- a/storage/ndb/src/mgmclient/CommandInterpreter.cpp +++ b/storage/ndb/src/mgmclient/CommandInterpreter.cpp @@ -726,10 +726,9 @@ event_thread_run(void* p) do_event_thread= 1; char *tmp= 0; char buf[1024]; - SocketInputStream in(fd,10); do { - if (tmp == 0) NdbSleep_MilliSleep(10); - if((tmp = in.gets(buf, 1024))) + SocketInputStream in(fd,2000); + if((tmp = in.gets(buf, sizeof(buf)))) { const char ping_token[]= "<PING>"; if (memcmp(ping_token,tmp,sizeof(ping_token)-1)) @@ -739,6 +738,10 @@ event_thread_run(void* p) ndbout << tmp; } } + else if(in.timedout() && ndb_mgm_check_connection(handle)<0) + { + break; + } } while(do_event_thread); NDB_CLOSE_SOCKET(fd); } @@ -2056,6 +2059,7 @@ CommandInterpreter::executeStatus(int processId, } if (cl->node_states[i].node_type != NDB_MGM_NODE_TYPE_NDB){ if (cl->node_states[i].version != 0){ + version = cl->node_states[i].version; ndbout << "Node "<< cl->node_states[i].node_id <<": connected" ; ndbout_c(" (Version %d.%d.%d)", getMajor(version) , @@ -2065,7 +2069,7 @@ CommandInterpreter::executeStatus(int processId, }else ndbout << "Node "<< cl->node_states[i].node_id <<": not connected" << endl; return 0; - } + } status = cl->node_states[i].node_status; startPhase = cl->node_states[i].start_phase; version = cl->node_states[i].version; diff --git a/storage/ndb/src/mgmsrv/ConfigInfo.cpp b/storage/ndb/src/mgmsrv/ConfigInfo.cpp index e7c6122d846..ba04ceee7ec 100644 --- a/storage/ndb/src/mgmsrv/ConfigInfo.cpp +++ b/storage/ndb/src/mgmsrv/ConfigInfo.cpp @@ -449,7 +449,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { ConfigInfo::CI_INT, "128", "8", - STR_VALUE(MAX_INT_RNIL) }, + STR_VALUE(MAX_TABLES) }, { CFG_DB_NO_ORDERED_INDEXES, @@ -556,7 +556,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { true, ConfigInfo::CI_INT, "0", - "1", + "0", "2" }, { diff --git a/storage/ndb/src/mgmsrv/ERROR_codes.txt b/storage/ndb/src/mgmsrv/ERROR_codes.txt new file mode 100644 index 00000000000..44a6047c05e --- /dev/null +++ b/storage/ndb/src/mgmsrv/ERROR_codes.txt @@ -0,0 +1,29 @@ +Next Session 10 +Next Global 10001 + + +#define MGM_ERROR_MAX_INJECT_SESSION_ONLY 10000 +Errors < 10000 are per-session only - in MgmApiSession. + +Others are for the whole mgm server. 
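[Note, not part of the patch: the per-session / whole-server split described above is what the rest of this change implements — Services.cpp gains a session-local m_errorInsert and redefines ERROR_INSERTED(x) as (g_errorInsert == x || m_errorInsert == x), while MgmtSrvr.cpp/MgmtSrvr.hpp add the server-wide g_errorInsert. A minimal C++ sketch of that routing follows; the struct name, the simplified insertError() signature, and collapsing the MgmtSrvr::insertError() own-node path into a direct assignment are assumptions made purely for illustration.]

    // Illustrative sketch only -- not part of the patch.
    // Codes below 10000 stay inside the issuing MgmApiSession;
    // anything else is injected into the whole management server.
    #define MGM_ERROR_MAX_INJECT_SESSION_ONLY 10000

    int g_errorInsert = 0;            // whole mgm server (MgmtSrvr.cpp in the patch)

    struct MgmApiSessionSketch {
      int m_errorInsert = 0;          // this session only (Services.cpp in the patch)

      void insertError(int requestedNode, int ownNode, int error) {
        if (requestedNode == ownNode &&
            error < MGM_ERROR_MAX_INJECT_SESSION_ONLY) {
          m_errorInsert = error;      // error 0 clears; the patch also clears the global
          if (error == 0)
            g_errorInsert = error;
        } else {
          g_errorInsert = error;      // stands in for MgmtSrvr::insertError()
        }
      }

      // Mirrors the patch's ERROR_INSERTED(x): true if either scope has the code.
      bool errorInserted(int x) const {
        return g_errorInsert == x || m_errorInsert == x;
      }
    };

The point of the split is that a test can make a single management-server session misbehave (sleep before a reply, mangle a half-encoded config) without disturbing other connected clients, while codes at or above 10000 — such as 10000 itself, which suppresses pings to event listeners — affect every session.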
+ +Error 0 is no error + +TIMEOUTS +-------- + +num where type testing + +1 get config sleep begin +2 get config sleep middle parsable +3 get config mangle halfway through encoded properties + +4 end session sleep before reply + +5 node status sleep before reply +6 node status sleep during parsable reply +7 node status sleep after parsable, before status reply +8 node status sleep partway through status reporting +9 node status sleep end of status printing + +10000 events PING no ping don't send pings to event listeners diff --git a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp index 38223502175..c6a3f804b70 100644 --- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -66,6 +66,9 @@ #define DEBUG(x) #endif +int g_errorInsert; +#define ERROR_INSERTED(x) (g_errorInsert == x) + #define INIT_SIGNAL_SENDER(ss,nodeId) \ SignalSender ss(theFacade); \ ss.lock(); /* lock will be released on exit */ \ @@ -177,6 +180,10 @@ MgmtSrvr::logLevelThreadRun() m_log_level_requests.lock(); } m_log_level_requests.unlock(); + + if(!ERROR_INSERTED(10000)) + m_event_listner.check_listeners(); + NdbSleep_MilliSleep(_logLevelThreadSleep); } } @@ -1730,14 +1737,31 @@ MgmtSrvr::setNodeLogLevelImpl(int nodeId, const SetLogLevelOrd & ll) int MgmtSrvr::insertError(int nodeId, int errorNo) { + int block; + if (errorNo < 0) { return INVALID_ERROR_NUMBER; } - INIT_SIGNAL_SENDER(ss,nodeId); - + SignalSender ss(theFacade); + ss.lock(); /* lock will be released on exit */ + + if(getNodeType(nodeId) == NDB_MGM_NODE_TYPE_NDB) + { + block= CMVMI; + } + else if(nodeId == _ownNodeId) + { + g_errorInsert= errorNo; + return 0; + } + else if(getNodeType(nodeId) == NDB_MGM_NODE_TYPE_MGM) + block= _blockNumber; + else + return WRONG_PROCESS_TYPE; + SimpleSignal ssig; - ssig.set(ss,TestOrd::TraceAPI, CMVMI, GSN_TAMPER_ORD, + ssig.set(ss,TestOrd::TraceAPI, block, GSN_TAMPER_ORD, TamperOrd::SignalLength); TamperOrd* const tamperOrd = CAST_PTR(TamperOrd, ssig.getDataPtrSend()); tamperOrd->errorNo = errorNo; @@ -1972,6 +1996,10 @@ MgmtSrvr::handleReceivedSignal(NdbApiSignal* signal) case GSN_NODE_FAILREP: break; + case GSN_TAMPER_ORD: + ndbout << "TAMPER ORD" << endl; + break; + default: g_eventLogger.error("Unknown signal received. 
SignalNumber: " "%i from (%d, %x)", @@ -2136,6 +2164,8 @@ MgmtSrvr::alloc_node_id_req(NodeId free_node_id, enum ndb_mgm_node_type type) { do_send = 1; nodeId = refToNode(ref->masterRef); + if (!theFacade->get_node_alive(nodeId)) + nodeId = 0; continue; } return ref->errorCode; @@ -2626,6 +2656,8 @@ MgmtSrvr::startBackup(Uint32& backupId, int waitCompleted) ndbout_c("I'm not master resending to %d", nodeId); #endif do_send = 1; // try again + if (!theFacade->get_node_alive(nodeId)) + m_master_node = nodeId = 0; continue; } event.Event = BackupEvent::BackupFailedToStart; diff --git a/storage/ndb/src/mgmsrv/MgmtSrvr.hpp b/storage/ndb/src/mgmsrv/MgmtSrvr.hpp index 19804f735b4..a54b7866091 100644 --- a/storage/ndb/src/mgmsrv/MgmtSrvr.hpp +++ b/storage/ndb/src/mgmsrv/MgmtSrvr.hpp @@ -38,6 +38,10 @@ */ #define MGMSRV 1 +#define MGM_ERROR_MAX_INJECT_SESSION_ONLY 10000 + +extern int g_errorInsert; + class ConfigInfoServer; class NdbApiSignal; class Config; diff --git a/storage/ndb/src/mgmsrv/ParamInfo.cpp b/storage/ndb/src/mgmsrv/ParamInfo.cpp index 4a4e80eb079..888b7948c05 100644 --- a/storage/ndb/src/mgmsrv/ParamInfo.cpp +++ b/storage/ndb/src/mgmsrv/ParamInfo.cpp @@ -275,7 +275,7 @@ const ParamInfo ParamInfoArray[] = { CI_INT, "128", "8", - STR_VALUE(MAX_INT_RNIL) }, + STR_VALUE(MAX_TABLES) }, { CFG_DB_NO_ORDERED_INDEXES, diff --git a/storage/ndb/src/mgmsrv/Services.cpp b/storage/ndb/src/mgmsrv/Services.cpp index dc865c594c0..f260ff7e3ec 100644 --- a/storage/ndb/src/mgmsrv/Services.cpp +++ b/storage/ndb/src/mgmsrv/Services.cpp @@ -288,18 +288,23 @@ struct PurgeStruct NDB_TICKS tick; }; +#define ERROR_INSERTED(x) (g_errorInsert == x || m_errorInsert == x) + +#define SLEEP_ERROR_INSERTED(x) if(ERROR_INSERTED(x)){NdbSleep_SecSleep(10);} + MgmApiSession::MgmApiSession(class MgmtSrvr & mgm, NDB_SOCKET_TYPE sock, Uint64 session_id) : SocketServer::Session(sock), m_mgmsrv(mgm) { DBUG_ENTER("MgmApiSession::MgmApiSession"); - m_input = new SocketInputStream(sock); - m_output = new SocketOutputStream(sock); + m_input = new SocketInputStream(sock, 30000); + m_output = new SocketOutputStream(sock, 30000); m_parser = new Parser_t(commands, *m_input, true, true, true); m_allocated_resources= new MgmtSrvr::Allocated_resources(m_mgmsrv); m_stopSelf= 0; m_ctx= NULL; m_session_id= session_id; m_mutex= NdbMutex_Create(); + m_errorInsert= 0; DBUG_VOID_RETURN; } @@ -339,6 +344,9 @@ MgmApiSession::runSession() while(!stop) { NdbMutex_Lock(m_mutex); + m_input->reset_timeout(); + m_output->reset_timeout(); + m_parser->run(ctx, *this); if(ctx.m_currentToken == 0) @@ -596,13 +604,23 @@ MgmApiSession::getConfig(Parser_t::Context &, char *tmp_str = (char *) malloc(base64_needed_encoded_length(src.length())); (void) base64_encode(src.get_data(), src.length(), tmp_str); - + + SLEEP_ERROR_INSERTED(1); + m_output->println("get config reply"); m_output->println("result: Ok"); m_output->println("Content-Length: %d", strlen(tmp_str)); m_output->println("Content-Type: ndbconfig/octet-stream"); + SLEEP_ERROR_INSERTED(2); m_output->println("Content-Transfer-Encoding: base64"); m_output->println(""); + if(ERROR_INSERTED(3)) + { + int l= strlen(tmp_str); + tmp_str[l/2]='\0'; + m_output->println(tmp_str); + NdbSleep_SecSleep(10); + } m_output->println(tmp_str); free(tmp_str); @@ -613,11 +631,22 @@ void MgmApiSession::insertError(Parser<MgmApiSession>::Context &, Properties const &args) { Uint32 node = 0, error = 0; + int result= 0; args.get("node", &node); args.get("error", &error); - int result = m_mgmsrv.insertError(node, error); + 
if(node==m_mgmsrv.getOwnNodeId() + && error < MGM_ERROR_MAX_INJECT_SESSION_ONLY) + { + m_errorInsert= error; + if(error==0) + g_errorInsert= error; + } + else + { + result= m_mgmsrv.insertError(node, error); + } m_output->println("insert error reply"); if(result != 0) @@ -734,6 +763,7 @@ MgmApiSession::endSession(Parser<MgmApiSession>::Context &, m_allocated_resources= new MgmtSrvr::Allocated_resources(m_mgmsrv); + SLEEP_ERROR_INSERTED(4); m_output->println("end session reply"); } @@ -984,12 +1014,16 @@ MgmApiSession::getStatus(Parser<MgmApiSession>::Context &, while(m_mgmsrv.getNextNodeId(&nodeId, NDB_MGM_NODE_TYPE_MGM)){ noOfNodes++; } - + SLEEP_ERROR_INSERTED(5); m_output->println("node status"); + SLEEP_ERROR_INSERTED(6); m_output->println("nodes: %d", noOfNodes); + SLEEP_ERROR_INSERTED(7); printNodeStatus(m_output, m_mgmsrv, NDB_MGM_NODE_TYPE_NDB); printNodeStatus(m_output, m_mgmsrv, NDB_MGM_NODE_TYPE_MGM); + SLEEP_ERROR_INSERTED(8); printNodeStatus(m_output, m_mgmsrv, NDB_MGM_NODE_TYPE_API); + SLEEP_ERROR_INSERTED(9); nodeId = 0; @@ -1104,8 +1138,10 @@ MgmApiSession::enterSingleUser(Parser<MgmApiSession>::Context &, Properties const &args) { int stopped = 0; Uint32 nodeId = 0; + int result= 0; args.get("nodeId", &nodeId); - int result = m_mgmsrv.enterSingleUser(&stopped, nodeId); + + result = m_mgmsrv.enterSingleUser(&stopped, nodeId); m_output->println("enter single user reply"); if(result != 0) { m_output->println("result: %s", get_error_text(result)); @@ -1301,20 +1337,21 @@ Ndb_mgmd_event_service::log(int eventType, const Uint32* theData, NodeId nodeId) { if(threshold <= m_clients[i].m_logLevel.getLogLevel(cat)) { - NDB_SOCKET_TYPE fd= m_clients[i].m_socket; - if(fd != NDB_INVALID_SOCKET) + if(m_clients[i].m_socket==NDB_INVALID_SOCKET) + continue; + + SocketOutputStream out(m_clients[i].m_socket); + + int r; + if (m_clients[i].m_parsable) + r= out.println(str.c_str()); + else + r= out.println(m_text); + + if (r<0) { - int r; - if (m_clients[i].m_parsable) - r= println_socket(fd, - MAX_WRITE_TIMEOUT, str.c_str()); - else - r= println_socket(fd, - MAX_WRITE_TIMEOUT, m_text); - if (r == -1) { - copy.push_back(fd); - m_clients.erase(i, false); - } + copy.push_back(m_clients[i].m_socket); + m_clients.erase(i, false); } } } @@ -1365,14 +1402,16 @@ Ndb_mgmd_event_service::check_listeners() m_clients.lock(); for(i= m_clients.size() - 1; i >= 0; i--) { - int fd= m_clients[i].m_socket; - DBUG_PRINT("info",("%d %d",i,fd)); - char buf[1]; - buf[0]=0; - if (fd != NDB_INVALID_SOCKET && - println_socket(fd,MAX_WRITE_TIMEOUT,"<PING>") == -1) + if(m_clients[i].m_socket==NDB_INVALID_SOCKET) + continue; + + SocketOutputStream out(m_clients[i].m_socket); + + DBUG_PRINT("info",("%d %d",i,m_clients[i].m_socket)); + + if(out.println("<PING>") < 0) { - NDB_CLOSE_SOCKET(fd); + NDB_CLOSE_SOCKET(m_clients[i].m_socket); m_clients.erase(i, false); n=1; } @@ -1602,8 +1641,11 @@ void MgmApiSession::check_connection(Parser_t::Context &ctx, const class Properties &args) { + SLEEP_ERROR_INSERTED(1); m_output->println("check connection reply"); + SLEEP_ERROR_INSERTED(2); m_output->println("result: Ok"); + SLEEP_ERROR_INSERTED(3); m_output->println(""); } @@ -1623,7 +1665,9 @@ MgmApiSession::get_mgmd_nodeid(Parser_t::Context &ctx, Properties const &args) { m_output->println("get mgmd nodeid reply"); - m_output->println("nodeid:%u",m_mgmsrv.getOwnNodeId()); + m_output->println("nodeid:%u",m_mgmsrv.getOwnNodeId()); + SLEEP_ERROR_INSERTED(1); + m_output->println(""); } diff --git a/storage/ndb/src/mgmsrv/Services.hpp 
b/storage/ndb/src/mgmsrv/Services.hpp index c112c66da36..76f839e3aab 100644 --- a/storage/ndb/src/mgmsrv/Services.hpp +++ b/storage/ndb/src/mgmsrv/Services.hpp @@ -46,6 +46,8 @@ private: Parser_t::Context *m_ctx; Uint64 m_session_id; + int m_errorInsert; + const char *get_error_text(int err_no) { return m_mgmsrv.getErrorText(err_no, m_err_str, sizeof(m_err_str)); } diff --git a/storage/ndb/src/ndbapi/ClusterMgr.cpp b/storage/ndb/src/ndbapi/ClusterMgr.cpp index 2a794f69ecb..52c95df6d15 100644 --- a/storage/ndb/src/ndbapi/ClusterMgr.cpp +++ b/storage/ndb/src/ndbapi/ClusterMgr.cpp @@ -389,7 +389,7 @@ ClusterMgr::execAPI_REGCONF(const Uint32 * theData){ node.m_state = apiRegConf->nodeState; if (node.compatible && (node.m_state.startLevel == NodeState::SL_STARTED || - node.m_state.startLevel == NodeState::SL_SINGLEUSER)){ + node.m_state.getSingleUserMode())){ set_node_alive(node, true); } else { set_node_alive(node, false); diff --git a/storage/ndb/src/ndbapi/Ndb.cpp b/storage/ndb/src/ndbapi/Ndb.cpp index e90dfbfa959..495b848645f 100644 --- a/storage/ndb/src/ndbapi/Ndb.cpp +++ b/storage/ndb/src/ndbapi/Ndb.cpp @@ -58,6 +58,8 @@ NdbTransaction* Ndb::doConnect(Uint32 tConNode) // We have connections now to the desired node. Return //**************************************************************************** DBUG_RETURN(getConnectedNdbTransaction(tConNode)); + } else if (TretCode < 0) { + DBUG_RETURN(NULL); } else if (TretCode != 0) { tAnyAlive = 1; }//if @@ -81,6 +83,8 @@ NdbTransaction* Ndb::doConnect(Uint32 tConNode) // We have connections now to the desired node. Return //**************************************************************************** DBUG_RETURN(getConnectedNdbTransaction(tNode)); + } else if (TretCode < 0) { + DBUG_RETURN(NULL); } else if (TretCode != 0) { tAnyAlive= 1; }//if @@ -109,6 +113,8 @@ NdbTransaction* Ndb::doConnect(Uint32 tConNode) // We have connections now to the desired node. Return //**************************************************************************** DBUG_RETURN(getConnectedNdbTransaction(tNode)); + } else if (TretCode < 0) { + DBUG_RETURN(NULL); } else if (TretCode != 0) { tAnyAlive= 1; }//if @@ -195,6 +201,11 @@ Ndb::NDB_connect(Uint32 tNode) DBUG_PRINT("info", ("unsuccessful connect tReturnCode %d, tNdbCon->Status() %d", tReturnCode, tNdbCon->Status())); + if (theError.code == 299) + { + // single user mode so no need to retry with other node + DBUG_RETURN(-1); + } DBUG_RETURN(3); }//if }//Ndb::NDB_connect() diff --git a/storage/ndb/src/ndbapi/NdbBlob.cpp b/storage/ndb/src/ndbapi/NdbBlob.cpp index 98ba28c8d2e..b3664107cce 100644 --- a/storage/ndb/src/ndbapi/NdbBlob.cpp +++ b/storage/ndb/src/ndbapi/NdbBlob.cpp @@ -607,6 +607,12 @@ NdbBlob::getHeadInlineValue(NdbOperation* anOp) setErrorCode(anOp); DBUG_RETURN(-1); } + /* + * If we get no data from this op then the operation is aborted + * one way or other. Following hack in 5.0 makes sure we don't read + * garbage. The proper fix exists only in version >= 5.1. 
+ */ + theHead->length = 0; DBUG_RETURN(0); } diff --git a/storage/ndb/src/ndbapi/NdbDictionary.cpp b/storage/ndb/src/ndbapi/NdbDictionary.cpp index 47ba0335183..b7a7087c302 100644 --- a/storage/ndb/src/ndbapi/NdbDictionary.cpp +++ b/storage/ndb/src/ndbapi/NdbDictionary.cpp @@ -495,6 +495,18 @@ NdbDictionary::Table::getFrmLength() const { return m_impl.getFrmLength(); } +enum NdbDictionary::Table::SingleUserMode +NdbDictionary::Table::getSingleUserMode() const +{ + return (enum SingleUserMode)m_impl.m_single_user_mode; +} + +void +NdbDictionary::Table::setSingleUserMode(enum NdbDictionary::Table::SingleUserMode mode) +{ + m_impl.m_single_user_mode = (Uint8)mode; +} + void NdbDictionary::Table::setTablespaceNames(const void *data, Uint32 len) { diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp index 917f4e17051..2bd59c6e06f 100644 --- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -51,7 +51,7 @@ #define ERR_RETURN(a,b) \ {\ - DBUG_PRINT("exit", ("error %d", (a).code));\ + DBUG_PRINT("exit", ("error %d return %d", (a).code, b));\ DBUG_RETURN(b);\ } @@ -470,6 +470,7 @@ NdbTableImpl::init(){ m_tablespace_name.clear(); m_tablespace_id = ~0; m_tablespace_version = ~0; + m_single_user_mode = 0; } bool @@ -663,6 +664,15 @@ NdbTableImpl::equal(const NdbTableImpl& obj) const DBUG_RETURN(false); } } + + if(m_single_user_mode != obj.m_single_user_mode) + { + DBUG_PRINT("info",("m_single_user_mode %d != %d", + (int32)m_single_user_mode, + (int32)obj.m_single_user_mode)); + DBUG_RETURN(false); + } + DBUG_RETURN(true); } @@ -726,6 +736,8 @@ NdbTableImpl::assign(const NdbTableImpl& org) m_keyLenInWords = org.m_keyLenInWords; m_fragmentCount = org.m_fragmentCount; + m_single_user_mode = org.m_single_user_mode; + if (m_index != 0) delete m_index; m_index = org.m_index; @@ -2103,6 +2115,7 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret, impl->m_kvalue = tableDesc->TableKValue; impl->m_minLoadFactor = tableDesc->MinLoadFactor; impl->m_maxLoadFactor = tableDesc->MaxLoadFactor; + impl->m_single_user_mode = tableDesc->SingleUserMode; impl->m_indexType = (NdbDictionary::Object::Type) getApiConstant(tableDesc->TableType, @@ -2564,6 +2577,7 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, tmpTab->MinRowsLow = (Uint32)(impl.m_min_rows & 0xFFFFFFFF); tmpTab->DefaultNoPartFlag = impl.m_default_no_part_flag; tmpTab->LinearHashFlag = impl.m_linear_flag; + tmpTab->SingleUserMode = impl.m_single_user_mode; if (impl.m_ts_name.length()) { diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp index 26d7c13f968..b474b6e7fc0 100644 --- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp +++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp @@ -204,6 +204,7 @@ public: int m_maxLoadFactor; Uint16 m_keyLenInWords; Uint16 m_fragmentCount; + Uint8 m_single_user_mode; NdbIndexImpl * m_index; NdbColumnImpl * getColumn(unsigned attrId); diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp index 52760dbbd36..b694b466530 100644 --- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp +++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp @@ -2101,15 +2101,17 @@ NdbEventBuffer::alloc_mem(EventBufData* data, NdbMem_Free((char*)data->memory); assert(m_total_alloc >= data->sz); - m_total_alloc -= data->sz; data->memory = 0; data->sz = 0; data->memory = (Uint32*)NdbMem_Allocate(alloc_size); if (data->memory == 0) + { + 
m_total_alloc -= data->sz; DBUG_RETURN(-1); + } data->sz = alloc_size; - m_total_alloc += data->sz; + m_total_alloc += add_sz; if (change_sz != NULL) *change_sz += add_sz; @@ -2781,7 +2783,7 @@ NdbEventBuffer::reportStatus() else apply_gci= latest_gci; - if (100*m_free_data_sz < m_min_free_thresh*m_total_alloc && + if (100*(Uint64)m_free_data_sz < m_min_free_thresh*(Uint64)m_total_alloc && m_total_alloc > 1024*1024) { /* report less free buffer than m_free_thresh, @@ -2792,7 +2794,7 @@ NdbEventBuffer::reportStatus() goto send_report; } - if (100*m_free_data_sz > m_max_free_thresh*m_total_alloc && + if (100*(Uint64)m_free_data_sz > m_max_free_thresh*(Uint64)m_total_alloc && m_total_alloc > 1024*1024) { /* report more free than 2 * m_free_thresh diff --git a/storage/ndb/src/ndbapi/NdbOperation.cpp b/storage/ndb/src/ndbapi/NdbOperation.cpp index ff9c50da77c..9b3d20a1c33 100644 --- a/storage/ndb/src/ndbapi/NdbOperation.cpp +++ b/storage/ndb/src/ndbapi/NdbOperation.cpp @@ -369,12 +369,24 @@ NdbOperation::subValue( const char* anAttrName, Uint32 aValue) } int +NdbOperation::subValue( const char* anAttrName, Uint64 aValue) +{ + return subValue(m_currentTable->getColumn(anAttrName), aValue); +} + +int NdbOperation::subValue(Uint32 anAttrId, Uint32 aValue) { return subValue(m_currentTable->getColumn(anAttrId), aValue); } int +NdbOperation::subValue(Uint32 anAttrId, Uint64 aValue) +{ + return subValue(m_currentTable->getColumn(anAttrId), aValue); +} + +int NdbOperation::read_attr(const char* anAttrName, Uint32 RegDest) { return read_attr(m_currentTable->getColumn(anAttrName), RegDest); diff --git a/storage/ndb/src/ndbapi/NdbRecAttr.cpp b/storage/ndb/src/ndbapi/NdbRecAttr.cpp index 188e718d968..20fa83e849d 100644 --- a/storage/ndb/src/ndbapi/NdbRecAttr.cpp +++ b/storage/ndb/src/ndbapi/NdbRecAttr.cpp @@ -138,8 +138,24 @@ NdbRecAttr::receive_data(const Uint32 * data, Uint32 sz) return false; } +NdbRecordPrintFormat::NdbRecordPrintFormat() +{ + fields_terminated_by= ";"; + start_array_enclosure= "["; + end_array_enclosure= "]"; + fields_enclosed_by= ""; + fields_optionally_enclosed_by= "\""; + lines_terminated_by= "\n"; + hex_prefix= "H'"; + null_string= "[NULL]"; + hex_format= 0; +} +NdbRecordPrintFormat::~NdbRecordPrintFormat() {} +static const NdbRecordPrintFormat default_print_format; + static void -ndbrecattr_print_string(NdbOut& out, const char *type, +ndbrecattr_print_string(NdbOut& out, const NdbRecordPrintFormat &f, + const char *type, bool is_binary, const char *aref, unsigned sz) { const unsigned char* ref = (const unsigned char*)aref; @@ -148,6 +164,25 @@ ndbrecattr_print_string(NdbOut& out, const char *type, for (i=sz-1; i >= 0; i--) if (ref[i] == 0) sz--; else break; + if (!is_binary) + { + // trailing spaces are not printed + for (i=sz-1; i >= 0; i--) + if (ref[i] == 32) sz--; + else break; + } + if (is_binary && f.hex_format) + { + if (sz == 0) + { + out.print("0x0"); + return; + } + out.print("0x"); + for (len = 0; len < (int)sz; len++) + out.print("%02X", (int)ref[len]); + return; + } if (sz == 0) return; // empty for (len=0; len < (int)sz && ref[i] != 0; len++) @@ -168,37 +203,57 @@ ndbrecattr_print_string(NdbOut& out, const char *type, for (i= len+1; ref[i] != 0; i++) out.print("%u]",len-i); assert((int)sz > i); - ndbrecattr_print_string(out,type,aref+i,sz-i); + ndbrecattr_print_string(out,f,type,is_binary,aref+i,sz-i); } } -NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r) +NdbOut& +ndbrecattr_print_formatted(NdbOut& out, const NdbRecAttr &r, + const NdbRecordPrintFormat &f) { if 
(r.isNULL()) { - out << "[NULL]"; + out << f.null_string; return out; } const NdbDictionary::Column* c = r.getColumn(); uint length = c->getLength(); - if (length > 1) - out << "["; - - for (Uint32 j = 0; j < length; j++) { - if (j > 0) - out << " "; - + const char *fields_optionally_enclosed_by; + if (f.fields_enclosed_by[0] == '\0') + fields_optionally_enclosed_by= + f.fields_optionally_enclosed_by; + else + fields_optionally_enclosed_by= ""; + out << f.fields_enclosed_by; + Uint32 j; switch(r.getType()){ case NdbDictionary::Column::Bigunsigned: out << r.u_64_value(); break; case NdbDictionary::Column::Bit: - out << hex << "H'" << r.u_32_value() << dec; + out << f.hex_prefix << "0x"; + { + const Uint32 *buf = (Uint32 *)r.aRef(); + int k = (length+31)/32; + while (k > 0 && (buf[--k] == 0)); + out.print("%X", buf[k]); + while (k > 0) + out.print("%.8X", buf[--k]); + } break; case NdbDictionary::Column::Unsigned: - out << *((Uint32*)r.aRef() + j); + if (length > 1) + out << f.start_array_enclosure; + out << *(Uint32*)r.aRef(); + for (j = 1; j < length; j++) + out << " " << *((Uint32*)r.aRef() + j); + if (length > 1) + out << f.end_array_enclosure; + break; + case NdbDictionary::Column::Mediumunsigned: + out << r.u_medium_value(); break; case NdbDictionary::Column::Smallunsigned: out << r.u_short_value(); @@ -212,6 +267,9 @@ NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r) case NdbDictionary::Column::Int: out << r.int32_value(); break; + case NdbDictionary::Column::Mediumint: + out << r.medium_value(); + break; case NdbDictionary::Column::Smallint: out << r.short_value(); break; @@ -219,25 +277,37 @@ NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r) out << (int) r.char_value(); break; case NdbDictionary::Column::Binary: + if (!f.hex_format) + out << fields_optionally_enclosed_by; j = r.get_size_in_bytes(); - ndbrecattr_print_string(out,"Binary", r.aRef(), j); + ndbrecattr_print_string(out,f,"Binary", true, r.aRef(), j); + if (!f.hex_format) + out << fields_optionally_enclosed_by; break; case NdbDictionary::Column::Char: + out << fields_optionally_enclosed_by; j = r.get_size_in_bytes(); - ndbrecattr_print_string(out,"Char", r.aRef(), j); + ndbrecattr_print_string(out,f,"Char", false, r.aRef(), j); + out << fields_optionally_enclosed_by; break; case NdbDictionary::Column::Varchar: { + out << fields_optionally_enclosed_by; unsigned len = *(const unsigned char*)r.aRef(); - ndbrecattr_print_string(out,"Varchar", r.aRef()+1,len); + ndbrecattr_print_string(out,f,"Varchar", false, r.aRef()+1,len); j = length; + out << fields_optionally_enclosed_by; } break; case NdbDictionary::Column::Varbinary: { + if (!f.hex_format) + out << fields_optionally_enclosed_by; unsigned len = *(const unsigned char*)r.aRef(); - ndbrecattr_print_string(out,"Varbinary", r.aRef()+1,len); + ndbrecattr_print_string(out,f,"Varbinary", true, r.aRef()+1,len); j = length; + if (!f.hex_format) + out << fields_optionally_enclosed_by; } break; case NdbDictionary::Column::Float: @@ -366,16 +436,26 @@ NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r) break; case NdbDictionary::Column::Longvarchar: { + out << fields_optionally_enclosed_by; unsigned len = uint2korr(r.aRef()); - ndbrecattr_print_string(out,"Longvarchar", r.aRef()+2,len); + ndbrecattr_print_string(out,f,"Longvarchar", false, r.aRef()+2,len); j = length; + out << fields_optionally_enclosed_by; + } + break; + case NdbDictionary::Column::Longvarbinary: + { + if (!f.hex_format) + out << fields_optionally_enclosed_by; + unsigned len = uint2korr(r.aRef()); + 
ndbrecattr_print_string(out,f,"Longvarbinary", true, r.aRef()+2,len); + j = length; + if (!f.hex_format) + out << fields_optionally_enclosed_by; } break; case NdbDictionary::Column::Undefined: - case NdbDictionary::Column::Mediumint: - case NdbDictionary::Column::Mediumunsigned: - case NdbDictionary::Column::Longvarbinary: unknown: //default: /* no print functions for the rest, just print type */ out << (int) r.getType(); @@ -384,16 +464,17 @@ NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r) out << " " << j << " times"; break; } - } - - if (length > 1) - { - out << "]"; + out << f.fields_enclosed_by; } return out; } +NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r) +{ + return ndbrecattr_print_formatted(out, r, default_print_format); +} + Int64 NdbRecAttr::int64_value() const { diff --git a/storage/ndb/src/ndbapi/NdbScanFilter.cpp b/storage/ndb/src/ndbapi/NdbScanFilter.cpp index 6ff7485416b..89abba2eddc 100644 --- a/storage/ndb/src/ndbapi/NdbScanFilter.cpp +++ b/storage/ndb/src/ndbapi/NdbScanFilter.cpp @@ -42,7 +42,9 @@ public: int m_label; State m_current; + Uint32 m_negative; //used for translating NAND/NOR to AND/OR, equal 0 or 1 Vector<State> m_stack; + Vector<Uint32> m_stack2; //to store info of m_negative NdbOperation * m_operation; Uint32 m_latestAttrib; @@ -66,6 +68,7 @@ NdbScanFilter::NdbScanFilter(class NdbOperation * op) m_impl.m_label = 0; m_impl.m_latestAttrib = ~0; m_impl.m_operation = op; + m_impl.m_negative = 0; } NdbScanFilter::~NdbScanFilter(){ @@ -75,18 +78,39 @@ NdbScanFilter::~NdbScanFilter(){ int NdbScanFilter::begin(Group group){ + m_impl.m_stack2.push_back(m_impl.m_negative); switch(group){ case NdbScanFilter::AND: INT_DEBUG(("Begin(AND)")); + if(m_impl.m_negative == 1){ + group = NdbScanFilter::OR; + } break; case NdbScanFilter::OR: INT_DEBUG(("Begin(OR)")); + if(m_impl.m_negative == 1){ + group = NdbScanFilter::AND; + } break; case NdbScanFilter::NAND: INT_DEBUG(("Begin(NAND)")); + if(m_impl.m_negative == 0){ + group = NdbScanFilter::OR; + m_impl.m_negative = 1; + }else{ + group = NdbScanFilter::AND; + m_impl.m_negative = 0; + } break; case NdbScanFilter::NOR: INT_DEBUG(("Begin(NOR)")); + if(m_impl.m_negative == 0){ + group = NdbScanFilter::AND; + m_impl.m_negative = 1; + }else{ + group = NdbScanFilter::OR; + m_impl.m_negative = 0; + } break; } @@ -130,6 +154,13 @@ NdbScanFilter::begin(Group group){ int NdbScanFilter::end(){ + if(m_impl.m_stack2.size() == 0){ + m_impl.m_operation->setErrorCodeAbort(4259); + return -1; + } + m_impl.m_negative = m_impl.m_stack2.back(); + m_impl.m_stack2.erase(m_impl.m_stack2.size() - 1); + switch(m_impl.m_current.m_group){ case NdbScanFilter::AND: INT_DEBUG(("End(AND pc=%d)", m_impl.m_current.m_popCount)); @@ -151,6 +182,10 @@ NdbScanFilter::end(){ } NdbScanFilterImpl::State tmp = m_impl.m_current; + if(m_impl.m_stack.size() == 0){ + m_impl.m_operation->setErrorCodeAbort(4259); + return -1; + } m_impl.m_current = m_impl.m_stack.back(); m_impl.m_stack.erase(m_impl.m_stack.size() - 1); @@ -395,8 +430,17 @@ NdbScanFilterImpl::cond_col_const(Interpreter::BinaryCondition op, m_operation->setErrorCodeAbort(4260); return -1; } + + StrBranch2 branch; + if(m_negative == 1){ //change NdbOperation to its negative + if(m_current.m_group == NdbScanFilter::AND) + branch = table3[op].m_branches[(Uint32)(m_current.m_group) + 1]; + if(m_current.m_group == NdbScanFilter::OR) + branch = table3[op].m_branches[(Uint32)(m_current.m_group) - 1]; + }else{ + branch = table3[op].m_branches[(Uint32)(m_current.m_group)]; + } - StrBranch2 branch = 
table3[op].m_branches[m_current.m_group]; const NdbDictionary::Column * col = m_operation->m_currentTable->getColumn(AttrId); diff --git a/storage/ndb/src/ndbapi/TransporterFacade.hpp b/storage/ndb/src/ndbapi/TransporterFacade.hpp index d19974f8999..23fea8792f7 100644 --- a/storage/ndb/src/ndbapi/TransporterFacade.hpp +++ b/storage/ndb/src/ndbapi/TransporterFacade.hpp @@ -365,7 +365,8 @@ inline bool TransporterFacade::get_node_stopping(NodeId n) const { const ClusterMgr::Node & node = theClusterMgr->getNodeInfo(n); - return ((node.m_state.startLevel == NodeState::SL_STOPPING_1) || + return (!node.m_state.getSingleUserMode() && + (node.m_state.startLevel == NodeState::SL_STOPPING_1) || (node.m_state.startLevel == NodeState::SL_STOPPING_2)); } @@ -376,16 +377,9 @@ TransporterFacade::getIsNodeSendable(NodeId n) const { const Uint32 startLevel = node.m_state.startLevel; if (node.m_info.m_type == NodeInfo::DB) { - if(node.m_state.singleUserMode && - ownId() == node.m_state.singleUserApi) { - return (node.compatible && - (node.m_state.startLevel == NodeState::SL_STOPPING_1 || - node.m_state.startLevel == NodeState::SL_STARTED || - node.m_state.startLevel == NodeState::SL_SINGLEUSER)); - } - else - return node.compatible && (startLevel == NodeState::SL_STARTED || - startLevel == NodeState::SL_STOPPING_1); + return node.compatible && (startLevel == NodeState::SL_STARTED || + startLevel == NodeState::SL_STOPPING_1 || + node.m_state.getSingleUserMode()); } else { ndbout_c("TransporterFacade::getIsNodeSendable: Illegal node type: " "%d of node: %d", diff --git a/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp b/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp index 24ac05caf07..a6c7c917ee2 100644 --- a/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp +++ b/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp @@ -666,5 +666,12 @@ Ndb_cluster_connection::get_active_ndb_objects() const { return m_impl.m_transporter_facade->get_active_ndb_objects(); } + +int Ndb_cluster_connection::set_timeout(int timeout_ms) +{ + return ndb_mgm_set_timeout(m_impl.m_config_retriever->get_mgmHandle(), + timeout_ms); +} + template class Vector<Ndb_cluster_connection_impl::Node>; diff --git a/storage/ndb/test/include/CpcClient.hpp b/storage/ndb/test/include/CpcClient.hpp index 62f016f8e1d..a743499566f 100644 --- a/storage/ndb/test/include/CpcClient.hpp +++ b/storage/ndb/test/include/CpcClient.hpp @@ -70,8 +70,6 @@ private: char *host; int port; NDB_SOCKET_TYPE cpc_sock; - InputStream *cpc_in; - OutputStream *cpc_out; public: int connect(); diff --git a/storage/ndb/test/include/NDBT_Test.hpp b/storage/ndb/test/include/NDBT_Test.hpp index 46d27ec1c3c..ad09f04e814 100644 --- a/storage/ndb/test/include/NDBT_Test.hpp +++ b/storage/ndb/test/include/NDBT_Test.hpp @@ -333,6 +333,12 @@ public: // supply argc and argv as parameters int execute(int, const char**); + // NDBT's test tables are fixed and it always create + // and drop fixed table when execute, add this method + // in order to run CTX only and adapt to some new + // customized testsuite + int executeOneCtx(Ndb_cluster_connection&, + const NdbDictionary::Table* ptab, const char* testname = NULL); // These function can be used from main in the test program // to control the behaviour of the testsuite diff --git a/storage/ndb/test/ndbapi/Makefile.am b/storage/ndb/test/ndbapi/Makefile.am index 3209a8be523..ad509dbbafe 100644 --- a/storage/ndb/test/ndbapi/Makefile.am +++ b/storage/ndb/test/ndbapi/Makefile.am @@ -39,6 +39,7 @@ testOperations \ testRestartGci \ testScan \ 
testInterpreter \ +testScanFilter \ testScanInterpreter \ testScanPerf \ testSystemRestart \ @@ -85,6 +86,7 @@ testOperations_SOURCES = testOperations.cpp testRestartGci_SOURCES = testRestartGci.cpp testScan_SOURCES = testScan.cpp ScanFunctions.hpp testInterpreter_SOURCES = testInterpreter.cpp +testScanFilter_SOURCES = testScanFilter.cpp testScanInterpreter_SOURCES = testScanInterpreter.cpp ScanFilter.hpp ScanInterpretTest.hpp testScanPerf_SOURCES = testScanPerf.cpp testSystemRestart_SOURCES = testSystemRestart.cpp diff --git a/storage/ndb/test/ndbapi/testBlobs.cpp b/storage/ndb/test/ndbapi/testBlobs.cpp index e26910a64ad..e86714c4fcb 100644 --- a/storage/ndb/test/ndbapi/testBlobs.cpp +++ b/storage/ndb/test/ndbapi/testBlobs.cpp @@ -123,24 +123,24 @@ printusage() << "metadata" << endl << " -pk2len N length of PK2 [" << d.m_pk2len << "/" << g_max_pk2len <<"]" << endl << " -oneblob only 1 blob attribute [default 2]" << endl - << "testcases for test/skip" << endl + << "test cases for test/skip" << endl << " k primary key ops" << endl << " i hash index ops" << endl << " s table scans" << endl << " r ordered index scans" << endl << " p performance test" << endl - << "additional flags for test/skip" << endl + << "operations for test/skip" << endl << " u update existing blob value" << endl << " n normal insert and update" << endl << " w insert and update using writeTuple" << endl + << "blob operation styles for test/skip" << endl << " 0 getValue / setValue" << endl << " 1 setActiveHook" << endl << " 2 readData / writeData" << endl - << "bug tests (no blob test)" << endl + << "example: -test kn0 (need all 3 parts)" << endl + << "bug tests" << endl << " -bug 4088 ndb api hang with mixed ops on index table" << endl << " -bug 27018 middle partial part write clobbers rest of part" << endl - << " -bug nnnn delete + write gives 626" << endl - << " -bug nnnn acc crash on delete and long key" << endl ; } @@ -1029,6 +1029,32 @@ deletePk() return 0; } +static int +deleteNoPk() +{ + DBG("--- deleteNoPk ---"); + Tup no_tup; // bug#24028 + no_tup.m_pk1 = 0xb1ffb1ff; + sprintf(no_tup.m_pk2, "%-*.*s", g_opt.m_pk2len, g_opt.m_pk2len, "b1ffb1ff"); + CHK((g_con = g_ndb->startTransaction()) != 0); + Tup& tup = no_tup; + DBG("deletePk pk1=" << hex << tup.m_pk1); + CHK((g_opr = g_con->getNdbOperation(g_opt.m_tname)) != 0); + CHK(g_opr->deleteTuple() == 0); + CHK(g_opr->equal("PK1", tup.m_pk1) == 0); + if (g_opt.m_pk2len != 0) + CHK(g_opr->equal("PK2", tup.m_pk2) == 0); + CHK(g_con->execute(Commit) == -1); // fail + // BUG: error should be on op but is on con now + DBG("con: " << g_con->getNdbError()); + DBG("opr: " << g_opr->getNdbError()); + CHK(g_con->getNdbError().code == 626 || g_opr->getNdbError().code == 626); + g_ndb->closeTransaction(g_con); + g_opr = 0; + g_con = 0; + return 0; +} + // hash index ops static int @@ -1384,6 +1410,7 @@ testmain() CHK(readPk(style) == 0); } CHK(deletePk() == 0); + CHK(deleteNoPk() == 0); CHK(verifyBlob() == 0); } if (testcase('w')) { @@ -1398,6 +1425,7 @@ testmain() CHK(readPk(style) == 0); } CHK(deletePk() == 0); + CHK(deleteNoPk() == 0); CHK(verifyBlob() == 0); } } @@ -1858,18 +1886,6 @@ bugtest_27018() return 0; } -static int -bugtest_2222() -{ - return 0; -} - -static int -bugtest_3333() -{ - return 0; -} - static struct { int m_bug; int (*m_test)(); diff --git a/storage/ndb/test/ndbapi/testMgm.cpp b/storage/ndb/test/ndbapi/testMgm.cpp index c4301165497..cc074087bdb 100644 --- a/storage/ndb/test/ndbapi/testMgm.cpp +++ b/storage/ndb/test/ndbapi/testMgm.cpp @@ -22,6 +22,9 @@ 
#include <random.h> #include <mgmapi.h> #include <mgmapi_debug.h> +#include <ndb_logevent.h> +#include <InputStream.hpp> +#include <signaldata/EventReport.hpp> int runLoadTable(NDBT_Context* ctx, NDBT_Step* step){ @@ -191,6 +194,8 @@ int runTestApiSession(NDBT_Context* ctx, NDBT_Step* step) ndb_mgm_set_connectstring(h, mgm); ndb_mgm_connect(h,0,0,0); + NdbSleep_SecSleep(1); + if(ndb_mgm_get_session(h,session_id,&sess,&slen)) { ndbout << "Failed, session still exists" << endl; @@ -207,6 +212,510 @@ int runTestApiSession(NDBT_Context* ctx, NDBT_Step* step) } } +int runTestApiTimeoutBasic(NDBT_Context* ctx, NDBT_Step* step) +{ + char *mgm= ctx->getRemoteMgm(); + int result= NDBT_FAILED; + int cc= 0; + int mgmd_nodeid= 0; + ndb_mgm_reply reply; + + NdbMgmHandle h; + h= ndb_mgm_create_handle(); + ndb_mgm_set_connectstring(h, mgm); + + ndbout << "TEST timout check_connection" << endl; + int errs[] = { 1, 2, 3, -1}; + + for(int error_ins_no=0; errs[error_ins_no]!=-1; error_ins_no++) + { + int error_ins= errs[error_ins_no]; + ndbout << "trying error " << error_ins << endl; + ndb_mgm_connect(h,0,0,0); + + if(ndb_mgm_check_connection(h) < 0) + { + result= NDBT_FAILED; + goto done; + } + + mgmd_nodeid= ndb_mgm_get_mgmd_nodeid(h); + if(mgmd_nodeid==0) + { + ndbout << "Failed to get mgmd node id to insert error" << endl; + result= NDBT_FAILED; + goto done; + } + + reply.return_code= 0; + + if(ndb_mgm_insert_error(h, mgmd_nodeid, error_ins, &reply)< 0) + { + ndbout << "failed to insert error " << endl; + result= NDBT_FAILED; + goto done; + } + + ndb_mgm_set_timeout(h,2500); + + cc= ndb_mgm_check_connection(h); + if(cc < 0) + result= NDBT_OK; + else + result= NDBT_FAILED; + + if(ndb_mgm_is_connected(h)) + { + ndbout << "FAILED: still connected" << endl; + result= NDBT_FAILED; + } + } + + ndbout << "TEST get_mgmd_nodeid" << endl; + ndb_mgm_connect(h,0,0,0); + + if(ndb_mgm_insert_error(h, mgmd_nodeid, 0, &reply)< 0) + { + ndbout << "failed to remove inserted error " << endl; + result= NDBT_FAILED; + goto done; + } + + cc= ndb_mgm_get_mgmd_nodeid(h); + ndbout << "got node id: " << cc << endl; + if(cc==0) + { + ndbout << "FAILED: didn't get node id" << endl; + result= NDBT_FAILED; + } + else + result= NDBT_OK; + + ndbout << "TEST end_session" << endl; + ndb_mgm_connect(h,0,0,0); + + if(ndb_mgm_insert_error(h, mgmd_nodeid, 4, &reply)< 0) + { + ndbout << "FAILED: insert error 1" << endl; + result= NDBT_FAILED; + goto done; + } + + cc= ndb_mgm_end_session(h); + if(cc==0) + { + ndbout << "FAILED: success in calling end_session" << endl; + result= NDBT_FAILED; + } + else if(ndb_mgm_get_latest_error(h)!=ETIMEDOUT) + { + ndbout << "FAILED: Incorrect error code (" << ndb_mgm_get_latest_error(h) + << " != expected " << ETIMEDOUT << ") desc: " + << ndb_mgm_get_latest_error_desc(h) + << " line: " << ndb_mgm_get_latest_error_line(h) + << " msg: " << ndb_mgm_get_latest_error_msg(h) + << endl; + result= NDBT_FAILED; + } + else + result= NDBT_OK; + + if(ndb_mgm_is_connected(h)) + { + ndbout << "FAILED: is still connected after error" << endl; + result= NDBT_FAILED; + } +done: + ndb_mgm_disconnect(h); + ndb_mgm_destroy_handle(&h); + + return result; +} + +int runTestApiGetStatusTimeout(NDBT_Context* ctx, NDBT_Step* step) +{ + char *mgm= ctx->getRemoteMgm(); + int result= NDBT_OK; + int cc= 0; + int mgmd_nodeid= 0; + + NdbMgmHandle h; + h= ndb_mgm_create_handle(); + ndb_mgm_set_connectstring(h, mgm); + + int errs[] = { 0, 5, 6, 7, 8, 9, -1 }; + + for(int error_ins_no=0; errs[error_ins_no]!=-1; error_ins_no++) + { + int 
error_ins= errs[error_ins_no]; + ndb_mgm_connect(h,0,0,0); + + if(ndb_mgm_check_connection(h) < 0) + { + result= NDBT_FAILED; + goto done; + } + + mgmd_nodeid= ndb_mgm_get_mgmd_nodeid(h); + if(mgmd_nodeid==0) + { + ndbout << "Failed to get mgmd node id to insert error" << endl; + result= NDBT_FAILED; + goto done; + } + + ndb_mgm_reply reply; + reply.return_code= 0; + + if(ndb_mgm_insert_error(h, mgmd_nodeid, error_ins, &reply)< 0) + { + ndbout << "failed to insert error " << error_ins << endl; + result= NDBT_FAILED; + } + + ndbout << "trying error: " << error_ins << endl; + + ndb_mgm_set_timeout(h,2500); + + struct ndb_mgm_cluster_state *cl= ndb_mgm_get_status(h); + + if(cl!=NULL) + free(cl); + + /* + * For whatever strange reason, + * get_status is okay with not having the last enter there. + * instead of "fixing" the api, let's have a special case + * so we don't break any behaviour + */ + + if(error_ins!=0 && error_ins!=9 && cl!=NULL) + { + ndbout << "FAILED: got a ndb_mgm_cluster_state back" << endl; + result= NDBT_FAILED; + } + + if(error_ins!=0 && error_ins!=9 && ndb_mgm_is_connected(h)) + { + ndbout << "FAILED: is still connected after error" << endl; + result= NDBT_FAILED; + } + + if(error_ins!=0 && error_ins!=9 && ndb_mgm_get_latest_error(h)!=ETIMEDOUT) + { + ndbout << "FAILED: Incorrect error code (" << ndb_mgm_get_latest_error(h) + << " != expected " << ETIMEDOUT << ") desc: " + << ndb_mgm_get_latest_error_desc(h) + << " line: " << ndb_mgm_get_latest_error_line(h) + << " msg: " << ndb_mgm_get_latest_error_msg(h) + << endl; + result= NDBT_FAILED; + } + } + +done: + ndb_mgm_disconnect(h); + ndb_mgm_destroy_handle(&h); + + return result; +} + +int runTestMgmApiGetConfigTimeout(NDBT_Context* ctx, NDBT_Step* step) +{ + char *mgm= ctx->getRemoteMgm(); + int result= NDBT_OK; + int mgmd_nodeid= 0; + + NdbMgmHandle h; + h= ndb_mgm_create_handle(); + ndb_mgm_set_connectstring(h, mgm); + + int errs[] = { 0, 1, 2, 3, -1 }; + + for(int error_ins_no=0; errs[error_ins_no]!=-1; error_ins_no++) + { + int error_ins= errs[error_ins_no]; + ndb_mgm_connect(h,0,0,0); + + if(ndb_mgm_check_connection(h) < 0) + { + result= NDBT_FAILED; + goto done; + } + + mgmd_nodeid= ndb_mgm_get_mgmd_nodeid(h); + if(mgmd_nodeid==0) + { + ndbout << "Failed to get mgmd node id to insert error" << endl; + result= NDBT_FAILED; + goto done; + } + + ndb_mgm_reply reply; + reply.return_code= 0; + + if(ndb_mgm_insert_error(h, mgmd_nodeid, error_ins, &reply)< 0) + { + ndbout << "failed to insert error " << error_ins << endl; + result= NDBT_FAILED; + } + + ndbout << "trying error: " << error_ins << endl; + + ndb_mgm_set_timeout(h,2500); + + struct ndb_mgm_configuration *c= ndb_mgm_get_configuration(h,0); + + if(c!=NULL) + free(c); + + if(error_ins!=0 && c!=NULL) + { + ndbout << "FAILED: got a ndb_mgm_configuration back" << endl; + result= NDBT_FAILED; + } + + if(error_ins!=0 && ndb_mgm_is_connected(h)) + { + ndbout << "FAILED: is still connected after error" << endl; + result= NDBT_FAILED; + } + + if(error_ins!=0 && ndb_mgm_get_latest_error(h)!=ETIMEDOUT) + { + ndbout << "FAILED: Incorrect error code (" << ndb_mgm_get_latest_error(h) + << " != expected " << ETIMEDOUT << ") desc: " + << ndb_mgm_get_latest_error_desc(h) + << " line: " << ndb_mgm_get_latest_error_line(h) + << " msg: " << ndb_mgm_get_latest_error_msg(h) + << endl; + result= NDBT_FAILED; + } + } + +done: + ndb_mgm_disconnect(h); + ndb_mgm_destroy_handle(&h); + + return result; +} + +int runTestMgmApiEventTimeout(NDBT_Context* ctx, NDBT_Step* step) +{ + char *mgm= 
ctx->getRemoteMgm(); + int result= NDBT_OK; + int mgmd_nodeid= 0; + + NdbMgmHandle h; + h= ndb_mgm_create_handle(); + ndb_mgm_set_connectstring(h, mgm); + + int errs[] = { 10000, 0, -1 }; + + for(int error_ins_no=0; errs[error_ins_no]!=-1; error_ins_no++) + { + int error_ins= errs[error_ins_no]; + ndb_mgm_connect(h,0,0,0); + + if(ndb_mgm_check_connection(h) < 0) + { + result= NDBT_FAILED; + goto done; + } + + mgmd_nodeid= ndb_mgm_get_mgmd_nodeid(h); + if(mgmd_nodeid==0) + { + ndbout << "Failed to get mgmd node id to insert error" << endl; + result= NDBT_FAILED; + goto done; + } + + ndb_mgm_reply reply; + reply.return_code= 0; + + if(ndb_mgm_insert_error(h, mgmd_nodeid, error_ins, &reply)< 0) + { + ndbout << "failed to insert error " << error_ins << endl; + result= NDBT_FAILED; + } + + ndbout << "trying error: " << error_ins << endl; + + ndb_mgm_set_timeout(h,2500); + + int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, + 1, NDB_MGM_EVENT_CATEGORY_STARTUP, + 0 }; + int fd= ndb_mgm_listen_event(h, filter); + + if(fd==NDB_INVALID_SOCKET) + { + ndbout << "FAILED: could not listen to event" << endl; + result= NDBT_FAILED; + } + + Uint32 theData[25]; + EventReport *fake_event = (EventReport*)theData; + fake_event->setEventType(NDB_LE_NDBStopForced); + fake_event->setNodeId(42); + theData[2]= 0; + theData[3]= 0; + theData[4]= 0; + theData[5]= 0; + + ndb_mgm_report_event(h, theData, 6); + + char *tmp= 0; + char buf[512]; + SocketInputStream in(fd,2000); + for(int i=0; i<20; i++) + { + if((tmp = in.gets(buf, sizeof(buf)))) + { +// const char ping_token[]="<PING>"; +// if(memcmp(ping_token,tmp,sizeof(ping_token)-1)) + if(tmp && strlen(tmp)) + ndbout << tmp; + } + else + { + if(in.timedout()) + { + ndbout << "TIMED OUT READING EVENT at iteration " << i << endl; + break; + } + } + } + + /* + * events go through a *DIFFERENT* socket than the NdbMgmHandle + * so we should still be connected (and be able to check_connection) + * + */ + + if(ndb_mgm_check_connection(h) && !ndb_mgm_is_connected(h)) + { + ndbout << "FAILED: is still connected after error" << endl; + result= NDBT_FAILED; + } + + ndb_mgm_disconnect(h); + } + +done: + ndb_mgm_disconnect(h); + ndb_mgm_destroy_handle(&h); + + return result; +} + +int runTestMgmApiStructEventTimeout(NDBT_Context* ctx, NDBT_Step* step) +{ + char *mgm= ctx->getRemoteMgm(); + int result= NDBT_OK; + int mgmd_nodeid= 0; + + NdbMgmHandle h; + h= ndb_mgm_create_handle(); + ndb_mgm_set_connectstring(h, mgm); + + int errs[] = { 10000, 0, -1 }; + + for(int error_ins_no=0; errs[error_ins_no]!=-1; error_ins_no++) + { + int error_ins= errs[error_ins_no]; + ndb_mgm_connect(h,0,0,0); + + if(ndb_mgm_check_connection(h) < 0) + { + result= NDBT_FAILED; + goto done; + } + + mgmd_nodeid= ndb_mgm_get_mgmd_nodeid(h); + if(mgmd_nodeid==0) + { + ndbout << "Failed to get mgmd node id to insert error" << endl; + result= NDBT_FAILED; + goto done; + } + + ndb_mgm_reply reply; + reply.return_code= 0; + + if(ndb_mgm_insert_error(h, mgmd_nodeid, error_ins, &reply)< 0) + { + ndbout << "failed to insert error " << error_ins << endl; + result= NDBT_FAILED; + } + + ndbout << "trying error: " << error_ins << endl; + + ndb_mgm_set_timeout(h,2500); + + int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, + 1, NDB_MGM_EVENT_CATEGORY_STARTUP, + 0 }; + NdbLogEventHandle le_handle= ndb_mgm_create_logevent_handle(h, filter); + + struct ndb_logevent le; + for(int i=0; i<20; i++) + { + if(error_ins==0 || (error_ins!=0 && i<5)) + { + Uint32 theData[25]; + EventReport *fake_event = (EventReport*)theData; + 
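+        // fill in a fake NDB_LE_NDBStopForced event and inject it via ndb_mgm_report_event so the log event listener has something to read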
fake_event->setEventType(NDB_LE_NDBStopForced); + fake_event->setNodeId(42); + theData[2]= 0; + theData[3]= 0; + theData[4]= 0; + theData[5]= 0; + + ndb_mgm_report_event(h, theData, 6); + } + int r= ndb_logevent_get_next(le_handle, &le, 2500); + if(r>0) + { + ndbout << "Receieved event" << endl; + } + else if(r<0) + { + ndbout << "ERROR" << endl; + } + else // no event + { + ndbout << "TIMED OUT READING EVENT at iteration " << i << endl; + if(error_ins==0) + result= NDBT_FAILED; + else + result= NDBT_OK; + break; + } + } + + /* + * events go through a *DIFFERENT* socket than the NdbMgmHandle + * so we should still be connected (and be able to check_connection) + * + */ + + if(ndb_mgm_check_connection(h) && !ndb_mgm_is_connected(h)) + { + ndbout << "FAILED: is still connected after error" << endl; + result= NDBT_FAILED; + } + + ndb_mgm_disconnect(h); + } + +done: + ndb_mgm_disconnect(h); + ndb_mgm_destroy_handle(&h); + + return result; +} NDBT_TESTSUITE(testMgm); TESTCASE("SingleUserMode", @@ -219,6 +728,31 @@ TESTCASE("ApiSessionFailure", INITIALIZER(runTestApiSession); } +TESTCASE("ApiTimeoutBasic", + "Basic timeout tests for MGMAPI"){ + INITIALIZER(runTestApiTimeoutBasic); + +} +TESTCASE("ApiGetStatusTimeout", + "Test timeout for MGMAPI getStatus"){ + INITIALIZER(runTestApiGetStatusTimeout); + +} +TESTCASE("ApiGetConfigTimeout", + "Test timeouts for mgmapi get_configuration"){ + INITIALIZER(runTestMgmApiGetConfigTimeout); + +} +TESTCASE("ApiMgmEventTimeout", + "Test timeouts for mgmapi get_configuration"){ + INITIALIZER(runTestMgmApiEventTimeout); + +} +TESTCASE("ApiMgmStructEventTimeout", + "Test timeouts for mgmapi get_configuration"){ + INITIALIZER(runTestMgmApiStructEventTimeout); + +} NDBT_TESTSUITE_END(testMgm); int main(int argc, const char** argv){ diff --git a/storage/ndb/test/ndbapi/testNodeRestart.cpp b/storage/ndb/test/ndbapi/testNodeRestart.cpp index b4ae95567eb..e5e346f3e42 100644 --- a/storage/ndb/test/ndbapi/testNodeRestart.cpp +++ b/storage/ndb/test/ndbapi/testNodeRestart.cpp @@ -887,6 +887,9 @@ int runBug20185(NDBT_Context* ctx, NDBT_Step* step){ return NDBT_FAILED; NdbSleep_MilliSleep(3000); + Vector<int> nodes; + for (Uint32 i = 0; i<restarter.getNumDbNodes(); i++) + nodes.push_back(restarter.getDbNodeId(i)); retry: if(hugoOps.startTransaction(pNdb) != 0) @@ -910,11 +913,16 @@ retry: nodeId = restarter.getDbNodeId(rand() % restarter.getNumDbNodes()); } while (nodeId == node); - if (restarter.insertErrorInAllNodes(7030)) - return NDBT_FAILED; - + ndbout_c("7031 to %d", nodeId); if (restarter.insertErrorInNode(nodeId, 7031)) return NDBT_FAILED; + + for (Uint32 i = 0; i<nodes.size(); i++) + { + if (nodes[i] != nodeId) + if (restarter.insertErrorInNode(nodes[i], 7030)) + return NDBT_FAILED; + } NdbSleep_MilliSleep(500); @@ -1223,8 +1231,7 @@ int runBug25984(NDBT_Context* ctx, NDBT_Step* step){ if (restarter.startNodes(&victim, 1)) return NDBT_FAILED; - if (restarter.waitNodesStartPhase(&victim, 1, 2)) - return NDBT_FAILED; + NdbSleep_SecSleep(3); } if (restarter.waitNodesNoStart(&victim, 1)) @@ -1415,6 +1422,151 @@ runBug26450(NDBT_Context* ctx, NDBT_Step* step) return NDBT_OK; } +int +runBug27003(NDBT_Context* ctx, NDBT_Step* step) +{ + int result = NDBT_OK; + int loops = ctx->getNumLoops(); + int records = ctx->getNumRecords(); + NdbRestarter res; + + static const int errnos[] = { 4025, 4026, 4027, 4028, 0 }; + + int node = res.getRandomNotMasterNodeId(rand()); + ndbout_c("node: %d", node); + if (res.restartOneDbNode(node, true, true, true)) + return NDBT_FAILED; + + 
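+  // step through every error code in errnos[] against the stopped node, repeating for each loop iteration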
Uint32 pos = 0; + for (Uint32 i = 0; i<loops; i++) + { + while (errnos[pos] != 0) + { + ndbout_c("Tesing err: %d", errnos[pos]); + + if (res.waitNodesNoStart(&node, 1)) + return NDBT_FAILED; + + if (res.insertErrorInNode(node, 1000)) + return NDBT_FAILED; + + if (res.insertErrorInNode(node, errnos[pos])) + return NDBT_FAILED; + + int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 3 }; + if (res.dumpStateOneNode(node, val2, 2)) + return NDBT_FAILED; + + res.startNodes(&node, 1); + NdbSleep_SecSleep(3); + pos++; + } + pos = 0; + } + + if (res.waitNodesNoStart(&node, 1)) + return NDBT_FAILED; + + res.startNodes(&node, 1); + if (res.waitClusterStarted()) + return NDBT_FAILED; + + return NDBT_OK; +} + + +int +runBug27283(NDBT_Context* ctx, NDBT_Step* step) +{ + int result = NDBT_OK; + int loops = ctx->getNumLoops(); + int records = ctx->getNumRecords(); + NdbRestarter res; + + if (res.getNumDbNodes() < 2) + { + return NDBT_OK; + } + + static const int errnos[] = { 7181, 7182, 0 }; + + Uint32 pos = 0; + for (Uint32 i = 0; i<loops; i++) + { + while (errnos[pos] != 0) + { + int master = res.getMasterNodeId(); + int next = res.getNextMasterNodeId(master); + int next2 = res.getNextMasterNodeId(next); + + int node = (i & 1) ? next : next2; + ndbout_c("Tesing err: %d", errnos[pos]); + if (res.insertErrorInNode(next, errnos[pos])) + return NDBT_FAILED; + + NdbSleep_SecSleep(3); + + if (res.waitClusterStarted()) + return NDBT_FAILED; + + pos++; + } + pos = 0; + } + + return NDBT_OK; +} + +int +runBug27466(NDBT_Context* ctx, NDBT_Step* step) +{ + int result = NDBT_OK; + int loops = ctx->getNumLoops(); + int records = ctx->getNumRecords(); + NdbRestarter res; + + if (res.getNumDbNodes() < 2) + { + return NDBT_OK; + } + + Uint32 pos = 0; + for (Uint32 i = 0; i<loops; i++) + { + int node1 = res.getDbNodeId(rand() % res.getNumDbNodes()); + int node2 = node1; + while (node1 == node2) + { + node2 = res.getDbNodeId(rand() % res.getNumDbNodes()); + } + + if (res.restartOneDbNode(node1, false, true, true)) + return NDBT_FAILED; + + if (res.waitNodesNoStart(&node1, 1)) + return NDBT_FAILED; + + int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 }; + if (res.dumpStateOneNode(node1, val2, 2)) + return NDBT_FAILED; + + if (res.insertErrorInNode(node2, 8039)) + return NDBT_FAILED; + + res.startNodes(&node1, 1); + NdbSleep_SecSleep(3); + if (res.waitNodesNoStart(&node1, 1)) + return NDBT_FAILED; + NdbSleep_SecSleep(5); // Wait for delayed INCL_NODECONF to arrive + + res.startNodes(&node1, 1); + if (res.waitClusterStarted()) + return NDBT_FAILED; + } + + return NDBT_OK; +} + NDBT_TESTSUITE(testNodeRestart); TESTCASE("NoLoad", "Test that one node at a time can be stopped and then restarted "\ @@ -1763,6 +1915,15 @@ TESTCASE("Bug26450", ""){ INITIALIZER(runLoadTable); INITIALIZER(runBug26450); } +TESTCASE("Bug27003", ""){ + INITIALIZER(runBug27003); +} +TESTCASE("Bug27283", ""){ + INITIALIZER(runBug27283); +} +TESTCASE("Bug27466", ""){ + INITIALIZER(runBug27466); +} NDBT_TESTSUITE_END(testNodeRestart); int main(int argc, const char** argv){ diff --git a/storage/ndb/test/ndbapi/testScanFilter.cpp b/storage/ndb/test/ndbapi/testScanFilter.cpp new file mode 100644 index 00000000000..958b50d6701 --- /dev/null +++ b/storage/ndb/test/ndbapi/testScanFilter.cpp @@ -0,0 +1,853 @@ +/* Copyright (C) 2007, Justin He, MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 
of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include <NDBT.hpp> +#include <NDBT_Test.hpp> + +#define ERR_EXIT(obj, msg) \ +do \ +{ \ +fprintf(stderr, "%s: %s (%d) in %s:%d\n", \ +msg, obj->getNdbError().message, obj->getNdbError().code, __FILE__, __LINE__); \ +exit(-1); \ +} \ +while (0); + +#define PRINT_ERROR(code,msg) \ +do \ +{ \ +fprintf(stderr, "Error in %s, line: %d, code: %d, msg: %s.\n", __FILE__, __LINE__, code, msg); \ +} \ +while (0); + +#define MYSQLERROR(mysql) { \ + PRINT_ERROR(mysql_errno(&mysql),mysql_error(&mysql)); \ + exit(-1); } +#define APIERROR(error) { \ + PRINT_ERROR(error.code,error.message); \ + exit(-1); } + +#define TEST_NAME "TestScanFilter" +#define TABLE_NAME "TABLE_SCAN" + +const char *COL_NAME[] = {"id", "i", "j", "k", "l", "m", "n"}; +const char COL_LEN = 7; +/* +* Not to change TUPLE_NUM, because the column in TABLE_NAME is fixed, +* there are six columns, 'i', 'j', 'k', 'l', 'm', 'n', and each on is equal to 1 or 1, +* Since each tuple should be unique in this case, then TUPLE_NUM = 2 power 6 = 64 +*/ +const int TUPLE_NUM = (int)pow(2, COL_LEN-1); + +/* +* the recursive level of random scan filter, can +* modify this parameter more or less, range from +* 1 to 100, larger num consumes more scan time +*/ +const int RECURSIVE_LEVEL = 10; + +const int MAX_STR_LEN = (RECURSIVE_LEVEL * (COL_LEN+1) * 4); + +/* +* Each time stands for one test, it will produce a random +* filter string, and scan through ndb api and through +* calculation with tuples' data, then compare the result, +* if they are equal, this test passed, or failed. +* Only all TEST_NUM times tests passed, we can believe +* the suite of test cases are okay. +* Change TEST_NUM to larger will need more time to test +*/ +const int TEST_NUM = 5000; + + +/* Table definition*/ +static +const +NDBT_Attribute MYTAB1Attribs[] = { + NDBT_Attribute("id", NdbDictionary::Column::Unsigned, 1, true), + NDBT_Attribute("i", NdbDictionary::Column::Unsigned), + NDBT_Attribute("j", NdbDictionary::Column::Unsigned), + NDBT_Attribute("k", NdbDictionary::Column::Unsigned), + NDBT_Attribute("l", NdbDictionary::Column::Unsigned), + NDBT_Attribute("m", NdbDictionary::Column::Unsigned), + NDBT_Attribute("n", NdbDictionary::Column::Unsigned), +}; +static +const +NDBT_Table MYTAB1(TABLE_NAME, sizeof(MYTAB1Attribs)/sizeof(NDBT_Attribute), MYTAB1Attribs); + + +int createTable(Ndb* pNdb, const NdbDictionary::Table* tab, bool _temp, + bool existsOk, NDBT_CreateTableHook f) +{ + int r = 0; + do{ + NdbDictionary::Table tmpTab(* tab); + tmpTab.setStoredTable(_temp ? 
0 : 1); + if(f != 0 && f(pNdb, tmpTab, 0, NULL)) + { + ndbout << "Failed to create table" << endl; + return NDBT_FAILED; + } + r = pNdb->getDictionary()->createTable(tmpTab); + if(r == -1){ + if(!existsOk){ + ndbout << "Error: " << pNdb->getDictionary()->getNdbError() << endl; + break; + } + if(pNdb->getDictionary()->getNdbError().code != 721){ + ndbout << "Error: " << pNdb->getDictionary()->getNdbError() << endl; + break; + } + r = 0; + } + }while(false); + + return r; +} + +/* +* Function to produce the tuples' data +*/ +int runPopulate(NDBT_Context* ctx, NDBT_Step* step) +{ + Ndb *myNdb = GETNDB(step); + const NdbDictionary::Dictionary* myDict= myNdb->getDictionary(); + const NdbDictionary::Table *myTable= myDict->getTable(TABLE_NAME); + if(myTable == NULL) + APIERROR(myDict->getNdbError()); + + NdbTransaction* myTrans = myNdb->startTransaction(); + if (myTrans == NULL) + APIERROR(myNdb->getNdbError()); + + for(int num = 0; num < TUPLE_NUM; num++) + { + NdbOperation* myNdbOperation = myTrans->getNdbOperation(myTable); + if(myNdbOperation == NULL) + { + APIERROR(myTrans->getNdbError()); + } + +/* the tuples' data in TABLE_NAME ++----+---+---+---+---+---+---+ +| id | i | j | k | l | m | n | ++----+---+---+---+---+---+---+ +| 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| 1 | 0 | 0 | 0 | 0 | 0 | 1 | +| 2 | 0 | 0 | 0 | 0 | 1 | 0 | +| 3 | 0 | 0 | 0 | 0 | 1 | 1 | +| 4 | 0 | 0 | 0 | 1 | 0 | 0 | +| 5 | 0 | 0 | 0 | 1 | 0 | 1 | +| 6 | 0 | 0 | 0 | 1 | 1 | 0 | +| 7 | 0 | 0 | 0 | 1 | 1 | 1 | +| 8 | 0 | 0 | 1 | 0 | 0 | 0 | +| 9 | 0 | 0 | 1 | 0 | 0 | 1 | +| 10 | 0 | 0 | 1 | 0 | 1 | 0 | +| 11 | 0 | 0 | 1 | 0 | 1 | 1 | +| 12 | 0 | 0 | 1 | 1 | 0 | 0 | +| 13 | 0 | 0 | 1 | 1 | 0 | 1 | +| 14 | 0 | 0 | 1 | 1 | 1 | 0 | +| 15 | 0 | 0 | 1 | 1 | 1 | 1 | +| 16 | 0 | 1 | 0 | 0 | 0 | 0 | +| 17 | 0 | 1 | 0 | 0 | 0 | 1 | +| 18 | 0 | 1 | 0 | 0 | 1 | 0 | +| 19 | 0 | 1 | 0 | 0 | 1 | 1 | +| 20 | 0 | 1 | 0 | 1 | 0 | 0 | +| 21 | 0 | 1 | 0 | 1 | 0 | 1 | +| 22 | 0 | 1 | 0 | 1 | 1 | 0 | +| 23 | 0 | 1 | 0 | 1 | 1 | 1 | +| 24 | 0 | 1 | 1 | 0 | 0 | 0 | +| 25 | 0 | 1 | 1 | 0 | 0 | 1 | +| 26 | 0 | 1 | 1 | 0 | 1 | 0 | +| 27 | 0 | 1 | 1 | 0 | 1 | 1 | +| 28 | 0 | 1 | 1 | 1 | 0 | 0 | +| 29 | 0 | 1 | 1 | 1 | 0 | 1 | +| 30 | 0 | 1 | 1 | 1 | 1 | 0 | +| 31 | 0 | 1 | 1 | 1 | 1 | 1 | +| 32 | 1 | 0 | 0 | 0 | 0 | 0 | +| 33 | 1 | 0 | 0 | 0 | 0 | 1 | +| 34 | 1 | 0 | 0 | 0 | 1 | 0 | +| 35 | 1 | 0 | 0 | 0 | 1 | 1 | +| 36 | 1 | 0 | 0 | 1 | 0 | 0 | +| 37 | 1 | 0 | 0 | 1 | 0 | 1 | +| 38 | 1 | 0 | 0 | 1 | 1 | 0 | +| 39 | 1 | 0 | 0 | 1 | 1 | 1 | +| 40 | 1 | 0 | 1 | 0 | 0 | 0 | +| 41 | 1 | 0 | 1 | 0 | 0 | 1 | +| 42 | 1 | 0 | 1 | 0 | 1 | 0 | +| 43 | 1 | 0 | 1 | 0 | 1 | 1 | +| 44 | 1 | 0 | 1 | 1 | 0 | 0 | +| 45 | 1 | 0 | 1 | 1 | 0 | 1 | +| 46 | 1 | 0 | 1 | 1 | 1 | 0 | +| 47 | 1 | 0 | 1 | 1 | 1 | 1 | +| 48 | 1 | 1 | 0 | 0 | 0 | 0 | +| 49 | 1 | 1 | 0 | 0 | 0 | 1 | +| 50 | 1 | 1 | 0 | 0 | 1 | 0 | +| 51 | 1 | 1 | 0 | 0 | 1 | 1 | +| 52 | 1 | 1 | 0 | 1 | 0 | 0 | +| 53 | 1 | 1 | 0 | 1 | 0 | 1 | +| 54 | 1 | 1 | 0 | 1 | 1 | 0 | +| 55 | 1 | 1 | 0 | 1 | 1 | 1 | +| 56 | 1 | 1 | 1 | 0 | 0 | 0 | +| 57 | 1 | 1 | 1 | 0 | 0 | 1 | +| 58 | 1 | 1 | 1 | 0 | 1 | 0 | +| 59 | 1 | 1 | 1 | 0 | 1 | 1 | +| 60 | 1 | 1 | 1 | 1 | 0 | 0 | +| 61 | 1 | 1 | 1 | 1 | 0 | 1 | +| 62 | 1 | 1 | 1 | 1 | 1 | 0 | +| 63 | 1 | 1 | 1 | 1 | 1 | 1 | ++----+---+---+---+---+---+---+ +*/ + myNdbOperation->insertTuple(); + myNdbOperation->equal(COL_NAME[0], num); + for(int col = 1; col < COL_LEN; col++) + { + myNdbOperation->setValue(COL_NAME[col], (num>>(COL_LEN-1-col))&1); + } + } + + int check = 
myTrans->execute(NdbTransaction::Commit); + + myTrans->close(); + + if (check == -1) + return NDBT_FAILED; + else + return NDBT_OK; + +} + + + +/* +* a=AND, o=OR, A=NAND, O=NOR +*/ +char op_string[] = "aoAO"; +/* +* the six columns' name of test table +*/ +char col_string[] = "ijklmn"; +const int op_len = strlen(op_string); +const int col_len = strlen(col_string); + +/* +* get a random op from "aoAO" +*/ +int get_rand_op_ch(char *ch) +{ + static unsigned int num = 0; + if(++num == 0) + num = 1; + srand(num*time(NULL)); + *ch = op_string[rand() % op_len]; + return 1; +} + +/* +* get a random order form of "ijklmn" trough exchanging letter +*/ +void change_col_order() +{ + int pos1,pos2; + char temp; + for (int i = 0; i < 10; i++) //exchange for 10 times + { + srand(time(NULL)/(i+1)); + pos1 = rand() % col_len; + srand((i+1)*time(NULL)); + pos2 = rand() % col_len; + if (pos1 == pos2) + continue; + temp = col_string[pos1]; + col_string[pos1] = col_string[pos2]; + col_string[pos2] = temp; + } +} + +/* +* get a random sub string of "ijklmn" +*/ +int get_rand_col_str(char *str) +{ + int len; + static unsigned int num = 0; + if(++num == 0) + num = 1; + srand(num*time(NULL)); + len = rand() % col_len + 1; + change_col_order(); + snprintf(str, len+1, "%s", col_string); //len+1, including '\0' + return len; +} + +/* +* get a random string including operation and column +* eg, Alnikx +*/ +int get_rand_op_str(char *str) +{ + char temp[256]; + int len1, len2, len; + len1 = get_rand_op_ch(temp); + len2 = get_rand_col_str(temp+len1); + len = len1 + len2; + temp[len] = 'x'; + snprintf(str, len+1+1, "%s", temp); //len+1, including '\0' + return len+1; +} + +/* +* replace a letter of source string with a new string +* e.g., source string: 'Aijkx', replace i with new string 'olmx' +* then source string is changed to 'Aolmxjkx' +* source: its format should be produced from get_rand_op_str() +* pos: range from 1 to strlen(source)-2 +*/ +int replace_a_to_str(char *source, int pos, char *newstr) +{ + char temp[MAX_STR_LEN]; + snprintf(temp, pos+1, "%s", source); + snprintf(temp+pos, strlen(newstr)+1, "%s", newstr); + snprintf(temp+pos+strlen(newstr), strlen(source)-pos, "%s", source+pos+1); + snprintf(source, strlen(temp)+1, "%s", temp); + return strlen(source); +} + +/* +* check whether the inputed char is an operation +*/ +bool check_op(char ch) +{ + if( ch == 'a' || ch == 'A' || ch == 'o' || ch == 'O') + return true; + else + return false; +} + +/* +* check whether the inputed char is end flag +*/ +bool check_end(char ch) +{ + return (ch == 'x'); +} + +/* +* check whether the inputed char is end flag +*/ +bool check_col(char ch) +{ + if( ch == 'i' || ch == 'j' || ch == 'k' + || ch == 'l' || ch == 'm' || ch == 'n' ) + return true; + else + return false; +} + +/* +* To ensure we can get a random string with RECURSIVE_LEVEL, +* we need a position where can replace a letter with a new string. +*/ +int get_rand_replace_pos(char *str, int len) +{ + int pos_op = 0; + int pos_x = 0; + int pos_col = 0; + int span = 0; + static int num = 0; + char temp; + + for(int i = 0; i < len; i++) + { + temp = str[i]; + if(! 
check_end(temp)) + { + if(check_op(temp)) + pos_op = i; + } + else + { + pos_x = i; + break; + } + } + + if(++num == 0) + num = 1; + + span = pos_x - pos_op - 1; + if(span <= 1) + { + pos_col = pos_op + 1; + } + else + { + srand(num*time(NULL)); + pos_col = pos_op + rand() % span + 1; + } + return pos_col; +} + +/* +* Check whether the given random string is valid +* and applicable for this test case +*/ +bool check_random_str(char *str) +{ + char *p; + int op_num = 0; + int end_num = 0; + + for(p = str; *p; p++) + { + bool tmp1 = false, tmp2 = false; + if(tmp1 = check_op(*p)) + op_num++; + if(tmp2 = check_end(*p)) + end_num++; + if(!(tmp1 || tmp2 || check_col(*p))) //there are illegal letters + return false; + } + + if(op_num != end_num) //begins are not equal to ends + return false; + + return true; +} + +/* +* Get a random string with RECURSIVE_LEVEL +*/ +void get_rand_op_str_compound(char *str) +{ + char small_str[256]; + int pos; + int tmp; + int level; + static int num = 0; + + if(++num == 0) + num = 1; + + srand(num*time(NULL)); + level = 1 + rand() % RECURSIVE_LEVEL; + + get_rand_op_str(str); + + for(int i = 0; i < level; i++) + { + get_rand_op_str(small_str); + tmp = strlen(small_str); + get_rand_op_str(small_str + tmp); //get two operations + pos = get_rand_replace_pos(str, strlen(str)); + replace_a_to_str(str, pos, small_str); + } + + //check the random string + if(!check_random_str(str)) + { + fprintf(stderr, "Error random string! \n"); + exit(-1); + } +} + +/* +* get column id of i,j,k,l,m,n +*/ +int get_column_id(char ch) +{ + return (ch - 'i' + 1); //from 1 to 6 +} + +/* +* check whether column value of the NO. tuple is equal to 1 +* col_id: column id, range from 1 to 6 +* tuple_no: record NO., range from 0 to 63 +*/ +bool check_col_equal_one(int tuple_no, int col_id) +{ + int i = (int)pow(2, 6 - col_id); + int j = tuple_no / i; + if(j % 2) + return true; + else + return false; +} + +/* +* get a result after all elements in the array with AND +* value: pointer to a bool array +* len: length of the bool array +*/ +bool AND_op(bool *value, int len) +{ + for(int i = 0; i < len; i++) + { + if(! value[i]) + return false; + } + return true; +} + +/* +* get a result after all elements in the array with OR +* value: pointer to a bool array +* len: length of the bool array +*/ +bool OR_op(bool *value, int len) +{ + for(int i = 0; i < len; i++) + { + if(value[i]) + return true; + } + return false; +} + +/* +* get a result after all elements in the array with NAND +* value: pointer to a bool array +* len: length of the bool array +*/ +bool NAND_op(bool *value, int len) +{ + return (! AND_op(value, len)); +} + +/* +* get a result after all elements in the array with NOR +* value: pointer to a bool array +* len: length of the bool array +*/ +bool NOR_op(bool *value, int len) +{ + return (! 
OR_op(value, len)); +} + +/* +* AND/NAND/OR/NOR operation for a bool array +*/ +bool calculate_one_op(char op_type, bool *value, int len) +{ + switch(op_type) + { + case 'a': + return AND_op(value, len); + break; + case 'o': + return OR_op(value, len); + break; + case 'A': + return NAND_op(value, len); + break; + case 'O': + return NOR_op(value, len); + break; + } + return false; //make gcc happy +} + +typedef struct _stack_element +{ + char type; + int num; +}stack_element; + +/* +* stack_op, store info for AND,OR,NAND,NOR +* stack_col, store value of column(i,j,k,l,m,n) and temporary result for an operation +*/ +stack_element stack_op[RECURSIVE_LEVEL * COL_LEN]; +bool stack_col[RECURSIVE_LEVEL * COL_LEN * 2]; + +/* +* check whether the given tuple is chosen by judgement condition +* tuple_no, the NO of tuple in TABLE_NAME, range from 0 to TUPLE_NUM +* str: a random string of scan opearation and condition +* len: length of str +*/ +bool check_one_tuple(int tuple_no, char *str, int len) +{ + int pop_op = 0; + int pop_col = 0; + for(int i = 0; i < len; i++) + { + char letter = *(str + i); + if(check_op(letter)) //push + { + stack_op[pop_op].type = letter; + stack_op[pop_op].num = 0; + pop_op++; + } + if(check_col(letter)) //push + { + stack_col[pop_col] = check_col_equal_one(tuple_no, get_column_id(letter)); + pop_col++; + stack_op[pop_op-1].num += 1; + } + if(check_end(letter)) + { + if(pop_op <= 1) + { + return calculate_one_op(stack_op[pop_op-1].type, + stack_col, + stack_op[pop_op-1].num); + } + else + { + bool tmp1 = calculate_one_op(stack_op[pop_op-1].type, + stack_col + pop_col - stack_op[pop_op-1].num, + stack_op[pop_op-1].num); + pop_col -= stack_op[pop_op-1].num; //pop + pop_op--; + stack_col[pop_col] = tmp1; //push + pop_col++; + stack_op[pop_op-1].num += 1; + } + } + } + return false; //make gcc happy +} + +/* +* get lists of tuples which match the scan condiction through calculating +* str: a random string of scan opearation and condition +*/ +void check_all_tuples(char *str, bool *res) +{ + for (int i = 0; i < TUPLE_NUM; i++) + { + if(check_one_tuple(i, str, strlen(str))) + res[i] = true; + } +} + +/* +* convert a letter to group number what ndbapi need +*/ +NdbScanFilter::Group get_api_group(char op_name) +{ + switch (op_name) { + case 'a': return NdbScanFilter::AND; + case 'o': return NdbScanFilter::OR; + case 'A': return NdbScanFilter::NAND; + case 'O': return NdbScanFilter::NOR; + default: + fprintf(stderr, "Invalid group name %c !\n", op_name); + exit(3); + } +} + +/* +* with ndbapi, call begin, eq/ne/lt/gt/le/ge..., end +*/ +NdbScanFilter * call_ndbapi(char *str, NdbTransaction *transaction, + NdbScanOperation *scan, NdbDictionary::Column const *col[]) +{ + NdbScanFilter *scanfilter = new NdbScanFilter(scan); + char *p; + + for (p = str; *p; p++) + { + if(check_op(*p)) + { + if(scanfilter->begin(get_api_group(*p))) + ERR_EXIT(transaction, "filter begin() failed"); + } + if(check_col(*p)) + { + if(scanfilter->eq(col[*p-'i'+1]->getColumnNo(), (Uint32)1)) + ERR_EXIT(transaction, "filter eq() failed"); + } + if(check_end(*p)) + { + if(scanfilter->end()) + ERR_EXIT(transaction, "filter end() failed"); + } + } + + return scanfilter; +} + +/* +* get the tuples through ndbapi, and save the tuples NO. 
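+* results are recorded in res[]: the slot for each row returned by the scan is set to true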
+* str: a random string of scan opearation and condition +*/ +void ndbapi_tuples(Ndb *ndb, char *str, bool *res) +{ + const NdbDictionary::Dictionary *dict = ndb->getDictionary(); + if (!dict) + ERR_EXIT(ndb, "Can't get dict"); + + const NdbDictionary::Table *table = dict->getTable(TABLE_NAME); + if (!table) + ERR_EXIT(dict, "Can't get table"TABLE_NAME); + + const NdbDictionary::Column *col[COL_LEN]; + for(int i = 0; i < COL_LEN; i++) + { + char tmp[128]; + col[i] = table->getColumn(COL_NAME[i]); + if(!col[i]) + { + snprintf(tmp, 128, "Can't get column %s", COL_NAME[i]); + ERR_EXIT(dict, tmp); + } + } + + NdbTransaction *transaction; + NdbScanOperation *scan; + NdbScanFilter *filter; + + transaction = ndb->startTransaction(); + if (!transaction) + ERR_EXIT(ndb, "Can't start transaction"); + + scan = transaction->getNdbScanOperation(table); + if (!scan) + ERR_EXIT(transaction, "Can't get scan op"); + + if (scan->readTuples(NdbOperation::LM_Exclusive)) + ERR_EXIT(scan, "Can't set up read"); + + NdbRecAttr *rec[COL_LEN]; + for(int i = 0; i < COL_LEN; i++) + { + char tmp[128]; + rec[i] = scan->getValue(COL_NAME[i]); + if(!rec[i]) + { + snprintf(tmp, 128, "Can't get rec of %s", COL_NAME[i]); + ERR_EXIT(scan, tmp); + } + } + + filter = call_ndbapi(str, transaction, scan, col); + + if (transaction->execute(NdbTransaction::NoCommit)) + ERR_EXIT(transaction, "Can't execute"); + + int i,j,k,l,m,n; + while (scan->nextResult(true) == 0) + { + do + { + i = rec[1]->u_32_value(); + j = rec[2]->u_32_value(); + k = rec[3]->u_32_value(); + l = rec[4]->u_32_value(); + m = rec[5]->u_32_value(); + n = rec[6]->u_32_value(); + res[32*i+16*j+8*k+4*l+2*m+n] = true; + } while (scan->nextResult(false) == 0); + } + + delete filter; + transaction->close(); +} + +/* +* compare the result between calculation and NDBAPI +* str: a random string of scan opearation and condition +* return: true stands for ndbapi ok, false stands for ndbapi failed +*/ +template class Vector<bool>; +bool compare_cal_ndb(char *str, Ndb *ndb) +{ + Vector<bool> res_cal; + Vector<bool> res_ndb; + + for(int i = 0; i < TUPLE_NUM; i++) + { + res_cal.push_back(false); + res_ndb.push_back(false); + } + + check_all_tuples(str, res_cal.getBase()); + ndbapi_tuples(ndb, str, res_ndb.getBase()); + + for(int i = 0; i < TUPLE_NUM; i++) + { + if(res_cal[i] != res_ndb[i]) + return false; + } + return true; +} + + +int runCreateTables(NDBT_Context* ctx, NDBT_Step* step) +{ + Ndb *pNdb = GETNDB(step); + pNdb->getDictionary()->dropTable(MYTAB1.getName()); + int ret = createTable(pNdb, &MYTAB1, false, true, 0); + if(ret) + return ret; + return NDBT_OK; +} + + +int runDropTables(NDBT_Context* ctx, NDBT_Step* step) +{ + int ret = GETNDB(step)->getDictionary()->dropTable(MYTAB1.getName()); + if(ret == -1) + return NDBT_FAILED; + + return NDBT_OK; +} + +int runScanRandomFilterTest(NDBT_Context* ctx, NDBT_Step* step) +{ + char random_str[MAX_STR_LEN]; + Ndb *myNdb = GETNDB(step); + bool res = true; + + for(int i = 0; i < TEST_NUM; i++) + { + get_rand_op_str_compound(random_str); + if( !compare_cal_ndb(random_str, myNdb)) + return NDBT_FAILED; + } + + return NDBT_OK; +} + +NDBT_TESTSUITE(testScanFilter); +TESTCASE(TEST_NAME, + "Scan table TABLE_NAME for the records which accord with \ + conditions of logical scan operations: AND/OR/NAND/NOR") +{ + INITIALIZER(runCreateTables); + INITIALIZER(runPopulate); + INITIALIZER(runScanRandomFilterTest); + FINALIZER(runDropTables); +} + +NDBT_TESTSUITE_END(testScanFilter); + + +int main(int argc, const char** argv) +{ + 
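+  // connect to the cluster and run only the TestScanFilter case against MYTAB1 via executeOneCtx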
ndb_init(); + + Ndb_cluster_connection con; + if(con.connect(12, 5, 1)) + { + return NDBT_ProgramExit(NDBT_FAILED); + } + + return testScanFilter.executeOneCtx(con, &MYTAB1, TEST_NAME); +} diff --git a/storage/ndb/test/ndbapi/test_event.cpp b/storage/ndb/test/ndbapi/test_event.cpp index 5af3ff77df8..e1e0012d0d8 100644 --- a/storage/ndb/test/ndbapi/test_event.cpp +++ b/storage/ndb/test/ndbapi/test_event.cpp @@ -500,6 +500,12 @@ int runEventMixedLoad(NDBT_Context* ctx, NDBT_Step* step) int records = ctx->getNumRecords(); HugoTransactions hugoTrans(*ctx->getTab()); + if(ctx->getPropertyWait("LastGCI", ~(Uint32)0)) + { + g_err << "FAIL " << __LINE__ << endl; + return NDBT_FAILED; + } + while(loops -- && !ctx->isTestStopped()) { hugoTrans.clearTable(GETNDB(step), 0); @@ -606,9 +612,11 @@ int runEventApplier(NDBT_Context* ctx, NDBT_Step* step) goto end; } + ctx->setProperty("LastGCI", ~(Uint32)0); + ctx->broadcast(); + while(!ctx->isTestStopped()) { - int r; int count= 0; Uint32 stop_gci= ~0; Uint64 curr_gci = 0; @@ -778,7 +786,7 @@ int runEventApplier(NDBT_Context* ctx, NDBT_Step* step) if (trans->getNdbError().status == NdbError::PermanentError) { - g_err << "Ignoring execute " << r << " failed " + g_err << "Ignoring execute failed " << trans->getNdbError().code << " " << trans->getNdbError().message << endl; @@ -788,7 +796,7 @@ int runEventApplier(NDBT_Context* ctx, NDBT_Step* step) } else if (noRetries++ == 10) { - g_err << "execute " << r << " failed " + g_err << "execute failed " << trans->getNdbError().code << " " << trans->getNdbError().message << endl; trans->close(); @@ -828,6 +836,67 @@ end: DBUG_RETURN(result); } +int runEventListenerUntilStopped(NDBT_Context* ctx, NDBT_Step* step) +{ + + int result = NDBT_OK; + const NdbDictionary::Table * table= ctx->getTab(); + HugoTransactions hugoTrans(* table); + Ndb* ndb= GETNDB(step); + + char buf[1024]; + sprintf(buf, "%s_EVENT", table->getName()); + NdbEventOperation *pOp, *pCreate = 0; + pCreate = pOp = ndb->createEventOperation(buf); + if ( pOp == NULL ) { + g_err << "Event operation creation failed on %s" << buf << endl; + return NDBT_FAILED; + } + + int i; + int n_columns= table->getNoOfColumns(); + NdbRecAttr* recAttr[1024]; + NdbRecAttr* recAttrPre[1024]; + for (i = 0; i < n_columns; i++) { + recAttr[i] = pOp->getValue(table->getColumn(i)->getName()); + recAttrPre[i] = pOp->getPreValue(table->getColumn(i)->getName()); + } + + if (pOp->execute()) + { // This starts changes to "start flowing" + g_err << "execute operation execution failed: \n"; + g_err << pOp->getNdbError().code << " " + << pOp->getNdbError().message << endl; + result = NDBT_FAILED; + goto end; + } + + while(!ctx->isTestStopped()) + { + Uint64 curr_gci = 0; + while(!ctx->isTestStopped()) + { + ndb->pollEvents(100, &curr_gci); + while ((pOp= ndb->nextEvent()) != 0) + { + assert(pOp == pCreate); + } + } + } + +end: + if(pCreate) + { + if (ndb->dropEventOperation(pCreate)) { + g_err << "dropEventOperation execution failed " + << ndb->getNdbError().code << " " + << ndb->getNdbError().message << endl; + result = NDBT_FAILED; + } + } + return result; +} + int runRestarter(NDBT_Context* ctx, NDBT_Step* step){ int result = NDBT_OK; int loops = ctx->getNumLoops(); @@ -869,6 +938,51 @@ int runRestarter(NDBT_Context* ctx, NDBT_Step* step){ return result; } +int runRestarterLoop(NDBT_Context* ctx, NDBT_Step* step) +{ + int result = NDBT_OK; + int loops = ctx->getNumLoops(); + NdbRestarter restarter; + int i = 0; + int lastId = 0; + + if (restarter.getNumDbNodes() < 2){ + 
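+    // with fewer than two data nodes this restart loop is skipped: stop the test and report OK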
ctx->stopTest(); + return NDBT_OK; + } + + if(restarter.waitClusterStarted(60) != 0){ + g_err << "Cluster failed to start" << endl; + return NDBT_FAILED; + } + + while(result != NDBT_FAILED + && !ctx->isTestStopped() + && i < loops) + { + int id = lastId % restarter.getNumDbNodes(); + int nodeId = restarter.getDbNodeId(id); + ndbout << "Restart node " << nodeId << endl; + if(restarter.restartOneDbNode(nodeId, false, false, true) != 0){ + g_err << "Failed to restartNextDbNode" << endl; + result = NDBT_FAILED; + break; + } + + if(restarter.waitClusterStarted(60) != 0){ + g_err << "Cluster failed to start" << endl; + result = NDBT_FAILED; + break; + } + + lastId++; + i++; + } + + ctx->stopTest(); + return result; +} + Vector<const NdbDictionary::Table*> pTabs; Vector<const NdbDictionary::Table*> pShadowTabs; @@ -1608,6 +1722,42 @@ runSubscribeUnsubscribe(NDBT_Context* ctx, NDBT_Step* step) return NDBT_OK; } +int +runScanUpdateUntilStopped(NDBT_Context* ctx, NDBT_Step* step){ + int records = ctx->getNumRecords(); + int parallelism = ctx->getProperty("Parallelism", (Uint32)0); + int abort = ctx->getProperty("AbortProb", (Uint32)0); + HugoTransactions hugoTrans(*ctx->getTab()); + while (ctx->isTestStopped() == false) + { + if (hugoTrans.scanUpdateRecords(GETNDB(step), records, abort, + parallelism) == NDBT_FAILED){ + return NDBT_FAILED; + } + } + return NDBT_OK; +} + +int +runInsertDeleteUntilStopped(NDBT_Context* ctx, NDBT_Step* step) +{ + int result = NDBT_OK; + int records = ctx->getNumRecords(); + HugoTransactions hugoTrans(*ctx->getTab()); + UtilTransactions utilTrans(*ctx->getTab()); + while (ctx->isTestStopped() == false) + { + if (hugoTrans.loadTable(GETNDB(step), records, 1) != 0){ + return NDBT_FAILED; + } + if (utilTrans.clearTable(GETNDB(step), records) != 0){ + return NDBT_FAILED; + } + } + + return NDBT_OK; +} + NDBT_TESTSUITE(test_event); TESTCASE("BasicEventOperation", "Verify that we can listen to Events" @@ -1729,6 +1879,14 @@ TESTCASE("SubscribeUnsubscribe", STEPS(runSubscribeUnsubscribe, 16); FINALIZER(runDropEvent); } +TESTCASE("Bug27169", ""){ + INITIALIZER(runCreateEvent); + STEP(runEventListenerUntilStopped); + STEP(runInsertDeleteUntilStopped); + STEP(runScanUpdateUntilStopped); + STEP(runRestarterLoop); + FINALIZER(runDropEvent); +} NDBT_TESTSUITE_END(test_event); int main(int argc, const char** argv){ diff --git a/storage/ndb/test/run-test/daily-basic-tests.txt b/storage/ndb/test/run-test/daily-basic-tests.txt index 4022dffa258..59535958249 100644 --- a/storage/ndb/test/run-test/daily-basic-tests.txt +++ b/storage/ndb/test/run-test/daily-basic-tests.txt @@ -469,6 +469,14 @@ max-time: 1000 cmd: testScan args: -n ScanVariants +max-time: 1000 +cmd: testNodeRestart +args: -n Bug27003 T1 + +max-time: 1000 +cmd: testNodeRestart +args: -n Bug27283 T1 + max-time: 500 cmd: testNodeRestart args: -n Bug15587 T1 @@ -792,6 +800,14 @@ max-time: 1000 cmd: testNodeRestart args: -n Bug25468 T1 +max-time: 1000 +cmd: testNodeRestart +args: -n Bug27466 T1 + +max-time: 1000 +cmd: test_event +args: -l 10 -n Bug27169 T1 + # OLD FLEX max-time: 500 cmd: flexBench @@ -830,3 +846,31 @@ cmd: DbAsyncGenerator args: -time 60 -p 1 -proc 25 type: bench +max-time: 120 +cmd: testMgm +args: -n ApiSessionFailure T1 + +max-time: 120 +cmd: testMgm +args: -n ApiTimeoutBasic T1 + +max-time: 120 +cmd: testMgm +args: -n ApiSessionFailure T1 + +max-time: 120 +cmd: testMgm +args: -n ApiGetStatusTimeout T1 + +max-time: 120 +cmd: testMgm +args: -n ApiGetConfigTimeout T1 + +max-time: 120 +cmd: testMgm +args: -n 
ApiMgmEventTimeout T1 + +max-time: 120 +cmd: testMgm +args: -n ApiMgmStructEventTimeout T1 + diff --git a/storage/ndb/test/run-test/daily-devel-tests.txt b/storage/ndb/test/run-test/daily-devel-tests.txt index 8c1fa491cb9..f73bd021440 100644 --- a/storage/ndb/test/run-test/daily-devel-tests.txt +++ b/storage/ndb/test/run-test/daily-devel-tests.txt @@ -204,17 +204,17 @@ cmd: testSystemRestart args: -l 1 -n SR9 T1 # -max-time: 2500 +max-time: 3600 cmd: test_event args: -n EventOperationApplier -l 2 # -max-time: 2500 +max-time: 3600 cmd: test_event args: -n EventOperationApplier_NR -l 2 # -max-time: 2500 +max-time: 3600 cmd: test_event args: -n MergeEventOperationApplier_NR -l 2 @@ -224,9 +224,9 @@ cmd: test_event args: -n Multi # -max-time: 2500 +max-time: 3600 cmd: test_event -args: -n CreateDropNR -l 2 +args: -n CreateDropNR -l 1 # max-time: 600 diff --git a/storage/ndb/test/src/CpcClient.cpp b/storage/ndb/test/src/CpcClient.cpp index 51f2fb4cf4d..d5dfd01327e 100644 --- a/storage/ndb/test/src/CpcClient.cpp +++ b/storage/ndb/test/src/CpcClient.cpp @@ -428,8 +428,6 @@ SimpleCpcClient::SimpleCpcClient(const char *_host, int _port) { host = strdup(_host); port = _port; cpc_sock = -1; - cpc_in = NULL; - cpc_out = NULL; } SimpleCpcClient::~SimpleCpcClient() { @@ -444,12 +442,6 @@ SimpleCpcClient::~SimpleCpcClient() { close(cpc_sock); cpc_sock = -1; } - - if(cpc_in != NULL) - delete cpc_in; - - if(cpc_out != NULL) - delete cpc_out; } int @@ -475,17 +467,15 @@ SimpleCpcClient::connect() { if (::connect(cpc_sock, (struct sockaddr*) &sa, sizeof(sa)) < 0) return -1; - cpc_in = new SocketInputStream(cpc_sock, 60000); - cpc_out = new SocketOutputStream(cpc_sock); - return 0; } int SimpleCpcClient::cpc_send(const char *cmd, const Properties &args) { - - cpc_out->println(cmd); + SocketOutputStream cpc_out(cpc_sock); + + cpc_out.println(cmd); Properties::Iterator iter(&args); const char *name; @@ -498,18 +488,18 @@ SimpleCpcClient::cpc_send(const char *cmd, switch(t) { case PropertiesType_Uint32: args.get(name, &val_i); - cpc_out->println("%s: %d", name, val_i); + cpc_out.println("%s: %d", name, val_i); break; case PropertiesType_char: args.get(name, val_s); - cpc_out->println("%s: %s", name, val_s.c_str()); + cpc_out.println("%s: %s", name, val_s.c_str()); break; default: /* Silently ignore */ break; } } - cpc_out->println(""); + cpc_out.println(""); return 0; } @@ -523,9 +513,11 @@ SimpleCpcClient::Parser_t::ParserStatus SimpleCpcClient::cpc_recv(const ParserRow_t *syntax, const Properties **reply, void **user_value) { + SocketInputStream cpc_in(cpc_sock); + Parser_t::Context ctx; ParserDummy session(cpc_sock); - Parser_t parser(syntax, *cpc_in, true, true, true); + Parser_t parser(syntax, cpc_in, true, true, true); *reply = parser.parse(ctx, session); if(user_value != NULL) *user_value = ctx.m_currentCmd->user_value; diff --git a/storage/ndb/test/src/HugoCalculator.cpp b/storage/ndb/test/src/HugoCalculator.cpp index 4d05c99738f..1589d4a9eb0 100644 --- a/storage/ndb/test/src/HugoCalculator.cpp +++ b/storage/ndb/test/src/HugoCalculator.cpp @@ -89,6 +89,27 @@ HugoCalculator::float calcValue(int record, int attrib, int updates) const; HugoCalculator::double calcValue(int record, int attrib, int updates) const; #endif +static +Uint32 +calc_len(Uint32 rvalue, int maxlen) +{ + Uint32 minlen = 25; + + if ((rvalue >> 16) < 4096) + minlen = 15; + else if ((rvalue >> 16) < 8192) + minlen = 25; + else if ((rvalue >> 16) < 16384) + minlen = 35; + else + minlen = 64; + + if (maxlen <= minlen) + return maxlen; + + 
return minlen + (rvalue % (maxlen - minlen)); +} + const char* HugoCalculator::calcValue(int record, int attrib, @@ -178,7 +199,7 @@ HugoCalculator::calcValue(int record, break; case NdbDictionary::Column::Varbinary: case NdbDictionary::Column::Varchar: - len = 1 + (myRand(&seed) % (len - 1)); + len = calc_len(myRand(&seed), len - 1); assert(len < 256); * outlen = len + 1; * buf = len; @@ -186,7 +207,7 @@ HugoCalculator::calcValue(int record, goto write_char; case NdbDictionary::Column::Longvarchar: case NdbDictionary::Column::Longvarbinary: - len = 1 + (myRand(&seed) % (len - 2)); + len = calc_len(myRand(&seed), len - 2); assert(len < 65536); * outlen = len + 2; int2store(buf, len); diff --git a/storage/ndb/test/src/NDBT_Table.cpp b/storage/ndb/test/src/NDBT_Table.cpp index d6a0b014fda..2e0e5939266 100644 --- a/storage/ndb/test/src/NDBT_Table.cpp +++ b/storage/ndb/test/src/NDBT_Table.cpp @@ -33,7 +33,7 @@ operator <<(class NdbOut& ndbout, const NDBT_Table & tab) ndbout << "Length of frm data: " << tab.getFrmLength() << endl; ndbout << "Row Checksum: " << tab.getRowChecksumIndicator() << endl; ndbout << "Row GCI: " << tab.getRowGCIIndicator() << endl; - + ndbout << "SingleUserMode: " << (Uint32) tab.getSingleUserMode() << endl; //<< ((tab.getTupleKey() == TupleId) ? " tupleid" : "") <<endl; ndbout << "TableStatus: "; diff --git a/storage/ndb/test/src/NDBT_Test.cpp b/storage/ndb/test/src/NDBT_Test.cpp index 9c908ab27c6..1245d004aff 100644 --- a/storage/ndb/test/src/NDBT_Test.cpp +++ b/storage/ndb/test/src/NDBT_Test.cpp @@ -947,6 +947,63 @@ NDBT_TestSuite::executeOne(Ndb_cluster_connection& con, } } +int +NDBT_TestSuite::executeOneCtx(Ndb_cluster_connection& con, + const NdbDictionary::Table *ptab, const char* _testname){ + + testSuiteTimer.doStart(); + + do{ + if(tests.size() == 0) + break; + + Ndb ndb(&con, "TEST_DB"); + ndb.init(1024); + + int result = ndb.waitUntilReady(300); // 5 minutes + if (result != 0){ + g_err << name <<": Ndb was not ready" << endl; + break; + } + + ndbout << name << " started [" << getDate() << "]" << endl; + ndbout << "|- " << ptab->getName() << endl; + + for (unsigned t = 0; t < tests.size(); t++){ + + if (_testname != NULL && + strcasecmp(tests[t]->getName(), _testname) != 0) + continue; + + tests[t]->initBeforeTest(); + + ctx = new NDBT_Context(con); + ctx->setTab(ptab); + ctx->setNumRecords(records); + ctx->setNumLoops(loops); + if(remote_mgm != NULL) + ctx->setRemoteMgm(remote_mgm); + ctx->setSuite(this); + + result = tests[t]->execute(ctx); + if (result != NDBT_OK) + numTestsFail++; + else + numTestsOk++; + numTestsExecuted++; + + delete ctx; + } + + if (numTestsFail > 0) + break; + }while(0); + + testSuiteTimer.doStop(); + int res = report(_testname); + return NDBT_ProgramExit(res); +} + int NDBT_TestSuite::createHook(Ndb* ndb, NdbDictionary::Table& tab, int when) { diff --git a/storage/ndb/test/src/NdbRestarter.cpp b/storage/ndb/test/src/NdbRestarter.cpp index 299517b32d3..1acd28dab23 100644 --- a/storage/ndb/test/src/NdbRestarter.cpp +++ b/storage/ndb/test/src/NdbRestarter.cpp @@ -26,6 +26,8 @@ #define MGMERR(h) \ ndbout << "latest_error="<<ndb_mgm_get_latest_error(h) \ << ", line="<<ndb_mgm_get_latest_error_line(h) \ + << ", mesg="<<ndb_mgm_get_latest_error_msg(h) \ + << ", desc="<<ndb_mgm_get_latest_error_desc(h) \ << endl; diff --git a/storage/ndb/test/src/NdbRestarts.cpp b/storage/ndb/test/src/NdbRestarts.cpp index 013c7a97d1f..6ec520887b5 100644 --- a/storage/ndb/test/src/NdbRestarts.cpp +++ b/storage/ndb/test/src/NdbRestarts.cpp @@ -630,8 +630,8 
@@ int restartNFDuringNR(NdbRestarter& _restarter, int nodeId = _restarter.getDbNodeId(randomId); int error = NFDuringNR_codes[i]; - g_info << _restart->m_name << ": node = " << nodeId - << " error code = " << error << endl; + g_err << _restart->m_name << ": node = " << nodeId + << " error code = " << error << endl; CHECK(_restarter.restartOneDbNode(nodeId, false, true, true) == 0, "Could not restart node "<< nodeId); diff --git a/storage/ndb/test/src/UtilTransactions.cpp b/storage/ndb/test/src/UtilTransactions.cpp index 453364fa1b2..5a408140c8e 100644 --- a/storage/ndb/test/src/UtilTransactions.cpp +++ b/storage/ndb/test/src/UtilTransactions.cpp @@ -1381,6 +1381,7 @@ loop: goto error; } + row_count= 0; { int eof; while((eof = pOp->nextResult(true)) == 0) diff --git a/storage/ndb/test/tools/listen.cpp b/storage/ndb/test/tools/listen.cpp index 1d142e0931d..fd404ff0134 100644 --- a/storage/ndb/test/tools/listen.cpp +++ b/storage/ndb/test/tools/listen.cpp @@ -168,9 +168,15 @@ main(int argc, const char** argv){ break; case NdbDictionary::Event::TE_DROP: break; + case NdbDictionary::Event::TE_NODE_FAILURE: + break; + case NdbDictionary::Event::TE_SUBSCRIBE: + case NdbDictionary::Event::TE_UNSUBSCRIBE: + break; default: /* We should REALLY never get here. */ - ndbout_c("Error: unknown event type"); + ndbout_c("Error: unknown event type: %u", + (Uint32)pOp->getEventType()); abort(); } } while ((pOp= MyNdb.nextEvent()) && gci == pOp->getGCI()); diff --git a/storage/ndb/tools/restore/Restore.cpp b/storage/ndb/tools/restore/Restore.cpp index 3d466384782..f99cacfc613 100644 --- a/storage/ndb/tools/restore/Restore.cpp +++ b/storage/ndb/tools/restore/Restore.cpp @@ -27,6 +27,7 @@ #include <NdbAutoPtr.hpp> #include "../../../../sql/ha_ndbcluster_tables.h" +extern NdbRecordPrintFormat g_ndbrecord_print_format; Uint16 Twiddle16(Uint16 in); // Byte shift 16-bit data Uint32 Twiddle32(Uint32 in); // Byte shift 32-bit data @@ -298,6 +299,7 @@ RestoreMetaData::markSysTables() Uint32 i; for (i = 0; i < getNoOfTables(); i++) { TableS* table = allTables[i]; + table->m_local_id = i; const char* tableName = table->getTableName(); if ( // XXX should use type strcmp(tableName, "SYSTAB_0") == 0 || @@ -310,9 +312,11 @@ RestoreMetaData::markSysTables() "cluster_replication" -> "cluster" -> "mysql" */ strcmp(tableName, "cluster_replication/def/" OLD_NDB_APPLY_TABLE) == 0 || - strcmp(tableName, "cluster/def/" OLD_NDB_APPLY_TABLE) == 0 || + strcmp(tableName, OLD_NDB_REP_DB "/def/" OLD_NDB_APPLY_TABLE) == 0 || + strcmp(tableName, OLD_NDB_REP_DB "/def/" OLD_NDB_SCHEMA_TABLE) == 0 || strcmp(tableName, NDB_REP_DB "/def/" NDB_APPLY_TABLE) == 0 || strcmp(tableName, NDB_REP_DB "/def/" NDB_SCHEMA_TABLE)== 0 ) + table->isSysTable = true; } for (i = 0; i < getNoOfTables(); i++) { @@ -330,6 +334,7 @@ RestoreMetaData::markSysTables() if (table->getTableId() == (Uint32) id1) { if (table->isSysTable) blobTable->isSysTable = true; + blobTable->m_main_table = table; break; } } @@ -427,6 +432,7 @@ TableS::TableS(Uint32 version, NdbTableImpl* tableImpl) m_noOfRecords= 0; backupVersion = version; isSysTable = false; + m_main_table = NULL; for (int i = 0; i < tableImpl->getNoOfColumns(); i++) createAttr(tableImpl->getColumn(i)); @@ -896,6 +902,7 @@ bool RestoreDataIterator::readFragmentHeader(int & ret, Uint32 *fragmentId) return false; } + info.setLevel(254); info << "_____________________________________________________" << endl << "Processing data in table: " << m_currentTable->getTableName() << "(" << Header.TableId << ") fragment " @@ 
-1153,14 +1160,14 @@ operator<<(NdbOut& ndbout, const AttributeS& attr){ if (data.null) { - ndbout << "<NULL>"; + ndbout << g_ndbrecord_print_format.null_string; return ndbout; } NdbRecAttr tmprec(0); - tmprec.setup(desc.m_column, (char *)data.void_value); + tmprec.setup(desc.m_column, 0); tmprec.receive_data((Uint32*)data.void_value, data.size); - ndbout << tmprec; + ndbrecattr_print_formatted(ndbout, tmprec, g_ndbrecord_print_format); return ndbout; } @@ -1169,17 +1176,15 @@ operator<<(NdbOut& ndbout, const AttributeS& attr){ NdbOut& operator<<(NdbOut& ndbout, const TupleS& tuple) { - ndbout << tuple.getTable()->getTableName() << "; "; for (int i = 0; i < tuple.getNoOfAttributes(); i++) { + if (i > 0) + ndbout << g_ndbrecord_print_format.fields_terminated_by; AttributeData * attr_data = tuple.getData(i); const AttributeDesc * attr_desc = tuple.getDesc(i); const AttributeS attr = {attr_desc, *attr_data}; debug << i << " " << attr_desc->m_column->getName(); ndbout << attr; - - if (i != (tuple.getNoOfAttributes() - 1)) - ndbout << delimiter << " "; } // for return ndbout; } diff --git a/storage/ndb/tools/restore/Restore.hpp b/storage/ndb/tools/restore/Restore.hpp index 74834fe0abc..406899b2b4d 100644 --- a/storage/ndb/tools/restore/Restore.hpp +++ b/storage/ndb/tools/restore/Restore.hpp @@ -25,8 +25,6 @@ #include <ndb_version.h> #include <version.h> -#define delimiter ";" - const int FileNameLenC = 256; const int TableNameLenC = 256; const int AttrNameLenC = 256; @@ -142,6 +140,8 @@ class TableS { Uint64 m_max_auto_val; bool isSysTable; + TableS *m_main_table; + Uint32 m_local_id; Uint64 m_noOfRecords; Vector<FragmentInfo *> m_fragmentInfo; @@ -156,6 +156,9 @@ public: Uint32 getTableId() const { return m_dictTable->getTableId(); } + Uint32 getLocalId() const { + return m_local_id; + } Uint32 getNoOfRecords() const { return m_noOfRecords; } @@ -239,6 +242,10 @@ public: return isSysTable; } + const TableS *getMainTable() const { + return m_main_table; + } + TableS& operator=(TableS& org) ; }; // TableS; diff --git a/storage/ndb/tools/restore/consumer.hpp b/storage/ndb/tools/restore/consumer.hpp index b05f7e00402..4121cc4e1b4 100644 --- a/storage/ndb/tools/restore/consumer.hpp +++ b/storage/ndb/tools/restore/consumer.hpp @@ -41,6 +41,7 @@ public: NODE_GROUP_MAP *m_nodegroup_map; uint m_nodegroup_map_len; virtual bool has_temp_error() {return false;} + virtual bool table_equal(const TableS &) {return true;} }; #endif diff --git a/storage/ndb/tools/restore/consumer_printer.cpp b/storage/ndb/tools/restore/consumer_printer.cpp index dabd77f7358..721e0332381 100644 --- a/storage/ndb/tools/restore/consumer_printer.cpp +++ b/storage/ndb/tools/restore/consumer_printer.cpp @@ -14,6 +14,9 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "consumer_printer.hpp" +extern FilteredNdbOut info; +extern NdbRecordPrintFormat g_ndbrecord_print_format; +extern const char *tab_path; bool BackupPrinter::table(const TableS & tab) @@ -21,7 +24,8 @@ BackupPrinter::table(const TableS & tab) if (m_print || m_print_meta) { m_ndbout << tab; - ndbout_c("Successfully printed table: %s", tab.m_dictTable->getName()); + info.setLevel(254); + info << "Successfully printed table: " << tab.m_dictTable->getName() << endl; } return true; } @@ -31,7 +35,14 @@ BackupPrinter::tuple(const TupleS & tup, Uint32 fragId) { m_dataCount++; if (m_print || m_print_data) - m_ndbout << tup << endl; + { + if (m_ndbout.m_out == info.m_out) + { + info.setLevel(254); + info << tup.getTable()->getTableName() << "; "; + } + 
m_ndbout << tup << g_ndbrecord_print_format.lines_terminated_by; + } } void @@ -47,9 +58,10 @@ BackupPrinter::endOfLogEntrys() { if (m_print || m_print_log) { - ndbout << "Printed " << m_dataCount << " tuples and " - << m_logCount << " log entries" - << " to stdout." << endl; + info.setLevel(254); + info << "Printed " << m_dataCount << " tuples and " + << m_logCount << " log entries" + << " to stdout." << endl; } } bool diff --git a/storage/ndb/tools/restore/consumer_restore.cpp b/storage/ndb/tools/restore/consumer_restore.cpp index 03cf3e6ddb7..e8f9842e554 100644 --- a/storage/ndb/tools/restore/consumer_restore.cpp +++ b/storage/ndb/tools/restore/consumer_restore.cpp @@ -619,6 +619,7 @@ BackupRestore::update_apply_status(const RestoreMetaData &metaData) return true; bool result= false; + unsigned apply_table_format= 0; m_ndb->setDatabaseName(NDB_REP_DB); m_ndb->setSchemaName("def"); @@ -631,8 +632,33 @@ BackupRestore::update_apply_status(const RestoreMetaData &metaData) << dict->getNdbError() << endl; return false; } + if + (ndbtab->getColumn(0)->getType() == NdbDictionary::Column::Unsigned && + ndbtab->getColumn(1)->getType() == NdbDictionary::Column::Bigunsigned) + { + if (ndbtab->getNoOfColumns() == 2) + { + apply_table_format= 1; + } + else if + (ndbtab->getColumn(2)->getType() == NdbDictionary::Column::Varchar && + ndbtab->getColumn(3)->getType() == NdbDictionary::Column::Bigunsigned && + ndbtab->getColumn(4)->getType() == NdbDictionary::Column::Bigunsigned) + { + apply_table_format= 2; + } + } + if (apply_table_format == 0) + { + err << Ndb_apply_table << " has wrong format\n"; + return false; + } + Uint32 server_id= 0; Uint64 epoch= metaData.getStopGCP(); + Uint64 zero= 0; + char empty_string[1]; + empty_string[0]= 0; NdbTransaction * trans= m_ndb->startTransaction(); if (!trans) { @@ -655,6 +681,15 @@ BackupRestore::update_apply_status(const RestoreMetaData &metaData) << op->getNdbError() << endl; goto err; } + if ((apply_table_format == 2) && + (op->setValue(2u, (const char *)&empty_string, 1) || + op->setValue(3u, (const char *)&zero, sizeof(zero)) || + op->setValue(4u, (const char *)&zero, sizeof(zero)))) + { + err << Ndb_apply_table << ": " + << op->getNdbError() << endl; + goto err; + } if (trans->execute(NdbTransaction::Commit)) { err << Ndb_apply_table << ": " @@ -668,6 +703,66 @@ err: } bool +BackupRestore::table_equal(const TableS &tableS) +{ + if (!m_restore) + return true; + + const char *tablename = tableS.getTableName(); + + if(tableS.m_dictTable == NULL){ + ndbout << "Table " << tablename << " has no m_dictTable" << endl; + return false; + } + /** + * Ignore blob tables + */ + if(match_blob(tablename) >= 0) + return true; + + const NdbTableImpl & tmptab = NdbTableImpl::getImpl(* tableS.m_dictTable); + if ((int) tmptab.m_indexType != (int) NdbDictionary::Index::Undefined){ + return true; + } + + BaseString tmp(tablename); + Vector<BaseString> split; + if(tmp.split(split, "/") != 3){ + err << "Invalid table name format " << tablename << endl; + return false; + } + + m_ndb->setDatabaseName(split[0].c_str()); + m_ndb->setSchemaName(split[1].c_str()); + + NdbDictionary::Dictionary* dict = m_ndb->getDictionary(); + const NdbDictionary::Table* tab = dict->getTable(split[2].c_str()); + if(tab == 0){ + err << "Unable to find table: " << split[2].c_str() << endl; + return false; + } + + if(tab->getNoOfColumns() != tableS.m_dictTable->getNoOfColumns()) + { + ndbout_c("m_columns.size %d != %d",tab->getNoOfColumns(), + tableS.m_dictTable->getNoOfColumns()); + return false; + } + + 
for(int i = 0; i<tab->getNoOfColumns(); i++) + { + if(!tab->getColumn(i)->equal(*(tableS.m_dictTable->getColumn(i)))) + { + ndbout_c("m_columns %s != %s",tab->getColumn(i)->getName(), + tableS.m_dictTable->getColumn(i)->getName()); + return false; + } + } + + return true; +} + +bool BackupRestore::createSystable(const TableS & tables){ if (!m_restore && !m_restore_meta && !m_restore_epoch) return true; diff --git a/storage/ndb/tools/restore/consumer_restore.hpp b/storage/ndb/tools/restore/consumer_restore.hpp index 5063619d3c3..0bc9d8e8d20 100644 --- a/storage/ndb/tools/restore/consumer_restore.hpp +++ b/storage/ndb/tools/restore/consumer_restore.hpp @@ -73,6 +73,7 @@ public: virtual bool finalize_table(const TableS &); virtual bool has_temp_error(); virtual bool createSystable(const TableS & table); + virtual bool table_equal(const TableS & table); virtual bool update_apply_status(const RestoreMetaData &metaData); void connectToMysql(); bool map_in_frm(char *new_data, const char *data, diff --git a/storage/ndb/tools/restore/restore_main.cpp b/storage/ndb/tools/restore/restore_main.cpp index 4734e9f609d..8353f049647 100644 --- a/storage/ndb/tools/restore/restore_main.cpp +++ b/storage/ndb/tools/restore/restore_main.cpp @@ -20,6 +20,7 @@ #include <NdbTCP.h> #include <NdbMem.h> #include <NdbOut.hpp> +#include <OutputStream.hpp> #include <NDBT_ReturnCodes.h> #include "consumer_restore.hpp" @@ -34,14 +35,24 @@ static int ga_nParallelism = 128; static int ga_backupId = 0; static bool ga_dont_ignore_systab_0 = false; static Vector<class BackupConsumer *> g_consumers; +static BackupPrinter* g_printer = NULL; -static const char* ga_backupPath = "." DIR_SEPARATOR; +static const char* default_backupPath = "." DIR_SEPARATOR; +static const char* ga_backupPath = default_backupPath; static const char *opt_nodegroup_map_str= 0; static unsigned opt_nodegroup_map_len= 0; static NODE_GROUP_MAP opt_nodegroup_map[MAX_NODE_GROUP_MAPS]; #define OPT_NDB_NODEGROUP_MAP 'z' +const char *opt_ndb_database= NULL; +const char *opt_ndb_table= NULL; +unsigned int opt_verbose; +unsigned int opt_hex_format; +Vector<BaseString> g_databases; +Vector<BaseString> g_tables; +NdbRecordPrintFormat g_ndbrecord_print_format; + NDB_STD_OPTS_VARS; /** @@ -50,6 +61,7 @@ NDB_STD_OPTS_VARS; static bool ga_restore_epoch = false; static bool ga_restore = false; static bool ga_print = false; +static bool ga_skip_table_check = false; static int _print = 0; static int _print_meta = 0; static int _print_data = 0; @@ -61,6 +73,28 @@ BaseString g_options("ndb_restore"); const char *load_default_groups[]= { "mysql_cluster","ndb_restore",0 }; +enum ndb_restore_options { + OPT_PRINT= NDB_STD_OPTIONS_LAST, + OPT_PRINT_DATA, + OPT_PRINT_LOG, + OPT_PRINT_META, + OPT_BACKUP_PATH, + OPT_HEX_FORMAT, + OPT_FIELDS_ENCLOSED_BY, + OPT_FIELDS_TERMINATED_BY, + OPT_FIELDS_OPTIONALLY_ENCLOSED_BY, + OPT_LINES_TERMINATED_BY, + OPT_APPEND, + OPT_VERBOSE +}; +static const char *opt_fields_enclosed_by= NULL; +static const char *opt_fields_terminated_by= NULL; +static const char *opt_fields_optionally_enclosed_by= NULL; +static const char *opt_lines_terminated_by= NULL; + +static const char *tab_path= NULL; +static int opt_append; + static struct my_option my_long_options[] = { NDB_STD_OPTS("ndb_restore"), @@ -91,24 +125,27 @@ static struct my_option my_long_options[] = NDB_REP_DB "." 
NDB_APPLY_TABLE " with id 0 will be updated/inserted.", (gptr*) &ga_restore_epoch, (gptr*) &ga_restore_epoch, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, + { "skip-table-check", 's', "Skip table structure check during restore of data", + (gptr*) &ga_skip_table_check, (gptr*) &ga_skip_table_check, 0, + GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "parallelism", 'p', "No of parallel transactions during restore of data." "(parallelism can be 1 to 1024)", (gptr*) &ga_nParallelism, (gptr*) &ga_nParallelism, 0, GET_INT, REQUIRED_ARG, 128, 1, 1024, 0, 1, 0 }, - { "print", 256, "Print data and log to stdout", + { "print", OPT_PRINT, "Print data and log to stdout", (gptr*) &_print, (gptr*) &_print, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, - { "print_data", 257, "Print data to stdout", + { "print_data", OPT_PRINT_DATA, "Print data to stdout", (gptr*) &_print_data, (gptr*) &_print_data, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, - { "print_meta", 258, "Print meta data to stdout", + { "print_meta", OPT_PRINT_META, "Print meta data to stdout", (gptr*) &_print_meta, (gptr*) &_print_meta, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, - { "print_log", 259, "Print log to stdout", + { "print_log", OPT_PRINT_LOG, "Print log to stdout", (gptr*) &_print_log, (gptr*) &_print_log, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, - { "backup_path", 260, "Path to backup files", + { "backup_path", OPT_BACKUP_PATH, "Path to backup files", (gptr*) &ga_backupPath, (gptr*) &ga_backupPath, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "dont_ignore_systab_0", 'f', @@ -121,6 +158,37 @@ static struct my_option my_long_options[] = (gptr*) &opt_nodegroup_map_str, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, + { "fields-enclosed-by", OPT_FIELDS_ENCLOSED_BY, + "Fields are enclosed by ...", + (gptr*) &opt_fields_enclosed_by, (gptr*) &opt_fields_enclosed_by, 0, + GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, + { "fields-terminated-by", OPT_FIELDS_TERMINATED_BY, + "Fields are terminated by ...", + (gptr*) &opt_fields_terminated_by, + (gptr*) &opt_fields_terminated_by, 0, + GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, + { "fields-optionally-enclosed-by", OPT_FIELDS_OPTIONALLY_ENCLOSED_BY, + "Fields are optionally enclosed by ...", + (gptr*) &opt_fields_optionally_enclosed_by, + (gptr*) &opt_fields_optionally_enclosed_by, 0, + GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, + { "hex", OPT_HEX_FORMAT, "print binary types in hex format", + (gptr*) &opt_hex_format, (gptr*) &opt_hex_format, 0, + GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, + { "tab", 'T', "Creates tab separated textfile for each table to " + "given path. 
(creates .txt files)", + (gptr*) &tab_path, (gptr*) &tab_path, 0, + GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + { "append", OPT_APPEND, "for --tab append data to file", + (gptr*) &opt_append, (gptr*) &opt_append, 0, + GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, + { "lines-terminated-by", OPT_LINES_TERMINATED_BY, "", + (gptr*) &opt_lines_terminated_by, (gptr*) &opt_lines_terminated_by, 0, + GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, + { "verbose", OPT_VERBOSE, + "verbosity", + (gptr*) &opt_verbose, (gptr*) &opt_verbose, 0, + GET_INT, REQUIRED_ARG, 1, 0, 255, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; @@ -255,20 +323,25 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), #endif ndb_std_get_one_option(optid, opt, argument); switch (optid) { + case OPT_VERBOSE: + info.setThreshold(255-opt_verbose); + break; case 'n': if (ga_nodeId == 0) { - printf("Error in --nodeid,-n setting, see --help\n"); + err << "Error in --nodeid,-n setting, see --help"; exit(NDBT_ProgramExit(NDBT_WRONGARGS)); } + info.setLevel(254); info << "Nodeid = " << ga_nodeId << endl; break; case 'b': if (ga_backupId == 0) { - printf("Error in --backupid,-b setting, see --help\n"); + err << "Error in --backupid,-b setting, see --help"; exit(NDBT_ProgramExit(NDBT_WRONGARGS)); } + info.setLevel(254); info << "Backup Id = " << ga_backupId << endl; break; case OPT_NDB_NODEGROUP_MAP: @@ -277,6 +350,8 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), to nodegroup in new cluster. */ opt_nodegroup_map_len= 0; + + info.setLevel(254); info << "Analyse node group map" << endl; if (analyse_nodegroup_map(opt_nodegroup_map_str, &opt_nodegroup_map[0])) @@ -331,9 +406,9 @@ o verify nodegroup mapping exit(NDBT_ProgramExit(NDBT_WRONGARGS)); #endif - BackupPrinter* printer = new BackupPrinter(opt_nodegroup_map, - opt_nodegroup_map_len); - if (printer == NULL) + g_printer = new BackupPrinter(opt_nodegroup_map, + opt_nodegroup_map_len); + if (g_printer == NULL) return false; BackupRestore* restore = new BackupRestore(opt_nodegroup_map, @@ -341,7 +416,8 @@ o verify nodegroup mapping ga_nParallelism); if (restore == NULL) { - delete printer; + delete g_printer; + g_printer = NULL; return false; } @@ -349,22 +425,22 @@ o verify nodegroup mapping { ga_print = true; ga_restore = true; - printer->m_print = true; + g_printer->m_print = true; } if (_print_meta) { ga_print = true; - printer->m_print_meta = true; + g_printer->m_print_meta = true; } if (_print_data) { ga_print = true; - printer->m_print_data = true; + g_printer->m_print_data = true; } if (_print_log) { ga_print = true; - printer->m_print_log = true; + g_printer->m_print_log = true; } if (_restore_data) @@ -390,19 +466,64 @@ o verify nodegroup mapping } { - BackupConsumer * c = printer; + BackupConsumer * c = g_printer; g_consumers.push_back(c); } { BackupConsumer * c = restore; g_consumers.push_back(c); } - // Set backup file path - if (*pargv[0] != NULL) + for (;;) { - ga_backupPath = *pargv[0]; + int i= 0; + if (ga_backupPath == default_backupPath) + { + // Set backup file path + if ((*pargv)[i] == NULL) + break; + ga_backupPath = (*pargv)[i++]; + } + if ((*pargv)[i] == NULL) + break; + g_databases.push_back((*pargv)[i++]); + while ((*pargv)[i] != NULL) + { + g_tables.push_back((*pargv)[i++]); + } + break; } + info.setLevel(254); info << "backup path = " << ga_backupPath << endl; + if (g_databases.size() > 0) + { + info << "Restoring only from database " << g_databases[0].c_str() << endl; + if 
(g_tables.size() > 0) + info << "Restoring only tables:"; + for (unsigned i= 0; i < g_tables.size(); i++) + { + info << " " << g_tables[i].c_str(); + } + if (g_tables.size() > 0) + info << endl; + } + /* + the below formatting follows the formatting from mysqldump + do not change unless to adopt to changes in mysqldump + */ + g_ndbrecord_print_format.fields_enclosed_by= + opt_fields_enclosed_by ? opt_fields_enclosed_by : ""; + g_ndbrecord_print_format.fields_terminated_by= + opt_fields_terminated_by ? opt_fields_terminated_by : "\t"; + g_ndbrecord_print_format.fields_optionally_enclosed_by= + opt_fields_optionally_enclosed_by ? opt_fields_optionally_enclosed_by : ""; + g_ndbrecord_print_format.lines_terminated_by= + opt_lines_terminated_by ? opt_lines_terminated_by : "\n"; + if (g_ndbrecord_print_format.fields_optionally_enclosed_by[0] == '\0') + g_ndbrecord_print_format.null_string= "\\N"; + else + g_ndbrecord_print_format.null_string= ""; + g_ndbrecord_print_format.hex_prefix= ""; + g_ndbrecord_print_format.hex_format= opt_hex_format; return true; } @@ -427,6 +548,70 @@ checkSysTable(const RestoreMetaData& metaData, uint i) return checkSysTable(metaData[i]); } +static inline bool +isBlobTable(const TableS* table) +{ + return table->getMainTable() != NULL; +} + +static inline bool +isIndex(const TableS* table) +{ + const NdbTableImpl & tmptab = NdbTableImpl::getImpl(* table->m_dictTable); + return (int) tmptab.m_indexType != (int) NdbDictionary::Index::Undefined; +} + +static inline bool +checkDbAndTableName(const TableS* table) +{ + if (g_tables.size() == 0 && + g_databases.size() == 0) + return true; + if (g_databases.size() == 0) + g_databases.push_back("TEST_DB"); + + // Filter on the main table name for indexes and blobs + const char *table_name; + if (isBlobTable(table)) + table_name= table->getMainTable()->getTableName(); + else if (isIndex(table)) + table_name= + NdbTableImpl::getImpl(*table->m_dictTable).m_primaryTable.c_str(); + else + table_name= table->getTableName(); + + unsigned i; + for (i= 0; i < g_databases.size(); i++) + { + if (strncmp(table_name, g_databases[i].c_str(), + g_databases[i].length()) == 0 && + table_name[g_databases[i].length()] == '/') + { + // we have a match + if (g_databases.size() > 1 || g_tables.size() == 0) + return true; + break; + } + } + if (i == g_databases.size()) + return false; // no match found + + while (*table_name != '/') table_name++; + table_name++; + while (*table_name != '/') table_name++; + table_name++; + + for (i= 0; i < g_tables.size(); i++) + { + if (strcmp(table_name, g_tables[i].c_str()) == 0) + { + // we have a match + return true; + } + } + return false; +} + static void free_data_callback() { @@ -459,6 +644,8 @@ main(int argc, char** argv) g_options.appfmt(" -n %d", ga_nodeId); if (_restore_meta) g_options.appfmt(" -m"); + if (ga_skip_table_check) + g_options.appfmt(" -s"); if (_restore_data) g_options.appfmt(" -r"); if (ga_restore_epoch) @@ -484,9 +671,10 @@ main(int argc, char** argv) const Uint32 version = tmp.NdbVersion; char buf[NDB_VERSION_STRING_BUF_SZ]; + info.setLevel(254); info << "Ndb version in backup files: " - << getVersionString(version, 0, buf, sizeof(buf)) << endl; - + << getVersionString(version, 0, buf, sizeof(buf)) << endl; + /** * check wheater we can restore the backup (right version). 
*/ @@ -554,27 +742,59 @@ main(int argc, char** argv) exitHandler(NDBT_FAILED); } } - debug << "Restoring tables" << endl; + + Vector<OutputStream *> table_output(metaData.getNoOfTables()); + debug << "Restoring tables" << endl; for(i = 0; i<metaData.getNoOfTables(); i++) { - if (checkSysTable(metaData, i)) + const TableS *table= metaData[i]; + table_output.push_back(NULL); + if (!checkDbAndTableName(table)) + continue; + if (checkSysTable(table)) { + if (!tab_path || isBlobTable(table) || isIndex(table)) + { + table_output[i]= ndbout.m_out; + } + else + { + FILE* res; + char filename[FN_REFLEN], tmp_path[FN_REFLEN]; + const char *table_name; + table_name= table->getTableName(); + while (*table_name != '/') table_name++; + table_name++; + while (*table_name != '/') table_name++; + table_name++; + convert_dirname(tmp_path, tab_path, NullS); + res= my_fopen(fn_format(filename, table_name, tmp_path, ".txt", 4), + opt_append ? + O_WRONLY|O_APPEND|O_CREAT : + O_WRONLY|O_TRUNC|O_CREAT, + MYF(MY_WME)); + if (res == 0) + { + exitHandler(NDBT_FAILED); + } + FileOutputStream *f= new FileOutputStream(res); + table_output[i]= f; + } for(Uint32 j= 0; j < g_consumers.size(); j++) - if (!g_consumers[j]->table(* metaData[i])) + if (!g_consumers[j]->table(* table)) { err << "Restore: Failed to restore table: "; - err << metaData[i]->getTableName() << " ... Exiting " << endl; + err << table->getTableName() << " ... Exiting " << endl; exitHandler(NDBT_FAILED); } } else { for(Uint32 j= 0; j < g_consumers.size(); j++) - if (!g_consumers[j]->createSystable(* metaData[i])) + if (!g_consumers[j]->createSystable(* table)) { err << "Restore: Failed to restore system table: "; - err << metaData[i]->getTableName() << " ... Exiting " << endl; + err << table->getTableName() << " ... Exiting " << endl; exitHandler(NDBT_FAILED); } - } } debug << "Close tables" << endl; @@ -589,6 +809,20 @@ main(int argc, char** argv) { if(_restore_data || _print_data) { + if (!ga_skip_table_check){ + for(i=0; i < metaData.getNoOfTables(); i++){ + if (checkSysTable(metaData, i)) + { + for(Uint32 j= 0; j < g_consumers.size(); j++) + if (!g_consumers[j]->table_equal(* metaData[i])) + { + err << "Restore: Failed to restore data, "; + err << metaData[i]->getTableName() << " table structure doesn't match backup ... 
Exiting " << endl; + exitHandler(NDBT_FAILED); + } + } + } + } RestoreDataIterator dataIter(metaData, &free_data_callback); // Read data file header @@ -604,9 +838,15 @@ main(int argc, char** argv) const TupleS* tuple; while ((tuple = dataIter.getNextTuple(res= 1)) != 0) { - if (checkSysTable(tuple->getTable())) - for(Uint32 j= 0; j < g_consumers.size(); j++) - g_consumers[j]->tuple(* tuple, fragmentId); + const TableS* table = tuple->getTable(); + OutputStream *output = table_output[table->getLocalId()]; + if (!output) + continue; + OutputStream *tmp = ndbout.m_out; + ndbout.m_out = output; + for(Uint32 j= 0; j < g_consumers.size(); j++) + g_consumers[j]->tuple(* tuple, fragmentId); + ndbout.m_out = tmp; } // while (tuple != NULL); if (res < 0) @@ -648,9 +888,12 @@ main(int argc, char** argv) const LogEntry * logEntry = 0; while ((logEntry = logIter.getNextLogEntry(res= 0)) != 0) { - if (checkSysTable(logEntry->m_table)) - for(Uint32 j= 0; j < g_consumers.size(); j++) - g_consumers[j]->logEntry(* logEntry); + const TableS* table = logEntry->m_table; + OutputStream *output = table_output[table->getLocalId()]; + if (!output) + continue; + for(Uint32 j= 0; j < g_consumers.size(); j++) + g_consumers[j]->logEntry(* logEntry); } if (res < 0) { @@ -667,27 +910,27 @@ main(int argc, char** argv) { for(i = 0; i<metaData.getNoOfTables(); i++) { - if (checkSysTable(metaData, i)) - { - for(Uint32 j= 0; j < g_consumers.size(); j++) - if (!g_consumers[j]->finalize_table(* metaData[i])) - { - err << "Restore: Failed to finalize restore table: %s. "; - err << "Exiting... " << metaData[i]->getTableName() << endl; - exitHandler(NDBT_FAILED); - } - } + const TableS* table = metaData[i]; + OutputStream *output = table_output[table->getLocalId()]; + if (!output) + continue; + for(Uint32 j= 0; j < g_consumers.size(); j++) + if (!g_consumers[j]->finalize_table(*table)) + { + err << "Restore: Failed to finalize restore table: %s. "; + err << "Exiting... 
" << metaData[i]->getTableName() << endl; + exitHandler(NDBT_FAILED); + } } } } - if (ga_restore_epoch) { for (i= 0; i < g_consumers.size(); i++) if (!g_consumers[i]->update_apply_status(metaData)) { - err << "Restore: Failed to restore epoch" << endl; - return -1; + err << "Restore: Failed to restore epoch" << endl; + return -1; } } @@ -702,7 +945,23 @@ main(int argc, char** argv) } clearConsumers(); - return NDBT_ProgramExit(NDBT_OK); + + for(i = 0; i < metaData.getNoOfTables(); i++) + { + if (table_output[i] && + table_output[i] != ndbout.m_out) + { + my_fclose(((FileOutputStream *)table_output[i])->getFile(), MYF(MY_WME)); + delete table_output[i]; + table_output[i] = NULL; + } + } + + if (opt_verbose) + return NDBT_ProgramExit(NDBT_OK); + else + return 0; } // main template class Vector<BackupConsumer*>; +template class Vector<OutputStream*>; diff --git a/strings/ctype-uca.c b/strings/ctype-uca.c index 1292d7f5ede..1263882846d 100644 --- a/strings/ctype-uca.c +++ b/strings/ctype-uca.c @@ -6744,7 +6744,7 @@ typedef struct my_uca_scanner_handler_st int (*next)(my_uca_scanner *scanner); } my_uca_scanner_handler; -static uint16 nochar[]= {0}; +static uint16 nochar[]= {0,0}; #ifdef HAVE_CHARSET_ucs2 @@ -6769,13 +6769,32 @@ static void my_uca_scanner_init_ucs2(my_uca_scanner *scanner, CHARSET_INFO *cs __attribute__((unused)), const uchar *str, uint length) { - /* Note, no needs to initialize scanner->wbeg */ - scanner->sbeg= str; - scanner->send= str + length - 2; scanner->wbeg= nochar; - scanner->uca_length= cs->sort_order; - scanner->uca_weight= cs->sort_order_big; - scanner->contractions= cs->contractions; + if (length) + { + scanner->sbeg= str; + scanner->send= str + length - 2; + scanner->uca_length= cs->sort_order; + scanner->uca_weight= cs->sort_order_big; + scanner->contractions= cs->contractions; + return; + } + + /* + Sometimes this function is called with + str=NULL and length=0, which should be + considered as an empty string. + + The above initialization is unsafe for such cases, + because scanner->send is initialized to (NULL-2), which is 0xFFFFFFFE. + Then we fall into an endless loop in my_uca_scanner_next_ucs2(). + + Do special initialization for the case when length=0. + Initialize scanner->sbeg to an address greater than scanner->send. + Next call of my_uca_scanner_next_ucs2() will correctly return with -1. 
+ */ + scanner->sbeg= (uchar*) &nochar[1]; + scanner->send= (uchar*) &nochar[0]; } diff --git a/strings/ctype-utf8.c b/strings/ctype-utf8.c index 0536d445533..16882e9b25d 100644 --- a/strings/ctype-utf8.c +++ b/strings/ctype-utf8.c @@ -2769,6 +2769,7 @@ static int my_strnncoll_utf8_cs(CHARSET_INFO *cs, const uchar *te=t+tlen; int save_diff = 0; int diff; + MY_UNICASE_INFO **uni_plane= cs->caseinfo; while ( s < se && t < te ) { @@ -2805,13 +2806,16 @@ static int my_strnncoll_utf8_cs(CHARSET_INFO *cs, static int my_strnncollsp_utf8_cs(CHARSET_INFO *cs, const uchar *s, uint slen, - const uchar *t, uint tlen) + const uchar *t, uint tlen, + my_bool diff_if_only_endspace_difference + __attribute__((unused))) { int s_res,t_res; my_wc_t s_wc,t_wc; const uchar *se= s+slen; const uchar *te= t+tlen; int save_diff = 0; + MY_UNICASE_INFO **uni_plane= cs->caseinfo; while ( s < se && t < te ) { @@ -2880,6 +2884,7 @@ static MY_COLLATION_HANDLER my_collation_cs_handler = my_strnncoll_utf8_cs, my_strnncollsp_utf8_cs, my_strnxfrm_utf8, + my_strnxfrmlen_utf8, my_like_range_simple, my_wildcmp_mb, my_strcasecmp_utf8, diff --git a/support-files/mysql.server.sh b/support-files/mysql.server.sh index a84b9cfaec5..d2742c548b6 100644 --- a/support-files/mysql.server.sh +++ b/support-files/mysql.server.sh @@ -46,6 +46,13 @@ basedir= datadir= +# Default value, in seconds, after which the script should time out waiting +# for server start. +# Value here is overridden by the value in my.cnf. +# 0 means don't wait at all +# Negative numbers mean to wait indefinitely +service_startup_timeout=900 + # The following variables are only set for letting mysql.server find things. # Set some defaults @@ -126,6 +133,7 @@ parse_server_arguments() { ;; --user=*) user=`echo "$arg" | sed -e 's/^[^=]*=//'` ;; --pid-file=*) server_pid_file=`echo "$arg" | sed -e 's/^[^=]*=//'` ;; + --service-startup-timeout=*) service_startup_timeout=`echo "$arg" | sed -e 's/^[^=]*=//'` ;; --use-mysqld_safe) use_mysqld_safe=1;; --use-manager) use_mysqld_safe=0;; esac @@ -143,7 +151,7 @@ parse_manager_arguments() { wait_for_pid () { i=0 - while test $i -lt 900 ; do + while test $i -ne $service_startup_timeout ; do sleep 1 case "$1" in 'created') diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh index 0671bd8a58c..1bdab2495c2 100644 --- a/support-files/mysql.spec.sh +++ b/support-files/mysql.spec.sh @@ -337,11 +337,7 @@ then cp -fp mysql-debug-%{mysql_version}/config.log "$MYSQL_DEBUGCONFLOG_DEST" fi - MTR_BUILD_THREAD=auto - export MTR_BUILD_THREAD -(cd mysql-debug-%{mysql_version}/mysql-test ; \ - ./mysql-test-run.pl --comment=debug --skip-rpl --skip-ndbcluster --force --report-features ; \ - true) +(cd mysql-debug-%{mysql_version} ; make test-bt-debug) ############################################################################## # @@ -370,15 +366,7 @@ then cp -fp mysql-release-%{mysql_version}/config.log "$MYSQL_CONFLOG_DEST" fi - MTR_BUILD_THREAD=auto - export MTR_BUILD_THREAD -cd mysql-release-%{mysql_version}/mysql-test -./mysql-test-run.pl --comment=normal --force --skip-ndbcluster --timer --report-features || true -./mysql-test-run.pl --comment=ps --ps-protocol --force --skip-ndbcluster --timer || true -./mysql-test-run.pl --comment=normal+rowrepl --mysqld=--binlog-format=row --force --skip-ndbcluster --timer || true -./mysql-test-run.pl --comment=ps+rowrepl+NDB --ps-protocol --mysqld=--binlog-format=row --force --timer || true -./mysql-test-run.pl --comment=NDB --with-ndbcluster-only --force --timer || true -cd ../.. 
+(cd mysql-release-%{mysql_version} ; make test-bt) ############################################################################## diff --git a/tests/Makefile.am b/tests/Makefile.am index 07a3a3b913c..5f34def19bd 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -31,7 +31,7 @@ EXTRA_DIST = auto_increment.res auto_increment.tst \ insert_and_repair.pl \ grant.pl grant.res test_delayed_insert.pl \ pmail.pl mail_to_db.pl table_types.pl \ - udf_test udf_test.res myisam-big-rows.tst \ + myisam-big-rows.tst \ CMakeLists.txt bin_PROGRAMS = mysql_client_test diff --git a/tests/udf_test b/tests/udf_test deleted file mode 100644 index 15ad640f984..00000000000 --- a/tests/udf_test +++ /dev/null @@ -1,33 +0,0 @@ -# -# For this script to work, you need to compile and install the -# udf_example script ! -# - -CREATE FUNCTION metaphon RETURNS STRING SONAME "udf_example.so"; -CREATE FUNCTION myfunc_double RETURNS REAL SONAME "udf_example.so"; -CREATE FUNCTION myfunc_int RETURNS INTEGER SONAME "udf_example.so"; -CREATE FUNCTION lookup RETURNS STRING SONAME "udf_example.so"; -CREATE FUNCTION reverse_lookup RETURNS STRING SONAME "udf_example.so"; -CREATE AGGREGATE FUNCTION avgcost RETURNS REAL SONAME "udf_example.so"; -CREATE FUNCTION myfunc_argument_name RETURNS STRING SONAME "udf_example.so"; - -select metaphon("hello"); -select myfunc_double("hello","world"); -select myfunc_int(1,2,3),myfunc_int("1","11","111"); -select lookup("localhost"); -select reverse_lookup("127.0.0.1"); - -create temporary table t1 (a int,b double); -insert into t1 values (1,5),(1,4),(2,8),(3,9),(4,11); -select avgcost(a,b) from t1; -select avgcost(a,b) from t1 group by a; -select a, myfunc_argument_name(a), myfunc_argument_name(a as b) from t1; -drop table t1; - -DROP FUNCTION metaphon; -DROP FUNCTION myfunc_double; -DROP FUNCTION myfunc_int; -DROP FUNCTION lookup; -DROP FUNCTION reverse_lookup; -DROP FUNCTION avgcost; -DROP FUNCTION myfunc_argument_name; diff --git a/tests/udf_test.res b/tests/udf_test.res deleted file mode 100644 index de9e9969f3a..00000000000 --- a/tests/udf_test.res +++ /dev/null @@ -1,175 +0,0 @@ --------------- -CREATE FUNCTION metaphon RETURNS STRING SONAME "udf_example.so" --------------- - -Query OK, 0 rows affected - --------------- -CREATE FUNCTION myfunc_double RETURNS REAL SONAME "udf_example.so" --------------- - -Query OK, 0 rows affected - --------------- -CREATE FUNCTION myfunc_int RETURNS INTEGER SONAME "udf_example.so" --------------- - -Query OK, 0 rows affected - --------------- -CREATE FUNCTION lookup RETURNS STRING SONAME "udf_example.so" --------------- - -Query OK, 0 rows affected - --------------- -CREATE FUNCTION reverse_lookup RETURNS STRING SONAME "udf_example.so" --------------- - -Query OK, 0 rows affected - --------------- -CREATE AGGREGATE FUNCTION avgcost RETURNS REAL SONAME "udf_example.so" --------------- - -Query OK, 0 rows affected - --------------- -CREATE FUNCTION myfunc_argument_name RETURNS STRING SONAME "udf_example.so" --------------- - -Query OK, 0 rows affected - --------------- -select metaphon("hello") --------------- - -metaphon("hello") -HL -1 row in set - --------------- -select myfunc_double("hello","world") --------------- - -myfunc_double("hello","world") -108.40 -1 row in set - --------------- -select myfunc_int(1,2,3),myfunc_int("1","11","111") --------------- - -myfunc_int(1,2,3) myfunc_int("1","11","111") -6 6 -1 row in set - --------------- -select lookup("localhost") --------------- - -lookup("localhost") -127.0.0.1 -1 row in set - --------------- 
-select reverse_lookup("127.0.0.1") --------------- - -reverse_lookup("127.0.0.1") -localhost -1 row in set - --------------- -create temporary table t1 (a int,b double) --------------- - -Query OK, 0 rows affected - --------------- -insert into t1 values (1,5),(1,4),(2,8),(3,9),(4,11) --------------- - -Query OK, 5 rows affected -Records: 0 Duplicates: 5 Warnings: 0 - --------------- -select avgcost(a,b) from t1 --------------- - -avgcost(a,b) -8.7273 -1 row in set - --------------- -select avgcost(a,b) from t1 group by a --------------- - -avgcost(a,b) -4.5000 -8.0000 -9.0000 -11.0000 -4 rows in set - --------------- -select a, myfunc_argument_name(a) from t1; --------------- - -a myfunc_argument_name(a) myfunc_argument_name(a as b) -1 a b -1 a b -2 a b -3 a b -4 a b -5 rows in set - --------------- -drop table t1 --------------- - -Query OK, 0 rows affected - --------------- -DROP FUNCTION metaphon --------------- - -Query OK, 0 rows affected - --------------- -DROP FUNCTION myfunc_double --------------- - -Query OK, 0 rows affected - --------------- -DROP FUNCTION myfunc_int --------------- - -Query OK, 0 rows affected - --------------- -DROP FUNCTION lookup --------------- - -Query OK, 0 rows affected - --------------- -DROP FUNCTION reverse_lookup --------------- - -Query OK, 0 rows affected - --------------- -DROP FUNCTION avgcost --------------- - -Query OK, 0 rows affected - --------------- -DROP FUNCTION myfunc_argument_name; --------------- - -Query OK, 0 rows affected - -Bye diff --git a/win/build-vs71.bat b/win/build-vs71.bat index 159b1ec97d1..159b1ec97d1 100644..100755 --- a/win/build-vs71.bat +++ b/win/build-vs71.bat diff --git a/win/build-vs8.bat b/win/build-vs8.bat index ff0eeb0a8cb..ff0eeb0a8cb 100644..100755 --- a/win/build-vs8.bat +++ b/win/build-vs8.bat |
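
For readers of the HugoCalculator.cpp hunk above, here is a minimal standalone sketch of the length-selection logic that the new calc_len() helper applies to Varchar/Varbinary test values. It is not part of the patch: calc_len_sketch(), main(), and the random-value generation are illustrative assumptions; only the bucket thresholds and the minlen/maxlen arithmetic mirror the diff.

#include <cstdio>
#include <cstdlib>

typedef unsigned int Uint32;

/* Mirrors calc_len() from the diff: pick a minimum length bucket from the
   high bits of the random value, then spread the remainder between minlen
   and maxlen. */
static Uint32 calc_len_sketch(Uint32 rvalue, int maxlen)
{
  Uint32 minlen = 25;

  if ((rvalue >> 16) < 4096)
    minlen = 15;
  else if ((rvalue >> 16) < 8192)
    minlen = 25;
  else if ((rvalue >> 16) < 16384)
    minlen = 35;
  else
    minlen = 64;

  if ((Uint32)maxlen <= minlen)
    return maxlen;   /* short columns keep their full declared length */

  return minlen + (rvalue % (maxlen - minlen));
}

int main()
{
  /* With a uniform 32-bit rvalue, (rvalue >> 16) lands in the ">= 16384"
     bucket about 75% of the time, so long columns are mostly filled with
     64 bytes or more. */
  for (int i = 0; i < 5; i++)
  {
    Uint32 r = ((Uint32)rand() << 16) ^ (Uint32)rand();  /* sketch-only randomness */
    printf("len(maxlen=255) = %u\n", calc_len_sketch(r, 255));
  }
  return 0;
}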