87 files changed, 4189 insertions, 1061 deletions
diff --git a/cmake/abi_check.cmake b/cmake/abi_check.cmake
index 98877be9351..e4a783f3de5 100644
--- a/cmake/abi_check.cmake
+++ b/cmake/abi_check.cmake
@@ -38,7 +38,6 @@ IF(CMAKE_C_COMPILER_ID MATCHES "GNU|Clang" AND RUN_ABI_CHECK)
 SET(API_PREPROCESSOR_HEADER
 ${CMAKE_SOURCE_DIR}/include/mysql/plugin_audit.h
 ${CMAKE_SOURCE_DIR}/include/mysql/plugin_ftparser.h
- ${CMAKE_SOURCE_DIR}/include/mysql.h
 ${CMAKE_SOURCE_DIR}/include/mysql/psi/psi_abi_v1.h
 ${CMAKE_SOURCE_DIR}/include/mysql/psi/psi_abi_v2.h
 ${CMAKE_SOURCE_DIR}/include/mysql/client_plugin.h
diff --git a/include/mariadb_capi_rename.h b/include/mariadb_capi_rename.h
new file mode 100644
index 00000000000..58e16fdc0c0
--- /dev/null
+++ b/include/mariadb_capi_rename.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2022, MariaDB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA */
+
+/* Renaming C API symbols inside the server
+ * client.c defines a number of functions from the C API that are used in replication, in a number of storage engine plugins, and in mariadb-backup.
+ * That can cause a problem if a plugin loads libmariadb/libmysql, or a library that depends on them; the known case is the ODBC driver.
+ * Thus this header renames those functions for internal use.
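The renaming the new header performs is plain preprocessor token pasting: after the header is included, every textual use of a listed C API name inside the server is turned into a server_-prefixed name before compilation, so the server's private copies no longer collide with the symbols exported by a libmariadb/libmysql that a plugin drags in. Below is a minimal standalone sketch of that trick, not part of the patch; the file name, the simplified signature, and the printf demo are made up for illustration, while the two #define lines mirror the pattern added in include/mariadb_capi_rename.h.

  /* rename_sketch.c -- illustration only, not part of the patch. */
  #include <stdio.h>

  #define MARIADB_ADD_PREFIX(_SYMBOL) server_##_SYMBOL
  #define mysql_init MARIADB_ADD_PREFIX(mysql_init)

  /* Because of the #define above, this really defines server_mysql_init(),
     so it cannot clash with a mysql_init() exported by a client library. */
  int mysql_init(void)
  {
    return 42;
  }

  int main(void)
  {
    /* The preprocessor rewrites this call to server_mysql_init(). */
    printf("mysql_init() -> %d\n", mysql_init());
    return 0;
  }

Since the substitution happens at preprocessing time, no linker-level tricks are needed: the server simply stops emitting or referencing the original mysql_* names for these functions, except in embedded-library and dynamic-plugin builds, where the header's #if guard makes it a no-op.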
+ */ + +#ifndef MARIADB_CAPI_RENAME_INCLUDED +#define MARIADB_CAPI_RENAME_INCLUDED + +#if !defined(EMBEDDED_LIBRARY) && !defined(MYSQL_DYNAMIC_PLUGIN) + +#define MARIADB_ADD_PREFIX(_SYMBOL) server_##_SYMBOL +#define mysql_real_connect MARIADB_ADD_PREFIX(mysql_real_connect) +#define mysql_init MARIADB_ADD_PREFIX(mysql_init) +#define mysql_close MARIADB_ADD_PREFIX(mysql_close) +#define mysql_options MARIADB_ADD_PREFIX(mysql_options) +#define mysql_load_plugin MARIADB_ADD_PREFIX(mysql_load_plugin) +#define mysql_load_plugin_v MARIADB_ADD_PREFIX(mysql_load_plugin_v) +#define mysql_client_find_plugin MARIADB_ADD_PREFIX(mysql_client_find_plugin) +#define mysql_real_query MARIADB_ADD_PREFIX(mysql_real_query) +#define mysql_send_query MARIADB_ADD_PREFIX(mysql_send_query) +#define mysql_free_result MARIADB_ADD_PREFIX(mysql_free_result) +#define mysql_get_socket MARIADB_ADD_PREFIX(mysql_get_socket) +#define mysql_set_character_set MARIADB_ADD_PREFIX(mysql_set_character_set) +#define mysql_get_server_version MARIADB_ADD_PREFIX(mysql_get_server_version) +#define mysql_error MARIADB_ADD_PREFIX(mysql_error) +#define mysql_errno MARIADB_ADD_PREFIX(mysql_errno) +#define mysql_num_fields MARIADB_ADD_PREFIX(mysql_num_fields) +#define mysql_num_rows MARIADB_ADD_PREFIX(mysql_num_rows) +#define mysql_options4 MARIADB_ADD_PREFIX(mysql_options4) +#define mysql_fetch_lengths MARIADB_ADD_PREFIX(mysql_fetch_lengths) +#define mysql_fetch_row MARIADB_ADD_PREFIX(mysql_fetch_row) +#define mysql_affected_rows MARIADB_ADD_PREFIX(mysql_affected_rows) +#define mysql_store_result MARIADB_ADD_PREFIX(mysql_store_result) +#define mysql_select_db MARIADB_ADD_PREFIX(mysql_select_db) +#define mysql_get_ssl_cipher MARIADB_ADD_PREFIX(mysql_get_ssl_cipher) +#define mysql_ssl_set MARIADB_ADD_PREFIX(mysql_ssl_set) +#define mysql_client_register_plugin MARIADB_ADD_PREFIX(mysql_client_register_plugin) + +#endif // !EMBEDDED_LIBRARY && !MYSQL_DYNAMIC_PLUGIN + +#endif // !MARIADB_CAPI_RENAME_INCLUDED diff --git a/include/mysql.h b/include/mysql.h index a42ae085e0c..46ddc4b9ed9 100644 --- a/include/mysql.h +++ b/include/mysql.h @@ -63,6 +63,7 @@ typedef int my_socket; #endif /* my_socket_defined */ #endif /* MY_GLOBAL_INCLUDED */ +#include "mariadb_capi_rename.h" #include "mysql_version.h" #include "mysql_com.h" #include "mysql_time.h" diff --git a/include/mysql.h.pp b/include/mysql.h.pp deleted file mode 100644 index 81b6206ae3e..00000000000 --- a/include/mysql.h.pp +++ /dev/null @@ -1,795 +0,0 @@ -extern "C" { -typedef char my_bool; -typedef int my_socket; -enum enum_server_command -{ - COM_SLEEP, COM_QUIT, COM_INIT_DB, COM_QUERY, COM_FIELD_LIST, - COM_CREATE_DB, COM_DROP_DB, COM_REFRESH, COM_SHUTDOWN, COM_STATISTICS, - COM_PROCESS_INFO, COM_CONNECT, COM_PROCESS_KILL, COM_DEBUG, COM_PING, - COM_TIME, COM_DELAYED_INSERT, COM_CHANGE_USER, COM_BINLOG_DUMP, - COM_TABLE_DUMP, COM_CONNECT_OUT, COM_REGISTER_SLAVE, - COM_STMT_PREPARE, COM_STMT_EXECUTE, COM_STMT_SEND_LONG_DATA, COM_STMT_CLOSE, - COM_STMT_RESET, COM_SET_OPTION, COM_STMT_FETCH, COM_DAEMON, - COM_UNIMPLEMENTED, - COM_RESET_CONNECTION, - COM_MDB_GAP_BEG, - COM_MDB_GAP_END=249, - COM_STMT_BULK_EXECUTE=250, - COM_SLAVE_WORKER=251, - COM_SLAVE_IO=252, - COM_SLAVE_SQL=253, - COM_RESERVED_1=254, - COM_END=255 -}; -enum enum_indicator_type -{ - STMT_INDICATOR_NONE= 0, - STMT_INDICATOR_NULL, - STMT_INDICATOR_DEFAULT, - STMT_INDICATOR_IGNORE -}; -enum mariadb_field_attr_t -{ - MARIADB_FIELD_ATTR_DATA_TYPE_NAME= 0, - MARIADB_FIELD_ATTR_FORMAT_NAME= 1 -}; -struct st_vio; -typedef struct st_vio 
Vio; -typedef struct st_net { - Vio *vio; - unsigned char *buff,*buff_end,*write_pos,*read_pos; - my_socket fd; - unsigned long remain_in_buf,length, buf_length, where_b; - unsigned long max_packet,max_packet_size; - unsigned int pkt_nr,compress_pkt_nr; - unsigned int write_timeout, read_timeout, retry_count; - int fcntl; - unsigned int *return_status; - unsigned char reading_or_writing; - char save_char; - char net_skip_rest_factor; - my_bool thread_specific_malloc; - unsigned char compress; - my_bool unused3; - void *thd; - unsigned int last_errno; - unsigned char error; - my_bool unused4; - my_bool unused5; - char last_error[512]; - char sqlstate[5 +1]; - void *extension; -} NET; -enum enum_field_types { MYSQL_TYPE_DECIMAL, MYSQL_TYPE_TINY, - MYSQL_TYPE_SHORT, MYSQL_TYPE_LONG, - MYSQL_TYPE_FLOAT, MYSQL_TYPE_DOUBLE, - MYSQL_TYPE_NULL, MYSQL_TYPE_TIMESTAMP, - MYSQL_TYPE_LONGLONG,MYSQL_TYPE_INT24, - MYSQL_TYPE_DATE, MYSQL_TYPE_TIME, - MYSQL_TYPE_DATETIME, MYSQL_TYPE_YEAR, - MYSQL_TYPE_NEWDATE, MYSQL_TYPE_VARCHAR, - MYSQL_TYPE_BIT, - MYSQL_TYPE_TIMESTAMP2, - MYSQL_TYPE_DATETIME2, - MYSQL_TYPE_TIME2, - MYSQL_TYPE_BLOB_COMPRESSED= 140, - MYSQL_TYPE_VARCHAR_COMPRESSED= 141, - MYSQL_TYPE_NEWDECIMAL=246, - MYSQL_TYPE_ENUM=247, - MYSQL_TYPE_SET=248, - MYSQL_TYPE_TINY_BLOB=249, - MYSQL_TYPE_MEDIUM_BLOB=250, - MYSQL_TYPE_LONG_BLOB=251, - MYSQL_TYPE_BLOB=252, - MYSQL_TYPE_VAR_STRING=253, - MYSQL_TYPE_STRING=254, - MYSQL_TYPE_GEOMETRY=255 -}; -enum mysql_enum_shutdown_level { - SHUTDOWN_DEFAULT = 0, - SHUTDOWN_WAIT_CONNECTIONS= (unsigned char)(1 << 0), - SHUTDOWN_WAIT_TRANSACTIONS= (unsigned char)(1 << 1), - SHUTDOWN_WAIT_UPDATES= (unsigned char)(1 << 3), - SHUTDOWN_WAIT_ALL_BUFFERS= ((unsigned char)(1 << 3) << 1), - SHUTDOWN_WAIT_CRITICAL_BUFFERS= ((unsigned char)(1 << 3) << 1) + 1 -}; -enum enum_cursor_type -{ - CURSOR_TYPE_NO_CURSOR= 0, - CURSOR_TYPE_READ_ONLY= 1, - CURSOR_TYPE_FOR_UPDATE= 2, - CURSOR_TYPE_SCROLLABLE= 4 -}; -enum enum_mysql_set_option -{ - MYSQL_OPTION_MULTI_STATEMENTS_ON, - MYSQL_OPTION_MULTI_STATEMENTS_OFF -}; -enum enum_session_state_type -{ - SESSION_TRACK_SYSTEM_VARIABLES, - SESSION_TRACK_SCHEMA, - SESSION_TRACK_STATE_CHANGE, - SESSION_TRACK_GTIDS, - SESSION_TRACK_TRANSACTION_CHARACTERISTICS, - SESSION_TRACK_TRANSACTION_STATE, - SESSION_TRACK_always_at_the_end -}; -extern "C" { -my_bool my_net_init(NET *net, Vio* vio, void *thd, unsigned int my_flags); -void my_net_local_init(NET *net); -void net_end(NET *net); -void net_clear(NET *net, my_bool clear_buffer); -my_bool net_realloc(NET *net, size_t length); -my_bool net_flush(NET *net); -my_bool my_net_write(NET *net,const unsigned char *packet, size_t len); -my_bool net_write_command(NET *net,unsigned char command, - const unsigned char *header, size_t head_len, - const unsigned char *packet, size_t len); -int net_real_write(NET *net,const unsigned char *packet, size_t len); -unsigned long my_net_read_packet(NET *net, my_bool read_from_server); -unsigned long my_net_read_packet_reallen(NET *net, my_bool read_from_server, - unsigned long* reallen); -struct sockaddr; -int my_connect(my_socket s, const struct sockaddr *name, unsigned int namelen, - unsigned int timeout); -struct my_rnd_struct; -} -enum Item_result -{ - STRING_RESULT=0, REAL_RESULT, INT_RESULT, ROW_RESULT, DECIMAL_RESULT, - TIME_RESULT -}; -typedef struct st_udf_args -{ - unsigned int arg_count; - enum Item_result *arg_type; - char **args; - unsigned long *lengths; - char *maybe_null; - const char **attributes; - unsigned long *attribute_lengths; - void 
*extension; -} UDF_ARGS; -typedef struct st_udf_init -{ - my_bool maybe_null; - unsigned int decimals; - unsigned long max_length; - char *ptr; - my_bool const_item; - void *extension; -} UDF_INIT; -extern "C" { -void create_random_string(char *to, unsigned int length, - struct my_rnd_struct *rand_st); -void hash_password(unsigned long *to, const char *password, unsigned int password_len); -void make_scrambled_password_323(char *to, const char *password); -void scramble_323(char *to, const char *message, const char *password); -my_bool check_scramble_323(const unsigned char *reply, const char *message, - unsigned long *salt); -void get_salt_from_password_323(unsigned long *res, const char *password); -void make_scrambled_password(char *to, const char *password); -void scramble(char *to, const char *message, const char *password); -my_bool check_scramble(const unsigned char *reply, const char *message, - const unsigned char *hash_stage2); -void get_salt_from_password(unsigned char *res, const char *password); -char *octet2hex(char *to, const char *str, size_t len); -char *get_tty_password(const char *opt_message); -void get_tty_password_buff(const char *opt_message, char *to, size_t length); -const char *mysql_errno_to_sqlstate(unsigned int mysql_errno); -my_bool my_thread_init(void); -void my_thread_end(void); -} -typedef long my_time_t; -enum enum_mysql_timestamp_type -{ - MYSQL_TIMESTAMP_NONE= -2, MYSQL_TIMESTAMP_ERROR= -1, - MYSQL_TIMESTAMP_DATE= 0, MYSQL_TIMESTAMP_DATETIME= 1, MYSQL_TIMESTAMP_TIME= 2 -}; -typedef struct st_mysql_time -{ - unsigned int year, month, day, hour, minute, second; - unsigned long second_part; - my_bool neg; - enum enum_mysql_timestamp_type time_type; -} MYSQL_TIME; -extern "C" { -typedef struct st_list { - struct st_list *prev,*next; - void *data; -} LIST; -typedef int (*list_walk_action)(void *,void *); -extern LIST *list_add(LIST *root,LIST *element); -extern LIST *list_delete(LIST *root,LIST *element); -extern LIST *list_cons(void *data,LIST *root); -extern LIST *list_reverse(LIST *root); -extern void list_free(LIST *root,unsigned int free_data); -extern unsigned int list_length(LIST *); -extern int list_walk(LIST *,list_walk_action action,unsigned char * argument); -} -extern unsigned int mariadb_deinitialize_ssl; -extern unsigned int mysql_port; -extern char *mysql_unix_port; -typedef struct st_mysql_const_lex_string MARIADB_CONST_STRING; -typedef struct st_mysql_field { - char *name; - char *org_name; - char *table; - char *org_table; - char *db; - char *catalog; - char *def; - unsigned long length; - unsigned long max_length; - unsigned int name_length; - unsigned int org_name_length; - unsigned int table_length; - unsigned int org_table_length; - unsigned int db_length; - unsigned int catalog_length; - unsigned int def_length; - unsigned int flags; - unsigned int decimals; - unsigned int charsetnr; - enum enum_field_types type; - void *extension; -} MYSQL_FIELD; -typedef char **MYSQL_ROW; -typedef unsigned int MYSQL_FIELD_OFFSET; -typedef unsigned long long my_ulonglong; -extern "C" { -typedef unsigned int PSI_memory_key; -} -extern "C" { -typedef struct st_used_mem -{ - struct st_used_mem *next; - size_t left; - size_t size; -} USED_MEM; -typedef struct st_mem_root -{ - USED_MEM *free; - USED_MEM *used; - USED_MEM *pre_alloc; - size_t min_malloc; - size_t block_size; - unsigned int block_num; - unsigned short first_block_usage; - unsigned short flags; - void (*error_handler)(void); - PSI_memory_key psi_key; -} MEM_ROOT; -} -typedef struct st_typelib 
{ - unsigned int count; - const char *name; - const char **type_names; - unsigned int *type_lengths; -} TYPELIB; -extern my_ulonglong find_typeset(const char *x, TYPELIB *typelib, - int *error_position); -extern int find_type_with_warning(const char *x, TYPELIB *typelib, - const char *option); -extern int find_type(const char *x, const TYPELIB *typelib, unsigned int flags); -extern void make_type(char *to,unsigned int nr,TYPELIB *typelib); -extern const char *get_type(TYPELIB *typelib,unsigned int nr); -extern TYPELIB *copy_typelib(MEM_ROOT *root, const TYPELIB *from); -extern TYPELIB sql_protocol_typelib; -my_ulonglong find_set_from_flags(const TYPELIB *lib, unsigned int default_name, - my_ulonglong cur_set, my_ulonglong default_set, - const char *str, unsigned int length, - char **err_pos, unsigned int *err_len); -typedef struct st_mysql_rows { - struct st_mysql_rows *next; - MYSQL_ROW data; - unsigned long length; -} MYSQL_ROWS; -typedef MYSQL_ROWS *MYSQL_ROW_OFFSET; -typedef struct embedded_query_result EMBEDDED_QUERY_RESULT; -typedef struct st_mysql_data { - MYSQL_ROWS *data; - struct embedded_query_result *embedded_info; - MEM_ROOT alloc; - my_ulonglong rows; - unsigned int fields; - void *extension; -} MYSQL_DATA; -enum mysql_option -{ - MYSQL_OPT_CONNECT_TIMEOUT, MYSQL_OPT_COMPRESS, MYSQL_OPT_NAMED_PIPE, - MYSQL_INIT_COMMAND, MYSQL_READ_DEFAULT_FILE, MYSQL_READ_DEFAULT_GROUP, - MYSQL_SET_CHARSET_DIR, MYSQL_SET_CHARSET_NAME, MYSQL_OPT_LOCAL_INFILE, - MYSQL_OPT_PROTOCOL, MYSQL_SHARED_MEMORY_BASE_NAME, MYSQL_OPT_READ_TIMEOUT, - MYSQL_OPT_WRITE_TIMEOUT, MYSQL_OPT_USE_RESULT, - MYSQL_OPT_USE_REMOTE_CONNECTION, MYSQL_OPT_USE_EMBEDDED_CONNECTION, - MYSQL_OPT_GUESS_CONNECTION, MYSQL_SET_CLIENT_IP, MYSQL_SECURE_AUTH, - MYSQL_REPORT_DATA_TRUNCATION, MYSQL_OPT_RECONNECT, - MYSQL_OPT_SSL_VERIFY_SERVER_CERT, MYSQL_PLUGIN_DIR, MYSQL_DEFAULT_AUTH, - MYSQL_OPT_BIND, - MYSQL_OPT_SSL_KEY, MYSQL_OPT_SSL_CERT, - MYSQL_OPT_SSL_CA, MYSQL_OPT_SSL_CAPATH, MYSQL_OPT_SSL_CIPHER, - MYSQL_OPT_SSL_CRL, MYSQL_OPT_SSL_CRLPATH, - MYSQL_OPT_CONNECT_ATTR_RESET, MYSQL_OPT_CONNECT_ATTR_ADD, - MYSQL_OPT_CONNECT_ATTR_DELETE, - MYSQL_SERVER_PUBLIC_KEY, - MYSQL_ENABLE_CLEARTEXT_PLUGIN, - MYSQL_OPT_CAN_HANDLE_EXPIRED_PASSWORDS, - MYSQL_PROGRESS_CALLBACK=5999, - MYSQL_OPT_NONBLOCK, - MYSQL_OPT_USE_THREAD_SPECIFIC_MEMORY -}; -struct st_mysql_options_extention; -struct st_mysql_options { - unsigned int connect_timeout, read_timeout, write_timeout; - unsigned int port, protocol; - unsigned long client_flag; - char *host,*user,*password,*unix_socket,*db; - struct st_dynamic_array *init_commands; - char *my_cnf_file,*my_cnf_group, *charset_dir, *charset_name; - char *ssl_key; - char *ssl_cert; - char *ssl_ca; - char *ssl_capath; - char *ssl_cipher; - char *shared_memory_base_name; - unsigned long max_allowed_packet; - my_bool use_ssl; - my_bool compress,named_pipe; - my_bool use_thread_specific_memory; - my_bool unused2; - my_bool unused3; - my_bool unused4; - enum mysql_option methods_to_use; - char *client_ip; - my_bool secure_auth; - my_bool report_data_truncation; - int (*local_infile_init)(void **, const char *, void *); - int (*local_infile_read)(void *, char *, unsigned int); - void (*local_infile_end)(void *); - int (*local_infile_error)(void *, char *, unsigned int); - void *local_infile_userdata; - struct st_mysql_options_extention *extension; -}; -enum mysql_status -{ - MYSQL_STATUS_READY, MYSQL_STATUS_GET_RESULT, MYSQL_STATUS_USE_RESULT, - MYSQL_STATUS_STATEMENT_GET_RESULT -}; -enum mysql_protocol_type -{ - 
MYSQL_PROTOCOL_DEFAULT, MYSQL_PROTOCOL_TCP, MYSQL_PROTOCOL_SOCKET, - MYSQL_PROTOCOL_PIPE, MYSQL_PROTOCOL_MEMORY -}; -typedef struct character_set -{ - unsigned int number; - unsigned int state; - const char *csname; - const char *name; - const char *comment; - const char *dir; - unsigned int mbminlen; - unsigned int mbmaxlen; -} MY_CHARSET_INFO; -struct st_mysql_methods; -struct st_mysql_stmt; -typedef struct st_mysql -{ - NET net; - unsigned char *connector_fd; - char *host,*user,*passwd,*unix_socket,*server_version,*host_info; - char *info, *db; - const struct charset_info_st *charset; - MYSQL_FIELD *fields; - MEM_ROOT field_alloc; - my_ulonglong affected_rows; - my_ulonglong insert_id; - my_ulonglong extra_info; - unsigned long thread_id; - unsigned long packet_length; - unsigned int port; - unsigned long client_flag,server_capabilities; - unsigned int protocol_version; - unsigned int field_count; - unsigned int server_status; - unsigned int server_language; - unsigned int warning_count; - struct st_mysql_options options; - enum mysql_status status; - my_bool free_me; - my_bool reconnect; - char scramble[20 +1]; - my_bool auto_local_infile; - void *unused2, *unused3, *unused4, *unused5; - LIST *stmts; - const struct st_mysql_methods *methods; - void *thd; - my_bool *unbuffered_fetch_owner; - char *info_buffer; - void *extension; -} MYSQL; -typedef struct st_mysql_res { - my_ulonglong row_count; - MYSQL_FIELD *fields; - MYSQL_DATA *data; - MYSQL_ROWS *data_cursor; - unsigned long *lengths; - MYSQL *handle; - const struct st_mysql_methods *methods; - MYSQL_ROW row; - MYSQL_ROW current_row; - MEM_ROOT field_alloc; - unsigned int field_count, current_field; - my_bool eof; - my_bool unbuffered_fetch_cancelled; - void *extension; -} MYSQL_RES; -typedef struct st_mysql_parameters -{ - unsigned long *p_max_allowed_packet; - unsigned long *p_net_buffer_length; - void *extension; -} MYSQL_PARAMETERS; -int mysql_server_init(int argc, char **argv, char **groups); -void mysql_server_end(void); -MYSQL_PARAMETERS * mysql_get_parameters(void); -my_bool mysql_thread_init(void); -void mysql_thread_end(void); -my_ulonglong mysql_num_rows(MYSQL_RES *res); -unsigned int mysql_num_fields(MYSQL_RES *res); -my_bool mysql_eof(MYSQL_RES *res); -MYSQL_FIELD * mysql_fetch_field_direct(MYSQL_RES *res, - unsigned int fieldnr); -MYSQL_FIELD * mysql_fetch_fields(MYSQL_RES *res); -MYSQL_ROW_OFFSET mysql_row_tell(MYSQL_RES *res); -MYSQL_FIELD_OFFSET mysql_field_tell(MYSQL_RES *res); -int mariadb_field_attr(MARIADB_CONST_STRING *attr, - const MYSQL_FIELD *field, - enum mariadb_field_attr_t type); -unsigned int mysql_field_count(MYSQL *mysql); -my_ulonglong mysql_affected_rows(MYSQL *mysql); -my_ulonglong mysql_insert_id(MYSQL *mysql); -unsigned int mysql_errno(MYSQL *mysql); -const char * mysql_error(MYSQL *mysql); -const char * mysql_sqlstate(MYSQL *mysql); -unsigned int mysql_warning_count(MYSQL *mysql); -const char * mysql_info(MYSQL *mysql); -unsigned long mysql_thread_id(MYSQL *mysql); -const char * mysql_character_set_name(MYSQL *mysql); -int mysql_set_character_set(MYSQL *mysql, const char *csname); -int mysql_set_character_set_start(int *ret, MYSQL *mysql, - const char *csname); -int mysql_set_character_set_cont(int *ret, MYSQL *mysql, - int status); -MYSQL * mysql_init(MYSQL *mysql); -my_bool mysql_ssl_set(MYSQL *mysql, const char *key, - const char *cert, const char *ca, - const char *capath, const char *cipher); -const char * mysql_get_ssl_cipher(MYSQL *mysql); -my_bool mysql_change_user(MYSQL *mysql, const 
char *user, - const char *passwd, const char *db); -int mysql_change_user_start(my_bool *ret, MYSQL *mysql, - const char *user, - const char *passwd, - const char *db); -int mysql_change_user_cont(my_bool *ret, MYSQL *mysql, - int status); -MYSQL * mysql_real_connect(MYSQL *mysql, const char *host, - const char *user, - const char *passwd, - const char *db, - unsigned int port, - const char *unix_socket, - unsigned long clientflag); -int mysql_real_connect_start(MYSQL **ret, MYSQL *mysql, - const char *host, - const char *user, - const char *passwd, - const char *db, - unsigned int port, - const char *unix_socket, - unsigned long clientflag); -int mysql_real_connect_cont(MYSQL **ret, MYSQL *mysql, - int status); -int mysql_select_db(MYSQL *mysql, const char *db); -int mysql_select_db_start(int *ret, MYSQL *mysql, - const char *db); -int mysql_select_db_cont(int *ret, MYSQL *mysql, - int status); -int mysql_query(MYSQL *mysql, const char *q); -int mysql_query_start(int *ret, MYSQL *mysql, - const char *q); -int mysql_query_cont(int *ret, MYSQL *mysql, - int status); -int mysql_send_query(MYSQL *mysql, const char *q, - unsigned long length); -int mysql_send_query_start(int *ret, MYSQL *mysql, - const char *q, - unsigned long length); -int mysql_send_query_cont(int *ret, MYSQL *mysql, - int status); -int mysql_real_query(MYSQL *mysql, const char *q, - unsigned long length); -int mysql_real_query_start(int *ret, MYSQL *mysql, - const char *q, - unsigned long length); -int mysql_real_query_cont(int *ret, MYSQL *mysql, - int status); -MYSQL_RES * mysql_store_result(MYSQL *mysql); -int mysql_store_result_start(MYSQL_RES **ret, MYSQL *mysql); -int mysql_store_result_cont(MYSQL_RES **ret, MYSQL *mysql, - int status); -MYSQL_RES * mysql_use_result(MYSQL *mysql); -void mysql_get_character_set_info(MYSQL *mysql, - MY_CHARSET_INFO *charset); -void -mysql_set_local_infile_handler(MYSQL *mysql, - int (*local_infile_init)(void **, const char *, - void *), - int (*local_infile_read)(void *, char *, - unsigned int), - void (*local_infile_end)(void *), - int (*local_infile_error)(void *, char*, - unsigned int), - void *); -void -mysql_set_local_infile_default(MYSQL *mysql); -int mysql_shutdown(MYSQL *mysql, - enum mysql_enum_shutdown_level - shutdown_level); -int mysql_shutdown_start(int *ret, MYSQL *mysql, - enum mysql_enum_shutdown_level - shutdown_level); -int mysql_shutdown_cont(int *ret, MYSQL *mysql, - int status); -int mysql_dump_debug_info(MYSQL *mysql); -int mysql_dump_debug_info_start(int *ret, MYSQL *mysql); -int mysql_dump_debug_info_cont(int *ret, MYSQL *mysql, - int status); -int mysql_refresh(MYSQL *mysql, - unsigned int refresh_options); -int mysql_refresh_start(int *ret, MYSQL *mysql, - unsigned int refresh_options); -int mysql_refresh_cont(int *ret, MYSQL *mysql, int status); -int mysql_kill(MYSQL *mysql,unsigned long pid); -int mysql_kill_start(int *ret, MYSQL *mysql, - unsigned long pid); -int mysql_kill_cont(int *ret, MYSQL *mysql, int status); -int mysql_set_server_option(MYSQL *mysql, - enum enum_mysql_set_option - option); -int mysql_set_server_option_start(int *ret, MYSQL *mysql, - enum enum_mysql_set_option - option); -int mysql_set_server_option_cont(int *ret, MYSQL *mysql, - int status); -int mysql_ping(MYSQL *mysql); -int mysql_ping_start(int *ret, MYSQL *mysql); -int mysql_ping_cont(int *ret, MYSQL *mysql, int status); -const char * mysql_stat(MYSQL *mysql); -int mysql_stat_start(const char **ret, MYSQL *mysql); -int mysql_stat_cont(const char **ret, MYSQL *mysql, - int status); 
-const char * mysql_get_server_info(MYSQL *mysql); -const char * mysql_get_server_name(MYSQL *mysql); -const char * mysql_get_client_info(void); -unsigned long mysql_get_client_version(void); -const char * mysql_get_host_info(MYSQL *mysql); -unsigned long mysql_get_server_version(MYSQL *mysql); -unsigned int mysql_get_proto_info(MYSQL *mysql); -MYSQL_RES * mysql_list_dbs(MYSQL *mysql,const char *wild); -int mysql_list_dbs_start(MYSQL_RES **ret, MYSQL *mysql, - const char *wild); -int mysql_list_dbs_cont(MYSQL_RES **ret, MYSQL *mysql, - int status); -MYSQL_RES * mysql_list_tables(MYSQL *mysql,const char *wild); -int mysql_list_tables_start(MYSQL_RES **ret, MYSQL *mysql, - const char *wild); -int mysql_list_tables_cont(MYSQL_RES **ret, MYSQL *mysql, - int status); -MYSQL_RES * mysql_list_processes(MYSQL *mysql); -int mysql_list_processes_start(MYSQL_RES **ret, - MYSQL *mysql); -int mysql_list_processes_cont(MYSQL_RES **ret, MYSQL *mysql, - int status); -int mysql_options(MYSQL *mysql,enum mysql_option option, - const void *arg); -int mysql_options4(MYSQL *mysql,enum mysql_option option, - const void *arg1, const void *arg2); -void mysql_free_result(MYSQL_RES *result); -int mysql_free_result_start(MYSQL_RES *result); -int mysql_free_result_cont(MYSQL_RES *result, int status); -void mysql_data_seek(MYSQL_RES *result, - my_ulonglong offset); -MYSQL_ROW_OFFSET mysql_row_seek(MYSQL_RES *result, - MYSQL_ROW_OFFSET offset); -MYSQL_FIELD_OFFSET mysql_field_seek(MYSQL_RES *result, - MYSQL_FIELD_OFFSET offset); -MYSQL_ROW mysql_fetch_row(MYSQL_RES *result); -int mysql_fetch_row_start(MYSQL_ROW *ret, - MYSQL_RES *result); -int mysql_fetch_row_cont(MYSQL_ROW *ret, MYSQL_RES *result, - int status); -unsigned long * mysql_fetch_lengths(MYSQL_RES *result); -MYSQL_FIELD * mysql_fetch_field(MYSQL_RES *result); -MYSQL_RES * mysql_list_fields(MYSQL *mysql, const char *table, - const char *wild); -int mysql_list_fields_start(MYSQL_RES **ret, MYSQL *mysql, - const char *table, - const char *wild); -int mysql_list_fields_cont(MYSQL_RES **ret, MYSQL *mysql, - int status); -unsigned long mysql_escape_string(char *to,const char *from, - unsigned long from_length); -unsigned long mysql_hex_string(char *to,const char *from, - unsigned long from_length); -unsigned long mysql_real_escape_string(MYSQL *mysql, - char *to,const char *from, - unsigned long length); -void mysql_debug(const char *debug); -void myodbc_remove_escape(MYSQL *mysql,char *name); -unsigned int mysql_thread_safe(void); -my_bool mysql_embedded(void); -my_bool mariadb_connection(MYSQL *mysql); -my_bool mysql_read_query_result(MYSQL *mysql); -int mysql_read_query_result_start(my_bool *ret, - MYSQL *mysql); -int mysql_read_query_result_cont(my_bool *ret, - MYSQL *mysql, int status); -enum enum_mysql_stmt_state -{ - MYSQL_STMT_INIT_DONE= 1, MYSQL_STMT_PREPARE_DONE, MYSQL_STMT_EXECUTE_DONE, - MYSQL_STMT_FETCH_DONE -}; -typedef struct st_mysql_bind -{ - unsigned long *length; - my_bool *is_null; - void *buffer; - my_bool *error; - unsigned char *row_ptr; - void (*store_param_func)(NET *net, struct st_mysql_bind *param); - void (*fetch_result)(struct st_mysql_bind *, MYSQL_FIELD *, - unsigned char **row); - void (*skip_result)(struct st_mysql_bind *, MYSQL_FIELD *, - unsigned char **row); - unsigned long buffer_length; - unsigned long offset; - unsigned long length_value; - unsigned int param_number; - unsigned int pack_length; - enum enum_field_types buffer_type; - my_bool error_value; - my_bool is_unsigned; - my_bool long_data_used; - my_bool 
is_null_value; - void *extension; -} MYSQL_BIND; -struct st_mysql_stmt_extension; -typedef struct st_mysql_stmt -{ - MEM_ROOT mem_root; - LIST list; - MYSQL *mysql; - MYSQL_BIND *params; - MYSQL_BIND *bind; - MYSQL_FIELD *fields; - MYSQL_DATA result; - MYSQL_ROWS *data_cursor; - int (*read_row_func)(struct st_mysql_stmt *stmt, - unsigned char **row); - my_ulonglong affected_rows; - my_ulonglong insert_id; - unsigned long stmt_id; - unsigned long flags; - unsigned long prefetch_rows; - unsigned int server_status; - unsigned int last_errno; - unsigned int param_count; - unsigned int field_count; - enum enum_mysql_stmt_state state; - char last_error[512]; - char sqlstate[5 +1]; - my_bool send_types_to_server; - my_bool bind_param_done; - unsigned char bind_result_done; - my_bool unbuffered_fetch_cancelled; - my_bool update_max_length; - struct st_mysql_stmt_extension *extension; -} MYSQL_STMT; -enum enum_stmt_attr_type -{ - STMT_ATTR_UPDATE_MAX_LENGTH, - STMT_ATTR_CURSOR_TYPE, - STMT_ATTR_PREFETCH_ROWS -}; -MYSQL_STMT * mysql_stmt_init(MYSQL *mysql); -int mysql_stmt_prepare(MYSQL_STMT *stmt, const char *query, - unsigned long length); -int mysql_stmt_prepare_start(int *ret, MYSQL_STMT *stmt, - const char *query, unsigned long length); -int mysql_stmt_prepare_cont(int *ret, MYSQL_STMT *stmt, int status); -int mysql_stmt_execute(MYSQL_STMT *stmt); -int mysql_stmt_execute_start(int *ret, MYSQL_STMT *stmt); -int mysql_stmt_execute_cont(int *ret, MYSQL_STMT *stmt, int status); -int mysql_stmt_fetch(MYSQL_STMT *stmt); -int mysql_stmt_fetch_start(int *ret, MYSQL_STMT *stmt); -int mysql_stmt_fetch_cont(int *ret, MYSQL_STMT *stmt, int status); -int mysql_stmt_fetch_column(MYSQL_STMT *stmt, MYSQL_BIND *bind_arg, - unsigned int column, - unsigned long offset); -int mysql_stmt_store_result(MYSQL_STMT *stmt); -int mysql_stmt_store_result_start(int *ret, MYSQL_STMT *stmt); -int mysql_stmt_store_result_cont(int *ret, MYSQL_STMT *stmt, - int status); -unsigned long mysql_stmt_param_count(MYSQL_STMT * stmt); -my_bool mysql_stmt_attr_set(MYSQL_STMT *stmt, - enum enum_stmt_attr_type attr_type, - const void *attr); -my_bool mysql_stmt_attr_get(MYSQL_STMT *stmt, - enum enum_stmt_attr_type attr_type, - void *attr); -my_bool mysql_stmt_bind_param(MYSQL_STMT * stmt, MYSQL_BIND * bnd); -my_bool mysql_stmt_bind_result(MYSQL_STMT * stmt, MYSQL_BIND * bnd); -my_bool mysql_stmt_close(MYSQL_STMT * stmt); -int mysql_stmt_close_start(my_bool *ret, MYSQL_STMT *stmt); -int mysql_stmt_close_cont(my_bool *ret, MYSQL_STMT * stmt, int status); -my_bool mysql_stmt_reset(MYSQL_STMT * stmt); -int mysql_stmt_reset_start(my_bool *ret, MYSQL_STMT * stmt); -int mysql_stmt_reset_cont(my_bool *ret, MYSQL_STMT *stmt, int status); -my_bool mysql_stmt_free_result(MYSQL_STMT *stmt); -int mysql_stmt_free_result_start(my_bool *ret, MYSQL_STMT *stmt); -int mysql_stmt_free_result_cont(my_bool *ret, MYSQL_STMT *stmt, - int status); -my_bool mysql_stmt_send_long_data(MYSQL_STMT *stmt, - unsigned int param_number, - const char *data, - unsigned long length); -int mysql_stmt_send_long_data_start(my_bool *ret, MYSQL_STMT *stmt, - unsigned int param_number, - const char *data, - unsigned long len); -int mysql_stmt_send_long_data_cont(my_bool *ret, MYSQL_STMT *stmt, - int status); -MYSQL_RES * mysql_stmt_result_metadata(MYSQL_STMT *stmt); -MYSQL_RES * mysql_stmt_param_metadata(MYSQL_STMT *stmt); -unsigned int mysql_stmt_errno(MYSQL_STMT * stmt); -const char * mysql_stmt_error(MYSQL_STMT * stmt); -const char * mysql_stmt_sqlstate(MYSQL_STMT * stmt); 
-MYSQL_ROW_OFFSET mysql_stmt_row_seek(MYSQL_STMT *stmt, - MYSQL_ROW_OFFSET offset); -MYSQL_ROW_OFFSET mysql_stmt_row_tell(MYSQL_STMT *stmt); -void mysql_stmt_data_seek(MYSQL_STMT *stmt, my_ulonglong offset); -my_ulonglong mysql_stmt_num_rows(MYSQL_STMT *stmt); -my_ulonglong mysql_stmt_affected_rows(MYSQL_STMT *stmt); -my_ulonglong mysql_stmt_insert_id(MYSQL_STMT *stmt); -unsigned int mysql_stmt_field_count(MYSQL_STMT *stmt); -my_bool mysql_commit(MYSQL * mysql); -int mysql_commit_start(my_bool *ret, MYSQL * mysql); -int mysql_commit_cont(my_bool *ret, MYSQL * mysql, int status); -my_bool mysql_rollback(MYSQL * mysql); -int mysql_rollback_start(my_bool *ret, MYSQL * mysql); -int mysql_rollback_cont(my_bool *ret, MYSQL * mysql, int status); -my_bool mysql_autocommit(MYSQL * mysql, my_bool auto_mode); -int mysql_autocommit_start(my_bool *ret, MYSQL * mysql, - my_bool auto_mode); -int mysql_autocommit_cont(my_bool *ret, MYSQL * mysql, int status); -my_bool mysql_more_results(MYSQL *mysql); -int mysql_next_result(MYSQL *mysql); -int mysql_next_result_start(int *ret, MYSQL *mysql); -int mysql_next_result_cont(int *ret, MYSQL *mysql, int status); -int mysql_stmt_next_result(MYSQL_STMT *stmt); -int mysql_stmt_next_result_start(int *ret, MYSQL_STMT *stmt); -int mysql_stmt_next_result_cont(int *ret, MYSQL_STMT *stmt, int status); -void mysql_close_slow_part(MYSQL *mysql); -void mysql_close(MYSQL *sock); -int mysql_close_start(MYSQL *sock); -int mysql_close_cont(MYSQL *sock, int status); -my_socket mysql_get_socket(const MYSQL *mysql); -unsigned int mysql_get_timeout_value(const MYSQL *mysql); -unsigned int mysql_get_timeout_value_ms(const MYSQL *mysql); -unsigned long mysql_net_read_packet(MYSQL *mysql); -unsigned long mysql_net_field_length(unsigned char **packet); -} diff --git a/include/mysql/plugin_audit.h.pp b/include/mysql/plugin_audit.h.pp index fedfb5862b4..d1e9580a053 100644 --- a/include/mysql/plugin_audit.h.pp +++ b/include/mysql/plugin_audit.h.pp @@ -481,6 +481,12 @@ extern struct sql_service_st { void (STDCALL *mysql_free_result_func)(MYSQL_RES *result); MYSQL_ROW (STDCALL *mysql_fetch_row_func)(MYSQL_RES *result); void (STDCALL *mysql_close_func)(MYSQL *mysql); + int (STDCALL *mysql_options_func)(MYSQL *mysql, enum mysql_option option, + const void *arg); + unsigned long *(STDCALL *mysql_fetch_lengths_func)(MYSQL_RES *res); + int (STDCALL *mysql_set_character_set_func)(MYSQL *mysql, const char *cs_name); + unsigned int (STDCALL *mysql_num_fields_func)(MYSQL_RES *res); + int (STDCALL *mysql_select_db_func)(MYSQL *mysql, const char *db); } *sql_service; MYSQL *mysql_real_connect_local(MYSQL *mysql); } diff --git a/include/mysql/plugin_auth.h.pp b/include/mysql/plugin_auth.h.pp index f4adae35ffd..2c89bc47de3 100644 --- a/include/mysql/plugin_auth.h.pp +++ b/include/mysql/plugin_auth.h.pp @@ -481,6 +481,12 @@ extern struct sql_service_st { void (STDCALL *mysql_free_result_func)(MYSQL_RES *result); MYSQL_ROW (STDCALL *mysql_fetch_row_func)(MYSQL_RES *result); void (STDCALL *mysql_close_func)(MYSQL *mysql); + int (STDCALL *mysql_options_func)(MYSQL *mysql, enum mysql_option option, + const void *arg); + unsigned long *(STDCALL *mysql_fetch_lengths_func)(MYSQL_RES *res); + int (STDCALL *mysql_set_character_set_func)(MYSQL *mysql, const char *cs_name); + unsigned int (STDCALL *mysql_num_fields_func)(MYSQL_RES *res); + int (STDCALL *mysql_select_db_func)(MYSQL *mysql, const char *db); } *sql_service; MYSQL *mysql_real_connect_local(MYSQL *mysql); } diff --git 
a/include/mysql/plugin_data_type.h.pp b/include/mysql/plugin_data_type.h.pp index 68333230b00..bd4ef6af15f 100644 --- a/include/mysql/plugin_data_type.h.pp +++ b/include/mysql/plugin_data_type.h.pp @@ -481,6 +481,12 @@ extern struct sql_service_st { void (STDCALL *mysql_free_result_func)(MYSQL_RES *result); MYSQL_ROW (STDCALL *mysql_fetch_row_func)(MYSQL_RES *result); void (STDCALL *mysql_close_func)(MYSQL *mysql); + int (STDCALL *mysql_options_func)(MYSQL *mysql, enum mysql_option option, + const void *arg); + unsigned long *(STDCALL *mysql_fetch_lengths_func)(MYSQL_RES *res); + int (STDCALL *mysql_set_character_set_func)(MYSQL *mysql, const char *cs_name); + unsigned int (STDCALL *mysql_num_fields_func)(MYSQL_RES *res); + int (STDCALL *mysql_select_db_func)(MYSQL *mysql, const char *db); } *sql_service; MYSQL *mysql_real_connect_local(MYSQL *mysql); } diff --git a/include/mysql/plugin_encryption.h.pp b/include/mysql/plugin_encryption.h.pp index a7dca09334d..96100d3791b 100644 --- a/include/mysql/plugin_encryption.h.pp +++ b/include/mysql/plugin_encryption.h.pp @@ -481,6 +481,12 @@ extern struct sql_service_st { void (STDCALL *mysql_free_result_func)(MYSQL_RES *result); MYSQL_ROW (STDCALL *mysql_fetch_row_func)(MYSQL_RES *result); void (STDCALL *mysql_close_func)(MYSQL *mysql); + int (STDCALL *mysql_options_func)(MYSQL *mysql, enum mysql_option option, + const void *arg); + unsigned long *(STDCALL *mysql_fetch_lengths_func)(MYSQL_RES *res); + int (STDCALL *mysql_set_character_set_func)(MYSQL *mysql, const char *cs_name); + unsigned int (STDCALL *mysql_num_fields_func)(MYSQL_RES *res); + int (STDCALL *mysql_select_db_func)(MYSQL *mysql, const char *db); } *sql_service; MYSQL *mysql_real_connect_local(MYSQL *mysql); } diff --git a/include/mysql/plugin_ftparser.h.pp b/include/mysql/plugin_ftparser.h.pp index 3270f88defd..4760e31c4bd 100644 --- a/include/mysql/plugin_ftparser.h.pp +++ b/include/mysql/plugin_ftparser.h.pp @@ -481,6 +481,12 @@ extern struct sql_service_st { void (STDCALL *mysql_free_result_func)(MYSQL_RES *result); MYSQL_ROW (STDCALL *mysql_fetch_row_func)(MYSQL_RES *result); void (STDCALL *mysql_close_func)(MYSQL *mysql); + int (STDCALL *mysql_options_func)(MYSQL *mysql, enum mysql_option option, + const void *arg); + unsigned long *(STDCALL *mysql_fetch_lengths_func)(MYSQL_RES *res); + int (STDCALL *mysql_set_character_set_func)(MYSQL *mysql, const char *cs_name); + unsigned int (STDCALL *mysql_num_fields_func)(MYSQL_RES *res); + int (STDCALL *mysql_select_db_func)(MYSQL *mysql, const char *db); } *sql_service; MYSQL *mysql_real_connect_local(MYSQL *mysql); } diff --git a/include/mysql/plugin_function.h.pp b/include/mysql/plugin_function.h.pp index 4f0c15e74eb..5c1f34d8184 100644 --- a/include/mysql/plugin_function.h.pp +++ b/include/mysql/plugin_function.h.pp @@ -481,6 +481,12 @@ extern struct sql_service_st { void (STDCALL *mysql_free_result_func)(MYSQL_RES *result); MYSQL_ROW (STDCALL *mysql_fetch_row_func)(MYSQL_RES *result); void (STDCALL *mysql_close_func)(MYSQL *mysql); + int (STDCALL *mysql_options_func)(MYSQL *mysql, enum mysql_option option, + const void *arg); + unsigned long *(STDCALL *mysql_fetch_lengths_func)(MYSQL_RES *res); + int (STDCALL *mysql_set_character_set_func)(MYSQL *mysql, const char *cs_name); + unsigned int (STDCALL *mysql_num_fields_func)(MYSQL_RES *res); + int (STDCALL *mysql_select_db_func)(MYSQL *mysql, const char *db); } *sql_service; MYSQL *mysql_real_connect_local(MYSQL *mysql); } diff --git a/include/mysql/plugin_password_validation.h.pp 
b/include/mysql/plugin_password_validation.h.pp index d8dbd8b0dfe..a56a177b8fa 100644 --- a/include/mysql/plugin_password_validation.h.pp +++ b/include/mysql/plugin_password_validation.h.pp @@ -481,6 +481,12 @@ extern struct sql_service_st { void (STDCALL *mysql_free_result_func)(MYSQL_RES *result); MYSQL_ROW (STDCALL *mysql_fetch_row_func)(MYSQL_RES *result); void (STDCALL *mysql_close_func)(MYSQL *mysql); + int (STDCALL *mysql_options_func)(MYSQL *mysql, enum mysql_option option, + const void *arg); + unsigned long *(STDCALL *mysql_fetch_lengths_func)(MYSQL_RES *res); + int (STDCALL *mysql_set_character_set_func)(MYSQL *mysql, const char *cs_name); + unsigned int (STDCALL *mysql_num_fields_func)(MYSQL_RES *res); + int (STDCALL *mysql_select_db_func)(MYSQL *mysql, const char *db); } *sql_service; MYSQL *mysql_real_connect_local(MYSQL *mysql); } diff --git a/include/mysql/service_sql.h b/include/mysql/service_sql.h index ef6de8b34ef..be9dcad669e 100644 --- a/include/mysql/service_sql.h +++ b/include/mysql/service_sql.h @@ -62,6 +62,12 @@ extern struct sql_service_st { void (STDCALL *mysql_free_result_func)(MYSQL_RES *result); MYSQL_ROW (STDCALL *mysql_fetch_row_func)(MYSQL_RES *result); void (STDCALL *mysql_close_func)(MYSQL *mysql); + int (STDCALL *mysql_options_func)(MYSQL *mysql, enum mysql_option option, + const void *arg); + unsigned long *(STDCALL *mysql_fetch_lengths_func)(MYSQL_RES *res); + int (STDCALL *mysql_set_character_set_func)(MYSQL *mysql, const char *cs_name); + unsigned int (STDCALL *mysql_num_fields_func)(MYSQL_RES *res); + int (STDCALL *mysql_select_db_func)(MYSQL *mysql, const char *db); } *sql_service; #ifdef MYSQL_DYNAMIC_PLUGIN @@ -78,6 +84,11 @@ extern struct sql_service_st { #define mysql_free_result(R) sql_service->mysql_free_result_func(R) #define mysql_fetch_row(R) sql_service->mysql_fetch_row_func(R) #define mysql_close(M) sql_service->mysql_close_func(M) +#define mysql_options(M,O,V) sql_service->mysql_options_func(M,O,V) +#define mysql_fetch_lengths(R) sql_service->mysql_fetch_lengths_func(R) +#define mysql_set_character_set(M,C) sql_service->mysql_set_character_set_func(M,C) +#define mysql_num_fields(R) sql_service->mysql_num_fields_func(R) +#define mysql_select_db(M,D) sql_service->mysql_select_db_func(M,D) #else diff --git a/include/service_versions.h b/include/service_versions.h index acc50e73cc6..9abae740a7b 100644 --- a/include/service_versions.h +++ b/include/service_versions.h @@ -44,7 +44,7 @@ #define VERSION_wsrep 0x0500 #define VERSION_json 0x0100 #define VERSION_thd_mdl 0x0100 -#define VERSION_sql_service 0x0100 +#define VERSION_sql_service 0x0101 #define VERSION_provider_bzip2 0x0100 #define VERSION_provider_lz4 0x0100 diff --git a/mysql-test/include/rowid_filter_debug_kill.inc b/mysql-test/include/rowid_filter_debug_kill.inc index 6a8c5d3f70d..c701d206297 100644 --- a/mysql-test/include/rowid_filter_debug_kill.inc +++ b/mysql-test/include/rowid_filter_debug_kill.inc @@ -9,9 +9,6 @@ create table t0(a int); insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); -create table t1(a int); -insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; - # 100 rows create table t2(a int); insert into t2 select A.a + B.a* 10 from t0 A, t0 B; @@ -30,10 +27,10 @@ where table_schema=database() and table_name='t3'; insert into t3 select A.a, - A.a, + B.a, 'filler-data-filler-data' from - t0 A, t1 B; + t2 A, t2 B; analyze table t2,t3; @@ -63,6 +60,6 @@ disconnect con1; reap; set debug_sync='RESET'; -drop table t0,t1,t2,t3; +drop table 
t0,t2,t3; --source include/wait_until_count_sessions.inc diff --git a/mysql-test/main/bad_startup_options.result b/mysql-test/main/bad_startup_options.result new file mode 100644 index 00000000000..b1eed54e172 --- /dev/null +++ b/mysql-test/main/bad_startup_options.result @@ -0,0 +1,2 @@ +FOUND 1 /\[ERROR\] SSL error: Unable to get certificate/ in errorlog.err +# restart diff --git a/mysql-test/main/bad_startup_options.test b/mysql-test/main/bad_startup_options.test new file mode 100644 index 00000000000..bd0b6283854 --- /dev/null +++ b/mysql-test/main/bad_startup_options.test @@ -0,0 +1,19 @@ +--source include/not_embedded.inc +--source include/have_ssl_communication.inc + +--source include/shutdown_mysqld.inc + +# Try to start the server, with bad values for some options. +# Make sure, the starts fails, and expected message is in the error log + +--let errorlog=$MYSQL_TMP_DIR/errorlog.err +--let SEARCH_FILE=$errorlog + +# Bad ssl-cert +--error 1 +--exec $MYSQLD --defaults-group-suffix=.1 --defaults-file=$MYSQLTEST_VARDIR/my.cnf --ssl-cert=bad --log-error=$errorlog +--let SEARCH_PATTERN=\[ERROR\] SSL error: Unable to get certificate +--source include/search_pattern_in_file.inc +--remove_file $SEARCH_FILE + +--source include/start_mysqld.inc diff --git a/mysql-test/main/gis.result b/mysql-test/main/gis.result index 82fe9a53eee..5b323bb598c 100644 --- a/mysql-test/main/gis.result +++ b/mysql-test/main/gis.result @@ -4977,6 +4977,15 @@ ERROR HY000: Illegal parameter data type point for operation 'is_free_lock' SELECT IS_USED_LOCK(POINT(1,1)); ERROR HY000: Illegal parameter data type point for operation 'is_used_lock' # +# MDEV-26161 crash in Gis_point::calculate_haversine +# +select st_distance_sphere(x'01030000000400000004000000000000', multipoint(point(124,204)), 10); +ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field +select st_distance_sphere(x'010300000004000000040000', multipoint(point(124,204)), 10); +ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field +select st_distance_sphere(x'010300000001000000040000', multipoint(point(124,204)), 10); +ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field +# # End of 10.3 tests # # diff --git a/mysql-test/main/gis.test b/mysql-test/main/gis.test index 44557e4088c..6fd1feda183 100644 --- a/mysql-test/main/gis.test +++ b/mysql-test/main/gis.test @@ -3089,6 +3089,15 @@ SELECT IS_FREE_LOCK(POINT(1,1)); --error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION SELECT IS_USED_LOCK(POINT(1,1)); +--echo # +--echo # MDEV-26161 crash in Gis_point::calculate_haversine +--echo # +--error ER_CANT_CREATE_GEOMETRY_OBJECT +select st_distance_sphere(x'01030000000400000004000000000000', multipoint(point(124,204)), 10); +--error ER_CANT_CREATE_GEOMETRY_OBJECT +select st_distance_sphere(x'010300000004000000040000', multipoint(point(124,204)), 10); +--error ER_CANT_CREATE_GEOMETRY_OBJECT +select st_distance_sphere(x'010300000001000000040000', multipoint(point(124,204)), 10); --echo # --echo # End of 10.3 tests diff --git a/mysql-test/main/join_cache.result b/mysql-test/main/join_cache.result index 10ed8960b64..3a5c975f472 100644 --- a/mysql-test/main/join_cache.result +++ b/mysql-test/main/join_cache.result @@ -853,7 +853,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country hash_ALL PRIMARY #hash#PRIMARY 3 world.City.Country 239 Using where; Using join 
buffer (flat, BNLH join) -1 SIMPLE CountryLanguage hash_ALL|filter PRIMARY,Percentage #hash#PRIMARY|Percentage 3|4 world.City.Country 984 (19%) Using where; Using join buffer (flat, BNLH join); Using rowid filter +1 SIMPLE CountryLanguage hash_ALL PRIMARY,Percentage #hash#PRIMARY 3 world.City.Country 984 Using where; Using join buffer (flat, BNLH join) SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -1053,7 +1053,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country hash_ALL PRIMARY #hash#PRIMARY 3 world.City.Country 239 Using where; Using join buffer (flat, BNLH join) -1 SIMPLE CountryLanguage hash_ALL|filter PRIMARY,Percentage #hash#PRIMARY|Percentage 3|4 world.City.Country 984 (19%) Using where; Using join buffer (incremental, BNLH join); Using rowid filter +1 SIMPLE CountryLanguage hash_ALL PRIMARY,Percentage #hash#PRIMARY 3 world.City.Country 984 Using where; Using join buffer (incremental, BNLH join) SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -1312,7 +1312,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan -1 SIMPLE CountryLanguage ref|filter PRIMARY,Percentage PRIMARY|Percentage 3|4 world.City.Country 4 (19%) Using index condition(BKA); Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -1509,7 +1509,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan -1 SIMPLE CountryLanguage ref|filter PRIMARY,Percentage PRIMARY|Percentage 3|4 world.City.Country 4 (19%) Using index condition(BKA); Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -1706,7 +1706,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan -1 SIMPLE CountryLanguage ref|filter PRIMARY,Percentage PRIMARY|Percentage 3|4 world.City.Country 4 (19%) Using index condition(BKA); Using where; Using join buffer (flat, BKAH 
join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -1903,7 +1903,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan -1 SIMPLE CountryLanguage ref|filter PRIMARY,Percentage PRIMARY|Percentage 3|4 world.City.Country 4 (19%) Using index condition(BKA); Using where; Using join buffer (incremental, BKAH join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (incremental, BKAH join); Key-ordered Rowid-ordered scan SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -2104,7 +2104,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country hash_ALL PRIMARY #hash#PRIMARY 3 world.City.Country 239 Using where; Using join buffer (flat, BNLH join) -1 SIMPLE CountryLanguage hash_ALL|filter PRIMARY,Percentage #hash#PRIMARY|Percentage 3|4 world.City.Country 984 (19%) Using where; Using join buffer (flat, BNLH join); Using rowid filter +1 SIMPLE CountryLanguage hash_ALL PRIMARY,Percentage #hash#PRIMARY 3 world.City.Country 984 Using where; Using join buffer (flat, BNLH join) SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -2208,7 +2208,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country hash_ALL PRIMARY #hash#PRIMARY 3 world.City.Country 239 Using where; Using join buffer (flat, BNLH join) -1 SIMPLE CountryLanguage hash_ALL|filter PRIMARY,Percentage #hash#PRIMARY|Percentage 3|4 world.City.Country 984 (19%) Using where; Using join buffer (incremental, BNLH join); Using rowid filter +1 SIMPLE CountryLanguage hash_ALL PRIMARY,Percentage #hash#PRIMARY 3 world.City.Country 984 Using where; Using join buffer (incremental, BNLH join) SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -2312,7 +2312,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan -1 SIMPLE CountryLanguage ref|filter PRIMARY,Percentage PRIMARY|Percentage 3|4 world.City.Country 4 (19%) Using index condition(BKA); Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (flat, BKA join); 
Key-ordered Rowid-ordered scan SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -2416,7 +2416,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan -1 SIMPLE CountryLanguage ref|filter PRIMARY,Percentage PRIMARY|Percentage 3|4 world.City.Country 4 (19%) Using index condition(BKA); Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -2520,7 +2520,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan -1 SIMPLE CountryLanguage ref|filter PRIMARY,Percentage PRIMARY|Percentage 3|4 world.City.Country 4 (19%) Using index condition(BKA); Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -2624,7 +2624,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan -1 SIMPLE CountryLanguage ref|filter PRIMARY,Percentage PRIMARY|Percentage 3|4 world.City.Country 4 (19%) Using index condition(BKA); Using where; Using join buffer (incremental, BKAH join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (incremental, BKAH join); Key-ordered Rowid-ordered scan SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND diff --git a/mysql-test/main/join_nested_jcl6.result b/mysql-test/main/join_nested_jcl6.result index 5db6d030965..221a3118b8a 100644 --- a/mysql-test/main/join_nested_jcl6.result +++ b/mysql-test/main/join_nested_jcl6.result @@ -2033,7 +2033,7 @@ ON t6.b >= 2 AND t5.b=t7.b AND (t8.a > 0 OR t8.c IS NULL) AND t6.a>0 AND t7.a>0; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t5 ALL NULL NULL NULL NULL 3 -1 SIMPLE t7 ref|filter PRIMARY,b_i b_i|PRIMARY 5|4 test.t5.b 2 (29%) Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE t7 range PRIMARY,b_i PRIMARY 4 NULL 2 Using where; Rowid-ordered scan; Using join buffer (flat, BNL join) 
1 SIMPLE t6 range|filter PRIMARY,b_i PRIMARY|b_i 4|5 NULL 3 (86%) Using where; Rowid-ordered scan; Using join buffer (incremental, BNL join); Using rowid filter 1 SIMPLE t8 ref b_i b_i 5 test.t5.b 2 Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan SELECT t5.a,t5.b,t6.a,t6.b,t7.a,t7.b,t8.a,t8.b diff --git a/mysql-test/main/partition_alter.result b/mysql-test/main/partition_alter.result index 0dee3d209b1..28fef4e80d1 100644 --- a/mysql-test/main/partition_alter.result +++ b/mysql-test/main/partition_alter.result @@ -213,6 +213,29 @@ delete from t order by b limit 1; drop table t; # End of 10.3 tests # +# Start of 10.4 tests +# +# +# MDEV-28545 MyISAM reorganize partition corrupt older table format +# +SET GLOBAL mysql56_temporal_format=OFF; +CREATE TABLE t (ts timestamp, KEY (ts)) ENGINE=MyISAM +PARTITION BY RANGE (unix_timestamp(ts)) ( +PARTITION p1 VALUES LESS THAN (1645398000), +PARTITION pn VALUES LESS THAN MAXVALUE +); +SET GLOBAL mysql56_temporal_format=ON; +FLUSH TABLES; +ALTER TABLE t DROP PARTITION p1; +CHECK TABLE t; +Table Op Msg_type Msg_text +test.t check status OK +DROP TABLE t; +SET GLOBAL mysql56_temporal_format=DEFAULT; +# +# End of 10.4 tests +# +# # MDEV-28576 RENAME COLUMN with NOCOPY algorithm leads to corrupt partitioned table # create table t (a int) diff --git a/mysql-test/main/partition_alter.test b/mysql-test/main/partition_alter.test index 393755c3b50..72b09549b18 100644 --- a/mysql-test/main/partition_alter.test +++ b/mysql-test/main/partition_alter.test @@ -199,6 +199,32 @@ drop table t; --echo # End of 10.3 tests --echo # +--echo # Start of 10.4 tests +--echo # + +--echo # +--echo # MDEV-28545 MyISAM reorganize partition corrupt older table format +--echo # + +SET GLOBAL mysql56_temporal_format=OFF; +CREATE TABLE t (ts timestamp, KEY (ts)) ENGINE=MyISAM +PARTITION BY RANGE (unix_timestamp(ts)) ( + PARTITION p1 VALUES LESS THAN (1645398000), + PARTITION pn VALUES LESS THAN MAXVALUE +); + +SET GLOBAL mysql56_temporal_format=ON; +FLUSH TABLES; +ALTER TABLE t DROP PARTITION p1; +CHECK TABLE t; +DROP TABLE t; +SET GLOBAL mysql56_temporal_format=DEFAULT; + +--echo # +--echo # End of 10.4 tests +--echo # + +--echo # --echo # MDEV-28576 RENAME COLUMN with NOCOPY algorithm leads to corrupt partitioned table --echo # create table t (a int) diff --git a/mysql-test/main/partition_pruning.result b/mysql-test/main/partition_pruning.result index 519bf590b9b..ec0cb144a51 100644 --- a/mysql-test/main/partition_pruning.result +++ b/mysql-test/main/partition_pruning.result @@ -2677,7 +2677,7 @@ select * from t1 X, t1 Y where X.b = Y.b and (X.a=1 or X.a=2) and (Y.a=2 or Y.a=3); id select_type table partitions type possible_keys key key_len ref rows Extra 1 SIMPLE X p1,p2 range a,b a 4 NULL 4 Using where -1 SIMPLE Y p2,p3 ref|filter a,b b|a 4|4 test.X.b 2 (50%) Using where; Using rowid filter +1 SIMPLE Y p2,p3 ref a,b b 4 test.X.b 2 Using where explain partitions select * from t1 X, t1 Y where X.a = Y.a and (X.a=1 or X.a=2); id select_type table partitions type possible_keys key key_len ref rows Extra diff --git a/mysql-test/main/range.result b/mysql-test/main/range.result index 9edf7c5a070..2fc6fbde38c 100644 --- a/mysql-test/main/range.result +++ b/mysql-test/main/range.result @@ -281,7 +281,7 @@ INSERT INTO t1 VALUES (33,5),(33,5),(33,5),(33,5),(34,5),(35,5); EXPLAIN SELECT * FROM t1 WHERE a IN(1,2) AND b=5; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref|filter a,b b|a 5|5 const 15 (5%) Using where; Using 
rowid filter +1 SIMPLE t1 range|filter a,b a|b 5|5 NULL 2 (41%) Using index condition; Using where; Using rowid filter SELECT * FROM t1 WHERE a IN(1,2) AND b=5; a b DROP TABLE t1; diff --git a/mysql-test/main/rowid_filter.result b/mysql-test/main/rowid_filter.result index 47961ab6c9d..a83b0fdb0d1 100644 --- a/mysql-test/main/rowid_filter.result +++ b/mysql-test/main/rowid_filter.result @@ -134,6 +134,7 @@ ANALYZE "rows": 702, "selectivity_pct": 11.69025812, "r_rows": 605, + "r_lookups": 510, "r_selectivity_pct": 11.76470588, "r_buffer_size": "REPLACED", "r_filling_time_ms": "REPLACED" @@ -464,6 +465,7 @@ ANALYZE "rows": 69, "selectivity_pct": 4.6, "r_rows": 71, + "r_lookups": 96, "r_selectivity_pct": 10.41666667, "r_buffer_size": "REPLACED", "r_filling_time_ms": "REPLACED" @@ -740,6 +742,7 @@ ANALYZE "rows": 702, "selectivity_pct": 11.69025812, "r_rows": 605, + "r_lookups": 510, "r_selectivity_pct": 11.76470588, "r_buffer_size": "REPLACED", "r_filling_time_ms": "REPLACED" @@ -772,6 +775,7 @@ ANALYZE "rows": 139, "selectivity_pct": 9.266666667, "r_rows": 144, + "r_lookups": 59, "r_selectivity_pct": 25.42372881, "r_buffer_size": "REPLACED", "r_filling_time_ms": "REPLACED" @@ -1072,6 +1076,7 @@ ANALYZE "rows": 509, "selectivity_pct": 8.476269775, "r_rows": 510, + "r_lookups": 476, "r_selectivity_pct": 7.773109244, "r_buffer_size": "REPLACED", "r_filling_time_ms": "REPLACED" @@ -2205,7 +2210,7 @@ EXPLAIN EXTENDED SELECT * FROM t1 HAVING (7, 9) IN (SELECT t2.i1, t2.i2 FROM t2 WHERE t2.i1 = 3); id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL Impossible HAVING -2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL no matching row in const table +2 SUBQUERY t2 ref i1,i2 i1 5 const 1 100.00 Using index condition; Using where Warnings: Note 1003 /* select#1 */ select `test`.`t1`.`pk` AS `pk` from `test`.`t1` having 0 DROP TABLE t1,t2; @@ -2214,7 +2219,7 @@ DROP TABLE t1,t2; # that uses in expensive subquery # CREATE TABLE t1 ( -pk1 INT PRIMARY KEY, a1 INT, b1 VARCHAR(1), KEY(b1) +pk1 INT PRIMARY KEY, a1 INT, b1 VARCHAR(1), KEY(a1), KEY(b1) ) ENGINE=MyISAM; INSERT INTO t1 VALUES (10,0,'z'),(11,3,'j'),(12,8,'f'),(13,8,'p'),(14,6,'w'),(15,0,'c'),(16,1,'j'), @@ -2233,21 +2238,31 @@ INSERT INTO t1 VALUES (101,0,'u'),(102,7,'r'),(103,2,'x'),(104,8,'e'),(105,8,'i'),(106,5,'q'), (107,8,'z'),(108,3,'k'),(109,65,NULL); CREATE TABLE t2 (pk2 INT PRIMARY KEY, a2 INT, b2 VARCHAR(1)) ENGINE=MyISAM; -INSERT INTO t2 VALUES (1,1,'x'); +INSERT INTO t2 VALUES (1,1,'i'); INSERT INTO t2 SELECT * FROM t1; -SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 ) +INSERT INTO t1 SELECT pk1+200, a1, b1 FROM t1; +INSERT INTO t1 SELECT pk1+400, a1, b1 FROM t1; +ANALYZE TABLE t1,t2 PERSISTENT FOR ALL; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +test.t2 analyze status Engine-independent statistics collected +test.t2 analyze status OK +SELECT * FROM t1 INNER JOIN t2 ON ( pk1+1 = pk2+2 AND a1 = a2 ) WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 ); pk1 a1 b1 pk2 a2 b2 -65 2 a 109 65 NULL -EXPLAIN EXTENDED SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 ) +17 1 f 16 1 j +37 3 g 36 3 a +105 8 i 104 8 e +EXPLAIN EXTENDED SELECT * FROM t1 INNER JOIN t2 ON ( pk1+1 = pk2+2 AND a1 = a2 ) WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 ); id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t2 ALL NULL NULL NULL NULL 101 100.00 
Using where -1 PRIMARY t1 eq_ref|filter PRIMARY,b1 PRIMARY|b1 4|4 test.t2.a2 1 (87%) 87.00 Using where; Using rowid filter +1 PRIMARY t1 ref|filter a1,b1 a1|b1 5|4 test.t2.a2 36 (29%) 28.75 Using where; Using rowid filter 2 SUBQUERY t2 range PRIMARY PRIMARY 4 NULL 1 100.00 Using index condition Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`pk1` AS `pk1`,`test`.`t1`.`a1` AS `a1`,`test`.`t1`.`b1` AS `b1`,`test`.`t2`.`pk2` AS `pk2`,`test`.`t2`.`a2` AS `a2`,`test`.`t2`.`b2` AS `b2` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`pk1` = `test`.`t2`.`a2` and `test`.`t1`.`b1` <= (/* select#2 */ select max(`test`.`t2`.`b2`) from `test`.`t2` where `test`.`t2`.`pk2` <= 1) and `test`.`t2`.`a2` <> `test`.`t2`.`pk2` -EXPLAIN FORMAT=JSON SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 ) +Note 1003 /* select#1 */ select `test`.`t1`.`pk1` AS `pk1`,`test`.`t1`.`a1` AS `a1`,`test`.`t1`.`b1` AS `b1`,`test`.`t2`.`pk2` AS `pk2`,`test`.`t2`.`a2` AS `a2`,`test`.`t2`.`b2` AS `b2` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`a1` = `test`.`t2`.`a2` and `test`.`t1`.`b1` <= (/* select#2 */ select max(`test`.`t2`.`b2`) from `test`.`t2` where `test`.`t2`.`pk2` <= 1) and `test`.`t1`.`pk1` + 1 = `test`.`t2`.`pk2` + 2 +EXPLAIN FORMAT=JSON SELECT * FROM t1 INNER JOIN t2 ON ( pk1+1 = pk2+2 AND a1 = a2 ) WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 ); EXPLAIN { @@ -2260,29 +2275,29 @@ EXPLAIN "access_type": "ALL", "rows": 101, "filtered": 100, - "attached_condition": "t2.a2 <> t2.pk2 and t2.a2 is not null" + "attached_condition": "t2.a2 is not null" } }, { "table": { "table_name": "t1", - "access_type": "eq_ref", - "possible_keys": ["PRIMARY", "b1"], - "key": "PRIMARY", - "key_length": "4", - "used_key_parts": ["pk1"], + "access_type": "ref", + "possible_keys": ["a1", "b1"], + "key": "a1", + "key_length": "5", + "used_key_parts": ["a1"], "ref": ["test.t2.a2"], "rowid_filter": { "range": { "key": "b1", "used_key_parts": ["b1"] }, - "rows": 87, - "selectivity_pct": 87 + "rows": 115, + "selectivity_pct": 28.75 }, - "rows": 1, - "filtered": 87, - "attached_condition": "t1.b1 <= (subquery#2)" + "rows": 36, + "filtered": 28.75, + "attached_condition": "t1.b1 <= (subquery#2) and t1.pk1 + 1 = t2.pk2 + 2" } } ], @@ -2355,13 +2370,466 @@ set @save_optimizer_switch= @@optimizer_switch; SET @@optimizer_switch="index_merge_sort_union=OFF"; CREATE TABLE t1 (a INT, b INT, INDEX(a), INDEX(b)); INSERT INTO t1 VALUES (0,0),(1,0),(-1,1), (-2,1), (-2,3), (-3,4), (-2,4); +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +ANALYZE table t1 PERSISTENT FOR ALL; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK explain SELECT * FROM t1 WHERE a > 0 AND b=0; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref|filter a,b b|a 5|5 const 2 (14%) Using where; Using rowid filter +1 SIMPLE t1 range|filter a,b a|b 5|5 NULL 77 (34%) Using index condition; Using where; Using rowid filter SELECT * FROM t1 WHERE a > 0 AND b=0; a b 1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 drop table t1; SET 
@@optimizer_switch=@save_optimizer_switch; +# +# MDEV-28846: Poor performance when rowid filter contains no elements +# +create table t1 ( +pk int primary key auto_increment, +nm varchar(32), +fl1 tinyint default 0, +fl2 tinyint default 0, +index idx1(nm, fl1), +index idx2(fl2) +) engine=myisam; +create table name ( +pk int primary key auto_increment, +nm bigint +) engine=myisam; +create table flag2 ( +pk int primary key auto_increment, +fl2 tinyint +) engine=myisam; +insert into name(nm) select seq from seq_1_to_1000 order by rand(17); +insert into flag2(fl2) select seq mod 2 from seq_1_to_1000 order by rand(19); +insert into t1(nm,fl2) +select nm, fl2 from name, flag2 where name.pk = flag2.pk; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +select '500%' as a; +a +500% +set optimizer_switch='rowid_filter=on'; +explain +select * from t1 where nm like '500%' AND fl2 = 0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx1,idx2 idx1 35 NULL 1 Using index condition; Using where +analyze format=json +select * from t1 where nm like '500%' AND fl2 = 0; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "nested_loop": [ + { + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx1", "idx2"], + "key": "idx1", + "key_length": "35", + "used_key_parts": ["nm"], + "r_loops": 1, + "rows": 1, + "r_rows": 1, + "r_table_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "filtered": 49.20000076, + "r_filtered": 100, + "index_condition": "t1.nm like '500%'", + "attached_condition": "t1.fl2 = 0" + } + } + ] + } +} +select * from t1 where nm like '500%' AND fl2 = 0; +pk nm fl1 fl2 +517 500 0 0 +truncate table name; +truncate table flag2; +truncate table t1; +insert into name(nm) select seq from seq_1_to_1000 order by rand(17); +insert into flag2(fl2) select seq mod 2 from seq_1_to_1000 order by rand(19); +insert into t1(nm,fl2) +select nm, fl2 from name, flag2 where name.pk = flag2.pk; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +set optimizer_switch='rowid_filter=off'; +explain +select * from t1 where nm like '500%' AND fl2 = 0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx1,idx2 idx1 35 NULL 1 Using index condition; Using where +analyze format=json +select * from t1 where nm like '500%' AND fl2 = 0; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "nested_loop": [ + { + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx1", "idx2"], + "key": "idx1", + "key_length": "35", + "used_key_parts": ["nm"], + "r_loops": 1, + "rows": 1, + "r_rows": 1, + "r_table_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "filtered": 49.20000076, + "r_filtered": 100, + "index_condition": "t1.nm like '500%'", + "attached_condition": "t1.fl2 = 0" + } + } + ] + } +} +select * from t1 where nm like '500%' AND fl2 = 0; +pk nm fl1 fl2 +517 500 0 0 +truncate table name; +truncate table flag2; +truncate table t1; +insert into name(nm) select seq from seq_1_to_1000 order by rand(17); +insert into flag2(fl2) select seq mod 10 from seq_1_to_1000 order by rand(19); +insert into t1(nm,fl2) +select nm, fl2 from name, 
flag2 where name.pk = flag2.pk; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +select '607%' as a; +a +607% +set optimizer_switch='rowid_filter=on'; +explain +select * from t1 where nm like '607%' AND fl2 = 0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx1,idx2 idx1 35 NULL 1 Using index condition; Using where +select * from t1 where nm like '607%' AND fl2 = 0; +pk nm fl1 fl2 +721 607 0 0 +truncate table name; +truncate table flag2; +truncate table t1; +insert into name(nm) select seq from seq_1_to_10000 order by rand(17); +insert into flag2(fl2) select seq mod 100 from seq_1_to_10000 order by rand(19); +insert into t1(nm,fl2) +select nm, fl2 from name, flag2 where name.pk = flag2.pk; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +select '75%' as a; +a +75% +set optimizer_switch='rowid_filter=on'; +explain +select * from t1 where nm like '75%' AND fl2 = 0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref|filter idx1,idx2 idx2|idx1 2|35 const 55 (1%) Using where; Using rowid filter +analyze format=json +select * from t1 where nm like '75%' AND fl2 = 0; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "nested_loop": [ + { + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["idx1", "idx2"], + "key": "idx2", + "key_length": "2", + "used_key_parts": ["fl2"], + "ref": ["const"], + "rowid_filter": { + "range": { + "key": "idx1", + "used_key_parts": ["nm"] + }, + "rows": 115, + "selectivity_pct": 1.15, + "r_rows": 111, + "r_lookups": 100, + "r_selectivity_pct": 2, + "r_buffer_size": "REPLACED", + "r_filling_time_ms": "REPLACED" + }, + "r_loops": 1, + "rows": 55, + "r_rows": 2, + "r_table_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "filtered": 1.149999976, + "r_filtered": 100, + "attached_condition": "t1.nm like '75%'" + } + } + ] + } +} +select * from t1 where nm like '75%' AND fl2 = 0; +pk nm fl1 fl2 +4543 7503 0 0 +7373 7518 0 0 +drop table name, flag2; +drop table t1; +create table t1 ( +pk int primary key auto_increment, +nm char(255), +fl1 tinyint default 0, +fl2 int default 0, +index idx1(nm, fl1), +index idx2(fl2) +) engine=myisam; +create table name ( +pk int primary key auto_increment, +nm bigint +) engine=myisam; +create table flag2 ( +pk int primary key auto_increment, +fl2 int +) engine=myisam; +insert into name(nm) select seq from seq_1_to_10000 order by rand(17); +insert into flag2(fl2) select seq mod 10 from seq_1_to_10000 order by rand(19); +insert into t1(nm,fl2) +select nm, fl2 from name, flag2 where name.pk = flag2.pk; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +select * from t1 +where +( +nm like '3400%' or nm like '3402%' or nm like '3403%' or +nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or +nm like '3409%' or +nm like '3411%' or nm like '3412%' or nm like '3413%' or +nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or +nm like '3418%' or nm like '3419%' or +nm like '3421%' or nm like '3422%' or nm like '3423%' or +nm like '3424%' or nm like '3425%' or 
nm like '3426%' or nm like '3427%' or +nm like '3428%' or nm like '3429%' or +nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or +nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or +nm like '3439%' or +nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or +nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or +nm like '3448%' +) and fl2 = 0; +pk nm fl1 fl2 +analyze format=json select * from t1 +where +( +nm like '3400%' or nm like '3402%' or nm like '3403%' or +nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or +nm like '3409%' or +nm like '3411%' or nm like '3412%' or nm like '3413%' or +nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or +nm like '3418%' or nm like '3419%' or +nm like '3421%' or nm like '3422%' or nm like '3423%' or +nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or +nm like '3428%' or nm like '3429%' or +nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or +nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or +nm like '3439%' or +nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or +nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or +nm like '3448%' +) and fl2 = 0; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "nested_loop": [ + { + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["idx1", "idx2"], + "key": "idx2", + "key_length": "5", + "used_key_parts": ["fl2"], + "ref": ["const"], + "rowid_filter": { + "range": { + "key": "idx1", + "used_key_parts": ["nm"] + }, + "rows": 44, + "selectivity_pct": 0.44, + "r_rows": 44, + "r_lookups": 1000, + "r_selectivity_pct": 0, + "r_buffer_size": "REPLACED", + "r_filling_time_ms": "REPLACED" + }, + "r_loops": 1, + "rows": 863, + "r_rows": 0, + "r_table_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "filtered": 0.439999998, + "r_filtered": 100, + "attached_condition": "t1.nm like '3400%' or t1.nm like '3402%' or t1.nm like '3403%' or t1.nm like '3404%' or t1.nm like '3405%' or t1.nm like '3406%' or t1.nm like '3407%' or t1.nm like '3409%' or t1.nm like '3411%' or t1.nm like '3412%' or t1.nm like '3413%' or t1.nm like '3414%' or t1.nm like '3415%' or t1.nm like '3416%' or t1.nm like '3417%' or t1.nm like '3418%' or t1.nm like '3419%' or t1.nm like '3421%' or t1.nm like '3422%' or t1.nm like '3423%' or t1.nm like '3424%' or t1.nm like '3425%' or t1.nm like '3426%' or t1.nm like '3427%' or t1.nm like '3428%' or t1.nm like '3429%' or t1.nm like '3430%' or t1.nm like '3431%' or t1.nm like '3432%' or t1.nm like '3433%' or t1.nm like '3434%' or t1.nm like '3435%' or t1.nm like '3436%' or t1.nm like '3437%' or t1.nm like '3439%' or t1.nm like '3440%' or t1.nm like '3441%' or t1.nm like '3442%' or t1.nm like '3443%' or t1.nm like '3444%' or t1.nm like '3445%' or t1.nm like '3446%' or t1.nm like '3447%' or t1.nm like '3448%'" + } + } + ] + } +} +create table t0 select * from t1 where nm like '34%'; +delete from t1 using t1,t0 where t1.nm=t0.nm; +analyze format=json select * from t1 +where +( +nm like '3400%' or nm like '3402%' or nm like '3403%' or +nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or +nm like '3409%' or +nm like '3411%' or nm like '3412%' or nm like '3413%' or +nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or +nm like '3418%' or 
nm like '3419%' or +nm like '3421%' or nm like '3422%' or nm like '3423%' or +nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or +nm like '3428%' or nm like '3429%' or +nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or +nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or +nm like '3439%' or +nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or +nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or +nm like '3448%' +) and fl2 = 0; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "nested_loop": [ + { + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["idx1", "idx2"], + "key": "idx2", + "key_length": "5", + "used_key_parts": ["fl2"], + "ref": ["const"], + "rowid_filter": { + "range": { + "key": "idx1", + "used_key_parts": ["nm"] + }, + "rows": 44, + "selectivity_pct": 0.44, + "r_rows": 0, + "r_lookups": 0, + "r_selectivity_pct": 0, + "r_buffer_size": "REPLACED", + "r_filling_time_ms": "REPLACED" + }, + "r_loops": 1, + "rows": 853, + "r_rows": 0, + "filtered": 0.439999998, + "r_filtered": 100, + "attached_condition": "t1.nm like '3400%' or t1.nm like '3402%' or t1.nm like '3403%' or t1.nm like '3404%' or t1.nm like '3405%' or t1.nm like '3406%' or t1.nm like '3407%' or t1.nm like '3409%' or t1.nm like '3411%' or t1.nm like '3412%' or t1.nm like '3413%' or t1.nm like '3414%' or t1.nm like '3415%' or t1.nm like '3416%' or t1.nm like '3417%' or t1.nm like '3418%' or t1.nm like '3419%' or t1.nm like '3421%' or t1.nm like '3422%' or t1.nm like '3423%' or t1.nm like '3424%' or t1.nm like '3425%' or t1.nm like '3426%' or t1.nm like '3427%' or t1.nm like '3428%' or t1.nm like '3429%' or t1.nm like '3430%' or t1.nm like '3431%' or t1.nm like '3432%' or t1.nm like '3433%' or t1.nm like '3434%' or t1.nm like '3435%' or t1.nm like '3436%' or t1.nm like '3437%' or t1.nm like '3439%' or t1.nm like '3440%' or t1.nm like '3441%' or t1.nm like '3442%' or t1.nm like '3443%' or t1.nm like '3444%' or t1.nm like '3445%' or t1.nm like '3446%' or t1.nm like '3447%' or t1.nm like '3448%'" + } + } + ] + } +} +drop table t0; +set optimizer_switch='rowid_filter=default'; +drop table name, flag2; +drop table t1; set @@use_stat_tables=@save_use_stat_tables; diff --git a/mysql-test/main/rowid_filter.test b/mysql-test/main/rowid_filter.test index 163b71b6153..a2543e197ca 100644 --- a/mysql-test/main/rowid_filter.test +++ b/mysql-test/main/rowid_filter.test @@ -320,7 +320,7 @@ DROP TABLE t1,t2; --echo # CREATE TABLE t1 ( - pk1 INT PRIMARY KEY, a1 INT, b1 VARCHAR(1), KEY(b1) + pk1 INT PRIMARY KEY, a1 INT, b1 VARCHAR(1), KEY(a1), KEY(b1) ) ENGINE=MyISAM; INSERT INTO t1 VALUES (10,0,'z'),(11,3,'j'),(12,8,'f'),(13,8,'p'),(14,6,'w'),(15,0,'c'),(16,1,'j'), @@ -340,11 +340,16 @@ INSERT INTO t1 VALUES (107,8,'z'),(108,3,'k'),(109,65,NULL); CREATE TABLE t2 (pk2 INT PRIMARY KEY, a2 INT, b2 VARCHAR(1)) ENGINE=MyISAM; -INSERT INTO t2 VALUES (1,1,'x'); +INSERT INTO t2 VALUES (1,1,'i'); INSERT INTO t2 SELECT * FROM t1; +INSERT INTO t1 SELECT pk1+200, a1, b1 FROM t1; +INSERT INTO t1 SELECT pk1+400, a1, b1 FROM t1; + +ANALYZE TABLE t1,t2 PERSISTENT FOR ALL; + let $q= -SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 ) +SELECT * FROM t1 INNER JOIN t2 ON ( pk1+1 = pk2+2 AND a1 = a2 ) WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 ); eval $q; @@ -399,6 +404,15 @@ set @save_optimizer_switch= @@optimizer_switch; SET 
@@optimizer_switch="index_merge_sort_union=OFF"; CREATE TABLE t1 (a INT, b INT, INDEX(a), INDEX(b)); INSERT INTO t1 VALUES (0,0),(1,0),(-1,1), (-2,1), (-2,3), (-3,4), (-2,4); +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; + +ANALYZE table t1 PERSISTENT FOR ALL; + explain SELECT * FROM t1 WHERE a > 0 AND b=0; SELECT * FROM t1 WHERE a > 0 AND b=0; @@ -406,4 +420,194 @@ drop table t1; SET @@optimizer_switch=@save_optimizer_switch; +--echo # +--echo # MDEV-28846: Poor performance when rowid filter contains no elements +--echo # + +--source include/have_sequence.inc + +create table t1 ( + pk int primary key auto_increment, + nm varchar(32), + fl1 tinyint default 0, + fl2 tinyint default 0, + index idx1(nm, fl1), + index idx2(fl2) +) engine=myisam; + +create table name ( + pk int primary key auto_increment, + nm bigint +) engine=myisam; + +create table flag2 ( + pk int primary key auto_increment, + fl2 tinyint +) engine=myisam; + +insert into name(nm) select seq from seq_1_to_1000 order by rand(17); +insert into flag2(fl2) select seq mod 2 from seq_1_to_1000 order by rand(19); + +insert into t1(nm,fl2) + select nm, fl2 from name, flag2 where name.pk = flag2.pk; + +analyze table t1 persistent for all; + +let $a= +`select concat((select nm from t1 where fl2=0 order by RAND(13) limit 1),'%')`; +eval select '$a' as a; + +set optimizer_switch='rowid_filter=on'; +eval +explain +select * from t1 where nm like '$a' AND fl2 = 0; +--source include/analyze-format.inc +eval +analyze format=json +select * from t1 where nm like '$a' AND fl2 = 0; +eval +select * from t1 where nm like '$a' AND fl2 = 0; + +truncate table name; +truncate table flag2; +truncate table t1; + +insert into name(nm) select seq from seq_1_to_1000 order by rand(17); +insert into flag2(fl2) select seq mod 2 from seq_1_to_1000 order by rand(19); + +insert into t1(nm,fl2) + select nm, fl2 from name, flag2 where name.pk = flag2.pk; + +analyze table t1 persistent for all; + +set optimizer_switch='rowid_filter=off'; +eval +explain +select * from t1 where nm like '$a' AND fl2 = 0; +--source include/analyze-format.inc +eval +analyze format=json +select * from t1 where nm like '$a' AND fl2 = 0; +eval +select * from t1 where nm like '$a' AND fl2 = 0; + +truncate table name; +truncate table flag2; +truncate table t1; + +insert into name(nm) select seq from seq_1_to_1000 order by rand(17); +insert into flag2(fl2) select seq mod 10 from seq_1_to_1000 order by rand(19); + +insert into t1(nm,fl2) + select nm, fl2 from name, flag2 where name.pk = flag2.pk; + +analyze table t1 persistent for all; + +let $a= +`select concat((select nm from t1 where fl2=0 order by RAND(13) limit 1),'%')`; +eval select '$a' as a; + +set optimizer_switch='rowid_filter=on'; +eval +explain +select * from t1 where nm like '$a' AND fl2 = 0; +eval +select * from t1 where nm like '$a' AND fl2 = 0; + +truncate table name; +truncate table flag2; +truncate table t1; + +insert into name(nm) select seq from seq_1_to_10000 order by rand(17); +insert into flag2(fl2) select seq mod 100 from seq_1_to_10000 order by rand(19); + +insert into t1(nm,fl2) + select nm, fl2 from name, flag2 where name.pk = flag2.pk; + +analyze table t1 persistent for all; + +let $a= +`select concat(left((select nm from t1 where fl2=0 order by RAND(13) limit 1),2),'%')`; +eval select '$a' as a; + +set optimizer_switch='rowid_filter=on'; +eval +explain +select 
* from t1 where nm like '$a' AND fl2 = 0; +--source include/analyze-format.inc +eval +analyze format=json +select * from t1 where nm like '$a' AND fl2 = 0; +eval +select * from t1 where nm like '$a' AND fl2 = 0; + +drop table name, flag2; +drop table t1; + +# This test shows that if the container is empty there are no lookups into it + +create table t1 ( + pk int primary key auto_increment, + nm char(255), + fl1 tinyint default 0, + fl2 int default 0, + index idx1(nm, fl1), + index idx2(fl2) +) engine=myisam; + +create table name ( + pk int primary key auto_increment, + nm bigint +) engine=myisam; + +create table flag2 ( + pk int primary key auto_increment, + fl2 int +) engine=myisam; + +insert into name(nm) select seq from seq_1_to_10000 order by rand(17); +insert into flag2(fl2) select seq mod 10 from seq_1_to_10000 order by rand(19); + +insert into t1(nm,fl2) + select nm, fl2 from name, flag2 where name.pk = flag2.pk; + +analyze table t1 persistent for all; + +let $q= +select * from t1 +where +( + nm like '3400%' or nm like '3402%' or nm like '3403%' or + nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or + nm like '3409%' or + nm like '3411%' or nm like '3412%' or nm like '3413%' or + nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or + nm like '3418%' or nm like '3419%' or + nm like '3421%' or nm like '3422%' or nm like '3423%' or + nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or + nm like '3428%' or nm like '3429%' or + nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or + nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or + nm like '3439%' or + nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or + nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or + nm like '3448%' +) and fl2 = 0; + +eval $q; +--source include/analyze-format.inc +eval analyze format=json $q; + +create table t0 select * from t1 where nm like '34%'; +delete from t1 using t1,t0 where t1.nm=t0.nm; +--source include/analyze-format.inc +eval analyze format=json $q; + +drop table t0; + +set optimizer_switch='rowid_filter=default'; + +drop table name, flag2; +drop table t1; + set @@use_stat_tables=@save_use_stat_tables; diff --git a/mysql-test/main/rowid_filter_innodb.result b/mysql-test/main/rowid_filter_innodb.result index d6ba961072a..1dda096213a 100644 --- a/mysql-test/main/rowid_filter_innodb.result +++ b/mysql-test/main/rowid_filter_innodb.result @@ -137,6 +137,7 @@ ANALYZE "rows": 605, "selectivity_pct": 10.07493755, "r_rows": 605, + "r_lookups": 510, "r_selectivity_pct": 11.76470588, "r_buffer_size": "REPLACED", "r_filling_time_ms": "REPLACED" @@ -719,6 +720,7 @@ ANALYZE "rows": 605, "selectivity_pct": 10.07493755, "r_rows": 605, + "r_lookups": 510, "r_selectivity_pct": 11.76470588, "r_buffer_size": "REPLACED", "r_filling_time_ms": "REPLACED" @@ -2156,7 +2158,7 @@ EXPLAIN EXTENDED SELECT * FROM t1 HAVING (7, 9) IN (SELECT t2.i1, t2.i2 FROM t2 WHERE t2.i1 = 3); id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL Impossible HAVING -2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL no matching row in const table +2 SUBQUERY t2 ref i1,i2 i1 5 const 1 100.00 Using index condition; Using where Warnings: Note 1003 /* select#1 */ select `test`.`t1`.`pk` AS `pk` from `test`.`t1` having 0 DROP TABLE t1,t2; @@ -2165,7 +2167,7 @@ DROP TABLE t1,t2; # that uses in expensive subquery 
# CREATE TABLE t1 ( -pk1 INT PRIMARY KEY, a1 INT, b1 VARCHAR(1), KEY(b1) +pk1 INT PRIMARY KEY, a1 INT, b1 VARCHAR(1), KEY(a1), KEY(b1) ) ENGINE=MyISAM; INSERT INTO t1 VALUES (10,0,'z'),(11,3,'j'),(12,8,'f'),(13,8,'p'),(14,6,'w'),(15,0,'c'),(16,1,'j'), @@ -2184,21 +2186,31 @@ INSERT INTO t1 VALUES (101,0,'u'),(102,7,'r'),(103,2,'x'),(104,8,'e'),(105,8,'i'),(106,5,'q'), (107,8,'z'),(108,3,'k'),(109,65,NULL); CREATE TABLE t2 (pk2 INT PRIMARY KEY, a2 INT, b2 VARCHAR(1)) ENGINE=MyISAM; -INSERT INTO t2 VALUES (1,1,'x'); +INSERT INTO t2 VALUES (1,1,'i'); INSERT INTO t2 SELECT * FROM t1; -SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 ) +INSERT INTO t1 SELECT pk1+200, a1, b1 FROM t1; +INSERT INTO t1 SELECT pk1+400, a1, b1 FROM t1; +ANALYZE TABLE t1,t2 PERSISTENT FOR ALL; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +test.t2 analyze status Engine-independent statistics collected +test.t2 analyze status OK +SELECT * FROM t1 INNER JOIN t2 ON ( pk1+1 = pk2+2 AND a1 = a2 ) WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 ); pk1 a1 b1 pk2 a2 b2 -65 2 a 109 65 NULL -EXPLAIN EXTENDED SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 ) +17 1 f 16 1 j +37 3 g 36 3 a +105 8 i 104 8 e +EXPLAIN EXTENDED SELECT * FROM t1 INNER JOIN t2 ON ( pk1+1 = pk2+2 AND a1 = a2 ) WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 ); id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t2 ALL NULL NULL NULL NULL 101 100.00 Using where -1 PRIMARY t1 eq_ref|filter PRIMARY,b1 PRIMARY|b1 4|4 test.t2.a2 1 (87%) 87.00 Using where; Using rowid filter +1 PRIMARY t1 ref|filter a1,b1 a1|b1 5|4 test.t2.a2 36 (29%) 28.75 Using where; Using rowid filter 2 SUBQUERY t2 range PRIMARY PRIMARY 4 NULL 1 100.00 Using index condition Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`pk1` AS `pk1`,`test`.`t1`.`a1` AS `a1`,`test`.`t1`.`b1` AS `b1`,`test`.`t2`.`pk2` AS `pk2`,`test`.`t2`.`a2` AS `a2`,`test`.`t2`.`b2` AS `b2` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`pk1` = `test`.`t2`.`a2` and `test`.`t1`.`b1` <= (/* select#2 */ select max(`test`.`t2`.`b2`) from `test`.`t2` where `test`.`t2`.`pk2` <= 1) and `test`.`t2`.`a2` <> `test`.`t2`.`pk2` -EXPLAIN FORMAT=JSON SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 ) +Note 1003 /* select#1 */ select `test`.`t1`.`pk1` AS `pk1`,`test`.`t1`.`a1` AS `a1`,`test`.`t1`.`b1` AS `b1`,`test`.`t2`.`pk2` AS `pk2`,`test`.`t2`.`a2` AS `a2`,`test`.`t2`.`b2` AS `b2` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`a1` = `test`.`t2`.`a2` and `test`.`t1`.`b1` <= (/* select#2 */ select max(`test`.`t2`.`b2`) from `test`.`t2` where `test`.`t2`.`pk2` <= 1) and `test`.`t1`.`pk1` + 1 = `test`.`t2`.`pk2` + 2 +EXPLAIN FORMAT=JSON SELECT * FROM t1 INNER JOIN t2 ON ( pk1+1 = pk2+2 AND a1 = a2 ) WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 ); EXPLAIN { @@ -2211,29 +2223,29 @@ EXPLAIN "access_type": "ALL", "rows": 101, "filtered": 100, - "attached_condition": "t2.a2 <> t2.pk2 and t2.a2 is not null" + "attached_condition": "t2.a2 is not null" } }, { "table": { "table_name": "t1", - "access_type": "eq_ref", - "possible_keys": ["PRIMARY", "b1"], - "key": "PRIMARY", - "key_length": "4", - "used_key_parts": ["pk1"], + "access_type": "ref", + "possible_keys": ["a1", "b1"], + "key": "a1", + "key_length": "5", + "used_key_parts": ["a1"], "ref": ["test.t2.a2"], "rowid_filter": { "range": { "key": "b1", "used_key_parts": ["b1"] }, - "rows": 87, - "selectivity_pct": 87 + 
"rows": 115, + "selectivity_pct": 28.75 }, - "rows": 1, - "filtered": 87, - "attached_condition": "t1.b1 <= (subquery#2)" + "rows": 36, + "filtered": 28.75, + "attached_condition": "t1.b1 <= (subquery#2) and t1.pk1 + 1 = t2.pk2 + 2" } } ], @@ -2306,15 +2318,468 @@ set @save_optimizer_switch= @@optimizer_switch; SET @@optimizer_switch="index_merge_sort_union=OFF"; CREATE TABLE t1 (a INT, b INT, INDEX(a), INDEX(b)); INSERT INTO t1 VALUES (0,0),(1,0),(-1,1), (-2,1), (-2,3), (-3,4), (-2,4); +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +ANALYZE table t1 PERSISTENT FOR ALL; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK explain SELECT * FROM t1 WHERE a > 0 AND b=0; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref|filter a,b b|a 5|5 const 2 (14%) Using where; Using rowid filter +1 SIMPLE t1 range|filter a,b a|b 5|5 NULL 64 (29%) Using index condition; Using where; Using rowid filter SELECT * FROM t1 WHERE a > 0 AND b=0; a b 1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 drop table t1; SET @@optimizer_switch=@save_optimizer_switch; +# +# MDEV-28846: Poor performance when rowid filter contains no elements +# +create table t1 ( +pk int primary key auto_increment, +nm varchar(32), +fl1 tinyint default 0, +fl2 tinyint default 0, +index idx1(nm, fl1), +index idx2(fl2) +) engine=myisam; +create table name ( +pk int primary key auto_increment, +nm bigint +) engine=myisam; +create table flag2 ( +pk int primary key auto_increment, +fl2 tinyint +) engine=myisam; +insert into name(nm) select seq from seq_1_to_1000 order by rand(17); +insert into flag2(fl2) select seq mod 2 from seq_1_to_1000 order by rand(19); +insert into t1(nm,fl2) +select nm, fl2 from name, flag2 where name.pk = flag2.pk; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +select '500%' as a; +a +500% +set optimizer_switch='rowid_filter=on'; +explain +select * from t1 where nm like '500%' AND fl2 = 0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx1,idx2 idx1 35 NULL 1 Using index condition; Using where +analyze format=json +select * from t1 where nm like '500%' AND fl2 = 0; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "nested_loop": [ + { + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx1", "idx2"], + "key": "idx1", + "key_length": "35", + "used_key_parts": ["nm"], + "r_loops": 1, + "rows": 1, + "r_rows": 1, + "r_table_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "filtered": 49.20000076, + "r_filtered": 100, + "index_condition": "t1.nm like '500%'", + "attached_condition": "t1.fl2 = 0" + } + } + ] + } +} +select * from t1 where nm like '500%' AND fl2 = 0; +pk nm fl1 fl2 +517 500 0 0 +truncate table name; +truncate table flag2; +truncate table t1; +insert into name(nm) select seq from seq_1_to_1000 order by rand(17); +insert into 
flag2(fl2) select seq mod 2 from seq_1_to_1000 order by rand(19); +insert into t1(nm,fl2) +select nm, fl2 from name, flag2 where name.pk = flag2.pk; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +set optimizer_switch='rowid_filter=off'; +explain +select * from t1 where nm like '500%' AND fl2 = 0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx1,idx2 idx1 35 NULL 1 Using index condition; Using where +analyze format=json +select * from t1 where nm like '500%' AND fl2 = 0; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "nested_loop": [ + { + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx1", "idx2"], + "key": "idx1", + "key_length": "35", + "used_key_parts": ["nm"], + "r_loops": 1, + "rows": 1, + "r_rows": 1, + "r_table_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "filtered": 49.20000076, + "r_filtered": 100, + "index_condition": "t1.nm like '500%'", + "attached_condition": "t1.fl2 = 0" + } + } + ] + } +} +select * from t1 where nm like '500%' AND fl2 = 0; +pk nm fl1 fl2 +517 500 0 0 +truncate table name; +truncate table flag2; +truncate table t1; +insert into name(nm) select seq from seq_1_to_1000 order by rand(17); +insert into flag2(fl2) select seq mod 10 from seq_1_to_1000 order by rand(19); +insert into t1(nm,fl2) +select nm, fl2 from name, flag2 where name.pk = flag2.pk; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +select '607%' as a; +a +607% +set optimizer_switch='rowid_filter=on'; +explain +select * from t1 where nm like '607%' AND fl2 = 0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx1,idx2 idx1 35 NULL 1 Using index condition; Using where +select * from t1 where nm like '607%' AND fl2 = 0; +pk nm fl1 fl2 +721 607 0 0 +truncate table name; +truncate table flag2; +truncate table t1; +insert into name(nm) select seq from seq_1_to_10000 order by rand(17); +insert into flag2(fl2) select seq mod 100 from seq_1_to_10000 order by rand(19); +insert into t1(nm,fl2) +select nm, fl2 from name, flag2 where name.pk = flag2.pk; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +select '75%' as a; +a +75% +set optimizer_switch='rowid_filter=on'; +explain +select * from t1 where nm like '75%' AND fl2 = 0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref|filter idx1,idx2 idx2|idx1 2|35 const 55 (1%) Using where; Using rowid filter +analyze format=json +select * from t1 where nm like '75%' AND fl2 = 0; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "nested_loop": [ + { + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["idx1", "idx2"], + "key": "idx2", + "key_length": "2", + "used_key_parts": ["fl2"], + "ref": ["const"], + "rowid_filter": { + "range": { + "key": "idx1", + "used_key_parts": ["nm"] + }, + "rows": 115, + "selectivity_pct": 1.15, + "r_rows": 111, + "r_lookups": 100, + "r_selectivity_pct": 2, + "r_buffer_size": "REPLACED", + "r_filling_time_ms": "REPLACED" + }, + "r_loops": 1, + "rows": 55, 
+ "r_rows": 2, + "r_table_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "filtered": 1.149999976, + "r_filtered": 100, + "attached_condition": "t1.nm like '75%'" + } + } + ] + } +} +select * from t1 where nm like '75%' AND fl2 = 0; +pk nm fl1 fl2 +4543 7503 0 0 +7373 7518 0 0 +drop table name, flag2; +drop table t1; +create table t1 ( +pk int primary key auto_increment, +nm char(255), +fl1 tinyint default 0, +fl2 int default 0, +index idx1(nm, fl1), +index idx2(fl2) +) engine=myisam; +create table name ( +pk int primary key auto_increment, +nm bigint +) engine=myisam; +create table flag2 ( +pk int primary key auto_increment, +fl2 int +) engine=myisam; +insert into name(nm) select seq from seq_1_to_10000 order by rand(17); +insert into flag2(fl2) select seq mod 10 from seq_1_to_10000 order by rand(19); +insert into t1(nm,fl2) +select nm, fl2 from name, flag2 where name.pk = flag2.pk; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +select * from t1 +where +( +nm like '3400%' or nm like '3402%' or nm like '3403%' or +nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or +nm like '3409%' or +nm like '3411%' or nm like '3412%' or nm like '3413%' or +nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or +nm like '3418%' or nm like '3419%' or +nm like '3421%' or nm like '3422%' or nm like '3423%' or +nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or +nm like '3428%' or nm like '3429%' or +nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or +nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or +nm like '3439%' or +nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or +nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or +nm like '3448%' +) and fl2 = 0; +pk nm fl1 fl2 +analyze format=json select * from t1 +where +( +nm like '3400%' or nm like '3402%' or nm like '3403%' or +nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or +nm like '3409%' or +nm like '3411%' or nm like '3412%' or nm like '3413%' or +nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or +nm like '3418%' or nm like '3419%' or +nm like '3421%' or nm like '3422%' or nm like '3423%' or +nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or +nm like '3428%' or nm like '3429%' or +nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or +nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or +nm like '3439%' or +nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or +nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or +nm like '3448%' +) and fl2 = 0; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "nested_loop": [ + { + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["idx1", "idx2"], + "key": "idx2", + "key_length": "5", + "used_key_parts": ["fl2"], + "ref": ["const"], + "rowid_filter": { + "range": { + "key": "idx1", + "used_key_parts": ["nm"] + }, + "rows": 44, + "selectivity_pct": 0.44, + "r_rows": 44, + "r_lookups": 1000, + "r_selectivity_pct": 0, + "r_buffer_size": "REPLACED", + "r_filling_time_ms": "REPLACED" + }, + "r_loops": 1, + "rows": 863, + "r_rows": 0, + "r_table_time_ms": "REPLACED", + 
"r_other_time_ms": "REPLACED", + "filtered": 0.439999998, + "r_filtered": 100, + "attached_condition": "t1.nm like '3400%' or t1.nm like '3402%' or t1.nm like '3403%' or t1.nm like '3404%' or t1.nm like '3405%' or t1.nm like '3406%' or t1.nm like '3407%' or t1.nm like '3409%' or t1.nm like '3411%' or t1.nm like '3412%' or t1.nm like '3413%' or t1.nm like '3414%' or t1.nm like '3415%' or t1.nm like '3416%' or t1.nm like '3417%' or t1.nm like '3418%' or t1.nm like '3419%' or t1.nm like '3421%' or t1.nm like '3422%' or t1.nm like '3423%' or t1.nm like '3424%' or t1.nm like '3425%' or t1.nm like '3426%' or t1.nm like '3427%' or t1.nm like '3428%' or t1.nm like '3429%' or t1.nm like '3430%' or t1.nm like '3431%' or t1.nm like '3432%' or t1.nm like '3433%' or t1.nm like '3434%' or t1.nm like '3435%' or t1.nm like '3436%' or t1.nm like '3437%' or t1.nm like '3439%' or t1.nm like '3440%' or t1.nm like '3441%' or t1.nm like '3442%' or t1.nm like '3443%' or t1.nm like '3444%' or t1.nm like '3445%' or t1.nm like '3446%' or t1.nm like '3447%' or t1.nm like '3448%'" + } + } + ] + } +} +create table t0 select * from t1 where nm like '34%'; +delete from t1 using t1,t0 where t1.nm=t0.nm; +analyze format=json select * from t1 +where +( +nm like '3400%' or nm like '3402%' or nm like '3403%' or +nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or +nm like '3409%' or +nm like '3411%' or nm like '3412%' or nm like '3413%' or +nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or +nm like '3418%' or nm like '3419%' or +nm like '3421%' or nm like '3422%' or nm like '3423%' or +nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or +nm like '3428%' or nm like '3429%' or +nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or +nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or +nm like '3439%' or +nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or +nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or +nm like '3448%' +) and fl2 = 0; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "nested_loop": [ + { + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["idx1", "idx2"], + "key": "idx2", + "key_length": "5", + "used_key_parts": ["fl2"], + "ref": ["const"], + "rowid_filter": { + "range": { + "key": "idx1", + "used_key_parts": ["nm"] + }, + "rows": 44, + "selectivity_pct": 0.44, + "r_rows": 0, + "r_lookups": 0, + "r_selectivity_pct": 0, + "r_buffer_size": "REPLACED", + "r_filling_time_ms": "REPLACED" + }, + "r_loops": 1, + "rows": 853, + "r_rows": 0, + "filtered": 0.439999998, + "r_filtered": 100, + "attached_condition": "t1.nm like '3400%' or t1.nm like '3402%' or t1.nm like '3403%' or t1.nm like '3404%' or t1.nm like '3405%' or t1.nm like '3406%' or t1.nm like '3407%' or t1.nm like '3409%' or t1.nm like '3411%' or t1.nm like '3412%' or t1.nm like '3413%' or t1.nm like '3414%' or t1.nm like '3415%' or t1.nm like '3416%' or t1.nm like '3417%' or t1.nm like '3418%' or t1.nm like '3419%' or t1.nm like '3421%' or t1.nm like '3422%' or t1.nm like '3423%' or t1.nm like '3424%' or t1.nm like '3425%' or t1.nm like '3426%' or t1.nm like '3427%' or t1.nm like '3428%' or t1.nm like '3429%' or t1.nm like '3430%' or t1.nm like '3431%' or t1.nm like '3432%' or t1.nm like '3433%' or t1.nm like '3434%' or t1.nm like '3435%' or t1.nm like '3436%' or t1.nm like '3437%' or t1.nm like '3439%' or t1.nm 
like '3440%' or t1.nm like '3441%' or t1.nm like '3442%' or t1.nm like '3443%' or t1.nm like '3444%' or t1.nm like '3445%' or t1.nm like '3446%' or t1.nm like '3447%' or t1.nm like '3448%'" + } + } + ] + } +} +drop table t0; +set optimizer_switch='rowid_filter=default'; +drop table name, flag2; +drop table t1; set @@use_stat_tables=@save_use_stat_tables; SET GLOBAL innodb_stats_persistent=@save_stats_persistent; # @@ -2340,6 +2805,10 @@ insert into t1 values (81,'a','a',20),(82,'a','a',0),(83,'a','a',0),(84,'a','a',null), (85,'a','a',-1),(86,'a','a',5),(87,'a','a',null),(88,'a','a',160), (89,null,null,null),(90,'a','a',14785),(91,'a','a',0),(92,'a','a',null); +analyze table t1; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK ( select * from t1 where (f1 is null and f2 is null) and (f2 between 'a' and 'z' or f1 in ('a'))) union @@ -2458,46 +2927,44 @@ drop table t1, t2; # create table t1 (a int, b int, key (b), key (a)) engine=innodb; insert into t1 -select (rand(1)*1000)/10, (rand(1001)*1000)/50 from seq_1_to_1000; +select (rand(1)*1000)/10, (rand(1001)*1000)/20 from seq_1_to_1000; analyze table t1; Table Op Msg_type Msg_text test.t1 analyze status Engine-independent statistics collected test.t1 analyze status OK set @save_optimizer_switch= @@optimizer_switch; set optimizer_switch='rowid_filter=off'; -select count(*) from t1 where a in (22,83,11) and b=2; +select count(*) from t1 where a between 21 and 30 and b=2; count(*) -6 -explain extended select count(*) from t1 where a in (22,83,11) and b=2; +5 +explain extended select count(*) from t1 where a between 21 and 30 and b=2; id select_type table type possible_keys key key_len ref rows filtered Extra -1 SIMPLE t1 range b,a a 5 NULL 33 5.90 Using index condition; Using where +1 SIMPLE t1 ref b,a b 5 const 24 9.60 Using where Warnings: -Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`b` = 2 and `test`.`t1`.`a` in (22,83,11) -select * from t1 where a in (22,83,11) and b=2; +Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`b` = 2 and `test`.`t1`.`a` between 21 and 30 +select * from t1 where a between 21 and 30 and b=2; a b -11 2 -11 2 -11 2 +30 2 +21 2 22 2 -83 2 -83 2 +26 2 +25 2 set optimizer_switch='rowid_filter=on'; -select count(*) from t1 where a in (22,83,11) and b=2; +select count(*) from t1 where a between 21 and 30 and b=2; count(*) -6 -explain extended select count(*) from t1 where a in (22,83,11) and b=2; +5 +explain extended select count(*) from t1 where a between 21 and 30 and b=2; id select_type table type possible_keys key key_len ref rows filtered Extra -1 SIMPLE t1 ref|filter b,a b|a 5|5 const 59 (3%) 3.30 Using where; Using rowid filter +1 SIMPLE t1 ref|filter b,a b|a 5|5 const 24 (10%) 9.60 Using where; Using rowid filter Warnings: -Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`b` = 2 and `test`.`t1`.`a` in (22,83,11) -select * from t1 where a in (22,83,11) and b=2; +Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`b` = 2 and `test`.`t1`.`a` between 21 and 30 +select * from t1 where a between 21 and 30 and b=2; a b -11 2 -11 2 -83 2 -11 2 -83 2 +30 2 +21 2 22 2 +26 2 +25 2 drop table t1; set optimizer_switch=@save_optimizer_switch; SET SESSION DEFAULT_STORAGE_ENGINE=DEFAULT; @@ -2652,7 +3119,7 @@ set global innodb_stats_persistent= @stats.save; # CREATE TABLE t1 ( id int(11) unsigned NOT NULL AUTO_INCREMENT, -domain varchar(255) NOT NULL, 
+domain varchar(32) NOT NULL, registrant_name varchar(255) DEFAULT NULL, registrant_organization varchar(255) DEFAULT NULL, registrant_street1 varchar(255) DEFAULT NULL, @@ -2743,21 +3210,216 @@ null, 'SUELZBURGSTRASSE 158A', null, null, null, null, 'KOELN', '50937', 'MAXIMILIAN V. KETELHODT', null, 'SUELZBURGSTRASSE 158A', null, null, null, null, 'KOELN', '50937', 'GERMANY', 'ICANN@EXPIRES-2009.WEBCARE24.COM', '492214307580', '', '2017-01-30 10:08:29'); +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +VALUES +('www.mailhost.i-dev.fr', null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, '', '2016-12-22 09:18:28'); +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +VALUES +('www.mailhost.i-dev.fr', null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, '', '2016-12-22 09:18:28'); +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +VALUES +('www.mailhost.i-dev.fr', null, null, null, null, null, null, 
null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, '', '2016-12-22 09:18:28'); +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +VALUES +('www.mailhost.i-dev.fr', null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, '', '2016-12-22 09:18:28'); +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +VALUES +('www.mailhost.i-dev.fr', null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, '', '2016-12-22 09:18:28'); +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +VALUES +('www.mailhost.i-dev.fr', null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, '', '2016-12-22 09:18:28'); +INSERT INTO t1 ( +domain, 
registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +VALUES +('www.mailhost.i-dev.fr', null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, '', '2016-12-22 09:18:28'); +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +VALUES +('www.mailhost.i-dev.fr', null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, '', '2016-12-22 09:18:28'); +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +SELECT +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, 
technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp +FROM t1; +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +SELECT +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp +FROM t1; +ANALYZE TABLE t1 PERSISTENT FOR ALL; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze Warning Engine-independent statistics are not collected for column 'json' +test.t1 analyze status OK SET @save_optimizer_switch=@@optimizer_switch; SET optimizer_switch='mrr=on,mrr_sort_keys=on'; SELECT * FROM t1 WHERE 1 = 1 AND domain = 'www.mailhost.i-dev.fr' AND -timestamp >= DATE_ADD(CURRENT_TIMESTAMP, INTERVAL -1 MONTH) +timestamp >= DATE_ADD('2017-01-30 08:24:51', INTERVAL -1 MONTH) ORDER BY timestamp DESC; id domain registrant_name registrant_organization registrant_street1 registrant_street2 registrant_street3 registrant_street4 registrant_street5 registrant_city registrant_postal_code registrant_country registrant_email registrant_telephone administrative_name administrative_organization administrative_street1 administrative_street2 administrative_street3 administrative_street4 administrative_street5 administrative_city administrative_postal_code administrative_country administrative_email administrative_telephone technical_name technical_organization technical_street1 technical_street2 technical_street3 technical_street4 technical_street5 technical_city technical_postal_code technical_country technical_email technical_telephone json timestamp +80551 www.mailhost.i-dev.fr NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 2017-01-30 10:00:56 +80579 www.mailhost.i-dev.fr NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 2017-01-30 10:00:56 +80594 www.mailhost.i-dev.fr NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 2017-01-30 10:00:56 +80609 www.mailhost.i-dev.fr NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 2017-01-30 10:00:56 EXPLAIN EXTENDED SELECT * FROM t1 WHERE 1 = 1 AND domain = 'www.mailhost.i-dev.fr' AND -timestamp >= DATE_ADD(CURRENT_TIMESTAMP, INTERVAL -1 MONTH) +timestamp >= DATE_ADD('2017-01-30 08:24:51', INTERVAL -1 MONTH) ORDER BY timestamp DESC; id select_type table type possible_keys key key_len ref rows filtered Extra -1 SIMPLE t1 ref|filter ixEventWhoisDomainDomain,ixEventWhoisDomainTimestamp ixEventWhoisDomainDomain|ixEventWhoisDomainTimestamp 767|4 const 2 (14%) 14.29 Using index condition; Using where; Using filesort; Using rowid filter +1 SIMPLE t1 ALL ixEventWhoisDomainDomain,ixEventWhoisDomainTimestamp NULL NULL NULL 60 22.22 Using where; Using filesort Warnings: -Note 1003 select `test`.`t1`.`id` AS `id`,`test`.`t1`.`domain` AS `domain`,`test`.`t1`.`registrant_name` AS `registrant_name`,`test`.`t1`.`registrant_organization` AS `registrant_organization`,`test`.`t1`.`registrant_street1` AS `registrant_street1`,`test`.`t1`.`registrant_street2` AS `registrant_street2`,`test`.`t1`.`registrant_street3` AS `registrant_street3`,`test`.`t1`.`registrant_street4` AS `registrant_street4`,`test`.`t1`.`registrant_street5` AS `registrant_street5`,`test`.`t1`.`registrant_city` AS `registrant_city`,`test`.`t1`.`registrant_postal_code` AS `registrant_postal_code`,`test`.`t1`.`registrant_country` AS `registrant_country`,`test`.`t1`.`registrant_email` AS `registrant_email`,`test`.`t1`.`registrant_telephone` AS `registrant_telephone`,`test`.`t1`.`administrative_name` AS `administrative_name`,`test`.`t1`.`administrative_organization` AS `administrative_organization`,`test`.`t1`.`administrative_street1` AS `administrative_street1`,`test`.`t1`.`administrative_street2` AS `administrative_street2`,`test`.`t1`.`administrative_street3` AS `administrative_street3`,`test`.`t1`.`administrative_street4` AS `administrative_street4`,`test`.`t1`.`administrative_street5` AS `administrative_street5`,`test`.`t1`.`administrative_city` AS `administrative_city`,`test`.`t1`.`administrative_postal_code` AS `administrative_postal_code`,`test`.`t1`.`administrative_country` AS `administrative_country`,`test`.`t1`.`administrative_email` AS `administrative_email`,`test`.`t1`.`administrative_telephone` AS `administrative_telephone`,`test`.`t1`.`technical_name` AS `technical_name`,`test`.`t1`.`technical_organization` AS `technical_organization`,`test`.`t1`.`technical_street1` AS `technical_street1`,`test`.`t1`.`technical_street2` AS `technical_street2`,`test`.`t1`.`technical_street3` AS `technical_street3`,`test`.`t1`.`technical_street4` AS `technical_street4`,`test`.`t1`.`technical_street5` AS `technical_street5`,`test`.`t1`.`technical_city` AS `technical_city`,`test`.`t1`.`technical_postal_code` AS `technical_postal_code`,`test`.`t1`.`technical_country` AS `technical_country`,`test`.`t1`.`technical_email` AS `technical_email`,`test`.`t1`.`technical_telephone` AS `technical_telephone`,`test`.`t1`.`json` AS `json`,`test`.`t1`.`timestamp` AS `timestamp` from `test`.`t1` 
where `test`.`t1`.`domain` = 'www.mailhost.i-dev.fr' and `test`.`t1`.`timestamp` >= <cache>(current_timestamp() + interval -1 month) order by `test`.`t1`.`timestamp` desc +Note 1003 select `test`.`t1`.`id` AS `id`,`test`.`t1`.`domain` AS `domain`,`test`.`t1`.`registrant_name` AS `registrant_name`,`test`.`t1`.`registrant_organization` AS `registrant_organization`,`test`.`t1`.`registrant_street1` AS `registrant_street1`,`test`.`t1`.`registrant_street2` AS `registrant_street2`,`test`.`t1`.`registrant_street3` AS `registrant_street3`,`test`.`t1`.`registrant_street4` AS `registrant_street4`,`test`.`t1`.`registrant_street5` AS `registrant_street5`,`test`.`t1`.`registrant_city` AS `registrant_city`,`test`.`t1`.`registrant_postal_code` AS `registrant_postal_code`,`test`.`t1`.`registrant_country` AS `registrant_country`,`test`.`t1`.`registrant_email` AS `registrant_email`,`test`.`t1`.`registrant_telephone` AS `registrant_telephone`,`test`.`t1`.`administrative_name` AS `administrative_name`,`test`.`t1`.`administrative_organization` AS `administrative_organization`,`test`.`t1`.`administrative_street1` AS `administrative_street1`,`test`.`t1`.`administrative_street2` AS `administrative_street2`,`test`.`t1`.`administrative_street3` AS `administrative_street3`,`test`.`t1`.`administrative_street4` AS `administrative_street4`,`test`.`t1`.`administrative_street5` AS `administrative_street5`,`test`.`t1`.`administrative_city` AS `administrative_city`,`test`.`t1`.`administrative_postal_code` AS `administrative_postal_code`,`test`.`t1`.`administrative_country` AS `administrative_country`,`test`.`t1`.`administrative_email` AS `administrative_email`,`test`.`t1`.`administrative_telephone` AS `administrative_telephone`,`test`.`t1`.`technical_name` AS `technical_name`,`test`.`t1`.`technical_organization` AS `technical_organization`,`test`.`t1`.`technical_street1` AS `technical_street1`,`test`.`t1`.`technical_street2` AS `technical_street2`,`test`.`t1`.`technical_street3` AS `technical_street3`,`test`.`t1`.`technical_street4` AS `technical_street4`,`test`.`t1`.`technical_street5` AS `technical_street5`,`test`.`t1`.`technical_city` AS `technical_city`,`test`.`t1`.`technical_postal_code` AS `technical_postal_code`,`test`.`t1`.`technical_country` AS `technical_country`,`test`.`t1`.`technical_email` AS `technical_email`,`test`.`t1`.`technical_telephone` AS `technical_telephone`,`test`.`t1`.`json` AS `json`,`test`.`t1`.`timestamp` AS `timestamp` from `test`.`t1` where `test`.`t1`.`domain` = 'www.mailhost.i-dev.fr' and `test`.`t1`.`timestamp` >= <cache>('2017-01-30 08:24:51' + interval -1 month) order by `test`.`t1`.`timestamp` desc SET optimizer_switch=@save_optimizer_switch; DROP TABLE t1; # @@ -2919,6 +3581,10 @@ insert into filt(id,aceid,clid,fh) values (6341490487802728361,6341490487802728360,1,1291319099896431785), (6341490487802728362,6341490487802728360,1,8948400944397203540), (6341490487802728363,6341490487802728361,1,6701841652906431497); +insert into filt select id+10000,aceid,clid,fh from filt; +insert into filt select id+20000,aceid,clid,fh from filt; +insert into filt select id+40000,aceid,clid,fh from filt; +insert into filt select id+80000,aceid,clid,fh from filt; analyze table filt, acei, acli; Table Op Msg_type Msg_text test.filt analyze status Engine-independent statistics collected @@ -2943,7 +3609,7 @@ fi.fh in (6311439873746261694,-397087483897438286, id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t index_merge PRIMARY,acli_rid,acli_tp acli_tp,acli_rid 2,767 NULL 
2 100.00 Using intersect(acli_tp,acli_rid); Using where; Using index 1 SIMPLE a ref PRIMARY,acei_aclid acei_aclid 8 test.t.id 1 100.00 Using where -1 SIMPLE fi ref filt_aceid,filt_fh filt_aceid 8 test.a.id 1 17.14 Using where +1 SIMPLE fi ref filt_aceid,filt_fh filt_aceid 8 test.a.id 24 14.46 Using where Warnings: Note 1003 select `test`.`t`.`id` AS `id`,`test`.`fi`.`id` AS `id`,`test`.`fi`.`aceid` AS `aceid`,`test`.`fi`.`clid` AS `clid`,`test`.`fi`.`fh` AS `fh` from `test`.`acli` `t` join `test`.`acei` `a` join `test`.`filt` `fi` where `test`.`t`.`tp` = 121 and `test`.`a`.`atp` = 1 and `test`.`fi`.`aceid` = `test`.`a`.`id` and `test`.`a`.`aclid` = `test`.`t`.`id` and `test`.`t`.`rid` = 'B5FCC8C7111E4E3CBC21AAF5012F59C2' and `test`.`fi`.`fh` in (6311439873746261694,-397087483897438286,8518228073041491534,-5420422472375069774) set statement optimizer_switch='rowid_filter=off' for select t.id, fi.* @@ -2958,6 +3624,36 @@ fi.fh in (6311439873746261694,-397087483897438286, id id aceid clid fh 3080602882609775594 3080602882609775600 3080602882609775598 1 6311439873746261694 3080602882609775594 3080602882609775601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609785600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609785601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609795600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609795601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609805600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609805601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609815600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609815601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609825600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609825601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609835600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609835601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609845600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609845601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609855600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609855601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609865600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609865601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609875600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609875601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609885600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609885601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609895600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609895601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609905600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609905601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609915600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609915601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609925600 3080602882609775598 1 
6311439873746261694 +3080602882609775594 3080602882609925601 3080602882609775598 1 6311439873746261694 set statement optimizer_switch='rowid_filter=on' for explain extended select t.id, fi.* from (acli t inner join acei a on a.aclid = t.id) inner join filt fi on a.id = fi.aceid @@ -2970,7 +3666,7 @@ fi.fh in (6311439873746261694,-397087483897438286, id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t index_merge PRIMARY,acli_rid,acli_tp acli_tp,acli_rid 2,767 NULL 2 100.00 Using intersect(acli_tp,acli_rid); Using where; Using index 1 SIMPLE a ref PRIMARY,acei_aclid acei_aclid 8 test.t.id 1 100.00 Using where -1 SIMPLE fi ref|filter filt_aceid,filt_fh filt_aceid|filt_fh 8|8 test.a.id 1 (17%) 17.14 Using where; Using rowid filter +1 SIMPLE fi ref|filter filt_aceid,filt_fh filt_aceid|filt_fh 8|8 test.a.id 24 (14%) 14.46 Using where; Using rowid filter Warnings: Note 1003 select `test`.`t`.`id` AS `id`,`test`.`fi`.`id` AS `id`,`test`.`fi`.`aceid` AS `aceid`,`test`.`fi`.`clid` AS `clid`,`test`.`fi`.`fh` AS `fh` from `test`.`acli` `t` join `test`.`acei` `a` join `test`.`filt` `fi` where `test`.`t`.`tp` = 121 and `test`.`a`.`atp` = 1 and `test`.`fi`.`aceid` = `test`.`a`.`id` and `test`.`a`.`aclid` = `test`.`t`.`id` and `test`.`t`.`rid` = 'B5FCC8C7111E4E3CBC21AAF5012F59C2' and `test`.`fi`.`fh` in (6311439873746261694,-397087483897438286,8518228073041491534,-5420422472375069774) set statement optimizer_switch='rowid_filter=on' for select t.id, fi.* @@ -2985,6 +3681,36 @@ fi.fh in (6311439873746261694,-397087483897438286, id id aceid clid fh 3080602882609775594 3080602882609775600 3080602882609775598 1 6311439873746261694 3080602882609775594 3080602882609775601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609785600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609785601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609795600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609795601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609805600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609805601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609815600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609815601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609825600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609825601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609835600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609835601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609845600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609845601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609855600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609855601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609865600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609865601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609875600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609875601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609885600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609885601 
3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609895600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609895601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609905600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609905601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609915600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609915601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609925600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609925601 3080602882609775598 1 6311439873746261694 set optimizer_switch='mrr=on'; set join_cache_level=6; set statement optimizer_switch='rowid_filter=off' for explain extended select t.id, fi.* @@ -2999,7 +3725,7 @@ fi.fh in (6311439873746261694,-397087483897438286, id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t index_merge PRIMARY,acli_rid,acli_tp acli_tp,acli_rid 2,767 NULL 2 100.00 Using intersect(acli_tp,acli_rid); Using where; Using index 1 SIMPLE a ref PRIMARY,acei_aclid acei_aclid 8 test.t.id 1 100.00 Using where; Using join buffer (flat, BKA join); Rowid-ordered scan -1 SIMPLE fi ref filt_aceid,filt_fh filt_aceid 8 test.a.id 1 17.14 Using where; Using join buffer (incremental, BKA join); Rowid-ordered scan +1 SIMPLE fi ref filt_aceid,filt_fh filt_aceid 8 test.a.id 24 14.46 Using where; Using join buffer (incremental, BKA join); Rowid-ordered scan Warnings: Note 1003 select `test`.`t`.`id` AS `id`,`test`.`fi`.`id` AS `id`,`test`.`fi`.`aceid` AS `aceid`,`test`.`fi`.`clid` AS `clid`,`test`.`fi`.`fh` AS `fh` from `test`.`acli` `t` join `test`.`acei` `a` join `test`.`filt` `fi` where `test`.`t`.`tp` = 121 and `test`.`a`.`atp` = 1 and `test`.`fi`.`aceid` = `test`.`a`.`id` and `test`.`a`.`aclid` = `test`.`t`.`id` and `test`.`t`.`rid` = 'B5FCC8C7111E4E3CBC21AAF5012F59C2' and `test`.`fi`.`fh` in (6311439873746261694,-397087483897438286,8518228073041491534,-5420422472375069774) set statement optimizer_switch='rowid_filter=off' for select t.id, fi.* @@ -3014,6 +3740,36 @@ fi.fh in (6311439873746261694,-397087483897438286, id id aceid clid fh 3080602882609775594 3080602882609775600 3080602882609775598 1 6311439873746261694 3080602882609775594 3080602882609775601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609785600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609785601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609795600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609795601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609805600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609805601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609815600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609815601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609825600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609825601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609835600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609835601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609845600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609845601 
3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609855600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609855601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609865600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609865601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609875600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609875601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609885600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609885601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609895600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609895601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609905600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609905601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609915600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609915601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609925600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609925601 3080602882609775598 1 6311439873746261694 set statement optimizer_switch='rowid_filter=on' for explain extended select t.id, fi.* from (acli t inner join acei a on a.aclid = t.id) inner join filt fi on a.id = fi.aceid @@ -3026,7 +3782,7 @@ fi.fh in (6311439873746261694,-397087483897438286, id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t index_merge PRIMARY,acli_rid,acli_tp acli_tp,acli_rid 2,767 NULL 2 100.00 Using intersect(acli_tp,acli_rid); Using where; Using index 1 SIMPLE a ref PRIMARY,acei_aclid acei_aclid 8 test.t.id 1 100.00 Using where; Using join buffer (flat, BKA join); Rowid-ordered scan -1 SIMPLE fi ref|filter filt_aceid,filt_fh filt_aceid|filt_fh 8|8 test.a.id 1 (17%) 17.14 Using where; Using join buffer (incremental, BKA join); Rowid-ordered scan; Using rowid filter +1 SIMPLE fi ref|filter filt_aceid,filt_fh filt_aceid|filt_fh 8|8 test.a.id 24 (14%) 14.46 Using where; Using join buffer (incremental, BKA join); Rowid-ordered scan; Using rowid filter Warnings: Note 1003 select `test`.`t`.`id` AS `id`,`test`.`fi`.`id` AS `id`,`test`.`fi`.`aceid` AS `aceid`,`test`.`fi`.`clid` AS `clid`,`test`.`fi`.`fh` AS `fh` from `test`.`acli` `t` join `test`.`acei` `a` join `test`.`filt` `fi` where `test`.`t`.`tp` = 121 and `test`.`a`.`atp` = 1 and `test`.`fi`.`aceid` = `test`.`a`.`id` and `test`.`a`.`aclid` = `test`.`t`.`id` and `test`.`t`.`rid` = 'B5FCC8C7111E4E3CBC21AAF5012F59C2' and `test`.`fi`.`fh` in (6311439873746261694,-397087483897438286,8518228073041491534,-5420422472375069774) set statement optimizer_switch='rowid_filter=on' for select t.id, fi.* @@ -3041,6 +3797,36 @@ fi.fh in (6311439873746261694,-397087483897438286, id id aceid clid fh 3080602882609775594 3080602882609775600 3080602882609775598 1 6311439873746261694 3080602882609775594 3080602882609775601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609785600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609785601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609795600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609795601 3080602882609775598 1 6311439873746261694 
+3080602882609775594 3080602882609805600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609805601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609815600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609815601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609825600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609825601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609835600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609835601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609845600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609845601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609855600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609855601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609865600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609865601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609875600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609875601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609885600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609885601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609895600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609895601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609905600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609905601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609915600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609915601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609925600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609925601 3080602882609775598 1 6311439873746261694 set statement optimizer_switch='rowid_filter=on' for analyze format=json select t.id, fi.* from (acli t inner join acei a on a.aclid = t.id) inner join filt fi on a.id = fi.aceid @@ -3131,23 +3917,24 @@ ANALYZE "key": "filt_fh", "used_key_parts": ["fh"] }, - "rows": 6, - "selectivity_pct": 17.14285714, - "r_rows": 5, + "rows": 81, + "selectivity_pct": 14.46428571, + "r_rows": 80, + "r_lookups": 80, "r_selectivity_pct": 40, "r_buffer_size": "REPLACED", "r_filling_time_ms": "REPLACED" }, "r_loops": 1, - "rows": 1, - "r_rows": 2, + "rows": 24, + "r_rows": 32, "r_table_time_ms": "REPLACED", "r_other_time_ms": "REPLACED", - "filtered": 17.1428566, + "filtered": 14.46428585, "r_filtered": 100 }, "buffer_type": "incremental", - "buffer_size": "603", + "buffer_size": "4Kb", "join_type": "BKA", "mrr_type": "Rowid-ordered scan", "attached_condition": "fi.fh in (6311439873746261694,-397087483897438286,8518228073041491534,-5420422472375069774)", @@ -3170,39 +3957,100 @@ CREATE TABLE t1 (pk int NOT NULL, c1 varchar(1)) engine=innodb; INSERT INTO t1 VALUES (1,NULL),(15,'o'),(16,'x'),(19,'t'),(35,'k'),(36,'h'),(42,'t'),(43,'h'), (53,'l'),(62,'a'),(71,NULL),(79,'u'),(128,'y'),(129,NULL),(133,NULL); +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; CREATE TABLE t2 ( -i1 int, c1 varchar(1) NOT NULL, KEY c1 (c1), KEY i1 (i1) +i1 int, c1 varchar(1) NOT NULL, +filler1 
char(255) default '0', filler2 char(255) default '0', +KEY c1 (c1), KEY i1 (i1) ) engine=innodb; -INSERT INTO t2 VALUES -(1,'1'),(NULL,'1'),(42,'t'),(NULL,'1'),(79,'u'),(NULL,'1'), -(NULL,'4'),(NULL,'4'),(NULL,'1'),(NULL,'u'),(2,'1'),(NULL,'w'); +INSERT INTO t2(i1,c1) VALUES +(NULL,'1'),(1,'1'),(2,'t'),(3,'1'),(4,'u'),(5,'1'), +(6,'4'),(7,'4'),(8,'1'),(1,'u'),(2,'1'),(NULL,'w'); +INSERT INTO t2 SELECT * FROM t2; +INSERT INTO t2 SELECT * FROM t2; INSERT INTO t2 SELECT * FROM t2; +INSERT INTO t2 SELECT * FROM t2; +ANALYZE TABLE t1,t2 PERSISTENT FOR ALL; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +test.t2 analyze status Engine-independent statistics collected +test.t2 analyze status OK SELECT * FROM t1 WHERE t1.c1 NOT IN (SELECT t2.c1 FROM t2, t1 AS a1 -WHERE t2.i1 = t1.pk AND t2.i1 IS NOT NULL); +WHERE t2.i1 = t1.pk AND t2.i1 BETWEEN 3 AND 5); pk c1 +1 NULL +15 o +16 x +19 t +35 k +36 h +42 t +43 h +53 l +62 a +71 NULL +79 u +128 y +129 NULL +133 NULL +1 NULL +15 o +16 x +19 t +35 k +36 h +42 t +43 h +53 l +62 a +71 NULL +79 u +128 y +129 NULL +133 NULL +1 NULL +15 o +16 x +19 t +35 k +36 h +42 t +43 h +53 l +62 a +71 NULL +79 u +128 y +129 NULL +133 NULL +1 NULL 15 o 16 x 19 t 35 k 36 h +42 t 43 h 53 l 62 a 71 NULL +79 u 128 y 129 NULL 133 NULL EXPLAIN EXTENDED SELECT * FROM t1 WHERE t1.c1 NOT IN (SELECT t2.c1 FROM t2, t1 AS a1 -WHERE t2.i1 = t1.pk AND t2.i1 IS NOT NULL); +WHERE t2.i1 = t1.pk AND t2.i1 BETWEEN 3 AND 5); id select_type table type possible_keys key key_len ref rows filtered Extra -1 PRIMARY t1 ALL NULL NULL NULL NULL 15 100.00 Using where -2 DEPENDENT SUBQUERY t2 ref|filter c1,i1 c1|i1 3|5 func 6 (33%) 33.33 Using where; Full scan on NULL key; Using rowid filter -2 DEPENDENT SUBQUERY a1 ALL NULL NULL NULL NULL 15 100.00 Using join buffer (flat, BNL join) +1 PRIMARY t1 ALL NULL NULL NULL NULL 60 100.00 Using where +2 DEPENDENT SUBQUERY t2 ref|filter c1,i1 c1|i1 3|5 func 38 (25%) 25.00 Using where; Full scan on NULL key; Using rowid filter +2 DEPENDENT SUBQUERY a1 ALL NULL NULL NULL NULL 60 100.00 Using join buffer (flat, BNL join) Warnings: Note 1276 Field or reference 'test.t1.pk' of SELECT #2 was resolved in SELECT #1 -Note 1003 /* select#1 */ select `test`.`t1`.`pk` AS `pk`,`test`.`t1`.`c1` AS `c1` from `test`.`t1` where !<expr_cache><`test`.`t1`.`c1`,`test`.`t1`.`pk`>(<in_optimizer>(`test`.`t1`.`c1`,<exists>(/* select#2 */ select `test`.`t2`.`c1` from `test`.`t2` join `test`.`t1` `a1` where `test`.`t2`.`i1` = `test`.`t1`.`pk` and `test`.`t2`.`i1` is not null and trigcond(<cache>(`test`.`t1`.`c1`) = `test`.`t2`.`c1`)))) +Note 1003 /* select#1 */ select `test`.`t1`.`pk` AS `pk`,`test`.`t1`.`c1` AS `c1` from `test`.`t1` where !<expr_cache><`test`.`t1`.`c1`,`test`.`t1`.`pk`>(<in_optimizer>(`test`.`t1`.`c1`,<exists>(/* select#2 */ select `test`.`t2`.`c1` from `test`.`t2` join `test`.`t1` `a1` where `test`.`t2`.`i1` = `test`.`t1`.`pk` and `test`.`t2`.`i1` between 3 and 5 and trigcond(<cache>(`test`.`t1`.`c1`) = `test`.`t2`.`c1`)))) DROP TABLE t1,t2; set global innodb_stats_persistent= @stats.save; # End of 10.4 tests diff --git a/mysql-test/main/rowid_filter_innodb.test b/mysql-test/main/rowid_filter_innodb.test index 1112d7b69c6..8705a0c9a12 100644 --- a/mysql-test/main/rowid_filter_innodb.test +++ b/mysql-test/main/rowid_filter_innodb.test @@ -38,6 +38,8 @@ insert into t1 values (85,'a','a',-1),(86,'a','a',5),(87,'a','a',null),(88,'a','a',160), 
(89,null,null,null),(90,'a','a',14785),(91,'a','a',0),(92,'a','a',null); +analyze table t1; + let $q= ( select * from t1 where (f1 is null and f2 is null) and (f2 between 'a' and 'z' or f1 in ('a'))) @@ -79,13 +81,13 @@ drop table t1, t2; create table t1 (a int, b int, key (b), key (a)) engine=innodb; insert into t1 -select (rand(1)*1000)/10, (rand(1001)*1000)/50 from seq_1_to_1000; +select (rand(1)*1000)/10, (rand(1001)*1000)/20 from seq_1_to_1000; analyze table t1; let $q= -select count(*) from t1 where a in (22,83,11) and b=2; +select count(*) from t1 where a between 21 and 30 and b=2; let $q1= -select * from t1 where a in (22,83,11) and b=2; +select * from t1 where a between 21 and 30 and b=2; set @save_optimizer_switch= @@optimizer_switch; @@ -230,7 +232,7 @@ set global innodb_stats_persistent= @stats.save; CREATE TABLE t1 ( id int(11) unsigned NOT NULL AUTO_INCREMENT, - domain varchar(255) NOT NULL, + domain varchar(32) NOT NULL, registrant_name varchar(255) DEFAULT NULL, registrant_organization varchar(255) DEFAULT NULL, registrant_street1 varchar(255) DEFAULT NULL, @@ -323,6 +325,66 @@ technical_email, technical_telephone, json, timestamp) VALUES null, 'KOELN', '50937', 'GERMANY', 'ICANN@EXPIRES-2009.WEBCARE24.COM', '492214307580', '', '2017-01-30 10:08:29'); +let $sqi= +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +VALUES +('www.mailhost.i-dev.fr', null, null, null, null, null, null, null, null, + null, null, null, null, null, null, null, null, null, null, null, null, null, + null, null, null, null, null, null, null, null, null, null, null, null, null, + null, null, '', '2016-12-22 09:18:28'); + +eval $sqi; +eval $sqi; +eval $sqi; +eval $sqi; +eval $sqi; +eval $sqi; +eval $sqi; +eval $sqi; + +let $qi= +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +SELECT +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, 
registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp +FROM t1; + +eval $qi; +eval $qi; + +ANALYZE TABLE t1 PERSISTENT FOR ALL; + SET @save_optimizer_switch=@@optimizer_switch; SET optimizer_switch='mrr=on,mrr_sort_keys=on'; @@ -330,7 +392,7 @@ SET optimizer_switch='mrr=on,mrr_sort_keys=on'; let $q= SELECT * FROM t1 WHERE 1 = 1 AND domain = 'www.mailhost.i-dev.fr' AND - timestamp >= DATE_ADD(CURRENT_TIMESTAMP, INTERVAL -1 MONTH) + timestamp >= DATE_ADD('2017-01-30 08:24:51', INTERVAL -1 MONTH) ORDER BY timestamp DESC; eval $q; @@ -503,6 +565,11 @@ insert into filt(id,aceid,clid,fh) values (6341490487802728362,6341490487802728360,1,8948400944397203540), (6341490487802728363,6341490487802728361,1,6701841652906431497); +insert into filt select id+10000,aceid,clid,fh from filt; +insert into filt select id+20000,aceid,clid,fh from filt; +insert into filt select id+40000,aceid,clid,fh from filt; +insert into filt select id+80000,aceid,clid,fh from filt; + analyze table filt, acei, acli; let $q= @@ -553,19 +620,28 @@ CREATE TABLE t1 (pk int NOT NULL, c1 varchar(1)) engine=innodb; INSERT INTO t1 VALUES (1,NULL),(15,'o'),(16,'x'),(19,'t'),(35,'k'),(36,'h'),(42,'t'),(43,'h'), (53,'l'),(62,'a'),(71,NULL),(79,'u'),(128,'y'),(129,NULL),(133,NULL); +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; CREATE TABLE t2 ( -i1 int, c1 varchar(1) NOT NULL, KEY c1 (c1), KEY i1 (i1) +i1 int, c1 varchar(1) NOT NULL, +filler1 char(255) default '0', filler2 char(255) default '0', +KEY c1 (c1), KEY i1 (i1) ) engine=innodb; -INSERT INTO t2 VALUES -(1,'1'),(NULL,'1'),(42,'t'),(NULL,'1'),(79,'u'),(NULL,'1'), -(NULL,'4'),(NULL,'4'),(NULL,'1'),(NULL,'u'),(2,'1'),(NULL,'w'); +INSERT INTO t2(i1,c1) VALUES +(NULL,'1'),(1,'1'),(2,'t'),(3,'1'),(4,'u'),(5,'1'), +(6,'4'),(7,'4'),(8,'1'),(1,'u'),(2,'1'),(NULL,'w'); +INSERT INTO t2 SELECT * FROM t2; +INSERT INTO t2 SELECT * FROM t2; INSERT INTO t2 SELECT * FROM t2; +INSERT INTO t2 SELECT * FROM t2; + +ANALYZE TABLE t1,t2 PERSISTENT FOR ALL; let $q= SELECT * FROM t1 WHERE t1.c1 NOT IN (SELECT t2.c1 FROM t2, t1 AS a1 - WHERE t2.i1 = t1.pk AND t2.i1 IS NOT NULL); + WHERE t2.i1 = t1.pk AND t2.i1 BETWEEN 3 AND 5); eval $q; eval EXPLAIN EXTENDED $q; diff --git a/mysql-test/main/rowid_filter_innodb_debug.result b/mysql-test/main/rowid_filter_innodb_debug.result index 6fd75294bdb..56226fe25ce 100644 --- a/mysql-test/main/rowid_filter_innodb_debug.result +++ b/mysql-test/main/rowid_filter_innodb_debug.result @@ -4,8 +4,6 @@ set default_storage_engine=innodb; # create table t0(a int); insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); -create table t1(a int); -insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; create table t2(a int); insert into t2 select A.a + B.a* 10 from t0 A, t0 B; CREATE TABLE t3 ( @@ -22,10 +20,10 @@ InnoDB insert into t3 select A.a, -A.a, +B.a, 'filler-data-filler-data' from -t0 A, t1 B; +t2 A, t2 B; analyze table t2,t3; Table Op Msg_type Msg_text test.t2 analyze status Engine-independent 
statistics collected @@ -38,7 +36,7 @@ where t3.key1=t2.a and t3.key2 in (2,3); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 100 Using where -1 SIMPLE t3 ref|filter key1,key2 key1|key2 5|5 test.t2.a 1000 (20%) Using where; Using rowid filter +1 SIMPLE t3 ref|filter key1,key2 key1|key2 5|5 test.t2.a 100 (2%) Using where; Using rowid filter set debug_sync='handler_rowid_filter_check SIGNAL at_rowid_filter_check WAIT_FOR go'; select * from t2, t3 where @@ -52,7 +50,7 @@ connection default; disconnect con1; ERROR 70100: Query execution was interrupted set debug_sync='RESET'; -drop table t0,t1,t2,t3; +drop table t0,t2,t3; set default_storage_engine=default; set @save_optimizer_switch= @@optimizer_switch; set @save_use_stat_tables= @@use_stat_tables; diff --git a/mysql-test/main/rowid_filter_myisam_debug.result b/mysql-test/main/rowid_filter_myisam_debug.result index 16fcb2a416e..32a989f50da 100644 --- a/mysql-test/main/rowid_filter_myisam_debug.result +++ b/mysql-test/main/rowid_filter_myisam_debug.result @@ -3,8 +3,6 @@ # create table t0(a int); insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); -create table t1(a int); -insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; create table t2(a int); insert into t2 select A.a + B.a* 10 from t0 A, t0 B; CREATE TABLE t3 ( @@ -21,10 +19,10 @@ MyISAM insert into t3 select A.a, -A.a, +B.a, 'filler-data-filler-data' from -t0 A, t1 B; +t2 A, t2 B; analyze table t2,t3; Table Op Msg_type Msg_text test.t2 analyze status Engine-independent statistics collected @@ -37,7 +35,7 @@ where t3.key1=t2.a and t3.key2 in (2,3); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 100 Using where -1 SIMPLE t3 ref|filter key1,key2 key1|key2 5|5 test.t2.a 1000 (18%) Using where; Using rowid filter +1 SIMPLE t3 ref|filter key1,key2 key1|key2 5|5 test.t2.a 100 (2%) Using where; Using rowid filter set debug_sync='handler_rowid_filter_check SIGNAL at_rowid_filter_check WAIT_FOR go'; select * from t2, t3 where @@ -51,4 +49,4 @@ connection default; disconnect con1; ERROR 70100: Query execution was interrupted set debug_sync='RESET'; -drop table t0,t1,t2,t3; +drop table t0,t2,t3; diff --git a/mysql-test/main/select.result b/mysql-test/main/select.result index a48b879de5d..acf08e6a8e9 100644 --- a/mysql-test/main/select.result +++ b/mysql-test/main/select.result @@ -3474,13 +3474,13 @@ INSERT INTO t2 VALUES EXPLAIN SELECT a, c, d, f FROM t1,t2 WHERE a=c AND b BETWEEN 4 AND 6; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 ALL c NULL NULL NULL 18 Using where -1 SIMPLE t1 eq_ref|filter PRIMARY,b PRIMARY|b 4|5 test.t2.c 1 (30%) Using where; Using rowid filter +1 SIMPLE t1 range PRIMARY,b b 5 NULL 3 Using index condition +1 SIMPLE t2 ref c c 5 test.t1.a 2 EXPLAIN SELECT a, c, d, f FROM t1,t2 WHERE a=c AND b BETWEEN 4 AND 6 AND a > 0; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 ALL c NULL NULL NULL 18 Using where -1 SIMPLE t1 eq_ref|filter PRIMARY,b PRIMARY|b 4|5 test.t2.c 1 (30%) Using where; Using rowid filter +1 SIMPLE t1 range PRIMARY,b b 5 NULL 3 Using index condition; Using where +1 SIMPLE t2 ref c c 5 test.t1.a 2 DROP TABLE t1, t2; create table t1 ( a int unsigned not null auto_increment primary key, @@ -3744,7 +3744,7 @@ EXPLAIN SELECT * FROM t1 WHERE ID_better=1 AND ID1_with_null IS NULL AND (ID2_with_null=1 OR ID2_with_null=2); id select_type table type possible_keys key 
key_len ref rows Extra -1 SIMPLE t1 ref|filter idx1,idx2 idx1|idx2 5|4 const 2 (1%) Using index condition; Using where; Using rowid filter +1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where DROP TABLE t1; CREATE TABLE t1 (a INT, ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, KEY ts(ts)); INSERT INTO t1 VALUES (30,"2006-01-03 23:00:00"), (31,"2006-01-03 23:00:00"); diff --git a/mysql-test/main/select_jcl6.result b/mysql-test/main/select_jcl6.result index f217964adc3..9259fa750cf 100644 --- a/mysql-test/main/select_jcl6.result +++ b/mysql-test/main/select_jcl6.result @@ -3485,13 +3485,13 @@ INSERT INTO t2 VALUES EXPLAIN SELECT a, c, d, f FROM t1,t2 WHERE a=c AND b BETWEEN 4 AND 6; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 ALL c NULL NULL NULL 18 Using where -1 SIMPLE t1 eq_ref|filter PRIMARY,b PRIMARY|b 4|5 test.t2.c 1 (30%) Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE t1 range PRIMARY,b b 5 NULL 3 Using index condition; Rowid-ordered scan +1 SIMPLE t2 ref c c 5 test.t1.a 2 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan EXPLAIN SELECT a, c, d, f FROM t1,t2 WHERE a=c AND b BETWEEN 4 AND 6 AND a > 0; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 ALL c NULL NULL NULL 18 Using where -1 SIMPLE t1 eq_ref|filter PRIMARY,b PRIMARY|b 4|5 test.t2.c 1 (30%) Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE t1 range PRIMARY,b b 5 NULL 3 Using index condition; Using where; Rowid-ordered scan +1 SIMPLE t2 ref c c 5 test.t1.a 2 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan DROP TABLE t1, t2; create table t1 ( a int unsigned not null auto_increment primary key, @@ -3755,7 +3755,7 @@ EXPLAIN SELECT * FROM t1 WHERE ID_better=1 AND ID1_with_null IS NULL AND (ID2_with_null=1 OR ID2_with_null=2); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref|filter idx1,idx2 idx1|idx2 5|4 const 2 (1%) Using index condition; Using where; Using rowid filter +1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where DROP TABLE t1; CREATE TABLE t1 (a INT, ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, KEY ts(ts)); INSERT INTO t1 VALUES (30,"2006-01-03 23:00:00"), (31,"2006-01-03 23:00:00"); diff --git a/mysql-test/main/select_pkeycache.result b/mysql-test/main/select_pkeycache.result index a48b879de5d..acf08e6a8e9 100644 --- a/mysql-test/main/select_pkeycache.result +++ b/mysql-test/main/select_pkeycache.result @@ -3474,13 +3474,13 @@ INSERT INTO t2 VALUES EXPLAIN SELECT a, c, d, f FROM t1,t2 WHERE a=c AND b BETWEEN 4 AND 6; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 ALL c NULL NULL NULL 18 Using where -1 SIMPLE t1 eq_ref|filter PRIMARY,b PRIMARY|b 4|5 test.t2.c 1 (30%) Using where; Using rowid filter +1 SIMPLE t1 range PRIMARY,b b 5 NULL 3 Using index condition +1 SIMPLE t2 ref c c 5 test.t1.a 2 EXPLAIN SELECT a, c, d, f FROM t1,t2 WHERE a=c AND b BETWEEN 4 AND 6 AND a > 0; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 ALL c NULL NULL NULL 18 Using where -1 SIMPLE t1 eq_ref|filter PRIMARY,b PRIMARY|b 4|5 test.t2.c 1 (30%) Using where; Using rowid filter +1 SIMPLE t1 range PRIMARY,b b 5 NULL 3 Using index condition; Using where +1 SIMPLE t2 ref c c 5 test.t1.a 2 DROP TABLE t1, t2; create table t1 ( a int unsigned not null 
auto_increment primary key, @@ -3744,7 +3744,7 @@ EXPLAIN SELECT * FROM t1 WHERE ID_better=1 AND ID1_with_null IS NULL AND (ID2_with_null=1 OR ID2_with_null=2); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref|filter idx1,idx2 idx1|idx2 5|4 const 2 (1%) Using index condition; Using where; Using rowid filter +1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where DROP TABLE t1; CREATE TABLE t1 (a INT, ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, KEY ts(ts)); INSERT INTO t1 VALUES (30,"2006-01-03 23:00:00"), (31,"2006-01-03 23:00:00"); diff --git a/mysql-test/main/selectivity.result b/mysql-test/main/selectivity.result index 408094e3c1a..86c151630a2 100644 --- a/mysql-test/main/selectivity.result +++ b/mysql-test/main/selectivity.result @@ -1641,8 +1641,8 @@ drop function f1; # create table t1 (a int, b int, key (b), key (a)); insert into t1 -select (rand(1)*1000)/10, (rand(1001)*1000)/50 from seq_1_to_1000; -analyze table t1 ; +select (rand(1)*1000)/10, (rand(1001)*1000)/20 from seq_1_to_1000; +analyze table t1 persistent for all; Table Op Msg_type Msg_text test.t1 analyze status Engine-independent statistics collected test.t1 analyze status Table is already up to date @@ -1654,14 +1654,14 @@ Warnings: Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` USE INDEX () where `test`.`t1`.`a` in (17,51,5) explain extended select * from t1 use index () where b=2; id select_type table type possible_keys key key_len ref rows filtered Extra -1 SIMPLE t1 ALL NULL NULL NULL NULL 1000 5.90 Using where +1 SIMPLE t1 ALL NULL NULL NULL NULL 1000 2.40 Using where Warnings: Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` USE INDEX () where `test`.`t1`.`b` = 2 # Now, the equality is used for ref access, while the range condition # gives selectivity data explain extended select * from t1 where a in (17,51,5) and b=2; id select_type table type possible_keys key key_len ref rows filtered Extra -1 SIMPLE t1 ref|filter b,a b|a 5|5 const 58 (3%) 2.90 Using where; Using rowid filter +1 SIMPLE t1 ref|filter b,a b|a 5|5 const 24 (3%) 2.90 Using where; Using rowid filter Warnings: Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`b` = 2 and `test`.`t1`.`a` in (17,51,5) drop table t1; @@ -1817,6 +1817,12 @@ create table t1 (id int, a int, PRIMARY KEY(id), key(a)); insert into t1 select seq,seq from seq_1_to_100; create table t2 (id int, a int, b int, PRIMARY KEY(id), key(a), key(b)); insert into t2 select seq,seq,seq from seq_1_to_100; +analyze table t1,t2 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +test.t2 analyze status Engine-independent statistics collected +test.t2 analyze status Table is already up to date set optimizer_switch='exists_to_in=off'; set optimizer_use_condition_selectivity=2; SELECT * FROM t1 @@ -1850,7 +1856,7 @@ WHERE A.a=t1.a AND t2.b < 20); id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 100 Using where 2 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 -2 DEPENDENT SUBQUERY t2 ref|filter a,b a|b 5|5 test.A.id 1 (10%) Using where; Using rowid filter +2 DEPENDENT SUBQUERY t2 ref a,b a 5 test.A.id 1 Using where EXPLAIN SELECT * FROM t1 A, t1 B WHERE A.a = B.a and A.id = 65; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE A 
const PRIMARY,a PRIMARY 4 const 1 @@ -1862,7 +1868,7 @@ WHERE A.a=t1.a AND t2.b < 20); id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 100 Using where 2 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 -2 DEPENDENT SUBQUERY t2 ref|filter a,b a|b 5|5 test.A.id 1 (10%) Using where; Using rowid filter +2 DEPENDENT SUBQUERY t2 ref a,b a 5 test.A.id 1 Using where set optimizer_switch= @save_optimizer_switch; set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity; drop table t1,t2; diff --git a/mysql-test/main/selectivity.test b/mysql-test/main/selectivity.test index 9c82a8d37a7..4e4513d09d6 100644 --- a/mysql-test/main/selectivity.test +++ b/mysql-test/main/selectivity.test @@ -1111,8 +1111,8 @@ drop function f1; --echo # create table t1 (a int, b int, key (b), key (a)); insert into t1 -select (rand(1)*1000)/10, (rand(1001)*1000)/50 from seq_1_to_1000; -analyze table t1 ; +select (rand(1)*1000)/10, (rand(1001)*1000)/20 from seq_1_to_1000; +analyze table t1 persistent for all; --echo # Check what info the optimizer has about selectivities explain extended select * from t1 use index () where a in (17,51,5); @@ -1249,6 +1249,8 @@ insert into t1 select seq,seq from seq_1_to_100; create table t2 (id int, a int, b int, PRIMARY KEY(id), key(a), key(b)); insert into t2 select seq,seq,seq from seq_1_to_100; +analyze table t1,t2 persistent for all; + set optimizer_switch='exists_to_in=off'; set optimizer_use_condition_selectivity=2; diff --git a/mysql-test/main/selectivity_innodb.result b/mysql-test/main/selectivity_innodb.result index 8120f51ca83..eb9b131b5a6 100644 --- a/mysql-test/main/selectivity_innodb.result +++ b/mysql-test/main/selectivity_innodb.result @@ -1653,8 +1653,8 @@ drop function f1; # create table t1 (a int, b int, key (b), key (a)); insert into t1 -select (rand(1)*1000)/10, (rand(1001)*1000)/50 from seq_1_to_1000; -analyze table t1 ; +select (rand(1)*1000)/10, (rand(1001)*1000)/20 from seq_1_to_1000; +analyze table t1 persistent for all; Table Op Msg_type Msg_text test.t1 analyze status Engine-independent statistics collected test.t1 analyze status OK @@ -1666,14 +1666,14 @@ Warnings: Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` USE INDEX () where `test`.`t1`.`a` in (17,51,5) explain extended select * from t1 use index () where b=2; id select_type table type possible_keys key key_len ref rows filtered Extra -1 SIMPLE t1 ALL NULL NULL NULL NULL 1000 5.90 Using where +1 SIMPLE t1 ALL NULL NULL NULL NULL 1000 2.40 Using where Warnings: Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` USE INDEX () where `test`.`t1`.`b` = 2 # Now, the equality is used for ref access, while the range condition # gives selectivity data explain extended select * from t1 where a in (17,51,5) and b=2; id select_type table type possible_keys key key_len ref rows filtered Extra -1 SIMPLE t1 ref|filter b,a b|a 5|5 const 59 (3%) 2.90 Using where; Using rowid filter +1 SIMPLE t1 ref|filter b,a b|a 5|5 const 24 (3%) 2.90 Using where; Using rowid filter Warnings: Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`b` = 2 and `test`.`t1`.`a` in (17,51,5) drop table t1; @@ -1829,6 +1829,12 @@ create table t1 (id int, a int, PRIMARY KEY(id), key(a)); insert into t1 select seq,seq from seq_1_to_100; create table t2 (id int, a int, b int, PRIMARY KEY(id), key(a), key(b)); insert into t2 select seq,seq,seq from seq_1_to_100; 
+analyze table t1,t2 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +test.t2 analyze status Engine-independent statistics collected +test.t2 analyze status OK set optimizer_switch='exists_to_in=off'; set optimizer_use_condition_selectivity=2; SELECT * FROM t1 @@ -1862,7 +1868,7 @@ WHERE A.a=t1.a AND t2.b < 20); id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 index NULL a 5 NULL 100 Using where; Using index 2 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 Using index -2 DEPENDENT SUBQUERY t2 ref|filter a,b a|b 5|5 test.A.id 1 (19%) Using where; Using rowid filter +2 DEPENDENT SUBQUERY t2 ref a,b a 5 test.A.id 1 Using where EXPLAIN SELECT * FROM t1 A, t1 B WHERE A.a = B.a and A.id = 65; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE A const PRIMARY,a PRIMARY 4 const 1 @@ -1874,7 +1880,7 @@ WHERE A.a=t1.a AND t2.b < 20); id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 index NULL a 5 NULL 100 Using where; Using index 2 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 Using index -2 DEPENDENT SUBQUERY t2 ref|filter a,b a|b 5|5 test.A.id 1 (19%) Using where; Using rowid filter +2 DEPENDENT SUBQUERY t2 ref a,b a 5 test.A.id 1 Using where set optimizer_switch= @save_optimizer_switch; set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity; drop table t1,t2; diff --git a/mysql-test/main/subselect.result b/mysql-test/main/subselect.result index 17470fa128a..4209e2bc529 100644 --- a/mysql-test/main/subselect.result +++ b/mysql-test/main/subselect.result @@ -7382,6 +7382,17 @@ a drop table t1,t2,t3; # End of 10.2 tests # +# MDEV-29926: ASAN heap-use-after-free in Explain_query::~Explain_query +# +CREATE TABLE t (a VARCHAR(1)) CHARACTER SET utf8mb3; +EXECUTE IMMEDIATE "SELECT COUNT(*) FROM t WHERE a < (SELECT 'x')"; +COUNT(*) +0 +DROP TABLE t; +# +# End of 10.3 tests +# +# # Start of 10.4 tests # # diff --git a/mysql-test/main/subselect.test b/mysql-test/main/subselect.test index 8c11517f5d3..be22169a1d6 100644 --- a/mysql-test/main/subselect.test +++ b/mysql-test/main/subselect.test @@ -6298,6 +6298,20 @@ drop table t1,t2,t3; --echo # End of 10.2 tests +--echo # +--echo # MDEV-29926: ASAN heap-use-after-free in Explain_query::~Explain_query +--echo # + +CREATE TABLE t (a VARCHAR(1)) CHARACTER SET utf8mb3; +EXECUTE IMMEDIATE "SELECT COUNT(*) FROM t WHERE a < (SELECT 'x')"; + +# Cleanup +DROP TABLE t; + +--echo # +--echo # End of 10.3 tests +--echo # + --echo # --echo # Start of 10.4 tests diff --git a/mysql-test/main/subselect_no_exists_to_in.result b/mysql-test/main/subselect_no_exists_to_in.result index 394a13a4207..e32e6007328 100644 --- a/mysql-test/main/subselect_no_exists_to_in.result +++ b/mysql-test/main/subselect_no_exists_to_in.result @@ -7382,6 +7382,17 @@ a drop table t1,t2,t3; # End of 10.2 tests # +# MDEV-29926: ASAN heap-use-after-free in Explain_query::~Explain_query +# +CREATE TABLE t (a VARCHAR(1)) CHARACTER SET utf8mb3; +EXECUTE IMMEDIATE "SELECT COUNT(*) FROM t WHERE a < (SELECT 'x')"; +COUNT(*) +0 +DROP TABLE t; +# +# End of 10.3 tests +# +# # Start of 10.4 tests # # diff --git a/mysql-test/main/subselect_no_mat.result b/mysql-test/main/subselect_no_mat.result index 3d19a80871e..07755a5144a 100644 --- a/mysql-test/main/subselect_no_mat.result +++ b/mysql-test/main/subselect_no_mat.result @@ -7375,6 +7375,17 @@ a drop table t1,t2,t3; # End of 10.2 tests # +# 
MDEV-29926: ASAN heap-use-after-free in Explain_query::~Explain_query +# +CREATE TABLE t (a VARCHAR(1)) CHARACTER SET utf8mb3; +EXECUTE IMMEDIATE "SELECT COUNT(*) FROM t WHERE a < (SELECT 'x')"; +COUNT(*) +0 +DROP TABLE t; +# +# End of 10.3 tests +# +# # Start of 10.4 tests # # diff --git a/mysql-test/main/subselect_no_opts.result b/mysql-test/main/subselect_no_opts.result index 2934726811a..15688fc1717 100644 --- a/mysql-test/main/subselect_no_opts.result +++ b/mysql-test/main/subselect_no_opts.result @@ -7373,6 +7373,17 @@ a drop table t1,t2,t3; # End of 10.2 tests # +# MDEV-29926: ASAN heap-use-after-free in Explain_query::~Explain_query +# +CREATE TABLE t (a VARCHAR(1)) CHARACTER SET utf8mb3; +EXECUTE IMMEDIATE "SELECT COUNT(*) FROM t WHERE a < (SELECT 'x')"; +COUNT(*) +0 +DROP TABLE t; +# +# End of 10.3 tests +# +# # Start of 10.4 tests # # diff --git a/mysql-test/main/subselect_no_scache.result b/mysql-test/main/subselect_no_scache.result index 3c6dfbaa9c3..e3bdddbf84b 100644 --- a/mysql-test/main/subselect_no_scache.result +++ b/mysql-test/main/subselect_no_scache.result @@ -7388,6 +7388,17 @@ a drop table t1,t2,t3; # End of 10.2 tests # +# MDEV-29926: ASAN heap-use-after-free in Explain_query::~Explain_query +# +CREATE TABLE t (a VARCHAR(1)) CHARACTER SET utf8mb3; +EXECUTE IMMEDIATE "SELECT COUNT(*) FROM t WHERE a < (SELECT 'x')"; +COUNT(*) +0 +DROP TABLE t; +# +# End of 10.3 tests +# +# # Start of 10.4 tests # # diff --git a/mysql-test/main/subselect_no_semijoin.result b/mysql-test/main/subselect_no_semijoin.result index 18de9cba17f..88f8f78019e 100644 --- a/mysql-test/main/subselect_no_semijoin.result +++ b/mysql-test/main/subselect_no_semijoin.result @@ -7373,6 +7373,17 @@ a drop table t1,t2,t3; # End of 10.2 tests # +# MDEV-29926: ASAN heap-use-after-free in Explain_query::~Explain_query +# +CREATE TABLE t (a VARCHAR(1)) CHARACTER SET utf8mb3; +EXECUTE IMMEDIATE "SELECT COUNT(*) FROM t WHERE a < (SELECT 'x')"; +COUNT(*) +0 +DROP TABLE t; +# +# End of 10.3 tests +# +# # Start of 10.4 tests # # diff --git a/mysql-test/main/type_time_hires.result b/mysql-test/main/type_time_hires.result index 7ec77db87d6..ed00a8d473d 100644 --- a/mysql-test/main/type_time_hires.result +++ b/mysql-test/main/type_time_hires.result @@ -360,7 +360,7 @@ select cast(1e-6 as time(6)); cast(1e-6 as time(6)) 00:00:00.000001 # -# Start of 10.4 tests +# End of 5.5 tests # # # MDEV-20397 Support TIMESTAMP, DATETIME, TIME in ROUND() and TRUNCATE() @@ -907,3 +907,14 @@ a CEILING(a) CEILING_SP(a) CEILING(a)=CEILING_SP(a) DROP FUNCTION FLOOR_SP; DROP FUNCTION CEILING_SP; DROP TABLE t1; +# +# MDEV-29924 Assertion `(((nr) % (1LL << 24)) % (int) log_10_int[6 - dec]) == 0' failed in my_time_packed_to_binary on SELECT when using TIME field +# +create table t1 (c decimal(3,1),d time(6)); +insert into t1 values (null,0.1),(null,0.1), (0.1,0.2); +select c from t1 where c<all (select d from t1); +c +drop table t1; +# +# End of 10.4 tests +# diff --git a/mysql-test/main/type_time_hires.test b/mysql-test/main/type_time_hires.test index f9b4a5a9f27..51824c689ad 100644 --- a/mysql-test/main/type_time_hires.test +++ b/mysql-test/main/type_time_hires.test @@ -11,9 +11,8 @@ drop table t1; select cast(1e-6 as time(6)); - --echo # ---echo # Start of 10.4 tests +--echo # End of 5.5 tests --echo # --echo # @@ -196,3 +195,15 @@ DROP FUNCTION FLOOR_SP; DROP FUNCTION CEILING_SP; DROP TABLE t1; + +--echo # +--echo # MDEV-29924 Assertion `(((nr) % (1LL << 24)) % (int) log_10_int[6 - dec]) == 0' failed in my_time_packed_to_binary on 
SELECT when using TIME field +--echo # +create table t1 (c decimal(3,1),d time(6)); +insert into t1 values (null,0.1),(null,0.1), (0.1,0.2); +select c from t1 where c<all (select d from t1); +drop table t1; + +--echo # +--echo # End of 10.4 tests +--echo # diff --git a/mysql-test/std_data/mysql_upgrade/mdev28822_100427_innodb.frm b/mysql-test/std_data/mysql_upgrade/mdev28822_100427_innodb.frm Binary files differnew file mode 100644 index 00000000000..d9cc5e6cc69 --- /dev/null +++ b/mysql-test/std_data/mysql_upgrade/mdev28822_100427_innodb.frm diff --git a/mysql-test/suite/binlog/r/binlog_empty_xa_prepared.result b/mysql-test/suite/binlog/r/binlog_empty_xa_prepared.result index 9f998e049c0..589570d8300 100644 --- a/mysql-test/suite/binlog/r/binlog_empty_xa_prepared.result +++ b/mysql-test/suite/binlog/r/binlog_empty_xa_prepared.result @@ -108,3 +108,111 @@ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP SEQUENCE `s` /* generated by server */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */ +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1); +connect con1,localhost,root,,; +XA START '1'; +INSERT INTO t1 VALUES (2),(1); +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +SELECT * FROM t1 WHERE a = 2; +a +XA END '1'; +XA PREPARE '1'; +disconnect con1; +connection default; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 1 +XA COMMIT '1'; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +Must be no XA PREPARE group nor XA completion one: +include/show_binlog_events.inc +Log_name Pos Event_type Server_id End_log_pos Info +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Annotate_rows # # INSERT INTO t1 VALUES (1) +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE SEQUENCE s ENGINE=InnoDB +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Annotate_rows # # SELECT NEXT VALUE FOR s +master-bin.000001 # Table_map # # table_id: # (test.s) +master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # COMMIT +master-bin.000001 # Gtid # # XA START X'32',X'',1 GTID #-#-# +master-bin.000001 # Query # # XA END X'32',X'',1 +master-bin.000001 # XA_prepare # # XA PREPARE X'32',X'',1 +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # XA ROLLBACK X'32',X'',1 +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; DROP SEQUENCE `s` /* generated by server */ +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */ +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Annotate_rows # # INSERT INTO t1 VALUES (1) +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ +DROP TABLE t1; +connect con2,localhost,root,,; +CREATE TABLE tm (a INT PRIMARY KEY) 
ENGINE=MyISAM; +XA START '1'; +INSERT INTO tm VALUES (1),(1); +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +SELECT * FROM tm WHERE a = 2; +a +XA END '1'; +XA PREPARE '1'; +disconnect con2; +connection default; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 1 +XA ROLLBACK '1'; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +Must be no XA PREPARE group nor XA completion one: +include/show_binlog_events.inc +Log_name Pos Event_type Server_id End_log_pos Info +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Annotate_rows # # INSERT INTO t1 VALUES (1) +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE SEQUENCE s ENGINE=InnoDB +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Annotate_rows # # SELECT NEXT VALUE FOR s +master-bin.000001 # Table_map # # table_id: # (test.s) +master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # COMMIT +master-bin.000001 # Gtid # # XA START X'32',X'',1 GTID #-#-# +master-bin.000001 # Query # # XA END X'32',X'',1 +master-bin.000001 # XA_prepare # # XA PREPARE X'32',X'',1 +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # XA ROLLBACK X'32',X'',1 +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; DROP SEQUENCE `s` /* generated by server */ +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */ +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Annotate_rows # # INSERT INTO t1 VALUES (1) +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */ +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE TABLE tm (a INT PRIMARY KEY) ENGINE=MyISAM +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Query # # use `test`; INSERT INTO tm VALUES (1),(1) +master-bin.000001 # Query # # COMMIT +DROP TABLE tm; diff --git a/mysql-test/suite/binlog/t/binlog_empty_xa_prepared.test b/mysql-test/suite/binlog/t/binlog_empty_xa_prepared.test index 443feb60627..2890c42a087 100644 --- a/mysql-test/suite/binlog/t/binlog_empty_xa_prepared.test +++ b/mysql-test/suite/binlog/t/binlog_empty_xa_prepared.test @@ -80,3 +80,55 @@ DROP TABLE t1; --echo # Proof of correct logging incl empty XA-PREPARE --source include/show_binlog_events.inc + + +# MDEV-25616 Binlog event for XA COMMIT is generated without matching XA START + +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1); + +--source include/count_sessions.inc +--connect(con1,localhost,root,,) + +XA START '1'; + --error ER_DUP_ENTRY + INSERT INTO t1 VALUES (2),(1); + SELECT * FROM t1 WHERE a = 2; +XA END '1'; +XA PREPARE '1'; + +--disconnect con1 + +--connection default +--source 
include/wait_until_count_sessions.inc +XA RECOVER; + +--error ER_XA_RBROLLBACK +XA COMMIT '1'; +--echo Must be no XA PREPARE group nor XA completion one: +--source include/show_binlog_events.inc +DROP TABLE t1; + +--source include/count_sessions.inc + +--connect(con2,localhost,root,,) +CREATE TABLE tm (a INT PRIMARY KEY) ENGINE=MyISAM; +XA START '1'; + --error ER_DUP_ENTRY + INSERT INTO tm VALUES (1),(1); + SELECT * FROM tm WHERE a = 2; +XA END '1'; +XA PREPARE '1'; + +--disconnect con2 + +--connection default +--source include/wait_until_count_sessions.inc +XA RECOVER; + +--error ER_XA_RBROLLBACK +XA ROLLBACK '1'; +--echo Must be no XA PREPARE group nor XA completion one: +--source include/show_binlog_events.inc +DROP TABLE tm; + diff --git a/mysql-test/suite/federated/federatedx_create_handlers.result b/mysql-test/suite/federated/federatedx_create_handlers.result index 6dcd53e77c7..0bec01ab30b 100644 --- a/mysql-test/suite/federated/federatedx_create_handlers.result +++ b/mysql-test/suite/federated/federatedx_create_handlers.result @@ -440,6 +440,55 @@ SELECT * FROM (SELECT * FROM federated.t1 LIMIT 70000) dt; SELECT COUNT(DISTINCT a) FROM federated.t2; COUNT(DISTINCT a) 70000 +# +# MDEV-29640 FederatedX does not properly handle pushdown +# in case of difference in local and remote table names +# +connection master; +# Use tables from the previous test. Make sure pushdown works: +EXPLAIN SELECT COUNT(DISTINCT a) FROM federated.t2; +id select_type table type possible_keys key key_len ref rows Extra +1 PUSHED SELECT NULL NULL NULL NULL NULL NULL NULL NULL +SELECT COUNT(DISTINCT a) FROM federated.t2; +COUNT(DISTINCT a) +70000 +# Link remote table `federated.t1` with the local table named `t1_local` +CREATE TABLE federated.t1_local ENGINE="FEDERATED" +CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1'; +# No pushdown here due to table names mismatch, retrieve data as usual: +EXPLAIN SELECT COUNT(DISTINCT a) FROM federated.t1_local; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1_local ALL NULL NULL NULL NULL 70000 +SELECT COUNT(DISTINCT a) FROM federated.t1_local; +COUNT(DISTINCT a) +70000 +# +# MDEV-29863 Server crashes in federatedx_txn::acquire after select from +# the Federated table with partitions and federated_pushdown=1 +# in case of difference in local and remote table names +# +connection slave; +CREATE TABLE federated.t3 (a INT); +INSERT INTO federated.t3 VALUES (1),(2),(3); +CREATE TABLE federated.t4 (a INT); +connection master; +CREATE SERVER fedlink FOREIGN DATA WRAPPER mysql +OPTIONS (USER 'root', HOST '127.0.0.1', DATABASE 'federated', +PORT SLAVE_PORT); +CREATE TABLE federated.t3 (a INT) +ENGINE=FEDERATED +CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t3' + PARTITION BY list (a) +(PARTITION p1 VALUES IN (1) CONNECTION='fedlink/t3', +PARTITION p2 VALUES IN (2) CONNECTION='fedlink/t4'); +EXPLAIN SELECT * FROM federated.t3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t3 ALL NULL NULL NULL NULL 3 +SELECT * FROM federated.t3; +a +1 +2 +3 set global federated_pushdown=0; connection master; DROP TABLE IF EXISTS federated.t1; diff --git a/mysql-test/suite/federated/federatedx_create_handlers.test b/mysql-test/suite/federated/federatedx_create_handlers.test index 8863a057b47..2d6c2bc4197 100644 --- a/mysql-test/suite/federated/federatedx_create_handlers.test +++ b/mysql-test/suite/federated/federatedx_create_handlers.test @@ -1,6 +1,7 @@ --source have_federatedx.inc --source include/federated.inc 
--source include/no_valgrind_without_big.inc +--source include/have_partition.inc connection default; @@ -266,6 +267,53 @@ INSERT INTO federated.t2 SELECT * FROM (SELECT * FROM federated.t1 LIMIT 70000) dt; SELECT COUNT(DISTINCT a) FROM federated.t2; + +--echo # +--echo # MDEV-29640 FederatedX does not properly handle pushdown +--echo # in case of difference in local and remote table names +--echo # +connection master; +--echo # Use tables from the previous test. Make sure pushdown works: +EXPLAIN SELECT COUNT(DISTINCT a) FROM federated.t2; +SELECT COUNT(DISTINCT a) FROM federated.t2; + +--echo # Link remote table `federated.t1` with the local table named `t1_local` +--replace_result $SLAVE_MYPORT SLAVE_PORT +eval +CREATE TABLE federated.t1_local ENGINE="FEDERATED" +CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1'; + +--echo # No pushdown here due to table names mismatch, retrieve data as usual: +EXPLAIN SELECT COUNT(DISTINCT a) FROM federated.t1_local; +SELECT COUNT(DISTINCT a) FROM federated.t1_local; + + +--echo # +--echo # MDEV-29863 Server crashes in federatedx_txn::acquire after select from +--echo # the Federated table with partitions and federated_pushdown=1 +--echo # in case of difference in local and remote table names +--echo # +connection slave; +CREATE TABLE federated.t3 (a INT); +INSERT INTO federated.t3 VALUES (1),(2),(3); +CREATE TABLE federated.t4 (a INT); + +connection master; +--replace_result $SLAVE_MYPORT SLAVE_PORT +eval CREATE SERVER fedlink FOREIGN DATA WRAPPER mysql + OPTIONS (USER 'root', HOST '127.0.0.1', DATABASE 'federated', + PORT $SLAVE_MYPORT); + +CREATE TABLE federated.t3 (a INT) + ENGINE=FEDERATED + CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t3' + PARTITION BY list (a) + (PARTITION p1 VALUES IN (1) CONNECTION='fedlink/t3', + PARTITION p2 VALUES IN (2) CONNECTION='fedlink/t4'); + +EXPLAIN SELECT * FROM federated.t3; +SELECT * FROM federated.t3; + set global federated_pushdown=0; source include/federated_cleanup.inc; diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def index e518dca5a84..feac559ff55 100644 --- a/mysql-test/suite/galera/disabled.def +++ b/mysql-test/suite/galera/disabled.def @@ -37,4 +37,6 @@ query_cache: MDEV-15805 Test failure on galera.query_cache versioning_trx_id: MDEV-18590: galera.versioning_trx_id: Test failure: mysqltest: Result content mismatch galera_bf_abort_at_after_statement : Unstable galera_bf_abort_shutdown : MDEV-29773 Assertion failure on sql/wsrep_mysqld.cc:2893 in wsrep_bf_abort_shutdown - +galera.MW-284 : MDEV-29861: Galera test case hangs +galera.galera_binlog_checksum : MDEV-29861: Galera test case hangs +galera_var_notify_ssl_ipv6 : MDEV-29861: Galera test case hangs diff --git a/mysql-test/suite/galera/r/galera_many_rows.result b/mysql-test/suite/galera/r/galera_many_rows.result index 566bc59f8ab..b34c2484aea 100644 --- a/mysql-test/suite/galera/r/galera_many_rows.result +++ b/mysql-test/suite/galera/r/galera_many_rows.result @@ -5,32 +5,32 @@ connection node_2; connection node_1; SET SESSION innodb_lock_wait_timeout=600; SET SESSION lock_wait_timeout=600; -CREATE TABLE ten (f1 INTEGER) engine=InnoDB; +CREATE TABLE ten (f1 INTEGER NOT NULL PRIMARY KEY) engine=InnoDB; INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10); CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, f2 INTEGER) Engine=InnoDB; -INSERT INTO t1 (f2) SELECT a1.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5; +INSERT INTO t1 (f2) SELECT a1.f1 FROM ten AS 
a1, ten AS a2, ten AS a3, ten AS a4; connection node_2; SET SESSION wsrep_sync_wait = 0; SET SESSION wsrep_sync_wait = 15; SET GLOBAL wsrep_provider_options = 'repl.causal_read_timeout=PT1H'; SELECT COUNT(*) FROM t1; COUNT(*) -100000 -INSERT INTO t1 (f2) SELECT a1.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5; +10000 +INSERT INTO t1 (f2) SELECT a1.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4; connection node_1; SELECT COUNT(*) FROM t1; COUNT(*) -200000 +20000 UPDATE t1 SET f2 = 1; connection node_2; SELECT COUNT(*) FROM t1 WHERE f2 = 1; COUNT(*) -200000 +20000 connection node_1; START TRANSACTION; SELECT COUNT(*) FROM t1; COUNT(*) -200000 +20000 UPDATE t1 SET f2 = 3; connection node_2; START TRANSACTION; diff --git a/mysql-test/suite/galera/t/galera_many_rows.test b/mysql-test/suite/galera/t/galera_many_rows.test index bc9e99db8da..3623b3f33b0 100644 --- a/mysql-test/suite/galera/t/galera_many_rows.test +++ b/mysql-test/suite/galera/t/galera_many_rows.test @@ -10,11 +10,11 @@ SET SESSION innodb_lock_wait_timeout=600; SET SESSION lock_wait_timeout=600; -CREATE TABLE ten (f1 INTEGER) engine=InnoDB; +CREATE TABLE ten (f1 INTEGER NOT NULL PRIMARY KEY) engine=InnoDB; INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10); CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, f2 INTEGER) Engine=InnoDB; -INSERT INTO t1 (f2) SELECT a1.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5; +INSERT INTO t1 (f2) SELECT a1.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4; --connection node_2 SET SESSION wsrep_sync_wait = 0; @@ -24,7 +24,7 @@ SET SESSION wsrep_sync_wait = 15; SET GLOBAL wsrep_provider_options = 'repl.causal_read_timeout=PT1H'; SELECT COUNT(*) FROM t1; -INSERT INTO t1 (f2) SELECT a1.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5; +INSERT INTO t1 (f2) SELECT a1.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4; --connection node_1 SELECT COUNT(*) FROM t1; diff --git a/mysql-test/suite/innodb/r/instant_alter.result b/mysql-test/suite/innodb/r/instant_alter.result index 9cb5f9d2084..18680432c77 100644 --- a/mysql-test/suite/innodb/r/instant_alter.result +++ b/mysql-test/suite/innodb/r/instant_alter.result @@ -2929,3 +2929,14 @@ t1 CREATE TABLE `t1` ( KEY `f2` (`f2`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci DROP TABLE t1, t2; +# +# MDEV-28822 Table from older version requires table rebuild when adding column to table with multi-column index +# +CREATE TABLE mdev28822_100427_innodb ( +id int not null primary key, +msg varchar(10), +index(id, msg) +) ENGINE=InnoDB ROW_FORMAT=DYNAMIC; +FLUSH TABLES; +ALTER TABLE mdev28822_100427_innodb ADD i1 INTEGER, ALGORITHM=INSTANT; +DROP TABLE mdev28822_100427_innodb; diff --git a/mysql-test/suite/innodb/t/instant_alter.test b/mysql-test/suite/innodb/t/instant_alter.test index 22815798f69..8e333e3bb72 100644 --- a/mysql-test/suite/innodb/t/instant_alter.test +++ b/mysql-test/suite/innodb/t/instant_alter.test @@ -1,6 +1,8 @@ --source include/innodb_page_size.inc --source include/have_sequence.inc +let $datadir=`select @@datadir`; + --echo # --echo # MDEV-11369: Instant ADD COLUMN for InnoDB --echo # @@ -949,3 +951,19 @@ ALTER TABLE t1 DROP COLUMN f3, ADD FOREIGN KEY fk (f1) ALTER TABLE t1 DROP COLUMN f5; SHOW CREATE TABLE t1; DROP TABLE t1, t2; + + +--echo # +--echo # MDEV-28822 Table from older version requires table rebuild when adding column to table with multi-column index +--echo # + +CREATE TABLE mdev28822_100427_innodb ( + id int not null primary key, + msg 
varchar(10), + index(id, msg) +) ENGINE=InnoDB ROW_FORMAT=DYNAMIC; +FLUSH TABLES; +remove_file $datadir/test/mdev28822_100427_innodb.frm; +copy_file std_data/mysql_upgrade/mdev28822_100427_innodb.frm $datadir/test/mdev28822_100427_innodb.frm; +ALTER TABLE mdev28822_100427_innodb ADD i1 INTEGER, ALGORITHM=INSTANT; +DROP TABLE mdev28822_100427_innodb; diff --git a/mysql-test/suite/plugins/t/false_dupes-6543.test b/mysql-test/suite/plugins/t/false_dupes-6543.test index ebdbe00e47c..ca278685967 100644 --- a/mysql-test/suite/plugins/t/false_dupes-6543.test +++ b/mysql-test/suite/plugins/t/false_dupes-6543.test @@ -1,3 +1,4 @@ +source include/not_embedded.inc; # # MDEV-6543 Crash if enable 'federatedx' when 'federated' plugin already enabled, and vice-versa # diff --git a/mysql-test/suite/rpl/include/rpl_xa_empty_transaction.inc b/mysql-test/suite/rpl/include/rpl_xa_empty_transaction.inc new file mode 100644 index 00000000000..4cb4fe8962f --- /dev/null +++ b/mysql-test/suite/rpl/include/rpl_xa_empty_transaction.inc @@ -0,0 +1,10 @@ +# +# Helper file to run each empty-due-to-err XA transaction test case both with +# and without detaching from the connection when the transaction is prepared. +# + +--let $use_disconnect=0 +--source rpl_xa_empty_transaction_test_case.inc + +--let $use_disconnect=1 +--source rpl_xa_empty_transaction_test_case.inc diff --git a/mysql-test/suite/rpl/include/rpl_xa_empty_transaction_test_case.inc b/mysql-test/suite/rpl/include/rpl_xa_empty_transaction_test_case.inc new file mode 100644 index 00000000000..6368336b8e3 --- /dev/null +++ b/mysql-test/suite/rpl/include/rpl_xa_empty_transaction_test_case.inc @@ -0,0 +1,131 @@ +# +# Helper script to create an XA transaction and validate it was not +# binlogged +# +# Parameters +# $xa_completion_action : The action to end the XA transaction, either +# COMMIT or ROLLBACK +# $trx_statements : A comma separated list specifying how to build +# the statements of the transaction. Each item in +# the list is either T (for transactional) or N +# (for non-transactional). An empty list will not +# add any statements to the transaction. 
+# $use_disconnect : When TRUE, disconnect after preparing the XA +# transaction to test the detach/rollback case +# + +# +# Setup +--let $generic_assert_text= should not binlog XA transaction + +--connection server_1 +--let server_1_datadir=`select @@datadir` + +--connection server_2 +--let server_2_datadir=`select @@datadir` + +--connection server_3 +--let server_3_datadir=`select @@datadir` + +--let assert_file=$MYSQLTEST_VARDIR/tmp/binlog_decoded.out + +--connection server_1 +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +--source include/save_master_gtid.inc + +--connection server_3 +--source include/sync_with_master_gtid.inc + +--connection server_1 + +if ($use_disconnect) +{ + --source include/count_sessions.inc + --connect(con1,localhost,root,,) +} + +XA START 'x'; +--let $_stmt_items= $trx_statements +--let $_ctr= 1 +while($_stmt_items) +{ + --let $_cur_stmt= `SELECT SUBSTRING_INDEX('$_stmt_items', ',', 1)` + --let $_stmt_items= `SELECT LTRIM(SUBSTRING('$_stmt_items', LENGTH('$_cur_stmt') + 2))` + + if (`SELECT strcmp("$_cur_stmt","T") = 0`) + { + --let $target_table= ti + } + + if (`SELECT strcmp("$_cur_stmt","N") = 0`) + { + --let $target_table= tm + } + + --error ER_DUP_ENTRY + --eval INSERT INTO $target_table VALUES ($_ctr),($_ctr); + inc $_ctr; + +} +XA END 'x'; +XA PREPARE 'x'; + +if ($use_disconnect) +{ + --disconnect con1 + --connection server_1 + --source include/wait_until_count_sessions.inc + XA RECOVER; + + --error ER_XA_RBROLLBACK + --eval XA $xa_completion_action 'x'; +} +if (!$use_disconnect) +{ + --eval XA $xa_completion_action 'x'; +} + +--source include/save_master_gtid.inc + +--let binlog_filename= query_get_value(SHOW MASTER STATUS, File, 1) +FLUSH LOGS; + +--echo # MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +--exec $MYSQL_BINLOG $server_1_datadir/$binlog_filename --result-file=$assert_file + +--let assert_text= server_1 $generic_assert_text +--let assert_count= 0 +--let assert_select= XA START|XA END|XA PREPARE|XA COMMIT|XA ROLLBACK +--source include/assert_grep.inc + +--connection server_2 +--source include/sync_with_master_gtid.inc +--let binlog_filename= query_get_value(SHOW MASTER STATUS, File, 1) +FLUSH LOGS; + +--echo # MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +--exec $MYSQL_BINLOG $server_2_datadir/$binlog_filename --result-file=$assert_file + +--let assert_text= server_2 $generic_assert_text +--source include/assert_grep.inc + +--connection server_3 +--source include/sync_with_master_gtid.inc +--let binlog_filename= query_get_value(SHOW MASTER STATUS, File, 1) +FLUSH LOGS; + +--echo # MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +--exec $MYSQL_BINLOG $server_3_datadir/$binlog_filename --result-file=$assert_file + +--let assert_text= server_3 $generic_assert_text +--source include/assert_grep.inc + +# +# Cleanup +--connection server_1 +DROP TABLE ti,tm; +--source include/save_master_gtid.inc + +--connection server_3 +--source include/sync_with_master_gtid.inc diff --git a/mysql-test/suite/rpl/r/rpl_xa_empty_transaction.result b/mysql-test/suite/rpl/r/rpl_xa_empty_transaction.result new file mode 100644 index 00000000000..f3ea53c219a --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_xa_empty_transaction.result @@ -0,0 +1,1169 @@ +include/rpl_init.inc [topology=1->2->3] +connection server_1; +connection server_2; +connection server_3; +connection server_1; +# +# Test Case 1: An XA transaction without any statements 
should not be +# binlogged +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +XA END 'x'; +XA PREPARE 'x'; +XA COMMIT 'x';; +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA COMMIT 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +XA END 'x'; +XA PREPARE 'x'; +XA ROLLBACK 'x';; +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc 
+connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA ROLLBACK 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +# +# Test Case 2: An XA transaction consisting of a successfully rolled back +# statement should not be binlogged +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA COMMIT 'x';; +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA COMMIT 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG 
server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA ROLLBACK 'x';; +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA ROLLBACK 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +# +# Test Case 3: An XA transaction with a statement that cannot be rolled +# back should be binlogged +connection server_1; +set @sav_binlog_format = @@binlog_format; +set @@binlog_format = row; +set @@global.binlog_format = row; +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; 
+include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA COMMIT 'x';; +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA COMMIT 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA ROLLBACK 'x';; +Warnings: +Warning 1196 Some non-transactional changed tables couldn't be rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; 
+include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA ROLLBACK 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +set @@binlog_format = @sav_binlog_format; +set @@global.binlog_format = @sav_binlog_format; +# +# Test Case 4: An XA transaction with multiple statements that can all +# be rolled back should not be binlogged +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO ti VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA COMMIT 'x';; +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO ti VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; 
+connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA COMMIT 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO ti VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA ROLLBACK 'x';; +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO ti VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA ROLLBACK 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection 
server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +# +# Test Case 5: A mixed XA transaction consisting of one statement that +# can successfully be rolled back (first statement), and another that +# can not (second statement) should be binlogged +connection server_1; +set @sav_binlog_format = @@binlog_format; +set @@binlog_format = row; +set @@global.binlog_format = row; +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO tm VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA COMMIT 'x';; +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO tm VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA COMMIT 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: 
Duplicate entry '1' for key 'PRIMARY' +INSERT INTO tm VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA ROLLBACK 'x';; +Warnings: +Warning 1196 Some non-transactional changed tables couldn't be rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO tm VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA ROLLBACK 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +set @@binlog_format = @sav_binlog_format; +set @@global.binlog_format = @sav_binlog_format; +# +# Test Case 6: A mixed XA transaction consisting of one statement that +# cannot successfully be rolled back (first statement), and another that +# can (second statement) should be binlogged +connection server_1; +set @sav_binlog_format = @@binlog_format; +set @@binlog_format = row; +set @@global.binlog_format = row; +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO ti VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA COMMIT 'x';; +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename 
--result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO ti VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA COMMIT 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO ti VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA ROLLBACK 'x';; +Warnings: +Warning 1196 Some non-transactional changed tables couldn't be rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection 
server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO ti VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA ROLLBACK 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +set @@binlog_format = @sav_binlog_format; +set @@global.binlog_format = @sav_binlog_format; +# +# Test Case 7: An XA transaction consisting of two failed +# non-transactional statements should be binlogged +connection server_1; +set @sav_binlog_format = @@binlog_format; +set @@binlog_format = row; +set @@global.binlog_format = row; +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO tm VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA COMMIT 'x';; +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO tm VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' 
for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA COMMIT 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO tm VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA ROLLBACK 'x';; +Warnings: +Warning 1196 Some non-transactional changed tables couldn't be rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO tm VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA ROLLBACK 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# 
MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +set @@binlog_format = @sav_binlog_format; +set @@global.binlog_format = @sav_binlog_format; +connection server_1; +include/rpl_end.inc +# End of rpl_xa_empty_transaction.test diff --git a/mysql-test/suite/rpl/t/rpl_xa_empty_transaction.cnf b/mysql-test/suite/rpl/t/rpl_xa_empty_transaction.cnf new file mode 100644 index 00000000000..92acd0c73a6 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_xa_empty_transaction.cnf @@ -0,0 +1,18 @@ +!include include/default_mysqld.cnf + +[mysqld.1] +log-slave-updates +innodb + +[mysqld.2] +log-slave-updates +innodb + +[mysqld.3] +log-slave-updates +innodb + +[ENV] +SERVER_MYPORT_1= @mysqld.1.port +SERVER_MYPORT_2= @mysqld.2.port +SERVER_MYPORT_3= @mysqld.3.port diff --git a/mysql-test/suite/rpl/t/rpl_xa_empty_transaction.test b/mysql-test/suite/rpl/t/rpl_xa_empty_transaction.test new file mode 100644 index 00000000000..61cc0621d5a --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_xa_empty_transaction.test @@ -0,0 +1,175 @@ +# +# Purpose: +# This test ensures consistency in binlogging behavior for XA transactions +# that have all statements error and rollback, effectively leaving an "empty" +# transaction. In such cases, an empty XA transaction should be binlogged. The +# bug reported by MDEV-25616 revealed that an "empty" XA transaction would +# binlog an XA ROLLBACK or XA COMMIT event without a preceding setup, i.e. +# XA START through XA PREPARE. The bug presented differently for XA +# transactions consisting of transactional and non-transactional statements. +# Therefore, this test validates that an entire XA transaction is binlogged +# for different combinations of transactional or non-transactional statements. +# Note that the behavior changes when binlogging empty XA transactions +# depending on the binlog_row_format variables. That is, when the content of +# the transaction consists of errored transactional statements, in row format, +# an empty XA transaction will be binlogged; however, in mixed and statement +# formats, nothing will be written into the binary log. +# +# Methodology: +# Create XA transactions with various combinations of erroring transactional +# or non-transactional statements. The binary log is examined to ensure all +# XA components are written. Chain replication is used, i.e. +# (primary->replica->replica), to ensure replica binlogging is consistent with +# manual execution. The transactional and non-transactional tables use InnoDB +# and MyISAM, respectively. +# +# Parameters +# $expect_transactional_xa_binlog : Boolean indicating whether or not an +# errored transactional statement should result in XA statements written +# into the binary log. 
+# +# References: +# MDEV-25616: Binlog event for XA COMMIT is generated without matching +# XA START, replication aborts +# +--source include/have_log_bin.inc + +--let $rpl_server_count= 3 +--let $rpl_topology= 1->2->3 +--source include/rpl_init.inc + +--connection server_1 +-- source include/have_innodb.inc +--connection server_2 +-- source include/have_innodb.inc +--connection server_3 +-- source include/have_innodb.inc +--connection server_1 + +--echo # +--echo # Test Case 1: An XA transaction without any statements should not be +--echo # binlogged +--let $trx_statements= + +--let $xa_completion_action= COMMIT +--source include/rpl_xa_empty_transaction.inc + +--let $xa_completion_action= ROLLBACK +--source include/rpl_xa_empty_transaction.inc + + +--echo # +--echo # Test Case 2: An XA transaction consisting of a successfully rolled back +--echo # statement should not be binlogged +--let $trx_statements= T + +--let $xa_completion_action= COMMIT +--source include/rpl_xa_empty_transaction.inc + +--let $xa_completion_action= ROLLBACK +--source include/rpl_xa_empty_transaction.inc + + +--echo # +--echo # Test Case 3: An XA transaction with a statement that cannot be rolled +--echo # back should be binlogged + +# TODO: remove work-around MDEV-24654 when fixed. +--connection server_1 +set @sav_binlog_format = @@binlog_format; +set @@binlog_format = row; +set @@global.binlog_format = row; +--let $trx_statements= N + +--let $xa_completion_action= COMMIT +--source include/rpl_xa_empty_transaction.inc + +--let $xa_completion_action= ROLLBACK +--source include/rpl_xa_empty_transaction.inc + +--connection server_1 +set @@binlog_format = @sav_binlog_format; +set @@global.binlog_format = @sav_binlog_format; + + +--echo # +--echo # Test Case 4: An XA transaction with multiple statements that can all +--echo # be rolled back should not be binlogged +--let $trx_statements= T,T + +--let $xa_completion_action= COMMIT +--source include/rpl_xa_empty_transaction.inc + +--let $xa_completion_action= ROLLBACK +--source include/rpl_xa_empty_transaction.inc + + +--echo # +--echo # Test Case 5: A mixed XA transaction consisting of one statement that +--echo # can successfully be rolled back (first statement), and another that +--echo # can not (second statement) should be binlogged + +--connection server_1 +set @sav_binlog_format = @@binlog_format; +set @@binlog_format = row; +set @@global.binlog_format = row; +--let $trx_statements= T,N + +--let $xa_completion_action= COMMIT +--source include/rpl_xa_empty_transaction.inc + +--let $xa_completion_action= ROLLBACK +--source include/rpl_xa_empty_transaction.inc + +--connection server_1 +set @@binlog_format = @sav_binlog_format; +set @@global.binlog_format = @sav_binlog_format; + + +--echo # +--echo # Test Case 6: A mixed XA transaction consisting of one statement that +--echo # cannot successfully be rolled back (first statement), and another that +--echo # can (second statement) should be binlogged + +--connection server_1 +set @sav_binlog_format = @@binlog_format; +set @@binlog_format = row; +set @@global.binlog_format = row; +--let $trx_statements= N,T + +--let $xa_completion_action= COMMIT +--source include/rpl_xa_empty_transaction.inc + +--let $xa_completion_action= ROLLBACK +--source include/rpl_xa_empty_transaction.inc + +--connection server_1 +set @@binlog_format = @sav_binlog_format; +set @@global.binlog_format = @sav_binlog_format; + +--echo # +--echo # Test Case 7: An XA transaction consisting of two failed +--echo # non-transactional statements should be 
binlogged + +--connection server_1 +set @sav_binlog_format = @@binlog_format; +set @@binlog_format = row; +set @@global.binlog_format = row; +--let $trx_statements= N,N + +--let $xa_completion_action= COMMIT +--source include/rpl_xa_empty_transaction.inc + +--let $xa_completion_action= ROLLBACK +--source include/rpl_xa_empty_transaction.inc + +--connection server_1 +set @@binlog_format = @sav_binlog_format; +set @@global.binlog_format = @sav_binlog_format; + +# +# Cleanup +--connection server_1 +--source include/rpl_end.inc + +--echo # End of rpl_xa_empty_transaction.test diff --git a/sql/field.cc b/sql/field.cc index 32b1750dac8..e6c29eeb39b 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -10895,8 +10895,6 @@ Column_definition::Column_definition(THD *thd, Field *old_field, type_handler()->Column_definition_reuse_fix_attributes(thd, this, old_field); - type_handler()->Column_definition_implicit_upgrade(this); - /* Copy the default (constant/function) from the column object orig_field, if supplied. We do this if all these conditions are met: diff --git a/sql/field.h b/sql/field.h index f1956baf8b9..ace51b792ad 100644 --- a/sql/field.h +++ b/sql/field.h @@ -5709,6 +5709,12 @@ public: } /* Used to make a clone of this object for ALTER/CREATE TABLE */ Create_field *clone(MEM_ROOT *mem_root) const; + static void upgrade_data_types(List<Create_field> &list) + { + List_iterator<Create_field> it(list); + while (Create_field *f= it++) + f->type_handler()->Column_definition_implicit_upgrade(f); + } }; diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc index e225a05d490..8e1123b7c1d 100644 --- a/sql/item_geofunc.cc +++ b/sql/item_geofunc.cc @@ -2586,56 +2586,58 @@ double Item_func_sphere_distance::spherical_distance_points(Geometry *g1, double res= 0.0; // Length for the single point (25 Bytes) uint32 len= SRID_SIZE + POINT_DATA_SIZE + WKB_HEADER_SIZE; - int error= 0; + int err_hv= 0, err_sph= 0; switch (g2->get_class_info()->m_type_id) { case Geometry::wkb_point: - // Optimization for point-point case + { + Gis_point *g2p= static_cast<Gis_point *>(g2); + // Optimization for point-point case if (g1->get_class_info()->m_type_id == Geometry::wkb_point) { - res= static_cast<Gis_point *>(g2)->calculate_haversine(g1, r, &error); + res= g2p->calculate_haversine(g1, r, &err_hv); } else { // Optimization for single point in Multipoint if (g1->get_data_size() == len) { - res= static_cast<Gis_point *>(g2)->calculate_haversine(g1, r, &error); + res= g2p->calculate_haversine(g1, r, &err_hv); } else { // There are multipoints in g1 // g1 is MultiPoint and calculate MP.sphericaldistance from g2 Point if (g1->get_data_size() != GET_SIZE_ERROR) - static_cast<Gis_point *>(g2)->spherical_distance_multipoints( - (Gis_multi_point *)g1, r, &res, &error); + err_sph= g2p->spherical_distance_multipoints(g1, r, &res, &err_hv); } } break; + } case Geometry::wkb_multipoint: // Optimization for point-point case if (g1->get_class_info()->m_type_id == Geometry::wkb_point) { + Gis_point *g1p= static_cast<Gis_point *>(g1); // Optimization for single point in Multipoint g2 if (g2->get_data_size() == len) { - res= static_cast<Gis_point *>(g1)->calculate_haversine(g2, r, &error); + res= g1p->calculate_haversine(g2, r, &err_hv); } else { if (g2->get_data_size() != GET_SIZE_ERROR) // g1 is a point (casted to multi_point) and g2 multipoint - static_cast<Gis_point *>(g1)->spherical_distance_multipoints( - (Gis_multi_point *)g2, r, &res, &error); + err_sph= g1p->spherical_distance_multipoints(g2, r, &res, &err_hv); } } else { + 
Gis_multi_point *g1mp= static_cast<Gis_multi_point *>(g1); // Multipoints in g1 and g2 - no optimization - static_cast<Gis_multi_point *>(g1)->spherical_distance_multipoints( - (Gis_multi_point *)g2, r, &res, &error); + err_sph= g1mp->spherical_distance_multipoints(g2, r, &res, &err_hv); } break; @@ -2644,16 +2646,14 @@ double Item_func_sphere_distance::spherical_distance_points(Geometry *g1, break; } - if (res < 0) - goto handle_error; - - handle_error: - if (error > 0) - my_error(ER_STD_OUT_OF_RANGE_ERROR, MYF(0), - "Longitude should be [-180,180]", "ST_Distance_Sphere"); - else if(error < 0) - my_error(ER_STD_OUT_OF_RANGE_ERROR, MYF(0), - "Latitude should be [-90,90]", "ST_Distance_Sphere"); + if (err_hv == 1) + my_error(ER_STD_OUT_OF_RANGE_ERROR, MYF(0), + "Longitude should be [-180,180]", "ST_Distance_Sphere"); + else if(err_hv < 0) + my_error(ER_STD_OUT_OF_RANGE_ERROR, MYF(0), + "Latitude should be [-90,90]", "ST_Distance_Sphere"); + else if (err_sph || err_hv == 2) + my_error(ER_CANT_CREATE_GEOMETRY_OBJECT, MYF(0)); return res; } diff --git a/sql/log.cc b/sql/log.cc index 6aad1f8c724..7df14426319 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -2120,29 +2120,62 @@ static int binlog_prepare(handlerton *hton, THD *thd, bool all) int binlog_commit_by_xid(handlerton *hton, XID *xid) { + int rc= 0; THD *thd= current_thd; if (thd->is_current_stmt_binlog_disabled()) return 0; + + /* the asserted state can't be reachable with xa commit */ + DBUG_ASSERT(!thd->get_stmt_da()->is_error() || + thd->get_stmt_da()->sql_errno() != ER_XA_RBROLLBACK); + /* + This is a recovered user xa transaction commit. + Create a "temporary" binlog transaction to write the commit record + into binlog. + */ + THD_TRANS trans; + trans.ha_list= NULL; + + thd->ha_data[hton->slot].ha_info[1].register_ha(&trans, hton); + thd->ha_data[binlog_hton->slot].ha_info[1].set_trx_read_write(); (void) thd->binlog_setup_trx_data(); DBUG_ASSERT(thd->lex->sql_command == SQLCOM_XA_COMMIT); - return binlog_commit(thd, TRUE, FALSE); + rc= binlog_commit(thd, TRUE, FALSE); + thd->ha_data[binlog_hton->slot].ha_info[1].reset(); + + return rc; } int binlog_rollback_by_xid(handlerton *hton, XID *xid) { + int rc= 0; THD *thd= current_thd; if (thd->is_current_stmt_binlog_disabled()) return 0; + + if (thd->get_stmt_da()->is_error() && + thd->get_stmt_da()->sql_errno() == ER_XA_RBROLLBACK) + return rc; + + THD_TRANS trans; + trans.ha_list= NULL; + + thd->ha_data[hton->slot].ha_info[1].register_ha(&trans, hton); + thd->ha_data[hton->slot].ha_info[1].set_trx_read_write(); (void) thd->binlog_setup_trx_data(); DBUG_ASSERT(thd->lex->sql_command == SQLCOM_XA_ROLLBACK || (thd->transaction->xid_state.get_state_code() == XA_ROLLBACK_ONLY)); - return binlog_rollback(hton, thd, TRUE); + + rc= binlog_rollback(hton, thd, TRUE); + thd->ha_data[hton->slot].ha_info[1].reset(); + + return rc; } @@ -2276,10 +2309,16 @@ int binlog_commit(THD *thd, bool all, bool ro_1pc) } if (cache_mngr->trx_cache.empty() && - thd->transaction->xid_state.get_state_code() != XA_PREPARED) + (thd->transaction->xid_state.get_state_code() != XA_PREPARED || + !(thd->ha_data[binlog_hton->slot].ha_info[1].is_started() && + thd->ha_data[binlog_hton->slot].ha_info[1].is_trx_read_write()))) { /* - we're here because cache_log was flushed in MYSQL_BIN_LOG::log_xid() + This is an empty transaction commit (both the regular and xa), + or such transaction xa-prepare or + either one's statement having no effect on the transactional cache + as any prior to it. 
+ The empty xa-prepare sinks in only when binlog is read-only. */ cache_mngr->reset(false, true); THD_STAGE_INFO(thd, org_stage); @@ -2364,10 +2403,12 @@ static int binlog_rollback(handlerton *hton, THD *thd, bool all) } if (!cache_mngr->trx_cache.has_incident() && cache_mngr->trx_cache.empty() && - thd->transaction->xid_state.get_state_code() != XA_PREPARED) + (thd->transaction->xid_state.get_state_code() != XA_PREPARED || + !(thd->ha_data[binlog_hton->slot].ha_info[1].is_started() && + thd->ha_data[binlog_hton->slot].ha_info[1].is_trx_read_write()))) { /* - we're here because cache_log was flushed in MYSQL_BIN_LOG::log_xid() + The same comments apply as in the binlog commit method's branch. */ cache_mngr->reset(false, true); thd->reset_binlog_for_next_statement(); @@ -10603,6 +10644,7 @@ int TC_LOG_BINLOG::unlog_xa_prepare(THD *thd, bool all) /* an empty XA-prepare event group is logged */ rc= write_empty_xa_prepare(thd, cache_mngr); // normally gains need_unlog trans_register_ha(thd, true, binlog_hton, 0); // do it for future commmit + thd->ha_data[binlog_hton->slot].ha_info[1].set_trx_read_write(); } if (rw_count == 0 || !cache_mngr->need_unlog) return rc; diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 5d3c4facd81..1722b1e188a 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -4650,10 +4650,9 @@ static void init_ssl() DBUG_PRINT("info",("ssl_acceptor_fd: %p", ssl_acceptor_fd)); if (!ssl_acceptor_fd) { - sql_print_warning("Failed to setup SSL"); - sql_print_warning("SSL error: %s", sslGetErrString(error)); - opt_use_ssl = 0; - have_ssl= SHOW_OPTION_DISABLED; + sql_print_error("Failed to setup SSL"); + sql_print_error("SSL error: %s", sslGetErrString(error)); + unireg_abort(1); } else ssl_acceptor_stats.init(); diff --git a/sql/rowid_filter.h b/sql/rowid_filter.h index 12710aecb18..cb1615c5925 100644 --- a/sql/rowid_filter.h +++ b/sql/rowid_filter.h @@ -192,6 +192,9 @@ public: */ virtual bool check(void *ctxt, char *elem) = 0; + /* True if the container does not contain any element */ + virtual bool is_empty() = 0; + virtual ~Rowid_filter_container() {} }; @@ -231,6 +234,8 @@ public: virtual ~Rowid_filter() {} + bool is_empty() { return container->is_empty(); } + Rowid_filter_container *get_container() { return container; } void set_tracker(Rowid_filter_tracker *track_arg) { tracker= track_arg; } @@ -268,6 +273,8 @@ public: bool check(char *elem) { + if (container->is_empty()) + return false; bool was_checked= container->check(table, elem); tracker->increment_checked_elements_count(was_checked); return was_checked; @@ -340,6 +347,8 @@ public: my_qsort2(array->front(), array->elements()/elem_size, elem_size, (qsort2_cmp) cmp, cmp_arg); } + + bool is_empty() { return elements() == 0; } }; @@ -369,6 +378,8 @@ public: bool add(void *ctxt, char *elem) { return refpos_container.add(elem); } bool check(void *ctxt, char *elem); + + bool is_empty() { return refpos_container.is_empty(); } }; /** diff --git a/sql/spatial.cc b/sql/spatial.cc index 527a1df9d1f..ec8279daa7a 100644 --- a/sql/spatial.cc +++ b/sql/spatial.cc @@ -1094,10 +1094,9 @@ double Gis_point::calculate_haversine(const Geometry *g, point_temp[point_size-1]= '\0'; Geometry_buffer gbuff; Geometry *gg= Geometry::construct(&gbuff, point_temp, point_size-1); - DBUG_ASSERT(gg); - if (static_cast<Gis_point *>(gg)->get_xy_radian(&x2r, &y2r)) + if (!gg || static_cast<Gis_point *>(gg)->get_xy_radian(&x2r, &y2r)) { - DBUG_ASSERT(0); + *error= 2; return -1; } } @@ -1105,15 +1104,16 @@ double Gis_point::calculate_haversine(const Geometry *g, { 
if (static_cast<const Gis_point *>(g)->get_xy_radian(&x2r, &y2r)) { - DBUG_ASSERT(0); + *error= 2; return -1; } } if (this->get_xy_radian(&x1r, &y1r)) { - DBUG_ASSERT(0); + *error= 2; return -1; } + // // Check boundary conditions: longitude[-180,180] if (!((x2r >= -M_PI && x2r <= M_PI) && (x1r >= -M_PI && x1r <= M_PI))) { @@ -1166,15 +1166,20 @@ int Gis_point::spherical_distance_multipoints(Geometry *g, const double r, { Geometry_buffer buff_temp; Geometry *temp; + const char *pt_ptr= g->get_data_ptr()+ + 4+WKB_HEADER_SIZE*i + POINT_DATA_SIZE*(i-1); // First 4 bytes are handled already, make sure to create a Point memset(s + 4, Geometry::wkb_point, 1); + if (g->no_data(pt_ptr, POINT_DATA_SIZE)) + return 1; + memcpy(s + 5, g->get_data_ptr() + 5, 4); - memcpy(s + 4 + WKB_HEADER_SIZE, g->get_data_ptr() + 4 + WKB_HEADER_SIZE*i +\ - POINT_DATA_SIZE*(i-1), POINT_DATA_SIZE); + memcpy(s + 4 + WKB_HEADER_SIZE, pt_ptr, POINT_DATA_SIZE); s[len-1]= '\0'; temp= Geometry::construct(&buff_temp, s, len); - DBUG_ASSERT(temp); + if (!temp) + return 1; temp_res= this->calculate_haversine(temp, r, err); if (res > temp_res) res= temp_res; @@ -2351,14 +2356,18 @@ int Gis_multi_point::spherical_distance_multipoints(Geometry *g, const double r, Geometry *temp; double temp_res= 0.0; char s[len]; + const char *pt_ptr= get_data_ptr()+ + 4+WKB_HEADER_SIZE*i + POINT_DATA_SIZE*(i-1); // First 4 bytes are handled already, make sure to create a Point memset(s + 4, Geometry::wkb_point, 1); + if (no_data(pt_ptr, POINT_DATA_SIZE)) + return 1; memcpy(s + 5, this->get_data_ptr() + 5, 4); - memcpy(s + 4 + WKB_HEADER_SIZE, this->get_data_ptr() + 4 + WKB_HEADER_SIZE*i +\ - POINT_DATA_SIZE*(i-1), POINT_DATA_SIZE); + memcpy(s + 4 + WKB_HEADER_SIZE, pt_ptr, POINT_DATA_SIZE); s[len-1]= '\0'; temp= Geometry::construct(&buff_temp, s, len); - DBUG_ASSERT(temp); + if (!temp) + return 1; // Optimization for single Multipoint if (num_of_points2 == 1) { @@ -2370,14 +2379,18 @@ int Gis_multi_point::spherical_distance_multipoints(Geometry *g, const double r, Geometry_buffer buff_temp2; Geometry *temp2; char s2[len]; + const char *pt_ptr= g->get_data_ptr()+ + 4+WKB_HEADER_SIZE*j + POINT_DATA_SIZE*(j-1); // First 4 bytes are handled already, make sure to create a Point memset(s2 + 4, Geometry::wkb_point, 1); + if (g->no_data(pt_ptr, POINT_DATA_SIZE)) + return 1; memcpy(s2 + 5, g->get_data_ptr() + 5, 4); - memcpy(s2 + 4 + WKB_HEADER_SIZE, g->get_data_ptr() + 4 + WKB_HEADER_SIZE*j +\ - POINT_DATA_SIZE*(j-1), POINT_DATA_SIZE); + memcpy(s2 + 4 + WKB_HEADER_SIZE, pt_ptr, POINT_DATA_SIZE); s2[len-1]= '\0'; temp2= Geometry::construct(&buff_temp2, s2, len); - DBUG_ASSERT(temp2); + if (!temp2) + return 1; temp_res= static_cast<Gis_point *>(temp)->calculate_haversine(temp2, r, err); if (res > temp_res) res= temp_res; diff --git a/sql/spatial.h b/sql/spatial.h index 7b33529ee94..8974511adf9 100644 --- a/sql/spatial.h +++ b/sql/spatial.h @@ -353,6 +353,7 @@ protected: const char *get_mbr_for_points(MBR *mbr, const char *data, uint offset) const; +public: /** Check if there're enough data remaining as requested @@ -383,6 +384,7 @@ protected: (expected_points > ((m_data_end - data) / (POINT_DATA_SIZE + extra_point_space)))); } +protected: const char *m_data; const char *m_data_end; }; diff --git a/sql/sql_analyze_stmt.h b/sql/sql_analyze_stmt.h index 990c79fb9ad..968b8d38295 100644 --- a/sql/sql_analyze_stmt.h +++ b/sql/sql_analyze_stmt.h @@ -416,12 +416,13 @@ public: uint get_container_elements() const { return container_elements; } + uint 
get_container_lookups() { return n_checks; } + double get_r_selectivity_pct() const { - return static_cast<double>(n_positive_checks) / - static_cast<double>(n_checks); + return n_checks ? static_cast<double>(n_positive_checks) / + static_cast<double>(n_checks) : 0; } size_t get_container_buff_size() const { return container_buff_size; } }; - diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 0913b3834f8..69dff371f11 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -3721,8 +3721,10 @@ int select_max_min_finder_subselect::send_data(List<Item> &items) { cache= val_item->get_cache(thd); set_op(val_item->type_handler()); + cache->setup(thd, val_item); } - cache->store(val_item); + else + cache->store(val_item); it->store(0, cache); } it->assigned(1); diff --git a/sql/sql_explain.cc b/sql/sql_explain.cc index 87095917f71..d839a9bc518 100644 --- a/sql/sql_explain.cc +++ b/sql/sql_explain.cc @@ -1693,6 +1693,7 @@ void Explain_rowid_filter::print_explain_json(Explain_query *query, if (is_analyze) { writer->add_member("r_rows").add_double(tracker->get_container_elements()); + writer->add_member("r_lookups").add_ll(tracker->get_container_lookups()); writer->add_member("r_selectivity_pct"). add_double(tracker->get_r_selectivity_pct() * 100.0); writer->add_member("r_buffer_size"). diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 7d05e2bae79..c7bef0f7027 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -4540,6 +4540,16 @@ TABLE *select_create::create_table_from_items(THD *thd, List<Item> *items, alter_info->create_list.push_back(cr_field, thd->mem_root); } + /* + Item*::type_handler() always returns pointers to + type_handler_{time2|datetime2|timestamp2} no matter what + the current mysql56_temporal_format says. + Let's convert them according to mysql56_temporal_format. + QQ: This perhaps should eventually be fixed to have Item*::type_handler() + respect mysql56_temporal_format, and remove the upgrade from here. + */ + Create_field::upgrade_data_types(alter_info->create_list); + if (create_info->check_fields(thd, alter_info, create_table->table_name, create_table->db, diff --git a/sql/sql_plugin_services.inl b/sql/sql_plugin_services.inl index 3dd41ffbed9..e883081e9f7 100644 --- a/sql/sql_plugin_services.inl +++ b/sql/sql_plugin_services.inl @@ -245,6 +245,11 @@ struct sql_service_st sql_service_handler= mysql_free_result, mysql_fetch_row, mysql_close, + mysql_options, + mysql_fetch_lengths, + mysql_set_character_set, + mysql_num_fields, + mysql_select_db }; #define DEFINE_warning_function(name, ret) { \ diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 794bcca00da..e8874a5ea78 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -1322,6 +1322,15 @@ JOIN::prepare(TABLE_LIST *tables_init, COND *conds_init, uint og_num, /* Fix items that requires the join structure to exist */ fix_items_after_optimize(thd, select_lex); + /* + It is hack which force creating EXPLAIN object always on runt-time arena + (because very top JOIN::prepare executes always with runtime arena, but + constant subquery like (SELECT 'x') can be called with statement arena + during prepare phase of top SELECT). 
+ */ + if (!(thd->lex->context_analysis_only & CONTEXT_ANALYSIS_ONLY_PREPARE)) + create_explain_query_if_not_exists(thd->lex, thd->mem_root); + if (select_lex->handle_derived(thd->lex, DT_PREPARE)) DBUG_RETURN(-1); @@ -1809,7 +1818,6 @@ bool JOIN::build_explain() int JOIN::optimize() { int res= 0; - create_explain_query_if_not_exists(thd->lex, thd->mem_root); join_optimization_state init_state= optimization_state; if (select_lex->pushdown_select) { @@ -7724,6 +7732,7 @@ best_access_path(JOIN *join, double best= DBL_MAX; double best_time= DBL_MAX; double records= DBL_MAX; + ha_rows records_for_key= 0; table_map best_ref_depends_map= 0; /* key_dependent is 0 if all key parts could be used or if there was an @@ -7734,6 +7743,7 @@ best_access_path(JOIN *join, table_map key_dependent= 0; Range_rowid_filter_cost_info *best_filter= 0; double tmp; + double keyread_tmp= 0; ha_rows rec; bool best_uses_jbuf= FALSE; MY_BITMAP *eq_join_set= &s->table->eq_join_set; @@ -8016,8 +8026,12 @@ best_access_path(JOIN *join, /* Limit the number of matched rows */ tmp= cost_for_index_read(thd, table, key, (ha_rows) records, (ha_rows) s->worst_seeks); + records_for_key= (ha_rows) records; + set_if_smaller(records_for_key, thd->variables.max_seeks_for_key); + keyread_tmp= table->file->keyread_time(key, 1, records_for_key); got_cost: tmp= COST_MULT(tmp, record_count); + keyread_tmp= COST_MULT(keyread_tmp, record_count); } } else @@ -8191,12 +8205,14 @@ best_access_path(JOIN *join, } /* Limit the number of matched rows */ - tmp= records; - set_if_smaller(tmp, (double) thd->variables.max_seeks_for_key); - tmp= cost_for_index_read(thd, table, key, (ha_rows) tmp, + tmp= cost_for_index_read(thd, table, key, (ha_rows) records, (ha_rows) s->worst_seeks); + records_for_key= (ha_rows) records; + set_if_smaller(records_for_key, thd->variables.max_seeks_for_key); + keyread_tmp= table->file->keyread_time(key, 1, records_for_key); got_cost2: tmp= COST_MULT(tmp, record_count); + keyread_tmp= COST_MULT(keyread_tmp, record_count); } else { @@ -8215,7 +8231,35 @@ best_access_path(JOIN *join, (found_part & 1)) // start_key->key can be used for index access { double rows= record_count * records; - double access_cost_factor= MY_MIN(tmp / rows, 1.0); + + /* + If we use filter F with selectivity s the the cost of fetching data + by key using this filter will be + cost_of_fetching_1_row * rows * s + + cost_of_fetching_1_key_tuple * rows * (1 - s) + + cost_of_1_lookup_into_filter * rows + Without using any filter the cost would be just + cost_of_fetching_1_row * rows + + So the gain in access cost per row will be + cost_of_fetching_1_row * (1 - s) - + cost_of_fetching_1_key_tuple * (1 - s) - + cost_of_1_lookup_into_filter + = + (cost_of_fetching_1_row - cost_of_fetching_1_key_tuple) * (1 - s) + - cost_of_1_lookup_into_filter + + Here we have: + cost_of_fetching_1_row = tmp/rows + cost_of_fetching_1_key_tuple = keyread_tmp/rows + + Note that access_cost_factor may be greater than 1.0. In this case + we still can expect a gain of using rowid filter due to smaller number + of checks for conditions pushed to the joined table. 
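      A purely illustrative walk-through of the gain formula above, with
      hypothetical numbers that are not taken from the patch: suppose
      rows = 1000, tmp = 2000, keyread_tmp = 500, and a candidate filter with
      selectivity s = 0.1 and a lookup cost of 0.05 per row. Then
        cost_of_fetching_1_row       = tmp / rows         = 2.0
        cost_of_fetching_1_key_tuple = keyread_tmp / rows = 0.5
        gain per row = (2.0 - 0.5) * (1 - 0.1) - 0.05     = 1.30
      so applying the filter is expected to pay off. The access_cost_factor
      computed right below caps (rows_access_cost - keyread_tmp) / rows at 1.0
      before it is passed to best_range_rowid_filter_for_partial_join().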
+ */ + double rows_access_cost= MY_MIN(rows, s->worst_seeks); + double access_cost_factor= MY_MIN((rows_access_cost - keyread_tmp) / + rows, 1.0); filter= table->best_range_rowid_filter_for_partial_join(start_key->key, rows, access_cost_factor); @@ -8400,6 +8444,10 @@ best_access_path(JOIN *join, double rows= record_count * s->found_records; double access_cost_factor= MY_MIN(tmp / rows, 1.0); uint key_no= s->quick->index; + + /* See the comment concerning using rowid filter for with ref access */ + keyread_tmp= s->table->opt_range[key_no].index_only_cost; + access_cost_factor= MY_MIN((rows - keyread_tmp) / rows, 1.0); filter= s->table->best_range_rowid_filter_for_partial_join(key_no, rows, access_cost_factor); @@ -21251,6 +21299,8 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) DBUG_RETURN(NESTED_LOOP_ERROR); join_tab->build_range_rowid_filter_if_needed(); + if (join_tab->rowid_filter && join_tab->rowid_filter->is_empty()) + rc= NESTED_LOOP_NO_MORE_ROWS; join->return_tab= join_tab; diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 15967ded4ad..7e35c17bb64 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -10411,6 +10411,18 @@ do_continue:; set_table_default_charset(thd, create_info, alter_ctx.db); + /* + The ALTER related code cannot alter partitions and change column data types + at the same time. So in case of partition change statements like: + ALTER TABLE t1 DROP PARTITION p1; + we skip implicit data type upgrade (such as "MariaDB 5.3 TIME" to + "MySQL 5.6 TIME" or vice versa according to mysql56_temporal_format). + Note, one can run a separate "ALTER TABLE t1 FORCE;" statement + before or after the partition change ALTER statement to upgrade data types. + */ + if (IF_PARTITIONING(!fast_alter_partition, 1)) + Create_field::upgrade_data_types(alter_info->create_list); + if (create_info->check_fields(thd, alter_info, table_list->table_name, table_list->db) || create_info->fix_period_fields(thd, alter_info)) diff --git a/storage/connect/bsonudf.cpp b/storage/connect/bsonudf.cpp index 1519ed473df..d2ddfc5f489 100644 --- a/storage/connect/bsonudf.cpp +++ b/storage/connect/bsonudf.cpp @@ -3574,14 +3574,14 @@ char *bson_item_merge(UDF_INIT *initid, UDF_ARGS *args, char *result, if (!CheckMemory(g, initid, args, 2, false, false, true)) { JTYP type; BJNX bnx(g); - PBVAL jvp, top = NULL; + PBVAL jvp = NULL, top = NULL; PBVAL jsp[2] = {NULL, NULL}; for (int i = 0; i < 2; i++) { jvp = bnx.MakeValue(args, i, true); if (i) { - if (jvp->Type != type) { + if (jvp && (jvp->Type != type)) { PUSH_WARNING("Argument types mismatch"); goto fin; } // endif type @@ -5725,14 +5725,14 @@ char *bbin_item_merge(UDF_INIT *initid, UDF_ARGS *args, char *result, if (!CheckMemory(g, initid, args, 2, false, false, true)) { JTYP type; BJNX bnx(g); - PBVAL jvp, top = NULL; + PBVAL jvp = NULL, top = NULL; PBVAL jsp[2] = {NULL, NULL}; for (int i = 0; i < 2; i++) { if (i) { jvp = bnx.MakeValue(args, i, true); - if (jvp->Type != type) { + if (jvp && (jvp->Type != type)) { PUSH_WARNING("Argument types mismatch"); goto fin; } // endif type diff --git a/storage/connect/filamdbf.cpp b/storage/connect/filamdbf.cpp index b9ce16c5a7e..c7206b4def3 100644 --- a/storage/connect/filamdbf.cpp +++ b/storage/connect/filamdbf.cpp @@ -245,7 +245,7 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, PTOS topt, bool info) int rc, type, len, field, fields; bool bad, mul; PCSZ target, pwd; - DBFHEADER mainhead, *hp; + DBFHEADER mainhead, *hp = NULL; DESCRIPTOR thisfield, *tfp; FILE *infile = NULL; UNZIPUTL *zutp = 
NULL; diff --git a/storage/federatedx/CMakeLists.txt b/storage/federatedx/CMakeLists.txt index 5f983b5d637..6a9b12ee362 100644 --- a/storage/federatedx/CMakeLists.txt +++ b/storage/federatedx/CMakeLists.txt @@ -1,2 +1,3 @@ SET(FEDERATEDX_SOURCES ha_federatedx.cc federatedx_txn.cc federatedx_io.cc federatedx_io_null.cc federatedx_io_mysql.cc) -MYSQL_ADD_PLUGIN(federatedx ${FEDERATEDX_SOURCES} STORAGE_ENGINE) +MYSQL_ADD_PLUGIN(federatedx ${FEDERATEDX_SOURCES} STORAGE_ENGINE + RECOMPILE_FOR_EMBEDDED) diff --git a/storage/federatedx/federatedx_io_mysql.cc b/storage/federatedx/federatedx_io_mysql.cc index 669a8faef63..fc32146b5cd 100644 --- a/storage/federatedx/federatedx_io_mysql.cc +++ b/storage/federatedx/federatedx_io_mysql.cc @@ -31,6 +31,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include <my_global.h> #include "sql_priv.h" #include <mysqld_error.h> +#include <mysql.h> #include "ha_federatedx.h" diff --git a/storage/federatedx/federatedx_pushdown.cc b/storage/federatedx/federatedx_pushdown.cc index 664f0570238..0d58e0d8270 100644 --- a/storage/federatedx/federatedx_pushdown.cc +++ b/storage/federatedx/federatedx_pushdown.cc @@ -35,6 +35,64 @@ */ +/* + Check if table and database names are equal on local and remote servers + + SYNOPSIS + local_and_remote_names_match() + tbl_share Pointer to current table TABLE_SHARE structure + fshare Pointer to current table FEDERATEDX_SHARE structure + + DESCRIPTION + FederatedX table on the local server may refer to a table having another + name on the remote server. The remote table may even reside in a different + database. For example: + + -- Remote server + CREATE TABLE t1 (id int(32)); + + -- Local server + CREATE TABLE t2 ENGINE="FEDERATEDX" + CONNECTION="mysql://joe:joespass@192.168.1.111:9308/federatedx/t1"; + + It's not a problem while the federated_pushdown is disabled 'cause + the CONNECTION strings are being parsed for every table during + the execution, so the table names are translated from local to remote. + But in case of the federated_pushdown the whole query is pushed down + to the engine without any translation, so the remote server may try + to select data from a nonexistent table (for example, query + "SELECT * FROM t2" will try to retrieve data from nonexistent "t2"). 
+ + This function checks whether there is a mismatch between local and remote + table/database names + + RETURN VALUE + false names are equal + true names are not equal + +*/ +bool local_and_remote_names_mismatch(const TABLE_SHARE *tbl_share, + const FEDERATEDX_SHARE *fshare) +{ + + if (lower_case_table_names) + { + if (strcasecmp(fshare->database, tbl_share->db.str) != 0) + return true; + } + else + { + if (strncmp(fshare->database, tbl_share->db.str, tbl_share->db.length) != 0) + return true; + } + + return my_strnncoll(system_charset_info, (uchar *) fshare->table_name, + strlen(fshare->table_name), + (uchar *) tbl_share->table_name.str, + tbl_share->table_name.length) != 0; +} + + static derived_handler* create_federatedx_derived_handler(THD* thd, TABLE_LIST *derived) { @@ -42,7 +100,6 @@ create_federatedx_derived_handler(THD* thd, TABLE_LIST *derived) return 0; ha_federatedx_derived_handler* handler = NULL; - handlerton *ht= 0; SELECT_LEX_UNIT *unit= derived->derived; @@ -54,9 +111,16 @@ create_federatedx_derived_handler(THD* thd, TABLE_LIST *derived) { if (!tbl->table) return 0; - if (!ht) - ht= tbl->table->file->partition_ht(); - else if (ht != tbl->table->file->partition_ht()) + /* + We intentionally don't support partitioned federatedx tables here, so + use file->ht and not file->partition_ht(). + */ + if (tbl->table->file->ht != federatedx_hton) + return 0; + + const FEDERATEDX_SHARE *fshare= + ((ha_federatedx*)tbl->table->file)->get_federatedx_share(); + if (local_and_remote_names_mismatch(tbl->table->s, fshare)) return 0; } } @@ -170,15 +234,22 @@ create_federatedx_select_handler(THD* thd, SELECT_LEX *sel) return 0; ha_federatedx_select_handler* handler = NULL; - handlerton *ht= 0; for (TABLE_LIST *tbl= thd->lex->query_tables; tbl; tbl= tbl->next_global) { if (!tbl->table) return 0; - if (!ht) - ht= tbl->table->file->partition_ht(); - else if (ht != tbl->table->file->partition_ht()) + /* + We intentionally don't support partitioned federatedx tables here, so + use file->ht and not file->partition_ht(). + */ + if (tbl->table->file->ht != federatedx_hton) + return 0; + + const FEDERATEDX_SHARE *fshare= + ((ha_federatedx*)tbl->table->file)->get_federatedx_share(); + + if (local_and_remote_names_mismatch(tbl->table->s, fshare)) return 0; } diff --git a/storage/federatedx/ha_federatedx.cc b/storage/federatedx/ha_federatedx.cc index 62c48ed19d8..6199504e04f 100644 --- a/storage/federatedx/ha_federatedx.cc +++ b/storage/federatedx/ha_federatedx.cc @@ -315,6 +315,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#define MYSQL_SERVER 1 #include <my_global.h> #include <mysql/plugin.h> +#include <mysql.h> #include "ha_federatedx.h" #include "sql_servers.h" #include "sql_analyse.h" // append_escaped() @@ -610,7 +611,7 @@ error: parse_url() mem_root MEM_ROOT pointer for memory allocation share pointer to FEDERATEDX share - table pointer to current TABLE class + table_s pointer to current TABLE_SHARE class table_create_flag determines what error to throw DESCRIPTION diff --git a/storage/federatedx/ha_federatedx.h b/storage/federatedx/ha_federatedx.h index a382f9367f9..3eb7a9c662c 100644 --- a/storage/federatedx/ha_federatedx.h +++ b/storage/federatedx/ha_federatedx.h @@ -463,6 +463,7 @@ public: int reset(void); int free_result(void); + const FEDERATEDX_SHARE *get_federatedx_share() const { return share; } friend class ha_federatedx_derived_handler; friend class ha_federatedx_select_handler; }; diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index 0478a7dd01a..d3f7ca90889 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -193,7 +193,7 @@ ADD_DEPENDENCIES(rocksdb_aux_lib GenError) # MARIAROCKS-TODO: how to properly depend on -lrt ? TARGET_LINK_LIBRARIES(rocksdb_aux_lib rocksdblib ${ZLIB_LIBRARY}) -if (UNIX AND NOT APPLE) +if (UNIX AND NOT APPLE AND NOT CMAKE_SYSTEM_NAME STREQUAL "OpenBSD") TARGET_LINK_LIBRARIES(rocksdb_aux_lib -lrt) endif() TARGET_LINK_LIBRARIES(rocksdb_aux_lib ${ATOMIC_EXTRA_LIBS}) @@ -244,7 +244,7 @@ ENDIF() # ADD_SUBDIRECTORY(unittest) #ENDIF() -if (UNIX AND NOT APPLE) +if (UNIX AND NOT APPLE AND NOT CMAKE_SYSTEM_NAME STREQUAL "OpenBSD") SET(rocksdb_static_libs ${rocksdb_static_libs} "-lrt") endif() diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 09635272747..fa9feb2c43b 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -266,7 +266,7 @@ Rdb_cf_manager cf_manager; Rdb_ddl_manager ddl_manager; Rdb_binlog_manager binlog_manager; -#if !defined(_WIN32) && !defined(__APPLE__) +#if !defined(_WIN32) && !defined(__APPLE__) && !defined(__OpenBSD__) Rdb_io_watchdog *io_watchdog = nullptr; #endif /** @@ -850,7 +850,7 @@ static void rocksdb_set_io_write_timeout( void *const var_ptr MY_ATTRIBUTE((__unused__)), const void *const save) { DBUG_ASSERT(save != nullptr); DBUG_ASSERT(rdb != nullptr); -#if !defined(_WIN32) && !defined(__APPLE__) +#if !defined(_WIN32) && !defined(__APPLE__) && !defined(__OpenBSD__) DBUG_ASSERT(io_watchdog != nullptr); #endif @@ -859,7 +859,7 @@ static void rocksdb_set_io_write_timeout( const uint32_t new_val = *static_cast<const uint32_t *>(save); rocksdb_io_write_timeout_secs = new_val; -#if !defined(_WIN32) && !defined(__APPLE__) +#if !defined(_WIN32) && !defined(__APPLE__) && !defined(__OpenBSD__) io_watchdog->reset_timeout(rocksdb_io_write_timeout_secs); #endif RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); @@ -5787,7 +5787,7 @@ static int rocksdb_init_func(void *const p) { directories.push_back(myrocks::rocksdb_wal_dir); } -#if !defined(_WIN32) && !defined(__APPLE__) +#if !defined(_WIN32) && !defined(__APPLE__) && !defined(__OpenBSD__) io_watchdog = new Rdb_io_watchdog(std::move(directories)); io_watchdog->reset_timeout(rocksdb_io_write_timeout_secs); #endif @@ -5886,7 +5886,7 @@ static int rocksdb_done_func(void *const p) { delete commit_latency_stats; commit_latency_stats = nullptr; -#if !defined(_WIN32) && !defined(__APPLE__) +#if !defined(_WIN32) && !defined(__APPLE__) && !defined(__OpenBSD__) delete io_watchdog; io_watchdog = nullptr; #endif 
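The federatedx_pushdown.cc hunk above explains why pushdown has to be refused
when the local and remote table names differ. The following is a minimal,
self-contained C++ sketch of that decision for the t1/t2 example used in the
comment; the structs and the names_mismatch() helper are simplified stand-ins
for illustration only, not the server's actual types or function.

#include <cstring>
#include <iostream>

// Simplified stand-ins for TABLE_SHARE and FEDERATEDX_SHARE; the real check
// (local_and_remote_names_mismatch) also honours lower_case_table_names and
// uses the server collation when comparing table names.
struct LocalShare  { const char *db;       const char *table_name; };
struct RemoteShare { const char *database; const char *table_name; };

static bool names_mismatch(const LocalShare &l, const RemoteShare &r)
{
  return std::strcmp(l.db, r.database) != 0 ||
         std::strcmp(l.table_name, r.table_name) != 0;
}

int main()
{
  // Local:  CREATE TABLE t2 ... ENGINE=FEDERATEDX
  //         CONNECTION="mysql://...:9308/federatedx/t1";
  LocalShare  local  = {"test", "t2"};
  RemoteShare remote = {"federatedx", "t1"};

  // Names differ, so no derived/select handler is created and the query is
  // executed the usual way, with per-table name translation.
  std::cout << (names_mismatch(local, remote) ? "no pushdown" : "pushdown ok")
            << std::endl;
  return 0;
}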
diff --git a/storage/sphinx/CMakeLists.txt b/storage/sphinx/CMakeLists.txt index 7cae7982e05..185ffdaaf91 100644 --- a/storage/sphinx/CMakeLists.txt +++ b/storage/sphinx/CMakeLists.txt @@ -19,4 +19,4 @@ IF(MSVC) ENDIF(MSVC) SET(SPHINX_SOURCES ha_sphinx.cc snippets_udf.cc) -MYSQL_ADD_PLUGIN(sphinx ${SPHINX_SOURCES} STORAGE_ENGINE) +MYSQL_ADD_PLUGIN(sphinx ${SPHINX_SOURCES} STORAGE_ENGINE RECOMPILE_FOR_EMBEDDED) diff --git a/vio/viosslfactories.c b/vio/viosslfactories.c index 8ff71212878..289c28d4cf4 100644 --- a/vio/viosslfactories.c +++ b/vio/viosslfactories.c @@ -97,7 +97,7 @@ sslGetErrString(enum enum_ssl_init_error e) static int vio_set_cert_stuff(SSL_CTX *ctx, const char *cert_file, const char *key_file, - enum enum_ssl_init_error* error) + my_bool is_client, enum enum_ssl_init_error* error) { DBUG_ENTER("vio_set_cert_stuff"); DBUG_PRINT("enter", ("ctx: %p cert_file: %s key_file: %s", @@ -134,10 +134,10 @@ vio_set_cert_stuff(SSL_CTX *ctx, const char *cert_file, const char *key_file, } /* - If we are using DSA, we can copy the parameters from the private key - Now we know that a key and cert have been set against the SSL context + If certificate is used check if private key matches. + Note, that server side has to use certificate. */ - if (cert_file && !SSL_CTX_check_private_key(ctx)) + if ((cert_file != NULL || !is_client) && !SSL_CTX_check_private_key(ctx)) { *error= SSL_INITERR_NOMATCH; DBUG_PRINT("error", ("%s",sslGetErrString(*error))); @@ -353,7 +353,8 @@ new_VioSSLFd(const char *key_file, const char *cert_file, #endif } - if (vio_set_cert_stuff(ssl_fd->ssl_context, cert_file, key_file, error)) + if (vio_set_cert_stuff(ssl_fd->ssl_context, cert_file, key_file, + is_client_method, error)) { DBUG_PRINT("error", ("vio_set_cert_stuff failed")); goto err2; |
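The viosslfactories.c hunk above changes vio_set_cert_stuff() so that the
private key is validated whenever a certificate was supplied, and always on
the server side. A minimal sketch of that predicate follows; the helper name
is hypothetical, since the real code inlines the condition.

#include <cstddef>

// Restates the condition guarding SSL_CTX_check_private_key() in the patched
// vio_set_cert_stuff(): a client with neither certificate nor key skips the
// check, while the server side (which must present a certificate) always
// performs it.
static bool must_check_private_key(const char *cert_file, bool is_client)
{
  return cert_file != NULL || !is_client;
}

// must_check_private_key("server-cert.pem", false) -> true  (server, cert set)
// must_check_private_key(NULL, true)               -> false (client, no cert)
// must_check_private_key(NULL, false)              -> true  (server without a
//   cert: the check is run and presumably fails, yielding SSL_INITERR_NOMATCH)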